Results 111 - 120 of 138 for relu6
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32> return %0 : tensor<1x4x4x3xf32> }
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// CHECK: func private @quantized_conv2d_with_relu6_fn
// CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
// CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
// CHECK: func private @quantized_matmul_with_bias_fn
// CHECK: func private @quantized_matmul_with_bias_and_relu_fn
// CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
// CHECK: func private @quantized_matmul_fn
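The _relu6 suffix in these function names refers to the ReLU6 activation (tf.nn.relu6 in TensorFlow), which clamps its input to [0, 6]; the fixed upper bound keeps the output range known, which is convenient for fixed-point quantization. A minimal sketch:

    import numpy as np

    def relu6(x):
        # Clamp to the range [0, 6].
        return np.minimum(np.maximum(x, 0.0), 6.0)

    print(relu6(np.array([-1.0, 3.0, 8.0])))  # -> [0. 3. 6.]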
Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 3.3K bytes
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
    dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
    fused_activation_function = "RELU", padding = "SAME",
    stride_h = 1 : i32, stride_w = 1 : i32
  } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
%conv2 = "tfl.conv_2d"(%0, %w, %b2) {
    dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
    fused_activation_function = "RELU", padding = "SAME",
    stride_h = 1 : i32, stride_w = 1 : i32
Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
// Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
if (IsGpuDevice(conv)) {
  auto activation = GetActivation(bias_add);
  if (!activation || activation->getName().stripDialect() != "Relu" ||
      !bias_add.getOutput().hasOneUse()) {
    (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
      diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
    });
    return false;
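The pattern being matched is the unfused Conv2D -> BiasAdd -> Relu chain; on GPU no other activation qualifies for fusion. A hedged Python sketch of a graph that would match (the function name and shapes are illustrative, not from the file):

    import tensorflow as tf

    @tf.function
    def conv_bias_relu(x, filters, bias):
        # The unfused chain the matcher looks for; on GPU, Relu is the
        # only accepted activation for this fusion.
        y = tf.nn.conv2d(x, filters, strides=1, padding='VALID')
        y = tf.nn.bias_add(y, bias)
        return tf.nn.relu(y)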
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes
tensorflow/compiler/mlir/tfr/integration/graph_decompose_test.py
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])

def testWithKnownKernel(self):

  @def_function.function
  def biasd_dense_elu(x, y, z):
    dot = gen_composite_ops.my_biased_dense(x, y, z)
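The expected values can be verified by hand, assuming biased_dense computes relu(x @ y + z); the node_expansion_test below asserts the same numbers. A NumPy sketch of the arithmetic:

    import numpy as np

    t1 = np.array([[1.0, 2.0], [3.0, 4.0]])
    t2 = np.array([[1.0, 2.0], [3.0, 4.0]])
    t3 = np.full((2, 2), -10.0)
    out = np.maximum(t1 @ t2 + t3, 0.0)
    # t1 @ t2 = [[7, 10], [15, 22]]; adding -10 gives [[-3, 0], [5, 12]];
    # relu zeroes the negatives, so out.reshape(-1) == [0, 0, 5, 12].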
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 3.2K bytes
tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])

def testWithKnownKernel(self):
  def biasd_dense_elu(x, y, z):
    dot = gen_composite_ops.my_biased_dense(x, y, z)
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 3.9K bytes
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir
// tf.MyCustomOp is the result of conversion to a Custom op
%2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}
       : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
%3 = "tfl.exp"(%2) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
func.return %3 : tensor<4xf32>
Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 4.1K bytes
tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir
%cst = arith.constant dense<0.0> : tensor<2x3xbf16>
%0 = "tfl.maximum"(%arg0, %cst) : (tensor<2x3xbf16>, tensor<2x3xbf16>) -> tensor<2x3xbf16>
func.return %0 : tensor<2x3xbf16>
// CHECK: %[[RESULT:.*]] = "tfl.relu"(%arg0)
// CHECK: return %[[RESULT]]
}

// CHECK-LABEL: fuseScalarAddIntoConv2dBf16
func.func @fuseScalarAddIntoConv2dBf16(%arg0: tensor<256x32x32x3xbf16>, %arg1: tensor<16x3x3x3xbf16>) -> tensor<256x8x7x16xbf16> {
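The first test in this excerpt checks that tfl.maximum against a zero constant is rewritten to tfl.relu, which is valid because the two are pointwise identical. A quick NumPy check (float32 here; the test itself uses bf16):

    import numpy as np

    x = np.array([[-1.5, 0.0, 2.0], [3.0, -4.0, 5.0]], dtype=np.float32)
    relu = np.where(x > 0, x, 0.0)
    # maximum(x, 0) == relu(x) elementwise, so the rewrite is safe.
    assert np.array_equal(np.maximum(x, 0.0), relu)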
Last Modified: Thu May 02 09:41:17 UTC 2024 - 5.8K bytes
tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir
%14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32> %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32> %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32> %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32> %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.5K bytes
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
func_op.eraseResult(0);
func_op.insertResult(0, new_call_op.getResult(0).getType(),
                     /*resultAttrs=*/nullptr);
// Modify the quantized fused function to do dequantize+relu(6).
rewriter.setInsertionPoint(req_op);
Value new_result = rewriter.create<mlir::stablehlo::UniformDequantizeOp>(
    req_op.getLoc(), func_op.getResultTypes()[0], req_op.getOperand());
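The rewritten function ends up performing a dequantize followed by the fused activation. A minimal sketch of that composed behavior for the relu6 case (illustrative; per-tensor quantization assumed and scale/zero-point handling simplified):

    import numpy as np

    def dequantize_then_relu6(q, scale, zero_point):
        # Dequantize (q - zero_point) * scale, then apply the activation
        # that had been fused into the quantized function; relu(6) clamps
        # the result to [0, 6].
        x = (q.astype(np.float32) - zero_point) * scale
        return np.clip(x, 0.0, 6.0)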
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes