Results 1 - 10 of 119 for relu (0.17 sec)
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir
%2 = "tf.Relu6"(%1) {device = ""} : (tensor<*xf32>) -> tensor<*xf32> %3 = "tf.MatMul"(%arg0, %arg1) { transpose_a = true, transpose_b = false } : (tensor<1x10xf32>, tensor<10x10xf32>) -> tensor<*xf32> %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<10xf32>) -> tensor<*xf32> %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
Last Modified: Fri May 10 04:07:09 UTC 2024 - 26.5K bytes
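The pass under test lifts matched sequences such as the MatMul+BiasAdd+Relu chain above into private composite functions invoked through tf.PartitionedCall. A minimal sketch of the lifted call site, with the function name and shapes chosen for illustration rather than taken from the test:

%out = "tf.PartitionedCall"(%arg0, %arg1, %cst) {config = "", config_proto = "", executor_type = "", f = @composite_matmul_with_bias_and_relu_fn_1} : (tensor<1x10xf32>, tensor<10x10xf32>, tensor<10xf32>) -> tensor<*xf32>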
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir
// CHECK-SAME: f = @composite_conv3d_fn_1}>
// CHECK-NOT: {_tfl_quant_trait = "fully_quantizable"
// CHECK: %[[RELU:.*]] = "tf.Relu"(%[[PARTITIONEDCALL_0]])
// CHECK: return %[[RELU]]
// CHECK-LABEL: private @composite_conv3d_fn_1
// WEIGHTONLY-DAG: %[[CST:.*]] = "tf.Const"() {{.*}} : () -> tensor<2x3x3x3x2xf32>
// WEIGHTONLY: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"(%arg0, %[[CST]])
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 11.8K bytes
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
{"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "!tf_type.qint8"}, {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "!tf_type.qint8"}, ]
Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes
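Each entry in this table parameterizes a quantized function template: quantized_ops lists the fused op sequence the function implements (with ${main_op} substituted at instantiation), act_func names the requantization/activation helper, and output_type fixes the result element type. Instantiated with a hypothetical Conv2D main op, the first entry above would read:

{"quantized_ops": ["Conv2D", "BiasAdd", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "!tf_type.qint8"}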
tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir
// CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 13.2K bytes
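The _FusedConv2D above is the result of matching an unfused Conv2D, BiasAdd, Relu chain and folding the latter two ops into fused_ops. A sketch of the kind of input the matcher rewrites, with attributes mirroring the CHECK line (the actual test input is not shown in this excerpt):

%0 = "tf.Conv2D"(%arg2, %arg1) {data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>) -> tensor<*xf32>
%1 = "tf.BiasAdd"(%0, %arg0) {data_format = "NHWC"} : (tensor<*xf32>, tensor<128xf32>) -> tensor<*xf32>
%2 = "tf.Relu"(%1) : (tensor<*xf32>) -> tensor<*xf32>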
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
def MaximumOfZeroToRelu : Pat<
  (TF_MaximumOp:$maximum_op $x, $y),
  (TF_ReluOp:$dest $x),
  [(IsConstantValueOf<0> $y), (IsDeviceCompatible $maximum_op)],
  [(CopyAttrs $maximum_op, $dest)]>;

//===----------------------------------------------------------------------===//
// Canonicalize tf.Relu of Minimum 6 to tf.Relu6
Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 17K bytes
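Read as a rewrite: MaximumOfZeroToRelu turns tf.Maximum(x, 0) into tf.Relu(x) when the second operand is a constant zero and the target device is compatible, copying the original op's attributes onto the new Relu. A before/after sketch on illustrative IR, not taken from the test suite:

// Before canonicalization:
%zero = "tf.Const"() {value = dense<0.000000e+00> : tensor<f32>} : () -> tensor<f32>
%0 = "tf.Maximum"(%arg0, %zero) : (tensor<8xf32>, tensor<f32>) -> tensor<8xf32>
// After canonicalization:
%0 = "tf.Relu"(%arg0) : (tensor<8xf32>) -> tensor<8xf32>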
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
{"quantized_ops": ["${main_op}", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}"], "act_func": "internal_dequantize_no_activation_fn", "output_type": "f32"},
Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes
tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir
%cst = "arith.constant"() {value = dense<[[[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>} : () -> tensor<1x1x3xf32> %prelu = "tfl.prelu"(%arg0, %cst) : (tensor<1x10x10x3xf32>, tensor<1x1x3xf32>) -> tensor<1x10x10x3xf32> func.return %prelu : tensor<1x10x10x3xf32> // CHECK: %[[cst:.*]] = arith.constant dense<[{{\[}}[1.66394591, 3.61694336, 2.0382936]]]> : tensor<1x1x3xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.4K bytes
tensorflow/compiler/mlir/tfr/tests/decompose.mlir
%relu_attr = tfr.constant "RELU" -> !tfr.attr
%relu6_attr = tfr.constant "RELU6" -> !tfr.attr
%reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
%none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
%relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 16.7K bytes
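tfr.quant_act_range presumably maps an activation attribute plus quantization parameters (scale, zero point) to that activation's clamp bounds in the quantized domain; that semantic is an assumption here, not stated in the excerpt. Under it, with q(x) = round(x / scale) + zp, scale = 0.1, and zp = -128:

// Assumed semantics of the range queries above (int8 qmax = 127):
// RELU  -> [q(0), qmax] = [-128, 127]
// RELU6 -> [q(0), q(6)] = [-128, -68], since q(6) = 60 + (-128) = -68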
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
// Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
if (IsGpuDevice(conv)) {
  auto activation = GetActivation(bias_add);
  if (!activation || activation->getName().stripDialect() != "Relu" ||
      !bias_add.getOutput().hasOneUse()) {
    (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
      diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
    });
    return false;
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes
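Per this check, GPU fusion is rejected unless the op consuming the BiasAdd is exactly Relu and the BiasAdd result has a single use; a Relu6 activation, for example, would leave the chain unfused on GPU. A sketch of such a rejected chain (shapes and attributes illustrative):

%0 = "tf.Conv2D"(%input, %filter) {data_format = "NHWC", dilations = [1, 1, 1, 1], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x16xf32>) -> tensor<8x32x32x16xf32>
%1 = "tf.BiasAdd"(%0, %bias) {data_format = "NHWC"} : (tensor<8x32x32x16xf32>, tensor<16xf32>) -> tensor<8x32x32x16xf32>
// Relu6 rather than Relu: the GPU path in fused_kernel_matcher.cc bails out here.
%2 = "tf.Relu6"(%1) : (tensor<8x32x32x16xf32>) -> tensor<8x32x32x16xf32>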
tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir
%14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32> %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32> %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32> %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32> %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.5K bytes