Results 71 - 80 of 138 for relu6 (0.24 sec)
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// CHECK-PER-TENSOR: return %[[UNIFORM_QUANTIZE_0]] : tensor<?x3x4x2x!quant.uniform<i8:f32, 0.0031372549487095253:-128>>
// -----
// Tests that fused pattern for convolution + bias + relu6 with
// dynamic batch dimension is properly quantized.
// Note that this checks for identical condition as
// quantize_conv_with_bias_dynamic_fn, omitting stablehlo.clamp.
Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes
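Relu6 simply clamps its input to the range [0, 6]. As a point of reference, a standalone StableHLO equivalent of relu6 using stablehlo.clamp (the op the comment says is omitted here) might be sketched as follows; the function name and shapes are illustrative, not taken from the test:

func.func @relu6_clamp_sketch(%x: tensor<?x3x4x2xf32>) -> tensor<?x3x4x2xf32> {
  // Clamp bounds for relu6.
  %zero = stablehlo.constant dense<0.000000e+00> : tensor<f32>
  %six = stablehlo.constant dense<6.000000e+00> : tensor<f32>
  // min(max(x, 0), 6) expressed as a single clamp.
  %0 = "stablehlo.clamp"(%zero, %x, %six) : (tensor<f32>, tensor<?x3x4x2xf32>, tensor<f32>) -> tensor<?x3x4x2xf32>
  func.return %0 : tensor<?x3x4x2xf32>
}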
tensorflow/compiler/mlir/tensorflow/tests/gpu_fusion.mlir
// Since the tf.AddV2 op has two uses, we have a _FusedBatchNormEx without the
// Relu activation and we only fuse the add.
// CHECK-NEXT: %[[Y:[a-z0-9]*]], {{.*}}_FusedBatchNormEx
// CHECK-NEXT: %[[relu:[a-z0-9]*]] ={{.*}}Relu"(%[[Y]]
// CHECK-NEXT: return %[[relu]]
Last Modified: Thu Mar 24 05:47:26 UTC 2022 - 3.6K bytes
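For orientation, the unfused pattern that this GPU fusion pass matches can be sketched as below; names, shapes, and the omitted attributes are illustrative rather than copied from the test:

func.func @bn_add_relu_sketch(%x: tensor<8x8x8x8xf32>, %scale: tensor<8xf32>, %offset: tensor<8xf32>, %mean: tensor<8xf32>, %var: tensor<8xf32>, %side: tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32> {
  // Batch norm, then an add of a side input, then Relu. The pass folds the
  // add (and the Relu, when the add result has a single use) into one
  // _FusedBatchNormEx op; when the add has two uses, the Relu stays separate,
  // which is what the CHECK lines above verify.
  %y, %bm, %bv, %r1, %r2, %r3 = "tf.FusedBatchNormV3"(%x, %scale, %offset, %mean, %var) : (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>) -> (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>)
  %add = "tf.AddV2"(%y, %side) : (tensor<8x8x8x8xf32>, tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
  %relu = "tf.Relu"(%add) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
  func.return %relu : tensor<8x8x8x8xf32>
}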
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
let summary = "Computes rectified linear 6 gradients for a Relu6 operation.";

let arguments = (ins
  Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu6 operation.}]>:$gradients,
  Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu6 operation, or its output; using either one produces the same result.}]>:$features
);
Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes
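Put differently, Relu6Grad outputs gradients[i] wherever 0 < features[i] < 6 and 0 elsewhere. A minimal TF-dialect sketch of a call (the function name and shape are illustrative):

func.func @relu6_grad_sketch(%gradients: tensor<4xf32>, %features: tensor<4xf32>) -> tensor<4xf32> {
  // backprop[i] = gradients[i] if 0 < features[i] < 6, else 0.
  %backprop = "tf.Relu6Grad"(%gradients, %features) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
  func.return %backprop : tensor<4xf32>
}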
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/graph-scalar-input.pbtxt
Last Modified: Fri Apr 24 00:20:25 UTC 2020 - 1.3K bytes
tensorflow/compiler/mlir/lite/tests/raise-custom-ops.mlir
// will be preserved since it has uses.
%2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
// will be preserved since it has side-effect.
"tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
func.return %2 : tensor<4xf32>
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4K bytes
tensorflow/compiler/mlir/tensorflow/tests/device_assignment_by_func_attr.mlir
// CHECK: device = "cpu"
%2 = "tf.Relu"(%1) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "cpu"} : (tensor<3x3xf32>) -> tensor<3x3xf32>
// CHECK: device = "xpu"
%3 = "tf.Relu"(%2) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"]} : (tensor<3x3xf32>) -> tensor<3x3xf32>
func.return %3 : tensor<3x3xf32>
Last Modified: Tue May 10 00:30:05 UTC 2022 - 1.6K bytes
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir
%1 = "chlo.broadcast_maximum"(%0, %arg0) {broadcast_dimensions = array<i64>} : (tensor<i32>, tensor<?xi32>) -> tensor<?xi32> func.return %1 : tensor<?xi32> } // CHECK-LABEL: func @relu6( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1xi32>) -> tensor<1xi32> { // CHECK-DAG: %[[VAL_1:.*]] = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
Last Modified: Wed May 29 07:26:59 UTC 2024 - 340.2K bytes
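For reference, the TF-dialect form of relu6 that such a maximum/minimum pattern corresponds to can be sketched as below; only the zero constant and the tensor<1xi32> signature come from the CHECK lines above, the function name and exact op sequence are illustrative:

func.func @relu6_tf_sketch(%x: tensor<1xi32>) -> tensor<1xi32> {
  %zero = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
  %six = "tf.Const"() <{value = dense<6> : tensor<i32>}> : () -> tensor<i32>
  // relu6(x) = min(max(x, 0), 6)
  %max = "tf.Maximum"(%x, %zero) : (tensor<1xi32>, tensor<i32>) -> tensor<1xi32>
  %min = "tf.Minimum"(%max, %six) : (tensor<1xi32>, tensor<i32>) -> tensor<1xi32>
  func.return %min : tensor<1xi32>
}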
tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc
func.func @main(%arg0: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
  %0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
  %1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
  %2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
  %3 = "tf.Relu"(%2) : (tensor<?xi32>) -> tensor<?xi32>
  %4 = "tf.Relu"(%1) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 7.8K bytes
tensorflow/compiler/mlir/tfrt/tests/ir/testdata/test.mlir
%cpu = corert.get_op_handler %ch "cpu"
%0 = corert.executeop(%cpu) "tf.Relu"(%arg0) { T = f32 } : 1
%arg1 = tfrt_fallback_async.corert_tensorhandle_to_fallback_tensor %arg1_th {_tfrt_cost = 1 : i64, device = "/CPU:0"} : (!corert.tensorhandle) -> (!tfrt_fallback.tf_tensor)
%1 = tfrt_fallback_async.executeop key(0) cost(100) device("/CPU:0") "tf.Relu"(%arg1) { T = f32 } : 1
tfrt.return
Last Modified: Fri Mar 25 11:03:04 UTC 2022 - 496 bytes
tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc
rewriter.replaceOp(batch_norm, op->getResults());
// Depending on the case, we may fuse the add, the relu, or both.
if (!add_op || add_op.getZ().hasOneUse()) {
  // We fuse the Relu only if the add has a single use, otherwise we only
  // fuse the add itself.
  op->setAttr("activation_mode", rewriter.getStringAttr("Relu"));
  rewriter.replaceOp(relu_op, op->getResult(0));
}
if (add_op) {
Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 5.2K bytes
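Assuming the fused op is registered in the TF dialect as tf._FusedBatchNormEx (the name the gpu_fusion test above matches), the fully fused batch-norm + Relu result this rewrite produces can be sketched as follows; shapes and omitted attributes are illustrative:

func.func @fused_bn_relu_sketch(%x: tensor<8x8x8x8xf32>, %scale: tensor<8xf32>, %offset: tensor<8xf32>, %mean: tensor<8xf32>, %var: tensor<8xf32>) -> tensor<8x8x8x8xf32> {
  // One op replaces the FusedBatchNormV3 + Relu pair; the activation is
  // carried by the activation_mode attribute set in the rewrite above.
  %y, %bm, %bv, %r1, %r2, %r3 = "tf._FusedBatchNormEx"(%x, %scale, %offset, %mean, %var) {activation_mode = "Relu"} : (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>) -> (tensor<8x8x8x8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>)
  func.return %y : tensor<8x8x8x8xf32>
}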