- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 51 for relu6 (0.24 sec)
-
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
}]> ]; } def TFL_Relu6Op: TFL_Op<"relu6", [ PredOpTrait<"x and y must have same element type", TFL_TCresVTEtIsSameAsOp<0, 0>>, Pure, QuantizableResult, SameOperandsAndResultShape]> { let summary = "Relu6 operator"; let description = [{ Element-wise Relu6 operator x -> max(0, min(6, x)) }];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
// CHECK: return %[[CLAMP]] : tensor<?x28x28x16xf32> // CHECK: } // ----- // Because the operand of shape_of is other than the target conv, // should not match conv bias relu6 dynamic pattern. // CHECK-LABEL: @conv_with_bias_and_relu6_dynamic_shape_not_same_op_fn( // CHECK-SAME: %[[ARG_0:.*]]: tensor<?x28x28x1xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
// ReLU patterns def MatchReluPattern : Pat< (TFL_MaximumOp $input, (Arith_ConstantOp $Zero)), (TFL_ReluOp $input), [(FloatValueEquals<"0"> $Zero)]>; // Optimize Minimum of tf.Relu and constant six to tf.Relu6 def MinimumOfReluAnd6ToRelu6 : Pat<(TFL_MinimumOp (TFL_ReluOp $x), (Arith_ConstantOp $y)), (TFL_Relu6Op $x), [(IsConstantValueOf<6> $y)]>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
func.return %0 : tensor<4xi64> } // CHECK-LABEL: testReluOfMinimum6ToRelu6Float func.func @testReluOfMinimum6ToRelu6Float(%arg0: tensor<4xf32>) -> tensor<4xf32> { // CHECK: %0 = "tf.Relu6"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<4xf32>) -> tensor<4xf32> // CHECK: return %0 %cst_6 = arith.constant dense<6.000000e+00> : tensor<f32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
"TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual", // Others "AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty", "Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad", "LeakyReluGrad", "Elu", "EluGrad", "Selu", "SeluGrad", "Select", "SelectV2", "Transpose", "ConjugateTranspose", "_UnaryOpsComposition", "CollectiveReduceV2",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
%0 = "tf.SquaredDifference"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> %1 = "tf.Relu6"(%0) : (tensor<1xf32>) -> tensor<1xf32> func.return %1: tensor<1xf32> // CHECK-LABEL: squaredDifferenceRelu // CHECK: tfl.squared_difference %arg0, %arg1 : tensor<1xf32> // CHECK: %1 = "tfl.relu6"(%0) : (tensor<1xf32>) -> tensor<1xf32> // CHECK: return }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir
} // ----- // CHECK-LABEL: func @relu6 func.func @relu6(%arg0: tensor<1xi32>) -> tensor<1xi32> { // CHECK-DAG: %[[ZERO:.*]] = mhlo.constant dense<0> : tensor<i32> // CHECK-DAG: %[[SIX:.*]] = mhlo.constant dense<6> : tensor<i32> // CHECK: mhlo.clamp %[[ZERO]], %arg0, %[[SIX]] : (tensor<i32>, tensor<1xi32>, tensor<i32>) -> tensor<1xi32> %0 = "tf.Relu6"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 06 18:46:23 UTC 2024 - 335.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// CHECK-PER-TENSOR: return %[[UNIFORM_QUANTIZE_0]] : tensor<?x3x4x2x!quant.uniform<i8:f32, 0.0031372549487095253:-128>> // ----- // Tests that fused pattern for convolution + bias + relu6 with // dynamic batch dimension is properly quantized. // Note that this checks for identical condition as // quantize_conv_with_bias_dynamic_fn, omitting stablehlo.clamp.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
let summary = "Computes rectified linear 6 gradients for a Relu6 operation."; let arguments = (ins Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu6 operation.}]>:$gradients, Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu6 operation, or its output; using either one produces the same result.}]>:$features );
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir
%1 = "chlo.broadcast_maximum"(%0, %arg0) {broadcast_dimensions = array<i64>} : (tensor<i32>, tensor<?xi32>) -> tensor<?xi32> func.return %1 : tensor<?xi32> } // CHECK-LABEL: func @relu6( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1xi32>) -> tensor<1xi32> { // CHECK-DAG: %[[VAL_1:.*]] = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 340.2K bytes - Viewed (0)