Results 61 - 70 of 73 for kRelu6 (0.17 sec)
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
.Case("NONE", tflite::ActivationFunctionType_NONE) .Case("RELU", tflite::ActivationFunctionType_RELU) .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1) .Case("RELU6", tflite::ActivationFunctionType_RELU6) .Case("TANH", tflite::ActivationFunctionType_TANH) .Case("SIGN_BIT", tflite::ActivationFunctionType_SIGN_BIT); }
Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
double range = std::fabs(max - min);
if (num_bits <= 8 && range >= 10.0) {
  op.emitWarning()
      << "Tensor range is too wide to be quantized. Use tf.clip_by_value "
         "or tf.relu6 to narrow the tensor range. Range: "
      << range << ", bit width: " << num_bits;
}
if (std::abs(max - min) < kNearZeroTolerance) {
  op.emitWarning() << "Tensor range (" << min << ", " << max
Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes
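A rough sketch of the arithmetic behind the warning in this excerpt, in plain Python (the helper name quant_step is illustrative, not a TensorFlow API): with affine 8-bit quantization the step size grows linearly with the tensor range, which is why narrowing the range with tf.clip_by_value or tf.relu6 helps.

    # Illustrative only: step size of affine quantization for a given range and bit width.
    def quant_step(min_val: float, max_val: float, num_bits: int = 8) -> float:
        return (max_val - min_val) / (2 ** num_bits - 1)

    print(quant_step(0.0, 6.0))     # ~0.0235 per step -- range narrowed by relu6
    print(quant_step(-40.0, 40.0))  # ~0.3137 per step -- a "too wide" range quantizes coarsely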
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
}]>
  ];
}

def TFL_Relu6Op: TFL_Op<"relu6", [
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    Pure,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Relu6 operator";

  let description = [{
    Element-wise Relu6 operator
      x -> max(0, min(6, x))
  }];
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes
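The op definition above gives the element-wise formula x -> max(0, min(6, x)); a minimal NumPy sketch of that semantics (illustrative only, not the TFLite kernel):

    import numpy as np

    def relu6(x: np.ndarray) -> np.ndarray:
        # Element-wise max(0, min(6, x)), i.e. clip to the interval [0, 6].
        return np.clip(x, 0.0, 6.0)

    print(relu6(np.array([-2.0, 3.5, 10.0])))  # [0.  3.5  6.]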
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
// CHECK: return %[[CLAMP]] : tensor<?x28x28x16xf32>
// CHECK: }

// -----

// Because the operand of shape_of is other than the target conv,
// should not match conv bias relu6 dynamic pattern.
// CHECK-LABEL: @conv_with_bias_and_relu6_dynamic_shape_not_same_op_fn(
// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x28x28x1xf32>
Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
// ReLU patterns
def MatchReluPattern : Pat<
  (TFL_MaximumOp $input, (Arith_ConstantOp $Zero)),
  (TFL_ReluOp $input),
  [(FloatValueEquals<"0"> $Zero)]>;

// Optimize Minimum of tf.Relu and constant six to tf.Relu6
def MinimumOfReluAnd6ToRelu6 : Pat<
  (TFL_MinimumOp (TFL_ReluOp $x), (Arith_ConstantOp $y)),
  (TFL_Relu6Op $x),
  [(IsConstantValueOf<6> $y)]>;
Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes
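The second pattern above folds Minimum(Relu(x), 6) into a single Relu6; the two computations are numerically identical, as this small NumPy check suggests (illustrative only, not the MLIR rewrite itself):

    import numpy as np

    x = np.linspace(-10.0, 10.0, 101)
    unfused = np.minimum(np.maximum(x, 0.0), 6.0)  # Minimum(Relu(x), 6)
    fused = np.clip(x, 0.0, 6.0)                   # Relu6(x)
    assert np.array_equal(unfused, fused)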
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
  func.return %0 : tensor<4xi64>
}

// CHECK-LABEL: testReluOfMinimum6ToRelu6Float
func.func @testReluOfMinimum6ToRelu6Float(%arg0: tensor<4xf32>) -> tensor<4xf32> {
  // CHECK: %0 = "tf.Relu6"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<4xf32>) -> tensor<4xf32>
  // CHECK: return %0
  %cst_6 = arith.constant dense<6.000000e+00> : tensor<f32>
Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes
tensorflow/compiler/jit/mark_for_compilation_pass.cc
"TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual", // Others "AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty", "Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad", "LeakyReluGrad", "Elu", "EluGrad", "Selu", "SeluGrad", "Select", "SelectV2", "Transpose", "ConjugateTranspose", "_UnaryOpsComposition", "CollectiveReduceV2",
Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
%0 = "tf.SquaredDifference"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> %1 = "tf.Relu6"(%0) : (tensor<1xf32>) -> tensor<1xf32> func.return %1: tensor<1xf32> // CHECK-LABEL: squaredDifferenceRelu // CHECK: tfl.squared_difference %arg0, %arg1 : tensor<1xf32> // CHECK: %1 = "tfl.relu6"(%0) : (tensor<1xf32>) -> tensor<1xf32> // CHECK: return }
Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir
}

// -----

// CHECK-LABEL: func @relu6
func.func @relu6(%arg0: tensor<1xi32>) -> tensor<1xi32> {
  // CHECK-DAG: %[[ZERO:.*]] = mhlo.constant dense<0> : tensor<i32>
  // CHECK-DAG: %[[SIX:.*]] = mhlo.constant dense<6> : tensor<i32>
  // CHECK: mhlo.clamp %[[ZERO]], %arg0, %[[SIX]] : (tensor<i32>, tensor<1xi32>, tensor<i32>) -> tensor<1xi32>
  %0 = "tf.Relu6"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
Last Modified: Mon May 06 18:46:23 UTC 2024 - 335.5K bytes
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// CHECK-PER-TENSOR: return %[[UNIFORM_QUANTIZE_0]] : tensor<?x3x4x2x!quant.uniform<i8:f32, 0.0031372549487095253:-128>>

// -----

// Tests that fused pattern for convolution + bias + relu6 with
// dynamic batch dimension is properly quantized.
// Note that this checks for identical condition as
// quantize_conv_with_bias_dynamic_fn, omitting stablehlo.clamp.
Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes