Results 61 - 70 of 138 for relu6 (0.1 sec)
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
double range = std::fabs(max - min);
if (num_bits <= 8 && range >= 10.0) {
  op.emitWarning()
      << "Tensor range is too wide to be quantized. Use tf.clip_by_value "
         "or tf.relu6 to narrow the tensor range. Range: "
      << range << ", bit width: " << num_bits;
}
if (std::abs(max - min) < kNearZeroTolerance) {
  op.emitWarning() << "Tensor range (" << min << ", " << max
Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes
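The warning above points users at tf.clip_by_value or tf.relu6 to narrow a tensor's range before 8-bit quantization. A minimal sketch of that advice in Python (the tensor values are made up for illustration):

    import tensorflow as tf

    x = tf.constant([-3.0, 0.5, 7.2, 42.0])  # range ~45.0, wide enough to trip the check above
    clipped = tf.clip_by_value(x, 0.0, 6.0)  # explicit bound on the range
    bounded = tf.nn.relu6(x)                 # same bound as a single op: min(max(x, 0), 6)

Either form caps the (min, max) interval the quantizer has to cover, so the 8-bit step size stays small.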
tensorflow/compiler/jit/cluster_scoping_pass_test.cc
Node* add1 =
    ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
Last Modified: Wed Apr 29 16:20:48 UTC 2020 - 6.7K bytes
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
(HasRankAtMost<4> $a), (HasRankAtMost<4> $b)]>;
}

// We can eliminate Relu from Relu(SquaredDifference(x, y)),
// since the result of SquaredDifference is always non-negative.
// The TFLite interpreter doesn't support Relu on int32 for now, so test
// cases fail unless the following pattern optimizes the Relu away.
def OptimizeReluSquaredDifference : Pat<
Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes
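The rewrite relies on the identity Relu(SquaredDifference(x, y)) == SquaredDifference(x, y): a squared quantity is never negative, so the Relu is a no-op. A quick numeric check (plain NumPy, illustrative only):

    import numpy as np

    x = np.array([1.0, -2.0, 3.0])
    y = np.array([4.0, 0.5, -1.0])
    sq = np.square(x - y)                            # SquaredDifference: (x - y)^2 >= 0 elementwise
    assert np.array_equal(np.maximum(sq, 0.0), sq)   # Relu leaves it unchanged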
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir
// CHECK: return %[[CLAMP]] : tensor<?x28x28x16xf32>
// CHECK: }

// -----

// Because the operand of shape_of is an op other than the target conv,
// this should not match the conv-bias-relu6 dynamic pattern.
// CHECK-LABEL: @conv_with_bias_and_relu6_dynamic_shape_not_same_op_fn(
// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x28x28x1xf32>
Last Modified: Fri May 10 04:07:09 UTC 2024 - 49.8K bytes
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
Node* add1 =
    ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
MakeStageNode(builder, "stage", {DT_FLOAT}, {relu0});
return GraphDefBuilderToGraph(builder, graph->get());
};
Last Modified: Wed Feb 14 10:11:10 UTC 2024 - 79.6K bytes
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// The actual Optimize Pass.
namespace {
#define GEN_PASS_DEF_OPTIMIZEPASS
#include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"

constexpr char kRelu[] = "RELU";
constexpr char kRelu6[] = "RELU6";
constexpr char kRelu1[] = "RELU_N1_TO_1";

ElementsAttr FlattenTo1D(Attribute a) {
  auto elements = mlir::cast<DenseElementsAttr>(a);
Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes
tensorflow/compiler/jit/mark_for_compilation_pass.cc
"TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual", // Others "AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty", "Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad", "LeakyReluGrad", "Elu", "EluGrad", "Selu", "SeluGrad", "Select", "SelectV2", "Transpose", "ConjugateTranspose", "_UnaryOpsComposition", "CollectiveReduceV2",
Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes
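Relu and Relu6 sit in this allowlist of ops the pass may fold into XLA clusters. The auto-clustering itself runs inside TensorFlow, but its effect can be approximated from Python by requesting XLA compilation explicitly (a sketch, not the pass itself):

    import tensorflow as tf

    @tf.function(jit_compile=True)  # compile the whole function with XLA
    def f(a, b):
        return tf.nn.relu6(a + b)   # Relu6 is in the clusterable-op list above

    print(f(tf.constant([1.0, 8.0]), tf.constant([2.0, 3.0])))  # [3. 6.]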
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
%0 = "tf.SquaredDifference"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> %1 = "tf.Relu6"(%0) : (tensor<1xf32>) -> tensor<1xf32> func.return %1: tensor<1xf32> // CHECK-LABEL: squaredDifferenceRelu // CHECK: tfl.squared_difference %arg0, %arg1 : tensor<1xf32> // CHECK: %1 = "tfl.relu6"(%0) : (tensor<1xf32>) -> tensor<1xf32> // CHECK: return }
Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes
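The test expects tf.SquaredDifference followed by tf.Relu6 to legalize to tfl.squared_difference and tfl.relu6. One way to exercise the same legalization end to end, assuming a recent TF 2.x, is to build that pair in Python and push it through the TFLite converter (names and shapes chosen to mirror the test):

    import tensorflow as tf

    @tf.function(input_signature=[tf.TensorSpec([1], tf.float32),
                                  tf.TensorSpec([1], tf.float32)])
    def sqdiff_relu6(a, b):
        return tf.nn.relu6(tf.math.squared_difference(a, b))

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [sqdiff_relu6.get_concrete_function()])
    tflite_model = converter.convert()  # legalize-tf runs as part of conversion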
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir
%0 = "tf.Relu"(%arg0) : (tensor<?xui32>) -> tensor<?xui32> func.return %0: tensor<?xui32> } // ----- // CHECK-LABEL: func @relu6 func.func @relu6(%arg0: tensor<1xi32>) -> tensor<1xi32> { // CHECK-DAG: %[[ZERO:.*]] = mhlo.constant dense<0> : tensor<i32> // CHECK-DAG: %[[SIX:.*]] = mhlo.constant dense<6> : tensor<i32>
Last Modified: Mon May 06 18:46:23 UTC 2024 - 335.5K bytes
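The CHECK-DAG lines capture how tf.Relu6 lowers: an mhlo clamp between constants 0 and 6. Numerically, relu6 is just that clamp, which a few lines of NumPy make explicit (a sketch of the semantics, not of the lowering itself):

    import numpy as np

    def relu6(x):
        # clamp(0, x, 6), matching the mhlo lowering sketched above
        return np.minimum(np.maximum(x, 0), 6)

    print(relu6(np.array([-1, 3, 9])))  # [0 3 6]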