Results 41 - 50 of 138 for relu6 (0.09 sec)
tensorflow/cc/gradients/nn_grad_test.cc
using ops::FusedBatchNormV3;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
using ops::MaxPool;
using ops::MaxPool3D;
using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
using ops::Selu;
using ops::Softmax;
using ops::Softplus;
using ops::Softsign;

class NNGradTest : public ::testing::Test {
 protected:
  NNGradTest() : scope_(Scope::NewRootScope()) {}
Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes
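The test above builds each op with the C++ Scope API and checks its registered gradient numerically. As a minimal sketch of the same property in the Python API (not part of the test file): Relu6's gradient is 1 inside the open interval (0, 6) and 0 outside it.

import tensorflow as tf

# Relu6(x) = min(max(x, 0), 6); its gradient is 1 on (0, 6) and 0 elsewhere.
x = tf.constant([-1.0, 0.5, 3.0, 5.9, 7.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.nn.relu6(x)
print(tape.gradient(y, x).numpy())  # [0. 1. 1. 1. 0.]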
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
%1 = "tf.BiasAdd"(%0, %arg2) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
%2 = "tf.Relu6"(%1) : (tensor<*xf32>) -> tensor<*xf32>
func.return %2 : tensor<*xf32>
}

// CHECK-LABEL: func @conv_with_single_layer
// CHECK: %[[quantize:.*]] = "tf.PartitionedCall"(%arg0
// CHECK-SAME: f = @quantize_i8
Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes
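The CHECK lines expect the float BiasAdd/Relu6 chain to be replaced by a call to an i8 quantize function. A rough numerical sketch of the quantize-dequantize roundtrip such a rewrite performs, assuming standard 8-bit affine quantization over Relu6's [0, 6] output range:

import tensorflow as tf

y = tf.constant([0.0, 1.234, 5.678, 6.0])  # Relu6 outputs
scale, zp = 6.0 / 255.0, -128              # assumed i8 parameters
q = tf.round(y / scale) + zp               # quantize into [-128, 127] (kept in float here)
deq = (q - zp) * scale                     # dequantize back to float
print(tf.reduce_max(tf.abs(y - deq)).numpy())  # roundtrip error bounded by scale / 2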
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
%biasadd = "tf.BiasAdd"(%conv, %dq_bias) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
%res = "tf.Relu6"(%biasadd) : (tensor<*xf32>) -> tensor<*xf32>
%q_res = "quantfork.qcast"(%res) : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 0.023529411764705882:-128>>
Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes
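The scale and zero point in the qcast type above follow directly from Relu6's fixed output range. A worked derivation, assuming asymmetric affine quantization to signed 8-bit:

qmin, qmax = -128, 127
rmin, rmax = 0.0, 6.0                    # Relu6 output range
scale = (rmax - rmin) / (qmax - qmin)    # 6 / 255 = 0.023529411764705882
zero_point = qmin - round(rmin / scale)  # -128
print(scale, zero_point)                 # matches 0.023529411764705882:-128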
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir
%6 = "quantfork.stats"(%5) {layerStats = dense<[-2788.31055, 4616.62842]> : tensor<2xf32>} : (tensor<*xf32>) -> tensor<*xf32>
%7 = "tf.Relu6"(%6) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
%8 = "quantfork.stats"(%7) {layerStats = dense<[0.000000e+00, 6.000000e+00]> : tensor<2xf32>} : (tensor<*xf32>) -> tensor<*xf32>
return %8 : tensor<*xf32>
}
Last Modified: Wed Feb 01 10:21:29 UTC 2023 - 9.1K bytes
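Note how the calibration stats narrow from [-2788.31, 4616.63] before the Relu6 to exactly [0, 6] after it: the op clamps its output regardless of how wide the input statistics are. A quick check of that property:

import tensorflow as tf

x = tf.random.uniform([1024], minval=-2788.31055, maxval=4616.62842)
y = tf.nn.relu6(x)
print(float(tf.reduce_min(y)), float(tf.reduce_max(y)))  # ~0.0, ~6.0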
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs
LOGISTIC = 14,
LSH_PROJECTION = 15,
LSTM = 16,
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
// NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
// since different model developers use RELU1 in different ways. Never
// create another op called RELU1.
RELU_N1_TO_1 = 20,
RELU6 = 21,
RESHAPE = 22,
RESIZE_BILINEAR = 23,
RNN = 24,
SOFTMAX = 25,
SPACE_TO_DEPTH = 26,
Last Modified: Mon Apr 19 19:46:06 UTC 2021 - 26.1K bytes
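The schema comment distinguishes RELU_N1_TO_1 (clamp to [-1, 1]) from RELU6 (clamp to [0, 6]). A small sketch of the two ops' semantics in the Python API; the clip_by_value form for RELU_N1_TO_1 is a paraphrase of the TFLite kernel's behavior, since TF core has no op of that name:

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.5, 3.0, 8.0])
print(tf.clip_by_value(x, -1.0, 1.0).numpy())  # RELU_N1_TO_1: [-1. -0.5 0.5 1. 1.]
print(tf.nn.relu6(x).numpy())                  # RELU6:        [ 0.  0.  0.5 3. 6.]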
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td
    [(IsNotInLiftedFunc $res), (IsEinsumSupportedByXlaDotV2 $equation)], [], (addBenefit 10)>;
}

defm : LiftCompositeOpsWithActivation<TF_ReluOp, "relu">;
defm : LiftCompositeOpsWithActivation<TF_Relu6Op, "relu6">;

def LiftGather : Pat<
  (TF_GatherV2Op:$res $params, $indices, $axis, $batch_dims),
  (LiftAsTFPartitionedCall<"composite_gather_fn">
    (ArgumentList $params, $indices, $axis),
Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 15.6K bytes
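The defm instantiates the lifting patterns once per activation, so a matched op chain ending in tf.Relu6 is outlined into a single composite function call that the quantizer can treat as one unit. As a hypothetical illustration only (the pass generates its own function names and TF graph rewrites), the lifted composite behaves like a named function over the fused ops:

import tensorflow as tf

@tf.function
def composite_conv2d_with_bias_and_relu6_fn(x, w, b):  # illustrative name
    conv = tf.nn.conv2d(x, w, strides=1, padding="SAME")
    return tf.nn.relu6(tf.nn.bias_add(conv, b))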
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
def MaximumOfZeroToRelu : Pat<
  (TF_MaximumOp:$maximum_op $x, $y),
  (TF_ReluOp:$dest $x),
  [(IsConstantValueOf<0> $y), (IsDeviceCompatible $maximum_op)],
  [(CopyAttrs $maximum_op, $dest)]>;

//===----------------------------------------------------------------------===//
// Canonicalize tf.Relu of Minimum 6 to tf.Relu6
Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 17K bytes
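Both canonicalizations are simple algebraic identities, checked numerically below (a sketch, not from the repo):

import tensorflow as tf

x = tf.constant([-3.0, 0.0, 2.5, 6.0, 9.0])
# Maximum(x, 0) == Relu(x)
assert bool(tf.reduce_all(tf.maximum(x, 0.0) == tf.nn.relu(x)))
# Relu(Minimum(x, 6)) == Relu6(x)
assert bool(tf.reduce_all(tf.nn.relu(tf.minimum(x, 6.0)) == tf.nn.relu6(x)))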
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir
// CHECK: %[[conv:.*]] = "tf.Conv2D"(%[[dq_input]], %[[dq_weight]])
// CHECK: %[[bias:.*]] = "tf.BiasAdd"(%[[conv]], %[[cst_0]]) <{data_format = "NHWC"}>
// CHECK: %[[relu6:.*]] = "tf.Relu6"(%[[bias]])

// -----

func.func @remove_check_numerics_op(%arg0: tensor<*xf32>) -> tensor<*xf32> {
  %0 = "tf.CheckNumerics"(%arg0) {device = "", message = "transformer"} : (tensor<*xf32>) -> tensor<*xf32>
Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 33.3K bytes
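tf.CheckNumerics only validates its input for NaN/Inf and passes the values through, which is why the prepare-lifting test expects it to be removable without changing the graph's semantics. A quick demonstration of the identity behavior:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.debugging.check_numerics(x, message="transformer")
assert bool(tf.reduce_all(x == y))  # identity on finite inputs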
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
LOGISTIC = 14,
LSH_PROJECTION = 15,
LSTM = 16,
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
// NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
// since different model developers use RELU1 in different ways. Never
// create another op called RELU1.
RELU_N1_TO_1 = 20,
RELU6 = 21,
RESHAPE = 22,
RESIZE_BILINEAR = 23,
RNN = 24,
SOFTMAX = 25,
SPACE_TO_DEPTH = 26,
Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes