- Sort by: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 73 for kRelu6 (0.11 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir
} // ----- func.func @testAddReluPack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) { // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT" %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT" %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 19:32:06 UTC 2023 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc
// tfl.log / tfl.logistic / tfl.max_pool_2d / tfl.mirror_pad / tfl.maximum / // tfl.custom / tfl.mean / tfl.minimum / tfl.pad / tfl.pow / tfl.prelu / // tfl.relu / tfl.relu6 / tfl.rsqrt / tfl.sin / tfl.slice / tfl.softmax / // tfl.space_to_depth / tfl.sqrt / tfl.square / tfl.squared_difference / // tfl.strided_slice / tfl.tanh / tfl.transpose / tfl.transpose_conv
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
%1 = "tf.BiasAdd"(%0, %arg2) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32> %2 = "tf.Relu6"(%1) : (tensor<*xf32>) -> tensor<*xf32> func.return %2 : tensor<*xf32> } // CHECK-LABEL: func @conv_with_single_layer // CHECK: %[[quantize:.*]] = "tf.PartitionedCall"(%arg0 // CHECK-SAME: f = @quantize_i8
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad_test.cc
using ops::FusedBatchNormV3; using ops::L2Loss; using ops::LogSoftmax; using ops::LRN; using ops::MaxPool; using ops::MaxPool3D; using ops::MaxPoolV2; using ops::Placeholder; using ops::Relu; using ops::Relu6; using ops::Selu; using ops::Softmax; using ops::Softplus; using ops::Softsign; class NNGradTest : public ::testing::Test { protected: NNGradTest() : scope_(Scope::NewRootScope()) {}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
%biasadd = "tf.BiasAdd"(%conv, %dq_bias) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32> %res = "tf.Relu6"(%biasadd) : (tensor<*xf32>) -> tensor<*xf32> %q_res = "quantfork.qcast"(%res) : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 0.023529411764705882:-128>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir
%6 = "quantfork.stats"(%5) {layerStats = dense<[-2788.31055, 4616.62842]> : tensor<2xf32>} : (tensor<*xf32>) -> tensor<*xf32> %7 = "tf.Relu6"(%6) {device = ""} : (tensor<*xf32>) -> tensor<*xf32> %8 = "quantfork.stats"(%7) {layerStats = dense<[0.000000e+00, 6.000000e+00]> : tensor<2xf32>} : (tensor<*xf32>) -> tensor<*xf32> return %8 : tensor<*xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 01 10:21:29 UTC 2023 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir
// CHECK: %[[conv:.*]] = "tf.Conv2D"(%[[dq_input]], %[[dq_weight]]) // CHECK: %[[bias:.*]] = "tf.BiasAdd"(%[[conv]], %[[cst_0]]) <{data_format = "NHWC"}> // CHECK: %[[relu6:.*]] = "tf.Relu6"(%[[bias]]) // ----- func.func @remove_check_numerics_op(%arg0: tensor<*xf32>) -> tensor<*xf32> { %0 = "tf.CheckNumerics"(%arg0) {device = "", message = "transformer"} : (tensor<*xf32>) -> tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 33.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td
[(IsNotInLiftedFunc $res), (IsEinsumSupportedByXlaDotV2 $equation)], [], (addBenefit 10)>; } defm : LiftCompositeOpsWithActivation<TF_ReluOp, "relu">; defm : LiftCompositeOpsWithActivation<TF_Relu6Op, "relu6">; def LiftGather : Pat< (TF_GatherV2Op:$res $params, $indices, $axis, $batch_dims), (LiftAsTFPartitionedCall<"composite_gather_fn"> (ArgumentList $params, $indices, $axis), (ResultList $res),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 15.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs
RELU = 19, // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed // since different model developers use RELU1 in different ways. Never // create another op called RELU1. RELU_N1_TO_1 = 20, RELU6 = 21, RESHAPE = 22, RESIZE_BILINEAR = 23, RNN = 24, SOFTMAX = 25, SPACE_TO_DEPTH = 26, SVDF = 27, TANH = 28, CONCAT_EMBEDDINGS = 29, SKIP_GRAM = 30, CALL = 31,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 19 19:46:06 UTC 2021 - 26.1K bytes - Viewed (0)