Results 1 - 10 of 66 for RELU (0.03 sec)
tensorflow/compiler/mlir/lite/tests/raise-custom-ops.mlir
    // will be preserved since it has uses.
    %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
    // will be preserved since it has side-effect.
    "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
    func.return %2 : tensor<4xf32>
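For context (not from the file above): fused_activation_function is the TFLite convention for folding an activation into the producing op. A minimal Python sketch of its usual semantics, where the helper name apply_fused_activation is my own and not an API from the test:

    import numpy as np

    def apply_fused_activation(x, fused_activation_function):
        # Hypothetical helper sketching the usual TFLite fused-activation
        # semantics for the attribute seen in the snippet above.
        if fused_activation_function == "NONE":
            return x
        if fused_activation_function == "RELU":
            return np.maximum(x, 0.0)    # clamp below at 0
        if fused_activation_function == "RELU6":
            return np.clip(x, 0.0, 6.0)  # clamp to [0, 6]
        raise ValueError(fused_activation_function)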
tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py
    if act == 'RELU':
      return tf.raw_ops.Relu(features=res)
    elif act == 'RELU6':
      return tf.raw_ops.Relu6(features=res)
    elif act == 'TANH':
      return tf.raw_ops.Tanh(x=res)
    else:
      return res

    @tf.RegisterGradient('NewConv2D')
    def _conv_add_relu_grad(op: ops.Operation, grad):
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = gen_nn_ops.relu_grad(grad, y)
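For reference, a minimal NumPy sketch (my own illustration) of the gradient rule that gen_nn_ops.relu_grad applies in the snippet above: the upstream gradient passes through exactly where the activation output is positive.

    import numpy as np

    def relu_grad(upstream, y):
        # ReLU's gradient in terms of its output y: pass the upstream
        # gradient where the unit fired (y > 0), zero elsewhere.
        return upstream * (y > 0)

    y = np.maximum(np.array([-1.0, 0.5, 2.0]), 0.0)
    print(relu_grad(np.ones_like(y), y))  # [0. 1. 1.]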
tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc
    func.func @main(%arg0: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
      %0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
      %1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
      %2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
      %3 = "tf.Relu"(%2) : (tensor<?xi32>) -> tensor<?xi32>
      %4 = "tf.Relu"(%1) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir
    // CHECK: %[[cast:.*]] = "tf.Cast"(%[[sub]]) <{Truncate = false}> : (tensor<1x3x2x2xi32>) -> tensor<1x3x2x2xf32>
    // CHECK: %[[dequant1:.*]] = "tf.Mul"(%[[cast]]
    // CHECK: %[[relu:.*]] = "tf.Relu"(%[[dequant1]]
    // CHECK: %[[clamped:.*]] = "tf.Minimum"(%[[relu]]
    // CHECK: %[[rescale1:.*]] = "tf.Mul"(%[[cast]]
    // CHECK: %[[add2:.*]] = "tf.AddV2"(%[[rescale1]]
    // CHECK: %[[maximum2:.*]] = "tf.Maximum"(%[[add2]]
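The checked pattern above is the float form of a quantized ReLU6-style op: cast, rescale (dequantize), ReLU, then clamp. A rough NumPy sketch, with the scale and upper bound invented for illustration (the .mlir test hard-codes its own constants):

    import numpy as np

    scale = 0.1            # hypothetical dequantization scale
    upper = 6.0            # hypothetical clamp, as in ReLU6
    q = np.array([-5, 0, 70], dtype=np.int32)
    x = q.astype(np.float32) * scale   # tf.Cast + tf.Mul: dequantize
    x = np.maximum(x, 0.0)             # tf.Relu
    x = np.minimum(x, upper)           # tf.Minimum: clamp
    print(x)                           # [0. 0. 6.]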
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/basic_lstm.mlir
    // CHECK-LABEL: @main
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/function-func-attr.pbtxt
op: "custom_embedding_matmul" } library { function { signature { name: "custom_relu" } attr { key: "_implements" value { func { name: "tensorflow.relu" } } } } function { signature { name: "custom_embedding_matmul" } attr { key: "_implements" value { func {
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir
%14 = "tf.AddV2"(%10, %12) : (tensor<?x256x56x56xf32>, tensor<?x256x56x56xf32>) -> tensor<?x256x56x56xf32> %15 = "tf.Relu"(%14) : (tensor<?x256x56x56xf32>) -> tensor<?x256x56x56xf32> // CHECK: %[[ADD:[0-9]*]] = "tf.AddV2"(%[[BATCH_NORM1]], %[[BATCH_NORM2]]) // CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[ADD]]) // Reduce spatial dimensions %16 = "tf.Mean"(%15, %1) : (tensor<?x256x56x56xf32>, tensor<2xi32>) -> tensor<?x256xf32>
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir
    // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
    // CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x8x4x4xf32>
    // CHECK: return %[[RELU]]
    %0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
    %1 = "tf.Relu"(%0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
    %2 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
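The CHECK lines above expect the 1x4x4x8 NHWC tensor to appear as 1x8x4x4 once the transpose is hoisted to the function argument; the permutation [0, 3, 1, 2] maps NHWC to NCHW, as this small NumPy check (my own illustration) confirms:

    import numpy as np

    x = np.zeros((1, 4, 4, 8), dtype=np.float32)  # NHWC, as in the test
    perm = [0, 3, 1, 2]                           # the tf.Const permutation
    print(np.transpose(x, perm).shape)            # (1, 8, 4, 4) -> NCHW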
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir
    } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
    %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
    %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    %6 = "tf.Conv2D"(%arg0, %arg1) {
      data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir
    // CHECK-SAME: f = @composite_conv3d_fn_1}>
    // CHECK-NOT: {_tfl_quant_trait = "fully_quantizable"
    // CHECK: %[[RELU:.*]] = "tf.Relu"(%[[PARTITIONEDCALL_0]])
    // CHECK: return %[[RELU]]
    // CHECK-LABEL: private @composite_conv3d_fn_1
    // WEIGHTONLY-DAG: %[[CST:.*]] = "tf.Const"() {{.*}} : () -> tensor<2x3x3x3x2xf32>
    // WEIGHTONLY: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"(%arg0, %[[CST]])