Results 1 - 10 of 73 for kRelu6 (0.09 sec)
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// The actual Optimize Pass.
namespace {
#define GEN_PASS_DEF_OPTIMIZEPASS
#include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"

constexpr char kRelu[] = "RELU";
constexpr char kRelu6[] = "RELU6";
constexpr char kRelu1[] = "RELU_N1_TO_1";

ElementsAttr FlattenTo1D(Attribute a) {
  auto elements = mlir::cast<DenseElementsAttr>(a);
  const std::array<int64_t, 1> flattened_shape = {elements.getNumElements()};
Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes
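For context on the constant above: RELU6, the activation that kRelu6 names, clamps its input to the range [0, 6]. A minimal NumPy sketch of the semantics (the relu6 helper here is illustrative, not TensorFlow's implementation):

import numpy as np

def relu6(x):
    # Clamp activations to [0, 6]; equivalent to min(max(x, 0), 6).
    return np.minimum(np.maximum(x, 0.0), 6.0)

print(relu6(np.array([-2.0, 3.0, 8.0])))  # -> [0. 3. 6.]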
tensorflow/compiler/mlir/lite/tests/inlining.mlir
// Inline a function that contains only tfl ops.
func.func @func_with_tfl_ops(%arg0 : tensor<2xi32>) -> tensor<2xi32> {
  %0 = "tfl.sub"(%arg0, %arg0) {fused_activation_function = "RELU6"} : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
  %1 = "tfl.add"(%0, %arg0) {fused_activation_function = "RELU6"} : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
  func.return %1: tensor<2xi32>
}

// CHECK-LABEL: func @inline_with_arg
Last Modified: Mon Mar 28 14:24:59 UTC 2022 - 1000 bytes
tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py
  elif act == 'RELU6':
    return tf.raw_ops.Relu6(features=res)
  elif act == 'TANH':
    return tf.raw_ops.Tanh(x=res)
  else:
    return res


@tf.RegisterGradient('NewConv2D')
def _conv_add_relu_grad(op: ops.Operation, grad):
  act = op.get_attr('act')
  y = op.outputs[0]
  if act == 'RELU':
    grad = gen_nn_ops.relu_grad(grad, y)
  elif act == 'RELU6':
    grad = gen_nn_ops.relu6_grad(grad, y)
Last Modified: Thu Aug 31 20:23:51 UTC 2023 - 6.8K bytes
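The relu6_grad call above passes the incoming gradient through only where the activation was not clamped. A hedged NumPy sketch of that rule (illustrative, not gen_nn_ops itself):

import numpy as np

def relu6_grad(grad, y):
    # Gradient is 1 where the output y is strictly inside (0, 6),
    # and 0 on both clamped regions (y == 0 and y == 6).
    return grad * ((y > 0) & (y < 6))

y = np.array([0.0, 2.5, 6.0])
print(relu6_grad(np.ones_like(y), y))  # -> [0. 1. 0.]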
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir
  } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
  %1 = "tf.BiasAdd"(%0, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
  %2 = "tf.Relu6"(%1) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
  %3 = "tf.Conv2D"(%arg0, %arg1) { data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
Last Modified: Fri May 10 04:07:09 UTC 2024 - 26.5K bytes
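The lifted pattern above is the common Conv2D -> BiasAdd -> Relu6 chain. A runnable re-creation in eager TensorFlow, with tensor shapes taken from the test; the padding and stride values are assumptions, since the excerpt truncates the Conv2D attributes:

import tensorflow as tf

x = tf.random.normal([1, 3, 4, 3])  # NHWC input, as in the test
w = tf.random.normal([2, 3, 3, 2])  # HWIO filter, as in the test
b = tf.random.normal([2])           # per-output-channel bias
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")  # assumed attrs
y = tf.nn.bias_add(y, b)
y = tf.nn.relu6(y)                  # the activation the pass matches
print(y.shape)  # (1, 3, 4, 2)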
tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/simple-graph.mlir
%0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> %1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> %2 = "tfl.add"(%arg0, %arg3) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.6K bytes
tensorflow/compiler/mlir/lite/experimental/tac/tests/tac-filter.mlir
func.func @testFunctionSkiped(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) {
  // CHECK: tfl.add
  // CHECK-SAME: tac.skip_target_annotation
  %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
  // CHECK: tfl.add
  // CHECK-SAME: tac.skip_target_annotation
Last Modified: Wed May 24 01:08:29 UTC 2023 - 3.5K bytes
tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir
%relu = tfr.constant "relu" -> !tfr.attr
%relu6 = tfr.constant "relu6" -> !tfr.attr
%is_relu = tfr.equal %act, %relu -> i1
%res = scf.if %is_relu -> !tfr.tensor {
  %applied_relu = tfr.call @tf__relu(%add) : (!tfr.tensor) -> !tfr.tensor
  scf.yield %applied_relu : !tfr.tensor
} else {
  %is_relu6 = tfr.equal %act, %relu6 -> i1
  %res1 = scf.if %is_relu6 -> !tfr.tensor {
Last Modified: Wed Oct 13 16:33:28 UTC 2021 - 4.2K bytes
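The tfr snippet above dispatches on the fused-activation attribute through nested scf.if ops. The same decomposition logic, rendered as a hedged Python sketch:

import tensorflow as tf

def apply_fused_activation(res, act):
    # Mirrors the constant-compare-and-branch chain: check "relu"
    # first, then "relu6", otherwise pass the tensor through.
    if act == "relu":
        return tf.nn.relu(res)
    if act == "relu6":
        return tf.nn.relu6(res)
    return res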
tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir
// CHECK: _arithmetic_count = 230686720 : i64
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32, fused_activation_function = "RELU6"} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<256x32x32x16xf32>
func.return %0 : tensor<256x32x32x16xf32>
}
Last Modified: Wed Dec 14 04:58:17 UTC 2022 - 7.7K bytes
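The checked count can be reproduced from the tensor shapes alone. One consistent reading (an inference from the numbers, not a statement of how the pass actually counts) charges one multiply plus one add per MAC, and one extra op per output element for the bias:

n, h, w, o = 256, 32, 32, 16  # output tensor 256x32x32x16
kh, kw, ci = 3, 3, 3          # 3x3 filter over 3 input channels
outputs = n * h * w * o       # 4,194,304 output elements
count = outputs * (2 * kh * kw * ci + 1)
print(count)  # 230686720, matching _arithmetic_count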
tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir
// CHECK: tac.cost = 0x4D5C0000
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32, fused_activation_function = "RELU6", tac.device = "CPU"} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<256x32x32x16xf32>
func.return %0 : tensor<256x32x32x16xf32>
}
Last Modified: Thu Mar 24 05:29:10 UTC 2022 - 5.7K bytes
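The cost is checked as a hex pattern because it is an IEEE-754 float32 bit pattern. Decoding 0x4D5C0000 recovers 230686720.0, the same figure as the arithmetic count in the previous result, which suggests the CPU cost here is the raw op count:

import struct

# Interpret the 4-byte hex pattern as a big-endian float32.
(cost,) = struct.unpack(">f", bytes.fromhex("4D5C0000"))
print(cost)  # 230686720.0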
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/simple.mlir
// CHECK: %{{.*}} = "tfl.pseudo_const"() <{value = dense<{{\[\[1, 2\], \[3, 4\], \[5, 6\]\]}}> : tensor<3x2xi32>}>
// CHECK-NEXT: [[SUB:%.*]] = tfl.sub %{{.*}}, %{{.*}} {fused_activation_function = "RELU6"} : tensor<3x2xi32>
// CHECK-NEXT: [[SCALAR:%.*]] = "tfl.pseudo_const"() <{value = dense<10> : tensor<i32>}> : () -> tensor<i32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.6K bytes