Results 1 - 10 of 138 for relu6 (0.09 sec)

  1. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

      if act == 'RELU':
        return tf.raw_ops.Relu(features=res)
      elif act == 'RELU6':
        return tf.raw_ops.Relu6(features=res)
      elif act == 'TANH':
        return tf.raw_ops.Tanh(x=res)
      else:
        return res
    
    
    @tf.RegisterGradient('NewConv2D')
    def _conv_add_relu_grad(op: ops.Operation, grad):
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = gen_nn_ops.relu_grad(grad, y)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
    - Viewed (0)
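
    The `ops_defs.py` snippet above dispatches on the `act` attribute using raw ops; `Relu6` clamps its input to the range [0, 6]. A minimal sketch of the same dispatch through the public TF API (an illustration, not part of the indexed file):

      import tensorflow as tf

      def apply_act(res, act):
        # Mirrors the composite op's dispatch; relu6 clamps values to [0, 6].
        if act == 'RELU':
          return tf.nn.relu(res)
        elif act == 'RELU6':
          return tf.nn.relu6(res)
        elif act == 'TANH':
          return tf.math.tanh(res)
        return res

      print(apply_act(tf.constant([-1.0, 3.0, 8.0]), 'RELU6'))  # values: [0. 3. 6.]
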
  2. tensorflow/compiler/mlir/lite/tests/inlining.mlir

    // Inline a function that contains only tfl ops.
    func.func @func_with_tfl_ops(%arg0 : tensor<2xi32>) -> tensor<2xi32> {
      %0 = "tfl.sub"(%arg0, %arg0) {fused_activation_function = "RELU6"} : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
      %1 = "tfl.add"(%0, %arg0) {fused_activation_function = "RELU6"} : (tensor<2xi32>, tensor<2xi32>) -> tensor<2xi32>
      func.return %1: tensor<2xi32>
    }
    
    // CHECK-LABEL: func @inline_with_arg
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 14:24:59 UTC 2022
    - 1000 bytes
    - Viewed (0)
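
    In the TFLite dialect ops above, `fused_activation_function = "RELU6"` applies the activation to the op's result. A minimal TF sketch of the unfused equivalent (an illustration, not part of the indexed file):

      import tensorflow as tf

      a = tf.constant([2.0, -3.0])
      b = tf.constant([9.0, 1.0])
      # tfl.add / tfl.sub with fused_activation_function = "RELU6" behave like
      # relu6 applied to the arithmetic result, clamping it to [0, 6].
      fused_add = tf.nn.relu6(tf.add(a, b))       # [6., 0.]
      fused_sub = tf.nn.relu6(tf.subtract(a, b))  # [0., 0.]
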
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      %2 = "tf.Relu6"(%1) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
      %3 = "tf.MatMul"(%arg0, %arg1) {
        transpose_a = true, transpose_b = false
      } : (tensor<1x10xf32>, tensor<10x10xf32>) -> tensor<*xf32>
      %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<10xf32>) -> tensor<*xf32>
      %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
    - Viewed (0)
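
    The quantization test above matches a MatMul -> BiasAdd -> Relu chain. A minimal TF sketch of that pattern, with the transpose flags and unranked result types simplified away (an illustration, not part of the indexed file):

      import tensorflow as tf

      x = tf.random.normal([1, 10])
      w = tf.random.normal([10, 10])
      bias = tf.zeros([10])
      # MatMul -> BiasAdd -> Relu: the kind of chain the pass lifts into a
      # composite function for quantization.
      y = tf.nn.relu(tf.nn.bias_add(tf.matmul(x, w), bias))
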
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/tac-filter.mlir

        %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
        // CHECK: tfl.add
        // CHECK-SAME: tac.skip_target_annotation
        %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
        // CHECK: tfl.relu
        // CHECK-SAME: tac.skip_target_annotation
        %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
        func.return
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 24 01:08:29 UTC 2023
    - 3.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir

      %relu = tfr.constant "relu" -> !tfr.attr
      %relu6 = tfr.constant "relu6" -> !tfr.attr
    
      %is_relu = tfr.equal %act, %relu -> i1
      %res = scf.if %is_relu -> !tfr.tensor {
        %applied_relu = tfr.call @tf__relu(%add) : (!tfr.tensor) -> !tfr.tensor
        scf.yield %applied_relu : !tfr.tensor
      } else {
        %is_relu6 = tfr.equal %act, %relu6 -> i1
        %res1 = scf.if %is_relu6 -> !tfr.tensor {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 13 16:33:28 UTC 2021
    - 4.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir

      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32>
      // CHECK: tac.cost = 1.000000e+03
      %1 = "tfl.mul"(%0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<10x10x10xf32>, tensor<10xf32>) -> tensor<10x10x10xf32>
      func.return %1 : tensor<10x10x10xf32>
    }
    
    // -----
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 5.7K bytes
    - Viewed (0)
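
    The `tac.cost = 1.000000e+03` CHECK above appears to equal the number of output elements of the element-wise op on a tensor<10x10x10xf32> (an inference from the shapes, not stated in the file):

      # Hypothetical sanity check: cost ~= output element count for an element-wise tfl op.
      print(10 * 10 * 10)  # 1000, i.e. 1.000000e+03
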
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/simple-graph.mlir

      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %2 = "tfl.add"(%arg0, %arg3) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir

      // CHECK: _arithmetic_count = 230686720 : i64
      %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32, fused_activation_function = "RELU6"} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<256x32x32x16xf32>
      func.return %0 : tensor<256x32x32x16xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 7.7K bytes
    - Viewed (0)
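
    The expected `_arithmetic_count` above can be reproduced from the shapes in the snippet, assuming the pass counts two ops per multiply-accumulate plus one add per bias (an assumption about the counting convention, not stated in the file):

      out_elems = 256 * 32 * 32 * 16      # conv_2d output tensor<256x32x32x16xf32>
      macs_per_elem = 3 * 3 * 3           # 3x3 kernel over 3 input channels
      count = out_elems * macs_per_elem * 2 + out_elems  # mul + add per MAC, plus bias add
      print(count)  # 230686720, matching the CHECK line
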
  9. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/simple.mlir

      // CHECK:          %{{.*}} = "tfl.pseudo_const"() <{value = dense<{{\[\[1, 2\], \[3, 4\], \[5, 6\]\]}}> : tensor<3x2xi32>}>
      // CHECK-NEXT:     [[SUB:%.*]] = tfl.sub %{{.*}}, %{{.*}} {fused_activation_function = "RELU6"} : tensor<3x2xi32>
      // CHECK-NEXT:     [[SCALAR:%.*]] = "tfl.pseudo_const"() <{value = dense<10> : tensor<i32>}> : () -> tensor<i32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    // CHECK-SAME: f = @composite_conv3d_fn_1}>
    // CHECK-NOT: {_tfl_quant_trait = "fully_quantizable"
    // CHECK: %[[RELU:.*]] = "tf.Relu"(%[[PARTITIONEDCALL_0]])
    // CHECK: return %[[RELU]]
    
    // CHECK-LABEL: private @composite_conv3d_fn_1
    
    // WEIGHTONLY-DAG: %[[CST:.*]] = "tf.Const"() {{.*}} : () -> tensor<2x3x3x3x2xf32>
    // WEIGHTONLY: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"(%arg0, %[[CST]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
    - Viewed (0)