Results 1 - 10 of 52 for Selu (0.17 sec)

  1. tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir

      %add = tfr.call @tf__add(%dot, %bias) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
    
      %relu = tfr.constant "relu" -> !tfr.attr
      %relu6 = tfr.constant "relu6" -> !tfr.attr
    
      %is_relu = tfr.equal %act, %relu -> i1
      %res = scf.if %is_relu -> !tfr.tensor {
        %applied_relu = tfr.call @tf__relu(%add) : (!tfr.tensor) -> !tfr.tensor
        scf.yield %applied_relu : !tfr.tensor
    - Last Modified: Wed Oct 13 16:33:28 UTC 2021
    - 4.2K bytes
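
    The TFR snippet decomposes a fused bias-add whose activation is selected
    by a string attribute. A minimal Python sketch of the same dispatch,
    assuming the composite is a matmul plus bias-add (biased_dense is a
    hypothetical name, not the upstream API):

      import tensorflow as tf

      def biased_dense(x, w, bias, act=None):
        # Mirrors the TFR decomposition: add the bias, then apply the
        # activation named by the `act` attribute, if any.
        res = tf.matmul(x, w) + bias
        if act == 'relu':
          return tf.nn.relu(res)
        if act == 'relu6':
          return tf.nn.relu6(res)
        return res
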
  2. tensorflow/compiler/mlir/tfr/integration/graph_decompose_test.py

        t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
        sq = biased_dense(t1, t2, t3, act='relu')
        self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
    
      def testWithKnownKernel(self):
    
        @def_function.function
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
          return nn_ops.elu(dot)  # with known kernel, should not expand.
    
        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.2K bytes
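
    The test checks that an op with a registered kernel (here Elu) is left as
    a single node instead of being expanded. A hedged sketch of the same idea
    for Selu, the query term (biased_dense_selu is illustrative, not the
    gen_composite_ops API):

      import tensorflow as tf

      @tf.function
      def biased_dense_selu(x, y, z):
        # tf.nn.selu has a registered kernel, so an expansion pass
        # would keep this call as one node rather than decompose it.
        return tf.nn.selu(tf.matmul(x, y) + z)

      t = tf.constant([[1.0, -2.0], [3.0, -4.0]])
      print(biased_dense_selu(t, t, t))
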
  3. tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py

        sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
        self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
    
      def testWithKnownKernel(self):
    
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
          return nn_ops.elu(dot)  # with known kernel, should not expand.
    
        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.9K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/gpu_fusion.mlir

    // Since the tf.AddV2 op has two uses, we have a _FusedBatchNormEx without the
    // Relu activation and we only fuse the add.
    // CHECK-NEXT: %[[Y:[a-z0-9]*]], {{.*}}_FusedBatchNormEx
    // CHECK-NEXT: %[[relu:[a-z0-9]*]] ={{.*}}Relu"(%[[Y]]
    // CHECK-NEXT: return %[[relu]]
    - Last Modified: Thu Mar 24 05:47:26 UTC 2022
    - 3.6K bytes
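
    The pattern under test: when the tf.AddV2 result has a second consumer,
    only the add is folded into _FusedBatchNormEx and the Relu stays separate.
    A rough Python reconstruction of that graph shape (names and shapes are
    illustrative):

      import tensorflow as tf

      @tf.function
      def bn_add_relu(x, y, scale, offset, mean, var):
        bn, _, _ = tf.compat.v1.nn.fused_batch_norm(
            x, scale, offset, mean=mean, variance=var, is_training=False)
        z = bn + y               # tf.AddV2
        return tf.nn.relu(z), z  # second use of z blocks fusing the Relu
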
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/tac-filter.mlir

        // CHECK: tfl.add
        // CHECK-SAME: tac.skip_target_annotation
        %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
        // CHECK: tfl.relu
        // CHECK-SAME: tac.skip_target_annotation
        %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
        func.return
      }
    }
    
    // -----
    
    - Last Modified: Wed May 24 01:08:29 UTC 2023
    - 3.5K bytes
  6. tensorflow/compiler/mlir/lite/tests/raise-custom-ops.mlir

      // will be preserved since it has uses.
      %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      // will be preserved since it has side-effect.
      "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      func.return %2 : tensor<4xf32>
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/device_assignment_by_func_attr.mlir

      // CHECK: device = "cpu"
      %2 = "tf.Relu"(%1) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "cpu"} : (tensor<3x3xf32>) -> tensor<3x3xf32>
      // CHECK: device = "xpu"
      %3 = "tf.Relu"(%2) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"]} : (tensor<3x3xf32>) -> tensor<3x3xf32>
      func.return %3 : tensor<3x3xf32>
    - Last Modified: Tue May 10 00:30:05 UTC 2022
    - 1.6K bytes
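
    The pass fills in a default device for ops that lack one, while ops with
    an explicit device attribute (here "cpu") keep theirs. The analogous
    placement in Python (illustrative; the pass itself works on the MLIR
    module):

      import tensorflow as tf

      x = tf.constant([[1.0, -1.0], [2.0, -2.0]])
      with tf.device('/CPU:0'):  # explicit placement is preserved
        a = tf.nn.relu(x)
      b = tf.nn.relu(a)          # unplaced; receives the default device
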
  8. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

      if act == 'RELU':
        return tf.raw_ops.Relu(features=res)
      elif act == 'RELU6':
        return tf.raw_ops.Relu6(features=res)
      elif act == 'TANH':
        return tf.raw_ops.Tanh(x=res)
      else:
        return res
    
    
    @tf.RegisterGradient('NewConv2D')
    def _conv_add_relu_grad(op: ops.Operation, grad):
      act = op.get_attr('act')
      y = op.outputs[0]
      if act == 'RELU':
        grad = gen_nn_ops.relu_grad(grad, y)
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
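
    The registered gradient mirrors the forward dispatch on the `act`
    attribute. A hedged sketch of the full branch structure using the public
    raw-op wrappers (the upstream file may differ in detail; _activation_grad
    is a made-up helper):

      import tensorflow as tf

      def _activation_grad(act, grad, y):
        # y is the op's forward output; for these activations the grad ops
        # accept it in place of the pre-activation input.
        if act == 'RELU':
          return tf.raw_ops.ReluGrad(gradients=grad, features=y)
        if act == 'RELU6':
          return tf.raw_ops.Relu6Grad(gradients=grad, features=y)
        if act == 'TANH':
          return tf.raw_ops.TanhGrad(y=y, dy=grad)
        return grad
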
  9. tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc

    func.func @main(%arg0: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
        %0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
        %1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
        %2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
        %3 = "tf.Relu"(%2) : (tensor<?xi32>) -> tensor<?xi32>
        %4 = "tf.Relu"(%1) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 7.8K bytes
  10. tensorflow/compiler/mlir/tfrt/tests/ir/testdata/test.mlir

      %cpu = corert.get_op_handler %ch "cpu"
      %0 = corert.executeop(%cpu) "tf.Relu"(%arg0) { T = f32 } : 1
      %arg1 = tfrt_fallback_async.corert_tensorhandle_to_fallback_tensor %arg1_th {_tfrt_cost = 1 : i64, device = "/CPU:0"} : (!corert.tensorhandle) -> (!tfrt_fallback.tf_tensor)
      %1 = tfrt_fallback_async.executeop key(0) cost(100) device("/CPU:0") "tf.Relu"(%arg1) { T = f32 } : 1
      tfrt.return
    - Last Modified: Fri Mar 25 11:03:04 UTC 2022
    - 496 bytes