Results 41 - 50 of 54 for RELU (0.04 sec)

  1. tensorflow/c/experimental/ops/update_cpp_ops.sh

      MatMul \
      Neg \
      Sum \
      Sub \
      Div \
      DivNoNan \
      Exp \
      Sqrt \
      SqrtGrad \
      Log1p
    
    ${generate} \
      --category=nn \
      SparseSoftmaxCrossEntropyWithLogits \
      ReluGrad \
      Relu \
      BiasAdd \
      BiasAddGrad
    
    ${generate} \
      --category=resource_variable \
      VarHandleOp \
      ReadVariableOp \
      AssignVariableOp \
      DestroyResourceOp
    
    ${generate} \
      --category=io \
    - Last Modified: Tue May 17 17:54:34 UTC 2022
    - 1.6K bytes
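    The nn category above regenerates C++ wrappers for Relu and ReluGrad, among others. As a hedged sketch of what those two kernels compute (just the math, in numpy, not the generated C++ API):

      import numpy as np

      def relu(x):
          # Relu: elementwise max(x, 0).
          return np.maximum(x, 0.0)

      def relu_grad(gradients, features):
          # ReluGrad: pass the incoming gradient through only where the
          # forward input was strictly positive.
          return gradients * (features > 0.0)

      x = np.array([-2.0, -0.5, 0.0, 1.5])
      print(relu(x))                        # [0.  0.  0.  1.5]
      print(relu_grad(np.ones_like(x), x))  # [0. 0. 0. 1.]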
  2. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py

    
    def mnist_model():
      """Creates a MNIST model."""
      model = tf.keras.models.Sequential()
      model.add(tf.keras.layers.Flatten())
      model.add(tf.keras.layers.Dense(128, activation='relu'))
      model.add(tf.keras.layers.Dense(10, activation='softmax'))
      return model
    
    
    class TestModule(tf.Module):
    
      def __init__(self):
        super(TestModule, self).__init__()
        self.model = mnist_model()
    
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 1.7K bytes
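    The excerpt builds a small dense MNIST classifier with a relu hidden layer. A minimal usage sketch, assuming tensorflow is installed (the random batch stands in for real MNIST data):

      import numpy as np
      import tensorflow as tf

      model = tf.keras.models.Sequential([
          tf.keras.layers.Flatten(),
          tf.keras.layers.Dense(128, activation='relu'),
          tf.keras.layers.Dense(10, activation='softmax'),
      ])
      # A dummy batch of four 28x28 "images"; the softmax output is a
      # (4, 10) matrix of class probabilities, each row summing to 1.
      probs = model(np.random.rand(4, 28, 28).astype(np.float32))
      print(probs.shape)  # (4, 10)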
  3. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/insert_fallback_tensor_copy.mlir

      // CHECK-NOT: tfrt_fallback_async.copy_if_small
      %0 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
      %1 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
      tfrt.return %0, %1 : !tfrt_fallback.tf_tensor, !tfrt_fallback.tf_tensor
    - Last Modified: Fri Mar 25 10:51:48 UTC 2022
    - 5.5K bytes
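    The CHECK-NOT asserts that no tfrt_fallback_async.copy_if_small is inserted for these tf.Relu results. My reading, stated as an assumption: the pass only copies small, fanned-out tensors, where a copy is cheaper than contended sharing. An illustrative gate (names and threshold are hypothetical, not the pass's actual API):

      SMALL_TENSOR_BYTES = 4096  # hypothetical threshold

      def should_copy(result_bytes: int, num_consumers: int) -> bool:
          # Copy a result per consumer only when it is small and fanned
          # out; a large or expensive result like the ones here stays shared.
          return result_bytes <= SMALL_TENSOR_BYTES and num_consumers > 1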
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

          %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
          return %0 : tensor<1x4x4x3xf32>
        }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
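    Here fused_activation_function = "RELU" means the relu is applied inside the conv op itself: relu(conv2d(input, filter) + bias). Since the filter in this test is 1x1 with stride 1, the conv reduces to a matmul over the channel axis, which makes the fused computation easy to sketch in numpy (assuming the usual TFLite OHWI filter layout):

      import numpy as np

      x = np.random.rand(1, 4, 4, 3).astype(np.float32)  # NHWC input
      w = np.random.rand(3, 1, 1, 3).astype(np.float32)  # OHWI 1x1 filter
      b = np.random.rand(3).astype(np.float32)           # per-channel bias

      # A 1x1, stride-1 conv is a matmul over channels; the fused "RELU"
      # clamps the biased result at zero within the same op.
      y = np.maximum(np.einsum('nhwi,oi->nhwo', x, w[:, 0, 0, :]) + b, 0.0)
      print(y.shape)  # (1, 4, 4, 3), matching tensor<1x4x4x3xf32>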
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir

    // CHECK: func private @quantized_conv2d_with_relu6_fn
    // CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
    // CHECK: func private @quantized_matmul_with_bias_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
    // CHECK: func private @quantized_matmul_fn
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 3.3K bytes
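    These quantized_*_fn wrappers implement int8 kernels for op patterns such as DepthwiseConv2D + BiasAdd + Relu. The affine quantization scheme underneath is standard; a minimal sketch:

      import numpy as np

      def quantize(x, scale, zero_point):
          # Affine scheme: real_value = scale * (quantized - zero_point).
          q = np.round(x / scale) + zero_point
          return np.clip(q, -128, 127).astype(np.int8)

      def dequantize(q, scale, zero_point):
          return scale * (q.astype(np.float32) - zero_point)

      x = np.array([-1.0, 0.0, 0.5, 1.0], dtype=np.float32)
      q = quantize(x, scale=1.0 / 127, zero_point=0)
      print(dequantize(q, scale=1.0 / 127, zero_point=0))  # ~ the input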
  6. tensorflow/compiler/mlir/tfr/integration/graph_decompose_test.py

        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
        sq = biased_dense(t1, t2, t3, act='relu')
        self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
    
      def testWithKnownKernel(self):
    
        @def_function.function
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.2K bytes
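    The asserted values are consistent with the composite computing relu(matmul(x, y) + z); checking the arithmetic by hand:

      import numpy as np

      t1 = np.array([[1.0, 2.0], [3.0, 4.0]])
      t2 = np.array([[1.0, 2.0], [3.0, 4.0]])
      t3 = np.full((2, 2), -10.0)

      # t1 @ t2 = [[7, 10], [15, 22]]; adding -10 gives [[-3, 0], [5, 12]];
      # relu then zeroes the negative entry.
      out = np.maximum(t1 @ t2 + t3, 0.0)
      print(out.reshape(-1))  # [ 0.  0.  5. 12.] -- the asserted values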
  7. tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py

        t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
        sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
        self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
    
      def testWithKnownKernel(self):
    
        def biasd_dense_elu(x, y, z):
          dot = gen_composite_ops.my_biased_dense(x, y, z)
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 3.9K bytes
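    The truncated testWithKnownKernel exercises an elu variant of the composite. Unlike relu, elu keeps a smooth negative branch; as a sketch:

      import numpy as np

      def elu(x, alpha=1.0):
          # ELU: identity for x > 0, alpha * (exp(x) - 1) otherwise, so
          # the negative side saturates at -alpha instead of clamping to 0.
          return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

      print(elu(np.array([-2.0, 0.0, 3.0])))  # approx [-0.865  0.  3.]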
  8. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir

      // tf.MyCustomOp is the result of conversion to a Custom op
      %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
      %3 = "tfl.exp"(%2)  : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      func.return %3 : tensor<4xf32>
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 4.1K bytes
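    After conversion, the fused "RELU" on tf.MyCustomOp plus the following tfl.exp give a dataflow of exp(relu(custom_result)). The custom op's own computation is opaque here, so a placeholder stands in:

      import numpy as np

      custom_result = np.array([-1.0, 0.5, 2.0], dtype=np.float32)  # placeholder
      # fused_activation_function="RELU" clamps the custom op's output at
      # zero before tfl.exp is applied elementwise.
      out = np.exp(np.maximum(custom_result, 0.0))
      print(out)  # approx [1.      1.6487  7.389 ]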
  9. tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir

      %cst = arith.constant dense<0.0> : tensor<2x3xbf16>
      %0 = "tfl.maximum"(%arg0, %cst) : (tensor<2x3xbf16>, tensor<2x3xbf16>) -> tensor<2x3xbf16>
      func.return %0 : tensor<2x3xbf16>
    
      // CHECK: %[[RESULT:.*]] = "tfl.relu"(%arg0)
      // CHECK: return %[[RESULT]]
    }
    
    // CHECK-LABEL: fuseScalarAddIntoConv2dBf16
    func.func @fuseScalarAddIntoConv2dBf16(%arg0: tensor<256x32x32x3xbf16>, %arg1: tensor<16x3x3x3xbf16>) -> tensor<256x8x7x16xbf16> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
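    The CHECK pair verifies that tfl.maximum of a tensor with a zero constant is rewritten to tfl.relu, which is sound because the two are pointwise identical:

      import numpy as np

      x = np.random.randn(2, 3).astype(np.float32)
      # maximum(x, 0) and relu(x) compute the same function, which is what
      # licenses replacing the maximum-with-zero-constant by tfl.relu.
      assert np.array_equal(np.maximum(x, 0.0), np.where(x > 0, x, 0.0))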
  10. tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td

    }
    
    // Allowed activation function cases
    // These should match the ActivationFunctionType enum in TFLite schema.
    def TFL_AFEnum_None  : I32EnumAttrCase<"NONE", 0>;
    def TFL_AFEnum_Relu  : I32EnumAttrCase<"RELU", 1>;
    def TFL_AFEnum_Relu1 : I32EnumAttrCase<"RELU_N1_TO_1", 2>;
    def TFL_AFEnum_Relu6 : I32EnumAttrCase<"RELU6", 3>;
    def TFL_AFEnum_Tanh  : I32EnumAttrCase<"TANH", 4>;
    def TFL_AFEnum_Sign  : I32EnumAttrCase<"SIGN_BIT", 5>;
    
    - Last Modified: Thu Oct 20 00:05:24 UTC 2022
    - 6.4K bytes
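    Read functionally, the enum cases map to these pointwise operations (the integer values match the ActivationFunctionType enum in the TFLite schema, per the comment in the excerpt; the SIGN_BIT reading is my assumption):

      import numpy as np

      FUSED_ACTIVATIONS = {
          0: lambda x: x,                      # NONE
          1: lambda x: np.maximum(x, 0.0),     # RELU
          2: lambda x: np.clip(x, -1.0, 1.0),  # RELU_N1_TO_1
          3: lambda x: np.clip(x, 0.0, 6.0),   # RELU6
          4: np.tanh,                          # TANH
          5: np.signbit,                       # SIGN_BIT (assumed: sign-bit test)
      }

      x = np.array([-3.0, 0.5, 8.0])
      print(FUSED_ACTIVATIONS[3](x))  # RELU6 -> [0.  0.5 6. ]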