Results 41 - 50 of 104 for RELU (0.06 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td

    // Canonicalize tf.Maximum of zero to tf.Relu
    //===----------------------------------------------------------------------===//
    
    def IsInteger32Pred: CPred<
      "getElementTypeOrSelf($0.getType()).isInteger(32)">;
    
    // Whether the transformation is compatible with the device if given.
    // Currently, Relu with int32 is not supported on GPU.
    def IsDeviceCompatible: Constraint<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 06 18:42:28 UTC 2023
    - 17K bytes
    - Viewed (0)
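    The pattern relies on the identity max(x, 0) = relu(x), and the truncated IsDeviceCompatible constraint then blocks the rewrite for int32 operands on GPU, where Relu has no kernel. A minimal Python sketch of the equivalence (values are illustrative only):

      import tensorflow as tf

      x = tf.constant([-2.0, -0.5, 0.0, 1.5, 3.0])

      via_maximum = tf.maximum(x, 0.0)  # the graph before canonicalization
      via_relu = tf.nn.relu(x)          # the graph after canonicalization

      # Both compute the element-wise maximum with zero.
      assert bool(tf.reduce_all(via_maximum == via_relu))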
  2. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
    - Viewed (0)
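    The CHECK line verifies that a Conv2D -> BiasAdd -> Relu chain has been collapsed into a single _FusedConv2D whose fused_ops attribute lists the absorbed ops. A hedged sketch of the unfused chain the matcher starts from, with shapes mirroring the test:

      import tensorflow as tf

      # Shapes mirror the test: NHWC input, 1x1 kernel with 128 output channels.
      x = tf.random.normal([8, 32, 32, 3])
      w = tf.random.normal([1, 1, 3, 128])
      b = tf.random.normal([128])

      # The unfused chain; the matcher rewrites it into one _FusedConv2D with
      # fused_ops = ["BiasAdd", "Relu"].
      y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
      y = tf.nn.bias_add(y, b)
      y = tf.nn.relu(y)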
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td

         (TFR_ConstantTensorOp (Arith_ConstantOp ConstantAttr<I32Attr, "127">))]>;
    
    def QuantActRangeReluPattern :
      Pattern<
        (TFR_TFRQuantActRangeOp
         (TFR_ConstOp HasStringAttr<"RELU">:$act),
         (ConstantLikeMatcher F32Attr:$scale),
         (ConstantLikeMatcher I64Attr:$zp)),
        [(TFR_ConstantTensorOp (Arith_ConstantOp (Quantize<"0.0f"> $scale, $zp))),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 2.4K bytes
    - Viewed (0)
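    The Quantize<"0.0f"> term produces the quantized representation of 0.0, which is the lower clamp of a quantized RELU; under affine quantization q = round(r / scale) + zero_point, quantizing 0.0 reduces to the zero point itself. A small sketch (the scale and zero point below are made up for illustration):

      def quantize(real_value: float, scale: float, zero_point: int) -> int:
          """Affine quantization: q = round(r / scale) + zero_point."""
          return round(real_value / scale) + zero_point

      # For RELU the lower bound of the activation range is quantize(0.0, ...),
      # which is just the zero point.
      scale, zero_point = 0.05, -10
      assert quantize(0.0, scale, zero_point) == zero_point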
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      // CHECK: tac.device = "CPU", tac.inference_type = "FLOAT"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
    - Viewed (0)
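    The fused_activation_function = "RELU" attribute on tfl.add typically originates from the TFLite converter fusing a trailing relu into the preceding op. A hedged sketch of the source pattern:

      import tensorflow as tf

      @tf.function
      def add_relu(a, b):
          # An add followed by relu; the TFLite converter commonly fuses this
          # into a single tfl.add with fused_activation_function = "RELU",
          # as in the annotated op above.
          return tf.nn.relu(a + b)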
  6. tensorflow/c/experimental/ops/update_cpp_ops.sh

      MatMul \
      Neg \
      Sum \
      Sub \
      Div \
      DivNoNan \
      Exp \
      Sqrt \
      SqrtGrad \
      Log1p
    
    ${generate} \
      --category=nn \
      SparseSoftmaxCrossEntropyWithLogits \
      ReluGrad \
      Relu \
      BiasAdd \
      BiasAddGrad
    
    ${generate} \
      --category=resource_variable \
      VarHandleOp \
      ReadVariableOp \
      AssignVariableOp \
      DestroyResourceOp
    
    ${generate} \
      --category=io \
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 17 17:54:34 UTC 2022
    - 1.6K bytes
    - Viewed (0)
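    The script regenerates the C++ op wrappers, grouped by --category; each listed name corresponds to a raw TensorFlow op. From Python, the same nn-category ops are reachable through tf.raw_ops, as in this sketch:

      import tensorflow as tf

      x = tf.constant([[-1.0, 2.0]])
      b = tf.constant([0.5, 0.5])

      # BiasAdd and Relu from the nn category above, invoked as raw ops.
      y = tf.raw_ops.BiasAdd(value=x, bias=b)
      z = tf.raw_ops.Relu(features=y)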
  7. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py

    
    def mnist_model():
      """Creates a MNIST model."""
      model = tf.keras.models.Sequential()
      model.add(tf.keras.layers.Flatten())
      model.add(tf.keras.layers.Dense(128, activation='relu'))
      model.add(tf.keras.layers.Dense(10, activation='softmax'))
      return model
    
    
    class TestModule(tf.Module):
    
      def __init__(self):
        super(TestModule, self).__init__()
        self.model = mnist_model()
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 1.7K bytes
    - Viewed (0)
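    A usage sketch for the snippet's mnist_model (the 28x28 input shape is assumed here; since no input_shape is given, the layers infer their shapes on the first call):

      import tensorflow as tf

      model = mnist_model()  # mnist_model as defined in the snippet above
      logits = model(tf.zeros([1, 28, 28]))  # assumed MNIST-shaped input
      print(logits.shape)  # (1, 10): softmax over the ten digit classes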
  8. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/insert_fallback_tensor_copy.mlir

      // CHECK-NOT: tfrt_fallback_async.copy_if_small
      %0 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
      %1 = tfrt_fallback_async.executeop key(0) cost(1024) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.Relu"(%arg) {T = f32} : 1
      tfrt.return %0, %1 : !tfrt_fallback.tf_tensor, !tfrt_fallback.tf_tensor
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 25 10:51:48 UTC 2022
    - 5.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

      # If present, the last op before return should be stablehlo.clamp for relu6
      # and stablehlo.maximum for relu.
          if activation_fn is nn_ops.relu6:
            self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
          elif activation_fn is nn_ops.relu:
            self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
        else:
          # Check activation functions are implicit.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
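    The asserted ops follow from the shapes of the two activations: relu bounds its input on one side only, so it lowers to a single maximum, while relu6 bounds both sides and lowers to a clamp. In TF terms (hedged sketch):

      import tensorflow as tf

      x = tf.constant([-3.0, 2.0, 7.0])

      relu = tf.maximum(x, 0.0)              # one-sided bound -> stablehlo.maximum
      relu6 = tf.clip_by_value(x, 0.0, 6.0)  # two-sided bound -> stablehlo.clamp
      # relu  == [0., 2., 7.]; relu6 == [0., 2., 6.]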
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

          %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
          return %0 : tensor<1x4x4x3xf32>
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
    - Viewed (0)
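    A hedged Keras sketch of the graph under test: a 1x1 convolution with three filters, VALID padding, stride 1, and a fused relu, matching the tfl.conv_2d attributes above:

      import tensorflow as tf

      layer = tf.keras.layers.Conv2D(
          filters=3, kernel_size=1, strides=1, padding="valid", activation="relu")

      # Input shape mirrors the test's tensor<1x4x4x3xf32>; output is (1, 4, 4, 3).
      y = layer(tf.random.normal([1, 4, 4, 3]))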