Results 21 - 30 of 104 for RELU (0.03 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir

      // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%[[ARG_TRANSPOSE]]) {{.*}} tensor<1x8x4x4xf32>
      // CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x8x4x4xf32>
      // CHECK: return %[[RELU]]
    
      %0 = "tf.Tanh"(%arg0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
      %1 = "tf.Relu"(%0) : (tensor<1x4x4x8xf32>) -> tensor<1x4x4x8xf32>
    
      %2 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.3K bytes
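
    For context: the test above checks the pass that moves transposes to the
    beginning of the elementwise chain. A minimal NumPy sketch (assumed, not
    TensorFlow API) of why the rewrite is safe: the permutation [0, 3, 1, 2]
    maps NHWC to NCHW, and elementwise ops such as tf.Relu commute with the
    transpose.

      import numpy as np

      x = np.random.randn(1, 4, 4, 8).astype(np.float32)  # NHWC, as in the test
      perm = (0, 3, 1, 2)                                  # NHWC -> NCHW
      assert np.transpose(x, perm).shape == (1, 8, 4, 4)

      # Relu(Transpose(x)) == Transpose(Relu(x)), so the pass may move the
      # transpose to either side of the elementwise op without changing results.
      assert np.allclose(np.maximum(np.transpose(x, perm), 0.0),
                         np.transpose(np.maximum(x, 0.0), perm))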
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
      %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
      %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
    
      %6 = "tf.Conv2D"(%arg0, %arg1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
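
    For context, the pass in this test matches the Conv2D -> BiasAdd -> Relu
    chain shown above and lifts it into a quantizable function. A minimal NumPy
    sketch (assumed; a 1x1 filter stands in for the 2x3 filter in the snippet)
    of the computation that chain performs:

      import numpy as np

      x = np.random.randn(1, 3, 4, 3).astype(np.float32)  # NHWC input
      w = np.random.randn(1, 1, 3, 2).astype(np.float32)  # HWIO, 1x1 filter
      b = np.random.randn(2).astype(np.float32)

      conv = x @ w[0, 0]             # Conv2D with a 1x1 kernel, stride 1
      y = np.maximum(conv + b, 0.0)  # BiasAdd (NHWC) followed by Relu
      assert y.shape == (1, 3, 4, 2)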
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    // CHECK-SAME: f = @composite_conv3d_fn_1}>
    // CHECK-NOT: {_tfl_quant_trait = "fully_quantizable"
    // CHECK: %[[RELU:.*]] = "tf.Relu"(%[[PARTITIONEDCALL_0]])
    // CHECK: return %[[RELU]]
    
    // CHECK-LABEL: private @composite_conv3d_fn_1
    
    // WEIGHTONLY-DAG: %[[CST:.*]] = "tf.Const"() {{.*}} : () -> tensor<2x3x3x3x2xf32>
    // WEIGHTONLY: %[[PARTITIONEDCALL_0:.*]] = "tf.PartitionedCall"(%arg0, %[[CST]])
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
  4. tensorflow/c/experimental/ops/nn_ops.h

    // Computes rectified linear gradients for a Relu operation.
    Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                    AbstractTensorHandle* const features,
                    AbstractTensorHandle** backprops, const char* name = nullptr,
                    const char* raw_device_name = nullptr);
    
    // Computes rectified linear: `max(features, 0)`.
    Status Relu(AbstractContext* ctx, AbstractTensorHandle* const features,
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 2.6K bytes
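
    The header above documents the semantics directly: Relu computes
    max(features, 0) and ReluGrad computes its backward pass. A NumPy sketch
    (assumed, mirroring the documented behavior rather than the C API):

      import numpy as np

      def relu(features):
          # tf.Relu: elementwise max(features, 0).
          return np.maximum(features, 0.0)

      def relu_grad(gradients, features):
          # tf.ReluGrad: pass gradients through where the forward input was
          # positive, zero elsewhere.
          return gradients * (features > 0.0)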
  5. tensorflow/c/experimental/gradients/nn_grad_test.cc

    using tensorflow::TF_StatusPtr;
    
    Status ReluModel(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> inputs,
                     absl::Span<AbstractTensorHandle*> outputs) {
      return ops::Relu(ctx, inputs[0], &outputs[0], "Relu");
    }
    
    Status SparseSoftmaxCrossEntropyWithLogitsModel(
        AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
        absl::Span<AbstractTensorHandle*> outputs) {
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 8.3K bytes
  6. tensorflow/compiler/mlir/lite/tests/end2end/back2back_fake_quant.pbtxt

        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "data_format"
        value {
          s: "NHWC"
        }
      }
    }
    node {
      name: "sequential/quant_dense/Relu"
      op: "Relu"
      input: "sequential/quant_dense/BiasAdd"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
    }
    node {
    - Last Modified: Mon Nov 15 19:42:47 UTC 2021
    - 25.9K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

        parameters[
          {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "!tf_type.qint8"},
          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "!tf_type.qint8"},
          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "!tf_type.qint8"},
        ]
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
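
    The parameter list above expands one quantized function template per
    activation variant. A sketch (assumed Python; the act_func names come from
    the snippet, and only the activation step is shown, not the requantization):

      import numpy as np

      ACT_FUNCS = {
          "internal_requantize_no_activation_fn": lambda x: x,
          "internal_requantize_and_relu_fn":      lambda x: np.maximum(x, 0.0),
          "internal_requantize_and_relu6_fn":     lambda x: np.minimum(np.maximum(x, 0.0), 6.0),
      }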
  8. tensorflow/compiler/jit/mark_for_compilation_pass_test.cc

        Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
        Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
        Node* d =
            ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D"));
        Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E"));
        ops::UnaryOp("Relu", e, builder.opts().WithName("F"));
    - Last Modified: Wed Feb 14 10:11:10 UTC 2024
    - 79.6K bytes
  9. tensorflow/compiler/mlir/tfr/python/op_reg_gen_test.py

    @composite.Composite(
        'TestNoOp', derived_attrs=['T: numbertype'], outputs=['o1: T'])
    def _composite_no_op():
      pass
    
    
    @Composite(
        'TestCompositeOp',
        inputs=['x: T', 'y: T'],
        attrs=['act: {"", "relu"}', 'trans: bool = true'],
        derived_attrs=['T: numbertype'],
        outputs=['o1: T', 'o2: T'])
    def _composite_op(x, y, act, trans):
      return x + act, y + trans
    
    
    class TFRGenTensorTest(test.TestCase):
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 2.5K bytes
  10. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

      // CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%arg0) {{.*}} tensor<1x4x4x8xf32>
      // CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x4x4x8xf32>
      // CHECK: %[[RES_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%[[RELU]], %[[RES_PERM]])
      // CHECK: return %[[RES_TRANSPOSE]]
    
      %0 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes