Results 1 - 6 of 6 for RELU (0.08 sec)

  1. tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td

                            (TFL_RangeOp $start, $limit, $delta)>;
    def LegalizeRelu6 : Pat<(TF_Relu6Op $arg), (TFL_Relu6Op $arg)>;
    def LegalizeRelu : Pat<(TF_ReluOp $arg), (TFL_ReluOp $arg)>;
    // TFL Relu doesn't support I32/I64 type, so legalizes TF Relu to TFL Maximum.
    def LegalizeReluI32 :
      Pat<(TF_ReluOp TensorOf<[I32]>:$arg),
          (TFL_MaximumOp $arg,
            (Arith_ConstantOp ConstantAttr<RankedI32ElementsAttr<[]>,"0">))>;
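    The LegalizeReluI32 pattern above rewrites tf.Relu on i32 tensors as an element-wise maximum
    against a scalar zero constant, because tfl.relu has no integer kernels. A minimal NumPy sketch
    of that equivalence (illustrative values only, not the actual lowering code):

    import numpy as np

    # Hypothetical i32 input; tfl.relu has no i32/i64 kernel, so the pattern
    # rewrites relu(x) as an element-wise maximum with a scalar 0 constant.
    x = np.array([-7, 0, 3, -1], dtype=np.int32)

    relu_reference = np.where(x > 0, x, 0)       # what tf.Relu computes
    legalized = np.maximum(x, np.int32(0))       # what tfl.maximum produces

    assert (relu_reference == legalized).all()   # both yield [0, 0, 3, 0]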
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let hasFolder = 1;
    }
    
    def TFL_ReluOp: TFL_Op<"relu", [
        PredOpTrait<"x and y must have same element type",
          TFL_TCresVTEtIsSameAsOp<0, 0>>,
        Pure,
        QuantizableResult,
        SameOperandsAndResultShape]> {
      let summary = "Relu operator";
    
      let description = [{
        Element-wise Relu operator
          x -> max(0, x)
      }];
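    The description reduces to x -> max(0, x) applied element-wise. A quick hedged check with the
    TensorFlow Python API (assumes a stock TF install; the input values are arbitrary):

    import tensorflow as tf

    x = tf.constant([-1.5, 0.0, 2.0])
    y = tf.nn.relu(x)   # element-wise max(0, x): values 0.0, 0.0, 2.0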
    
  3. tensorflow/compiler/mlir/lite/tests/ops.mlir

      // CHECK: "NONE"
      %0 = tfl.add %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<4xi32>
      // CHECK: "RELU"
      %1 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU"} : tensor<4xi32>
      // CHECK: "RELU_N1_TO_1"
      %2 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU_N1_TO_1"} : tensor<4xi32>
      // CHECK: "RELU6"
  4. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    // CHECK: "tfl.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<4x5xi32>, tensor<1x5xi32>, tensor<2xi64>) -> tensor<4x5xi32>
    }
    
    func.func @testReluI32(%arg0: tensor<1xi32>) -> tensor<1xi32> {
      %0 = "tf.Relu"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
      func.return %0: tensor<1xi32>
    
    // CHECK-LABEL: testReluI32
    // CHECK:  %[[CONST_0:.*]] = arith.constant dense<0> : tensor<i32>
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      let summary = "Computes rectified linear gradients for a Relu operation.";
    
      let arguments = (ins
        Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu operation.}]>:$gradients,
        Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu operation, OR
    the outputs of that operation (both work equivalently).}]>:$features
      );
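    In other words, ReluGrad passes each incoming gradient through wherever the corresponding
    feature was positive and zeroes it elsewhere. A hedged NumPy sketch of that rule (not the real
    kernel; values are made up):

    import numpy as np

    gradients = np.array([0.1, -0.2, 0.3, 0.4])   # backpropagated gradients
    features = np.array([-1.0, 2.0, 0.0, 5.0])    # inputs to the forward Relu

    # Gradient flows only where the Relu was active (features > 0).
    backprops = gradients * (features > 0)        # -> 0.0, -0.2, 0.0, 0.4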
    
  6. RELEASE.md

        to matrix multiplication and convolution, these building blocks include:
        *   Direct batched convolution
        *   Pooling: maximum, minimum, average
        *   Normalization: LRN, batch normalization
        *   Activation: rectified linear unit (ReLU)
        *   Data manipulation: multi-dimensional transposition (conversion), split, concat, sum and scale
    
    *   TensorForest Estimator now supports SavedModel export for serving.
    