Results 1 - 8 of 8 for RELU (0.06 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

          ('none', None, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('relu', nn_ops.relu, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('relu6', nn_ops.relu6, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('with_bias', None, True, False, quant_opts_pb2.TF, False, 'SAME'),
          (
              'with_bias_and_relu',
              nn_ops.relu,
              True,
              False,
              quant_opts_pb2.TF,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      func.return %0 : tensor<*xf32>
    }
    
    // CHECK-LABEL: testMaximumOfZeroToReluFloat
    func.func @testMaximumOfZeroToReluFloat(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      // CHECK: %0 = "tf.Relu"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<4xf32>) -> tensor<4xf32>
      // CHECK: return %0
      %cst_0 = arith.constant dense<0.000000e+00> : tensor<f32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let hasFolder = 1;
    }
    
    def TFL_ReluOp: TFL_Op<"relu", [
        PredOpTrait<"x and y must have same element type",
          TFL_TCresVTEtIsSameAsOp<0, 0>>,
        Pure,
        QuantizableResult,
        SameOperandsAndResultShape]> {
      let summary = "Relu operator";
    
      let description = [{
        Element-wise Relu operator
          x -> max(0, x)
      }];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/ops.mlir

      // CHECK: "NONE"
      %0 = tfl.add %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<4xi32>
      // CHECK: "RELU"
      %1 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU"} : tensor<4xi32>
      // CHECK: "RELU_N1_TO_1"
      %2 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU_N1_TO_1"} : tensor<4xi32>
      // CHECK: "RELU6"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    // CHECK: "tfl.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<4x5xi32>, tensor<1x5xi32>, tensor<2xi64>) -> tensor<4x5xi32>
    }
    
    func.func @testReluI32(%arg0: tensor<1xi32>) -> tensor<1xi32> {
      %0 = "tf.Relu"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
      func.return %0: tensor<1xi32>
    
    // CHECK-LABEL: testReluI32
    // CHECK:  %[[CONST_0:.*]] = arith.constant dense<0> : tensor<i32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

        "tf.Yield"(%t0, %t1, %t2) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> ()
        }, {
         %e0 = "tf.Neg"(%arg1) : (tensor<2xf32>) -> tensor<2xf32>
         %e1 = "tf.Relu"(%arg1) : (tensor<2xf32>) -> tensor<2xf32>
         %e2 = "tf.Sin"(%arg1) : (tensor<2xf32>) -> tensor<2xf32>
         "tf.Yield"(%e0, %e1, %e2) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/optimize.cc

    // The actual Optimize Pass.
    namespace {
    #define GEN_PASS_DEF_OPTIMIZEPASS
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
    
    constexpr char kRelu[] = "RELU";
    constexpr char kRelu6[] = "RELU6";
    constexpr char kRelu1[] = "RELU_N1_TO_1";
    
    ElementsAttr FlattenTo1D(Attribute a) {
      auto elements = mlir::cast<DenseElementsAttr>(a);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc

    //===----------------------------------------------------------------------===//
    
    OpFoldResult LeakyReluOp::fold(FoldAdaptor adaptor) {
      auto operands = adaptor.getOperands();
      assert(operands.size() == 1 && "leaky relu has one operand");
    
      // leaky_relu(x, alpha: 1) -> x
      if (getAlpha().convertToFloat() == 1.0f &&
          getOperand().getType() == getType())
        return getOperand();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 146.7K bytes
    - Viewed (0)
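
Result 3's TFL_ReluOp description defines relu as x -> max(0, x), and results 4 and 7 show the three fused-activation names that TFLite attaches to ops such as tfl.add: "RELU", "RELU6", and "RELU_N1_TO_1" (the kRelu/kRelu6/kRelu1 constants in optimize.cc). Below is a minimal NumPy sketch of the clamps those names denote; the helper names are illustrative only, not TensorFlow or TFLite APIs.

    import numpy as np

    def relu(x):
        # "RELU": x -> max(0, x), as in the TFL_ReluOp description (result 3).
        return np.maximum(x, 0.0)

    def relu6(x):
        # "RELU6": relu additionally capped at 6, i.e. clamp to [0, 6].
        return np.clip(x, 0.0, 6.0)

    def relu_n1_to_1(x):
        # "RELU_N1_TO_1": clamp to [-1, 1].
        return np.clip(x, -1.0, 1.0)

    x = np.array([-2.0, -0.5, 0.5, 3.0, 7.0])
    print(relu(x))          # -> 0, 0, 0.5, 3, 7
    print(relu6(x))         # -> 0, 0, 0.5, 3, 6
    print(relu_n1_to_1(x))  # -> -1, -0.5, 0.5, 1, 1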
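
Results 2 and 5 both rest on the identity max(x, 0) == relu(x): canonicalize.mlir rewrites tf.Maximum against a zero constant into tf.Relu, while legalize-tf.mlir expects an i32 tf.Relu to lower to tfl.maximum against a zero constant. The following is a quick numerical check of that identity, assuming a standard tensorflow install; it is not the MLIR rewrite itself, only the arithmetic the rewrite preserves.

    import numpy as np
    import tensorflow as tf

    # Float case, as in testMaximumOfZeroToReluFloat (result 2).
    x = tf.constant([-3.0, -1.0, 0.0, 2.0, 5.0])
    assert np.array_equal(tf.nn.relu(x).numpy(), tf.maximum(x, 0.0).numpy())

    # The same identity holds for the integer case exercised by testReluI32 (result 5).
    xi = tf.constant([-3, -1, 0, 2, 5], dtype=tf.int32)
    assert np.array_equal(tf.nn.relu(xi).numpy(), tf.maximum(xi, 0).numpy())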
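
Result 8's fold replaces LeakyRelu with its input when alpha == 1: leaky_relu(x) is x for x >= 0 and alpha * x otherwise, and both branches reduce to x when alpha is 1. A small check of that equivalence, again assuming a standard tensorflow install (the fold itself lives in LeakyReluOp::fold in tf_ops_a_m.cc):

    import numpy as np
    import tensorflow as tf

    x = tf.constant([-4.0, -0.5, 0.0, 1.5, 3.0])
    folded = x                                  # what the fold returns
    original = tf.nn.leaky_relu(x, alpha=1.0)   # the op before folding
    assert np.array_equal(folded.numpy(), original.numpy())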