Results 31 - 40 of 79 for RELU (0.07 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  2. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

      %none_attr = tfr.constant "NONE" -> !tfr.attr
      %relu_attr = tfr.constant "RELU" -> !tfr.attr
      %relu6_attr = tfr.constant "RELU6" -> !tfr.attr
      %reluN1_1_attr = tfr.constant "RELU_N1_TO_1" -> !tfr.attr
      %none:2 = "tfr.quant_act_range"(%none_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
      %relu:2 = "tfr.quant_act_range"(%relu_attr, %scale, %zp) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
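    A minimal sketch of the idea behind the quant_act_range decomposition above: given a
    fused activation name plus a quantization scale and zero point, compute the clamped
    int8 range that the activation implies. This is illustrative Python only, not the TFR
    implementation; the helper name and the int8 bounds are assumptions.

      # Illustrative only: maps a fused activation function to a clamped int8 range
      # from (scale, zero_point); not the tfr.quant_act_range implementation.
      def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
          def quantize(x):
              # Round to the nearest quantized value and clamp to the int8 range.
              return max(qmin, min(qmax, round(x / scale) + zero_point))

          if act == "NONE":
              return qmin, qmax
          if act == "RELU":
              return quantize(0.0), qmax
          if act == "RELU6":
              return quantize(0.0), quantize(6.0)
          if act == "RELU_N1_TO_1":
              return quantize(-1.0), quantize(1.0)
          raise ValueError(f"unknown fused activation: {act}")

      print(quant_act_range("RELU", scale=0.1, zero_point=0))   # (0, 127)
      print(quant_act_range("RELU6", scale=0.1, zero_point=0))  # (0, 60)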
  3. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        // Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
        if (IsGpuDevice(conv)) {
          auto activation = GetActivation(bias_add);
          if (!activation || activation->getName().stripDialect() != "Relu" ||
              !bias_add.getOutput().hasOneUse()) {
            (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
              diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
            });
            return false;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
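    For reference, the unfused graph this matcher looks for on GPU is just a Conv2D followed
    by a BiasAdd and a Relu. A minimal TensorFlow sketch of that pattern (shapes and values
    are made up for illustration):

      # Illustrative Conv2D + BiasAdd + Relu pattern; only this activation is
      # eligible for the GPU fusion described in the snippet above.
      import tensorflow as tf

      x = tf.random.normal([1, 5, 5, 2])    # NHWC input
      w = tf.random.normal([3, 3, 2, 4])    # HWIO filter
      b = tf.zeros([4])                     # one bias per output channel

      conv = tf.nn.conv2d(x, w, strides=1, padding="SAME")
      biased = tf.nn.bias_add(conv, b)
      y = tf.nn.relu(biased)
      print(y.shape)                        # (1, 5, 5, 4)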
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

          ('none', None, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('relu', nn_ops.relu, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('relu6', nn_ops.relu6, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('with_bias', None, True, False, quant_opts_pb2.TF, False, 'SAME'),
          (
              'with_bias_and_relu',
              nn_ops.relu,
              True,
              False,
              quant_opts_pb2.TF,
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
  5. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir

      // tf.MyCustomOp is the result of conversion to a Custom op
      %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
      %3 = "tfl.exp"(%2)  : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      func.return %3 : tensor<4xf32>
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 4.1K bytes
  6. tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir

      %cst = arith.constant dense<0.0> : tensor<2x3xbf16>
      %0 = "tfl.maximum"(%arg0, %cst) : (tensor<2x3xbf16>, tensor<2x3xbf16>) -> tensor<2x3xbf16>
      func.return %0 : tensor<2x3xbf16>
    
      // CHECK: %[[RESULT:.*]] = "tfl.relu"(%arg0)
      // CHECK: return %[[RESULT]]
    }
    
    // CHECK-LABEL: fuseScalarAddIntoConv2dBf16
    func.func @fuseScalarAddIntoConv2dBf16(%arg0: tensor<256x32x32x3xbf16>, %arg1: tensor<16x3x3x3xbf16>) -> tensor<256x8x7x16xbf16> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
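    The rewrite exercised by this test relies on the identity max(x, 0) == relu(x), which is
    why tfl.maximum against a zero constant can be replaced by tfl.relu. A quick NumPy check
    of that identity (illustrative only):

      # Confirm that maximum(x, 0) matches ReLU elementwise.
      import numpy as np

      x = np.random.randn(2, 3).astype(np.float32)
      relu = np.where(x > 0, x, 0.0)
      assert np.array_equal(np.maximum(x, 0.0), relu)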
  7. tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td

    }
    
    // Allowed activation function cases
    // These should match the ActivationFunctionType enum in TFLite schema.
    def TFL_AFEnum_None  : I32EnumAttrCase<"NONE", 0>;
    def TFL_AFEnum_Relu  : I32EnumAttrCase<"RELU", 1>;
    def TFL_AFEnum_Relu1 : I32EnumAttrCase<"RELU_N1_TO_1", 2>;
    def TFL_AFEnum_Relu6 : I32EnumAttrCase<"RELU6", 3>;
    def TFL_AFEnum_Tanh  : I32EnumAttrCase<"TANH", 4>;
    def TFL_AFEnum_Sign  : I32EnumAttrCase<"SIGN_BIT", 5>;
    
    - Last Modified: Thu Oct 20 00:05:24 UTC 2022
    - 6.4K bytes
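    The enum cases above name the clamp each fused activation applies to an op's output. A
    small Python reference of those ranges (the RELU, RELU6, and RELU_N1_TO_1 bounds follow
    standard TFLite semantics; TANH and SIGN_BIT are not simple clamps and are omitted):

      # Clamp ranges implied by the TFLite fused activation function values above.
      import math

      ACTIVATION_RANGES = {
          "NONE":         (-math.inf, math.inf),  # 0: no clamping
          "RELU":         (0.0, math.inf),        # 1: max(x, 0)
          "RELU_N1_TO_1": (-1.0, 1.0),            # 2: clamp to [-1, 1]
          "RELU6":        (0.0, 6.0),             # 3: clamp to [0, 6]
      }

      def apply_fused_activation(x, act):
          lo, hi = ACTIVATION_RANGES[act]
          return min(max(x, lo), hi)

      print(apply_fused_activation(7.5, "RELU6"))          # 6.0
      print(apply_fused_activation(-2.0, "RELU_N1_TO_1"))  # -1.0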
  8. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    // Fusing:  %[[add1:[0-9].*]] = tfl.add %arg0, %[[add]] {fused_activation_function = "RELU"} : tensor<1xf32>
    // Fusing:  %[[relu:[0-9].*]] = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
    // Fusing:  %[[add2:[0-9].*]] = tfl.add %[[relu]], %[[add1]] {fused_activation_function = "RELU6"} : tensor<1xf32>
    // Fusing:  %[[add3:[0-9].*]] = tfl.add %[[add2]], %[[relu]] {fused_activation_function = "RELU6"} : tensor<1xf32>
    // Fusing:  return
    
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  9. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

         (HasRankAtMost<4> $a),
         (HasRankAtMost<4> $b)]>;
    }
    
    // We can eliminate Relu from Relu(SquaredDifference(x, y)),
    // since the result of SquaredDifference is always non-negative.
    // The TFLite interpreter doesn't support Relu on int32 for now, so the test
    // cases fail unless the following pattern optimizes the Relu away.
    def OptimizeReluSquaredDifference : Pat<
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
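    The comment above justifies the pattern: (x - y)^2 is never negative, so applying Relu to
    it changes nothing and the Relu can be dropped. A small NumPy sanity check (illustrative
    only):

      # relu(squared_difference(x, y)) equals squared_difference(x, y), since the
      # squared difference is always non-negative.
      import numpy as np

      x = np.random.randn(4, 3).astype(np.float32)
      y = np.random.randn(4, 3).astype(np.float32)

      sq_diff = (x - y) ** 2
      assert np.array_equal(np.maximum(sq_diff, 0.0), sq_diff)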
  10. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

        %14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes