Results 1 - 10 of 18 for RELU (0.61 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
      %4 = "tf.BiasAdd"(%3, %cst) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
      %5 = "tf.Relu"(%4) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
    
      %6 = "tf.Conv2D"(%arg0, %arg1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
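    A minimal NumPy sketch of what this Conv2D -> BiasAdd -> Relu chain
    computes (assuming VALID padding, stride 1, no dilation; an
    illustration of the matched pattern, not TensorFlow's kernels;
    conv2d_nhwc is a hypothetical helper):

      import numpy as np

      def conv2d_nhwc(x, w, stride=1):
          """VALID-padding 2-D convolution; NHWC input, HWIO filter."""
          n, h, w_in, _ = x.shape
          kh, kw, _, co = w.shape
          out_h = (h - kh) // stride + 1
          out_w = (w_in - kw) // stride + 1
          out = np.zeros((n, out_h, out_w, co), dtype=x.dtype)
          for i in range(out_h):
              for j in range(out_w):
                  patch = x[:, i*stride:i*stride+kh, j*stride:j*stride+kw, :]
                  out[:, i, j, :] = np.einsum("nhwc,hwco->no", patch, w)
          return out

      x = np.random.randn(1, 3, 4, 3).astype(np.float32)  # tensor<1x3x4x3xf32>
      w = np.random.randn(2, 3, 3, 2).astype(np.float32)  # tensor<2x3x3x2xf32>
      bias = np.random.randn(2).astype(np.float32)        # tensor<2xf32>

      y = conv2d_nhwc(x, w)    # tf.Conv2D
      y = y + bias             # tf.BiasAdd; NHWC puts bias on the last axis
      y = np.maximum(y, 0.0)   # tf.Relu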
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
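    In TFLite, fused_activation_function = "RELU" means the op clamps its
    own result as part of the same kernel. A sketch of the assumed
    semantics, using tfl.add for brevity (the same convention applies to
    the tfl.conv_2d in the snippet above):

      import numpy as np

      def add_fused_relu(a, b):
          # tfl.add with fused RELU: add, then clamp at zero in one step
          return np.maximum(a + b, 0.0)

      a = np.array([-1.0, 2.0], dtype=np.float32)
      b = np.array([-2.0, 3.0], dtype=np.float32)
      print(add_fused_relu(a, b))  # [0. 5.]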
  4. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        // Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
        if (IsGpuDevice(conv)) {
          auto activation = GetActivation(bias_add);
          if (!activation || activation->getName().stripDialect() != "Relu" ||
              !bias_add.getOutput().hasOneUse()) {
            (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
              diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
            });
            return false;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
  5. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

        %14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes
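    A NumPy re-expression of the dataflow in this while-body snippet
    (a sketch only; the array contents are placeholders):

      import numpy as np

      def logistic(t):
          return 1.0 / (1.0 + np.exp(-t))   # tfl.logistic

      a = np.random.randn(4, 2).astype(np.float32)  # %10#0
      b = np.random.randn(4, 2).astype(np.float32)  # %10#1
      c = np.random.randn(4, 2).astype(np.float32)  # %13

      v16 = logistic(a) * np.maximum(b, 0.0)  # %14, %15, %16
      v18 = np.maximum(c + v16, 0.0)          # %17, then %18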
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

      %broad2_s = "quantfork.stats"(%broad2) {layerStats = dense<[0.000000e+00, 1.000000e+01]> : tensor<2xf32>} : (tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
      %add = "tfl.add"(%broad1_s, %broad2_s) {fused_activation_function = "RELU"} : (tensor<?x26x26x26x16xf32>, tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
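    The quantfork.stats op records a [min, max] calibration range;
    layerStats above is [0.0, 10.0]. A sketch of how such stats commonly
    become int8 affine quantization parameters (the standard formula,
    not this pass's exact code; qparams_int8 is a hypothetical helper):

      def qparams_int8(rmin, rmax, qmin=-128, qmax=127):
          rmin, rmax = min(rmin, 0.0), max(rmax, 0.0)  # range must cover 0
          scale = (rmax - rmin) / (qmax - qmin)
          zero_point = int(round(qmin - rmin / scale))
          return scale, zero_point

      scale, zp = qparams_int8(0.0, 10.0)  # scale ~ 0.0392, zp = -128
      x = 4.2
      q = max(-128, min(127, round(x / scale) + zp))  # quantize
      x_hat = (q - zp) * scale                        # dequantize, ~4.196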
  7. tensorflow/compiler/mlir/lite/transforms/legalize_patterns.td

                            (TFL_RangeOp $start, $limit, $delta)>;
    def LegalizeRelu6 : Pat<(TF_Relu6Op $arg), (TFL_Relu6Op $arg)>;
    def LegalizeRelu : Pat<(TF_ReluOp $arg), (TFL_ReluOp $arg)>;
    // TFL Relu doesn't support I32/I64 type, so legalizes TF Relu to TFL Maximum.
    def LegalizeReluI32 :
      Pat<(TF_ReluOp TensorOf<[I32]>:$arg),
          (TFL_MaximumOp $arg,
            (Arith_ConstantOp ConstantAttr<RankedI32ElementsAttr<[]>,"0">))>;
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 28.5K bytes
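    The LegalizeReluI32 pattern relies on the identity Relu(x) ==
    Maximum(x, 0), which holds for integer types as well; a quick NumPy
    check of that equivalence:

      import numpy as np

      x = np.array([-3, -1, 0, 2, 7], dtype=np.int32)
      relu = np.where(x > 0, x, 0)           # semantics of tf.Relu
      maximum = np.maximum(x, np.int32(0))   # semantics of tfl.maximum
      assert (relu == maximum).all()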
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

      %0 = stablehlo.constant dense<2.000000e+00> : tensor<4x2x3x3xf32>  // weight
      %1 = stablehlo.constant dense<3.000000e+00> : tensor<4xf32>  // bias
      %2 = stablehlo.constant dense<0.000000e+00> : tensor<1x4x5x5xf32>  // relu
      %3 = stablehlo.broadcast_in_dim %1, dims = [1] : (tensor<4xf32>) -> tensor<1x4x5x5xf32>
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
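    StableHLO has no dedicated ReLU op, so the zero constant labeled
    "relu" here is typically consumed by an elementwise maximum; a NumPy
    sketch of the assumed pattern (the hit cuts off before the
    convolution and maximum themselves):

      import numpy as np

      y = np.random.randn(1, 4, 5, 5).astype(np.float32)  # conv output, NCHW
      bias = 3.0 * np.ones(4, dtype=np.float32)           # tensor<4xf32>
      zeros = np.zeros((1, 4, 5, 5), dtype=np.float32)    # the "relu" constant

      y = y + bias.reshape(1, 4, 1, 1)  # broadcast_in_dim dims = [1], add
      y = np.maximum(y, zeros)          # relu as stablehlo.maximum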
  9. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
      RELU_N1_TO_1 = 20,
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
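    RELU_N1_TO_1 (value 20 above) clamps to the range [-1, 1] rather than
    only cutting off negatives; a one-line NumPy illustration:

      import numpy as np

      x = np.array([-2.5, -0.5, 0.0, 0.5, 2.5], dtype=np.float32)
      print(np.clip(x, -1.0, 1.0))  # [-1.  -0.5  0.   0.5  1. ]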
  10. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %2 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
      %3 = "tfl.conv_2d"(%0, %1, %2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
        fused_activation_function = "RELU", padding = "VALID",
        stride_h = 1 : i32, stride_w = 1 : i32} : (
          tensor<?x5x5x2xf32>, tensor<3x5x5x2xf32>, tensor<3xf32>) -> tensor<?x1x1x3xf32>
      %4 = "quantfork.stats"(%3) {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes