Results 1 - 10 of 50 for dilation_h (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

                                  llvm::StringMap<Attribute>& identifier_to_attr) {
      ArrayAttr dilations =
          mlir::dyn_cast<ArrayAttr>(identifier_to_attr["dilations"]);
      const int dilation_h = mlir::cast<IntegerAttr>(dilations[1]).getInt();
      const int dilation_w = mlir::cast<IntegerAttr>(dilations[2]).getInt();
      return rewriter.getI64ArrayAttr({dilation_h, dilation_w});
    }
    
    Attribute GetPaddingValue(PatternRewriter& rewriter,
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes
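
    Note: the excerpt reads the spatial factors out of a 4-element, NHWC-ordered "dilations" attribute, where the layout is [batch, height, width, channel], so indices 1 and 2 carry the height and width dilation. A minimal standalone sketch of that indexing, with plain C++ in place of the MLIR attribute types (the struct and function names below are illustrative, not part of the file above):

      #include <array>
      #include <cstdint>
      #include <iostream>

      // Hypothetical plain-data stand-in for the two spatial dilation factors.
      struct SpatialDilations {
        int64_t h;
        int64_t w;
      };

      // NHWC order is [batch, height, width, channel], so the spatial factors
      // live at indices 1 and 2 of the 4-element attribute.
      SpatialDilations GetSpatialDilations(const std::array<int64_t, 4>& dilations) {
        return {dilations[1], dilations[2]};
      }

      int main() {
        const std::array<int64_t, 4> dilations = {1, 2, 3, 1};
        const SpatialDilations d = GetSpatialDilations(dilations);
        std::cout << "dilation_h=" << d.h << " dilation_w=" << d.w << "\n";  // 2 and 3
      }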
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

        inferredReturnTypes.assign({result_type});
        return success();
      }
    
      auto stride_h = op.getStrideHAttr().getInt();
      auto stride_w = op.getStrideWAttr().getInt();
      auto dilation_h = op.getDilationHFactorAttr().getInt();
      auto dilation_w = op.getDilationWFactorAttr().getInt();
    
      // We don't have EXPLICIT PADDING in TfLite.
      auto paddings = op.getPadding();
      tensorflow::Padding padding;
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
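
    Note: the shape inference around this excerpt combines the stride and dilation factors with the padding mode to derive the output spatial dimensions. A rough standalone sketch of the per-dimension arithmetic, assuming the usual SAME/VALID conventions (illustrative only, not the TFLite implementation):

      #include <cstdint>
      #include <iostream>

      // Filter extent once dilation is applied: a 3-wide filter with dilation 2
      // covers 5 input positions.
      int64_t EffectiveFilterSize(int64_t filter, int64_t dilation) {
        return (filter - 1) * dilation + 1;
      }

      // Output size of one spatial dimension (illustrative helper, not the TFLite API).
      //   SAME  -> ceil(input / stride)
      //   VALID -> ceil((input - effective_filter + 1) / stride)
      int64_t OutSize(int64_t input, int64_t filter, int64_t stride,
                      int64_t dilation, bool same_padding) {
        const int64_t effective_filter = EffectiveFilterSize(filter, dilation);
        if (same_padding) return (input + stride - 1) / stride;
        return (input - effective_filter + stride) / stride;
      }

      int main() {
        // Width 4, filter 3, stride 2, dilation 2, SAME padding -> 2 outputs.
        std::cout << OutSize(4, 3, 2, 2, /*same_padding=*/true) << "\n";
      }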
  3. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc

          int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
          int out_size = tflite::ComputeOutSize(kTfLitePaddingSame, input_size,
                                                filter_size, stride_i, dilation_i);
    
          int offset = 0;
          int padding_before = tflite::ComputePaddingWithOffset(
              stride_i, dilation_i, input_size, filter_size, out_size, &offset);
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
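
    Note: the excerpt calls tflite::ComputeOutSize and tflite::ComputePaddingWithOffset to turn SAME padding into explicit per-dimension amounts. A hedged sketch of the standard computation such helpers perform (the function below is illustrative, not the library code):

      #include <algorithm>
      #include <cstdint>
      #include <iostream>

      // Illustrative reimplementation of the standard SAME-padding split, not
      // the tflite helper itself: total padding for one dimension, divided into
      // the amount placed before the data plus an "offset" (the extra unit that
      // goes after the data when the total is odd).
      int64_t PaddingBeforeWithOffset(int64_t stride, int64_t dilation,
                                      int64_t input, int64_t filter,
                                      int64_t out_size, int64_t* offset) {
        const int64_t effective_filter = (filter - 1) * dilation + 1;
        const int64_t total = std::max<int64_t>(
            0, (out_size - 1) * stride + effective_filter - input);
        *offset = total % 2;
        return total / 2;
      }

      int main() {
        int64_t offset = 0;
        // stride 2, dilation 2, input 4, filter 3, out 2 -> total padding 3.
        const int64_t before = PaddingBeforeWithOffset(2, 2, 4, 3, 2, &offset);
        std::cout << "before=" << before << " offset=" << offset << "\n";  // 1 and 1
      }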
  4. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

                                           "Conv2D op doesn't have valid padding");
      }
      // Make sure dilations are all ones if set.
      const ArrayAttr& dilations =
          op->template getAttrOfType<ArrayAttr>("dilations");
      if (dilations && !TFIntListIsAllOnes(dilations)) {
        return rewriter.notifyMatchFailure(op, "dilations should be all 1");
      }
    
      if (!TFL::TFTypeIsFloat32Tensor(op.getInput()) &&
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
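
    Note: this pattern rewrites the SpaceToBatchND -> Conv2D -> BatchToSpaceND idiom into a single dilated convolution, so the inner Conv2D must itself carry all-ones dilations before the match can proceed. A minimal sketch of the kind of all-ones predicate used here (a standalone illustration, not the TFIntListIsAllOnes implementation):

      #include <cstdint>
      #include <vector>

      // Illustrative helper: true when every entry equals 1, mirroring the
      // precondition placed on the inner Conv2D's dilations by the rewrite.
      bool IsAllOnes(const std::vector<int64_t>& values) {
        for (int64_t v : values) {
          if (v != 1) return false;
        }
        return true;
      }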
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

        // Constructs dilation array.
        SmallVector<int64_t, 4> dilation;
        if (auto rhs_dilation = conv_op.getRhsDilation()) {
          // For example, [2, 3] -> [1, 2, 3, 1].
          dilation.emplace_back(1);
          dilation.append(rhs_dilation.value().getValues<int64_t>().begin(),
                          rhs_dilation.value().getValues<int64_t>().end());
          dilation.emplace_back(1);
        } else {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
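
    Note: the legalization pads the 2-D HLO rhs_dilation out to a 4-D NHWC attribute by surrounding it with 1s for the batch and channel dimensions, as the [2, 3] -> [1, 2, 3, 1] comment indicates. A standalone sketch of that wrapping, with plain vectors in place of MLIR attributes (the function name is illustrative):

      #include <cstdint>
      #include <vector>

      // Surround the spatial dilation factors with 1s for the batch and
      // channel dimensions so they line up with an NHWC-ordered 4-D attribute.
      std::vector<int64_t> ToNhwcDilation(const std::vector<int64_t>& spatial) {
        std::vector<int64_t> dilation;
        dilation.reserve(spatial.size() + 2);
        dilation.push_back(1);                                             // batch
        dilation.insert(dilation.end(), spatial.begin(), spatial.end());  // H, W
        dilation.push_back(1);                                             // channel
        return dilation;
      }
      // ToNhwcDilation({2, 3}) yields {1, 2, 3, 1}.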
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

    // CHECK-NEXT: %[[CONV2D_0:.*]] = "tf.Conv2D"(%arg0, %arg1)
    // CHECK-SAME: data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true
    // CHECK-SAME: attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations"
    // CHECK-NEXT: %[[BIASADD_0:.*]] = "tf.BiasAdd"(%[[CONV2D_0]], %arg2)
    // CHECK-NEXT: %[[RELU6_0:.*]] = "tf.Relu6"(%[[BIASADD_0]])
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
  7. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

      %3 = "tf.AddV2"(%2, %1): (tensor<15x28x28x1xf32>, tensor<1xf32>) -> tensor<15x28x28x1xf32>
      func.return %2 : tensor<15x28x28x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.500000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
      %0 = "tf.Conv2D"(%arg0, %cst) {data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
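
    Note: as a quick check of the shapes in this test, SAME padding makes each spatial output size ceil(input / stride) regardless of the dilation factor, so height 3 with stride 1 stays 3 and width 4 with stride 2 becomes 2; the trailing 2 in tensor<1x3x2x2xf32> is the filter's output-channel count from tensor<2x3x3x2xf32>.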
  9. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

      %3 = "tf.AddV2"(%2, %1): (tensor<15x28x28x1xf32>, tensor<1xf32>) -> tensor<15x28x28x1xf32>
      func.return %2 : tensor<15x28x28x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

        %0 = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes