Results 41 - 50 of 80 for Convolution (0.16 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h

    // Processes tensors with NCHW format (== (batch, channel, height, width)) by
    // converting them to NHWC format, along with extra optimizations such as
    // constant-folding the transpose->convolution pattern. This is useful when the
    // downstream pipeline (e.g. XLA) is better optimized for NHWC formats.
    void AddProcessNchwTensorPasses(OpPassManager& pm);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 12:53:33 UTC 2024
    - 3.6K bytes
    - Viewed (0)
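
    As a rough illustration of the NCHW -> NHWC conversion this pass performs, here is a minimal NumPy sketch (the permutation is the standard one; the helper name is illustrative, not from the pass):

    import numpy as np

    def nchw_to_nhwc(x: np.ndarray) -> np.ndarray:
        """Permute a (batch, channel, height, width) tensor to (batch, height, width, channel)."""
        return np.transpose(x, (0, 2, 3, 1))

    x = np.random.rand(2, 3, 28, 28)   # NCHW
    y = nchw_to_nhwc(x)                # NHWC
    assert y.shape == (2, 28, 28, 3)
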
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

    // usually has the following pattern. In the example below,
    // the input operand would be the stablehlo.convolution op, and the return
    // value would be the stablehlo.add op.
    //
    // ```
    // %0 = stablehlo.constant dense<3>
    // %1 = stablehlo.constant dense<4>
    // %2 = stablehlo.constant dense<2>
    // %3 = stablehlo.convolution(%arg0, %arg1) :
    //          (tensor<?x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<?x3x4x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
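
    A minimal, hypothetical sketch of the pattern being described (a convolution whose result feeds an add), written against a toy op structure rather than the real MLIR API:

    from dataclasses import dataclass, field

    @dataclass
    class Op:
        name: str
        operands: list = field(default_factory=list)

    # Toy IR mirroring the commented example: a convolution feeding an add.
    conv = Op("stablehlo.convolution")
    add = Op("stablehlo.add", operands=[conv, Op("stablehlo.constant")])

    def matches_conv_add(op: Op) -> bool:
        """True when an add consumes a convolution result -- the fusible pattern."""
        return op.name == "stablehlo.add" and any(
            o.name == "stablehlo.convolution" for o in op.operands)

    assert matches_conv_add(add)
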
  3. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

        }
    
        // If all types are floating point, or all are quantized or int, the types
        // are consistent. Int is valid in combination with both quantized and
        // floating-point types; this occurs for qi16 convolution, where the bias
        // is passed as a non-quantized int64.
        if (allTypesFp || allTypesQuantizedOrInt) return failure();
    
        Location loc = op->getLoc();
        SmallVector<Value> newOperands;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
    - Viewed (0)
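
    The consistency check reduces to a predicate over element types; a rough Python equivalent (the type-name strings are illustrative, not the pass's actual type representation):

    def types_consistent(types: list[str]) -> bool:
        """All-float, or all quantized/int, means no hybrid decomposition is needed."""
        all_fp = all(t in ("f16", "f32", "f64") for t in types)
        all_quant_or_int = all(t.startswith("qi") or t.startswith("i") for t in types)
        return all_fp or all_quant_or_int

    assert types_consistent(["f32", "f32"])
    assert types_consistent(["qi16", "i64"])      # qi16 conv with int64 bias
    assert not types_consistent(["qi8", "f32"])   # hybrid: needs decomposition
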
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/nchw_convolution_to_nhwc.cc

    class NchwConvolutionToNhwcPass
        : public impl::NchwConvolutionToNhwcPassBase<NchwConvolutionToNhwcPass> {
     private:
      void runOnOperation() override;
    };
    
    // Rewrites NCHW convolution to NHWC.
    // * Src dimension numbers: [b, f, 0, 1]x[o, i, 0, 1]->[b, f, 0, 1]
    // * Dst dimension numbers: [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f]
    class RewriteNchwConvolutionToNhwc
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.2K bytes
    - Viewed (0)
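
    The src/dst dimension numbers above correspond to three concrete permutations; a NumPy sketch of the operand rewrites (the permutation tuples are the standard ones implied by the comment):

    import numpy as np

    lhs = np.zeros((1, 8, 32, 32))   # [b, f, 0, 1]  (NCHW input)
    rhs = np.zeros((16, 8, 3, 3))    # [o, i, 0, 1]  (OIHW filter)

    lhs_nhwc = np.transpose(lhs, (0, 2, 3, 1))  # -> [b, 0, 1, f]
    rhs_hwio = np.transpose(rhs, (2, 3, 1, 0))  # -> [0, 1, i, o]

    assert lhs_nhwc.shape == (1, 32, 32, 8)
    assert rhs_hwio.shape == (3, 3, 8, 16)
    # The rewritten convolution then produces a [b, 0, 1, f] result directly.
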
  5. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           return %[[VAL_14]] : tensor<16x32x256xbf16>
    // CHECK:         }
    func.func @convert_conv1d(%arg0: tensor<16x32x256xbf16>, %arg1: tensor<1x256x256xbf16>) -> tensor<16x32x256xbf16> {
    	%0 = "mhlo.convolution"(%arg0, %arg1) {
        batch_group_count = 1 : i64,
        dimension_numbers = #mhlo.conv<[b, 0, f]x[0, i, o]->[b, 0, f]>,
        feature_group_count = 1 : i64,
        lhs_dilation = dense<1> : tensor<1xi64>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
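
    For the [b, 0, f]x[0, i, o]->[b, 0, f] layout with a width-1 kernel, as in this test, the convolution degenerates to a per-position matmul; a NumPy sketch of that special case:

    import numpy as np

    x = np.random.rand(16, 32, 256)   # [b, 0, f]
    w = np.random.rand(1, 256, 256)   # [0, i, o], kernel width 1

    # out[b, s, o] = sum_i x[b, s, i] * w[0, i, o]
    out = np.einsum('bsi,io->bso', x, w[0])
    assert out.shape == (16, 32, 256)   # [b, 0, f]
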
  6. tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py

            'b4': tf.Variable(tf.zeros([num_classes])),
        }
    
      @tf.function
      def __call__(self, data):
        """The Model definition."""
        x = tf.reshape(data, [-1, 28, 28, 1])
    
        # 2D convolution, with 'SAME' padding (i.e. the output feature map has
        # the same size as the input).
    
        # NOTE: The data/x/input is always specified in floating point precision.
        # output shape: [-1, 28, 28, 32]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 20 03:05:18 UTC 2021
    - 6.5K bytes
    - Viewed (0)
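
    To illustrate the SAME-padding claim (output spatial size equals input size at stride 1), a small TensorFlow check; the 5x5 kernel size is an assumption matching the commented [-1, 28, 28, 32] output shape, since the excerpt does not show it:

    import tensorflow as tf

    x = tf.zeros([1, 28, 28, 1])
    w = tf.zeros([5, 5, 1, 32])   # kernel size assumed; not shown in the excerpt
    y = tf.nn.conv2d(x, w, strides=1, padding='SAME')
    assert y.shape == (1, 28, 28, 32)   # spatial dims preserved, as the comment states
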
  7. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_70.mlir

    }
    
    // CHECK-LABEL: func @transposeConv2D_1x1_f32
    func.func @transposeConv2D_1x1_f32(%input: tensor<1x64x28x28xf32>, %filter: tensor<1x1x64x64xf32>) -> tensor<1x64x28x28xf32> {
      // 1x1 convolution can be computed as a GEMM in NHWC data format.
      // CHECK: "tf.Conv2D"(%[[INPUT_TRANSPOSE:[0-9]*]], %arg1)
      // CHECK-SAME: data_format = "NHWC"
      %0 = "tf.Conv2D"(%input, %filter)
           {
             data_format = "NCHW",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 21 08:41:18 UTC 2022
    - 8.5K bytes
    - Viewed (0)
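
    The "1x1 convolution as a GEMM" observation can be checked directly in NumPy: in NHWC, a 1x1 conv is a matmul over the channel dimension (a minimal sketch of the identity, not the layout pass itself):

    import numpy as np

    x = np.random.rand(1, 28, 28, 64)   # NHWC input
    w = np.random.rand(1, 1, 64, 64)    # HWIO 1x1 filter

    # 1x1 conv == per-pixel matmul over channels.
    gemm = (x.reshape(-1, 64) @ w[0, 0]).reshape(1, 28, 28, 64)

    # Reference via einsum over the same dimension numbers.
    ref = np.einsum('bhwi,io->bhwo', x, w[0, 0])
    assert np.allclose(gemm, ref)
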
  8. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      // CHECK: %[[CONST:.*]] = mhlo.constant()
      // CHECK-SAME{LITERAL} value = dense<127> : tensor<2x3x3x2xi8>
      // CHECK-SAME: tensor<2x3x3x2x!quant.uniform<i8:f32, 1.000000e+00:3>>
      // CHECK: mhlo.convolution(%arg0, %[[CONST]])
      // CHECK-SAME{LITERAL}: dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f]
      // CHECK-SAME{LITERAL}: window = {stride = [1, 2], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [2, 2]}
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
    - Viewed (0)
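
    The rhs_dilate = [2, 2] attribute in the CHECK lines stretches the 3x3 kernel's effective extent; a one-line sketch of the standard formula:

    def effective_kernel_size(k: int, dilation: int) -> int:
        """Effective extent of a dilated kernel: (k - 1) * d + 1."""
        return (k - 1) * dilation + 1

    assert effective_kernel_size(3, 2) == 5   # a 3x3 kernel spans 5x5 with rhs_dilate = 2
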
  9. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

          return failure();
        }
        // Check whether the given op is a grouped convolution.
        // A zero dimension size is rejected by the tf.Conv2D operator verifier.
        if (input_type.getDimSize(3) % filter_type.getDimSize(2) != 0) {
          return failure();
        }
    
        // The TensorFlow convolution op only has two inputs, while the TFLite one has
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
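
    The divisibility test above (input channels modulo the filter's input-channel dimension) determines the group count; a Python sketch of the same check (function and parameter names are mine, not the pass's):

    def group_count(input_channels: int, filter_in_channels: int) -> int | None:
        """Grouped conv requires the channels to divide evenly; None means reject."""
        if filter_in_channels == 0 or input_channels % filter_in_channels != 0:
            return None
        return input_channels // filter_in_channels

    assert group_count(64, 16) == 4    # grouped convolution with 4 groups
    assert group_count(64, 7) is None  # mirrors the `return failure()` path
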
  10. tensorflow/compiler/mlir/quantization/stablehlo/passes/defer_activation_transpose.cc

    class DeferActivationTransposeForAddOp : public OpRewritePattern<AddOp> {
     public:
      using OpRewritePattern<AddOp>::OpRewritePattern;
    
      LogicalResult match(AddOp op) const override {
        // Only supports the case for 2D convolution.
        const Value lhs = op.getOperand(0);
        if (!HasRankOf(lhs, /*rank=*/4)) return failure();
    
        const Value rhs = op.getOperand(1);
        Operation* rhs_op = rhs.getDefiningOp();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
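
    The deferral this pass performs rests on a commutation identity: adding after a transpose equals transposing after adding the inversely-permuted operand. A NumPy sketch of that identity for the rank-4 case match() guards for (the permutation is an assumed NCHW -> NHWC example):

    import numpy as np

    perm = (0, 2, 3, 1)              # e.g. NCHW -> NHWC
    inv = tuple(np.argsort(perm))    # inverse permutation

    x = np.random.rand(1, 8, 4, 4)   # rank-4 activation, as match() requires
    b = np.random.rand(1, 4, 4, 8)   # operand already in the transposed layout

    # transpose(x) + b == transpose(x + transpose(b, inv))
    lhs = np.transpose(x, perm) + b
    rhs = np.transpose(x + np.transpose(b, inv), perm)
    assert np.allclose(lhs, rhs)
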