Results 1 - 10 of 30 for Convolution (0.17 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // CHECK: return %[[TRANSPOSE_1]]
    
    // -----
    
    // Tests that an `add(convolution(%activation, %weight), %bias)` pattern with an
    // activation tensor in NCHW format and a non-constant bias is converted to an NHWC
    // convolution, but without the deferred transpose for `stablehlo.add`.
    // Transpose ops are inserted on the activation and output of
    // `stablehlo.convolution`. The weight constant is transposed.
    
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
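
    A minimal NumPy/TensorFlow sketch of the layout rewrite this test describes, not the pass itself: the activation is transposed from NCHW to NHWC, the weight constant is transposed (assuming an OIHW-to-HWIO weight layout, which is an assumption here and not stated in the excerpt), the convolution runs in NHWC, and the output is transposed back. All shapes below are made up.

    import numpy as np
    import tensorflow as tf

    act_nchw = np.random.rand(1, 3, 8, 8).astype(np.float32)   # N, C, H, W
    w_oihw = np.random.rand(4, 3, 2, 2).astype(np.float32)     # O, I, kH, kW (assumed layout)

    act_nhwc = np.transpose(act_nchw, (0, 2, 3, 1))            # transpose inserted on the activation
    w_hwio = np.transpose(w_oihw, (2, 3, 1, 0))                # weight constant transposed
    out_nhwc = tf.nn.conv2d(act_nhwc, w_hwio, strides=1, padding="VALID")
    out_nchw = tf.transpose(out_nhwc, perm=[0, 3, 1, 2])       # transpose inserted on the output
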
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

      ) -> tensor<?x2x2x1xi32> {
      // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
      // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[zp_offset:.*]], %[[bias:.*]]
      // CHECK-DAG: %[[result:.*]] = chlo.broadcast_add %[[conv]], %[[combined]]
      // CHECK: return %[[result]]
      %0 = mhlo.convolution(%lhs, %rhs)
          dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
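
    The CHECK-DAG lines above expect the zero-point offset and the bias to be combined into a single broadcast_add before being added to the convolution result. A toy NumPy sketch of that reassociation; all values and shapes are made up:

    import numpy as np

    conv = np.arange(8, dtype=np.int32).reshape(1, 2, 2, 2)    # stand-in for the conv result
    zp_offset = np.array([3, -1], dtype=np.int32)              # per-output-channel zero-point offset
    bias = np.array([10, 20], dtype=np.int32)

    combined = zp_offset + bias                                # folded once, reused per element
    assert np.array_equal(conv + zp_offset + bias, conv + combined)
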
  3. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

      // Iterate through the block argument and its convolution users. The space-to-depth
      // transform will be applied only if all of the conditions below are satisfied:
      //  1. All users of the block argument eventually lead to convolutions;
      //  2. the block_size of the space-to-depth transform is the same for all of
      //     these convolutions;
      //  3. the block_size of the space-to-depth transform is larger than 1.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
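
    For reference, the space-to-depth rearrangement that the conditions above gate on can be reproduced with tf.nn.space_to_depth; the block_size must be shared by all convolutions fed by the block argument and be larger than 1. The shapes and block size below are arbitrary example values.

    import numpy as np
    import tensorflow as tf

    block_size = 2                                             # must be > 1 and identical for all users
    x = np.random.rand(1, 8, 8, 3).astype(np.float32)          # NHWC activation

    y = tf.nn.space_to_depth(x, block_size)                    # spatial dims shrink, channels grow
    print(y.shape)                                             # [1, 4, 4, 12]: H/2, W/2, C * block_size**2
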
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc

    using ::stablehlo::quantization::QuantizedType;
    using ::stablehlo::quantization::WeightOnlyPtq;
    
    // Inserts quantization parameters of weights for weight-only quantization and
    // dynamic range quantization of `stablehlo.convolution` and
    // `stablehlo.dot_general`.
    class InsertWeightParamPass
        : public impl::InsertWeightParamPassBase<InsertWeightParamPass> {
     public:
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(InsertWeightParamPass)
    
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 10.2K bytes
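
    As a rough illustration of the kind of quantization parameters such a pass attaches to a weight, here is a symmetric per-tensor int8 scale computed from the weight's range. This is a generic sketch under assumed conventions, not the pass's actual logic.

    import numpy as np

    w = np.random.randn(3, 3, 4, 4).astype(np.float32)         # e.g. a convolution weight

    scale = np.abs(w).max() / 127.0                            # one scale for the whole tensor
    zero_point = 0                                             # symmetric quantization
    w_q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
    w_dq = w_q.astype(np.float32) * scale                      # dequantized approximation of w
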
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

      let dependentDialects = ["mlir::stablehlo::StablehloDialect",];
    }
    
    def NchwConvolutionToNhwcPass : Pass<"stablehlo-nchw-convolution-to-nhwc", "mlir::func::FuncOp"> {
      let summary = "Converts stablehlo.convolution ops from NCHW format to NHWC.";
      let description = [{
        Matches `ConvolutionOp`s with NCHW format and converts them to NHWC
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
  6. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

    namespace mlir {
    namespace TFL {
    
    // A dilated convolution can be emulated with a regular convolution by chaining
    // SpaceToBatch and BatchToSpace ops before and after it:
    //
    //     SpaceToBatchND -> Conv2D -> BatchToSpaceND
    //
    // This method was common before Conv2D fully supported dilated convolution in
    // TensorFlow. This transformation detects this "emulation", and replaces it
    // with a true dilated convolution, eliminating the SpaceToBatch and
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
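
    The equivalence the header describes can be checked directly in TensorFlow: tf.nn.atrous_conv2d corresponds to the classic SpaceToBatchND/Conv2D/BatchToSpaceND formulation, while tf.nn.conv2d accepts a dilations argument for the true dilated convolution. The shapes below are arbitrary.

    import numpy as np
    import tensorflow as tf

    x = np.random.rand(1, 9, 9, 2).astype(np.float32)          # NHWC input
    w = np.random.rand(3, 3, 2, 4).astype(np.float32)          # HWIO filter

    emulated = tf.nn.atrous_conv2d(x, w, rate=2, padding="VALID")
    true_dilated = tf.nn.conv2d(x, w, strides=1, padding="VALID", dilations=2)
    np.testing.assert_allclose(emulated.numpy(), true_dilated.numpy(), rtol=1e-5, atol=1e-5)
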
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/tf_to_stablehlo.mlir

    // CHECK-DAG: %[[CONST_0:.*]] = stablehlo.constant dense<[{{.*}}]> : tensor<2xf32>
    // CHECK-DAG: %[[CONST_1:.*]] = stablehlo.constant dense<[{{.*}}]> : tensor<2x3x3x2xf32>
    // CHECK-DAG: %[[CONV:.*]] = stablehlo.convolution(%[[ARG]], %[[CONST_1]]) {{.*}} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
    // CHECK-DAG: %[[BROADCAST:.*]] = stablehlo.broadcast_in_dim %[[CONST_0]], dims = [3] : (tensor<2xf32>) -> tensor<1x3x2x2xf32>
    - Last Modified: Mon Apr 08 20:05:12 UTC 2024
    - 13.6K bytes
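
    In the checked pattern, the 2-element constant is broadcast along dimension 3 of the convolution output before the bias addition. A NumPy reading of `dims = [3]`, with the shapes taken from the CHECK lines and zero-filled placeholder values:

    import numpy as np

    bias = np.array([0.5, -0.5], dtype=np.float32)             # tensor<2xf32>
    conv_out = np.zeros((1, 3, 2, 2), dtype=np.float32)        # tensor<1x3x2x2xf32>

    # dims = [3]: input dimension 0 maps to output dimension 3.
    bias_b = np.broadcast_to(bias.reshape(1, 1, 1, 2), conv_out.shape)
    result = conv_out + bias_b
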
  8. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

      // once available.
      //
      // If set to true, enable channel-wise quantization for:
      //   * Convolution ops: When the attached `Method` also specifies per-channel
      //                      quantization.
      //   * Non-convolution ops: All
      //
      // Default value: true
      bool enable_per_channel_quantized_weight = 2 [deprecated = true];
    
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
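
    Channel-wise (per-channel) weight quantization, in contrast to the per-tensor sketch under result 4, keeps one scale per output channel. The sketch below is illustrative only and assumes an HWIO weight with the output channels on the last axis.

    import numpy as np

    w = np.random.randn(3, 3, 4, 8).astype(np.float32)         # HWIO weight, 8 output channels

    scales = np.abs(w).max(axis=(0, 1, 2)) / 127.0             # one scale per output channel, shape (8,)
    w_q = np.clip(np.round(w / scales), -127, 127).astype(np.int8)
    w_dq = w_q.astype(np.float32) * scales                     # usually tighter than a single per-tensor scale
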
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

    // CHECK: @uniform_dequantize_0
    }
    
    // -----
    
    // Tests a variant where there is no stablehlo.convert op in between the
    // filter constant and the convolution op.
    //
    // `filter (f32) -> convolution`
    //
    // instead of:
    //
    // `filter (i8) -> convert (i8 -> f32) -> convolution`
    
    module {
    // CHECK-LABEL: quantized_conv_op_with_no_filter_convert
    // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32>
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
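
    The two filter chains in the comment differ only in whether the i8 -> f32 convert op is explicit. A toy sketch of the dequantization arithmetic involved; the scale and zero point are made-up values, not ones from the test:

    import numpy as np

    w_i8 = np.array([[-128, 0], [64, 127]], dtype=np.int8)     # quantized filter constant
    scale, zero_point = 0.02, 0

    # filter (i8) -> convert (i8 -> f32) -> convolution
    w_f32 = (w_i8.astype(np.float32) - zero_point) * scale
    # The variant tested above stores w_f32 directly as the filter constant,
    # so the pattern must be matched without a convert op in between.
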
  10. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

            %0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
            %1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
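
    In this test, the dim_numbers string [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f] denotes an NHWC input, an HWIO filter, and an NHWC output, and pad = [[1, 1], [1, 1]] with a 3x3 filter preserves the 3x3 spatial shape. A rough TensorFlow equivalent with arbitrary input values:

    import numpy as np
    import tensorflow as tf

    x = np.random.rand(1, 3, 3, 4).astype(np.float32)          # [b, 0, 1, f] == NHWC
    w = np.full((3, 3, 4, 4), 2.0, dtype=np.float32)           # [0, 1, i, o] == HWIO, dense<2.0> as in the test

    # Padding [[1, 1], [1, 1]] around a 3x3 input with a 3x3 filter is SAME padding here.
    y = tf.nn.conv2d(x, w, strides=1, padding="SAME")
    print(y.shape)                                             # (1, 3, 3, 4), matching tensor<1x3x3x4xf32>
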