Results 1 - 10 of 29 for Convolution (0.22 sec)

  1. tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md

    speedup and reduce memory usage in the first convolution.
    
    The first convolution in many image models, including ResNet and ResNet-like
    architectures, is a (kernel=7, stride=2) 2D convolution. The input of this
    convolution is images, which usually have RGB channels. The input of this first
    convolution has shape [batch_size, height, width, 3] and the kernel has shape
    [kernel_size, kernel_size, 3, out_channel]. Space to depth transforms this first
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Oct 24 02:51:43 UTC 2020
    - 8.3K bytes
    - Viewed (0)
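
    A minimal NumPy sketch of the space-to-depth rearrangement the document
    describes, assuming block_size=2 and the NHWC shapes named in the snippet
    (the helper name and example sizes are illustrative, not taken from the file):

    import numpy as np

    def space_to_depth(x, block_size=2):
        # Rearrange spatial blocks into the channel dimension (NHWC layout):
        # [b, h, w, c] -> [b, h // bs, w // bs, c * bs * bs].
        b, h, w, c = x.shape
        x = x.reshape(b, h // block_size, block_size,
                      w // block_size, block_size, c)
        x = x.transpose(0, 1, 3, 2, 4, 5)
        return x.reshape(b, h // block_size, w // block_size,
                         c * block_size * block_size)

    # The RGB input described above: [batch_size, height, width, 3].
    images = np.random.rand(8, 224, 224, 3).astype(np.float32)
    print(space_to_depth(images).shape)  # (8, 112, 112, 12)

    The first convolution can then run on the smaller spatial grid with a
    correspondingly reshaped kernel, which is where the speedup the snippet
    mentions comes from.
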
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/nchw_convolution_to_nhwc.mlir

      %2 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[o, i, 0, 1]->[b, f, 0, 1], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x4x4x8xf32>, tensor<8x8x3x3xf32>) -> tensor<1x8x4x4xf32>
      return %2 : tensor<1x8x4x4xf32>
    }
    
    // CHECK-NOT: stablehlo.transpose
    // CHECK: %[[CONV:.+]] = stablehlo.convolution
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 25 23:00:47 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/stablehlo/tests/fuse_mhlo_convolution.mlir

    // RUN: odml-to-stablehlo-opt %s -fuse-mhlo-convolution-pass -cse | FileCheck %s
    
    // CHECK-LABEL: @fuseMulAndConv2D
    // CHECK-SAME: %[[INPUT:[^:[:space:]]+]]
    func.func @fuseMulAndConv2D(%input: tensor<1x256x256x3xf32>) -> (tensor<1x256x256x2xf32>) {
      // CHECK-DAG: %[[FILTER:.+]] = mhlo.constant dense<{{\[\[\[\[}}1.000000e+00, 2.000000e+00], [3.000000e+00, 4.000000e+00], [5.000000e+00, 6.000000e+00]]]]> : tensor<1x1x3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 4.4K bytes
    - Viewed (0)
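
    The test above checks that a constant multiply gets folded into the
    convolution filter. A small NumPy sketch of the algebra behind that fusion,
    using the 1x1x3x2 filter values from the CHECK line and an einsum as a
    stand-in for the 1x1 convolution (illustrative only, not the pass itself):

    import numpy as np

    x = np.random.randn(1, 256, 256, 3).astype(np.float32)
    w = np.arange(1.0, 7.0, dtype=np.float32).reshape(1, 1, 3, 2)
    c = np.array([0.5, 2.0], dtype=np.float32)  # broadcast over output channels

    def conv_1x1(x, w):
        # A 1x1 NHWC convolution is a matmul over the channel dimension.
        return np.einsum('bhwc,co->bhwo', x, w[0, 0])

    unfused = conv_1x1(x, w) * c       # mul(conv(x, w), c)
    fused = conv_1x1(x, w * c)         # conv(x, w * c): the constant folds into w
    print(np.allclose(unfused, fused))  # True
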
  4. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-tfl-stablehlo-conv.mlir

    module {
      func.func @main(%arg0: tensor<8x8x1x207xf32>, %arg1: tensor<3x3x16x207xf32>) -> tensor<16x8x8x1xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jan 24 06:08:43 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir

    // CHECK-SAME: (tensor<1x2xf32>, tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03>>) -> tensor<1x3xf32>
    // CHECK: return %[[DOT]]
    
    // -----
    
    // Test that a hybrid quantized convolution is produced when a q/dq pair
    // exists only for the weight.
    
    module attributes {tf_saved_model.semantics} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 4.8K bytes
    - Viewed (0)
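
    A minimal NumPy sketch of the weight-only ("hybrid") idea the comment
    describes: only the weight carries a quantize/dequantize pair, activations
    stay in float, and the float op consumes the dequantized weight. The 1x2 and
    2x3 shapes mirror the CHECK line above; the helper name is illustrative:

    import numpy as np

    def quantize_weight(w, num_bits=8):
        # Symmetric per-tensor int8 quantization: w ~= scale * w_q.
        qmax = 2 ** (num_bits - 1) - 1
        scale = np.max(np.abs(w)) / qmax
        w_q = np.clip(np.round(w / scale), -qmax, qmax).astype(np.int8)
        return w_q, scale

    x = np.random.randn(1, 2).astype(np.float32)   # float activation
    w = np.random.randn(2, 3).astype(np.float32)   # float weight
    w_q, scale = quantize_weight(w)
    y = x @ (w_q.astype(np.float32) * scale)       # dequantize, then float compute
    print(np.max(np.abs(y - x @ w)))               # small quantization error
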
  6. tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc

      // For models with NCHW convolution format. This pass is required because
      // the downstream pipeline handles NHWC convolutions better in most cases.
      pm.addNestedPass<func::FuncOp>(createNchwConvolutionToNhwcPass());
    
    // Folds `stablehlo.constant`->`stablehlo.transpose` patterns, which are often
    // generated as by-products of optimizing dimension numbers (e.g. the
    // NCHW->NHWC convolution conversion).
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 8.1K bytes
    - Viewed (0)
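
    A rough NumPy sketch of what the NCHW->NHWC rewrite amounts to at the tensor
    level (not the pass implementation): transpose the operands into NHWC/HWIO,
    run the NHWC convolution, and transpose the result back. The naive
    conv2d_nhwc helper is illustrative and uses stride 1 with no padding:

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    def conv2d_nhwc(x, w):
        # x: [b, h, w, ci], w: [kh, kw, ci, co] -> [b, h-kh+1, w-kw+1, co].
        patches = sliding_window_view(x, w.shape[:2], axis=(1, 2))
        return np.einsum('bhwcij,ijco->bhwo', patches, w)

    x_nchw = np.random.randn(1, 8, 4, 4).astype(np.float32)   # [b, f, 0, 1]
    w_oihw = np.random.randn(8, 8, 3, 3).astype(np.float32)   # [o, i, 0, 1]

    x_nhwc = x_nchw.transpose(0, 2, 3, 1)                     # [b, 0, 1, f]
    w_hwio = w_oihw.transpose(2, 3, 1, 0)                     # [0, 1, i, o]
    y_nchw = conv2d_nhwc(x_nhwc, w_hwio).transpose(0, 3, 1, 2)
    print(y_nchw.shape)  # (1, 8, 2, 2)

    The constant->transpose folding mentioned in the same snippet then cleans up
    the transposes this rewrite introduces around constants.
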
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/optimize_graph.mlir

      // CHECK: %[[QUANT_CST:.*]] = stablehlo.uniform_quantize %[[CST]]
      // CHECK: %[[QUANT_ARG_0:.*]] = stablehlo.uniform_quantize %[[ARG_0]]
      // CHECK: %[[CONV:.*]] = stablehlo.convolution(%[[QUANT_ARG_0]], %[[QUANT_CST]])
      // CHECK-NOT: stablehlo.uniform_quantize
      // CHECK: %[[DEQUANT:.*]] = stablehlo.uniform_dequantize %[[CONV]]
      // CHECK: return %[[DEQUANT]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 2.6K bytes
    - Viewed (0)
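
    The CHECK pattern above expects both operands to be quantized, the
    convolution to run on the quantized values, and a single uniform_dequantize
    at the end. A symmetric, zero-point-free NumPy sketch of that compute
    pattern, with a matmul standing in for the convolution to keep it short
    (real uniform quantization also carries zero points):

    import numpy as np

    def quantize(x, num_bits=8):
        qmax = 2 ** (num_bits - 1) - 1
        scale = np.max(np.abs(x)) / qmax
        return np.clip(np.round(x / scale), -qmax, qmax).astype(np.int8), scale

    x = np.random.randn(4, 8).astype(np.float32)
    w = np.random.randn(8, 16).astype(np.float32)
    x_q, sx = quantize(x)                              # quantize the activation
    w_q, sw = quantize(w)                              # quantize the constant
    acc = x_q.astype(np.int32) @ w_q.astype(np.int32)  # quantized "convolution"
    y = acc.astype(np.float32) * (sx * sw)             # dequantize once at the end
    print(np.max(np.abs(y - x @ w)))                   # small error
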
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

    // CHECK-SAME: (tensor<1x2xf32>, tensor<2x3x!quant.uniform<i8<-127:127>:f32, 0.0023622048182750312>>) -> tensor<1x3xf32>
    // CHECK: return %[[DOT]]
    
    // -----
    
    // Test that a per-tensor weight-only quantized convolution op is produced
    // when an empty `weight_only_ptq` is provided.
    
    module attributes {tf_saved_model.semantics} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc

      QuantizationSpec spec{};
      if (method_case != Method::kStaticRangePtq) {
        return spec;
      }
    
      // Matches the whole family of convolution quantizable units.
      spec.mutable_matcher()->mutable_function_name()->set_regex(
          "composite_conv.*");
    
      // Enable per-channel quantization for convolution weights.
      QuantizedType conv_weight_quantized_type{};
    
      // Assumes NHWC format, specifying the channel dimension (3) as the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 8.3K bytes
    - Viewed (0)
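
    A minimal NumPy sketch of the per-channel scheme the comment enables:
    symmetric int8 with one scale per output channel, i.e. dimension 3 of an
    NHWC-format convolution kernel. The function name and shapes are
    illustrative, not the config.cc API:

    import numpy as np

    def quantize_per_channel(w, axis=3, num_bits=8):
        # One scale per slice along `axis`; all other axes are reduced over.
        qmax = 2 ** (num_bits - 1) - 1
        reduce_axes = tuple(i for i in range(w.ndim) if i != axis)
        scales = np.max(np.abs(w), axis=reduce_axes) / qmax
        shape = [1] * w.ndim
        shape[axis] = -1
        w_q = np.clip(np.round(w / scales.reshape(shape)), -qmax, qmax)
        return w_q.astype(np.int8), scales

    w = np.random.randn(3, 3, 8, 16).astype(np.float32)  # [kh, kw, in_ch, out_ch]
    w_q, scales = quantize_per_channel(w, axis=3)
    print(w_q.shape, scales.shape)  # (3, 3, 8, 16) (16,)
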
  10. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      // NOFOLD: %[[PAD:[0-9]*]] = "tf.Pad"(%[[TRANSPOSE]], %[[PADDING]])
    
      // ------------------------------------------------------------------------ //
      // Convolution layer #0.
      // ------------------------------------------------------------------------ //
      %5 = "tf.Conv2D"(%4, %arg3)
            {
              data_format = "NCHW",
              dilations = [1, 1, 1, 1],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
    - Viewed (0)