Results 1 - 10 of 46 for depthwise_conv_2d (0.23 sec)

  1. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir

      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
      // CHECK-NEXT:    tensors: [ {
      // CHECK-NEXT:      shape: [ 1, 224, 224, 3 ],
      // CHECK-NEXT:      buffer: 1,
      // CHECK-NEXT:      name: "arg0",
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9K bytes
  2. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir

      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 2,
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
      // CHECK-NEXT:    tensors: [ {
      // CHECK-NEXT:      shape: [ 1, 224, 224, 3 ],
      // CHECK-NEXT:      buffer: 1,
      // CHECK-NEXT:      name: "arg0",
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9.1K bytes
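
    Note: the two mlir2flatbuffer results above check the flatbuffer that the converter emits for a depthwise convolution (operator_codes containing DEPTHWISE_CONV_2D). As a rough illustration, not part of the search output, such a flatbuffer can be produced from the Python API; the layer choice, strides, and names below are assumptions, and only the 1x224x224x3 input shape is taken from the CHECK lines:

      import tensorflow as tf

      # Hypothetical one-layer model; the 1x224x224x3 input matches the
      # tensor shape in the CHECK lines, everything else is illustrative.
      model = tf.keras.Sequential([
          tf.keras.layers.Input(shape=(224, 224, 3), batch_size=1),
          tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=2,
                                          padding="same"),
      ])

      # Convert with the TFLite converter; the resulting flatbuffer's
      # operator_codes should include DEPTHWISE_CONV_2D.
      converter = tf.lite.TFLiteConverter.from_keras_model(model)
      tflite_model = converter.convert()

      # Sanity check: the converted model loads in the TFLite interpreter.
      interpreter = tf.lite.Interpreter(model_content=tflite_model)
      interpreter.allocate_tensors()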
  3. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    func.func @QuantizeDepthwiseConv2D(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x112x112x64xf32> {
      %w = arith.constant dense<127.0> : tensor<64x3x3x3xf32>
      %b = arith.constant dense<0.0> : tensor<64xf32>
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

      %w = arith.constant dense<127.0> : tensor<64x3x3x3xf32>
      %b = arith.constant dense<0.0> : tensor<64xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf-with-allowing-bf16-and-f16-type-legalization.mlir

      func.return %0 : tensor<256x30x30x12xbf16>
      // CHECK: "tfl.depthwise_conv_2d"
    }
    
    // CHECK-LABEL: conv_2d_f16
    func.func @conv_2d_f16(%arg0 : tensor<256x32x32x3xf16>, %arg1 : tensor<3x3x3x16xf16>) -> tensor<256x8x7x16xf16> {
    - Last Modified: Thu May 26 23:53:32 UTC 2022
    - 2.2K bytes
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    // CHECK: %[[conv:.*]] = "tfl.depthwise_conv_2d"(%arg0, %[[dq]]
    
    // PerTensor: %[[cst:.*]] = arith.constant dense<1.270000e+02> : tensor<32x3x3x3xf32>
    // PerTensor: %[[q:.*]] = "tfl.quantize"(%[[cst]]) <{qtype = tensor<32x3x3x3x!quant.uniform<i8<-127:127>:f32,
    // PerTensor: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
    // PerTensor: %[[conv:.*]] = "tfl.depthwise_conv_2d"(%arg0, %[[dq]]
    }
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  7. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      // CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]]) : (tensor<32x!quant.uniform<{{.+}})
      // CHECK-DAG: %[[VAL4:.+]] = "tfl.depthwise_conv_2d"(%arg0, %[[VAL2]], %[[VAL3]]) <{depth_multiplier = 4 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 4 : i32, stride_w = 5 : i32}>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
  8. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

    // CHECK: %[[QUANTIZE:.*]] = "tfl.quantize"(%[[CONSTANT0]]) <{qtype = tensor<1x3x3x48x!quant.uniform<u8:f32, 1.000000e+00>>}>
    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tfl.depthwise_conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithDepthwiseConv2D
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
  9. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/cpu_hardware.cc

      TargetHardwareOpRegistration<CpuHardware, Op> Op##_CpuHardware_hardware( \
          Create);
    
    // Operation costs on CPU
    
    // Currently used for these ops:
    // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected
    class CpuConvOp : public TargetHardwareOperation {
      double GetOpCost(mlir::Operation* op) const override {
        float cost = 0.0;
        int64_t arithmetic_count;
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 5.9K bytes
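
    Note: the cpu_hardware.cc result above registers a cost function for tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected. As a rough, language-shifted sketch of that pattern, not the actual C++ implementation, the snippet below estimates a depthwise conv's multiply-accumulate count and scales it by an invented per-unit CPU cost; the shapes mirror the testDepthwiseConv signature in the target-annotation result that follows:

      # Illustrative only: the per-MAC cost constant and helper names are
      # invented, not taken from cpu_hardware.cc.
      CPU_COST_PER_MAC = 1.0e-9

      def depthwise_conv_2d_mac_count(out_h, out_w, in_channels,
                                      depth_multiplier, kernel_h, kernel_w):
          # One multiply-accumulate per kernel element, per output element;
          # a depthwise conv yields in_channels * depth_multiplier channels.
          output_elements = out_h * out_w * in_channels * depth_multiplier
          return output_elements * kernel_h * kernel_w

      def cpu_op_cost(mac_count):
          return mac_count * CPU_COST_PER_MAC

      # Example: a 3x3 depthwise conv over a 1x112x112x32 tensor,
      # depth multiplier 1.
      macs = depthwise_conv_2d_mac_count(112, 112, 32, 1, 3, 3)
      print(macs, cpu_op_cost(macs))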
  10. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

    // -----
    
    func.func @testDepthwiseConv(%arg0: tensor<1x112x112x32xf32>, %arg1: tensor<1x3x3x32xf32>, %arg2: tensor<32xf32>) -> tensor<1x112x112x32xf32> {
      // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes