- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 11 for depthwise_conv_2d (0.47 sec)
-
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir
// CHECK-NEXT: version: 1 // CHECK-NEXT: builtin_code: DEQUANTIZE // CHECK-NEXT: }, { // CHECK-NEXT: deprecated_builtin_code: 4, // CHECK-NEXT: version: 1 // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { // CHECK-NEXT: shape: [ 1, 224, 224, 3 ], // CHECK-NEXT: buffer: 1, // CHECK-NEXT: name: "arg0",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir
// CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: DEQUANTIZE // CHECK-NEXT: }, { // CHECK-NEXT: deprecated_builtin_code: 4, // CHECK-NEXT: version: 2, // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { // CHECK-NEXT: shape: [ 1, 224, 224, 3 ], // CHECK-NEXT: buffer: 1, // CHECK-NEXT: name: "arg0",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/cpu_hardware.cc
TargetHardwareOpRegistration<CpuHardware, Op> Op##_CpuHardware_hardware( \ Create); // Operation costs on CPU // Currently used for these ops: // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected class CpuConvOp : public TargetHardwareOperation { double GetOpCost(mlir::Operation* op) const override { float cost = 0.0; int64_t arithmetic_count;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir
// ----- func.func @testDepthwiseConv(%arg0: tensor<1x112x112x32xf32>, %arg1: tensor<1x3x3x32xf32>, %arg2: tensor<32xf32>) -> tensor<1x112x112x32xf32> { // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 19:32:06 UTC 2023 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc
} return true; } }; std::unique_ptr<TargetHardwareOperation> CreateConcatOp() { return std::make_unique<GpuConcatOp>(); } // Currently used for these ops: // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected class GpuConvOp : public TargetHardwareOperation { double GetOpCost(mlir::Operation* op) const override { int64_t arithmetic_count;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir
^bb0(%arg0: tensor<1x112x112x3xf32>, %arg1: tensor<1x3x3x32xf32>, %arg2: tensor<32xf32>): // CHECK: _arithmetic_count = 7626752 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 14 04:58:17 UTC 2022 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// CHECK: func private @quantized_conv2d_with_relu6_fn // CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"] // CHECK: func private @quantized_matmul_with_bias_fn // CHECK: func private @quantized_matmul_with_bias_and_relu_fn // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_drq.mlir
// CHECK: func private @quantized_conv2d_fn // CHECK-SAME: tf_quant.quantized_ops = ["Conv2D"] // CHECK: func private @quantized_depthwise_conv2d_fn // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D"] // UQ-CHECK: func private @quantized_conv2d_fn // UQ-CHECK: func private @quantized_depthwise_conv2d_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
%3 = "tf.BatchMatMulV2"(%input, %2) { attr_map = "adj_x:0,adj_y:1" } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %3 : tensor<*xf32> } // DepthwiseConv2D with float computation func.func private @internal_depthwise_conv2d_fn( %input : tensor<*xf32>, %filter : tensor<*xi8>) -> tensor<*xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir
%input : tensor<*xf32>, %weight : tensor<*x!tf_type.qint8>, %weight_scale : tensor<*xf32>, %weight_zp : tensor<*xi32>) -> tensor<*xf32> attributes {tf_quant.quantized_ops = ["DepthwiseConv2D"]} { %out = "tf.UniformQuantizedConvolutionHybrid"(%input, %weight, %weight_scale, %weight_zp) { Tlhs = "tfdtype$DT_FLOAT", Trhs = "tfdtype$DT_QINT8",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 3.9K bytes - Viewed (0)