- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 44 for depthwise_conv_2d (0.96 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
Type result_type, Value input, Value filter, Value bias) const { // Compared to tfl.conv_2d, tfl.depthwise_conv_2d has an additional // 'depth_multiplier' attribute. However, tf.DepthwiseConv2dNative does not // have a corresponding 'depth_multiplier' attribute; the multiplier is the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
for (int k = 0; k < filter_shape[2]; ++k) { for (int l = 0; l < filter_shape[3]; ++l) { // [o, 0, 1, i] for `tfl.conv_2d` case`, // [i, 0, 1, o] for `tfl.depthwise_conv_2d` case. int old_idx = get_array_idx(filter_shape, i, j, k, l); int new_idx = is_depthwise ? get_array_idx(new_filter_shape, k, i, j, l)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/ops.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc
succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns)))); auto depthwise_conv_op = FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func); EXPECT_THAT(depthwise_conv_op, NotNull()); // The filter of the DepthwiseConv2dNativeOp is expected to be a constant. EXPECT_TRUE(isa<TF::ConstOp>(depthwise_conv_op.getFilter().getDefiningOp())); } TEST_F(ConstantFoldingTest, DepthwiseConvWeightNotFoldable) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 10.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir
// CHECK-NEXT: %[[DEPTHWISE_CONV2D:.*]] = "tf.DepthwiseConv2dNative"(%arg0, %[[CONST]]) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]}> {device = ""} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 33.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// CHECK: func private @quantized_conv2d_with_relu6_fn // CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"] // CHECK: func private @quantized_matmul_with_bias_fn // CHECK: func private @quantized_matmul_with_bias_and_relu_fn // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_drq.mlir
// CHECK: func private @quantized_conv2d_fn // CHECK-SAME: tf_quant.quantized_ops = ["Conv2D"] // CHECK: func private @quantized_depthwise_conv2d_fn // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D"] // UQ-CHECK: func private @quantized_conv2d_fn // UQ-CHECK: func private @quantized_depthwise_conv2d_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
// Disable quantization for the DepthwiseConv since it has no benefits in // the XLA opset. if (function_name.contains("depthwise_conv2d")) { return absl::InternalError( "DepthwiseConv2D doesn't get any benefit of quantization in XLA."); } else if (function_name.contains("conv2d")) { // For Conv2D, the channel dimension must be static to calculate the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
%3 = "tf.BatchMatMulV2"(%input, %2) { attr_map = "adj_x:0,adj_y:1" } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %3 : tensor<*xf32> } // DepthwiseConv2D with float computation func.func private @internal_depthwise_conv2d_fn( %input : tensor<*xf32>, %filter : tensor<*xi8>) -> tensor<*xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
(TFL_DequantizeOp (TFL_QuantizeOp (TF_ReshapeOp $input, $shape), (UpdateShapeWithAxis<3> $qtype, $old_value))), [(UsedBy<"DepthwiseConv2D"> $old_value), (CanUpdateShapeWithAxis<3> $qtype, $old_value)], [], (addBenefit 10)>; // The axis is set to 3, because this transpose is from the legalization of
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0)