- Sort by: Score
- Results per page: 10
- Language: All
Results 31 - 40 of 42 for depthwise_conv_2d (0.4 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
StringRef function_name = mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue(); if (!function_name.starts_with("composite_")) { return spec; } if (function_name.contains("depthwise_conv2d")) { spec->coeff_op_quant_dim[1] = 3; if (function_name.contains("with_bias")) { spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias}; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
attr_map = "strides:0,use_cudnn_on_gpu:1,padding:2,explicit_paddings:3,dilations:4" } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32> func.return %5 : tensor<*xi32> } // DepthwiseConv2D with float computation func.func private @internal_depthwise_conv2d_fn( %input : tensor<*xi8>, %filter : tensor<*xi8>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td
(IsConstTensor $filter), (IsInt32ElementType $conv), (HasStaticShapeConstraint $filter), (HasStaticShapeAtDimsConstraint<"3"> $input)], [], (addBenefit 10)>; // Converts inlined DepthwiseConv2D pattern to TF XlaConvV2 op. This pattern // doesn't support non-constant weights. def ConvertTFDepthwiseConv2DToXLAConvOp : Pat< (TF_CastOp:$conv (TF_DepthwiseConv2dNativeOp (TF_CastOp:$cast_input
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 21.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
// than function name. if (!function_name.starts_with("composite_")) { return failure(); } if (function_name.contains("depthwise_conv2d")) { // Uniform Quantized op requires weights of tf.DepthwiseConv2dNative to // be transformed from [H,W,C,M] to [H,W,1,CxM] where // H=height,W=width,C=channel,M=multiplier. Therefore, a reshape op is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
attr_map = "strides:0,use_cudnn_on_gpu:1,padding:2,explicit_paddings:3,dilations:4" } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32> func.return %5 : tensor<*xi32> } // DepthwiseConv2D with (simulated) int32 accumulation. func.func private @internal_depthwise_conv2d_fn( %input : tensor<*xi8>, %filter : tensor<*xi8>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir
// CHECK: %[[DEPTHWISE_CONV2D:.*]] = "tf.DepthwiseConv2dNative"(%arg0, %[[DEQUANTIZED]]) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1]}> {attr_map = "0:strides,1:padding,2:explicit_paddings,3:dilations", device = ""} : (tensor<1x3x4x512xf32>, tensor<2x3x3x512xf32>) -> tensor<*xf32> // CHECK: return %[[DEPTHWISE_CONV2D]] : tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 42K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
// ...${key2}... // } // ``` // The above template with generate two functions by substituting `key1` and // `key2` with given values. module { for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul"] { parameters[ {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "!tf_type.qint8"},
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
if (!function_name.starts_with("quantized_") && !function_name.starts_with("composite_")) { return failure(); } if (function_name.contains("depthwise_conv2d")) { return addReshapeOpToDepthwiseWeight(call_op, rewriter); } return failure(); } }; // Prints a summary about the quantization results. class QuantizationSummary {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_generated.h
} inline const char * const *EnumNamesBuiltinOperator() { static const char * const names[208] = { "ADD", "AVERAGE_POOL_2D", "CONCATENATION", "CONV_2D", "DEPTHWISE_CONV_2D", "DEPTH_TO_SPACE", "DEQUANTIZE", "EMBEDDING_LOOKUP", "FLOOR", "FULLY_CONNECTED", "HASHTABLE_LOOKUP", "L2_NORMALIZATION", "L2_POOL_2D",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 1M bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// Returns the dimension length of the channel dimension and also the slide // size by each position in the channel dimension accordingly. tfl.conv2d and // tfl.fully_connected has heading channel dimension, but tfl.depthwise_conv2d // has tailing channel dimension. This function is to provide a utility to // create the above information from the op property. static std::pair<int64_t, int64_t> GetBiasDimAndSliceSize(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0)