- Sort Score
- Results per page: 10
- Languages All
Results 11 - 17 of 17 for conv_2d (0.11 sec)
-
tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h
if (!input_type || !input_type.hasStaticShape()) { return false; } total_count += input_type.getNumElements(); } *count = total_count; return true; } // For conv2d/depthwise_conv/fully_connected ops. // This algorithm actually comes from TOCO tooling_util.cc static bool GetArithmeticCountForConvAndFullyconnectedOp(mlir::Operation* op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td
def MultiplyFakeQuantValue : NativeCodeCall< "MultiplyFakeQuantValue($_builder, $_loc, $0...)">; // Convert AddV2Op following an AffineOp to BiasAddOp. // For Conv3D, even though the Conv3D op has "NDHWC" data format, the BiasAdd // will still have the data format of "NHWC". def ConvertAddToBiasAdd : Pat< (TF_AddV2Op (SupportedAffineOpMatcher $conv_out, $input, $weight),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
%dq_bias = "quantfork.dcast"(%q_bias) : (tensor<2x!quant.uniform<i32:f32, 0.044022349891595126>>) -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir
%outputs_6, %control_7 = tf_executor.island wraps "tf.Const"() {device = "", value = dense<[-1, 16384]> : tensor<2xi32>} : () -> tensor<2xi32> %outputs_8, %control_9 = tf_executor.island wraps "tf.Conv2D"(%arg0, %outputs_0) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 2, 2, 1], use_cudnn_on_gpu = true} : (tensor<16x224x224x3xf32>, tensor<*xf32>) -> tensor<16x112x112x?xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 00:18:59 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// NEXT ID: 7 message UnitWiseQuantizationSpec { // Quantization unit granularity. // NEXT ID: 4 message QuantizationUnit { // Type of the op, ex: Conv2D, MatMul, Einsum... The node_name field can // be omitted if it is intended to match all nodes with this type. string op_type = 1; // Name of the node. This field accepts re2 regex format. If the node name
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
if ((quantization_method_ == tensorflow::quantization::QuantizationMethod:: METHOD_DYNAMIC_RANGE_INT8) && (function_name.contains("batch_matmul") || function_name.contains("conv3d"))) { call_op->removeAttr(kQuantTraitAttrName); } // TODO(b/270906404): Support weight-only gather for uniform quantized opset // in PTQ mode if (target_opset_ == OpSet::UNIFORM_QUANTIZED &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0)