- Sort Score
- Result 10 results
- Languages All
Results 81 - 90 of 200 for requantize (0.14 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
call_op, result_types, args, FlatSymbolRefAttr::get(new_quant_func_name)); return success(); } // For composite functions followed by Dequantize ops, merges the Dequantize // op into the functions by creating quantized functions with float output. LogicalResult mergeDequantizeOpFollowingQuantizedFunction( TF::PartitionedCallOp call_op, const SmallVector<Value, 4>& args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
// execution to be safe. Although normally they should support float // execution. Not Quantized ops. if (!int8_type_observed && !uint8_type_observed) return; // Insert dequantize ops for every quantized input. SmallVector<Value, 4> dequantized_inputs; for (auto& input : op->getOpOperands()) { auto input_type = input.get().getType();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize_op_order.mlir
%0 = "tfl.dequantize"(%arg0) : (tensor<1000x1000x!quant.uniform<i8:f32, 7.812500e-03>>) -> tensor<1000x1000xf32> %1 = "tfl.gather"(%0, %arg1) {axis = 0 : i32, batch_dims = 0 : i32}: (tensor<1000x1000xf32>, tensor<1x1xi32>) -> tensor<1x1x1000xf32> func.return %1 : tensor<1x1x1000xf32> // CHECK-NEXT: tfl.gather // CHECK-NEXT: tfl.dequantize } // CHECK-LABEL: dequantize_pushdown_gather_with_reduction
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 01 02:06:15 UTC 2022 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
// Stores information about how to quantize a user-specified custom operation. // CustomOpInfo contains info of its corresponding CustomOp registered in the // CustomOpMap. 'quantizable_input_indices' is used to determine which indices // of the CustomOp are quantizable. 'is_weight_only' is used to specify whether the // custom op is quantized only for storage and dequantized at runtime.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir
%4 = "tfl.reshape"(%3, %1) : (tensor<1x4x384x32x!quant.uniform<i8:f32, 0.19:1>>, tensor<3xi32>) -> tensor<4x384x32x!quant.uniform<i8:f32, 0.19:1>> // CHECK-NOT: tac.device tac.inference_type %5 = "tfl.dequantize"(%4) : (tensor<4x384x32x!quant.uniform<i8:f32, 0.19:1>>) -> tensor<4x384x32xf32> func.return %5 : tensor<4x384x32xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 19:32:06 UTC 2023 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
// have side effects e.g. reduced flatbuffer size. Only certain type // conversions are supported. bool reduce_type_precision = false; // Whether to consider this model a quantized model with quantize/dequantize // ops and to convert kernels to quantized kernels wherever appropriate. quant::QDQConversionMode qdq_conversion_mode = quant::QDQConversionMode::kQDQNone;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:05:30 UTC 2024 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
// whose storage type is 32-bit integer and expressed type is f32. bool IsI32F32UniformQuantizedPerAxisType(Type type); // Determines whether the storage type of a quantized type is supported by // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported. bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type); // Returns true if a type is quantized tensor type. bool IsQuantizedTensorType(Type type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
func_name, rewriter, quant_type, val_to_dequantize, result_type, LogicsForUniformDequanization); return dequant_op; } } // namespace // Generate quantize and dequantize functions with uniform quantization. std::optional<TF::PartitionedCallOp> ApplyUniformQuantization( PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/lower_quantized.mlir
// RUN: tf-opt %s -test-tf-lower-tf | FILECHECK_OPTS="" FileCheck %s // CHECK-LABEL: dequantize func.func @dequantize(%arg0: tensor<2x3x!tf_type.qint8>, %min_range: tensor<f32>, %max_range: tensor<f32>) -> tensor<2x3xf32> { // CHECK-DAG: %[[HALF_RANGE:.*]] = "tf.Const"() <{value = dense<1.280000e+02> : tensor<f32>}> // CHECK-DAG: %[[C255:.*]] = "tf.Const"() <{value = dense<2.550000e+02> : tensor<f32>}>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 2.8K bytes - Viewed (0) -
RELEASE.md
supported. The following ops are not currently implemented: Dequantize, QuantizeAndDequantize, QuantizedAvgPool, QuantizedBatchNormWithGlobalNormalization, QuantizedBiasAdd, QuantizedConcat, QuantizedConv2D, QuantizedMatMul, QuantizedMaxPool, QuantizeDownAndShrinkRange, QuantizedRelu, QuantizedRelu6, QuantizedReshape, QuantizeV2, RequantizationRange, and Requantize. * Go: Experimental API in Go to create and execute graphs
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 730.3K bytes - Viewed (0)