- Sort Score
- Results per page: 10
- Languages All
Results 61 - 70 of 81 for dequantize (0.4 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/tools/optimize/operator_property.h" //===----------------------------------------------------------------------===// // The prepare-quantize Pass for LSTM. // namespace mlir { namespace TFL { constexpr double power_of_two_scale = 32768.0; // Same with the ordering of //tensorflow/compiler/mlir/lite/ir/tfl_ops.td
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/decompose.cc
// The pass to decompose unregistered TF ops with the TFR compose function. // namespace mlir { namespace TFR { namespace { // Quantize the float value based on given scale and zero point attributes. IntegerAttr Quantize(float value, Attribute scale_attr, Attribute zp_attr, OpBuilder builder) { double scale = mlir::cast<FloatAttr>(scale_attr).getValueAsDouble();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
_QuantizationComponent.COMPONENT_ACTIVATION ].tensor_type ) # Unlike the HISTOGRAM_PERCENTILE method, the HISTOGRAM_MSE method uses # num_bits because it actually quantizes and dequantizes values. if activation_tensor_type != _TensorType.TENSORTYPE_INT_8: raise ValueError( 'Only TENSORTYPE_INT_8 is supported for HISTOGRAM_MSE calibration'
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
// asymmetric range. For a state tensor, assigning correct quantization // parameters is sufficient, and for constants with asymmetric range it's // not correctly quantized by legacy quantizer so call the new Quantize. return Quantize(real_value, tensor_type); } else if (width == 16) { if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) { const auto quantized_values =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
class UsedBy<string op> : Constraint< CPred<"llvm::isa<mlir::TFL::" # op # "Op>(*$0.getUsers().begin())">>; // When the op is passing-through, the output types of the quantized ops need // to be updated as well. Since the quantize op manages its own type by the // "qtype" attribute, we should update the type shape in this attribute. def ReorderTransposeDequantQuant : Pat<(TF_TransposeOp:$old_value
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -quant-quantize-composite-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -symbol-dce | FileCheck %s module { // TODO(b/260020937): Support transpose_a, transpose_b for matmul. func.func @matmul(%arg0: tensor<2x12xf32>) -> (tensor<*xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
&q_builder, input_model, quantized_type, use_updated_hybrid_scheme, ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) { return absl::InvalidArgumentError( "Quantize weights transformation failed."); } const uint8_t* q_buffer = q_builder.GetBufferPointer(); *result = std::string(reinterpret_cast<const char*>(q_buffer), q_builder.GetSize());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.h
bool enable_dynamic_update_slice); std::unique_ptr<OperationPass<ModuleOp>> CreateLowerStaticTensorListPass(); // Creates an instance of the TensorFlow Lite dialect Quantize pass. // Use quant_specs.ops_blocklist and quant_specs.nodes_blocklist if possible // as they are now structure variables of QuantizationSpecs. std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 07 21:29:34 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
// quantized_tensor_data.pb. // Since this process doesn't happen for per layer, we need to set file_name // as quantized_tensor_data.pb here. // TODO: b/296933893 - Refactor the debugger code when no quantize option // is added std::string file_name = debugger_type_ == DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL ? "unquantized_tensor_data.pb" : "quantized_tensor_data.pb";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td
let regions = (region SizedRegion<1>:$body); let hasVerifier = 1; } def Quantization_ReturnOp : Quantization_Op<"return", [Terminator]> { let summary = [{ The `return` operation terminates a quantize region and returns values. }]; let arguments = (ins Variadic<AnyTensor>:$results); } //===----------------------------------------------------------------------===//
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 03:10:59 UTC 2024 - 10.2K bytes - Viewed (0)