Results 21 - 30 of 43 for dequantize (0.35 sec)
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
foreach BinaryOp = [TFL_DivOp, TFL_MulOp]<Op> in
  defm : FuseMulOrDivWithConv2dOrDepthwiseConv2d<BinaryOp>;

// This pattern applies when the same quantize/dequantize have been used twice
// with the same scale. We want to remove the redundancy.
// TODO(fengliuai): move this to the sanity check of pre-quantize pass.
def eliminate_dq_q_pairs : Pat<
  (TFL_QuantizeOp (TFL_DequantizeOp $in), $qt),
  (replaceWithValue $in),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0)
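As a quick numeric illustration of why this rewrite is safe (a minimal numpy sketch, not the MLIR pattern itself; the scale and zero_point values are assumed for the example): requantizing dequantized values with the same parameters reproduces the stored integers, so the dequantize/quantize pair can be replaced with the original value.

import numpy as np

scale, zero_point = 0.1, 0

q = np.array([-128, -5, 0, 7, 127], dtype=np.int8)   # stored int8 values
deq = scale * (q.astype(np.float32) - zero_point)    # dequantize
# requantize with the *same* scale and zero point
req = np.clip(np.round(deq / scale) + zero_point, -128, 127).astype(np.int8)

assert np.array_equal(q, req)  # identical params => the round trip is the identity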
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
// Quantization ops.
//===----------------------------------------------------------------------===//

def TFL_DequantizeOp: TFL_Op<"dequantize", [NoMemoryEffect]> {
  let summary = "Dequantize operator";

  let description = [{
Converts quantized array of integers to floating-points according to the
quantization parameters.
  }];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0)
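The description above corresponds to the usual TFLite affine mapping real = scale * (quantized - zero_point). A minimal sketch of that arithmetic, with a hypothetical helper name:

import numpy as np

def dequantize(q: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
    """Map quantized integers back to floats using the quantization params."""
    return scale * (q.astype(np.float32) - zero_point)

print(dequantize(np.array([0, 10, 20], dtype=np.int8), scale=0.1, zero_point=0))
# -> [0. 1. 2.]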
tensorflow/compiler/mlir/lite/tests/ops.mlir
func.func @testDequantize(tensor<? x !quant.uniform<i8:f32, 0.1>>) -> tensor<? x f32> {
^bb0(%arg0: tensor<? x !quant.uniform<i8:f32, 0.1>>):
  // CHECK: "tfl.dequantize"(%arg0) : (tensor<?x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<?xf32>
  %0 = "tfl.dequantize"(%arg0): (tensor<? x !quant.uniform<i8:f32, 0.1>>) -> tensor<? x f32>
  func.return %0 : tensor<? x f32>
}

// CHECK-LABEL: testLogicalNot
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
// Stores information about how to quantize a user-specified custom operation.
// CustomOpInfo contains info of its corresponding CustomOp registered in the
// CustomOpMap. 'quantizable_input_indices' is used to determine which indices
// of the CustomOp are quantizable. 'is_weight_only' is used to specify whether
// the custom op is quantized only for storage and dequantized at runtime.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes - Viewed (0)
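A hedged Python mirror of the fields described above (the real CustomOpInfo is a C++ struct in this header; the dataclass and the op name "MyCustomConv" are purely illustrative):

from dataclasses import dataclass, field

@dataclass
class CustomOpInfo:
    # Which input indices of the custom op may be quantized.
    quantizable_input_indices: list[int] = field(default_factory=list)
    # Quantize only for storage; dequantize the weights at runtime.
    is_weight_only: bool = False

# A CustomOpMap keys CustomOpInfo by the registered custom op name.
custom_op_map: dict[str, CustomOpInfo] = {
    "MyCustomConv": CustomOpInfo(quantizable_input_indices=[1], is_weight_only=True),
}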
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
let summary = "Quantizes then dequantizes a tensor.";

let description = [{
This is almost identical to QuantizeAndDequantizeV2, except that it returns a
gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
}];

let arguments = (ins
  Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0)
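A numeric sketch of the behavior described above, assuming a simple uniform quantizer: the forward pass quantizes then dequantizes, while the gradient is 1 for inputs inside the quantization range and 0 outside. Parameter names are illustrative, not the op's exact signature.

import numpy as np

def quantize_and_dequantize(x, input_min, input_max, num_bits=8):
    levels = 2**num_bits - 1
    scale = (input_max - input_min) / levels
    q = np.round((np.clip(x, input_min, input_max) - input_min) / scale)
    y = q * scale + input_min  # forward: fake-quantized values
    # Gradient of 1 inside the quantization range, 0 otherwise.
    grad = ((x >= input_min) & (x <= input_max)).astype(np.float32)
    return y, grad

y, grad = quantize_and_dequantize(
    np.array([-2.0, 0.3, 0.9, 2.0]), input_min=-1.0, input_max=1.0)
# grad -> [0. 1. 1. 0.]: out-of-range inputs get zero gradient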
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
):
  quantize_model.quantize(
      self._input_saved_model_path,
      self._output_saved_model_path,
      quantization_options=quantization_options,
      representative_dataset=representative_dataset,
  )
converted_model = quantize_model.quantize(
    self._input_saved_model_path,
    self._output_saved_model_path,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0)
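For context, the representative_dataset passed above is typically a generator of calibration samples used to determine quantization ranges. A minimal sketch, assuming an input signature key "input_tensor" and an image-shaped input (both hypothetical):

import numpy as np

def representative_dataset():
    # Yield calibration samples keyed by the model's input signature.
    for _ in range(100):
        yield {"input_tensor": np.random.uniform(size=(1, 224, 224, 3)).astype(np.float32)}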
tensorflow/compiler/mlir/lite/python/converter_python_api.h
const tensorflow::quantization::PyFunctionLibrary*
    quantization_py_function_library = nullptr);

// Quantize the model with calibration data. Throws errors if `fully_quantize`
// is specified but the calibration data are not sufficient to quantize the
// model.
PyObject* MlirQuantizeModel(PyObject* data, bool disable_per_channel,
                            bool fully_quantize, int inference_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 18:18:30 UTC 2024 - 3.6K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/python/wrap_converter.py
    enable_whole_model_verify,
    denylisted_ops,
    denylisted_nodes,
    enable_variable_quantization,
    disable_per_channel_for_dense_layers,
    debug_options_str,
):
  """Wraps experimental mlir quantize model."""
  return _pywrap_converter_api.ExperimentalMlirQuantizeModel(
      input_data_str,
      disable_per_channel,
      fully_quantize,
      inference_type,
      input_data_type,
      output_data_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 18:18:30 UTC 2024 - 3K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
if (IsCallToQuantizableLiftedFunction(op)) {
  std::optional<StringRef> composite_function_name =
      GetCompsiteFunctionName(op);
  if (!composite_function_name.has_value()) return failure();

  // Quantize inputs of quantizable composite functions.
  for (OpOperand &input : op->getOpOperands()) {
    Type element_type = getElementTypeOrSelf(input.get().getType());
    // Non-float cases won't be calibrated.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize | FileCheck %s

// Test that hybrid quantized dot_general is produced when q/dq pair only exists
// for weight.
module attributes {tf_saved_model.semantics} {
  func.func private @quantize_dot_general_fn(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} {
    %cst = stablehlo.constant dense<3.000000e-01> : tensor<2x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 4.8K bytes - Viewed (0)
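A numeric sketch of the hybrid (weight-only) scheme this test checks, under the assumption that only the weight carries a q/dq pair: the weight is stored as integers and dequantized to float before the dot_general, while the activation stays float throughout. The scale value is assumed for illustration.

import numpy as np

w_scale = 0.3 / 127.0                                  # assumed per-tensor scale
w_q = np.full((2, 3), 127, dtype=np.int8)              # int8 storage of the 3.0e-01 weight
activation = np.random.randn(1, 2).astype(np.float32)  # float activation, no q/dq

w = w_scale * w_q.astype(np.float32)                   # dequantize weight at runtime
result = activation @ w                                # float dot_general -> shape (1, 3)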