Results 61 - 70 of 306 for Quantized (0.26 sec)
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
        op_set=target_opset,
    )

    if target_opset != quant_opts_pb2.XLA:
      # Uniform quantized opset is not supported for weight-only
      with self.assertRaisesRegex(
          ValueError,
          'TF/Uniform quantized opset does not support weight-only.'
      ):
        converted_model = quantize_model.quantize(
            input_saved_model_path,
            output_directory,
            quantization_options,
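The test above pins down a real constraint in the TF quantizer: weight-only quantization is only accepted for the XLA opset. A minimal sketch of that precondition check, using hypothetical stand-in names (OpSet, QuantMethod, ValidateOptions) rather than the actual TensorFlow API:

#include <stdexcept>

// Hypothetical stand-ins; the real options live in protobuf messages.
enum class OpSet { TF, UNIFORM_QUANTIZED, XLA };
enum class QuantMethod { kStaticRangePtq, kWeightOnly };

void ValidateOptions(OpSet opset, QuantMethod method) {
  if (method == QuantMethod::kWeightOnly && opset != OpSet::XLA) {
    // Mirrors the ValueError the Python test expects.
    throw std::invalid_argument(
        "TF/Uniform quantized opset does not support weight-only.");
  }
}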
tensorflow/compiler/mlir/quantization/stablehlo/passes/testing/passes.td
  Runs the post-calibration passes for post-training quantization.
  }];
  let options = [
    Option<"unpack_quantized_types_", "unpack-quantized-types", "bool",
           /*default=*/"true",
           "Unpacks ops with uniform quantized types into "
           "operations without uniform quantized types (mostly i8 or i32).">
  ];
  let dependentDialects = [
    "mlir::stablehlo::StablehloDialect",
    "mlir::TF::TensorFlowDialect",
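The option description, "operations without uniform quantized types (mostly i8 or i32)", means the quantized op is lowered to plain integer arithmetic: i8 operands feeding an i32 accumulator, followed by a rescale. A minimal sketch of that shape, assuming symmetric quantization (zero points of 0) and a per-tensor output scale; none of these names come from the pass itself:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Dot product expressed purely in i8/i32, the "unpacked" form.
int8_t QuantizedDot(const std::vector<int8_t>& lhs,
                    const std::vector<int8_t>& rhs,
                    float lhs_scale, float rhs_scale, float out_scale) {
  int32_t acc = 0;  // i32 accumulator
  for (size_t i = 0; i < lhs.size(); ++i) {
    acc += static_cast<int32_t>(lhs[i]) * static_cast<int32_t>(rhs[i]);
  }
  // Rescale the i32 accumulator back into the i8 output domain.
  const float requant = lhs_scale * rhs_scale / out_scale;
  const int out = static_cast<int>(std::lround(acc * requant));
  return static_cast<int8_t>(std::clamp(out, -128, 127));
}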
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// RUN: tf-quant-opt %s -quant-insert-quantized-functions | FileCheck %s
// RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=ptq target-opset=UNIFORM_QUANTIZED' --mlir-print-ir-after-all | FileCheck --check-prefix=UQ-CHECK %s

// Empty module
module {
  func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> {
    func.return %arg0 : tensor<*xf32>
  }
}

// CHECK-NOT: func private @internal_rescale_fn
tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h
namespace mlir::quant::stablehlo {

// Performs int8 weight-only quantization on dot_general ops.
//
// The resulting `ModuleOp` contains quantized StableHLO ops serialized in
// `TF::XlaCallModuleOp`s. They are quantized using the weight constants, not
// relying on calibration.
class WeightOnlyPtqComponent : public Component {
 public:
  // Used for debugging purposes.
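The header comment captures the key property of weight-only PTQ: scales are derived from the weight constants themselves, with no calibration data. A sketch of that idea for a single per-tensor symmetric int8 scale (names are illustrative, not the WeightOnlyPtqComponent API):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

struct QuantizedWeights {
  std::vector<int8_t> values;
  float scale;  // dequantize with: w ~= scale * q
};

// Scale comes straight from max |w|; no calibration dataset is needed.
QuantizedWeights QuantizeWeightsInt8(const std::vector<float>& weights) {
  float max_abs = 0.0f;
  for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));
  const float scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
  QuantizedWeights out{{}, scale};
  out.values.reserve(weights.size());
  for (float w : weights) {
    out.values.push_back(static_cast<int8_t>(std::lround(w / scale)));
  }
  return out;
}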
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
// unquantized tensors are only inserted in the unquantized model
// whereas `DumpTensor` ops for the quantized tensors are only inserted
// in the quantized model. Both models are required to be able to dump
// both quantized and unquantized tensors and compare them offline.
if (quantization_options.has_debugger_config() &&
    quantization_options.debugger_config().debugger_type() ==
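The comment describes the debugger's contract: the unquantized model dumps reference tensors, the quantized model dumps its counterparts, and the two are compared offline. A sketch of the kind of offline metric such a comparison might compute (the metric choice here is an assumption, not taken from the debugger):

#include <cassert>
#include <vector>

// Per-tensor error between a dumped reference and its dequantized counterpart.
double MeanSquaredError(const std::vector<float>& unquantized,
                        const std::vector<float>& dequantized) {
  assert(unquantized.size() == dequantized.size());
  double sum = 0.0;
  for (size_t i = 0; i < unquantized.size(); ++i) {
    const double diff = unquantized[i] - dequantized[i];
    sum += diff * diff;
  }
  return unquantized.empty() ? 0.0 : sum / unquantized.size();
}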
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc
if (method.ok() && method->has_static_range_ptq()) {
  // TODO: b/331145946 - Use `Method` accessors.
  const StaticRangePtq& static_range_ptq_spec = method->static_range_ptq();
  // Look for quantized dimension specs for each quantized type and
  // populate `coeff_op_quant_dim`.
  for (const auto& [operand_idx, quantized_type] :
       static_range_ptq_spec.input_quantized_types()) {
    if (quantized_type.has_dimension_specs()) {
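`dimension_specs` here marks per-channel quantization: instead of one scale for the whole tensor, each slice along the quantized dimension carries its own scale. A sketch for a 2-D weight quantized along axis 0 (illustrative only; the spec protos are not modeled):

#include <algorithm>
#include <cmath>
#include <vector>

// weights[c][k]: one symmetric int8 scale per output channel c (axis 0).
std::vector<float> PerChannelScales(
    const std::vector<std::vector<float>>& weights) {
  std::vector<float> scales;
  scales.reserve(weights.size());
  for (const auto& channel : weights) {
    float max_abs = 0.0f;
    for (float w : channel) max_abs = std::max(max_abs, std::fabs(w));
    scales.push_back(max_abs > 0.0f ? max_abs / 127.0f : 1.0f);
  }
  return scales;
}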
tensorflow/compiler/mlir/tfr/passes/decompose.cc
  quantized =
      std::min(quantized, static_cast<int>(std::numeric_limits<int8_t>::max()));
  quantized =
      std::max(quantized, static_cast<int>(std::numeric_limits<int8_t>::min()));
  return builder.getI32IntegerAttr(quantized);
}

// Decompose the TF ops with the registered composition library.
class DecomposeTFOpsPass
    : public PassWrapper<DecomposeTFOpsPass, OperationPass<func::FuncOp>> {
 public:
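For reference, the clamp above is the tail end of a standard affine-quantize step. A self-contained version of the math, assuming the usual round-then-offset convention (the exact rounding and zero-point handling in decompose.cc may differ):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Affine-quantize a float and saturate to the int8 range; the result is
// still carried as an int, matching the getI32IntegerAttr in the snippet.
int32_t QuantizeToInt8Range(float value, float scale, int zero_point) {
  int quantized = static_cast<int>(std::lround(value / scale)) + zero_point;
  quantized = std::min(
      quantized, static_cast<int>(std::numeric_limits<int8_t>::max()));
  quantized = std::max(
      quantized, static_cast<int>(std::numeric_limits<int8_t>::min()));
  return quantized;
}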
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
    // asymmetric range. For a state tensor, assigning correct quantization
    // parameters is sufficient, and for constants with asymmetric range it's
    // not correctly quantized by legacy quantizer so call the new Quantize.
    return Quantize(real_value, tensor_type);
  } else if (width == 16) {
    if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
      const auto quantized_values =
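The `width == 16` branch follows the same affine scheme as int8, just over the wider int16 storage range. A sketch, assuming a symmetric (zero-point 0) 16-bit type, as is typical for 16-bit activation quantization:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Symmetric 16-bit quantize: same math as int8, wider storage range.
int16_t QuantizeToInt16(float value, float scale) {
  const long q = std::lround(value / scale);  // zero point assumed 0
  return static_cast<int16_t>(std::clamp(q, -32768L, 32767L));
}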
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h
std::unique_ptr<Pass> createOptimizePass();

// Creates a pass that finds quantization patterns and compose them to uniform
// quantized types.
std::unique_ptr<OperationPass<ModuleOp>>
CreateComposeUniformQuantizedTypePass();

// Creates a pass that finds stablehlo ops that accept or produce uniform
// quantized typed tensors and converts them to equivalent ops in the TFLite
// dialect.
std::unique_ptr<OperationPass<func::FuncOp>>
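These factory functions follow the standard MLIR pattern: each returns a std::unique_ptr to a pass, which callers schedule on a mlir::PassManager. A plausible wiring using standard PassManager calls; the pipeline order shown is an assumption, not TFLite's actual converter pipeline:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
// Factory declarations come from the passes.h header above.

void BuildPipeline(mlir::MLIRContext& context) {
  mlir::PassManager pm(&context);
  // Module-level pass: recognize quantization patterns and compose them
  // into uniform quantized types.
  pm.addPass(CreateComposeUniformQuantizedTypePass());
  // Function-level passes are nested under func.func.
  pm.addNestedPass<mlir::func::FuncOp>(createOptimizePass());
}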
tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc
      }
      op.replaceAllUsesWith(q.getArg());
      return success();
    }
    return failure();
  }
};

// The StorageCastOp is used to cast from a quantized type to its storage type
// or the opposite. If none of its input and output is quantized, the op has
// no effect and should be removed.
class RemoveRedundantScast
    : public mlir::OpRewritePattern<quantfork::StorageCastOp> {
 public:
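The comment fully specifies when this rewrite fires: a storage cast converts between a quantized type and its storage type, so if neither side is quantized it changes nothing and can be erased. A toy model of just that condition (the types are stand-ins for MLIR types, not the quantfork API):

// Minimal stand-ins: only the quantized-ness of each side matters here.
struct Type { bool is_quantized; };
struct StorageCast { Type input, output; };

// Matches the comment: no quantized input and no quantized output => no-op.
bool IsRedundantScast(const StorageCast& scast) {
  return !scast.input.is_quantized && !scast.output.is_quantized;
}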