Results 1 - 10 of 95 for quant_type (0.29 sec)
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
  QuantizedType quant_type =
      CalculateUniformQuantParams(rewriter, op, weight_spec);
  if (!quant_type) return nullptr;
  std::optional<Value> quantized_val =
      AddUniformQuantizeOps(rewriter, op, quant_type);
  if (!quantized_val.has_value()) return std::nullopt;
  std::optional<TF::PartitionedCallOp> dequantized_val =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0)
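CalculateUniformQuantParams is internal to this file and not shown in the excerpt. As orientation, a minimal sketch of how a comparable signed 8-bit per-tensor type can be built with the upstream MLIR quant API; the helper name and the affine-scale arithmetic are ours (the textbook formula), not TF's exact code:

#include <cmath>
#include <cstdint>
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"

// Builds a signed 8-bit per-tensor uniform quantized type from a float
// range [min, max] -- the kind of result CalculateUniformQuantParams
// hands back for a weight constant.
mlir::quant::UniformQuantizedType MakeQi8Type(mlir::MLIRContext* ctx,
                                              double min, double max) {
  const int64_t qmin = -128, qmax = 127;
  const double scale = (max - min) / static_cast<double>(qmax - qmin);
  // Choose the zero point so that `min` maps exactly onto qmin.
  const int64_t zero_point =
      static_cast<int64_t>(std::llround(qmin - min / scale));
  return mlir::quant::UniformQuantizedType::get(
      mlir::quant::QuantizationFlags::Signed,
      mlir::IntegerType::get(ctx, 8),  // storage type: i8
      mlir::FloatType::getF32(ctx),    // expressed type: f32
      scale, zero_point, qmin, qmax);
}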
tensorflow/compiler/mlir/lite/utils/variables_utils.cc
}

// Check quantized types.
if (auto quant_type = element_type.dyn_cast<mlir::quant::QuantizedType>()) {
  // TFLite supports QI16, QI32, QI8, and QUI8
  if ((quant_type.getStorageTypeIntegralWidth() == 16 &&
       quant_type.isSigned()) ||
      quant_type.getStorageTypeIntegralWidth() == 8 ||
      (quant_type.getStorageTypeIntegralWidth() == 32 &&
       quant_type.isSigned()))
    return true;
}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jun 21 19:32:03 UTC 2021 - 2.6K bytes - Viewed (0)
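The condition condenses to: 8-bit storage of either signedness, or 16-/32-bit storage only when signed. The same predicate as a standalone helper (the function name is ours, not the file's):

#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Types.h"

// True iff `element_type` is a quantized element type TFLite variables
// support: QI8, QUI8, QI16 (signed), QI32 (signed).
bool IsSupportedVariableQuantType(mlir::Type element_type) {
  auto quant_type = element_type.dyn_cast<mlir::quant::QuantizedType>();
  if (!quant_type) return false;
  const unsigned width = quant_type.getStorageTypeIntegralWidth();
  return width == 8 || ((width == 16 || width == 32) && quant_type.isSigned());
}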
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
                                     /*isSigned=*/true);
    }
    if (quant_specs_.legacy_float_scale) {
      quant_type = quant::DownCastScale(quant_type, min, max, op.getLoc());
    }
  }
  rewriter.setInsertionPointAfter(stats_op);
  Type result_type = quant_type.castFromExpressedType(stats_op.getType());
  auto q = rewriter.create<Q>(stats_op.getLoc(), result_type,
                              stats_op.getArg());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0)
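castFromExpressedType keeps the tensor shape and swaps the float element type for the quantized one. A minimal sketch of that step in isolation; the helper is templated over the quantize op `Q`, so TFL::QuantizeOp below is a stand-in assumption, and the function name is ours:

#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/PatternMatch.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

// Wraps a float value in a quantize op, e.g.
// tensor<1x4xf32> -> tensor<1x4x!quant.uniform<i8:f32, ...>>.
mlir::Value InsertQuantize(mlir::PatternRewriter& rewriter, mlir::Location loc,
                           mlir::Value float_val,
                           mlir::quant::QuantizedType quant_type) {
  mlir::Type result_type =
      quant_type.castFromExpressedType(float_val.getType());
  auto q = rewriter.create<mlir::TFL::QuantizeOp>(
      loc, result_type, float_val, mlir::TypeAttr::get(result_type));
  return q.getResult();
}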
tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
Block& block = func.front();
Operation* terminator = block.getTerminator();

// Replace input_arg(tensor<quant_type>) -> tfr.cast
// with input_arg(tensor<storage_type>) -> tfr.cast
for (BlockArgument arg : block.getArguments()) {
  Type arg_type = arg.getType();
  if (auto quant_type = arg_type.cast<TensorType>()
                            .getElementType()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 4.5K bytes - Viewed (0)
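The rewrite re-types each block argument from the quantized element type to its bare storage integer. A sketch of just that type substitution (helper name is ours):

#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"

// tensor<2x3x!quant.uniform<i8:f32, ...>> -> tensor<2x3xi8>
mlir::TensorType ToStorageType(mlir::TensorType arg_type) {
  auto quant_type =
      arg_type.getElementType().dyn_cast<mlir::quant::QuantizedType>();
  if (!quant_type) return arg_type;  // not quantized; leave unchanged
  return arg_type.clone(quant_type.getStorageType());
}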
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
                                   is_narrow_range, is_legacy_float));
  }
  return insertQDQ(rewriter, op, quant_type, quant_op);
}

// Insert Quantize and Dequantize ops.
bool insertQDQ(PatternRewriter& rewriter, arith::ConstantOp op,
               QuantizedType quant_type, QuantizationUnit quant_op) const {
  if (!quant_type) return false;
  Operation* quantize_op = quant_op.first;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0)
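insertQDQ pairs a quantize with an immediate dequantize, so downstream consumers still see float while the chosen parameters are recorded in the intermediate type. A condensed sketch of that pattern; the cast ops assume the quantfork dialect used by these TF passes, and the rewiring is simplified (the real pass re-points one specific operand rather than all uses):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/PatternMatch.h"

bool InsertQdqSketch(mlir::PatternRewriter& rewriter,
                     mlir::arith::ConstantOp op,
                     mlir::quant::QuantizedType quant_type) {
  if (!quant_type) return false;
  rewriter.setInsertionPointAfter(op);
  // Quantize the constant, then immediately dequantize it back to float.
  mlir::Type q_type = quant_type.castFromExpressedType(op.getType());
  auto q = rewriter.create<mlir::quantfork::QuantizeCastOp>(
      op.getLoc(), q_type, op.getResult());
  auto dq = rewriter.create<mlir::quantfork::DequantizeCastOp>(
      op.getLoc(), op.getType(), q.getResult());
  rewriter.replaceAllUsesExcept(op.getResult(), dq.getResult(), q);
  return true;
}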
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
                                   is_narrow_range, is_legacy_float));
  }
  return insertQDQ(rewriter, op, quant_type, quant_op);
}

// Insert Quantize and Dequantize ops.
bool insertQDQ(PatternRewriter& rewriter, arith::ConstantOp op,
               QuantizedType quant_type,
               std::pair<Operation*, int> quant_op) const {
  if (!quant_type) return false;
  Operation* quantize_op = quant_op.first;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0)
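This is the TFLite counterpart of the previous result; the only visible difference is that it spells out std::pair<Operation*, int> where the TF pass uses the QuantizationUnit alias. Either way, .first is the op consuming the quantized value and .second the operand index to rewire, as in this sketch (RewireOperand is ours):

#include <utility>
#include "mlir/IR/Operation.h"

using QuantizationUnit = std::pair<mlir::Operation*, int>;

// Points the recorded operand of the consuming op at the new value.
void RewireOperand(QuantizationUnit quant_op, mlir::Value new_value) {
  quant_op.first->setOperand(quant_op.second, new_value);
}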
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
}

auto quant_type = dyn_cast<quant::QuantizedType>(weight_type);
if (!quant_type) {
  op->emitError(
      "Failed to get weight quantization parameters for weight-only "
      "quantization.");
  return;
}
const Type expressed_type = op->getResult(0).getType();
const Type quantized_type = quant_type.castFromExpressedType(expressed_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0)
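Failure is surfaced on the op's own diagnostic stream rather than through a status code. The same guard packaged as one helper (the function name is ours):

#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Operation.h"

// Returns the quantized tensor type for `op`'s result, or a null Type on
// failure, reporting the problem on the op itself.
mlir::Type GetQuantizedResultType(mlir::Operation* op,
                                  mlir::Type weight_type) {
  auto quant_type = mlir::dyn_cast<mlir::quant::QuantizedType>(weight_type);
  if (!quant_type) {
    op->emitError(
        "Failed to get weight quantization parameters for weight-only "
        "quantization.");
    return {};
  }
  return quant_type.castFromExpressedType(op->getResult(0).getType());
}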
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc
TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                             const tflite::Model* input_model,
                             BufferType quant_type,
                             bool use_updated_hybrid_scheme) {
  tflite::TensorType inference_type;
  switch (quant_type) {
    case BufferType::QUANTIZED_FLOAT16:
      inference_type = tflite::TensorType_FLOAT16;
      break;
    default:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 9.5K bytes - Viewed (0)
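The switch maps the BufferType to a TFLite inference type before quantization runs. A sketch of driving this entry point for float16 weights; we assume the header's mlir::lite namespace, and model loading/serialization are out of scope:

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h"

// Quantizes `input_model`'s weights to float16, leaving the result in
// `builder`.
TfLiteStatus QuantizeToFloat16(flatbuffers::FlatBufferBuilder* builder,
                               const tflite::Model* input_model) {
  return mlir::lite::QuantizeWeights(
      builder, input_model, mlir::lite::BufferType::QUANTIZED_FLOAT16,
      /*use_updated_hybrid_scheme=*/true);
}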
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                             const tflite::Model* input_model,
                             BufferType quant_type = BufferType::QUANTIZED_INT8,
                             bool use_updated_hybrid_scheme = true);

TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes - Viewed (0)
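With those defaults, a caller only supplies the builder and the model and gets QUANTIZED_INT8 weights under the updated hybrid scheme. A minimal usage sketch; the wrapper name is ours, and `model_data` is assumed to hold a valid serialized TFLite flatbuffer:

#include <string>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h"

TfLiteStatus QuantizeWithDefaults(const std::string& model_data,
                                  flatbuffers::FlatBufferBuilder* builder) {
  // GetModel comes from the flatbuffers-generated TFLite schema.
  const tflite::Model* model = tflite::GetModel(model_data.data());
  return mlir::lite::QuantizeWeights(builder, model);
}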