Results 51 - 60 of 442 for "quantize"
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
  // previous quantized layer (Please note that this part is different
  // from DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model
  // has a DumpTensor, and it is used to save the entire value of outputs from
  // both the quantized and unquantized layer.
  DEBUGGER_TYPE_INT_PER_LAYER = 2;
  // DEBUGGER_TYPE_FLOAT_PER_LAYER creates a debugging model with both
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
    assert(scales_.size() == zero_points_.size());
  }

  // Quantize an Attribute by the quantization parameters. Return nullptr if
  // the conversion fails or the input array isn't an ElementsAttr.
  ElementsAttr convert(Attribute real_value);

 private:
  // Quantize a DenseFPElementsAttr by the quantization parameters.
  DenseElementsAttr convert(DenseFPElementsAttr attr);
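The UniformSupport.h excerpt above describes converting a real-valued attribute using paired per-axis scales and zero points. As a frame of reference, here is a minimal standalone sketch of the uniform affine quantization arithmetic such a converter wraps, q = clamp(round(x / scale) + zero_point, -128, 127); the helper and data names are made up for illustration and are not part of that header.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical standalone helper: uniform affine quantization of one
    // value into the signed 8-bit storage range.
    int8_t QuantizeValue(float x, float scale, int32_t zero_point) {
      const int32_t q = static_cast<int32_t>(std::round(x / scale)) + zero_point;
      return static_cast<int8_t>(std::clamp(q, -128, 127));
    }

    int main() {
      // One scale/zero-point pair per axis slice, mirroring the paired
      // scales_/zero_points_ vectors asserted equal in size in the excerpt.
      const std::vector<float> scales = {0.5f, 0.25f};
      const std::vector<int32_t> zero_points = {0, 10};
      const std::vector<std::vector<float>> real = {{1.0f, -2.0f}, {0.75f, 3.0f}};

      for (size_t axis = 0; axis < scales.size(); ++axis) {
        for (float x : real[axis]) {
          std::cout << static_cast<int>(
                           QuantizeValue(x, scales[axis], zero_points[axis]))
                    << " ";
        }
      }
      std::cout << "\n";  // prints: 2 -4 13 22
      return 0;
    }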
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
// The prepare-dynamic-range-quantize Pass.
//
namespace mlir {
namespace TFL {
namespace {

#define GEN_PASS_DEF_PREPAREDYNAMICRANGEQUANTIZEPASS
#include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"

// A boolean attribute used to describe whether input activations need to be
// asymmetrically quantized.
constexpr char kAsymmetricQuantizeInputsAttr[] = "asymmetric_quantize_inputs";
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
tensorflow/compiler/mlir/lite/utils/convert_type.h
// Returns element type from attribute Type 'type_attr'.
mlir::Type GetShapeStrippedType(mlir::TypeAttr type_attr);

// Returns true if 'val' is not from Quantize op or
// from Quantize Op with same quant type as 'qtype_attr'
bool NotFromQuantOpOrSameQuantType(mlir::Value val, mlir::TypeAttr qtype_attr);

}  // namespace tflite
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
        << quantized_per_axis_type << ".\n");
    return false;
  }
  return true;
}

// Determines whether the storage type of a quantized type is supported by
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
  if (storage_type.getWidth() == 8 ||
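The excerpt above is cut off inside the width check. A minimal sketch of the kind of predicate it describes, accepting 8- and 16-bit integer storage widths per the "ui8, i8 and i16 are supported" comment, might look like the following; this is an illustration of the stated rule with a hypothetical standalone helper, not the file's verbatim body.

    #include <iostream>

    // Illustrative predicate: per the excerpt, `tfl.quantize`/`tfl.dequantize`
    // accept quantized types whose integer storage is 8 or 16 bits wide
    // (covering ui8, i8, and i16). Not the MLIR implementation.
    bool IsSupportedStorageWidth(unsigned bit_width) {
      return bit_width == 8 || bit_width == 16;
    }

    int main() {
      std::cout << std::boolalpha
                << IsSupportedStorageWidth(8) << " "     // true  (i8 / ui8)
                << IsSupportedStorageWidth(16) << " "    // true  (i16)
                << IsSupportedStorageWidth(32) << "\n";  // false
      return 0;
    }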
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
    // the textual format (on the commandline for example).
    return "quant-propagate-quantize-type";
  }

  StringRef getDescription() const final {
    // This is a brief description of the pass.
    return "Propagate quantized type through allowed ops.";
  }

  void runOnOperation() override;
};

// Propagate dequantize op if the next op supports the data type.
// Given the below graph,
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
        /*value=*/filter_i8_value_attr);

    // Replace filter uses with uniform quantized filter.
    rewriter.replaceAllUsesWith(filter_op->getResult(0),
                                quantized_filter_constant_op.getResult());

    // Replace conv op with a new convolution op that has quantized output type.
    // Quantize -> Dequantize following r3.
    auto output_uniform_quantize_call_op = cast<func::CallOp>(
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
  auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
  ASSERT_THAT(func_op, NotNull());

  auto uniform_quantize_op_itr =
      func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
  ASSERT_THAT(
      uniform_quantize_op_itr,
      Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));

  // `uniform_quantize` is considered partially quantized because its output is