- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 69 for Quantile (0.37 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax()); } auto quantize = builder.create<quantfork::QuantizeCastOp>( q_op.getLoc(), new_value_type.clone(new_qtype), new_value); auto dequantize = builder.create<quantfork::DequantizeCastOp>( dq_op.getLoc(), new_value_type, quantize.getResult()); return dequantize.getResult(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
src/runtime/debug/garbage.go
// stats.Pause slice will be reused if large enough, reallocated otherwise. // ReadGCStats may use the full capacity of the stats.Pause slice. // If stats.PauseQuantiles is non-empty, ReadGCStats fills it with quantiles // summarizing the distribution of pause time. For example, if // len(stats.PauseQuantiles) is 5, it will be filled with the minimum, // 25%, 50%, 75%, and maximum pause times. func ReadGCStats(stats *GCStats) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/tools/optimize/operator_property.h" //===----------------------------------------------------------------------===// // The prepare-quantize Pass for LSTM. // namespace mlir { namespace TFL { constexpr double power_of_two_scale = 32768.0; // Same with the ordering of //tensorflow/compiler/mlir/lite/ir/tfl_ops.td
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/decompose.cc
// The pass to decompose unregistered TF ops with the TFR compose function. // namespace mlir { namespace TFR { namespace { // Quantize the float value based on given scale and zero point attributes. IntegerAttr Quantize(float value, Attribute scale_attr, Attribute zp_attr, OpBuilder builder) { double scale = mlir::cast<FloatAttr>(scale_attr).getValueAsDouble();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
// asymmetric range. For a state tensor, assigning correct quantization // parameters is sufficient, and for constants with asymmetric range it's // not correctly quantized by legacy quantizer so call the new Quantize. return Quantize(real_value, tensor_type); } else if (width == 16) { if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) { const auto quantized_values =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
class UsedBy<string op> : Constraint< CPred<"llvm::isa<mlir::TFL::" # op # "Op>(*$0.getUsers().begin())">>; // When the op is passing-through, the output types of the quantized ops need // to be updated as well. Since the quantize op manages its own type by the // "qtype" attribute, we should update the type shape in this attribute. def ReorderTransposeDequantQuant : Pat<(TF_TransposeOp:$old_value
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// `stablehlo.convolution` assumes the following format: // [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f] // `stablehlo.dot_general` can take various formats. We only per-channel // quantize non-batch ops. // `stablehlo.dot_general` legalizable to `tfl.fully_connected` has a // filter rank of 2 with the last dimension as the channel dimension. const int64_t quantization_dimension =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
&q_builder, input_model, quantized_type, use_updated_hybrid_scheme, ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) { return absl::InvalidArgumentError( "Quantize weights transformation failed."); } const uint8_t* q_buffer = q_builder.GetBufferPointer(); *result = std::string(reinterpret_cast<const char*>(q_buffer), q_builder.GetSize());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 20.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.h
bool enable_dynamic_update_slice); std::unique_ptr<OperationPass<ModuleOp>> CreateLowerStaticTensorListPass(); // Creates an instance of the TensorFlow Lite dialect Quantize pass. // Use quant_specs.ops_blocklist and quant_specs.nodes_blocklist if possible // as they are now structure variables of QuantizationSpecs. std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 07 21:29:34 UTC 2024 - 10.9K bytes - Viewed (0)