- Sort Score
- Results per page: 10
- Languages All
Results 1 - 7 of 7 for getValueAsDouble (0.3 sec)
-
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
min_value.push_back(FloatAttr::getValueAsDouble(*it)); } for (auto it = maxs.begin(); it != maxs.end(); ++it) { max_value.push_back(FloatAttr::getValueAsDouble(*it)); } } else { const auto fmin = dyn_cast<FloatAttr>(min); const auto fmax = dyn_cast<FloatAttr>(max); if (fmin && fmax) { min_value.push_back(fmin.getValueAsDouble());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
stats_op.emitError("Stats should have 2 values."); return failure(); } quant::QuantizedType quant_type; double min = FloatAttr::getValueAsDouble(stats.getValues<APFloat>()[0]); double max = FloatAttr::getValueAsDouble(stats.getValues<APFloat>()[1]); // Make sure the range includes zero. min = std::min(min, 0.0); max = std::max(max, 0.0);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
if (!stats) return failure(); for (auto it = stats.begin(), e = stats.end(); it != e; ++it) { double rmin = FloatAttr::getValueAsDouble(*it++); double rmax = FloatAttr::getValueAsDouble(*it); // The default nudging implementation of mlir quant library might cause // clamping during inference if the calibration range isn't wide enough.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc
value->set_i(attr.getInt()); return absl::OkStatus(); } Status ConvertAttribute(const mlir::FloatAttr& attr, AttrValue* value) { value->set_f(attr.getValueAsDouble()); return absl::OkStatus(); } Status ConvertAttribute(const mlir::ElementsAttr& attr, AttrValue* value) { return ConvertToTensorProto(attr, value->mutable_tensor()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 19.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
return {}; } std::vector<double> multiplier_values; absl::c_transform(multiplier_attr, std::back_inserter(multiplier_values), [](auto v) { return FloatAttr::getValueAsDouble(v); }); ArrayRef<double> multiplier_array(multiplier_values.data(), multiplier_values.size()); // Multiply the quantization parameters by the multiplier.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/decompose.cc
IntegerAttr Quantize(float value, Attribute scale_attr, Attribute zp_attr, OpBuilder builder) { double scale = mlir::cast<FloatAttr>(scale_attr).getValueAsDouble(); int64_t zp = mlir::cast<IntegerAttr>(zp_attr).getInt(); int quantized = static_cast<int>(std::round(value / scale) + zp); quantized =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc
scale_factor_op.getOutScale().getDefiningOp<arith::ConstantOp>(); if (!out_scale_op) { return failure(); } const double out_scale = out_scale_op.getValue().cast<FloatAttr>().getValueAsDouble(); auto in_scales_op = scale_factor_op.getInScales().getDefiningOp<BuildListOp>(); if (!in_scales_op || in_scales_op.getNumOperands() != 2) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 21 16:55:41 UTC 2023 - 38.2K bytes - Viewed (0)