- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 16 for scale_fn (0.25 sec)
-
tensorflow/compiler/mlir/lite/quantization/device_target.h
struct KernelSpec { // Scale constraint ScaleConstraintType type; // Custom function to derive the scales. Only available when the scale // constraint is `CustomScale`. ScaleFn scale_fn; }; class KernelSpecs { public: using Signature = llvm::SmallVector<quant::AnyQuantizedType, 4>; // Returns the kernel specification for the kernel signature.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
switch (spec->type) { case ScaleConstraintType::OutputInputFreeScale: { // no propagation. *changed |= false; break; } case ScaleConstraintType::CustomScale: { if (failed(spec->scale_fn(this, op, new_items, changed))) { return failure(); } break; } case ScaleConstraintType::OutputInputSameScale: { auto params = GetQuantParamsForSameScaleConstraint(op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0) -
staging/src/k8s.io/apimachinery/pkg/api/resource/scale_int.go
Dr. Stefan Schimanski <******@****.***> 1485349987 +0100
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sun Jan 29 20:41:44 UTC 2017 - 2.5K bytes - Viewed (0) -
pkg/controller/podautoscaler/monitor/metrics.go
&metrics.CounterOpts{ Subsystem: hpaControllerSubsystem, Name: "reconciliations_total", Help: "Number of reconciliations of HPA controller. The label 'action' should be either 'scale_down', 'scale_up', or 'none'. Also, the label 'error' should be either 'spec', 'internal', or 'none'. Note that if both spec and internal errors happen during a reconciliation, the first one to occur is reported in `error` label.",
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Mar 14 22:47:24 UTC 2023 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
Value scale_op = rewriter.create<TF::ConstOp>( loc, scale_type, DenseFPElementsAttr::get(scale_type, {static_cast<float>(qtype.getScale())})); if (original_input_tensor_type.getElementType().isBF16()) { // Add bf16 cast op after scale to match with the next op's data // type. scale_op = rewriter.create<TF::CastOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
return quantizeF32ToInt8(expressed_value); } bool lossy; expressed_value.convert(scale_.getSemantics(), round_mode_, &lossy); // fixed_point = clamp(clamp_min, clamp_max, ( // roundHalfToEven(expressed / scale) + zero_point)) APFloat scaled = (expressed_value / scale_); scaled.roundToIntegral(round_mode_); scaled.add(zero_point_, round_mode_);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
%scale_f64 = "tf.Div"(%range_nume, %range_deno) : (tensor<1xf64>, tensor<f64>) -> tensor<1xf64> %scale = "tf.Cast"(%scale_f64) : (tensor<1xf64>) -> tensor<1xf32> // Add comparison with minimum if needed %intermediate_val = "tf.Div"(%r_max_f64, %scale_f64) : (tensor<1xf64>, tensor<1xf64>) -> tensor<1xf64>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
pkg/controller/podautoscaler/monitor/monitor.go
*/ package monitor import ( "time" v2 "k8s.io/api/autoscaling/v2" ) type ActionLabel string type ErrorLabel string const ( ActionLabelScaleUp ActionLabel = "scale_up" ActionLabelScaleDown ActionLabel = "scale_down" ActionLabelNone ActionLabel = "none" // ErrorLabelSpec represents an error due to an invalid spec of HPA object. ErrorLabelSpec ErrorLabel = "spec"
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Mar 14 22:47:24 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc
return failure(); } TF::ConstOp scale_op; TF::ConstOp zp_op; // Reads quantization parameters from the quantized type, and converts // them to constants. rewriter.setInsertionPoint(qparams_op); Location loc = qparams_op->getLoc(); if (auto qtype = cast_qtype.dyn_cast<quant::UniformQuantizedType>()) { scale_op = rewriter.create<TF::ConstOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 21 16:55:41 UTC 2023 - 38.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/device_target.cc
} else { // float signature->push_back(AnyQuantizedType()); } } LogicalResult DeviceTarget::RegisterKernel( llvm::StringRef kernel, const KernelSpecs::Signature& signature, const ScaleFn& fn, const ScaleDecomposeFn& dfn) { return specs_[kernel].Add(signature, {ScaleConstraintType::CustomScale, fn}); } namespace ph = std::placeholders; LogicalResult DeviceTarget::RegisterKernel(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.3K bytes - Viewed (0)