- Sort by: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 77 for numBits (0.15 sec)
-
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
quant_dim = mlir::cast<ShapedType>(res.getType()).getRank() - 1; } // Use the min/max from the operands and the num_bits and narrow_range // attribute to create the quantization parameter for the new quantize op. rewriter.setInsertionPointAfter(tf_op.getOperation()); IntegerAttr num_bits = rewriter.getI64IntegerAttr(tf_op.getNumBits()); BoolAttr narrow_range = rewriter.getBoolAttr(tf_op.getNarrowRange());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-skip-quantization-ops.mlir
func.func @fake_quant_with_min_max_vars(%arg0: tensor<1x1x28x48xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x28x48xf32> { %0 = "tf.FakeQuantWithMinMaxVars"(%arg0, %arg1, %arg2) {device = "", narrow_range = true, num_bits = 8 : i64} : (tensor<1x1x28x48xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x28x48xf32> func.return %0 : tensor<1x1x28x48xf32> // CHECK-SKIP: tf.FakeQuantWithMinMaxVars // CHECK-NOSKIP-NOT: tf.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 14 07:38:29 UTC 2022 - 676 bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc
quant_dim = mlir::cast<ShapedType>(res.getType()).getRank() - 1; } // Use the min/max from the operands and the num_bits and narrow_range // attribute to create the quantization parameter for the new quantize op. rewriter.setInsertionPointAfter(tf_op.getOperation()); IntegerAttr num_bits = rewriter.getI64IntegerAttr(tf_op.getNumBits()); BoolAttr narrow_range = rewriter.getBoolAttr(tf_op.getNarrowRange());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
quant_dim = input_type.getRank() - 1; } // Use the min/max from the operands and the num_bits and narrow_range // attribute to create the quantization parameter for the new quantize op. rewriter.setInsertionPointAfter(tf_op.getOperation()); IntegerAttr num_bits = rewriter.getI64IntegerAttr(tf_op.getNumBits()); BoolAttr narrow_range = rewriter.getBoolAttr(tf_op.getNarrowRange());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td
let summary = [{ Simulates the effect of uniform quantization with const range. }]; let description = [{ Given a const min, max, num_bits and narrow_range attribute, applies the same uniform quantization simulation as is done by the TensorFlow fake_quant_with_min_max_args op. See the fakeQuantAttrsToType() utility
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 03:10:59 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.td
let summary = [{ Simulates the effect of uniform quantization with const range. }]; let description = [{ Given a const min, max, num_bits and narrow_range attribute, applies the same uniform quantization simulation as is done by the TensorFlow fake_quant_with_min_max_args op. See the fakeQuantAttrsToType() utility
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 13 12:46:08 UTC 2022 - 10.2K bytes - Viewed (0) -
src/cmd/vendor/github.com/google/pprof/profile/encode.go
s.Label = labels } if len(numLabels) > 0 { s.NumLabel = numLabels for key, units := range numUnits { if len(units) > 0 { numUnits[key] = padStringArray(units, len(numLabels[key])) } } s.NumUnit = numUnits } } s.Location = locBuffer[:len(s.locationIDX)] locBuffer = locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 16 15:19:53 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
"""Quantizes and dequantizes hist_mids using quant_min and quant_max. Quantization converts the range of numbers from [quant_min, quant_max] to [0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and values greater than quant_max are converted to 2^num_bits - 1. The histogram represents the distribution of the data, and our goal is to find the quant_min and quant_max that best describe this distribution. To do
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 14.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
min = fakequant_op.getODSOperands(1); max = fakequant_op.getODSOperands(2); { auto target_attr = op->getAttrOfType<IntegerAttr>("num_bits"); if (!target_attr) target_attr = rewriter.getIntegerAttr(rewriter.getIntegerType(64), 8); num_bits = target_attr; } { auto target_attr = op->getAttrOfType<BoolAttr>("narrow_range");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/c/tensor_interface.h
// Release any underlying resources, including the interface object. virtual void Release() = 0; // Returns tensor dtype. virtual DataType Type() const = 0; // Returns number of dimensions. virtual int NumDims() const = 0; // Returns size of specified dimension virtual int64_t Dim(int dim_index) const = 0; // Returns number of elements across all dimensions. virtual int64_t NumElements() const = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 09 23:17:07 UTC 2021 - 2.4K bytes - Viewed (0)