- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 40 for numBits (0.24 sec)
-
tensorflow/c/eager/c_api.cc
if (h == nullptr) { status->status = tensorflow::errors::InvalidArgument("Invalid handle"); return -1; } int num_dims = -1; status->status = tensorflow::unwrap(h)->NumDims(&num_dims); return num_dims; } int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h, TF_Status* status) { if (h == nullptr) { status->status = tensorflow::errors::InvalidArgument("Invalid handle");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
} else { quant_type = mlir::dyn_cast<quant::UniformQuantizedType>( quant::GetUniformQuantizedTypeForWeight( attr, /*symmetric=*/true, /*num_bits=*/tensor_property.number_of_bits, /*is_signed=*/true, /*narrow_range=*/true, quant_specs_.legacy_float_scale)); } if (!quant_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
* `[min; max]` define the clamping range for the `inputs` data. * `inputs` values are quantized into the quantization range ( `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in `[min; max]` interval. * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. Before quantization, `min` and `max` values are adjusted with the following
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
_QuantizationComponent.COMPONENT_ACTIVATION ].tensor_type ) # Unlike the HISTOGRAM_PERCENTILE method, the HISTOGRAM_MSE method uses # num_bits because it actually quantizes and dequantizes values. if activation_tensor_type != _TensorType.TENSORTYPE_INT_8: raise ValueError( 'Only TENSORTYPE_INT_8 is supported for HISTOGRAM_MSE calibration'
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir
%7 = "tf.FakeQuantWithMinMaxVarsPerChannel"(%arg1, %arg2, %arg3) {device = "", narrow_range = false, num_bits = 8 : i64} : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> "tfl.yield"(%7) : (tensor<*xf32>) -> () }) {device = "", narrow_range = false, num_bits = 8 : i64} : (tensor<3x4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<3x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
auto element_ty = input_ty.getElementType(); auto scalar_ty = tensorflow::GetTypeFromTFTensorShape({}, element_ty); auto num_bits = op.getNumBits(); auto narrow_range = op.getNarrowRange(); const double bits_min = narrow_range ? 1 : 0; const double bits_max = (1 << num_bits) - 1; auto float_min = op.getMin(); auto float_max = op.getMax();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_generated.h
} void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); } void add_num_bits(int32_t num_bits) { fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); } void add_narrow_range(bool narrow_range) { fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), 0); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 1M bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/ops.mlir
^bb0(%arg0: tensor<? x f32>, %arg1: f32, %arg2: f32): // CHECK: "tfl.fake_quant"(%arg0) <{max = 1.400000e+00 : f32, min = 3.000000e-01 : f32, narrow_range = false, num_bits = 6 : i32}> : (tensor<?xf32>) -> tensor<?xf32> %1 = "tfl.fake_quant"(%arg0) {num_bits = 6 : i32, narrow_range = false, min = 0.3:f32, max = 1.4:f32} : (tensor<? x f32>) -> tensor<? x f32> func.return %1 : tensor<? x f32> } // CHECK-LABEL: testQuantize
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
func.return %0 : tensor<?x?xf32> // CHECK: %[[QUANT:.*]] = "tf.QuantizeAndDequantizeV4"(%arg0, %arg1, %arg2) <{axis = -1 : i64, narrow_range = false, num_bits = 8 : i64, range_given = false, round_mode = "HALF_TO_EVEN", signed_input = true}> {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<?x?xf32>, tensor<f32>, tensor<f32>) -> tensor<?x?xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
F32Attr:$min, F32Attr:$max, // The bitwidth of the quantization; between 2 and 16, inclusive. ConfinedAttr<I32Attr, [IntMinValue<2>, IntMaxValue<16>]>:$num_bits, // Quantization range starts from 0 or 1; starts from 1 if true. ConfinedAttr<BoolAttr, [TFL_BoolFalse]>:$narrow_range); let results = (outs TFL_FpTensor:$output); let hasCanonicalizer = 0b1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0)