- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 21 for Quantized (0.23 sec)
-
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
union QuantizationDetails { CustomQuantization, } // Parameters for converting a quantized tensor back to float. table QuantizationParameters { // These four parameters are the asymmetric linear quantization parameters. // Given a quantized value q, the corresponding float value f should be: // f = scale * (q - zero_point)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/passes.h
// TensorFlow. std::unique_ptr<OperationPass<ModuleOp>> CreatePrepareTpuComputationForTfExportPass(); // Rewrites ops that require quantized inputs or outputs to ops that allow // non-quantized inputs and outputs. std::unique_ptr<OperationPass<func::FuncOp>> CreateLowerQuantizedPass(); // Reorders ops so ops of the same dialect are next to each other.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 31.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
//===----------------------------------------------------------------------===// def DequantizeHalfRange : NativeCodeCall< "DequantizeHalfRange(&$_builder, $0)">; // TODO(b/188530181): Generalize to more quantized input types, // allow num_slices > 1, and allow non default arguments for $mode, // $narrow_range, and $axis. def LowerDequantizeOp : Pat< (TF_DequantizeOp:$result $input, $min_range, $max_range,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/quantize.mlir
// RUN: tf-opt %s -tfl-prepare-quantize -tfl-quantize | FileCheck %s // RUN: tf-opt %s -tfl-quantize="legacy-quantize=true" | FileCheck --check-prefix=LEGACY %s // RUN: tf-opt %s -tfl-prepare-quantize -tfl-quantize="ops-blocklist=tfl.fully_connected,tfl.softmax locs-blocklist=Block,NullBlock" | FileCheck --check-prefix=BLOCK %s // CHECK-LABEL: QuantizeFloatConst func.func @QuantizeFloatConst() -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 39.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc
// attribute which maps an attribute identifier to its attribute name. The // identifier is the order of that attribute in `attributes`. This map // is then used to set attributes in the quantized functions in the // QuantizeCompositeFunctionsPass. // For example, for tf.MatMul with `attributes` = {{"transpose_a", false}, // {"transpose_b", false}}, the generated attr_map is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir
// RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range -tfl-quantize="enable-dynamic-range-quantization=true" | FileCheck %s // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range -tfl-quantize="enable-dynamic-range-quantization=true enable-weight-only-quantization=true" | FileCheck --check-prefix=PerChannelWeightOnly %s
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 23 21:09:00 UTC 2024 - 23.2K bytes - Viewed (0) -
src/image/gif/writer.go
type Options struct { // NumColors is the maximum number of colors used in the image. // It ranges from 1 to 256. NumColors int // Quantizer is used to produce a palette with size NumColors. // palette.Plan9 is used in place of a nil Quantizer. Quantizer draw.Quantizer // Drawer is used to convert the source image to the desired palette. // draw.FloydSteinberg is used in place of a nil Drawer. Drawer draw.Drawer
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 21:38:09 UTC 2024 - 11.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
if (IsCallToQuantizableLiftedFunction(op)) { std::optional<StringRef> composite_function_name = GetCompsiteFunctionName(op); if (!composite_function_name.has_value()) return failure(); // Quantize inputs of quantizable composite functions. for (OpOperand &input : op->getOpOperands()) { Type element_type = getElementTypeOrSelf(input.get().getType()); // Non-float cases won't be calibrated.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/BUILD
"passes/prepare_quantize.inc", "passes/prepare_quantize_drq.cc", "passes/preprocess_op.cc", "passes/preprocess_op.inc", "passes/propagate_quantize_type.cc", "passes/quantize.cc", "passes/quantize_composite_functions.cc", "passes/quantize_composite_functions.inc", "passes/quantize_weights.cc", "passes/quantized_function_library.h",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 22:58:42 UTC 2024 - 21.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s module attributes {tf_saved_model.semantics} { // CHECK-LABEL: same_scale_after_composite // CHECK-SAME: %[[ARG0:.*]]: tensor<1x2xf32> // CHECK-SAME: %[[ARG1:.*]]: tensor<2x3xf32> func.func private @same_scale_after_composite(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<3x1xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 35.4K bytes - Viewed (0)