- Sort: Score
- Results per page: 10
- Languages: All
Results 51 - 60 of 279 for quantized (0.24 sec)
-
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
Eq(TensorType_INT8)); // Verify FC bias should be int32 quantized. ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(), Eq(TensorType_FLOAT32)); EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type, Eq(TensorType_INT32)); // The output tensor of FC should be int8 quantized. ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc
} } elem_type = mlir::TF::VariantType::get(tensor_types, builder.getContext()); } if (IsQuantized(tensor) && !get_storage) { TF_ASSIGN_OR_RETURN(elem_type, GetQuantizedType(tensor, builder, is_constant)); } else if (IsQuantized(tensor) && get_storage) { // If the type is quantized we strip the signedness from the storage type. elem_type = mlir::IntegerType::get(elem_type.getContext(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 16.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.td
include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.td" // Re-orders the Identity op following a quantized composite function. This // allows the QuantizeCompositeFunctionsPass to merge the DequantizeCast with // the quantized composite function to optimize the requantization part. def ReorderIdentityFollowingQuantizedFunction : Pat< (quantfork_DequantizeCastOp:$output
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/post_calibration.h
namespace mlir::quant::stablehlo { // Performs post-calibration graph transformation as part of post-training // static-range quantization. // // The resulting `ModuleOp` contains quantized StableHLO ops serialized in // `TF::XlaCallModuleOp`s. They are quantized using the statistics collected // after the calibration step, corresponding to each `TF::CustomAggregatorOp`s // in the input module op. class PostCalibrationComponent : public Component {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantizeUtils.cc
} return nullptr; } /// Converts a real expressed DenseFPElementsAttr to a corresponding /// DenseElementsAttr (typically DenseIntElementsAttr) containing quantized /// storage values assuming the given quantizedElementType and converter. static DenseElementsAttr convertDenseFPElementsAttr( DenseFPElementsAttr realFPElementsAttr, quant::QuantizedType quantizedElementType,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/testing/passes.td
Runs the post-calibration passes for post-training quantization. }]; let options = [ Option<"unpack_quantized_types_", "unpack-quantized-types", "bool", /*default=*/"true", "Unpacks ops with uniform quantized types into " "operations without uniform quantized types (mostly i8 or i32)."> ]; let dependentDialects = [ "mlir::stablehlo::StablehloDialect", "mlir::TF::TensorFlowDialect",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 28 23:21:42 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// RUN: tf-quant-opt %s -quant-insert-quantized-functions | FileCheck %s // RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=ptq target-opset=UNIFORM_QUANTIZED' --mlir-print-ir-after-all | FileCheck --check-prefix=UQ-CHECK %s // Empty module module { func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> { func.return %arg0 : tensor<*xf32> } } // CHECK-NOT: func private @internal_rescale_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/decompose.cc
std::min(quantized, static_cast<int>(std::numeric_limits<int8_t>::max())); quantized = std::max(quantized, static_cast<int>(std::numeric_limits<int8_t>::min())); return builder.getI32IntegerAttr(quantized); } // Decompose the TF ops with the registered composition library. class DecomposeTFOpsPass : public PassWrapper<DecomposeTFOpsPass, OperationPass<func::FuncOp>> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h
namespace mlir::quant::stablehlo { // Performs int8 weight-only quantization on dot_general ops. // // The resulting `ModuleOp` contains quantized StableHLO ops serialized in // `TF::XlaCallModuleOp`s. They are quantized using the weight constants, not // relying on calibration. class WeightOnlyPtqComponent : public Component { public: // Used for debugging purposes.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 12:18:22 UTC 2024 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc
if (method.ok() && method->has_static_range_ptq()) { // TODO: b/331145946 - Use `Method` accessors. const StaticRangePtq& static_range_ptq_spec = method->static_range_ptq(); // Look for quantized dimension specs for each quantized type and // populate `coeff_op_quant_dim`. for (const auto& [operand_idx, quantized_type] : static_range_ptq_spec.input_quantized_types()) { if (quantized_type.has_dimension_specs()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 7.8K bytes - Viewed (0)