- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 11 for quantize_i8 (0.14 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
using ::tensorflow::quantization::OpSet; constexpr absl::string_view kQuantizeCompositeFunctionsStepName = "_quantize_composite_functions"; constexpr StringRef kQuantizeFuncName = "quantize_i8"; constexpr StringRef kDequantizeFuncName = "dequantize_i8"; constexpr StringRef kAttrMapAttribute = "attr_map"; constexpr StringRef kQuantizedOpsAttribute = "tf_quant.quantized_ops"; constexpr StringRef kCompositeFuncPrefix = "composite_";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/components/post_calibration_component.mlir
// CHECK-NO-UNPACK: %[[QUANTIZE_0:.+]] = stablehlo.uniform_quantize %[[ARG_0]] : (tensor<1x1024xf32>) -> tensor<1x1024x!quant.uniform<i8:f32, {{.*}}>> // CHECK-NO-UNPACK: %[[DOT:.+]] = stablehlo.dot_general %[[QUANTIZE_0]], %[[CONST]]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
quantize_op.setOperand(new_arg); } else { input_type.print(llvm::errs() << "Requested input type "); quantize_op.emitError(" Couldn't be modified to the requested type."); return failure(); } new_input_types[i] = arg_type; arg.dropAllUses(); if (quantize_op.use_empty()) { quantize_op.erase(); } } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
int bit_width = quant_specs_.GetQuantizationTypeWidth(); Operation* quantize_op = quant_op.first; int quantize_operand_num = quant_op.second; auto affine_user = dyn_cast<AffineQuantizedOpInterface>(quantize_op); bool op_with_per_axis_support = false; if (!llvm::dyn_cast_or_null<CustomOp>(quantize_op)) { bool op_with_narrow_range = affine_user &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
auto arg = bb.getArgument(0); auto remove_quantize_op = [&](QuantizeOp quantize_op) { auto quantize_output = quantize_op.getOutput(); auto quantize_type = quantize_output.getType(); input_types.push_back(quantize_type); auto new_arg = bb.addArgument(quantize_type, loc); quantize_output.replaceAllUsesWith(new_arg); quantize_op.erase(); arg.dropAllUses(); bb.eraseArgument(0); };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
} // Collects all candidate ops for quantization, which is the operand of // `quantize_op`. If successful, this always returns one element which is the // operand of `quantize_op`. FailureOr<SmallVector<Operation*>> CollectCandidateOps( QuantizeOpT quantize_op) const { Value operand = quantize_op->getOperand(0); if (QuantizedType::getQuantizedElementType(operand.getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
bool insertQDQ(PatternRewriter& rewriter, arith::ConstantOp op, QuantizedType quant_type, QuantizationUnit quant_op) const { if (!quant_type) return false; Operation* quantize_op = quant_op.first; int quantize_operand_num = quant_op.second; Type expressed_type = op.getResult().getType(); Type cast_type = quant_type.castFromExpressedType(expressed_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc
auto test_func = module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite"); ASSERT_THAT(test_func, NotNull()); auto quantize_op = FindOperationOfType<quantfork::QuantizeCastOp>(test_func); EXPECT_FALSE(IsOpQuantizableStableHlo(quantize_op)); auto dequantize_op = FindOperationOfType<quantfork::DequantizeCastOp>(test_func); EXPECT_FALSE(IsOpQuantizableStableHlo(dequantize_op)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8) { // Uniform quantized opset is not supported for weight-only as inputs for // weight quantization are floats. And only dequantize_i8 is used from the // quantized function library. function_library_map = { {OpSet::TF, kQuantizedFunctionLibraryInMLIR}, {OpSet::XLA, kQuantizedFunctionLibraryInMLIR_XLA_WEIGHT_ONLY}}; } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
// For not quantizable ops, search for dequantize attached to the // quantized op of the output. if (Operation* quantize_op = dyn_cast_or_null<QuantizeOpT>( *quantized_op->getResult(i).getUsers().begin())) { result = quantize_op->getResult(0); } else { quantized_op->emitError() << "Output[" << i
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0)