- Sort Score
- Results per page: 10
- Languages All
Results 1 - 9 of 9 for dequantize_i8 (0.45 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8) { // Uniform quantized opset is not supported for weight-only as inputs for // weight quantization are floats. And only dequantize_i8 is used from the // quantized function library. function_library_map = { {OpSet::TF, kQuantizedFunctionLibraryInMLIR}, {OpSet::XLA, kQuantizedFunctionLibraryInMLIR_XLA_WEIGHT_ONLY}}; } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
using ::tensorflow::quantization::OpSet; constexpr absl::string_view kQuantizeCompositeFunctionsStepName = "_quantize_composite_functions"; constexpr StringRef kQuantizeFuncName = "quantize_i8"; constexpr StringRef kDequantizeFuncName = "dequantize_i8"; constexpr StringRef kAttrMapAttribute = "attr_map"; constexpr StringRef kQuantizedOpsAttribute = "tf_quant.quantized_ops"; constexpr StringRef kCompositeFuncPrefix = "composite_";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
: OpRewritePattern<DequantizeOp>(context) {} LogicalResult matchAndRewrite(DequantizeOp dequantize_op, PatternRewriter& rewriter) const override { if (!dequantize_op->hasOneUse()) return failure(); auto use = dequantize_op->use_begin(); Operation* passthrough_op = use->getOwner(); unsigned operand_index = use->getOperandNumber();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
output_type.print(llvm::errs() << "Requested output type "); dequantize_op.emitError(" Couldn't be modified to the requested type."); return failure(); } new_output_types[i] = returned_type; terminator->setOperand(i, returned_value); if (dequantize_op.use_empty()) { dequantize_op.erase(); } } } return success(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
: OpRewritePattern<RootOpT>(context, /*benefit=*/300) {} private: // Collects all candidate ops for quantization, which are the // `dequantize_op`'s users. FailureOr<SmallVector<Operation*>> CollectCandidateOps( DequantizeOpT dequantize_op) const { auto users = dequantize_op->getResult(0).getUsers(); return SmallVector<Operation*>(users.begin(), users.end()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
mlir::quant::QuantizedType::castToExpressedType(input_type); builder->setInsertionPoint(op); auto dequantize_op = builder->create<TFL::DequantizeOp>( op->getLoc(), dequantized_input_type, input.get()); dequantized_inputs.push_back(dequantize_op); } else { dequantized_inputs.push_back(input.get()); } } // Result types.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc
auto quantize_op = FindOperationOfType<quantfork::QuantizeCastOp>(test_func); EXPECT_FALSE(IsOpQuantizableStableHlo(quantize_op)); auto dequantize_op = FindOperationOfType<quantfork::DequantizeCastOp>(test_func); EXPECT_FALSE(IsOpQuantizableStableHlo(dequantize_op)); } TEST_F(IsOpQuantizableStableHloTest, XlaCallModuleOpQuantizableWhenNotDenylisted) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
Operation* returned_op = returned_value.getDefiningOp(); if (returned_op && returned_op->hasOneUse() && llvm::isa<DequantizeOp>(returned_op)) { auto dequantize_op = llvm::cast<DequantizeOp>(returned_op); Value dequantized_result = dequantize_op.getInput(); output_types.push_back(dequantized_result.getType()); terminator->setOperand(i, dequantized_result); returned_op->erase(); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
} void runOnOperation() override; }; // Propagate dequantize op if the next op supports the data type. // Given the below graph, // op_before_dequantize -> dequantize_op -> user_op -> rest_op // the transformation is applied to result the following graph: // op_before_dequantize -> user_op -> new_dequantize_op -> rest_op class PropagateDequantizeOpIfAllowed
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0)