- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 16 for FakeQuant (0.3 sec)
-
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
// tfl.dequantize // | // // // Warns if the (most likely unwanted, currently not quite correctly handled) // case of back-to-back tf.FakeQuant occurs // // tf.FakeQuant* // | // tf.FakeQuant* // template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax> class InsertTFLQuantOpsAfterTFFakeQuantOp { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// | | // // Warns if the (most likely unwanted, currently not quite correctly handled) // case of back-to-back tf.FakeQuant occurs // // tf.FakeQuant* // | // tf.FakeQuant* // template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax> class ConvertFakeQuantOpToQuantOps { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
diag << "Skipping reordering between FakeQuant and " << (*target_ops.begin())->getName() << ", since there are other ops using the FakeQuant result."; }); } } } return ::mlir::success(); } // Reorder the FakeQuant operation for specific ops (ReorderOp). // The transformation pattern looks like below: //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
saved_model_proto = saved_model_loader.parse_saved_model(saved_model_path) for meta_graph in saved_model_proto.meta_graphs: if any( node.op.startswith('FakeQuant') for node in meta_graph.graph_def.node ): return True for function in meta_graph.graph_def.library.function: if any(node.op.startswith('FakeQuant') for node in function.node_def):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
SmallVector<T> result(max_size); for (size_t i : llvm::seq<size_t>(0, max_size)) { result[i] = get_value_at(a, i) * get_value_at(b, i); } return result; } // Multiplies the value followed by a FakeQuant op and adjusts the quantization // params. This function only supports symmetrically quantized values. Value MultiplyFakeQuantValue(OpBuilder& builder, Location loc, Value value, Value multiplier) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
// can be parsed by the pass option. constexpr char kDefaultMode[] = "DEFAULT"; constexpr char kLegacyIntegerMode[] = "LEGACY_INTEGER"; // Checks if the operation is TF FakeQuant ops. bool IsTfFakeQuantOp(Operation *op) { return llvm::isa< // clang-format off TF::FakeQuantWithMinMaxArgsOp, TF::FakeQuantWithMinMaxVarsOp, TF::FakeQuantWithMinMaxVarsPerChannelOp
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// TODO(b/204265523): Removes this pass after the exporting MLIR to SavedModel // path is available. std::unique_ptr<OperationPass<ModuleOp>> CreateInsertMainFunctionPass(); // Converts FakeQuant ops to quant.qcast and quant.dcast (QDQ) pairs. std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertFakeQuantToQdqPass(); // Lifts the quantizable spots as composite functions. std::unique_ptr<OperationPass<ModuleOp>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
// it. void AddPreVariableFreezingTFToTFLConversionPasses( const mlir::TFL::PassConfig& pass_config, mlir::OpPassManager* pass_manager) { // This pass wraps all the tf.FakeQuant ops in a custom op so they are not // folded before being converted to tfl.quantize and tfl.dequantize ops. auto wrapped_ops = mlir::TFL::AllTfFakeQuantOps(); pass_manager->addNestedPass<mlir::func::FuncOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
(Arith_ConstantOp ConstantAttr<RankedF32ElementsAttr<[]>, "0.166666666f">), TFL_AF_None), $x, TFL_AF_None), (TFL_HardSwishOp $x)>; // Matching HardSwish with extra FakeQuant. These FakeQuant ops were due to // incorrect placement in the quantization aware training. def MatchHardSwishQuantized : Pat< (TFL_MulOp (TFL_DequantizeOp (TFL_QuantizeOp (TFL_MulOp
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
!dyn_cast<quantfork::QuantizeCastOp>(user)) { // Needs to scan the content of weights to get the quantization // parameters if there are no quantization parameters (FakeQuant ops). // For this case, the weight will not be duplicated. weights_.insert(cst); if (spec->coeff_op_quant_dim.find(operand_num) != spec->coeff_op_quant_dim.end()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0)