Results 1 - 10 of 21 for FakeQuant (0.17 sec)
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.h
    //
    // This file defines support utilities for interoperating with FakeQuant* based
    // QAT (Quantized Aware Training) computations, as implemented by TFLite. Note
    // that FakeQuant* operators mix multiple concerns specific to how TFLite
    // originally implemented quantization. As such, utilities here enforce
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 3.7K bytes - Viewed (0)
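For orientation on the entry above: the FakeQuant* ops it refers to are the float ops that QAT inserts to simulate quantization during training. A minimal runnable sketch using the public TF API; the tensor values and the min/max range are purely illustrative:

    import tensorflow as tf

    x = tf.constant([-7.0, -1.5, 0.0, 2.25, 9.0])
    # Clamps to [min, max] and snaps values to the 8-bit grid, all in float;
    # these are the ops the support utilities above derive quant params from.
    y = tf.quantization.fake_quant_with_min_max_args(
        x, min=-6.0, max=6.0, num_bits=8, narrow_range=False)
    print(y.numpy())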
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
    //   tfl.dequantize
    //        |
    //
    // Warns if the (most likely unwanted, currently not quite correctly handled)
    // case of back-to-back tf.FakeQuant occurs
    //
    //   tf.FakeQuant*
    //        |
    //   tf.FakeQuant*
    //
    template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax>
    class InsertTFLQuantOpsAfterTFFakeQuantOp {
     public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0)
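The rewrite sketched in the entry above (tf.FakeQuant* followed by a tfl.quantize / tfl.dequantize pair) is numerically sound because fake quantization is quantize-then-dequantize with the same parameters. A NumPy sketch of that equivalence, ignoring the zero-point nudging the real implementation performs; the helper below is illustrative, not the pass's API:

    import numpy as np

    def fake_quant(x, rmin, rmax, num_bits=8):
        qmin, qmax = 0, 2**num_bits - 1
        scale = (rmax - rmin) / (qmax - qmin)
        zero_point = round(qmin - rmin / scale)
        q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)  # "tfl.quantize"
        return (q - zero_point) * scale                            # "tfl.dequantize"

    print(fake_quant(np.array([-1.0, 0.1, 0.75, 3.0]), rmin=0.0, rmax=1.0))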
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
    //        |        |
    //
    // Warns if the (most likely unwanted, currently not quite correctly handled)
    // case of back-to-back tf.FakeQuant occurs
    //
    //   tf.FakeQuant*
    //        |
    //   tf.FakeQuant*
    //
    template <typename TFFakeQuantOp, bool PerAxis, class FetchMinMax>
    class ConvertFakeQuantOpToQuantOps {
     public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
diag << "Skipping reordering between FakeQuant and " << (*target_ops.begin())->getName() << ", since there are other ops using the FakeQuant result."; }); } } } return ::mlir::success(); } // Reorder the FakeQuant operation for specific ops (ReorderOp). // The transformation pattern looks like below: //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
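The diagnostic in the entry above documents the guard on this reordering: it is applied only when the op being reordered is the sole user of the FakeQuant result. A toy illustration of that condition on a hypothetical consumer map (not the MLIR pass itself):

    # Hypothetical graph: op name -> ops consuming its result.
    consumers = {
        "fake_quant": ["transpose", "identity"],  # extra user blocks reordering
        "transpose": ["conv2d"],
    }

    def can_reorder(fake_quant_op, target_op):
        # Skip the reordering when other ops also use the FakeQuant result.
        return consumers.get(fake_quant_op, []) == [target_op]

    print(can_reorder("fake_quant", "transpose"))  # False: another user exists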
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc
      Type storageType;
      int64_t qmin;
      int64_t qmax;
      if (getDefaultStorageParams(numBits, narrowRange, isSigned, ctx, storageType,
                                  qmin, qmax)) {
        return (emitError(loc, "unsupported FakeQuant number of bits: ") << numBits,
                nullptr);
      }

      // Special case where min/max is close enough. The tensor contents are all
      // 0.0s, so the scale is set to 1.0 and the tensor can be quantized to zero
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 7.7K bytes - Viewed (0)
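A rough Python rendering of the logic visible in the entry above: pick the storage range from num_bits/narrow_range/signedness, reject unsupported bit widths, and special-case a (near-)degenerate min/max range by using scale 1.0 so an all-zero tensor quantizes to zero. Only the 8-bit case is sketched, and the zero-point nudging done by the real code is omitted:

    def quant_params(rmin, rmax, num_bits=8, narrow_range=False, is_signed=False):
        if num_bits != 8:  # the real helper supports more widths; this sketch does not
            raise ValueError(f"unsupported FakeQuant number of bits: {num_bits}")
        qmin, qmax = (-128, 127) if is_signed else (0, 255)
        if narrow_range:
            qmin += 1
        if abs(rmax - rmin) < 1e-8:
            return 1.0, 0          # all-zero tensor: scale 1.0, zero point 0
        scale = (rmax - rmin) / (qmax - qmin)
        zero_point = round(qmin - rmin / scale)
        return scale, zero_point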
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td
    // Checks if a value can be symmetrically quantized.
    def CanBeSymmetricallyQuantized : Constraint<CPred<"CanBeSymmetricallyQuantized($0)">>;

    // Multiplies the value followed by a FakeQuant op and adjusts its params.
    def MultiplyFakeQuantValue : NativeCodeCall<
      "MultiplyFakeQuantValue($_builder, $_loc, $0...)">;

    // Convert AddV2Op following an AffineOp to BiasAddOp.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 8.4K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
      saved_model_proto = saved_model_loader.parse_saved_model(saved_model_path)
      for meta_graph in saved_model_proto.meta_graphs:
        if any(
            node.op.startswith('FakeQuant') for node in meta_graph.graph_def.node
        ):
          return True
        for function in meta_graph.graph_def.library.function:
          if any(node.op.startswith('FakeQuant') for node in function.node_def):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0)
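The check in the entry above can be reproduced standalone by parsing saved_model.pb directly instead of going through the internal saved_model_loader module; a sketch assuming the standard SavedModel directory layout:

    from tensorflow.core.protobuf import saved_model_pb2

    def contains_fake_quant(saved_model_path: str) -> bool:
        # Returns True if any graph or library function contains a FakeQuant* node.
        saved_model = saved_model_pb2.SavedModel()
        with open(f"{saved_model_path}/saved_model.pb", "rb") as f:
            saved_model.ParseFromString(f.read())
        for meta_graph in saved_model.meta_graphs:
            if any(node.op.startswith("FakeQuant")
                   for node in meta_graph.graph_def.node):
                return True
            for function in meta_graph.graph_def.library.function:
                if any(node.op.startswith("FakeQuant")
                       for node in function.node_def):
                    return True
        return False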
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
      // valid when the quantization parameters need to be created by scanning the
      // constant content (post-training quantization or QAT without weight
      // FakeQuant).
      bool disable_per_channel = false;

      // Whether to disable per-channel weight quantization and enable legacy per
      // tensor quantization. The legacy quantization for Dense layers is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0)
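For the entry above: when there is no weight FakeQuant, the parameters are derived by scanning the constant content, and disable_per_channel decides whether that scan yields one scale per output channel or a single scale for the whole tensor. An illustrative NumPy sketch (symmetric scales; treating the last axis as output channels is an assumption):

    import numpy as np

    def weight_scales(weights, per_channel=True, num_bits=8):
        qmax = 2 ** (num_bits - 1) - 1
        if per_channel:
            # One scale per output channel (last axis).
            max_abs = np.max(np.abs(weights), axis=tuple(range(weights.ndim - 1)))
        else:
            # Legacy per-tensor quantization: a single scale.
            max_abs = np.max(np.abs(weights))
        return max_abs / qmax

    w = np.random.randn(3, 3, 16, 32).astype(np.float32)
    print(weight_scales(w, per_channel=True).shape)   # (32,)
    print(weight_scales(w, per_channel=False))        # scalar scale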
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
      SmallVector<T> result(max_size);
      for (size_t i : llvm::seq<size_t>(0, max_size)) {
        result[i] = get_value_at(a, i) * get_value_at(b, i);
      }
      return result;
    }

    // Multiplies the value followed by a FakeQuant op and adjusts the quantization
    // params. This function only supports symmetrically quantized values.
    Value MultiplyFakeQuantValue(OpBuilder& builder, Location loc, Value value,
                                 Value multiplier) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0)
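Why the adjustment in the entry above only needs to touch the quantization params: with symmetric quantization the zero point is 0, so multiplying the dequantized value by a constant is the same as folding that constant into the scale. A small numerical check with illustrative values:

    import numpy as np

    q = np.array([-12, 0, 37], dtype=np.int8)      # symmetric => zero point 0
    scale, multiplier = 0.02, 1.7
    mul_after_dequant = (q.astype(np.float32) * scale) * multiplier
    folded_into_scale = q.astype(np.float32) * (scale * multiplier)
    print(np.allclose(mul_after_dequant, folded_into_scale))  # True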
tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
    // can be parsed by the pass option.
    constexpr char kDefaultMode[] = "DEFAULT";
    constexpr char kLegacyIntegerMode[] = "LEGACY_INTEGER";

    // Checks if the operation is a TF FakeQuant op.
    bool IsTfFakeQuantOp(Operation *op) {
      return llvm::isa<
          // clang-format off
          TF::FakeQuantWithMinMaxArgsOp,
          TF::FakeQuantWithMinMaxVarsOp,
          TF::FakeQuantWithMinMaxVarsPerChannelOp
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.2K bytes - Viewed (0)
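A GraphDef-level analogue of the IsTfFakeQuantOp check in the entry above, using the three TF op names the llvm::isa<> list corresponds to (a sketch, not the pass's API):

    _TF_FAKE_QUANT_OPS = frozenset({
        "FakeQuantWithMinMaxArgs",
        "FakeQuantWithMinMaxVars",
        "FakeQuantWithMinMaxVarsPerChannel",
    })

    def is_tf_fake_quant_node(node_def) -> bool:
        # node_def: a tensorflow NodeDef taken from a GraphDef.
        return node_def.op in _TF_FAKE_QUANT_OPS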