- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 12 for QuantizationUnit (0.28 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/cc/quantization_unit_loc.cc
#include "mlir/Support/LLVM.h" // from @llvm-project namespace mlir { namespace quant { namespace { // Prefix and suffix to the QuantizationUnit string representation. constexpr std::string_view kQuantizationUnitPrefix = "QuantizationUnit("; constexpr std::string_view kQuantizationUnitSuffix = ")"; // Concatenates node name and func name with a "@" separator.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_quantization_unit_loc.cc
} else { for (Location child_loc : locations) { FindQuantizationUnitsRecursively(child_loc, units); } } } // Finds the QuantizationUnit from location. std::optional<QuantizationUnit> FindQuantizationUnit(Operation* op) { SmallVector<QuantizationUnit> quant_units; FindQuantizationUnitsRecursively(op->getLoc(), quant_units); if (quant_units.size() == 1) { return *quant_units.begin(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc
} // `DumpTensor` op saves the entire value of input as a tensor proto into a // specified directory and filename. When enabled is set to false, op is // disabled and won't save any value. It also creates `QuantizationUnit` proto // with `func_name` and `node_name` to identify the op. REGISTER_OP("DumpTensor") .Input("tensor_data: T") .Attr("log_dir_path: string") .Attr("file_name: string") .Attr("T: type")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 03:12:17 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
//===----------------------------------------------------------------------===// // The prepare-quantize-drq Pass. // namespace mlir { namespace quant { namespace { using QuantizationUnit = std::pair<Operation*, int>; using QuantizationUnits = llvm::SetVector<QuantizationUnit>; using ::tensorflow::quantization::OpSet; // Applies prepare quantization on the model in TF dialect for dynamic range // quantization case. class PrepareQuantizeDRQPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
%1 = "tf.PartitionedCall"(%arg0, %cst, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_with_bias_and_relu6_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x2x3x2xf32>, tensor<2xf32>) -> tensor<*xf32> loc(callsite("test@conv"("Conv2D_1") at "QuantizationUnit(\12\08Conv2D_1\1a\04conv)"))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace quant { namespace { using QuantizationUnit = ::tensorflow::quantization::UnitWiseQuantizationSpec::QuantizationUnit; using ::tensorflow::quantization::OpSet; using ::tensorflow::quantization::QuantizationComponentSpec; using ::tensorflow::quantization::QuantizationMethod;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
// namespace mlir { namespace quant { namespace { using QuantMethod = ::tensorflow::quantization::QuantizationMethod::PresetMethod; using QuantizationUnit = std::pair<Operation*, int>; using QuantizationUnits = llvm::SetVector<QuantizationUnit>; using ::tensorflow::quantization::OpSet; // Preprocesses ops to allow multi-axis quantization, prior to quantization
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// contains the scope of the unit, and the quantization method for each unit. // NEXT ID: 7 message UnitWiseQuantizationSpec { // Quantization unit granularity. // NEXT ID: 4 message QuantizationUnit { // Type of the op, ex: Conv2D, MatMul, Einsum... The node_name field can // be omitted if it is intended to match all nodes with this type. string op_type = 1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
// filled and node_name is always set to "_empty_node". std::pair<std::string, std::string> GetFuncNameAndNodeName( TF::PartitionedCallOp call_op, const FlatSymbolRefAttr &f_attr) { std::optional<QuantizationUnitLoc::QuantizationUnit> quant_unit = FindQuantizationUnitFromLoc(call_op->getLoc()); return std::make_pair(quant_unit->func_name(), quant_unit->node_name()); } std::pair<std::string, std::string> GetFuncNameAndNodeName(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 22.9K bytes - Viewed (0)