- Sort Score
- Results per page 10 results
- Languages All
Results 1 - 10 of 15 for QuantizationUnit (0.2 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/cc/quantization_unit_loc.h
class QuantizationUnitLoc : public CallSiteLoc { public: using QuantizationUnit = tensorflow::quantization::UnitWiseQuantizationSpec::QuantizationUnit; MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizationUnitLoc) QuantizationUnitLoc(MLIRContext* context, const QuantizationUnit& unit); // Checks if the given location is QuantizationUnitLoc. Users could call
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 03 02:39:10 UTC 2023 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/cc/quantization_unit_loc.cc
#include "mlir/Support/LLVM.h" // from @llvm-project namespace mlir { namespace quant { namespace { // Prefix and suffix to the QuantizationUnit string representation. constexpr std::string_view kQuantizationUnitPrefix = "QuantizationUnit("; constexpr std::string_view kQuantizationUnitSuffix = ")"; // Concatenates node name and func name with a "@" separator.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_quantization_unit_loc.cc
} else { for (Location child_loc : locations) { FindQuantizationUnitsRecursively(child_loc, units); } } } // Finds the QuantizationUnit from location. std::optional<QuantizationUnit> FindQuantizationUnit(Operation* op) { SmallVector<QuantizationUnit> quant_units; FindQuantizationUnitsRecursively(op->getLoc(), quant_units); if (quant_units.size() == 1) { return *quant_units.begin(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_quantization_unit_loc.mlir
%3 = "tf.IdentityN"(%2) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32> return %3 : tensor<1x3x2x2xf32> // CHECK: tf.Conv2D // CHECK-SAME: loc(callsite("Model/conv2d@conv2d_with_valid_loc"("Conv2D") at "QuantizationUnit({{.*}})")) } func.func @conv2d_with_callsite_loc(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x3x2x2xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 03 02:39:10 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla_selective_quantization.mlir
// CHECK-SAME: _tfl_quant_trait = "fully_quantizable" // CHECK-SAME: loc(callsite("Model/conv2d@conv2d_unmatching_unit"("Conv2D") at "QuantizationUnit({{.*}})")) // ----- module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1269 : i32}} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc
} // `DumpTensor` op saves entire value of input as a tensor proto into a // specified directory and filename. When enabled is set to false, op is // disabled and won't save any value. It also creates `QuantizationUnit` proto // with `func_name` and `node_name` to identify the op. REGISTER_OP("DumpTensor") .Input("tensor_data: T") .Attr("log_dir_path: string") .Attr("file_name: string") .Attr("T: type")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 03:12:17 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
//===----------------------------------------------------------------------===// // The prepare-quantize-drq Pass. // namespace mlir { namespace quant { namespace { using QuantizationUnit = std::pair<Operation*, int>; using QuantizationUnits = llvm::SetVector<QuantizationUnit>; using ::tensorflow::quantization::OpSet; // Applies prepare quantization on the model in TF dialect for dynamic range // quantization case. class PrepareQuantizeDRQPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
%1 = "tf.PartitionedCall"(%arg0, %cst, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_with_bias_and_relu6_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x2x3x2xf32>, tensor<2xf32>) -> tensor<*xf32> loc(callsite("test@conv"("Conv2D_1") at "QuantizationUnit(\12\08Conv2D_1\1a\04conv)"))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace quant { namespace { using QuantizationUnit = ::tensorflow::quantization::UnitWiseQuantizationSpec::QuantizationUnit; using ::tensorflow::quantization::OpSet; using ::tensorflow::quantization::QuantizationComponentSpec; using ::tensorflow::quantization::QuantizationMethod;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
// namespace mlir { namespace quant { namespace { using QuantMethod = ::tensorflow::quantization::QuantizationMethod::PresetMethod; using QuantizationUnit = std::pair<Operation*, int>; using QuantizationUnits = llvm::SetVector<QuantizationUnit>; using ::tensorflow::quantization::OpSet; // Preprocesses ops to allow multi-axis quantization, prior to quantization
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0)