- Sort Score
- Result 10 results
- Languages All
Results 121 - 130 of 323 for quantized (3.29 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h
// checkpoint saving and restoring. This function returns a `SaverDef` instance // with four fields populated: `version`, `filename_tensor_name`, // `restore_op_name` and `save_tensor_name`. For valid quantized `graph_def` and // `control_ret_node_names`, it should be able to retrieve the last three fields // if there is at least one variable in the graph. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:11:25 UTC 2024 - 6.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
func.return %dot_out : tensor<*x!tf_type.qint32> } // Quantize initial input at the start of the graph. Output is qint8. func.func @quantize_i8(%input : tensor<*xf32>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>) -> tensor<*x!tf_type.qint8> { %quantize = "tf.UniformQuantize"(%input, %input_scale, %input_zp) { Tin = "tfdtype$DT_FLOAT", Tout = "tfdtype$DT_QINT8",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize"); ASSERT_THAT(func_op, NotNull()); auto uniform_quantize_op_itr = func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>(); ASSERT_THAT( uniform_quantize_op_itr, Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>())); // `uniform_quantize` is considered partially quantized because its output is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.h
#include "mlir/IR/Builders.h" // from @llvm-project namespace mlir::quant { // Calculate padding values for XLA ops. // Padding values for Uniform Quantized ops can be generated with this method as // well as it shares the same definition for padding attribute with the XLA ops. Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' -quant-quantize-composite-functions='quantization-method=weight_only target-opset=XLA' -symbol-dce | FileCheck --check-prefix=PerTensor %s
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"), clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED", "Uses TF Uniform Quantized ops"))}; Option<int64_t> min_num_elements_for_weights_{ *this, "min-num-elements-for-weights", llvm::cl::init(0), llvm::cl::desc("The minimum required number of elements in a weight "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/variables_utils.cc
auto complex_element_type = complex_type.getElementType(); if (complex_element_type.isF32() || complex_element_type.isF64()) return true; } // Check quantized types. if (auto quant_type = element_type.dyn_cast<mlir::quant::QuantizedType>()) { // TFLite supports QI16, QI32, QI8, and QUI8 if ((quant_type.getStorageTypeIntegralWidth() == 16 && quant_type.isSigned()) ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jun 21 19:32:03 UTC 2021 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
quantize_op.erase(); arg.dropAllUses(); bb.eraseArgument(0); }; // This is looking for a pattern: arg -> tfl.quantize if (arg.hasOneUse() && llvm::isa<QuantizeOp>(*arg.user_begin())) { auto quantize_op = llvm::cast<QuantizeOp>(*arg.user_begin()); remove_quantize_op(quantize_op); continue; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/g3doc/dialects.md
Dialects can define entirely custom types, which is how MLIR can model things like the LLVM IR type system (which has first class aggregates), domain abstractions important for ML-optimized accelerators like quantized types, and even the Swift or Clang type systems (which are built around Swift/Clang declaration nodes) in the future. If you want to connect a new low-level compiler, you would create a new dialect
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 21 01:37:38 UTC 2020 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td
Constraint<CPred<"IsEinsumSupportedByXlaDotV2($0)">>; // This attribute can be used in the `AttributeList` for missing attributes. It // is necessary to keep other attributes in the same index as the quantized // composite function.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 3.4K bytes - Viewed (0)