- Sort Score
- Result 10 results
- Languages All
Results 111 - 120 of 291 for Quantized (0.16 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/gen_quantized_function_library.py
# See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generates the quantized function library contained header file.""" import ast import re import string from typing import Sequence from absl import app from absl import flags
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 20 01:38:06 UTC 2022 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h
// checkpoint saving and restoring. This function returns a `SaverDef` instance // with four fields populated: `version`, `filename_tensor_name`, // `restore_op_name` and `save_tensor_name`. For valid quantized `graph_def` and // `control_ret_node_names`, it should be able to retrieve the last three fields // if there is at least one variable in the graph. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:11:25 UTC 2024 - 6.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.h
#include "mlir/IR/Builders.h" // from @llvm-project namespace mlir::quant { // Calculate padding values for XLA ops. // Padding values for Uniform Quantized ops can be generated with this method as // well as it shares the same definition for padding attribute with the XLA ops. Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
func.return %dot_out : tensor<*x!tf_type.qint32> } // Quantize initial input at the start of the graph. Output is qint8. func.func @quantize_i8(%input : tensor<*xf32>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>) -> tensor<*x!tf_type.qint8> { %quantize = "tf.UniformQuantize"(%input, %input_scale, %input_zp) { Tin = "tfdtype$DT_FLOAT", Tout = "tfdtype$DT_QINT8",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"), clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED", "Uses TF Uniform Quantized ops"))}; Option<int64_t> min_num_elements_for_weights_{ *this, "min-num-elements-for-weights", llvm::cl::init(0), llvm::cl::desc("The minimum required number of elements in a weight "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize"); ASSERT_THAT(func_op, NotNull()); auto uniform_quantize_op_itr = func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>(); ASSERT_THAT( uniform_quantize_op_itr, Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>())); // `uniform_quantize` is considered partially quantized because its output is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' -quant-quantize-composite-functions='quantization-method=weight_only target-opset=XLA' -symbol-dce | FileCheck --check-prefix=PerTensor %s
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td
Constraint<CPred<"IsEinsumSupportedByXlaDotV2($0)">>; // This attribute can be used in the `AttributeList` for missing attributes. It // is necessary to keep other attributes in the same index as the quantized // composite function.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.td
"quant::QuantizationDialect"]; } def OptimizeIntGraph : Pass<"optimize-int-graph", "mlir::func::FuncOp"> { let summary = "Optimization patterns for quantized integer graph"; let description = [{ This includes patterns for merging addition of zp offset and bias. }]; let constructor = "mlir::quant::stablehlo::CreateOptimizeIntGraphPass()";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 23 01:41:18 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc
const double qmaxDouble = qmax; scale = (rmax - rmin) / (qmaxDouble - qminDouble); // Zero point computation. // In float, solve the affine equation for any known pair // (real value, corresponding quantized value), of which, two such pairs // are known: (rmin, qmin), (rmax, qmax). // The arithmetic error on the zero point computed from either pair will be // roughly machine_epsilon * (sum of absolute values of terms).
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 7.7K bytes - Viewed (0)