- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 370 for quantization (0.2 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/python/quantization.py
"""StableHLO Quantizer.""" from typing import Mapping from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as qc from tensorflow.compiler.mlir.quantization.stablehlo.python import pywrap_quantization from tensorflow.compiler.mlir.quantization.tensorflow.python import py_function_lib from tensorflow.compiler.mlir.quantization.tensorflow.python import save_model from tensorflow.core.protobuf import meta_graph_pb2
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td
!interleave([bias, op1, op2], ", "), ">::Impl")>; // Specify the operand index of the coefficient operand for an affine op // and also the quantization dimension if per-axis quantization is supported. // If the quantization dimension is -1, per-axis quantization isn't supported. class AffineOpCoefficient<int dim, int index> : NativeOpTrait< !strconcat("quant::AffineOpCoefficient<", !interleave([dim, index], ", "),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir
Christian Sigg <******@****.***> 1714640622 -0700
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 10:49:12 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h" namespace tensorflow { // Runs quantization on `module_op`. `saved_model_bundle` is required to // retrieve information about the original model (e.g. signature def mapping) // because quantization requires exporting the intermediate `ModuleOp` back to
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 02:44:03 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir
A. Unique TensorFlower <******@****.***> 1704479080 -0800
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-skip-quantization-ops.mlir
// RUN: odml-to-stablehlo-opt %s --tf-stablehlo=skip-quantization-ops=true | FileCheck %s --check-prefix=CHECK-SKIP // RUN: odml-to-stablehlo-opt %s --tf-stablehlo=skip-quantization-ops=false | FileCheck %s --check-prefix=CHECK-NOSKIP func.func @fake_quant_with_min_max_vars(%arg0: tensor<1x1x28x48xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x28x48xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 14 07:38:29 UTC 2022 - 676 bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// activation of static range quantization (SRQ). Quantization calibration // method is set to MIN_MAX by default. stablehlo.quantization.CalibrationOptions calibration_options = 15; // Configuration related to quantization debugger. stablehlo.quantization.DebuggerConfig debugger_config = 16; reserved 3;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
"array to apply quantization.")}; Option<QuantMethod> quantization_method_{ *this, "quantization-method", llvm::cl::init(tensorflow::quantization::QuantizationMethod:: METHOD_DYNAMIC_RANGE_INT8), llvm::cl::desc("Choose quantization method."), llvm::cl::values( clEnumValN(tensorflow::quantization::QuantizationMethod::
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
llvm::cl::values( clEnumValN(tensorflow::quantization::QuantizationMethod:: METHOD_STATIC_RANGE_INT8, "ptq", "Post-training static-range quantization"), clEnumValN(tensorflow::quantization::QuantizationMethod:: METHOD_DYNAMIC_RANGE_INT8, "drq", "Post-training dynamic-range quantization"),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0)