Results 1 - 10 of 451 for quantization (0.33 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/quantization.py

    """StableHLO Quantizer."""
    from typing import Mapping
    
    from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as qc
    from tensorflow.compiler.mlir.quantization.stablehlo.python import pywrap_quantization
    from tensorflow.compiler.mlir.quantization.tensorflow.python import py_function_lib
    from tensorflow.compiler.mlir.quantization.tensorflow.python import save_model
    from tensorflow.core.protobuf import meta_graph_pb2
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 12:53:33 UTC 2024
    - 4.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

                 !interleave([bias, op1, op2], ", "),
                 ">::Impl")>;
    
    // Specify the operand index of the coefficient operand for an affine op
    // and also the quantization dimension if per-axis quantization is supported.
    // If the quantization dimension is -1, per-axis quantization isn't supported.
    class AffineOpCoefficient<int dim, int index> : NativeOpTrait<
      !strconcat("quant::AffineOpCoefficient<",
                 !interleave([dim, index], ", "),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
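    Note: the TableGen comment in this excerpt says the trait carries two compile-time integers, the operand index of the coefficient (weight) operand and the quantization dimension, where -1 means per-axis quantization is unsupported. As a rough illustration only (the names below are hypothetical, not the actual quant::AffineOpCoefficient implementation), such a parameterized trait can expose those constants in C++ like this:

    // Hypothetical stand-in for a trait parameterized by <dim, index>; the real
    // trait is attached to ops via NativeOpTrait in quantization.td.
    template <int QuantDim, int OperandIndex>
    struct AffineCoefficientInfo {
      // Index of the coefficient (weight) operand of the affine op.
      static constexpr int GetCoefficientOperandIndex() { return OperandIndex; }
      // Quantization dimension; -1 means per-axis quantization is unsupported.
      static constexpr int GetQuantizationDim() { return QuantDim; }
      static constexpr bool SupportsPerAxis() { return QuantDim != -1; }
    };

    // Example: coefficient is operand 1, quantized along dimension 3 (per-axis).
    using ConvCoefficient = AffineCoefficientInfo</*QuantDim=*/3, /*OperandIndex=*/1>;
    static_assert(ConvCoefficient::SupportsPerAxis(), "per-axis expected");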
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir

    Christian Sigg <******@****.***> 1714640622 -0700
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc

    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 10:49:12 UTC 2024
    - 7.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h

    #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
    #include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
    
    namespace tensorflow {
    
    // Runs quantization on `module_op`. `saved_model_bundle` is required to
    // retrieve information about the original model (e.g. signature def mapping)
    // because quantization requires exporting the intermediate `ModuleOp` back to
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 02:44:03 UTC 2024
    - 2.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir

    A. Unique TensorFlower <******@****.***> 1704479080 -0800
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 1.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

    // RUN: tf-opt --tfl-decompose-hybrid-quantization --verify-each %s | FileCheck %s
    
    // CHECK-LABEL: @test_conv2d_float
    func.func @test_conv2d_float(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x16xf32> {
      // CHECK-DAG: %[[VAL0:.+]] = "tfl.pseudo_const"() <{value = dense<42> : tensor<16x1x1x8xi8>}>
      // CHECK-DAG: %[[VAL1:.+]] = "tfl.pseudo_const"() <{value = dense<1> : tensor<16x1x1x8xi8>}>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

      // activation of static range quantization (SRQ). Quantization calibration
      // method is set to MIN_MAX by default.
      stablehlo.quantization.CalibrationOptions calibration_options = 15;
    
      // Configuration related to quantization debugger.
      stablehlo.quantization.DebuggerConfig debugger_config = 16;
    
      reserved 3;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
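    Note: the fields in this excerpt are protobuf fields, so client code reaches them through protoc-generated accessors. A minimal sketch, assuming the enclosing message is tensorflow.quantization.QuantizationOptions (the message name is an assumption; only the calibration_options and debugger_config field names come from the excerpt):

    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"

    // Sketch only: touches the two fields shown above via their generated
    // mutable_* accessors; concrete sub-fields are deliberately omitted.
    void ConfigureQuantization(tensorflow::quantization::QuantizationOptions& opts) {
      auto* calibration = opts.mutable_calibration_options();  // CalibrationOptions
      auto* debugger = opts.mutable_debugger_config();         // DebuggerConfig
      (void)calibration;  // e.g. override the MIN_MAX default calibration method
      (void)debugger;     // e.g. enable the quantization debugger
    }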
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc

                         "array to apply quantization.")};
    
      Option<QuantMethod> quantization_method_{
          *this, "quantization-method",
          llvm::cl::init(tensorflow::quantization::QuantizationMethod::
                             METHOD_DYNAMIC_RANGE_INT8),
          llvm::cl::desc("Choose quantization method."),
          llvm::cl::values(
              clEnumValN(tensorflow::quantization::QuantizationMethod::
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
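    Note: the excerpt declares the pass's quantization method as a command-line option backed by llvm::cl. A generic sketch of the same pattern with MLIR's PassWrapper (the pass, enum, and flag names below are made up for illustration and are not the TensorFlow pass being quoted):

    #include "llvm/Support/CommandLine.h"
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/Pass.h"

    enum class Method { kStaticRange, kDynamicRange };

    // Illustrative pass showing how an enum-valued Option is wired up:
    // a default, a description, and the set of legal command-line values.
    struct ExampleQuantMethodPass
        : public mlir::PassWrapper<ExampleQuantMethodPass,
                                   mlir::OperationPass<mlir::ModuleOp>> {
      Option<Method> method{
          *this, "quantization-method",
          llvm::cl::init(Method::kDynamicRange),
          llvm::cl::desc("Choose quantization method."),
          llvm::cl::values(
              clEnumValN(Method::kStaticRange, "static-range", "Static-range PTQ"),
              clEnumValN(Method::kDynamicRange, "dynamic-range",
                         "Dynamic-range quantization"))};

      llvm::StringRef getArgument() const final { return "example-quant-method"; }

      void runOnOperation() override {
        // A real pass would branch on `method` here; this sketch does nothing.
      }
    };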
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc

    namespace {
    
    using QuantMethod =
        ::tensorflow::quantization::QuantizationMethod::PresetMethod;
    using QuantizationUnit = std::pair<Operation*, int>;
    using QuantizationUnits = llvm::SetVector<QuantizationUnit>;
    using ::tensorflow::quantization::OpSet;
    
    // Preprocesses ops to allow multi-axis quantization, prior to quantization
    // passes. Currently, per-channel quantization only supports 1D results.
    class PreprocessOpPass
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.4K bytes
    - Viewed (0)
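    Note: the aliases in this excerpt model a quantization unit as an (Operation*, int) pair and collect units in an llvm::SetVector, which rejects duplicates while preserving insertion order. A small standalone sketch of that container behavior, with plain ints standing in for Operation* (names here are illustrative, not from the pass):

    #include <cstdio>
    #include <utility>

    #include "llvm/ADT/SetVector.h"

    int main() {
      // Stand-in for std::pair<Operation*, int>: (fake op id, operand index).
      using Unit = std::pair<int, int>;
      llvm::SetVector<Unit> units;

      units.insert({1, 0});
      units.insert({2, 1});
      bool inserted = units.insert({1, 0});  // duplicate: rejected

      std::printf("size=%zu duplicate_inserted=%d\n", units.size(), (int)inserted);
      for (const Unit &u : units)  // iterates in insertion order
        std::printf("(%d, %d)\n", u.first, u.second);
      return 0;
    }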