Results 61 - 70 of 82 for dequantize (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

      converters.reserve(dim_size);
      for (int i = 0, e = dim_size; i != e; ++i) {
        converters.push_back(getPerChunkConverter(i));
      }
    
      // Scan the elements of the dense elements attributes and quantize them by
      // using the right quantization parameters.
      int64_t flatten_index = 0;
      auto shape = type.getShape();
      int64_t chunk_size =
          std::accumulate(std::next(shape.begin(), quantization_dim_ + 1),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 4.3K bytes
    - Viewed (0)
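
    A note on the snippet above: the std::accumulate over the trailing dimensions computes how many contiguous
    elements share one channel's quantization parameters in row-major order, and flatten_index divided by that
    chunk size (mod the channel count) recovers the channel. A minimal standalone sketch of the index arithmetic,
    with an illustrative shape and quantization dimension rather than the actual UniformSupport.cc types:

      #include <cstdint>
      #include <functional>
      #include <iostream>
      #include <numeric>
      #include <vector>

      int main() {
        // Example: a 2x3x4 tensor quantized per-channel along dimension 1.
        const std::vector<int64_t> shape = {2, 3, 4};
        const int64_t quantization_dim = 1;

        // Elements sharing one channel's parameters form contiguous runs of
        // length prod(shape[quantization_dim + 1 :]) in row-major order.
        const int64_t chunk_size =
            std::accumulate(shape.begin() + quantization_dim + 1, shape.end(),
                            int64_t{1}, std::multiplies<int64_t>());
        const int64_t num_elements = std::accumulate(
            shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
        const int64_t dim_size = shape[quantization_dim];

        // Map each flat index to its channel: the channel advances every
        // chunk_size elements and wraps after dim_size chunks.
        for (int64_t flat = 0; flat < num_elements; ++flat) {
          const int64_t channel = (flat / chunk_size) % dim_size;
          std::cout << "flat " << flat << " -> channel " << channel << "\n";
        }
      }
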
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir

    // PTQ
    //
    // Internal functions should be marked as private. They will be inlined and
    // deleted in `InsertQuantizedFunctionsPass`.
    //
    // For the Uniform Quantized op case, attributes are generated during the
    // quantize composite pass. Therefore, attr_map is set to an empty string.
    
    module {
    
      // Currently only the 4-d case is supported.
      func.func @quantized_conv2d_fn(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Dec 01 12:06:54 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc

    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"  // IWYU pragma: keep
    
    #define DEBUG_TYPE "quantize-composite-functions"
    
    namespace mlir::quant::stablehlo {
    
    #define GEN_PASS_DEF_QUANTIZECOMPOSITEFUNCTIONSPASS
    #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h.inc"
    
    namespace {
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 4.6K bytes
    - Viewed (0)
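
    For context on the GEN_PASS_DEF macro in this snippet: it selects a tablegen-generated base class from
    passes.h.inc, and the pass is then defined by deriving from that base. A schematic of this common MLIR idiom
    (the body is a placeholder, not the actual contents of quantize_composite_functions.cc, and it only compiles
    against MLIR plus the generated header):

      namespace mlir::quant::stablehlo {
      namespace {

      // impl::QuantizeCompositeFunctionsPassBase is generated by tablegen and
      // supplies the pass name, argument, and option plumbing.
      class QuantizeCompositeFunctionsPass
          : public impl::QuantizeCompositeFunctionsPassBase<
                QuantizeCompositeFunctionsPass> {
       public:
        void runOnOperation() override {
          // Placeholder body: the real pass replaces lifted composite
          // functions with their quantized counterparts.
        }
      };

      }  // namespace
      }  // namespace mlir::quant::stablehlo
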
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir

    // RUN: tf-quant-opt %s -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions -quant-insert-quantized-functions -quant-quantize-composite-functions -symbol-dce | FileCheck %s
    
    func.func @fake_quant_conv(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> {
      %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-nnapi.mlir

    // CHECK:           %[[VAL_4:.*]] = "tfl.quantize"(%[[VAL_3]]) <{qtype = tensor<?x2048x!quant.uniform<i8:f32, 9.000000e-01:-128>>}> : (tensor<?x2048x!quant.uniform<i8:f32, 6.000000e-01:-128>>) -> tensor<?x2048x!quant.uniform<i8:f32, 9.000000e-01:-128>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/post_quantize.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-post-quantize | FileCheck %s
    
    // CHECK-LABEL: @remove_volatile_qdq
    func.func @remove_volatile_qdq() -> tensor<3x2xf32> {
      // CHECK: %[[CST:.*]] = stablehlo.constant
      // CHECK-NOT: "quantfork.qcast"
      // CHECK-NOT: "quantfork.dcast"
      // CHECK: return %[[CST]]
      %cst = stablehlo.constant dense<[[-0.960978984, -0.390246302], [-0.790828585, -0.601039409], [-1.0280807, -1.02731466]]> : tensor<3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 4.4K bytes
    - Viewed (0)
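
    The checks above verify that a volatile quantize/dequantize (qcast/dcast) pair around a constant is deleted
    outright, leaving the original float constant. Numerically, such a pair is just a round trip; a standalone
    sketch of that round trip under an assumed int8 affine scheme (scale and zero point are illustrative, not
    taken from the test):

      #include <algorithm>
      #include <cmath>
      #include <cstdint>
      #include <iostream>

      // Affine quantization: q = clamp(round(x / scale) + zero_point).
      int8_t Quantize(float x, float scale, int32_t zero_point) {
        const int32_t q =
            static_cast<int32_t>(std::round(x / scale)) + zero_point;
        return static_cast<int8_t>(std::clamp(q, -128, 127));
      }

      // Dequantization: x' = (q - zero_point) * scale.
      float Dequantize(int8_t q, float scale, int32_t zero_point) {
        return static_cast<float>(q - zero_point) * scale;
      }

      int main() {
        const float scale = 0.01f;     // illustrative parameters
        const int32_t zero_point = 0;
        const float cst = -0.960978984f;
        // A volatile q/dq pair is this round trip; since nothing observes the
        // quantized value, the post-quantize pass removes the pair and the
        // original constant flows through unchanged.
        std::cout << Dequantize(Quantize(cst, scale, zero_point), scale,
                                zero_point)
                  << "\n";
      }
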
  7. tensorflow/compiler/mlir/quantization/stablehlo/passes/prepare_quantize.cc

      auto func_op_quant_scale_spec = GetStableHloQuantConstraints;
    
      for (auto func_op : module_op.getOps<func::FuncOp>()) {
    // The function might contain more stats ops than required, and
    // conflicting calibration stats would introduce requantization. This
    // removes all the redundant stats ops.
        RemoveRedundantStatsOps(func_op, func_op_quant_spec,
                                func_op_quant_scale_spec);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 03 05:11:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
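
    The comment in this snippet states the key constraint: two different calibration ranges attached to the same
    value force a requantize between producer and consumer, so redundant stats are stripped first. A toy
    illustration of that conflict check, using plain structs rather than the MLIR stats ops:

      #include <cstddef>
      #include <iostream>
      #include <vector>

      // A calibration range as recorded by a stats op: [min, max].
      struct CalibrationRange {
        double min, max;
      };

      // Two stats on the same value conflict when their ranges differ:
      // quantizing with one range and consuming under the other would need a
      // requantize in between. Identical stats are redundant and removable.
      bool Conflicts(const CalibrationRange& a, const CalibrationRange& b) {
        return a.min != b.min || a.max != b.max;
      }

      int main() {
        const std::vector<CalibrationRange> stats = {
            {-1.0, 1.0}, {-1.0, 1.0}, {-2.0, 2.0}};
        for (std::size_t i = 1; i < stats.size(); ++i) {
          std::cout << "stats[" << i << "] is "
                    << (Conflicts(stats[0], stats[i]) ? "conflicting"
                                                      : "redundant")
                    << "\n";
        }
      }
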
  8. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

    // Defines various options to specify and control the behavior of the quantizer.
    // It consists of
    // 1) Model-wise quantization configuration as a default configuration. If it is
    // None, the default configuration is "do not quantize the model".
    // 2) A set of supported operations.
    // 3) Unit wise quantization precision.
    // 4) Target hardware name.
    // NEXT ID: 18
    message QuantizationOptions {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc

                nullptr);
      }
    
      // Special case where min/max is close enough. The tensor contents are all
      // 0.0s, so the scale is set to 1.0 and the tensor can be quantized to zero
      // points and dequantized to 0.0.
      if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {
        return quant::UniformQuantizedType::getChecked(
            loc, flags, storageType, expressedType, 1.0, qmin, qmin, qmax);
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 11:52:27 UTC 2024
    - 7.7K bytes
    - Viewed (0)
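
    The special case above guards a near-degenerate calibration range: with rmax - rmin below machine epsilon the
    usual scale formula would divide by (almost) zero, so the scale is pinned to 1.0 and the zero point to qmin,
    making every element quantize to qmin and dequantize back to 0.0. A standalone sketch of that derivation (the
    zero-point nudging that the full FakeQuantSupport.cc performs is omitted):

      #include <cmath>
      #include <cstdint>
      #include <iostream>
      #include <limits>

      struct QuantParams {
        double scale;
        int64_t zero_point;
      };

      QuantParams ChooseParams(double rmin, double rmax, int64_t qmin,
                               int64_t qmax) {
        // Degenerate range: the tensor contents are all 0.0. Pin scale to 1.0
        // and the zero point to qmin, mirroring the getChecked(..., 1.0, qmin,
        // qmin, qmax) call in the snippet above.
        if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {
          return {1.0, qmin};
        }
        // General case: spread the real range over the storage range. The real
        // code additionally nudges the zero point onto the integer grid.
        const double scale = (rmax - rmin) / static_cast<double>(qmax - qmin);
        const int64_t zero_point = qmin - std::llround(rmin / scale);
        return {scale, zero_point};
      }

      int main() {
        const QuantParams p = ChooseParams(0.0, 0.0, -128, 127);
        std::cout << "scale=" << p.scale << " zp=" << p.zero_point << "\n";
      }
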
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions='target-opset=XLA' -quant-insert-quantized-functions -quant-quantize-composite-functions='target-opset=XLA' -symbol-dce -inline -tf-shape-inference -canonicalize -quant-replace-cast-hacks-with-tf-xla-ops -cse -quant-optimize | FileCheck %s
    
    module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1219 : i32}, tf_saved_model.semantics} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.2K bytes
    - Viewed (0)