Results 141 - 150 of 203 for dequantize (0.15 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/common/utils.cc

    #include "tensorflow/compiler/mlir/lite/utils/utils.h"
    
    namespace mlir {
    namespace TFL {
    namespace tac {
    
    // Returns true unless `op` is null or a TFL quantize/dequantize op.
    bool NotTFLQuantDequantizeOp(Operation* op) {
      if (!op) return false;
      if (llvm::isa<TFL::QuantizeOp, TFL::DequantizeOp>(op)) return false;
      return true;
    }
    
    // Returns true if `op` carries MLIR's IsTerminator trait (e.g. a return op).
    bool IsTerminatorOp(Operation* op) {
      if (!op) return false;
      return op->hasTrait<OpTrait::IsTerminator>();
    }
    
    // Try to guess the inference type of the op.
    Last Modified: Wed Mar 06 05:37:07 UTC 2024
    - 2.3K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize | FileCheck %s
    
    module {
      func.func @same_scale_test(%arg0: tensor<*xf32>) -> tensor<*xf32> {
        %cst = arith.constant dense<[-1, 144]> : tensor<2xi32>
        %cst_1 = arith.constant dense<1.0> : tensor<144x10xf32>
        %cst_2 = arith.constant dense<0.1> : tensor<10xf32>
        %0 = "quantfork.qcast"(%arg0) : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 0.05:-10>>
    Last Modified: Thu Dec 29 02:42:57 UTC 2022
    - 2.1K bytes
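    The `!quant.uniform<i8:f32, 0.05:-10>` type in this test declares i8
    storage, an f32 expressed type, scale 0.05, and zero point -10. As a
    minimal standalone sketch of that affine mapping (illustrative C++, not
    TensorFlow code):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <cstdio>

        // Affine mapping for !quant.uniform<i8:f32, 0.05:-10>:
        //   real = scale * (quantized - zero_point)
        int8_t Quantize(float real, float scale, int32_t zero_point) {
          int32_t q = static_cast<int32_t>(std::round(real / scale)) + zero_point;
          return static_cast<int8_t>(std::clamp(q, -128, 127));  // i8 storage range
        }

        float Dequantize(int8_t q, float scale, int32_t zero_point) {
          return scale * (static_cast<int32_t>(q) - zero_point);
        }

        int main() {
          const float scale = 0.05f;
          const int32_t zero_point = -10;
          int8_t q = Quantize(1.0f, scale, zero_point);  // round(1.0/0.05) - 10 = 10
          std::printf("q = %d, back = %.2f\n", q, Dequantize(q, scale, zero_point));
        }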
  3. tensorflow/compiler/mlir/lite/python/wrap_converter.py

        enable_whole_model_verify,
        denylisted_ops,
        denylisted_nodes,
        enable_variable_quantization,
        disable_per_channel_for_dense_layers,
        debug_options_str,
    ):
      """Wraps experimental mlir quantize model."""
      return _pywrap_converter_api.ExperimentalMlirQuantizeModel(
          input_data_str,
          disable_per_channel,
          fully_quantize,
          inference_type,
          input_data_type,
          output_data_type,
    Last Modified: Fri May 31 18:18:30 UTC 2024
    - 3K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/concurrency_test.py

            ),
            tags={tag_constants.SERVING},
            signature_keys=['serving_default'],
        )
    
        model = quantize_model.quantize(
            temp_path,
            quantization_options=quantization_options,
            representative_dataset=data_gen(),
        )
        return model
    
      @test_util.run_in_graph_and_eager_modes
    Last Modified: Mon Sep 11 00:47:05 UTC 2023
    - 3.6K bytes
  5. tensorflow/compiler/aot/BUILD

    )
    
    filegroup(
        name = "quantize_header",
        srcs = ["quantize.h"],
        visibility = ["//visibility:public"],
    )
    
    cc_library(
        name = "tfcompile_lib",
        srcs = [
            "codegen.cc",
            "compile.cc",
            "flags.cc",
        ],
        hdrs = [
            "codegen.h",
            "compile.h",
            "flags.h",
            "quantize.h",
        ],
        compatible_with = [],
    Last Modified: Thu Apr 11 16:13:05 UTC 2024
    - 11.7K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc

      // It is known that `op` is `ModuleOp` when `pass` is
      // `QuantizeCompositeFunctionPass`, but the check is still performed to be
      // defensive.
      return pass != nullptr &&
             pass->getArgument() == "stablehlo-quantize-composite-functions" &&
             isa_and_nonnull<ModuleOp>(op);
    }
    
    // Report is saved only when:
    // * After running `QuantizeCompositeFunctionPass`.
    // * The pass is run on `ModuleOp`.
    Last Modified: Fri May 03 02:59:01 UTC 2024
    - 3.6K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize='post-training-quantize=true' | FileCheck %s
    
    // -----
    
    module {
      func.func @same_scale_ptq_test(%arg0: tensor<*xf32>) -> tensor<*xf32> {
        %cst = arith.constant dense<[-1, 144]> : tensor<2xi32>
        %cst_1 = arith.constant dense<1.0> : tensor<144x10xf32>
        %cst_2 = arith.constant dense<0.1> : tensor<10xf32>
        %0 = "quantfork.stats"(%arg0) {
    Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 9.1K bytes
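    `quantfork.stats` attaches calibration min/max to a value; in
    post-training mode the prepare pass derives quantization parameters from
    such ranges. A hedged sketch of the standard asymmetric i8 derivation
    (the exact rounding/nudging TensorFlow applies may differ):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <cstdio>

        // Derive asymmetric i8 parameters from a calibrated [rmin, rmax],
        // first widening the range to include 0.0 so zero quantizes exactly.
        void DeriveParams(float rmin, float rmax, float* scale, int32_t* zero_point) {
          rmin = std::min(rmin, 0.0f);
          rmax = std::max(rmax, 0.0f);
          *scale = (rmax - rmin) / 255.0f;  // 256 representable i8 values
          *zero_point = static_cast<int32_t>(std::round(-128.0f - rmin / *scale));
          *zero_point = std::clamp(*zero_point, -128, 127);
        }

        int main() {
          float scale;
          int32_t zp;
          DeriveParams(0.0f, 2.55f, &scale, &zp);
          std::printf("scale = %f, zero_point = %d\n", scale, zp);  // 0.01, -128
        }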
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' -quant-quantize-composite-functions='quantization-method=weight_only target-opset=XLA enable-per-channel-quantization=true' -symbol-dce...
    Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
  9. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

          if (QuantizedType::getQuantizedElementType(operand.getType())) {
            auto newTy = QuantizedType::castToExpressedType(operand.getType());
            newOperands.push_back(
                rewriter.create<TFL::DequantizeOp>(loc, newTy, operand));
            continue;
          }
    
          newOperands.push_back(operand);
        }
    
        SmallVector<Type> newResultTys;
        for (auto result : op->getResults()) {
    Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
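    The loop above wraps each quantized operand in a `TFL::DequantizeOp` so
    the hybrid op can run entirely in its expressed float type. A standalone
    sketch of the computation that inserted op performs, assuming per-tensor
    i8 weights (the values here are made up for illustration):

        #include <cstddef>
        #include <cstdint>
        #include <cstdio>
        #include <vector>

        // What the inserted dequantize computes: expand i8 storage to the
        // expressed f32 type so the consumer can be a plain float kernel.
        std::vector<float> DequantizeWeights(const std::vector<int8_t>& w,
                                             float scale, int32_t zero_point) {
          std::vector<float> out;
          out.reserve(w.size());
          for (int8_t q : w) out.push_back(scale * (q - zero_point));
          return out;
        }

        int main() {
          // Hybrid-style inner product: float activations x quantized weights.
          std::vector<int8_t> w = {10, -20, 30};
          std::vector<float> activations = {1.0f, 2.0f, 3.0f};
          std::vector<float> wf = DequantizeWeights(w, /*scale=*/0.1f, /*zero_point=*/0);
          float acc = 0.0f;
          for (std::size_t i = 0; i < wf.size(); ++i) acc += activations[i] * wf[i];
          std::printf("%.2f\n", acc);  // 1*1.0 + 2*(-2.0) + 3*3.0 = 6.00
        }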
  10. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

      converters.reserve(dim_size);
      for (int i = 0, e = dim_size; i != e; ++i) {
        converters.push_back(getPerChunkConverter(i));
      }
    
      // Scan the elements of the dense elements attributes and quantize them by
      // using the right quantization parameters.
      int64_t flatten_index = 0;
      auto shape = type.getShape();
      int64_t chunk_size =
          std::accumulate(std::next(shape.begin(), quantization_dim_ + 1),
    Last Modified: Wed May 08 02:10:16 UTC 2024
    - 4.3K bytes
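    Here `chunk_size` is the number of contiguous elements that share one
    converter when quantizing per axis: the product of all dimensions after
    `quantization_dim_` in row-major layout. A self-contained sketch of that
    index arithmetic (the shape and axis below are made-up examples):

        #include <cstdint>
        #include <cstdio>
        #include <functional>
        #include <iterator>
        #include <numeric>
        #include <vector>

        int main() {
          // A [2, 3, 4] tensor quantized along axis 1 has 3 channels; in
          // row-major order each channel's elements come in runs of 4.
          std::vector<int64_t> shape = {2, 3, 4};
          int64_t quantization_dim = 1;
          int64_t chunk_size =
              std::accumulate(std::next(shape.begin(), quantization_dim + 1),
                              shape.end(), int64_t{1}, std::multiplies<int64_t>());
          // Map each flat index to the channel whose scale/zero point apply.
          for (int64_t flat = 0; flat < 2 * 3 * 4; flat += chunk_size) {
            int64_t channel = (flat / chunk_size) % shape[quantization_dim];
            std::printf("flat %2d -> channel %d\n", static_cast<int>(flat),
                        static_cast<int>(channel));
          }
        }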