Results 131 - 140 of 200 for dequantize (0.27 sec)

  1. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h

        assert(scales_.size() == zero_points_.size());
      }
    
      // Quantize an Attribute by the quantization parameters. Return nullptr if
      // the conversion fails or the input array isn't an ElementsAttr.
      ElementsAttr convert(Attribute real_value);
    
     private:
      // Quantize a DenseFPElementsAttr by the quantization parameters.
      DenseElementsAttr convert(DenseFPElementsAttr attr);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 9.8K bytes
    - Viewed (0)
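A note on the snippet above: the converter holds matched `scales_` and `zero_points_` arrays and maps real values into integer storage. As a minimal plain-C++ sketch of that affine mapping (no MLIR dependency; the helper names are hypothetical, and the 0.05 scale with -10 zero point mirror the `!quant.uniform<i8:f32, 0.05:-10>` type in result 10 below):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Illustrative stand-ins for one (scale, zero_point) pair; real code reads
    // these from the quantized element type.
    constexpr float kScale = 0.05f;
    constexpr int kZeroPoint = -10;

    // Affine quantization into int8 storage: q = clamp(round(x / scale) + zp).
    int8_t QuantizeValue(float real) {
      const int q = static_cast<int>(std::round(real / kScale)) + kZeroPoint;
      return static_cast<int8_t>(std::clamp(q, -128, 127));
    }

    // Dequantization inverts the mapping: x ~= (q - zp) * scale.
    float DequantizeValue(int8_t quantized) {
      return static_cast<float>(quantized - kZeroPoint) * kScale;
    }

    int main() {
      const float real = 1.234f;
      const int8_t q = QuantizeValue(real);
      std::cout << "quantized: " << static_cast<int>(q)
                << " dequantized: " << DequantizeValue(q) << "\n";  // 15, ~1.25
      return 0;
    }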
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-tf-quantize.mlir

    A. Unique TensorFlower <******@****.***> 1713119208 -0700
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 18:33:43 UTC 2024
    - 1.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/python/converter_python_api.h

                      const tensorflow::quantization::PyFunctionLibrary*
                          quantization_py_function_library = nullptr);
    
    // Quantize the model with calibration data. Throw errors if `fully_quantize`
    // is specified but the calibration data are not sufficient to quantize the
    // model.
    PyObject* MlirQuantizeModel(PyObject* data, bool disable_per_channel,
                                bool fully_quantize, int inference_type,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 18:18:30 UTC 2024
    - 3.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

    bool NotFromDequant(mlir::Value value) {
      auto dequant_op = value.getDefiningOp<DequantizeOp>();
      if (dequant_op) {
        return false;
      }
      auto split_op = value.getDefiningOp<SplitOp>();
      if (!split_op) {
        return true;
      }
      return !split_op.getValue().getDefiningOp<DequantizeOp>();
    }
    
    // Optimize TFLite operations in functions.
    class OptimizeBatchMatmulPass
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_int4.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=bit-width=4 -verify-diagnostics | FileCheck %s
    
    // CHECK-LABEL: func @dot_int4
    // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x3xf32>) -> tensor<?x2xf32>
    func.func @dot_int4(%arg0: tensor<?x3xf32>) -> tensor<?x2xf32> {
      // CHECK: %[[cst:.*]] = stablehlo.constant
      // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst]])
      // CHECK-SAME: quant.uniform<i8:f32, 0.0040316890267764818:127>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 1.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc

      EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
    }
    
    TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
      constexpr absl::string_view kQuantizeOp = R"mlir(
        func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
          %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq_per_channel.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize='post-training-quantize=true enable-per-channel-quantization=true' | FileCheck %s
    
    module {
      func.func private @conv_with_bias_and_relu(%arg0: tensor<1x3x4x3xf32>) -> tensor<*xf32> {
        %cst = "tf.Const"() {device = "", value = dense<[7.11401462, 7.05456924]> : tensor<2xf32>} : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 4.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

        // 1. Collect quantizable ops.
        QuantizationUnits quantizable_ops = GetQuantizableOps(op);
        if (quantizable_ops.empty()) {
          return failure();
        }
    
        // 2. Quantize collected ops.
        if (!QuantizeOps(rewriter, op, quantizable_ops)) {
          return failure();
        }
    
        // 3. Complete the Q-DQ pair for each inference type.
        if (!ConvertToFloat16Constant(rewriter, op)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
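The pattern above runs in three stages: collect the quantizable ops, quantize them, then complete the quantize/dequantize (Q-DQ) pair so consumers still see float values. As an illustration only (plain C++, no MLIR; helper names are hypothetical, and the sample weights are borrowed from the `tf.Const` in result 7), a symmetric int8 Q-DQ round trip over a weight buffer looks like:

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    // Symmetric int8 weight quantization: derive the scale from the largest
    // magnitude so the [-127, 127] range is fully used; the zero point stays 0.
    float ChooseScale(const std::vector<float>& weights) {
      float max_abs = 0.0f;
      for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));
      return max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
    }

    // Q-DQ pair: quantize each weight to an int8 grid point, then immediately
    // dequantize so downstream ops still receive floats, now carrying the
    // rounding error introduced by quantization.
    std::vector<float> QuantizeDequantize(const std::vector<float>& weights,
                                          float scale) {
      std::vector<float> out;
      out.reserve(weights.size());
      for (float w : weights) {
        const int q =
            std::clamp(static_cast<int>(std::round(w / scale)), -127, 127);
        out.push_back(static_cast<float>(q) * scale);
      }
      return out;
    }

    int main() {
      const std::vector<float> weights = {7.11401462f, 7.05456924f};
      const float scale = ChooseScale(weights);
      for (float w : QuantizeDequantize(weights, scale)) {
        std::cout << w << "\n";  // Values land close to the originals.
      }
      return 0;
    }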
  9. tensorflow/compiler/mlir/lite/experimental/tac/common/utils.cc

    #include "tensorflow/compiler/mlir/lite/utils/utils.h"
    
    namespace mlir {
    namespace TFL {
    namespace tac {
    
    bool NotTFLQuantDequantizeOp(Operation* op) {
      if (!op) return false;
      if (llvm::isa<TFL::QuantizeOp, TFL::DequantizeOp>(op)) return false;
      return true;
    }
    
    bool IsTerminatorOp(Operation* op) {
      if (!op) return false;
      return op->hasTrait<OpTrait::IsTerminator>();
    }
    
    // Try to guess the inference type of the op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 06 05:37:07 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize | FileCheck %s
    
    module {
      func.func @same_scale_test(%arg0: tensor<*xf32>) -> tensor<*xf32> {
        %cst = arith.constant dense<[-1, 144]> : tensor<2xi32>
        %cst_1 = arith.constant dense<1.0> : tensor<144x10xf32>
        %cst_2 = arith.constant dense<0.1> : tensor<10xf32>
        %0 = "quantfork.qcast"(%arg0) : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 0.05:-10>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Dec 29 02:42:57 UTC 2022
    - 2.1K bytes
    - Viewed (0)