Results 111 - 120 of 200 for requantize (0.39 sec)

  1. tensorflow/compiler/mlir/lite/utils/convert_type.h

    // Returns the element type from the attribute Type 'type_attr'.
    mlir::Type GetShapeStrippedType(mlir::TypeAttr type_attr);
    
    // Returns true if 'val' is not from a Quantize op, or is from a Quantize
    // op with the same quant type as 'qtype_attr'.
    bool NotFromQuantOpOrSameQuantType(mlir::Value val, mlir::TypeAttr qtype_attr);
    
    }  // namespace tflite
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 2.1K bytes
    - Viewed (0)
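
    Based only on the doc comment in result 1, a plausible implementation of
    NotFromQuantOpOrSameQuantType might look like the sketch below. This is an
    assumption, not the actual TFLite source: the use of mlir::TFL::QuantizeOp
    and its generated getQtypeAttr() accessor is inferred from the comment.

        #include "mlir/IR/Value.h"
        #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

        namespace tflite {

        // Sketch: true when `val` is not produced by a tfl.quantize op at
        // all, or when the producing tfl.quantize already carries the same
        // quant type as `qtype_attr`.
        bool NotFromQuantOpOrSameQuantType(mlir::Value val,
                                           mlir::TypeAttr qtype_attr) {
          auto quantize_op =
              llvm::dyn_cast_or_null<mlir::TFL::QuantizeOp>(val.getDefiningOp());
          return !quantize_op || quantize_op.getQtypeAttr() == qtype_attr;
        }

        }  // namespace tflite
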
  2. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h

        assert(scales_.size() == zero_points_.size());
      }
    
      // Quantize an Attribute by the quantization parameters. Return nullptr if
      // the conversion fails or the input array isn't an ElementsAttr.
      ElementsAttr convert(Attribute real_value);
    
     private:
      // Quantize a DenseFPElementsAttr by the quantization parameters.
      DenseElementsAttr convert(DenseFPElementsAttr attr);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 9.8K bytes
    - Viewed (0)
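
    The UniformSupport.h comments in result 2 describe converting a dense
    float attribute using stored scales and zero points. A minimal standalone
    sketch of the per-tensor case of that arithmetic, in plain C++ with no
    MLIR (the function name and shape here are illustrative):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        // Affine quantization, per-tensor case:
        //   q = clamp(round(x / scale) + zero_point, -128, 127).
        // A per-axis converter applies the same formula with one
        // (scale, zero_point) pair per slice of the quantized dimension.
        std::vector<int8_t> QuantizeF32(const std::vector<float>& values,
                                        float scale, int32_t zero_point) {
          std::vector<int8_t> out;
          out.reserve(values.size());
          for (float x : values) {
            int32_t q =
                static_cast<int32_t>(std::lround(x / scale)) + zero_point;
            out.push_back(static_cast<int8_t>(std::clamp(q, -128, 127)));
          }
          return out;
        }
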
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

        bool enable_legacy_weight_only = false,
        std::optional<const absl::string_view> mlir_dump_file_prefix =
            std::nullopt);
    
    // Converts dequantize-(quantizable) call-quantize pattern to a single call op
    // that has quantized input and output types. It is expected for this pass to
    // emit illegal IR with unsupported quantized input and output types. The
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
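
    The pass comment in result 3 describes fusing a dequantize - (quantizable
    op) - quantize chain into one op with quantized input and output types. A
    small standalone sanity check of why that is numerically sound, using ReLU
    as the quantizable op and made-up int8 parameters:

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <cstdio>

        // Float reference: dequantize -> relu -> quantize (the pattern the
        // pass fuses away).
        int8_t ReluViaFloat(int8_t q, float scale, int32_t zp) {
          float x = scale * (q - zp);  // dequantize
          float y = std::max(x, 0.0f);  // the quantizable op
          int32_t r = static_cast<int32_t>(std::lround(y / scale)) + zp;
          return static_cast<int8_t>(std::clamp(r, -128, 127));  // quantize
        }

        // Fused integer form: with matching input/output params, relu reduces
        // to a clamp against the zero point -- no float round trip needed.
        int8_t ReluFused(int8_t q, int32_t zp) {
          return static_cast<int8_t>(std::max<int32_t>(q, zp));
        }

        int main() {
          const float scale = 0.05f;
          const int32_t zp = -10;
          for (int q = -128; q <= 127; ++q) {
            if (ReluViaFloat(static_cast<int8_t>(q), scale, zp) !=
                ReluFused(static_cast<int8_t>(q), zp))
              std::printf("mismatch at q=%d\n", q);  // never fires
          }
          return 0;
        }
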
  4. tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-tf-quantize.mlir

    A. Unique TensorFlower <******@****.***> 1713119208 -0700
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 18:33:43 UTC 2024
    - 1.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/python/converter_python_api.h

                      const tensorflow::quantization::PyFunctionLibrary*
                          quantization_py_function_library = nullptr);
    
    // Quantize the model with calibration data. Throws errors if
    // `fully_quantize` is specified but the calibration data are not
    // sufficient to quantize the model.
    PyObject* MlirQuantizeModel(PyObject* data, bool disable_per_channel,
                                bool fully_quantize, int inference_type,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 18:18:30 UTC 2024
    - 3.6K bytes
    - Viewed (0)
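
    The comment in result 5 says MlirQuantizeModel throws errors on
    insufficient calibration data; since its full parameter list is truncated
    above, here is only a hedged sketch of how a CPython-level function
    signals such an error (the function and parameter names are illustrative,
    not the real API):

        #include <Python.h>

        // CPython functions "throw" by setting a Python exception and
        // returning nullptr; the caller sees a raised exception.
        static PyObject* QuantizeOrRaise(PyObject* data, bool fully_quantize,
                                         bool calibration_sufficient) {
          if (fully_quantize && !calibration_sufficient) {
            PyErr_SetString(
                PyExc_ValueError,
                "calibration data insufficient to fully quantize model");
            return nullptr;  // propagates as a Python exception
          }
          Py_INCREF(data);  // placeholder: real code returns the new model
          return data;
        }
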
  6. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc

      EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
    }
    
    TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
      constexpr absl::string_view kQuantizeOp = R"mlir(
        func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
          %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.8K bytes
    - Viewed (0)
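
    The test in result 6 exercises a predicate that holds only when every
    operand and result of an op carries a quantized type. A toy standalone
    model of that check (the types and names below are illustrative
    stand-ins, not the real MLIR API):

        #include <algorithm>
        #include <vector>

        // Toy stand-ins; the real code inspects MLIR tensor element types.
        enum class ElemType { kF32, kQuantizedI8 };

        struct ToyOp {
          std::vector<ElemType> operand_types;
          std::vector<ElemType> result_types;
        };

        // Fully quantized only if every operand and every result is
        // quantized; a float operand anywhere (the partially quantized
        // uniform_quantize case above) makes it false.
        bool IsOpFullyQuantized(const ToyOp& op) {
          auto is_quant = [](ElemType t) {
            return t == ElemType::kQuantizedI8;
          };
          return std::all_of(op.operand_types.begin(),
                             op.operand_types.end(), is_quant) &&
                 std::all_of(op.result_types.begin(),
                             op.result_types.end(), is_quant);
        }
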
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_int4.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=bit-width=4 -verify-diagnostics | FileCheck %s
    
    // CHECK-LABEL: func @dot_int4
    // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x3xf32>) -> tensor<?x2xf32>
    func.func @dot_int4(%arg0: tensor<?x3xf32>) -> tensor<?x2xf32> {
      // CHECK: %[[cst:.*]] = stablehlo.constant
      // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst]])
      // CHECK-SAME: quant.uniform<i8:f32, 0.0040316890267764818:127>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 1.7K bytes
    - Viewed (0)
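
    The CHECK lines in result 7 pin an exact scale and zero point chosen by
    prepare-quantize at a given bit width. A hedged standalone sketch of the
    standard asymmetric min/max derivation of such parameters (the helper
    below is illustrative, not the pass's actual code):

        #include <algorithm>
        #include <cmath>
        #include <utility>

        // Map an observed real range [rmin, rmax] onto an integer range
        // [qmin, qmax] (e.g. [-8, 7] for bit-width=4, [-128, 127] for int8):
        //   scale = (rmax - rmin) / (qmax - qmin)
        //   zero_point = qmin - rmin / scale   (makes real 0.0 exact)
        std::pair<double, int> ChooseQuantParams(double rmin, double rmax,
                                                 int qmin, int qmax) {
          rmin = std::min(rmin, 0.0);  // range must contain zero
          rmax = std::max(rmax, 0.0);
          double scale = (rmax - rmin) / static_cast<double>(qmax - qmin);
          if (scale == 0.0) scale = 1.0;  // guard the all-zero range
          int zero_point = static_cast<int>(std::lround(qmin - rmin / scale));
          return {scale, zero_point};
        }
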
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq_per_channel.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize='post-training-quantize=true enable-per-channel-quantization=true' | FileCheck %s
    
    module {
      func.func private @conv_with_bias_and_relu(%arg0: tensor<1x3x4x3xf32>) -> tensor<*xf32> {
        %cst = "tf.Const"() {device = "", value = dense<[7.11401462, 7.05456924]> : tensor<2xf32>} : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 4.2K bytes
    - Viewed (0)
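
    The RUN line in result 8 enables per-channel quantization: one scale per
    output channel instead of one per tensor, so large and small channels do
    not share a single range. A minimal standalone sketch, assuming the
    common symmetric int8 scheme for conv weights (names illustrative):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        // One symmetric int8 scale per output channel:
        //   scale_c = max(|w|) / 127 over that channel's weights.
        std::vector<float> PerChannelScales(
            const std::vector<std::vector<float>>& channels) {
          std::vector<float> scales;
          scales.reserve(channels.size());
          for (const auto& w : channels) {
            float max_abs = 0.0f;
            for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
            scales.push_back(max_abs > 0.0f ? max_abs / 127.0f : 1.0f);
          }
          return scales;
        }
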
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

        // 1. Collect quantizable ops.
        QuantizationUnits quantizable_ops = GetQuantizableOps(op);
        if (quantizable_ops.empty()) {
          return failure();
        }
    
        // 2. Quantize collected ops.
        if (!QuantizeOps(rewriter, op, quantizable_ops)) {
          return failure();
        }
    
        // 3. Complete the Q-DQ pair for each inference type.
        if (!ConvertToFloat16Constant(rewriter, op)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
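
    The numbered steps in result 9 collect quantizable ops, quantize them,
    and complete a Q-DQ pair so consumers still see float values. A
    standalone sketch of the Q-DQ idea for one weight constant (illustrative
    int8 variant; the real pass also supports float16 constants):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        // Weight-only Q-DQ: quantize the constant ("Q"), then pair it with a
        // dequantize ("DQ") so the graph stays float-typed but the stored
        // weight is 8-bit.
        struct QdqWeight {
          std::vector<int8_t> q;  // the quantized constant (Q)
          float scale;            // used by the paired dequantize (DQ)
        };

        QdqWeight MakeQdqPair(const std::vector<float>& w) {
          float max_abs = 0.0f;
          for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
          QdqWeight out;
          out.scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
          for (float v : w)
            out.q.push_back(static_cast<int8_t>(std::lround(v / out.scale)));
          return out;
        }

        // DQ side: reconstruct the float weights consumers actually read.
        std::vector<float> Dequantize(const QdqWeight& w) {
          std::vector<float> out;
          for (int8_t q : w.q) out.push_back(w.scale * q);
          return out;
        }
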
  10. tensorflow/compiler/mlir/tfr/tests/rewrite_quantized_io.mlir

    // CHECK-LABEL: remove_quantized_io
    func.func @remove_quantized_io(
      %arg0: tensor<1x10x!quant.uniform<i8:f32, 0.1:-128>>,
      %arg1: tensor<1x5xf32>) -> (tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>, tensor<1x5xf32>) {
      %0 = "tf.MyRequantize"(%arg0) : (tensor<1x10x!quant.uniform<i8:f32, 0.1:-128>>) -> tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>
      %1 = "tf.Intermediate"(%arg1) : (tensor<1x5xf32>) -> tensor<1x5xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 2.3K bytes
    - Viewed (0)
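
    The tf.MyRequantize in result 10 maps quant.uniform<i8:f32, 0.1:-128> to
    quant.uniform<i8:f32, 0.2:42>. Requantization between two affine
    parameter sets follows q2 = round((s1/s2) * (q1 - z1)) + z2; a standalone
    sketch using exactly those parameters (the helper name is illustrative):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <cstdio>

        // Requantize q1 from (scale1, zp1) to (scale2, zp2):
        //   real = scale1 * (q1 - zp1)
        //   q2   = clamp(round(real / scale2) + zp2, -128, 127)
        int8_t Requantize(int8_t q1, float scale1, int32_t zp1,
                          float scale2, int32_t zp2) {
          float real = scale1 * (q1 - zp1);
          int32_t q2 =
              static_cast<int32_t>(std::lround(real / scale2)) + zp2;
          return static_cast<int8_t>(std::clamp(q2, -128, 127));
        }

        int main() {
          // The parameters from the test above: 0.1:-128 -> 0.2:42.
          int8_t q2 = Requantize(/*q1=*/0, /*scale1=*/0.1f, /*zp1=*/-128,
                                 /*scale2=*/0.2f, /*zp2=*/42);
          std::printf("q1=0 requantizes to q2=%d\n", q2);  // 12.8/0.2+42 = 106
          return 0;
        }
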