Results 101 - 110 of 200 for requantize (0.28 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc

          func.return %add : tensor<3x2xf32>
        }
      }
    )mlir";
    
    // Quantizable ops: XlaCallModule op with "fully_quantizable" attribute and
    // same-scale StableHLO ops
    // Non-quantizable ops: quantize/dequantize ops
    constexpr absl::string_view kModuleCompositeSameScale = R"mlir(
      module {
        func.func @same_scale_after_composite() -> tensor<3x1xf32> {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 14.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc

                                           int32_t qmax) {
      auto quantize = [scale, zero_point](float f) {
        return zero_point + static_cast<int32_t>(std::round(f / scale));
      };
    
      if (rmin.has_value() && rmax.has_value()) {
        return {std::max(qmin, quantize(rmin.value())),
                std::min(qmax, quantize(rmax.value()))};
      } else if (rmin.has_value()) {
        return {std::max(qmin, quantize(rmin.value())), qmax};
      } else if (rmax.has_value()) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 17 19:57:04 UTC 2023
    - 3.3K bytes
    - Viewed (0)
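
The excerpt above maps the available real-valued bounds through the affine formula q = zero_point + round(r / scale) and clamps the result to the storage range. A minimal self-contained sketch of that logic, with a hypothetical function name standing in for the helper in numerical_utils.cc:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <utility>

    // Quantize whichever real bounds are present and clamp to [qmin, qmax];
    // a missing bound falls back to the corresponding storage limit.
    std::pair<int32_t, int32_t> QuantizedRange(
        double scale, int32_t zero_point, std::optional<double> rmin,
        std::optional<double> rmax, int32_t qmin, int32_t qmax) {
      auto quantize = [scale, zero_point](double r) {
        return zero_point + static_cast<int32_t>(std::round(r / scale));
      };
      if (rmin.has_value() && rmax.has_value())
        return {std::max(qmin, quantize(*rmin)), std::min(qmax, quantize(*rmax))};
      if (rmin.has_value()) return {std::max(qmin, quantize(*rmin)), qmax};
      if (rmax.has_value()) return {qmin, std::min(qmax, quantize(*rmax))};
      return {qmin, qmax};
    }

    int main() {
      // Real range [-1, 1], scale 1/127, zero point 0, int8 storage.
      auto [lo, hi] = QuantizedRange(1.0 / 127, 0, -1.0, 1.0, -128, 127);
      std::cout << lo << " " << hi << "\n";  // prints "-127 127"
    }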
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_drq.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-prepare-quantize-drq -quant-quantize='weight-quantization=true' -verify-each=false | FileCheck %s
    
    // -----
    
    module {
      func.func @matmul(%arg0: tensor<1x2x2x3xf32>) -> (tensor<*xf32>) {
        %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2x1024xf32>} : () -> tensor<2x1024xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc

        Value operand = op->getOperand(index);
        auto inserted_value = values->insert(operand).second;
        if (inserted_value) continue;
    // We can only clone a constant op or a const->dequantize combo. The
    // latter case is useful for float16 quantization. Since all ops have been
    // legalized to tflite ops, we only care about ConstOp, QConstOp, or the
    // mlir constant op.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 5.9K bytes
    - Viewed (0)
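
The values->insert(operand).second check above is the standard set-insertion idiom for spotting an operand that is used more than once. A toy illustration of the idiom, separate from the TFLite pass itself:

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // std::set::insert returns {iterator, bool}; the bool is false when the
    // element was already present, flagging an operand shared by multiple
    // uses and therefore a candidate for splitting by cloning its constant.
    int main() {
      std::vector<std::string> operands = {"%cst_0", "%cst_1", "%cst_0"};
      std::set<std::string> seen;
      for (const auto& operand : operands) {
        if (seen.insert(operand).second) continue;  // first occurrence
        std::cout << operand << " is reused and would be cloned\n";
      }
    }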
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc

    // This is the argument used to refer to the pass in
    // the textual format (for example, on the command line).
        return "quant-quantize-weights";
      }
    
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Quantize weights used by quantizable ops.";
      }
    
      void getDependentDialects(DialectRegistry& registry) const override {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 11.3K bytes
    - Viewed (0)
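
getArgument() and getDescription() are the hooks an MLIR pass overrides so the driver can expose it on the command line. A skeletal no-op illustration with a hypothetical pass name (this is not the quantize-weights pass itself):

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/Pass.h"

    namespace {
    // Minimal no-op pass: the argument string names the pass in the textual
    // pipeline (-my-example-pass); the description appears in --help output.
    struct MyExamplePass
        : public mlir::PassWrapper<MyExamplePass,
                                   mlir::OperationPass<mlir::ModuleOp>> {
      llvm::StringRef getArgument() const final { return "my-example-pass"; }
      llvm::StringRef getDescription() const final {
        return "An illustrative pass that does nothing.";
      }
      void runOnOperation() override {}
    };
    }  // namespace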
  6. tensorflow/compiler/mlir/lite/quantization/quantization_info.proto

    message QuantizationInfo {
      // min/max of the per-axis value range. To quantize the value, the metadata
      // of the target properties should be specified or read from the op's
      // quantization specification.
      message MinMax {
        float min = 1;
        float max = 2;
      }
    
      // Affine parameters to quantize the per-axis value. The metadata of the
      // target properties should be specified as well.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 08 03:45:04 UTC 2019
    - 2.3K bytes
    - Viewed (0)
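
The affine parameters referred to above relate to a MinMax record through the standard asymmetric scheme: scale = (max - min) / (qmax - qmin) and zero_point = qmin - round(min / scale). A worked example for int8 storage, illustrative rather than taken from the proto's consumers:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main() {
      float min = -1.0f, max = 3.0f;    // a per-axis MinMax record
      int32_t qmin = -128, qmax = 127;  // int8 storage range
      float scale = (max - min) / static_cast<float>(qmax - qmin);
      int32_t zero_point =
          qmin - static_cast<int32_t>(std::round(min / scale));
      // scale ~= 0.0157, zero_point = -64; real 0.0 maps to q = -64.
      std::cout << "scale=" << scale << " zero_point=" << zero_point << "\n";
    }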
  7. tensorflow/compiler/mlir/lite/tests/split-merged-operands.mlir

      // CHECK-DAG:  %[[CST_1:.*]] = "tfl.pseudo_const"() <{value = dense<0.000000e+00> : tensor<4x4xf16>}> : () -> tensor<4x4xf16>
      // CHECK-DAG:  %[[DQ_0:.*]] = "tfl.dequantize"(%[[CST_0]]) : (tensor<4x4xf16>) -> tensor<4x4xf32>
      // CHECK-DAG:  %[[DQ_1:.*]] = "tfl.dequantize"(%[[CST_1]]) : (tensor<4x4xf16>) -> tensor<4x4xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 7.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/post_calibration_component.mlir

    // CHECK-NO-UNPACK: %[[DEQUANTIZE:.+]] = stablehlo.uniform_dequantize %[[QUANTIZE_1]] : (tensor<1x3x!quant.uniform<i8:f32, {{.*}}>>) -> tensor<1x3xf32>
    // CHECK-NO-UNPACK: return %[[DEQUANTIZE]] : tensor<1x3xf32>
    
    // -----
    
    // Tests that a simple dot_general without CustomAggregators is not quantized.
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.7K bytes
    - Viewed (0)
  9. tensorflow/cc/framework/fuzzing/op_fuzzing.bzl

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 07 19:14:57 UTC 2022
    - 4.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto

        METHOD_UNSPECIFIED = 0;  // go/do-include-enum-unspecified
    
        // Apply default weight-only quantization. Weights are quantized during
        // conversion, then dequantized during inference.
        // Activation: f32, Weight: qi8, Bias: f32
        WEIGHT_ONLY = 1;
    
    // Apply default dynamic range quantization. Quantized tensor values'
    // ranges are determined at graph runtime.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 22 02:20:05 UTC 2023
    - 3.6K bytes
    - Viewed (0)
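
The WEIGHT_ONLY comment above describes a quantize-at-conversion, dequantize-at-inference round trip. A toy sketch of that flow, with a hand-picked symmetric scale rather than the converter's actual parameter selection:

    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<float> weights = {-0.5f, 0.0f, 0.25f, 0.5f};
      const float scale = 0.5f / 127.0f;  // symmetric scale, zero point 0

      // Conversion time: store the weights as qi8.
      std::vector<int8_t> q;
      for (float w : weights)
        q.push_back(static_cast<int8_t>(std::round(w / scale)));

      // Inference time: expand back to f32 before the float kernel runs;
      // activations and bias stay in f32 throughout.
      for (int8_t v : q) std::cout << v * scale << " ";
      std::cout << "\n";  // ~ -0.5 0 0.25 0.5
    }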