Results 51 - 60 of 82 for dequantize (0.23 sec)

  1. tensorflow/compiler/mlir/lite/quantization/quantization_context.cc

          auto &requantize = states_manager_.GetOperandRequantizeState(op, i);
          if (state.IsEmpty() && requantize.pos == RequantizeState::NO_REQUANTIZE) {
            input_specs.push_back(original_input_specs[i]);
          } else if (requantize.pos == RequantizeState::ON_OUTPUT) {
            input_specs.push_back(TypeAttr::get(requantize.params));
          } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 08 01:38:03 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

      // ranges.
      bool SetInputNodesQuantizationParams(func::FuncOp func);
    
      // The function might contain more stats ops than required, and it will
      // introduce requantize if the calibration stats have conflicts. This method
      // tries to remove all the redundant stats ops.
      bool RemoveRedundantStats(func::FuncOp func);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc

      // Whether the func contains Quantize ops. This is used to determine whether
      // to use the quantization parameters from the fixed output range property.
      bool ContainsQuantizeOps(func::FuncOp func);
    
      QuantizationSpecs quant_specs_;
    
      Option<bool> enable_post_training_quantize_{
          *this, "post-training-quantize", llvm::cl::init(false),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/quantization_context.h

    struct RequantizeState {
      // Sometimes, we have to "requantize" the quantization result to satisfy all
      // the constraints. The "requantize" can happen either on the input or output
      // of the quantization result.
      enum RequantizePosition {
        NO_REQUANTIZE,
        ON_INPUT,
        ON_OUTPUT
      } pos = NO_REQUANTIZE;
    
      // Quantization parameters will be used to add the requantize ops.
      QuantParams params;
    };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 08 01:38:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
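
A rough, standalone sketch of how a record like RequantizeState is consumed, mirroring the branching shown in result 1 above. This is a toy illustration, not the TensorFlow code: QuantParams is replaced by a plain string, and the helper PlanRequantize is hypothetical.

    #include <iostream>
    #include <string>

    // Toy stand-in for the struct in quantization_context.h; the real
    // `params` field is a QuantParams, replaced here by a string.
    struct RequantizeState {
      enum RequantizePosition { NO_REQUANTIZE, ON_INPUT, ON_OUTPUT } pos = NO_REQUANTIZE;
      std::string params;
    };

    // Hypothetical helper: decide what a driver would do for one operand,
    // echoing the if/else chain in quantization_context.cc (result 1).
    std::string PlanRequantize(const RequantizeState& state) {
      switch (state.pos) {
        case RequantizeState::NO_REQUANTIZE:
          return "keep the original input spec";
        case RequantizeState::ON_INPUT:
          return "requantize the operand";
        case RequantizeState::ON_OUTPUT:
          return "requantize the result with " + state.params;
      }
      return "unreachable";
    }

    int main() {
      RequantizeState state;
      state.pos = RequantizeState::ON_OUTPUT;
      state.params = "i8 scale=0.5 zero_point=0";
      std::cout << PlanRequantize(state) << "\n";
    }
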
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc

        // This is the argument used to refer to the pass in
        // the textual format (on the commandline for example).
        return "quant-quantize-weights";
      }
    
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Quantize weights used by quantizable ops.";
      }
    
      void getDependentDialects(DialectRegistry& registry) const override {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 11.3K bytes
    - Viewed (0)
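
For context on how getArgument, getDescription, and getDependentDialects fit together, here is a minimal pass skeleton written against the standard mlir::PassWrapper API. The class name ExampleQuantizeWeightsPass and the argument string "example-quantize-weights" are hypothetical; this is a sketch, not the real QuantizeWeightsPass from the file above.

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/Pass.h"

    namespace {

    class ExampleQuantizeWeightsPass
        : public mlir::PassWrapper<ExampleQuantizeWeightsPass,
                                   mlir::OperationPass<mlir::ModuleOp>> {
     public:
      // Textual name used to refer to the pass on the command line,
      // e.g. some-opt -example-quantize-weights.
      llvm::StringRef getArgument() const final {
        return "example-quantize-weights";
      }

      // Brief description printed in the pass help text.
      llvm::StringRef getDescription() const final {
        return "Example sketch: quantize weights used by quantizable ops.";
      }

      // Dialects whose ops the pass may create must be declared so they are
      // loaded before the pass runs.
      void getDependentDialects(mlir::DialectRegistry& registry) const override {
        registry.insert<mlir::func::FuncDialect>();
      }

      void runOnOperation() override {
        // The real pass rewrites constant weights; the sketch leaves this empty.
      }
    };

    }  // namespace
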
  6. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h

        assert(scales_.size() == zero_points_.size());
      }
    
      // Quantize an Attribute by the quantization parameters. Return nullptr if
      // the conversion fails or the input array isn't an ElementsAttr.
      ElementsAttr convert(Attribute real_value);
    
     private:
      // Quantize a DenseFPElementsAttr by the quantization parameters.
      DenseElementsAttr convert(DenseFPElementsAttr attr);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc

      EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
    }
    
    TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
      constexpr absl::string_view kQuantizeOp = R"mlir(
        func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
          %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

        // 1. Collect quantizable ops.
        QuantizationUnits quantizable_ops = GetQuantizableOps(op);
        if (quantizable_ops.empty()) {
          return failure();
        }
    
        // 2. Quantize collected ops.
        if (!QuantizeOps(rewriter, op, quantizable_ops)) {
          return failure();
        }
    
        // 3. Complete the Q-DQ pair for each inference type.
        if (!ConvertToFloat16Constant(rewriter, op)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
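
As a rough illustration of the three numbered steps in this snippet (collect quantizable ops, quantize them, complete the Q-DQ pair), here is a standalone toy sketch with no MLIR dependency. The Op struct and the helpers below are hypothetical stand-ins for GetQuantizableOps, QuantizeOps, and ConvertToFloat16Constant.

    #include <iostream>
    #include <string>
    #include <vector>

    struct Op { std::string name; bool quantizable = false; };

    // Step 1 stand-in: pick out the ops that can be quantized.
    std::vector<Op*> GetQuantizableOps(std::vector<Op>& ops) {
      std::vector<Op*> result;
      for (Op& op : ops)
        if (op.quantizable) result.push_back(&op);
      return result;
    }

    // Step 2 stand-in: "quantize" each collected op.
    bool QuantizeOps(const std::vector<Op*>& ops) {
      for (Op* op : ops) std::cout << "quantizing " << op->name << "\n";
      return !ops.empty();
    }

    // Step 3 stand-in: complete the Q-DQ pair for the inference type.
    bool ConvertToFloat16Constant(const std::vector<Op*>& ops) {
      return !ops.empty();
    }

    // The overall flow: bail out (no match) as soon as any step fails.
    bool RewriteWeights(std::vector<Op>& ops) {
      auto quantizable = GetQuantizableOps(ops);   // 1. Collect.
      if (quantizable.empty()) return false;
      if (!QuantizeOps(quantizable)) return false; // 2. Quantize.
      return ConvertToFloat16Constant(quantizable); // 3. Complete Q-DQ.
    }

    int main() {
      std::vector<Op> ops = {{"const_weight", true}, {"reshape", false}};
      std::cout << (RewriteWeights(ops) ? "rewritten" : "no match") << "\n";
    }
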
  9. tensorflow/compiler/aot/BUILD

    )
    
    filegroup(
        name = "quantize_header",
        srcs = ["quantize.h"],
        visibility = ["//visibility:public"],
    )
    
    cc_library(
        name = "tfcompile_lib",
        srcs = [
            "codegen.cc",
            "compile.cc",
            "flags.cc",
        ],
        hdrs = [
            "codegen.h",
            "compile.h",
            "flags.h",
            "quantize.h",
        ],
        compatible_with = [],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 11 16:13:05 UTC 2024
    - 11.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' -quant-quantize-composite-functions='quantization-method=weight_only target-opset=XLA enable-per-channel-quantization=true' -symbol-dce...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
    - Viewed (0)