Results 21 - 30 of 41 for calibration (0.14 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.cc

    #include "mlir/IR/MLIRContext.h"  // from @llvm-project
    #include "mlir/IR/OwningOpRef.h"  // from @llvm-project
    #include "mlir/Pass/PassManager.h"  // from @llvm-project
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/component.h"
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/context.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/pre_calibration_component.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -verify-diagnostics \
    // RUN:   -stablehlo-test-pre-calibration-component | FileCheck %s
    
    func.func @main(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
      %0 = stablehlo.constant dense<1.0> : tensor<4x3xf32>
      %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
      return %1 : tensor<1x3xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 5.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

            quantization_method,
        tensorflow::quantization::OpSet op_set, int min_num_elements_for_weights);
    
    // Replaces tf.CustomAggregator ops with quant.Stats ops for finalizing the
    // calibration procedure.
    std::unique_ptr<OperationPass<func::FuncOp>>
    CreateConvertCustomAggregationOpToQuantStatsPass();
    
    // Inserts quantized function library.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-insert-calibration-statistics-saver='aggregator-ops-to-ignore=skipping_id' | FileCheck %s
    
    func.func @serving_default(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x2x2x2xf32>) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

        )
    
        # Run quantization the first time, calibration is expected to be run.
        with self.assertLogs(level='INFO') as info_logs:
          quantization.quantize_saved_model(
              self._input_saved_model_path,
              self._output_saved_model_path,
              config,
          )
          self.assertTrue(
              self._any_log_contains(
                  'Calibration step is executed in graph mode.',
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
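    The test in result 5 checks that calibration runs when a SavedModel is quantized through the Python entry point shown above. A minimal usage sketch of that call follows; the import paths and the way `config` is constructed are assumptions inferred from this result and result 1, not verified API documentation.

    # Hypothetical sketch based on the test snippet above. The import paths and
    # the QuantizationConfig construction are assumptions, not verified API docs.
    from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as qc
    from tensorflow.compiler.mlir.quantization.stablehlo.python import quantization

    # A static-range PTQ config; in practice a preset and a representative
    # dataset for calibration would be filled in here (assumed fields omitted).
    config = qc.QuantizationConfig()

    quantization.quantize_saved_model(
        '/tmp/float_saved_model',      # hypothetical input SavedModel path
        '/tmp/quantized_saved_model',  # hypothetical output path
        config,                        # calibration is executed as part of this call
    )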
  6. tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.cc

        // the StableHLO functions to the top level module. This is needed for
        // StableHLO quantization. Also restores some shape information for
        // XlaCallModuleOps and CustomAggregatorOps lost from the calibration step.
        AddXlaCallModuleOpDeserializationPasses(pm_after_freezing_variables);
      }
    
      if (const auto pre_variable_freezing_status = RunPassesOnModuleOp(
              /*mlir_dump_file_name=*/absl::StrCat(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

            double rmax = FloatAttr::getValueAsDouble(*it);
            // The default nudging implementation of mlir quant library might cause
            // clamping during inference if the calibration range isn't wide enough.
            // So here we adjust the range to include 0.0.
            rmin = std::min(rmin, 0.0);
            rmax = std::max(rmax, 0.0);
            if (num_bits == 16) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
    - Viewed (0)
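    The comment in result 7 explains that the calibrated range is widened to include 0.0 so that the quant library's nudging cannot clamp real activations at inference time. A minimal Python sketch of that adjustment, together with the standard asymmetric int8 parameters it feeds into (an illustration of the arithmetic, not the library's actual code):

    def nudge_calibration_range(rmin: float, rmax: float) -> tuple[float, float]:
        # Widen the calibrated range so it always contains 0.0, mirroring the
        # rmin/rmax adjustment in the snippet above.
        return min(rmin, 0.0), max(rmax, 0.0)

    def int8_scale_and_zero_point(rmin: float, rmax: float) -> tuple[float, int]:
        # Standard asymmetric int8 quantization parameters for the widened range
        # (generic arithmetic, assumed here for illustration only).
        rmin, rmax = nudge_calibration_range(rmin, rmax)
        scale = (rmax - rmin) / 255.0
        zero_point = round(-128 - rmin / scale) if scale else 0
        return scale, zero_point

    # Example: a calibration range of [0.5, 6.0] is widened to [0.0, 6.0].
    print(int8_scale_and_zero_point(0.5, 6.0))  # scale ~0.0235, zero_point -128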
  8. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

        elem_type = mlir::IntegerType::get(elem_type.getContext(),
                                           elem_type.getIntOrFloatBitWidth());
      }
    
      // Intermediate tensors with calibration value (but not scale and zero points)
      // should return calibrated quantized type.
      if (is_intermediate && tensor.quantization != nullptr &&
          !IsQuantized(tensor)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

      // ranges.
      bool SetInputNodesQuantizationParams(func::FuncOp func);
    
      // The function might contain more stats ops than required, and it will
      // introduce requantize if the calibration stats have conflicts. This method
      // tries to remove all the redundant stats ops.
      bool RemoveRedundantStats(func::FuncOp func);
    
      // Verify the quantization specification is expected for quantizing the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc

      // ranges.
      bool SetInputNodesQuantizationParams(func::FuncOp func);
    
      // The function might contain more stats ops than required, and it will
      // introduce requantize if the calibration stats have conflicts. This method
      // tries to remove all the redundant stats ops.
      bool RemoveRedundantStats(func::FuncOp func);
    
      // Verify the quantization specification is expected for quantizing the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.2K bytes
    - Viewed (0)
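    Results 9 and 10 carry the same comment: surplus stats ops whose calibration ranges conflict force a requantize, so the pass tries to drop the redundant ones. A toy Python illustration of that distinction (only a sketch of the idea, not the RemoveRedundantStats algorithm):

    def collapse_stats(stats: list[tuple[float, float]]) -> list[tuple[float, float]]:
        # Identical (min, max) calibration ranges recorded on the same value are
        # redundant and can collapse to one entry; distinct ranges must both be
        # kept, and quantizing under both implies a requantize between them.
        return list(dict.fromkeys(stats))

    assert collapse_stats([(0.0, 6.0), (0.0, 6.0)]) == [(0.0, 6.0)]                  # redundant: one stats op suffices
    assert collapse_stats([(0.0, 6.0), (-1.0, 5.0)]) == [(0.0, 6.0), (-1.0, 5.0)]    # conflict: requantize needed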