- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 19 of 19 for calibration (0.32 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
# Set `force_graph_mode_calibration` to True to avoid skipping op execution, # which are not connected to return ops, during calibration execution. # TODO: b/335031954 - Bring back support to run calibration in Eager mode. logging.debug( 'Setting `force_graph_mode_calibration = True` to ensure the calibration' ' mode is executed properly.' ) quantization_options.force_graph_mode_calibration = True
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
#include "mlir/Support/LLVM.h" // from @llvm-project #include "tensorflow/cc/saved_model/loader.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/context.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-insert-calibration-statistics-saver='aggregator-ops-to-ignore=skipping_id' | FileCheck %s func.func @serving_default(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x2x2x2xf32>) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
) # Run quantization the first time, calibration is expected to be run. with self.assertLogs(level='INFO') as info_logs: quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) self.assertTrue( self._any_log_contains( 'Calibration step is executed in graph mode.',
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
# Restore the logger verbosity. logging.set_verbosity(prev_log_level) self.assertNotEmpty(info_logs.records) self.assertTrue( self._any_log_contains( 'Calibration step is executed in graph mode.', info_logs.records, ) ) class TensorNamePreservationTest(quantize_model_test_base.QuantizedModelTest):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc
FlatSymbolRefAttr::get(builder.getStringAttr(func_name))); // Store the custom attribute to restore the function name when loading it // back in the post calibration stage. As mentioned above, the above entry // function attribute is not reliable. call_op->setAttr(kOriginalStablehloEntryFunctionAttrName, builder.getStringAttr(func_name));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
input_model_ = ReadModel(internal::kConvModelWith0Plus10Weights); readonly_model_ = input_model_->GetModel(); model_ = UnPackFlatBufferModel(*readonly_model_); // Flatbuffer is missing calibration data -- add dummy params. auto& subgraph = model_.subgraphs[0]; auto* input = subgraph->tensors[subgraph->inputs[0]].get(); auto* output = subgraph->tensors[subgraph->outputs[0]].get();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc
#include "tsl/platform/status.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace { using ::stablehlo::quantization::CalibrationOptions; using ::tensorflow::calibrator::CalibrationStatistics; using ::tensorflow::calibrator::CalibrationStatisticsMap; using ::testing::Contains; using ::testing::ElementsAre; using ::testing::HasSubstr; using ::testing::Key; using ::testing::SizeIs; using ::tsl::testing::StatusIs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 13 01:31:23 UTC 2024 - 11.4K bytes - Viewed (0) -
RELEASE.md
* `converter.convert()` no longer returns a `tf.function`. Now the function must be accessed from the saved model. * The `converter.calibrate()` method has been removed. To trigger calibration, a `calibration_input_fn` should be provided to `converter.convert()`. * Other: * Fix accidental quadratic graph construction cost in graph-mode `tf.gradients()`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 730.3K bytes - Viewed (0)