- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for merge_fusion_with_dequantize_ (0.47 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc
// 2. Not supported, e.g. add op for server. pm.addPass(createXlaCallModuleToCallPass()); // TODO: b/321729008 - move this implementation to quantization_patterns.cc. if (merge_fusion_with_dequantize_) { pm.addPass(createMergeFusionWithDequantizePass()); } ModuleOp module_op = getOperation(); if (const absl::Status pm_run_status =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 02:59:01 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
options.enable_per_channel_quantized_weight_ = true; // For debugging purposes. options.mlir_dump_file_name_ = "quantize_composite_functions"; options.merge_fusion_with_dequantize_ = pipeline_config.merge_fusion_with_dequantize(); AddShapeLegalizationPasses(pm); pm.addNestedPass<func::FuncOp>( CreateConvertCustomAggregationOpToQuantStatsPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
Option<"mlir_dump_file_name_", "mlir-dump-file-name", "std::optional<std::string>", /*default=*/"std::nullopt", "MLIR dump file name.">, Option<"merge_fusion_with_dequantize_", "merge-fusion-with-dequantize", "bool", /*default=*/"false", "Whether to merge quantized conv/dot_general fusion with subsequent dequantize.">, ]; let dependentDialects = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
Christian Sigg <******@****.***> 1714060318 -0700
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
([2, 3, 1, 1024], [2, 3, 1024, 3]), ), 'merge_fusion_with_dequantize': (False, True), }]) ) @test_util.run_in_graph_and_eager_modes def test_matmul_ptq_model( self, bias_fn: Optional[ops.Operation], activation_fn: Optional[ops.Operation], dim_sizes: Sequence[int], merge_fusion_with_dequantize: bool, ): lhs_dim_size, rhs_dim_size = dim_sizes
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// subsequent dequantize op if present. // Default value: false // TODO: b/321729008 - re-consider default value after testing on prod model. bool merge_fusion_with_dequantize = 2; } // Represents a single quantizable unit, a (nearly) minimum unit of work when // applying quantization. It may correspond to a single or multiple ops. // Next ID: 2 message QuantizableUnit {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/BUILD
"passes/lift_quantizable_spots_as_functions.cc", "passes/lift_quantizable_spots_as_functions_fusion.inc", "passes/lift_quantizable_spots_as_functions_simple.inc", "passes/merge_fusion_with_dequantize.cc", "passes/nchw_convolution_to_nhwc.cc", "passes/optimize_graph.cc", "passes/post_quantize.cc", "passes/prepare_quantize.cc", "passes/quantize.cc",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 02:59:01 UTC 2024 - 28.3K bytes - Viewed (0)