- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 306 for Quantized (0.43 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h
#include "mlir/Pass/Pass.h" // from @llvm-project namespace mlir::quant::stablehlo { // Creates an instance of the ConvertTFQuantOpsToMHLOPass pass, which will // convert TF uniform quantized ops to the corresponding quantized MHLO ops. std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertTFQuantOpsToMHLOPass(); // TODO(b/288094093): Migrate uniform quantization legalization in a separate // pass.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 23 01:41:18 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
QuantizationUnits& quantizable_ops) const { bool quantized = false; // TODO(b/212514817): refactor mode checking to improve code quality for (auto& quant_op : quantizable_ops) { if (quant_specs_.inference_type == tensorflow::DT_QINT8) { quantized |= quantizeOpAsInt8(rewriter, op, quant_op); } else if (quant_specs_.inference_type == tensorflow::DT_HALF) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_drq.mlir
// RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=drq' | FileCheck %s // RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' | FileCheck --check-prefix=UQ-CHECK %s // Empty module module { func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> { func.return %arg0 : tensor<*xf32> } } // CHECK-NOT: func private @internal_calculate_quant_params
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h
// identifies the `MetaGraphDef`. `quantization_config` determines the behavior // of StableHLO Quantizer. `quantization_py_function_lib` contains python // implementations of certain APIs that are required for calibration. // `module_op` is the input graph to be quantized and it should contain // StableHLO ops. // // Returns a quantized `ModuleOp` in StableHLO, potentially wrapped inside a
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 02:44:03 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
class RewriteQuantizedIOPass : public PassWrapper<RewriteQuantizedIOPass, OperationPass<ModuleOp>> { public: StringRef getArgument() const final { return "tfr-rewrite-quantized-io"; } StringRef getDescription() const final { return "Replaces operands and results that have a quantized type with their " "storage types."; } void runOnOperation() override; }; void RewriteQuantizedIOPass::runOnOperation() {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc
limitations under the License. ==============================================================================*/ // Rewrites ops that require quantized inputs or outputs to ops that allow // non-quantized inputs and outputs. #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_op_with_region.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s // Tests if reduce_window op following quantized function is quantized. module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1722 : i32}, tf_saved_model.semantics} { // CHECK-LABEL: main_00 // CHECK-SAME: %[[ARG0:.*]]: tensor<2x3x1x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 18.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
// execution to be safe. Although normally they should support float // execution. Not Quantized ops. if (!int8_type_observed && !uint8_type_observed) return; // Insert dequantize ops for every quantized input. SmallVector<Value, 4> dequantized_inputs; for (auto& input : op->getOpOperands()) { auto input_type = input.get().getType();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
GetAsVector(expected_tensor->shape())); } // Finds the match of the quantized tensor from the possible tensors. Each // possible tensors can be used only once. It checks shape and name if the // tensor is quantized and also checks buffer contents and tensor type if not // quantized. For the quantized case, tensor type and quantization params are // expected to be checked in the test body with the match.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h
const std::string& hardware); // Convert quantized ops to float, this will essentially insert dequantize & // quantize pair around the op. void ConvertQuantizedOpToFloat(func::FuncOp func, OpBuilder* builder); // This will optimize the quantized ops -> float graph. void OptimizeQuantizedOpToFloat(func::FuncOp func, MLIRContext* context); } // namespace tac
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 07 18:43:51 UTC 2022 - 2K bytes - Viewed (0)