Results 51 - 60 of 323 for quantized (1.73 sec)
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
  QuantizationUnits& quantizable_ops) const {
    bool quantized = false;
    for (auto& quant_op : quantizable_ops) {
      if (quant_specs_.inference_type == tensorflow::DT_QINT8) {
        quantized |= quantizeOpAsInt8(rewriter, op, quant_op);
      }
    }
    return quantized;
  }

 protected:
  QuantizationSpecs quant_specs_;
  OpSet op_set_;

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0)
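The dispatch above only picks which ops to quantize; the actual int8 conversion chooses a scale and rounds the float weights. A minimal standalone sketch of symmetric per-tensor int8 quantization, the kind of transform `quantizeOpAsInt8` applies (the helper below is hypothetical, not TensorFlow's implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Symmetric per-tensor int8 quantization: map [-max|w|, max|w|] onto
    // [-127, 127] with a single scale, as dynamic-range quantization does.
    std::vector<int8_t> QuantizeWeightsAsInt8(const std::vector<float>& weights,
                                              float& scale_out) {
      float max_abs = 0.0f;
      for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));
      scale_out = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;  // avoid div by 0
      std::vector<int8_t> quantized;
      quantized.reserve(weights.size());
      for (float w : weights) {
        long q = std::lround(w / scale_out);
        quantized.push_back(static_cast<int8_t>(std::clamp<long>(q, -127, 127)));
      }
      return quantized;
    }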
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
    GetAsVector(expected_tensor->shape()));
  }

  // Finds the match of the quantized tensor from the possible tensors. Each
  // possible tensor can be used only once. It checks shape and name if the
  // tensor is quantized, and also checks buffer contents and tensor type if
  // not quantized. For the quantized case, tensor type and quantization
  // params are expected to be checked in the test body with the match.

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0)
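The matching rule in that comment is easy to state as code. A standalone sketch under simplified assumptions (the `Tensor` struct below is a stand-in for the flatbuffer tensor, not the test's real types):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Tensor {  // Simplified stand-in for a flatbuffer tensor.
      std::string name;
      std::vector<int> shape;
      std::vector<uint8_t> buffer;
      int type = 0;
      bool quantized = false;
    };

    // Each candidate may be used only once (`used` has one entry per
    // candidate, initially false). Quantized tensors match on shape and name
    // only; type and quantization params are checked later, in the test body.
    // Float tensors must also match buffer contents and tensor type.
    int FindMatch(const Tensor& expected, const std::vector<Tensor>& candidates,
                  std::vector<bool>& used) {
      for (size_t i = 0; i < candidates.size(); ++i) {
        if (used[i]) continue;
        const Tensor& c = candidates[i];
        if (c.shape != expected.shape || c.name != expected.name) continue;
        if (!expected.quantized &&
            (c.buffer != expected.buffer || c.type != expected.type))
          continue;
        used[i] = true;
        return static_cast<int>(i);
      }
      return -1;  // no unused candidate matched
    }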
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
  // in the quantized model. Both models are required to be able to dump
  // both quantized and unquantized tensors and compare them offline.
  if (quantization_options.has_debugger_config() &&
      quantization_options.debugger_config().debugger_type() ==
          DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL) {
    TF_ASSIGN_OR_RETURN(
        ExportedModel debugging_exported_model,

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0)
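With the whole-model debugger, both the quantized and the unquantized model get exported so their tensors can be compared offline. The comparison step itself is a plain element-wise diff; a hypothetical sketch (not the debugger's actual dump format or tooling):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Largest absolute difference between a tensor dumped from the unquantized
    // model and the same tensor dumped from the quantized model.
    float MaxAbsError(const std::vector<float>& unquantized,
                      const std::vector<float>& quantized) {
      float max_err = 0.0f;
      size_t n = std::min(unquantized.size(), quantized.size());
      for (size_t i = 0; i < n; ++i)
        max_err = std::max(max_err, std::fabs(unquantized[i] - quantized[i]));
      return max_err;
    }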
tensorflow/compiler/mlir/quantization/tensorflow/exported_model.proto
  string checkpoint_dir = 5;

  // Function name -> function alias mapping. This associates the quantized
  // functions with the original functions' aliases. This information will be
  // used to populate the `function_aliases` field of `MetaInfoDef` when the
  // quantized model is exported to the saved model. This field is usually
  // only populated for TF2 models.
  map<string, string> function_aliases = 6;

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 06:12:59 UTC 2023 - 2.1K bytes - Viewed (0)
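Since the field is just a `map<string, string>`, carrying aliases across quantization amounts to re-keying the map when functions are renamed. A minimal sketch, assuming a hypothetical old-name -> new-name rename table (not code from the exporter):

    #include <map>
    #include <string>

    // Re-key the function-name -> alias map so each alias follows its
    // function to the function's post-quantization name.
    std::map<std::string, std::string> UpdateFunctionAliases(
        const std::map<std::string, std::string>& aliases,
        const std::map<std::string, std::string>& renamed) {
      std::map<std::string, std::string> updated;
      for (const auto& [name, alias] : aliases) {
        auto it = renamed.find(name);
        updated[it != renamed.end() ? it->second : name] = alias;
      }
      return updated;
    }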
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -verify-diagnostics \
// RUN:   -stablehlo-quantize-composite-functions | FileCheck --check-prefix=CHECK %s

// Test that a per-tensor weight-only quantized dot_general op is produced
// when an empty `weight_only_ptq` is provided.
module attributes {tf_saved_model.semantics} {
  func.func private @quantize_dot_general_per_tensor(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} {

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 9.4K bytes - Viewed (0)
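In weight-only PTQ only the weights are stored as int8; activations stay float, and the dot_general dequantizes the weights through a single per-tensor scale. A standalone sketch of that arithmetic (a toy dot product, not the StableHLO lowering):

    #include <cstdint>
    #include <vector>

    // y[j] = sum_i x[i] * (w_q[i][j] * scale): float activations against
    // int8 weights dequantized with one per-tensor scale.
    std::vector<float> WeightOnlyDot(const std::vector<float>& x,
                                     const std::vector<std::vector<int8_t>>& w_q,
                                     float scale) {
      std::vector<float> y(w_q.empty() ? 0 : w_q[0].size(), 0.0f);
      for (size_t i = 0; i < x.size(); ++i)
        for (size_t j = 0; j < y.size(); ++j)
          y[j] += x[i] * static_cast<float>(w_q[i][j]) * scale;
      return y;
    }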
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
    if bias_fn:
      self.assertTrue(re.search('stablehlo.add.*xi32>', module_str))
    # Consider if there is a way to check if activation fusion is properly
    # done at the MLIR level.

    # Tests that the quantized graph outputs similar values. The rtol and
    # atol values are arbitrary.
    self.assertAllClose(new_outputs, expected_outputs, rtol=0.3, atol=0.2)

    # Due to other metadata, the compression is not exactly 1/4.

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0)
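`assertAllClose` applies the usual numpy-style tolerance rule, |actual - expected| <= atol + rtol * |expected|. A standalone sketch of the same check (in C++ rather than the test's Python):

    #include <cmath>
    #include <vector>

    // Every element must satisfy |a - b| <= atol + rtol * |b|; with the
    // test's values this is AllClose(new_outputs, expected_outputs, 0.3f, 0.2f).
    bool AllClose(const std::vector<float>& actual,
                  const std::vector<float>& expected, float rtol, float atol) {
      if (actual.size() != expected.size()) return false;
      for (size_t i = 0; i < actual.size(); ++i) {
        if (std::fabs(actual[i] - expected[i]) >
            atol + rtol * std::fabs(expected[i]))
          return false;
      }
      return true;
    }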
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
        op_set=target_opset,
    )

    if target_opset != quant_opts_pb2.XLA:
      # Uniform quantized opset is not supported for weight-only
      with self.assertRaisesRegex(
          ValueError,
          'TF/Uniform quantized opset does not support weight-only.'
      ):
        converted_model = quantize_model.quantize(
            input_saved_model_path,
            output_directory,
            quantization_options,

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0)
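The test pins down option validation: requesting weight-only quantization with anything but the XLA opset is rejected up front. A hypothetical standalone sketch of that guard (the enum and message are illustrative, not the real proto types):

    #include <stdexcept>

    enum class OpSet { TF, UNIFORM_QUANTIZED, XLA };  // illustrative only

    // Mirror of the ValueError raised above: weight-only quantization is
    // only accepted together with the XLA opset.
    void ValidateQuantizationOptions(OpSet op_set, bool weight_only) {
      if (weight_only && op_set != OpSet::XLA) {
        throw std::invalid_argument(
            "TF/Uniform quantized opset does not support weight-only.");
      }
    }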
tensorflow/compiler/mlir/tfr/passes/passes.h
// Decompose ops.
std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass(
    std::optional<ModuleOp> tfr_module = std::nullopt);

// Rewrites quantized operands and results with their storage types. This
// pass should be run at module level after decomposition, if there are
// quantized operands or results.
std::unique_ptr<OperationPass<ModuleOp>> CreateRewriteQuantizedIOPass();

// Raise to TF ops.

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 2K bytes - Viewed (0)
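Rewriting quantized operands and results "with their storage types" means replacing the expressed quantized type at function boundaries by the integer type that physically stores it. A toy sketch of that mapping (a stand-in type model, not MLIR's type system):

    #include <string>

    struct ElemType {               // Toy stand-in for an MLIR element type.
      std::string printed = "f32";  // e.g. "f32" or "!quant.uniform<i8:f32, ...>"
      bool quantized = false;
      int storage_width = 8;        // storage width when quantized
    };

    // Quantized types become plain integers of their storage width ("i8");
    // everything else passes through unchanged.
    ElemType ToStorageType(const ElemType& t) {
      if (!t.quantized) return t;
      return {"i" + std::to_string(t.storage_width), false, t.storage_width};
    }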
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
              Eq(TensorType_INT8));

  // Verify that the FC bias is int32 quantized.
  ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(),
              Eq(TensorType_FLOAT32));
  EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type,
              Eq(TensorType_INT32));

  // The output tensor of the FC should be int8 quantized.
  ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0)
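The int32 bias follows from the int8 scheme: the matmul accumulates in int32, so the bias is quantized with scale input_scale * weight_scale and zero point 0. A standalone sketch of that bias quantization (illustrative, not the converter's code):

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // In an int8 fully-connected layer the accumulator is int32, so the bias
    // is stored as int32 with scale = input_scale * weight_scale.
    std::vector<int32_t> QuantizeBias(const std::vector<float>& bias,
                                      float input_scale, float weight_scale) {
      const float bias_scale = input_scale * weight_scale;
      std::vector<int32_t> q(bias.size());
      for (size_t i = 0; i < bias.size(); ++i)
        q[i] = static_cast<int32_t>(std::lround(bias[i] / bias_scale));
      return q;
    }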
tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc
      }
    }
    elem_type = mlir::TF::VariantType::get(tensor_types, builder.getContext());
  }

  if (IsQuantized(tensor) && !get_storage) {
    TF_ASSIGN_OR_RETURN(elem_type,
                        GetQuantizedType(tensor, builder, is_constant));
  } else if (IsQuantized(tensor) && get_storage) {
    // If the type is quantized we strip the signedness from the storage type.
    elem_type = mlir::IntegerType::get(elem_type.getContext(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 16.6K bytes - Viewed (0)
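"Stripping the signedness" reflects that MLIR's builtin integer types are signless by default, so the storage type keeps only the width. A toy sketch of the idea (a stand-in enum, not the MLIR API):

    enum class Signedness { Signed, Unsigned, Signless };

    struct IntType {  // Toy model of an integer storage type.
      int width;
      Signedness sign;
    };

    // Keep the width of the quantized storage type but drop its sign:
    // e.g. a signed 8-bit storage type becomes a signless i8.
    IntType StripSignedness(const IntType& t) {
      return IntType{t.width, Signedness::Signless};
    }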