- Sort: Score
- Results per page: 10
- Languages: All
Results 51 - 60 of 108 for Quantized (0.13 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -quant-quantize-composite-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -symbol-dce | FileCheck %s module { // TODO(b/260020937): Support transpose_a, transpose_b for matmul. func.func @matmul(%arg0: tensor<2x12xf32>) -> (tensor<*xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
"""Base test class for StableHLO quant tests.""" def setUp(self) -> None: super().setUp() # Many test cases for quantization involve creating and saving the input # model and saving the output quantized model. These two member # attributes can be used to specify the paths for such models, # respectively. These paths will be cleaned up after each test case.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc
quant::QuantizedType::getQuantizedElementType(input_type); auto output_quantized_type = quant::QuantizedType::getQuantizedElementType(output_type); // If both the input & output types are non-quantized, they will be both // nullptrs. if (input_quantized_type != output_quantized_type) { return failure(); } int64_t batch = input_type.getDimSize(0); int64_t height = input_type.getDimSize(1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 25.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
union QuantizationDetails { CustomQuantization, } // Parameters for converting a quantized tensor back to float. table QuantizationParameters { // These four parameters are the asymmetric linear quantization parameters. // Given a quantized value q, the corresponding float value f should be: // f = scale * (q - zero_point)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc
m_Constant(&filter_scale_attr))) { return failure(); } // The shape of scale_type is {} (rank 0) for per-tensor quantized tensor, // and {num_channels} (rank 1) for per-channel quantized one. auto scale_type = filter_scale_attr.getType().dyn_cast<RankedTensorType>(); if (scale_type.getRank() != 0 && scale_type.getRank() != 1) { return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 21 16:55:41 UTC 2023 - 38.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.h
// Uses the type of `val` to set the initial state of the index-th result if // `as_result` is true or index-th operand if `as_result` is false. The // state is immutable if the type is a quantized type. Returns the index of // this new state in the state vector. int InitializeState(quantfork::QuantizeRegionOp op, int index, bool as_result);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema.fbs
union QuantizationDetails { CustomQuantization, } // Parameters for converting a quantized tensor back to float. table QuantizationParameters { // These four parameters are the asymmetric linear quantization parameters. // Given a quantized value q, the corresponding float value f should be: // f = scale * (q - zero_point)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
// TODO(hinsu): Lower quantized types after supporting them in GetScalarOfType. def : Pat<(TF_ReluOp AnyTensor:$input), (CHLO_BroadcastMaxOp (MHLO_ConstantOp:$zero (GetScalarOfType<0> $input)), $input, (BinBroadcastDimensions $zero, $input)), [(TF_IntOrFpTensor $input)]>; // TODO(hinsu): Lower quantized types after supporting them in GetScalarOfType.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h
QuantTraitValues[QuantizationTrait::FullyQuantizable]; } // Returns true if `op` has two operands and one result and only second operand // is quantized. bool IsHybridQuantizedOp(Operation* op); // Returns whether a given `stablehlo.dot_general` can be legalizable to // `tfl.fully_connected`. absl::StatusOr<bool> IsDotGeneralFullyConnected(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc
for (const auto& it : llvm::enumerate(regions)) { llvm::SetVector<Value> region_extern_values; getUsedValuesDefinedAbove(*it.value(), region_extern_values); // Sink down constants (including quantized constant) into the functions. for (auto extern_value : region_extern_values) { if (!matchPattern(extern_value, m_Constant()) && !llvm::dyn_cast_or_null<TFL::QConstOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.8K bytes - Viewed (0)