- Sort Score
- Result 10 results
- Languages All
Results 141 - 150 of 291 for Quantized (1.15 sec)
-
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h
public: static int GetCoefficientOperandIndex() { return OperandIndex; } static int GetQuantizationDim() { return QuantDim; } }; }; // This class provides the API for ops that can be quantized. // This is as a trait like this: // // class LessOp : public Op<LessOp, OpTrait::quant::QuantizableResult> { // template <typename ConcreteType> class QuantizableResult
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=enable-per-channel-quantized-weight=true -verify-diagnostics | FileCheck %s // ----- module { // CHECK-LABEL: conv_with_bias_and_relu func.func private @conv_with_bias_and_relu(%arg0: tensor<1x3x2x3xf32>) -> tensor<1x2x2x2xf32> { %cst = "tf.Const"() {device = "", value = dense<[7.11401462, 7.05456924]> : tensor<2xf32>} : () -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 26 07:48:15 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.cc
if (auto typeAttr = mlir::dyn_cast<TypeAttr>(quantSpec)) { Type spec = typeAttr.getValue(); if (mlir::isa<TensorType, VectorType>(spec)) return false; // The spec should be either a quantized type which is compatible to the // expressed type, or a primitive type which is as same as the // (element type of) the expressed type. if (auto quantizedType = mlir::dyn_cast<QuantizedType>(spec))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.cc
if (auto typeAttr = mlir::dyn_cast<TypeAttr>(quantSpec)) { Type spec = typeAttr.getValue(); if (mlir::isa<TensorType, VectorType>(spec)) return false; // The spec should be either a quantized type which is compatible to the // expressed type, or a primitive type which is as same as the // (element type of) the expressed type. if (auto quantizedType = mlir::dyn_cast<QuantizedType>(spec))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/ConvertSimQuant.cc
bool failableRewrite(FakeQuantOp op, PatternRewriter &rewriter) const { auto converter = ExpressedToQuantizedConverter::forInputType(op.getType()); if (!converter) { return (op.emitError("unsupported quantized type conversion"), true); } quant::QuantizedType elementType = static_cast<const ConcreteRewriteClass *>(this) ->convertFakeQuantAttrsToType(op, converter.expressed_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 23:45:53 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla_selective_quantization.mlir
// This file tests the selective quantization feature in TF Quantizer. In the test // config, the op named "test_opt_out" will not be quantized. module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1269 : i32}} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc
auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType()); if (input_type == nullptr || !input_type.hasStaticShape()) continue; // Quantized type does not support getSizeInBits. if (IsQUI8Type(input_type) || IsQI8Type(input_type)) { total_size_transferred += input_type.getNumElements() * 8; } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir
// Quantized tfl.add // CHECK: %[[add:.*]] = tfl.add(%[[q1]], %[[q0]]) <{fused_activation_function = "NONE"}> : (tensor<2x2x!quant.uniform<u8:f32, 0.0078431372549019607:128>> // CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[add]]) : (tensor<2x2x!quant.uniform<u8:f32, 0.0078431372549019607:128>>) // CHECK: return %[[dq]] } // CHECK-LABEL: hardcode_input
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 8.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"), clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED", "Uses TF Uniform Quantized ops"))}; // Initialize for tests. void initializeForTest() { if (!test_mode_) return; op_set_.setCallback([this](const OpSet& new_op_set) { quant_options_.set_op_set(new_op_set);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0)