- Sort Score
- Result 10 results
- Languages All
Results 61 - 70 of 193 for Quantile (0.13 sec)
-
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
assert(scales_.size() == zero_points_.size()); } // Quantize an Attribute by the quantization parameters. Return nullptr if // the conversion fails or the input array isn't an ElementsAttr. ElementsAttr convert(Attribute real_value); private: // Quantize a DenseFPElementsAttr by the quantization parameters. DenseElementsAttr convert(DenseFPElementsAttr attr);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// Finally, use the quantization parameter to create the quantize and // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp // and its users. auto quantize = rewriter.create<quantfork::QuantizeCastOp>( tf_op.getLoc(), qtype.getValue(), input); auto dequantize = rewriter.create<quantfork::DequantizeCastOp>( tf_op.getLoc(), res_type, quantize.getResult());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
func.return %dot_out : tensor<*x!tf_type.qint32> } // Quantize initial input at the start of the graph. Output is qint8. func.func @quantize_i8(%input : tensor<*xf32>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>) -> tensor<*x!tf_type.qint8> { %quantize = "tf.UniformQuantize"(%input, %input_scale, %input_zp) { Tin = "tfdtype$DT_FLOAT", Tout = "tfdtype$DT_QINT8",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-tf-quantize.mlir
A. Unique TensorFlower <******@****.***> 1713119208 -0700
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 18:33:43 UTC 2024 - 1.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
// The input of the quantize op has already been quantized, i.e. // rescale. return failure(); } Operation* operand_op = operand.getDefiningOp(); if (operand_op == nullptr) { // When `QuantizeOpT`'s operand does not have a defining op, it means it // is a `BlockArgument`. The pattern does not match if there is no op to // quantize. return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.h
const tensorflow::quantization::PyFunctionLibrary* quantization_py_function_library = nullptr); // Quantize the model with calibration data. Throw errors if `fully_quantize` // is specified but the calibration data are not sufficient to quantize the // model. PyObject* MlirQuantizeModel(PyObject* data, bool disable_per_channel, bool fully_quantize, int inference_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 18:18:30 UTC 2024 - 3.6K bytes - Viewed (0) -
docs/fr/docs/benchmarks.md
* Si on compare Uvicorn, il faut le comparer à d'autre applications de serveurs comme Daphne, Hypercorn, uWSGI, etc. * **Starlette** :
Registered: Mon Jun 17 08:32:26 UTC 2024 - Last Modified: Thu Jul 27 18:49:56 UTC 2023 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc
private: void runOnOperation() override; }; // TODO: b/305815328 - Consider preserving leading and trailing QDQs for // ModifyIONodesPass in TFLite use cases. // Removes the back-to-back quantize and dequantize ops with volatile attribute. class RemoveVolatileQdqPattern : public OpRewritePattern<quantfork::DequantizeCastOp> { public: explicit RemoveVolatileQdqPattern(MLIRContext* context)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc
// and tfl.dequantize pairs before tf.FakeQuant* being folded. LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx, bool use_fake_quant_num_bits) { OpBuilder builder(func); if (failed(UnwrapTFCustomOps(func, builder))) { return failure(); } // Insert the tfl.quantize/tfl.dequantize ops after the tf.FakeQuant* ops to
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 03 00:14:05 UTC 2023 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_int4.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=bit-width=4 -verify-diagnostics | FileCheck %s // CHECK-LABEL: func @dot_int4 // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x3xf32>) -> tensor<?x2xf32> func.func @dot_int4(%arg0: tensor<?x3xf32>) -> tensor<?x2xf32> { // CHECK: %[[cst:.*]] = stablehlo.constant // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst]]) // CHECK-SAME: quant.uniform<i8:f32, 0.0040316890267764818:127>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 08 22:40:14 UTC 2024 - 1.7K bytes - Viewed (0)