- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 69 for Quantile (0.38 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/types.h" // NOLINTNEXTLINE //===----------------------------------------------------------------------===// // The prepare-dynamic-range-quantize Pass. // namespace mlir { namespace TFL { namespace { #define GEN_PASS_DEF_PREPAREDYNAMICRANGEQUANTIZEPASS #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// quantization. // * Non-convolution ops: All // // Default value: true bool enable_per_channel_quantized_weight = 2 [deprecated = true]; // Whether to quantize all quantizable ops or only compute-heavy ops. bool enable_full_int_quantization = 3; } // Applies int8 per-channel weight-only post-training quantization for all // dot_general and convolution ops.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
// parameters in `tensor_type`. Returns empty Attribute if the // `tensor_type` is not a QuantizedType or the quantization fails. ElementsAttr Quantize(Attribute real_value, Type tensor_type); // Quantizes the elements in "legacy mode", where it calls TOCO's methods // to quantize values with float scale. ElementsAttr QuantizeLegacy(Attribute real_value, Type tensor_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir
%zps = "tf.Const"() { value = dense<3> : tensor<i32> } : () -> tensor<i32> // CHECK: %[[QUANTIZE:.*]] = mhlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:3>> // CHECK: %[[CONVERT_1:.*]] = mhlo.bitcast_convert %[[QUANTIZE]] : (tensor<2x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<2xi8>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 01:25:29 UTC 2024 - 37.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir
%4 = tfl.mul %2, %3 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : tensor<100xf32> %5 = "tfl.quantize"(%4) {qtype = tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>, tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<100xf32>) -> tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/aot/BUILD
) filegroup( name = "quantize_header", srcs = ["quantize.h"], visibility = ["//visibility:public"], ) cc_library( name = "tfcompile_lib", srcs = [ "codegen.cc", "compile.cc", "flags.cc", ], hdrs = [ "codegen.h", "compile.h", "flags.h", "quantize.h", ], compatible_with = [],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 11 16:13:05 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
Type quantized_type = quant_type.castFromExpressedType(expressed_type); ShapedType shaped_quantized_type = mlir::cast<ShapedType>(quantized_type); DenseElementsAttr tensor_proto_attr = mlir::dyn_cast<DenseElementsAttr>(Quantize(attr, shaped_quantized_type)); if (!tensor_proto_attr) { return nullptr; } Type storage_type = mlir::cast<QuantizedType>(shaped_quantized_type.getElementType()) .getStorageType();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/tools/optimize/reduced_precision_support.h" namespace mlir { namespace quant { // Stores information about how to quantize a user-specified custom operation. struct CustomOpInfo { std::vector<std::int32_t> quantizable_input_indices; bool is_weight_only = false; bool no_side_effect = true; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
if (IsCallToQuantizableLiftedFunction(op)) { std::optional<StringRef> composite_function_name = GetCompsiteFunctionName(op); if (!composite_function_name.has_value()) return failure(); // Quantize inputs of quantizable composite functions. for (OpOperand &input : op->getOpOperands()) { Type element_type = getElementTypeOrSelf(input.get().getType()); // Non-float cases won't be calibrated.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
LLVM_DEBUG(llvm::dbgs() << "Quantization parameters are not collected in an ideal place. " "Has to fallback values which might introduce errors.\n"); // Use the first immutable state to quantize the rest operands and results. if (!immutable_states.empty()) return immutable_states.front()->params; // If there are no immutable states, use the operand's state if it is the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0)