Results 91 - 100 of 193 for Quantile (0.25 sec)
tensorflow/compiler/mlir/lite/python/wrap_converter.py
    enable_whole_model_verify,
    denylisted_ops,
    denylisted_nodes,
    enable_variable_quantization,
    disable_per_channel_for_dense_layers,
    debug_options_str,
):
  """Wraps experimental mlir quantize model."""
  return _pywrap_converter_api.ExperimentalMlirQuantizeModel(
      input_data_str,
      disable_per_channel,
      fully_quantize,
      inference_type,
      input_data_type,
      output_data_type,
Last Modified: Fri May 31 18:18:30 UTC 2024 - 3K bytes
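The wrapper above sits at the bottom of the TFLite converter stack. For orientation, here is a minimal, hedged sketch of the public API that eventually reaches this MLIR quantizer; the toy Keras model is an assumption for illustration, not something from the file above.

import tensorflow as tf

# Toy model (an illustrative assumption only).
model = tf.keras.Sequential([tf.keras.layers.Dense(10, input_shape=(144,))])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # request quantization
tflite_model = converter.convert()  # bytes of the quantized flatbuffer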
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/concurrency_test.py
      ),
      tags={tag_constants.SERVING},
      signature_keys=['serving_default'],
  )
  model = quantize_model.quantize(
      temp_path,
      quantization_options=quantization_options,
      representative_dataset=data_gen(),
  )
  return model

@test_util.run_in_graph_and_eager_modes
Last Modified: Mon Sep 11 00:47:05 UTC 2023 - 3.6K bytes
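The quantize() call above consumes a representative dataset produced by data_gen(). A minimal sketch of what such a generator typically yields; the key name 'input' and the (1, 4) shape are assumptions, not taken from the test:

import numpy as np

def data_gen():
    # Yields calibration samples keyed by the signature input name.
    # The key 'input' and the shape are illustrative assumptions.
    for _ in range(8):
        yield {'input': np.random.uniform(size=(1, 4)).astype(np.float32)}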
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
  if (devce_hardware == nullptr) return {};
  return devce_hardware->IsOpSupported(op);
}

// ================== Convert Quantized Op ============================

// Walk through the func and convert the quantize ops to their float version.
void ConvertQuantizedOpToFloat(mlir::func::FuncOp func, OpBuilder* builder) {
  func.walk([&](Operation* op) {
    // TODO(renjieliu): Find a generic way to deal with const ops.
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes
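The pass above rewrites quantized ops back to their float form at the MLIR level. Numerically, the relationship it restores is ordinary affine dequantization; a sketch of the standard scheme, not code from this file:

import numpy as np

def dequantize(q, scale, zero_point):
    # Affine dequantization: real = scale * (quantized - zero_point).
    return scale * (q.astype(np.float32) - zero_point)

print(dequantize(np.array([0, 128, 255], dtype=np.uint8), 0.1, 128))
# -> [-12.8   0.   12.7]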
tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.cc
// going forward.
// NOLINTNEXTLINE
llvm::cl::list<std::string> custom_opdefs(
    "tf-custom-opdefs",
    llvm::cl::desc("List of custom opdefs when importing "
                   "graphdef"));

// Quantize and Dequantize ops pair can be optionally emitted before and after
// the quantized model as the adaptors to receive and produce floating point
// type data with the quantized model. Set this to `false` if the model input is
Last Modified: Tue Mar 05 20:53:17 UTC 2024 - 7.9K bytes
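The comment above describes the Quantize/Dequantize adaptor pair that lets a quantized model keep floating point inputs and outputs. In the Python converter API, the analogous knob is inference_input_type / inference_output_type; a hedged sketch, where the SavedModel path and calibration shapes are assumptions:

import numpy as np
import tensorflow as tf

def representative_data():
    # Hypothetical calibration samples; the shape must match the model input.
    for _ in range(8):
        yield [np.random.uniform(size=(1, 144)).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model('/tmp/model')  # example path
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data
# tf.float32 (the default) keeps the float adaptor ops at the model boundary;
# tf.int8 would make the model consume and produce quantized tensors directly.
converter.inference_input_type = tf.float32
converter.inference_output_type = tf.float32
tflite_model = converter.convert()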
guava-tests/benchmark/com/google/common/math/QuantilesBenchmark.java
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import java.util.Random;

/** Benchmarks some algorithms providing the same functionality as {@link Quantiles}. */
public class QuantilesBenchmark {
  private static final ContiguousSet<Integer> ALL_DECILE_INDEXES =
      ContiguousSet.create(Range.closed(0, 10), DiscreteDomain.integers());
Last Modified: Mon Oct 10 19:45:10 UTC 2022 - 3.1K bytes
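The benchmark above exercises decile computation over random data. For a sense of the operation being measured, here is the same computation in Python's standard library; this is an illustration only, and Guava's Quantiles uses its own selection algorithm:

import random
import statistics

data = [random.gauss(0.0, 1.0) for _ in range(10_000)]
# n=10 returns the 9 interior decile cut points, roughly the values that
# Guava's Quantiles.scale(10) would compute for indexes 1..9 on this data.
deciles = statistics.quantiles(data, n=10)
print(deciles)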
tensorflow/compiler/aot/BUILD
)

filegroup(
    name = "quantize_header",
    srcs = ["quantize.h"],
    visibility = ["//visibility:public"],
)

cc_library(
    name = "tfcompile_lib",
    srcs = [
        "codegen.cc",
        "compile.cc",
        "flags.cc",
    ],
    hdrs = [
        "codegen.h",
        "compile.h",
        "flags.h",
        "quantize.h",
    ],
    compatible_with = [],
Last Modified: Thu Apr 11 16:13:05 UTC 2024 - 11.7K bytes
tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc
  // It is known that `op` is `ModuleOp` when `pass` is
  // `QuantizeCompositeFunctionPass`, but the check is still performed to be
  // defensive.
  return pass != nullptr &&
         pass->getArgument() == "stablehlo-quantize-composite-functions" &&
         isa_and_nonnull<ModuleOp>(op);
}

// Report is saved only when:
// * After running `QuantizeCompositeFunctionPass`.
// * The pass is run on `ModuleOp`.
Last Modified: Fri May 03 02:59:01 UTC 2024 - 3.6K bytes
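The guard above gates report saving on both the pass name and the op kind. A Python analogue of the same defensive pattern; the function and the op representation are illustrative, not the real API:

def should_save_report(pass_name, op_kind):
    # Save only after the quantize-composite-functions pass has run, and
    # only when the operation it ran on is a module. The string constants
    # mirror the C++ check; the function itself is hypothetical.
    return (pass_name == 'stablehlo-quantize-composite-functions'
            and op_kind == 'ModuleOp')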
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize='post-training-quantize=true' | FileCheck %s

// -----

module {
  func.func @same_scale_ptq_test(%arg0: tensor<*xf32>) -> tensor<*xf32> {
    %cst = arith.constant dense<[-1, 144]> : tensor<2xi32>
    %cst_1 = arith.constant dense<1.0> : tensor<144x10xf32>
    %cst_2 = arith.constant dense<0.1> : tensor<10xf32>
    %0 = "quantfork.stats"(%arg0) {
Last Modified: Wed Feb 01 10:21:29 UTC 2023 - 9.1K bytes
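The quantfork.stats op in the test carries calibration min/max statistics, which post-training quantization turns into a scale and zero point. A sketch of the standard asymmetric int8 derivation; this is the textbook formula, not code lifted from the pass:

def qparams_from_minmax(rmin, rmax, qmin=-128, qmax=127):
    # The representable range must include 0 so that real 0.0 maps exactly
    # to an integer value.
    rmin, rmax = min(rmin, 0.0), max(rmax, 0.0)
    scale = (rmax - rmin) / (qmax - qmin)
    zero_point = round(qmin - rmin / scale)
    return scale, zero_point

print(qparams_from_minmax(-1.0, 1.0))  # -> (0.00784..., 0)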
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' -quant-quantize-composite-functions='quantization-method=weight_only target-opset=XLA enable-per-channel-quantization=true' -symbol-dce...
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 11.3K bytes
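The test above drives weight-only quantization with per-channel scales enabled. A sketch of what symmetric per-channel int8 weight quantization does numerically; this is simplified, and the actual pass rewrites MLIR functions rather than arrays:

import numpy as np

def quantize_weights_per_channel(w):
    # One scale per output channel (last axis), symmetric around zero.
    max_abs = np.max(np.abs(w), axis=0)
    scales = np.where(max_abs > 0, max_abs / 127.0, 1.0)
    q = np.clip(np.round(w / scales), -127, 127).astype(np.int8)
    return q, scales

w = np.random.randn(144, 10).astype(np.float32)
q, scales = quantize_weights_per_channel(w)
dequantized = q.astype(np.float32) * scales  # recovered at inference time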
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
  Type quantized_type = quant_type.castFromExpressedType(expressed_type);
  ShapedType shaped_quantized_type = mlir::cast<ShapedType>(quantized_type);
  DenseElementsAttr tensor_proto_attr =
      mlir::dyn_cast<DenseElementsAttr>(Quantize(attr, shaped_quantized_type));
  if (!tensor_proto_attr) {
    return nullptr;
  }
  Type storage_type =
      mlir::cast<QuantizedType>(shaped_quantized_type.getElementType())
          .getStorageType();
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes
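The C++ above converts a dense float constant into its quantized storage values. The numeric core of such a conversion is a round-and-clip into the storage type's range; a sketch of the standard scheme, with illustrative names:

import numpy as np

def quantize_to_storage(values, scale, zero_point, storage=np.int8):
    # Round to the nearest storage value, then clip to the storage range.
    info = np.iinfo(storage)
    q = np.round(values / scale) + zero_point
    return np.clip(q, info.min, info.max).astype(storage)

print(quantize_to_storage(np.array([-1.0, 0.0, 1.0]), scale=2 / 255, zero_point=0))
# -> [-128    0  127]  (127.5 rounds to 128 and is clipped to the int8 max)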