- Sort: Score
- Results per page: 10
- Languages: All
Results 101 - 110 of 192 for dequantize (0.15 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s // Tests for PopulateFusedGemmStylePatterns are handled in // quantize_composite_functions for module-level evaluation of functions. module attributes {tf_saved_model.semantics} { // CHECK: quantize_simple_xla_call_module(%[[ARG_0:.+]]: tensor<1x4xf32>) func.func private @quantize_simple_xla_call_module(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 01:38:40 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-quantize -verify-each=false | FileCheck %s func.func private @conv(%input: tensor<1x3x4x3xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<*xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x3x4x3>]} { %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize.cc
patterns.add<StableHloQuantization, StableHloQuantizationReverse>(&ctx); PopulateCommonQuantizationPatterns(ctx, patterns, enable_per_channel_quantized_weight_); // Quantize all quantizable ops, including ops that are not compute-heavy. PopulateAllQuantizablePatterns(ctx, patterns); if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 07:08:19 UTC 2024 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
auto &requantize = states_manager_.GetOperandRequantizeState(op, i); if (state.IsEmpty() && requantize.pos == RequantizeState::NO_REQUANTIZE) { input_specs.push_back(original_input_specs[i]); } else if (requantize.pos == RequantizeState::ON_OUTPUT) { input_specs.push_back(TypeAttr::get(requantize.params)); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/propagate_quantize_type.mlir
// CHECK: %[[IDENTITY:.*]] = "tf.Identity"(%cst_0) : (tensor<200x100x300xi8>) -> tensor<200x100x300xi8> // CHECK: %[[DEQUANTIZED:.*]] = "tf.PartitionedCall"(%[[IDENTITY]]) <{config = "", config_proto = "", executor_type = "", f = @composite_dequantize_uniform}> : (tensor<200x100x300xi8>) -> tensor<200x100x300xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
"DataFormatDimMap", "DataFormatVecPermute", "DepthToSpace", "DepthwiseConv2dNative", "DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput", "Dequantize", "Diag", "DynamicInfeedEnqueueTupleOp", "DynamicInfeedDequeueTupleOp", "DynamicStitch", "DynamicPartition", "Einsum", "EmptyTensorList",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
// Quantization ops. //===----------------------------------------------------------------------===// def TFL_DequantizeOp: TFL_Op<"dequantize", [NoMemoryEffect]> { let summary = "Dequantize operator"; let description = [{ Converts quantized array of integers to floating-points according to the quantization parameters. }];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/aot/quantize.h
Jake Harmon <******@****.***> 1694027275 -0700
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
// ranges. bool SetInputNodesQuantizationParams(func::FuncOp func); // The function might contain more stats ops than required, and it will // introduce requantize if the calibration stats have conflicts. This method // tries to remove all the redundant stats ops. bool RemoveRedundantStats(func::FuncOp func);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
// Whether the func contains Quantize ops. This is used to determine whether // to use the quantization parameters from the fixed output range property. bool ContainsQuantizeOps(func::FuncOp func); QuantizationSpecs quant_specs_; Option<bool> enable_post_training_quantize_{ *this, "post-training-quantize", llvm::cl::init(false),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0)