- Sort by: Score
- Results per page: 10
- Language filter: All
Results 41 - 50 of 81 for dequantize (0.34 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// hardware performs better with integer ops. // Default value: true optional bool unpack_quantized_types = 1; // When set to True, requantize op in the quantized fusion will merge with the // subsequent dequantize op if present. // Default value: false // TODO: b/321729008 - re-consider default value after testing on prod model. bool merge_fusion_with_dequantize = 2; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
bool enable_legacy_weight_only = false, std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); // Converts dequantize-(quantizable) call-quantize pattern to a single call op // that has quantized input and output types. It is expected for this pass to // emit illegal IR with unsupported quantized input and output types. The
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 42K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
(TF_ConstOp (GetI64ScalarElementsAttr<-1>)))), (TF_SoftmaxCrossEntropyWithLogitsOp $features, $adjusted_labels)]>; //===----------------------------------------------------------------------===// // Dequantize op patterns. //===----------------------------------------------------------------------===// def DequantizeHalfRange : NativeCodeCall< "DequantizeHalfRange(&$_builder, $0)">;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
const mlir::TFL::PassConfig& pass_config, mlir::OpPassManager* pass_manager) { // This pass wraps all the tf.FakeQuant ops in a custom op so they are not // folded before being converted to tfl.quantize and tfl.dequantize ops. auto wrapped_ops = mlir::TFL::AllTfFakeQuantOps(); pass_manager->addNestedPass<mlir::func::FuncOp>( mlir::TFL::CreateRaiseCustomOpsPass(wrapped_ops));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
src/image/jpeg/scan.go
} } } } if d.progressive { // Save the coefficients. d.progCoeffs[compIndex][by*mxx*hi+bx] = b // At this point, we could call reconstructBlock to dequantize and perform the // inverse DCT, to save early stages of a progressive image to the *image.YCbCr // buffers (the whole point of progressive encoding), but in Go, the jpeg.Decode
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 25 00:46:29 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
// set of acceptable options. // LINT.IfChange enum BuiltinOperator : int32 { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, CONV_2D = 3, DEPTHWISE_CONV_2D = 4, DEPTH_TO_SPACE = 5, DEQUANTIZE = 6, EMBEDDING_LOOKUP = 7, FLOOR = 8, FULLY_CONNECTED = 9, HASHTABLE_LOOKUP = 10, L2_NORMALIZATION = 11, L2_POOL_2D = 12, LOCAL_RESPONSE_NORMALIZATION = 13, LOGISTIC = 14,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema.fbs
// set of acceptable options. // LINT.IfChange enum BuiltinOperator : int32 { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, CONV_2D = 3, DEPTHWISE_CONV_2D = 4, DEPTH_TO_SPACE = 5, DEQUANTIZE = 6, EMBEDDING_LOOKUP = 7, FLOOR = 8, FULLY_CONNECTED = 9, HASHTABLE_LOOKUP = 10, L2_NORMALIZATION = 11, L2_POOL_2D = 12, LOCAL_RESPONSE_NORMALIZATION = 13, LOGISTIC = 14,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize.cc
// Base struct for quantization. template <QuantizationTrait quantization_trait, typename ConcreteT, typename RootOpT = DequantizeOp> struct TFLQuantizationBase : public quant::QuantizationPattern<ConcreteT, QuantizeOp, DequantizeOp, NumericVerifyOp, RootOpT> { explicit TFLQuantizationBase(MLIRContext* ctx,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
auto &requantize = states_manager_.GetOperandRequantizeState(op, i); if (state.IsEmpty() && requantize.pos == RequantizeState::NO_REQUANTIZE) { input_specs.push_back(original_input_specs[i]); } else if (requantize.pos == RequantizeState::ON_OUTPUT) { input_specs.push_back(TypeAttr::get(requantize.params)); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0)