Results 11 - 20 of 260 for weights (0.29 sec)
tensorflow/compiler/mlir/lite/tests/debuginfo/v1_1.0_224_frozen.wrong_attr.line.part.pbtxt
Last Modified: Thu Jul 27 18:59:05 UTC 2023 - 16.2K bytes
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
const bool disable_per_channel_;

// We should distinguish weights and bias constants. Biases are specified by
// the quantization spec or are the operands of ops with same scale spec. The
// rest are weights.
DenseSet<Operation*> weights_;

// The weights require narrow_range quantization. This map collects all the
// weight operands defined by the op quant spec. The value of each entry is
Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes
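The quantization_driver.h excerpt above distinguishes weight constants, which get narrow_range quantization, from biases, which follow the op's scale spec. As a rough standalone illustration of what symmetric, narrow-range int8 quantization of a weight array means (a sketch with hypothetical names, not the driver's code), the scale is chosen so values map into [-127, 127] rather than [-128, 127]:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Symmetric, narrow-range int8 quantization of a weight array:
// the minimum quantized value is -127 (not -128), so the range is
// symmetric around zero.
struct QuantizedWeights {
  std::vector<int8_t> values;
  float scale;
};

QuantizedWeights QuantizeNarrowRange(const std::vector<float>& weights) {
  float max_abs = 0.0f;
  for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));

  QuantizedWeights q;
  q.scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
  q.values.reserve(weights.size());
  for (float w : weights) {
    int v = static_cast<int>(std::lround(w / q.scale));
    q.values.push_back(static_cast<int8_t>(std::clamp(v, -127, 127)));
  }
  return q;
}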
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
namespace {

using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizedType;
using ::stablehlo::quantization::WeightOnlyPtq;

// Inserts quantization parameters of weights for weight-only quantization and
// dynamic range quantization of `stablehlo.convolution` and
// `stablehlo.dot_general`.
class InsertWeightParamPass
    : public impl::InsertWeightParamPassBase<InsertWeightParamPass> {
 public:
Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes
tensorflow/compiler/mlir/lite/tests/debuginfo/v1_1.0_224_frozen.wrong_attr.stack.part.pbtxt
Last Modified: Thu Jul 27 18:59:05 UTC 2023 - 16.4K bytes
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
    // This is the argument used to refer to the pass in
    // the textual format (on the commandline for example).
    return "quant-quantize-weights";
  }

  StringRef getDescription() const final {
    // This is a brief description of the pass.
    return "Quantize weights used by quantizable ops.";
  }

  void getDependentDialects(DialectRegistry& registry) const override {
Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// For now, restrict scale adjustment to ops with affine quantized weights,
// and having weights and biases as constants. This currently only applies to
// FC and Conv* ops. Restriction for the weight can be relaxed if there are
// needs for adjusting scale of variable weights.
auto affine_op = dyn_cast<AffineQuantizedOpInterface>(op);
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes
pilot/pkg/networking/core/loadbalancer/loadbalancer_test.go
Last Modified: Tue Apr 23 05:38:57 UTC 2024 - 39.1K bytes
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
// Whether to allow weight-only quantization. This scheme quantizes
// weights but will dequantize them back at runtime which is useful for
// memory bound case without kernel support available in lower precisions.
// Used in MLIR dynamic range quantizer.
bool weight_only_quantization = false;

// The minimum number of elements in a weights array required to apply
Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes
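The quantization_config.h comment above describes weight-only quantization: weights are stored quantized but dequantized back to float at runtime, trading a little compute for memory savings when no low-precision kernel is available. A minimal standalone sketch of that idea (hypothetical names, not TensorFlow's implementation):

#include <cstdint>
#include <vector>

// Weight-only quantization: store int8 weights plus a float scale, and
// dequantize back to float just before the op executes. The compute itself
// still runs in float; only the stored weights shrink.
std::vector<float> DequantizeWeights(const std::vector<int8_t>& q,
                                     float scale) {
  std::vector<float> w;
  w.reserve(q.size());
  for (int8_t v : q) w.push_back(static_cast<float>(v) * scale);
  return w;
}

// Usage at runtime (hypothetical Dot helper):
//   float y = Dot(DequantizeWeights(stored_q, stored_scale), activation);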
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
  input_ = fused_func_op_.getArgument(0);
  bias_ = fused_func_op_.getArgument(2);
  weight_ = fused_func_op_.getArgument(1);
  weight_type_ = mlir::cast<RankedTensorType>(weight_.getType());

  if (weight_type_.getRank() != 2) {
    return fused_func_op_.emitError() << "The weight tensor was not of rank 2";
  }
  if (weight_type_.getDimSize(1) % num_gates_ != 0) {
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
          METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8, "weight_only",
          "Post-training weight-only quantization"))};

  Option<bool> enable_per_channel_quantization_{
      *this, "enable-per-channel-quantization", llvm::cl::init(false),
      llvm::cl::desc("Whether to enable per-channel quantized weights.")};
};

// Apply constant transformations for the op_set.
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes
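The preprocess_op.cc option above toggles per-channel quantized weights. Per-channel (as opposed to per-tensor) quantization computes one scale per output channel of the weight tensor, which copes with channels of very different magnitudes. A rough sketch assuming a [channel][element] layout (hypothetical helper, not the pass's code):

#include <algorithm>
#include <cmath>
#include <vector>

// Per-channel symmetric scales: one scale per output channel, computed from
// the maximum absolute weight in that channel's row.
std::vector<float> PerChannelScales(
    const std::vector<std::vector<float>>& weights) {  // [channel][element]
  std::vector<float> scales;
  scales.reserve(weights.size());
  for (const auto& channel : weights) {
    float max_abs = 0.0f;
    for (float w : channel) max_abs = std::max(max_abs, std::fabs(w));
    scales.push_back(max_abs > 0.0f ? max_abs / 127.0f : 1.0f);
  }
  return scales;
}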