- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 569 for biases (0.11 sec)
-
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py
fc1 = gen_mnist_ops.new_fully_connected(reshape, self.weights['f3'], self.biases['b3'], 'RELU') # output shape: [-1, 10] return gen_mnist_ops.new_fully_connected(fc1, self.weights['f4'], self.biases['b4']) def main(strategy): """Trains an MNIST model using the given tf.distribute.Strategy."""
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 20 03:05:18 UTC 2021 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
private: // Whether the value is used as a bias input of another op. Here we assume // bias is used immediately by the user. This assumption is always correct // after constant folding. bool UsedAsBias(Value value) { for (auto &use : value.getUses()) { auto biases = TFL::GetOpQuantSpec(use.getOwner())->biases_params; if (biases.find(use.getOperandNumber()) != biases.end()) return true; } return false; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_without_identity.pbtxt
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_without_identity_4bit.pbtxt
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
GetQuantScaleSpec(user); const BiasParamsMap biases = spec->biases_params; // The quantization parameters of a `weight` shouldn't be determined by // other values. So any constants which are not bias, an operand of an // op with same scale requirements, and haven't been quantized are // weights. if (!biases.contains(operand_num) && !scale_spec->has_same_scale_requirement &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc
return; // We only fold int32/int64 for Const and i32 for QConst if not specify // all constants flag. (Since they're more like "configs" or i32 biases.) // We will fold every const ops (and q_const ops) if we specify the // fold_all_constants_flag. if (!fold_all_constants_flag_) { if (!IsConstOrQConstInt(op)) return; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
src/internal/fuzz/mutators_byteslice.go
// randomly. We may want to either pick a much larger value // (AFL uses 32768, paired with a similar impl to chooseLen // which biases towards smaller lengths that grow over time), // or set the max based on characteristics of the corpus // (libFuzzer sets a min/max based on the min/max size of // entries in the corpus and then picks uniformly from // that range). n := m.chooseLen(4096) if len(b)+n >= cap(b) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 19 18:23:43 UTC 2021 - 7.7K bytes - Viewed (0) -
src/internal/fuzz/encoding.go
// instead of the original value (see https://go.dev/issue/51526), so // they must be treated as int32 instead. // // We arbitrarily draw the line at UTF-8 validity, which biases toward the // "rune" interpretation. (However, we accept either format as input.) if utf8.ValidRune(t) { fmt.Fprintf(b, "rune(%q)\n", t) } else { fmt.Fprintf(b, "int32(%v)\n", t) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 30 16:39:12 UTC 2022 - 11K bytes - Viewed (0)