- Sort Score
- Result 10 results
- Languages All
Results 71 - 80 of 498 for weighted (0.17 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
"Uses TF Uniform Quantized ops"))}; Option<int64_t> min_num_elements_for_weights_{ *this, "min-num-elements-for-weights", llvm::cl::init(0), llvm::cl::desc("The minimum required number of elements in a weight " "array to apply quantization.")}; Option<QuantMethod> quantization_method_{ *this, "quantization-method",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
// This is the argument used to refer to the pass in // the textual format (on the commandline for example). return "quant-quantize-weights"; } StringRef getDescription() const final { // This is a brief description of the pass. return "Quantize weights used by quantizable ops."; } void getDependentDialects(DialectRegistry& registry) const override {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes - Viewed (0) -
pilot/pkg/xds/endpoints/endpoint_builder.go
} func (e *LocalityEndpoints) refreshWeight() { var weight *wrapperspb.UInt32Value if len(e.llbEndpoints.LbEndpoints) == 0 { weight = nil } else { weight = &wrapperspb.UInt32Value{} for _, lbEp := range e.llbEndpoints.LbEndpoints { weight.Value += lbEp.GetLoadBalancingWeight().Value } } e.llbEndpoints.LoadBalancingWeight = weight } func (e *LocalityEndpoints) AssertInvarianceInTest() {
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Sun Apr 28 02:18:19 UTC 2024 - 26.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
// Apply float16 quantization to all the weights. Quantized weights will be // dequantized before running inference. // Activation: f32, Weight: f16, Bias: f16 FLOAT16 = 3; // Apply static range quantization. The quantization range is determined // via calibration phase and quantized during conversion. // Activation: qi8, Weight: qi8, Bias: qi32 POST_TRAINING_QUANTIZATION_STATIC_RANGE = 4; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0) -
pkg/scheduler/framework/runtime/framework_test.go
return buildScoreConfigWithWeights(defaultWeights, ps...) } func buildScoreConfigWithWeights(weights map[string]int32, ps ...string) *config.Plugins { var plugins []config.Plugin for _, p := range ps { plugins = append(plugins, config.Plugin{Name: p, Weight: weights[p]}) } return &config.Plugins{Score: config.PluginSet{Enabled: plugins}} } type injectedResult struct {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri May 17 09:07:27 UTC 2024 - 103K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
"Non-constant weights are not supported at the moment," " except matmul and einsum."); } else if (!quant_options_.enable_two_input_tensors() && !is_unitwise_quantization_enabled) { return absl::InternalError( "Quantization is disabled for this op due to the non-constant " "weight. You can enable it by setting `enable_two_input_tensors` "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
src/go/doc/testdata/examples/issue43658.golden
g, err := community.NewUndirectedLayers(friends, enemies) if err != nil { log.Fatal(err) } weights := []float64{1, -1} // Get the profile of internal node weight for resolutions // between 0.1 and 10 using logarithmic bisection. p, err := community.Profile( community.ModularMultiplexScore(g, weights, true, community.WeightMultiplex, 10, src), true, 1e-3, 0.1, 10, ) if err != nil { log.Fatal(err)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 10 23:13:45 UTC 2022 - 4.5K bytes - Viewed (0) -
tests/test_tutorial/test_extra_models/test_tutorial005.py
Registered: Mon Jun 17 08:32:26 UTC 2024 - Last Modified: Fri Jun 30 18:25:16 UTC 2023 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8, "weight_only", "Post-training weight-only quantization"))}; Option<bool> enable_per_channel_quantization_{ *this, "enable-per-channel-quantization", llvm::cl::init(false), llvm::cl::desc("Whether enable per-channel quantized weights.")}; }; // Apply constant transformations for the op_set.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tests/test_tutorial/test_extra_models/test_tutorial005_py39.py
Registered: Mon Jun 17 08:32:26 UTC 2024 - Last Modified: Fri Jun 30 18:25:16 UTC 2023 - 1.6K bytes - Viewed (0)