- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 34 for float1 (0.22 sec)
-
src/reflect/value.go
} // CanFloat reports whether [Value.Float] can be used without panicking. func (v Value) CanFloat() bool { switch v.kind() { case Float32, Float64: return true default: return false } } // Float returns v's underlying value, as a float64. // It panics if v's Kind is not [Float32] or [Float64]. func (v Value) Float() float64 { k := v.kind() switch k { case Float32:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 119.9K bytes - Viewed (0) -
cmd/metrics-v2.go
if globalExpiryState != nil { expPendingTasks.Value = float64(globalExpiryState.PendingTasks()) expMissedTasks.Value = float64(globalExpiryState.stats.MissedTasks()) expMissedFreeVersions.Value = float64(globalExpiryState.stats.MissedFreeVersTasks()) expMissedTierJournalTasks.Value = float64(globalExpiryState.stats.MissedTierJournalTasks()) expNumWorkers.Value = float64(globalExpiryState.stats.NumWorkers()) }
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Jun 13 22:26:54 UTC 2024 - 131.9K bytes - Viewed (0) -
staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation_test.go
"unsortedInts": []interface{}{int64(2), int64(1)}, "emptyInts": []interface{}{}, "doubles": []interface{}{float64(1), float64(2), float64(2), float64(3)}, "unsortedDoubles": []interface{}{float64(2), float64(1)}, "emptyDoubles": []interface{}{}, "intBackedDoubles": []interface{}{int64(1), int64(2), int64(2), int64(3)},
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 17:14:10 UTC 2024 - 159.9K bytes - Viewed (0) -
src/reflect/all_test.go
// Fun with floating point. {math.NaN(), math.NaN(), false}, {&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false}, {&[1]float64{math.NaN()}, self{}, true}, {[]float64{math.NaN()}, []float64{math.NaN()}, false}, {[]float64{math.NaN()}, self{}, true}, {map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false}, {map[float64]float64{math.NaN(): 1}, self{}, true}, // Nil vs empty: not the same.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 218.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
bias = array_ops.constant( np.random.uniform(size=[shapes[1][-1]]), dtype=dtypes.float32 ) model = MatmulModel(bias) x = array_ops.constant( np.random.uniform(size=x_shape), dtype=dtypes.float32 ) y = array_ops.constant( np.random.uniform(size=y_shape), dtype=dtypes.float32 ) if use_kernel: model.matmul = model.matmul_with_kernel
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
return op.emitOpError("requires min to be a 1d float tensor"); auto max = GetRankedTensorTypeForOperand(op.getMax()); if (max && !IsOfRankedFloatTensorType(max, 1)) return op.emitOpError("requires max to be a 1d float tensor"); Value inputs = op.getInputs(); if (!HasRankAtLeast(inputs, 1)) return op.emitError("requires inputs to be at least 1d float tensor"); int64_t num_bits = op.getNumBits();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
std::vector<float> result_values; result_values.reserve(output_size); for (int i = 0; i < output_size; ++i) { // Dot product with Kahan/Neumaier summation to minimize numeric errors. float sum = has_bias ? *bias_values_it : 0.0f; float compensation = 0.0f; for (int j = 0; j < input_size; ++j) { const float addend = input_values_it[j] * weights_row_it[j]; const float new_sum = sum + addend;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_export.cc
element_type)) { std::vector<float> mins = {static_cast<float>(qtype.getMin())}; std::vector<float> maxs = {static_cast<float>(qtype.getMax())}; q_params = tflite::CreateQuantizationParameters( builder_, builder_.CreateVector<float>(mins), builder_.CreateVector<float>(maxs)); } return tflite::CreateTensor(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:41:49 UTC 2024 - 164.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// CHECK-LABEL: dot_general_upstream_srq_wrong_contracting // CHECK: stablehlo.dot_general // CHECK-NOT: tfl.batch_matmul // ----- // Tests static range quantized dot_general with float operands // CHECK-LABEL: dot_general_upstream_srq_float_operands func.func @dot_general_upstream_srq_float_operands(%arg0: tensor<1x2x3x4xf32>, %arg1: tensor<1x2x4x5xf32>) -> tensor<1x2x3x5xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/generic.rules
(Trunc64to32 (Const64 [c])) => (Const32 [int32(c)]) (Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)]) (Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)]) (Cvt32to32F (Const32 [c])) => (Const32F [float32(c)]) (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)]) (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)]) (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)]) (Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 22:21:05 UTC 2024 - 135.3K bytes - Viewed (0)