- Sort: Score
- Results per page: 10
- Languages: All
Results 51 - 60 of 171 for Bias (0.2 sec)
-
src/math/trig_reduce.go
const PI4 = Pi / 4 if x < PI4 { return 0, x } // Extract out the integer and exponent such that, // x = ix * 2 ** exp. ix := Float64bits(x) exp := int(ix>>shift&mask) - bias - shift ix &^= mask << shift ix |= 1 << shift // Use the exponent to extract the 3 appropriate uint64 digits from mPi4, // B ~ (z0, z1, z2), such that the product leading digit has the exponent -61.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 11 16:34:30 UTC 2022 - 3.3K bytes - Viewed (0) -
src/math/fma.go
pm1, _ = bits.Sub64(pm1, zm1, c) nz := lz(pm1, pm2) pe -= nz m, pm2 = shl(pm1, pm2, uint(nz-1)) m |= nonzero(pm2) } // Round and break ties to even if pe > 1022+bias || pe == 1022+bias && (m+1<<9)>>63 == 1 { // rounded value overflows exponent range return Float64frombits(uint64(ps)<<63 | uvinf) } if pe < 0 { n := uint(-pe) m = m>>n | nonzero(m&(1<<n-1)) pe = 0 }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jul 05 22:05:30 UTC 2023 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/README.md
@Composite( 'FusedFullyConnected', inputs=['input_: T', 'filter_: T', 'bias: T'], attrs=['act: {"", "RELU", "RELU6", "TANH"} = ""'], derived_attrs=['T: {float, int8}'], outputs=['o: T']) def _composite_fully_connected(input_, filter_, bias, act): res = tf.raw_ops.MatMul( a=input_, b=filter_, transpose_a=False, transpose_b=True) res = tf.raw_ops.Add(x=res, y=bias) if act == 'RELU': return tf.raw_ops.Relu(features=res)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
src/math/logb.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 07 19:46:45 UTC 2022 - 1021 bytes - Viewed (0) -
src/math/exp_amd64.s
ADDSD X0, X1 MULSD X1, X0 MOVSD exprodata<>+16(SB), X1 ADDSD X0, X1 MULSD X1, X0 ADDSD exprodata<>+8(SB), X0 // return fr * 2**exponent ldexp: ADDL $0x3FF, BX // add bias JLE denormal CMPL BX, $0x7FF JGE overflow lastStep: SHLQ $52, BX MOVQ BX, X1 MULSD X1, X0 MOVSD X0, ret+8(FP) RET notFinite: // test bits for -Inf
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 15 15:48:19 UTC 2021 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
result_type = bias_add.getResult().getType(); } auto fused_loc = rewriter.getFusedLoc(locations); // The fused contraction has the same operands as the original contraction // with `bias` from the BiasAddOp appended. SmallVector<Value, 4> operands(contraction.operand_begin(), contraction.operand_end()); operands.push_back(bias_add.getBias());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
EXPECT_NE(float_tensor, nullptr); // If the tensor is a weight, it should have type INT8, otherwise it // should stay with type FLOAT32. // If the tensor is a bias, it should have type FLOAT32. // // Check with float_tensor name since quantized tensor // may be renamed. if (float_tensor->name()->str() == "conv_bias") {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
src/math/modf.go
switch { case f < 0: int, frac = Modf(-f) return -int, -frac case f == 0: return f, f // Return -0, -0 when f == -0 } return 0, f } x := Float64bits(f) e := uint(x>>shift)&mask - bias // Keep the top 12+e bits, the integer part; clear the rest. if e < 64-12 { x &^= 1<<(64-12-e) - 1 } int = Float64frombits(x) frac = f - int return
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 11 16:34:30 UTC 2022 - 913 bytes - Viewed (0) -
tensorflow/c/experimental/gradients/nn_grad.cc
Status Compute(AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> grad_outputs, absl::Span<AbstractTensorHandle*> grad_inputs) override { /* Given upstream grad U and a BiasAdd: A + bias, the gradients are: * * dA = U * dbias = reduceSum(U, dims = channel_dim) */ AbstractTensorHandle* upstream_grad = grad_outputs[0]; DCHECK(upstream_grad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/resources/composite_ops.cc
.Attr("N: int >= 1") .Attr("T: {numbertype, variant}") .SetIsCommutative() .SetIsAggregate(); REGISTER_OP("MyBiasedDense") .Input("input: T") .Input("weight: T") .Input("bias: T") .Output("out: T") .Attr("T: {float, int8}") .Attr("act: {'', 'relu', 'relu6'} = ''");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 23 21:28:23 UTC 2020 - 1.3K bytes - Viewed (0)