Results 1 - 10 of 67 for log1p (0.11 sec)
src/math/log1p.go
//
// Special cases are:
//
//	Log1p(+Inf) = +Inf
//	Log1p(±0) = ±0
//	Log1p(-1) = -Inf
//	Log1p(x < -1) = NaN
//	Log1p(NaN) = NaN
func Log1p(x float64) float64 {
	if haveArchLog1p {
		return archLog1p(x)
	}
	return log1p(x)
}

func log1p(x float64) float64 {
	const (
		Sqrt2M1 = 4.142135623730950488017e-01 // Sqrt(2)-1 = 0x3fda827999fcef34
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 11:59:09 UTC 2023 - 6.3K bytes - Viewed (0)
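As a quick illustration (my own sketch, not part of the file above), math.Log1p stays accurate where Log(1 + x) loses precision, and the special cases listed in the comment behave as documented:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := 1e-15
	fmt.Println(math.Log1p(x))   // accurate: ~1e-15
	fmt.Println(math.Log(1 + x)) // loses precision: 1+x rounds in float64 before the log is taken

	fmt.Println(math.Log1p(math.Inf(1))) // +Inf
	fmt.Println(math.Log1p(-1))          // -Inf
	fmt.Println(math.Log1p(-2))          // NaN, since x < -1
}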
src/math/asinh.go
// we have
//	asinh(x) := x                                            if 1+x*x=1,
//	         := sign(x)*(log(x)+ln2)                         for large |x|, else
//	         := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1)))        if |x|>2, else
//	         := sign(x)*log1p(|x| + x**2/(1 + sqrt(1+x**2)))
//
// Asinh returns the inverse hyperbolic sine of x.
//
// Special cases are:
//
//	Asinh(±0) = ±0
//	Asinh(±Inf) = ±Inf
//	Asinh(NaN) = NaN
func Asinh(x float64) float64 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 13 20:02:49 UTC 2023 - 1.9K bytes - Viewed (0)
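The log1p branch quoted above can be tried directly; this is a hedged sketch with a helper name of my own (asinhViaLog1p), compared against math.Asinh:

package main

import (
	"fmt"
	"math"
)

// asinhViaLog1p mirrors the log1p-based identity for moderate |x|.
func asinhViaLog1p(x float64) float64 {
	a := math.Abs(x)
	return math.Copysign(math.Log1p(a+a*a/(1+math.Sqrt(1+a*a))), x)
}

func main() {
	for _, x := range []float64{-0.5, 0.25, 1.5} {
		fmt.Println(asinhViaLog1p(x), math.Asinh(x)) // the two should agree closely
	}
}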
src/math/atanh.go
// 2. For x>=0.5
//	atanh(x) = (1/2) * log(1 + 2x/(1-x)) = 0.5 * log1p(2 * x/(1-x))
//
// For x<0.5
//	atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
//
// Special cases:
//	atanh(x) is NaN if |x| > 1 with signal;
//	atanh(NaN) is that NaN with no signal;
//	atanh(+-1) is +-INF with signal.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 11 16:34:30 UTC 2022 - 2K bytes - Viewed (0)
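A small sketch of the two branches in that comment, using a hypothetical helper atanhViaLog1p (valid for x in [0, 1)) and checking it against math.Atanh:

package main

import (
	"fmt"
	"math"
)

// atanhViaLog1p follows the two log1p branches from the comment above.
func atanhViaLog1p(x float64) float64 {
	if x < 0.5 {
		return 0.5 * math.Log1p(2*x+2*x*x/(1-x))
	}
	return 0.5 * math.Log1p(2*x/(1-x))
}

func main() {
	for _, x := range []float64{0.1, 0.5, 0.9} {
		fmt.Println(atanhViaLog1p(x), math.Atanh(x))
	}
}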
src/math/log1p_s390x.s
GLOBL ·log1ptab<>+0(SB), RODATA, $128

// Log1p returns the natural logarithm of 1 plus its argument x.
// It is more accurate than Log(1 + x) when x is near zero.
//
// Special cases are:
//	Log1p(+Inf) = +Inf
//	Log1p(±0) = ±0
//	Log1p(-1) = -Inf
//	Log1p(x < -1) = NaN
//	Log1p(NaN) = NaN
// The algorithm used is minimax polynomial approximation
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 15:34:41 UTC 2019 - 5.1K bytes - Viewed (0)
src/math/export_s390x_test.go
// license that can be found in the LICENSE file.

package math

// Export internal functions and variable for testing.
var Log10NoVec = log10
var CosNoVec = cos
var CoshNoVec = cosh
var SinNoVec = sin
var SinhNoVec = sinh
var TanhNoVec = tanh
var Log1pNovec = log1p
var AtanhNovec = atanh
var AcosNovec = acos
var AcoshNovec = acosh
var AsinNovec = asin
var AsinhNovec = asinh
var ErfNovec = erf
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 08 19:52:30 UTC 2017 - 732 bytes - Viewed (0)
src/math/acosh.go
// Method :
//	Based on
//		acosh(x) = log [ x + sqrt(x*x-1) ]
//	we have
//		acosh(x) := log(x)+ln2,                        if x is large; else
//		acosh(x) := log(2x-1/(sqrt(x*x-1)+x))          if x>2; else
//		acosh(x) := log1p(t+sqrt(2.0*t+t*t));          where t=x-1.
//
// Special cases:
//	acosh(x) is NaN with signal if x<1.
//	acosh(NaN) is NaN without signal.
//
// Acosh returns the inverse hyperbolic cosine of x.
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 11 16:34:30 UTC 2022 - 1.7K bytes - Viewed (0)
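The log1p branch with t = x-1 can be exercised on its own; acoshViaLog1p below is a hypothetical helper of mine, compared against math.Acosh for arguments close to 1:

package main

import (
	"fmt"
	"math"
)

// acoshViaLog1p uses the log1p form from the comment above, with t = x-1.
func acoshViaLog1p(x float64) float64 {
	t := x - 1
	return math.Log1p(t + math.Sqrt(2*t+t*t))
}

func main() {
	for _, x := range []float64{1, 1.0001, 1.5, 2} {
		fmt.Println(acoshViaLog1p(x), math.Acosh(x))
	}
}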
tensorflow/c/experimental/ops/math_ops.cc
}

// Op: Log1p()
// Summary: Computes natural logarithm of (1 + x) element-wise.
//
// Description:
//   I.e., \\(y = \log_e (1 + x)\\).
//
// Example:
//
// ```python
// x = tf.constant([0, 0.5, 1, 5])
// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
// ```
Status Log1p(AbstractContext* ctx, AbstractTensorHandle* const x,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 12.2K bytes - Viewed (0)
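The docstring's expected values can be reproduced outside TensorFlow; this Go sketch (not TensorFlow code) evaluates the same inputs with math.Log1p:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Inputs taken from the docstring's example above.
	for _, x := range []float64{0, 0.5, 1, 5} {
		fmt.Printf("log1p(%g) = %.7f\n", x, math.Log1p(x))
	}
	// Expected per the docstring: 0, 0.4054651, 0.6931472, 1.7917595
}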
tensorflow/c/experimental/ops/update_cpp_ops.sh
  ExpandDims \
  OnesLike

${generate} \
  --category=math \
  Mul \
  Conj \
  AddV2 \
  MatMul \
  Neg \
  Sum \
  Sub \
  Div \
  DivNoNan \
  Exp \
  Sqrt \
  SqrtGrad \
  Log1p

${generate} \
  --category=nn \
  SparseSoftmaxCrossEntropyWithLogits \
  ReluGrad \
  Relu \
  BiasAdd \
  BiasAddGrad

${generate} \
  --category=resource_variable \
  VarHandleOp \
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 17 17:54:34 UTC 2022 - 1.6K bytes - Viewed (0)
src/math/arith_s390x_test.go
		a := vf[i] / 100
		if f := Log1pNovec(a); !veryclose(log1p[i], f) {
			t.Errorf("Log1p(%g) = %g, want %g", a, f, log1p[i])
		}
	}
	a := 9.0
	if f := Log1pNovec(a); f != Ln10 {
		t.Errorf("Log1p(%g) = %g, want %g", a, f, Ln10)
	}
	for i := 0; i < len(vflogSC); i++ {
		if f := Log1pNovec(vflog1pSC[i]); !alike(log1pSC[i], f) {
			t.Errorf("Log1p(%g) = %g, want %g", vflog1pSC[i], f, log1pSC[i])
		}
	}
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 08 19:52:30 UTC 2017 - 10.8K bytes - Viewed (0)
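The hard-coded check above relies on the identity log1p(9) = log(10), which the test compares against the Ln10 constant; a one-line sketch of that identity in plain Go:

package main

import (
	"fmt"
	"math"
)

func main() {
	// log(1+9) = log(10), so Log1p(9) should match math.Ln10 (the test asserts exact equality).
	fmt.Println(math.Log1p(9.0), math.Ln10)
}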
tensorflow/cc/gradients/math_grad_test.cc
}

TEST_F(CWiseUnaryGradTest, Log1p) {
  auto x_fn = [this](const int i) { return RV({0, 1e-6, 1, 2, 3, 4, 100}); };
  TestCWiseGrad<float, float>(LOG1P, x_fn);
}

TEST_F(CWiseUnaryGradTest, Log1p_Complex) {
  auto x_fn = [this](const int i) {
    return CRV({{0, 0}, {1e-6, 0}, {2, -1}, {1, 2}, {3, 4}});
  };
  TestCWiseGrad<complex64, complex64>(LOG1P, x_fn);
}

TEST_F(CWiseUnaryGradTest, Sinh) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 36K bytes - Viewed (0)
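The gradient these tests verify is d/dx log1p(x) = 1/(1+x); a hedged Go sketch comparing that analytic form with a central finite difference over the same sample points:

package main

import (
	"fmt"
	"math"
)

func main() {
	const h = 1e-6
	for _, x := range []float64{0, 1e-6, 1, 2, 3, 4, 100} {
		numeric := (math.Log1p(x+h) - math.Log1p(x-h)) / (2 * h) // central difference
		analytic := 1 / (1 + x)                                  // derivative of log1p
		fmt.Printf("x=%g  numeric=%.6f  analytic=%.6f\n", x, numeric, analytic)
	}
}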