- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 30 for expm1 (0.39 sec)
-
src/math/expm1.go
// To compensate the error in the argument reduction, we use // expm1(r+c) = expm1(r) + c + expm1(r)*c // ~ expm1(r) + c + r*c // Thus c+r*c will be added in as the correction terms for // expm1(r+c). Now rearrange the term to avoid optimization // screw up: // ( 2 2 ) // ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 11:59:09 UTC 2023 - 7.9K bytes - Viewed (0) -
src/math/expm1_s390x.s
DATA ·expm1tab<> + 120(SB)/8, $-.115062908917949451E-01 GLOBL ·expm1tab<> + 0(SB), RODATA, $128 // Expm1 returns e**x - 1, the base-e exponential of x minus 1. // It is more accurate than Exp(x) - 1 when x is near zero. // // Special cases are: // Expm1(+Inf) = +Inf // Expm1(-Inf) = -1 // Expm1(NaN) = NaN // Very large values overflow to -1 or +Inf.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 15:34:41 UTC 2019 - 5.3K bytes - Viewed (0) -
src/math/export_s390x_test.go
var AsinhNovec = asinh var ErfNovec = erf var ErfcNovec = erfc var AtanNovec = atan var Atan2Novec = atan2 var CbrtNovec = cbrt var LogNovec = log var TanNovec = tan var ExpNovec = exp var Expm1Novec = expm1 var PowNovec = pow var HypotNovec = hypot
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 08 19:52:30 UTC 2017 - 732 bytes - Viewed (0) -
src/math/example_test.go
func ExampleExp2() { fmt.Printf("%.2f\n", math.Exp2(1)) fmt.Printf("%.2f\n", math.Exp2(-3)) // Output: // 2.00 // 0.12 } func ExampleExpm1() { fmt.Printf("%.6f\n", math.Expm1(0.01)) fmt.Printf("%.6f\n", math.Expm1(-1)) // Output: // 0.010050 // -0.632121 } func ExampleTrunc() { fmt.Printf("%.2f\n", math.Trunc(math.Pi)) fmt.Printf("%.2f\n", math.Trunc(-1.2345)) // Output:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 07 18:09:53 UTC 2021 - 3.7K bytes - Viewed (0) -
src/math/arith_s390x_test.go
} for i := 0; i < len(vf); i++ { a := vf[i] / 100 if f := Expm1Novec(a); !veryclose(expm1[i], f) { t.Errorf("Expm1(%g) = %g, want %g", a, f, expm1[i]) } } for i := 0; i < len(vf); i++ { a := vf[i] * 10 if f := Expm1Novec(a); !close(expm1Large[i], f) { t.Errorf("Expm1(%g) = %g, want %g", a, f, expm1Large[i]) } } for i := 0; i < len(vfexpm1SC); i++ {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 08 19:52:30 UTC 2017 - 10.8K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad_test.cc
} TEST_F(CWiseUnaryGradTest, Expm1) { auto x_fn = [this](const int i) { return RV({0, -1, 1e-6, 1, -1.5, 1.5}); }; TestCWiseGrad<float, float>(EXPM1, x_fn); } TEST_F(CWiseUnaryGradTest, Expm1_Complex) { auto x_fn = [this](const int i) { return CRV({{-1, 0}, {1, 0}, {1.5, -1.5}}); }; TestCWiseGrad<complex64, complex64>(EXPM1, x_fn); } TEST_F(CWiseUnaryGradTest, Log) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 36K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
(TF_ConstOp (getScaledAlpha $features)))))>; //===----------------------------------------------------------------------===// // Expm1 op patterns. //===----------------------------------------------------------------------===// // Expm1(x) = Exp(x) - 1 def LowerExp1mOp : Pat< (TF_Expm1Op $x), (TF_SubOp (TF_ExpOp $x), (TF_ConstOp (GetScalarOfType<1> $x)) )>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0) -
src/math/stubs_s390x.s
TEXT ·expm1TrampolineSetup(SB), NOSPLIT, $0 MOVB ·hasVX(SB), R1 CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported MOVD $·expm1vectorfacility+0x00(SB), R1 MOVD $·expm1(SB), R2 MOVD R2, 0(R1) BR ·expm1(SB) vectorimpl: MOVD $·expm1vectorfacility+0x00(SB), R1 MOVD $·expm1Asm(SB), R2 MOVD R2, 0(R1) BR ·expm1Asm(SB) GLOBL ·expm1vectorfacility+0x00(SB), NOPTR, $8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 15 15:48:19 UTC 2021 - 12.4K bytes - Viewed (0) -
src/math/all_test.go
a := vf[i] / 100 if f := Expm1(a); !veryclose(expm1[i], f) { t.Errorf("Expm1(%g) = %g, want %g", a, f, expm1[i]) } } for i := 0; i < len(vf); i++ { a := vf[i] * 10 if f := Expm1(a); !close(expm1Large[i], f) { t.Errorf("Expm1(%g) = %g, want %g", a, f, expm1Large[i]) } } for i := 0; i < len(vfexpm1SC); i++ { if f := Expm1(vfexpm1SC[i]); !alike(expm1SC[i], f) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jul 07 17:39:26 UTC 2023 - 86.8K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
std::vector<Output>* grad_outputs) { // y = expm1(x) // dy/dx = exp(x) auto dydx = Exp(scope, op.input(0)); // grad(x) = grad(y) * conj(dy/dx) grad_outputs->push_back( Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx))); return scope.status(); } REGISTER_GRADIENT_OP("Expm1", Expm1Grad); Status LogGrad(const Scope& scope, const Operation& op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0)