- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 67 for log1p (0.06 sec)
-
tensorflow/c/experimental/gradients/math_grad_test.cc
} Status Log1pModel(AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs, absl::Span<AbstractTensorHandle*> outputs) { return ops::Log1p(ctx, inputs[0], &outputs[0], "Log1p"); } Status DivNoNanModel(AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs, absl::Span<AbstractTensorHandle*> outputs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 13 17:32:14 UTC 2023 - 16.3K bytes - Viewed (0) -
tensorflow/c/experimental/ops/math_ops.h
const char* name = nullptr, const char* raw_device_name = nullptr); // Computes natural logarithm of (1 + x) element-wise. Status Log1p(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle** y, const char* name = nullptr, const char* raw_device_name = nullptr); } // namespace ops } // namespace tensorflow
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 4.4K bytes - Viewed (0) -
src/math/stubs_s390x.s
TEXT ·log1pTrampolineSetup(SB), NOSPLIT, $0 MOVB ·hasVX(SB), R1 CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported MOVD $·log1pvectorfacility+0x00(SB), R1 MOVD $·log1p(SB), R2 MOVD R2, 0(R1) BR ·log1p(SB) vectorimpl: MOVD $·log1pvectorfacility+0x00(SB), R1 MOVD $·log1pAsm(SB), R2 MOVD R2, 0(R1) BR ·log1pAsm(SB) GLOBL ·log1pvectorfacility+0x00(SB), NOPTR, $8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 15 15:48:19 UTC 2021 - 12.4K bytes - Viewed (0) -
src/math/all_test.go
a := vf[i] / 100 if f := Log1p(a); !veryclose(log1p[i], f) { t.Errorf("Log1p(%g) = %g, want %g", a, f, log1p[i]) } } a := 9.0 if f := Log1p(a); f != Ln10 { t.Errorf("Log1p(%g) = %g, want %g", a, f, Ln10) } for i := 0; i < len(vflogSC); i++ { if f := Log1p(vflog1pSC[i]); !alike(log1pSC[i], f) { t.Errorf("Log1p(%g) = %g, want %g", vflog1pSC[i], f, log1pSC[i]) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jul 07 17:39:26 UTC 2023 - 86.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
def LogOfSoftmax : Pat< (TF_LogOp:$src (TF_SoftmaxOp $arg)), (TF_LogSoftmaxOp:$dest $arg), [], [(CopyAttrs $src, $dest)]>; // Canonicalize: Log(1.0 + x) to Log1p(x) // // We currently do this rewrite only if the constant `1` is a scalar, because // it is safely broadcastable to any shape. To be able to canonicalize when
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 17K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/math_grad.cc
absl::Span<AbstractTensorHandle* const> grad_outputs, absl::Span<AbstractTensorHandle*> grad_inputs) override { // TODO(vnvo2409): Add control dependency /* Given upstream grad U and a Log1p op: Y = log(1 + X), the gradients are: * * dX = U / (1 + X) * */ AbstractTensorHandle* upstream_grad = grad_outputs[0]; AbstractTensorHandle* X = forward_inputs_[0];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 15.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
} // To improve accuracy on platforms with less-precise log implementations, // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on // the device. // log(t) = log(kLanczosGamma + 0.5 + z) // = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5)) Value t = rewriter.create<AddV2Op>(loc, lanczos_gamma_plus_one_half, z); Value z_div_lanczos_gamma_plus_one_half =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
// Hoist coefficient-wise unary operation out of the Concat op: // // %0 = "tf.Log1p"(%arg_0) // %1 = "tf.Log1p"(%arg_1) // ... // %n = "tf.Log1p"(%arg_n) // %m = "tf.ConcatV2"(%0, %1, ..., %n, %axis) // // Rewrite it to: // // %0 = "tf.ConcatV2"(%arg_0, %arg_1, ..., %arg_n, %axis) // %1 = "tf.Log1p"(%0) class HoistCwiseUnaryOutOfConcat : public OpRewritePattern<TF::ConcatV2Op> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
// y = log1p(x) // dy/dx = 1 / (1 + x) auto one = Cast(scope, Const(scope, 1.0), op.input(0).type()); auto dydx = Reciprocal(scope, Add(scope, one, op.input(0))); // grad(x) = grad(y) * conj(dy/dx) grad_outputs->push_back( Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx))); return scope.status(); } REGISTER_GRADIENT_OP("Log1p", Log1pGrad);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0)