- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 25 for grad_inputs_ (0.41 sec)
-
tensorflow/cc/framework/gradients.cc
std::vector<Output>* grad_outputs) { std::vector<Output> grad_inputs; grad_inputs.reserve(outputs.size()); for (const Output& output : outputs) { grad_inputs.emplace_back(ops::OnesLike(scope, output)); } return AddSymbolicGradients(scope, outputs, inputs, grad_inputs, grad_outputs); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/array_grad.cc
absl::Span<AbstractTensorHandle*> grad_inputs) override { for (int i = 0; i < grad_outputs.size(); i++) { auto grad_input = grad_outputs[i]; // TODO(srbs): Should we add a copy constructor to AbstractTensorHandle // that takes care of this similar to `Tensor`? if (grad_input) { grad_input->Ref(); } grad_inputs[i] = grad_input; } return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad.cc
const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) { auto softmax = Exp(scope, op.output(0)); auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true)); auto mul = Mul(scope, sum, softmax); auto dx = Sub(scope, grad_inputs[0], mul); grad_outputs->push_back(dx); return scope.status(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
if (!ta && !tb) { return MatMulGradHelper(scope, is_batch, grad_inputs[0], false, b, true, a.type(), a, true, grad_inputs[0], false, b.type(), grad_outputs); } else if (!ta && tb) { return MatMulGradHelper(scope, is_batch, grad_inputs[0], false, b, false, a.type(), grad_inputs[0], true, a, false, b.type(), grad_outputs);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad.cc
const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) { grad_outputs->push_back(Identity(scope, grad_inputs[0])); return scope.status(); } REGISTER_GRADIENT_OP("Identity", IdentityGrad); Status RefIdentityGrad(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 31.7K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/math_grad.cc
absl::Span<AbstractTensorHandle*> grad_inputs) override { // TODO(b/161805092): Support broadcasting. DCHECK(grad_outputs[0]); grad_inputs[0] = grad_outputs[0]; grad_inputs[1] = grad_outputs[0]; grad_inputs[0]->Ref(); grad_inputs[1]->Ref(); return absl::OkStatus(); } ~AddGradientFunction() override {} };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 15.2K bytes - Viewed (0) -
tensorflow/cc/gradients/image_grad.cc
std::vector<Output>* grad_outputs) { string kernel_type; TF_RETURN_IF_ERROR( GetNodeAttr(op.node()->attrs(), "kernel_type", &kernel_type)); bool antialias; TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "antialias", &antialias)); grad_outputs->push_back(internal::ScaleAndTranslateGrad( scope, grad_inputs[0], op.input(0), op.input(2), op.input(3),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 11 00:29:23 UTC 2021 - 5.7K bytes - Viewed (0) -
tensorflow/cc/gradients/manip_grad.cc
namespace ops { namespace { Status RollGrad(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) { auto shift = op.input(1); auto axis = op.input(2); auto grad_op = Roll(scope, grad_inputs[0], Neg(scope, shift), axis); grad_outputs->push_back(grad_op); grad_outputs->push_back(NoGradient()); grad_outputs->push_back(NoGradient());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 19 12:19:42 UTC 2020 - 1.4K bytes - Viewed (0) -
tensorflow/cc/framework/grad_op_registry.h
/// Implementations should add operations to compute the gradient outputs of /// 'op' (returned in 'grad_outputs') using 'scope' and 'grad_inputs'. typedef Status (*GradFunc)(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs); /// GradOpRegistry maintains a static registry of gradient functions.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 15:33:58 UTC 2022 - 2.9K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/nn_grad.cc
absl::Span<AbstractTensorHandle*> grad_inputs) override { // Grad for Softmax Input TF_RETURN_IF_ERROR(BroadcastMul( ctx, grad_outputs[0], forward_outputs_[1], grad_inputs.subspan(0, 1))); // upstream_grad * local softmax grad // Grad for labels is null grad_inputs[1] = nullptr; return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes - Viewed (0)