- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 16 for ReluGrad (0.18 sec)
-
tensorflow/c/experimental/ops/nn_ops.cc
Status status = op_ptr->Execute(temp_outputs, &num_retvals); *loss = temp_outputs[0]; *backprop = temp_outputs[1]; return status; } // Op: ReluGrad() // Summary: Computes rectified linear gradients for a Relu operation. // // Description: Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients, AbstractTensorHandle* const features,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 5.9K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/nn_grad.cc
#include "tensorflow/c/experimental/ops/nn_ops.h" #include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h" #include "tensorflow/core/platform/errors.h" using std::vector; using tensorflow::ops::BiasAddGrad; using tensorflow::ops::ReluGrad; namespace tensorflow { namespace gradients { namespace { class ReluGradientFunction : public GradientFunction { public: explicit ReluGradientFunction(vector<AbstractTensorHandle*> f_outputs)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/c/experimental/ops/update_cpp_ops.sh
AddV2 \ MatMul \ Neg \ Sum \ Sub \ Div \ DivNoNan \ Exp \ Sqrt \ SqrtGrad \ Log1p ${generate} \ --category=nn \ SparseSoftmaxCrossEntropyWithLogits \ ReluGrad \ Relu \ BiasAdd \ BiasAddGrad ${generate} \ --category=resource_variable \ VarHandleOp \ ReadVariableOp \ AssignVariableOp \ DestroyResourceOp ${generate} \
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 17 17:54:34 UTC 2022 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.golden_summary
Conv2DBackpropInput 52 DivNoNan 1 Equal 1 FusedBatchNorm 53 FusedBatchNormGrad 53 Identity 2 MatMul 3 MaxPool 1 MaxPoolGrad 1 Mean 1 Mul 164 Pad 1 ReadVariableOp 646 Relu 49 ReluGrad 49 Reshape 2 ResourceApplyKerasMomentum 161 ShapeN 50 Softmax 1 SparseSoftmaxCrossEntropyWithLogits 1 Square 55 Squeeze 1 Sub 106 Sum 57 Tile 1
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 06 10:38:14 UTC 2023 - 740 bytes - Viewed (0) -
tensorflow/compiler/jit/tests/keras_imagenet_main.golden_summary
Conv2DBackpropFilter 53 Conv2DBackpropInput 52 Equal 1 FusedBatchNormGradV2 53 FusedBatchNormV2 53 MatMul 3 MaxPool 1 MaxPoolGrad 1 Mean 1 Mul 218 Pad 2 ReadVariableOp 538 Relu 49 ReluGrad 49 Reshape 2 ResourceApplyKerasMomentum 161 Slice 1 Softmax 1 SparseSoftmaxCrossEntropyWithLogits 1 Squeeze 1 Sum 1 Tile 1 Transpose 1 cluster 1 size 815 AddN 1 AssignAddVariableOp 1
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 06 10:38:14 UTC 2023 - 874 bytes - Viewed (0) -
tensorflow/c/experimental/ops/nn_ops.h
AbstractTensorHandle** backprop, const char* name = nullptr, const char* raw_device_name = nullptr); // Computes rectified linear gradients for a Relu operation. Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients, AbstractTensorHandle* const features, AbstractTensorHandle** backprops, const char* name = nullptr,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/keras_imagenet_main.pbtxt
attr { key: "dtype" value { type: DT_FLOAT } } } node { name: "training/LossScaleOptimizer/gradients/activation_48_1/Relu_grad/ReluGrad" op: "ReluGrad" input: "training/LossScaleOptimizer/gradients/avg_pool_1/Mean_grad/truediv" input: "activation_48_1/Relu" device: "/job:localhost/replica:0/task:0/device:GPU:0" attr { key: "T"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 30 02:52:54 UTC 2019 - 1.3M bytes - Viewed (0) -
tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.pbtxt
} } attr { key: "keep_dims" value { b: false } } } node { name: "training/SGD/gradients/activation_48_1/Relu_grad/ReluGrad" op: "ReluGrad" input: "training/SGD/gradients/avg_pool_1/Mean_grad/truediv" input: "activation_48_1/Relu" device: "/job:localhost/replica:0/task:0/device:GPU:0" attr { key: "T" value {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 30 02:52:54 UTC 2019 - 1.1M bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad_test.cc
} TEST_F(NNGradTest, EluGrad) { TensorShape shape({5, 2}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)); auto y = Elu(scope_, x); Tensor x_init_value = test::AsTensor<float>( {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f}, {5, 2}); RunTest(x, x_init_value, y, shape); } TEST_F(NNGradTest, SeluGrad) { TensorShape shape({5, 2});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad.cc
Status ReluGradHelper(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) { auto dx = internal::ReluGrad(scope, grad_inputs[0], op.input(0)); grad_outputs->push_back(dx); return scope.status(); } REGISTER_GRADIENT_OP("Relu", ReluGradHelper); Status Relu6GradHelper(const Scope& scope, const Operation& op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0)