Results 1 - 10 of 18 for relu_grad (0.25 sec)
tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py
@tf.RegisterGradient('NewConv2D')
def _conv_add_relu_grad(op: ops.Operation, grad):
  act = op.get_attr('act')
  y = op.outputs[0]
  if act == 'RELU':
    grad = gen_nn_ops.relu_grad(grad, y)
  elif act == 'RELU6':
    grad = gen_nn_ops.relu6_grad(grad, y)
  elif act == 'TANH':
    y = math_ops.conj(y)
    grad = gen_math_ops.tanh_grad(y, grad)
  broadcast_shape = tf.shape(y)
  input_value_shape = tf.shape(op.inputs[2])
Last Modified: Thu Aug 31 20:23:51 UTC 2023 - 6.8K bytes
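For orientation, a minimal sketch of what the 'RELU' branch above computes (illustrative only, not part of ops_defs.py; the tensor values are invented): gen_nn_ops.relu_grad forwards the upstream gradient only where the forward ReLU output is positive.

import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops  # same internal module used in the snippet above

y = tf.constant([0.0, 0.0, 1.5, 3.0])      # forward ReLU output
grad = tf.constant([1.0, 1.0, 1.0, 1.0])   # upstream gradient

dx = gen_nn_ops.relu_grad(grad, y)         # -> [0.0, 0.0, 1.0, 1.0]
# Equivalent hand-rolled mask:
dx_manual = tf.where(y > 0.0, grad, tf.zeros_like(grad))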
tensorflow/c/experimental/gradients/nn_grad.cc
    AbstractTensorHandle* upstream_grad = grad_outputs[0];
    AbstractTensorHandle* activations = forward_outputs_[0];

    // Calculate Grad
    std::string name = "relu_grad";
    TF_RETURN_IF_ERROR(ReluGrad(ctx, upstream_grad, activations,
                                &grad_inputs[0], name.c_str()));
    return absl::OkStatus();
  }
  ~ReluGradientFunction() override {
Last Modified: Tue Apr 09 06:38:45 UTC 2024 - 5.7K bytes
tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.pbtxt
    }
  }
  attr {
    key: "keep_dims"
    value {
      b: false
    }
  }
}
node {
  name: "training/SGD/gradients/activation_48_1/Relu_grad/ReluGrad"
  op: "ReluGrad"
  input: "training/SGD/gradients/avg_pool_1/Mean_grad/truediv"
  input: "activation_48_1/Relu"
  device: "/job:localhost/replica:0/task:0/device:GPU:0"
  attr {
    key: "T"
Last Modified: Thu May 30 02:52:54 UTC 2019 - 1.1M bytes
tensorflow/compiler/jit/tests/keras_imagenet_main.pbtxt
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
}
node {
  name: "training/LossScaleOptimizer/gradients/activation_48_1/Relu_grad/ReluGrad"
  op: "ReluGrad"
  input: "training/LossScaleOptimizer/gradients/avg_pool_1/Mean_grad/truediv"
  input: "activation_48_1/Relu"
  device: "/job:localhost/replica:0/task:0/device:GPU:0"
  attr {
    key: "T"
Last Modified: Thu May 30 02:52:54 UTC 2019 - 1.3M bytes
tensorflow/cc/gradients/nn_grad_test.cc
}

TEST_F(NNGradTest, EluGrad) {
  TensorShape shape({5, 2});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
  auto y = Elu(scope_, x);
  Tensor x_init_value = test::AsTensor<float>(
      {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f}, {5, 2});
  RunTest(x, x_init_value, y, shape);
}

TEST_F(NNGradTest, SeluGrad) {
  TensorShape shape({5, 2});
Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes
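The tests above compare analytic gradients against numeric differences via RunTest. A rough Python equivalent of that kind of check, assuming only the public tf.test.compute_gradient API (values mirror the 5x2 initializer in EluGrad and avoid the kink at 0):

import tensorflow as tf

# Compare the theoretical and finite-difference Jacobians of ELU.
x = tf.constant([[-0.9, -0.7], [-0.5, -0.3], [-0.1, 0.1],
                 [0.3, 0.5], [0.7, 0.9]])
theoretical, numerical = tf.test.compute_gradient(tf.nn.elu, [x])
max_error = tf.reduce_max(tf.abs(theoretical[0] - numerical[0]))
print(float(max_error))  # expected to be tiny, within finite-difference tolerance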
tensorflow/cc/gradients/nn_grad.cc
                        std::vector<Output>* grad_outputs) {
  auto dx = internal::Relu6Grad(scope, grad_inputs[0], op.input(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Relu6", Relu6GradHelper);

Status LeakyReluGradHelper(const Scope& scope, const Operation& op,
                           const std::vector<Output>& grad_inputs,
Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes
tensorflow/c/experimental/ops/nn_ops.cc
  Status status = op_ptr->Execute(temp_outputs, &num_retvals);
  *loss = temp_outputs[0];
  *backprop = temp_outputs[1];
  return status;
}

// Op: ReluGrad()
// Summary: Computes rectified linear gradients for a Relu operation.
//
// Description:
Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                AbstractTensorHandle* const features,
Last Modified: Tue May 10 19:11:36 UTC 2022 - 5.9K bytes
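The generated C wrapper above targets the registered ReluGrad op; from Python the same op is reachable as tf.raw_ops.ReluGrad. A small illustration (invented values) of the rule stated in the summary comment, i.e. the incoming gradient is forwarded only where the features are positive:

import tensorflow as tf

features = tf.constant([-1.0, 0.0, 2.0])   # inputs to (or outputs of) the forward Relu
gradients = tf.constant([1.0, 1.0, 1.0])   # incoming gradients

# Gradient passes through only where features > 0, yielding [0.0, 0.0, 1.0] here.
dx = tf.raw_ops.ReluGrad(gradients=gradients, features=features)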
tensorflow/c/experimental/ops/update_cpp_ops.sh
  AddV2 \
  MatMul \
  Neg \
  Sum \
  Sub \
  Div \
  DivNoNan \
  Exp \
  Sqrt \
  SqrtGrad \
  Log1p

${generate} \
  --category=nn \
  SparseSoftmaxCrossEntropyWithLogits \
  ReluGrad \
  Relu \
  BiasAdd \
  BiasAddGrad

${generate} \
  --category=resource_variable \
  VarHandleOp \
  ReadVariableOp \
  AssignVariableOp \
  DestroyResourceOp

${generate} \
Last Modified: Tue May 17 17:54:34 UTC 2022 - 1.6K bytes
tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.golden_summary
Conv2DBackpropInput 52
DivNoNan 1
Equal 1
FusedBatchNorm 53
FusedBatchNormGrad 53
Identity 2
MatMul 3
MaxPool 1
MaxPoolGrad 1
Mean 1
Mul 164
Pad 1
ReadVariableOp 646
Relu 49
ReluGrad 49
Reshape 2
ResourceApplyKerasMomentum 161
ShapeN 50
Softmax 1
SparseSoftmaxCrossEntropyWithLogits 1
Square 55
Squeeze 1
Sub 106
Sum 57
Tile 1
Last Modified: Fri Jan 06 10:38:14 UTC 2023 - 740 bytes
tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir
  %3 = "chlo.broadcast_maximum"(%2, %0) {broadcast_dimensions = array<i64>} : (tensor<?xi32>, tensor<i32>) -> tensor<?xi32>
  func.return %3 : tensor<?xi32>
}

// CHECK-LABEL: func @relu_grad(
// CHECK-SAME:   %[[VAL_0:.*]]: tensor<4x8xf32>,
// CHECK-SAME:   %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<4x8xf32> {
Last Modified: Wed May 29 07:26:59 UTC 2024 - 340.2K bytes