Results 11 - 20 of 45 for _backprop (0.12 sec)
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_70.mlir
  %arg2: tensor<64xf32>) -> tensor<1x28x28x64xf32> {
    // CHECK: "tf.FusedBatchNormGradV3"
    // CHECK-SAME: (%[[X_TRANSPOSE:[0-9]*]], %[[Y_TRANSPOSE:[0-9]*]],
    // CHECK-SAME: data_format = "NCHW"
    %x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2 =
      "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2) {
        data_format = "NHWC", epsilon = 1.001 : f32,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 21 08:41:18 UTC 2022 - 8.5K bytes - Viewed (0)
tensorflow/cc/gradients/nn_grad.cc
  // We multiply the backprop for cost with the gradients - op.output[1].
  // There is no gradient for labels.
  // The outputs of the network are at input index 0.
  auto logits = op.input(0);
  // The "truth" labels are at index 1.
  auto softmax_grad = op.output(1);
  // The loss is the output at index 0, and backprop is the output at index 1.
  auto grad_loss = grad_inputs[0];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0)
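The nn_grad.cc hit above lists the ingredients of the softmax cross-entropy gradient. As a sketch of how a tensorflow/cc gradient function of this shape typically combines them (illustrative, not the verbatim source; SoftmaxXentGradSketch is a made-up name), the incoming loss gradient is expanded over the class axis and multiplied by the cached backprop output:

  #include "tensorflow/cc/framework/grad_op_registry.h"
  #include "tensorflow/cc/ops/standard_ops.h"

  namespace tensorflow {
  namespace ops {

  // Sketch: gradient for an op whose forward outputs are (loss, backprop).
  Status SoftmaxXentGradSketch(const Scope& scope, const Operation& op,
                               const std::vector<Output>& grad_inputs,
                               std::vector<Output>* grad_outputs) {
    auto softmax_grad = op.output(1);  // backprop = softmax(logits) - labels
    auto grad_loss = grad_inputs[0];   // incoming d(cost)/d(loss), shape [batch]
    // d(cost)/d(logits): broadcast grad_loss over the class axis.
    auto grad = Mul(scope, ExpandDims(scope, grad_loss, -1), softmax_grad);
    grad_outputs->push_back(grad);
    grad_outputs->push_back(NoGradient());  // there is no gradient for labels
    return scope.status();
  }

  }  // namespace ops
  }  // namespace tensorflow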
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir
  // CHECK: %[[RES_PERM:.*]] = "tf.Const"()
  // CHECK-SAME: <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
  // CHECK: %[[RES_TPOSE:[0-9]*]] = "tf.Transpose"
  // CHECK-SAME: (%x_backprop, %[[RES_PERM]])
  // CHECK: return %[[RES_TPOSE]]
  %x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2 =
    "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2) {
      data_format = "NHWC",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes - Viewed (0)
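Both layout_optimization hits boil down to one permutation fact: the [0, 2, 3, 1] constant fed to tf.Transpose maps the rewritten op's NCHW result back to the NHWC layout the surrounding graph expects. A self-contained illustration (not from the repository):

  #include <array>
  #include <cstdio>

  int main() {
    std::array<int, 4> nchw = {1, 64, 28, 28};  // result of the NCHW batch-norm grad
    std::array<int, 4> perm = {0, 2, 3, 1};     // RES_PERM from the test
    std::array<int, 4> nhwc;
    for (int i = 0; i < 4; ++i) nhwc[i] = nchw[perm[i]];  // out[i] = in[perm[i]]
    std::printf("%d %d %d %d\n", nhwc[0], nhwc[1], nhwc[2], nhwc[3]);  // 1 28 28 64
    return 0;
  }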
tensorflow/c/experimental/gradients/nn_grad_test.cc
  absl::Span<AbstractTensorHandle*> outputs) {
    AbstractTensorHandle* loss;
    AbstractTensorHandle* backprop;
    TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
        ctx, inputs[0], inputs[1], &loss, &backprop,
        "SparseSoftmaxCrossEntropyWithLogits"));
    // `gradient_checker` only works with model that returns only 1 tensor.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 8.3K bytes - Viewed (0)
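The truncated comment explains the shape of this test model: the op returns (loss, backprop), but the gradient checker differentiates a single output. A plausible completion of the helper, assuming it follows the usual pattern in these tests (a sketch, not the verbatim file):

  // Keep only the loss; the checker works with a single returned tensor.
  outputs[0] = loss;
  backprop->Unref();  // drop the second output the op handed back
  return absl::OkStatus();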
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.h.golden
  Status SparseSoftmaxCrossEntropyWithLogits(
      AbstractContext* ctx, AbstractTensorHandle* const features,
      AbstractTensorHandle* const labels, AbstractTensorHandle** loss,
      AbstractTensorHandle** backprop, const char* name = nullptr,
      const char* raw_device_name = nullptr);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 2.9K bytes - Viewed (0)
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.cc.golden
  // Summary:
  //
  // Description:
  Status SparseSoftmaxCrossEntropyWithLogits(
      AbstractContext* ctx, AbstractTensorHandle* const features,
      AbstractTensorHandle* const labels, AbstractTensorHandle** loss,
      AbstractTensorHandle** backprop, const char* name,
      const char* raw_device_name) {
    AbstractOperationPtr op_ptr(ctx->CreateOperation());
    TF_RETURN_IF_ERROR(
        op_ptr->Reset("SparseSoftmaxCrossEntropyWithLogits", raw_device_name));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 6.5K bytes - Viewed (0)
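The golden snippet cuts off right after Reset. Based on the pattern these generated wrappers follow (one AddInput per operand, then Execute into a retval span), the remainder plausibly reads like this sketch; the exact golden text may differ:

  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(features));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(labels));
  int num_retvals = 2;
  AbstractTensorHandle* temp_outputs[2];
  TF_RETURN_IF_ERROR(op_ptr->Execute(absl::MakeSpan(temp_outputs, 2), &num_retvals));
  *loss = temp_outputs[0];      // output 0: per-example loss
  *backprop = temp_outputs[1];  // output 1: gradient w.r.t. the logits
  return absl::OkStatus();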
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
  // computes loss and backprop of the loss with respect to 'features'.
  //
  // Softmax cross entropy loss is defined as follows:
  //
  //   loss = Sum(-labels * Log(Exp(features) / Sum(Exp(features))))
  //   loss = Sum(-labels * LogSoftmax(features))
  //
  // Computing the gradient of the loss with respect to features gives us
  //
  //   backprop = (Exp(features) / Sum(Exp(features))) - labels
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0)
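Written out, with p = softmax(features) and labels l that sum to 1 across classes, the comment's two formulas are the standard derivation (in LaTeX):

  \mathrm{loss} = -\sum_i l_i \log\frac{e^{f_i}}{\sum_j e^{f_j}}
                = -\sum_i l_i\,\mathrm{LogSoftmax}(f)_i,
  \qquad
  \frac{\partial\,\mathrm{loss}}{\partial f_k}
    = p_k \sum_i l_i - l_k = p_k - l_k,

which is exactly backprop = softmax(features) - labels.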
tensorflow/c/experimental/ops/array_ops.cc
  // backprop such that dx = g(dy). In Python,
  //
  // ```python
  // with tf.get_default_graph().gradient_override_map(
  //     {'IdentityN': 'OverrideGradientWithG'}):
  //   y, _ = identity_n([f(x), x])
  //
  // @tf.RegisterGradient('OverrideGradientWithG')
  // def ApplyG(op, dy, _):
  //   return [None, g(dy)]  # Do not backprop to f(x).
  // ```
  Status IdentityN(AbstractContext* ctx,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 6.7K bytes - Viewed (0)
src/main/webapp/js/admin/bootstrap.min.js.map
  vent.HIDDEN)
      })
    }

    _removeBackdrop() {
      if (this._backdrop) {
        $(this._backdrop).remove()
        this._backdrop = null
      }
    }

    _showBackdrop(callback) {
      const animate = $(this._element).hasClass(ClassName.FADE)
        ? ClassName.FADE : ''

      if (this._isShown && this._config.backdrop) {
        this._backdrop = document.createElement('div')
        this._backdrop.className = ClassName.BACKDROP

        if (animate) {
          this._backdrop.classList.add(animate)
  ...
Registered: Wed Jun 12 13:08:18 UTC 2024 - Last Modified: Fri Feb 07 10:28:50 UTC 2020 - 185.8K bytes - Viewed (0)
src/main/webapp/js/bootstrap.min.js.map
  vent.HIDDEN)
      })
    }

    _removeBackdrop() {
      if (this._backdrop) {
        $(this._backdrop).remove()
        this._backdrop = null
      }
    }

    _showBackdrop(callback) {
      const animate = $(this._element).hasClass(ClassName.FADE)
        ? ClassName.FADE : ''

      if (this._isShown && this._config.backdrop) {
        this._backdrop = document.createElement('div')
        this._backdrop.className = ClassName.BACKDROP

        if (animate) {
          this._backdrop.classList.add(animate)
  ...
Registered: Wed Jun 12 13:08:18 UTC 2024 - Last Modified: Sat Jan 11 06:54:28 UTC 2020 - 189.9K bytes - Viewed (0)