- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 34 for _backprop (0.38 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir
// CHECK: %[[RES_PERM:.*]] = "tf.Const"() // CHECK-SAME: <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}> // CHECK: %[[RES_TPOSE:[0-9]*]] = "tf.Transpose" // CHECK-SAME: (%x_backprop, %[[RES_PERM]]) // CHECK: return %[[RES_TPOSE]] %x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2 = "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2) { data_format = "NHWC",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/nn_grad_test.cc
absl::Span<AbstractTensorHandle*> outputs) { AbstractTensorHandle* loss; AbstractTensorHandle* backprop; TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits( ctx, inputs[0], inputs[1], &loss, &backprop, "SparseSoftmaxCrossEntropyWithLogits")); // `gradient_checker` only works with model that returns only 1 tensor.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.h.golden
// Status SparseSoftmaxCrossEntropyWithLogits(AbstractContext* ctx, AbstractTensorHandle* const features, AbstractTensorHandle* const labels, AbstractTensorHandle** loss, AbstractTensorHandle** backprop, const char* name = nullptr, const char* raw_device_name = nullptr); //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 2.9K bytes - Viewed (0) -
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.cc.golden
// Summary: // // Description: Status SparseSoftmaxCrossEntropyWithLogits(AbstractContext* ctx, AbstractTensorHandle* const features, AbstractTensorHandle* const labels, AbstractTensorHandle** loss, AbstractTensorHandle** backprop, const char* name, const char* raw_device_name) { AbstractOperationPtr op_ptr(ctx->CreateOperation()); TF_RETURN_IF_ERROR(op_ptr->Reset("SparseSoftmaxCrossEntropyWithLogits", raw_device_name));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
// computes loss and backprop of the loss with respect to 'features'. // // Softmax cross entropy loss is defined as follows: // // loss = Sum(-labels * Log(Exp(features) / Sum(Exp(features)))) // loss = Sum(-labels * LogSoftmax(features)) // // Computing gradient of the loss with respect to features gives us, // // backprop = (Exp(features) / Sum(Exp(features))) - labels
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/c/experimental/ops/array_ops.cc
// backprop such that dx = g(dy). In Python, // // ```python // with tf.get_default_graph().gradient_override_map( // {'IdentityN': 'OverrideGradientWithG'}): // y, _ = identity_n([f(x), x]) // // @tf.RegisterGradient('OverrideGradientWithG') // def ApplyG(op, dy, _): // return [None, g(dy)] # Do not backprop to f(x). // ``` Status IdentityN(AbstractContext* ctx,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 6.7K bytes - Viewed (0) -
tensorflow/c/eager/tape.h
// any gradients to be computed). // // Finally, we start a backprop stack with a set of tape entries for which we // have all gradients available. This set usually is a subset of the set of // targets (not all since targets which have outputs in the tape will not have // gradients available initially). // // Then we repeatedly pop an entry from the stack, run its backprop, and update
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 02 12:40:29 UTC 2024 - 47.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir
// CHECK: %[[offset_backprop:.*]] = mhlo.convert %[[red2]] : tensor<8xf32> // CHECK: %[[x_backprop:.*]] = mhlo.convert %[[mul3]] : tensor<8x8x8x8xf32> // CHECK: return %[[x_backprop]] : tensor<8x8x8x8xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 38.6K bytes - Viewed (0) -
samples/bookinfo/src/productpage/static/tailwind/tailwind.css
s":" ","--tw-backdrop-contrast":" ","--tw-backdrop-grayscale":" ","--tw-backdrop-hue-rotate":" ","--tw-backdrop-invert":" ","--tw-backdrop-opacity":" ","--tw-backdrop-saturate":" ","--tw-backdrop-sepia":" "}),e({".backdrop-filter":{"@defaults backdrop-filter":{},"backdrop-filter":Fe},".backdrop-filter-none":{"backdrop-filter":"none"}})},transitionProperty:({matchUtilities:i,theme:e})=>{let t=e("transitionTimingFunction.DEFAULT"),r=e("transitionDuration.DEFAULT");i({transition:n=>({"transition-pr...
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue May 28 14:48:01 UTC 2024 - 357.1K bytes - Viewed (1) -
tensorflow/cc/gradients/nn_grad_test.cc
auto y = tensorflow::ops::SoftmaxCrossEntropyWithLogits(scope_, logits, labels); // Note the reversal of the backprop and loss orders. Issue #18734 has been // opened for this. RunTest({logits, labels}, {logits_shape, logits_shape}, {y.backprop, y.loss}, {logits_shape, loss_shape}); } TEST_F(NNGradTest, LogSoftmaxGrad) { TensorShape shape({5, 3});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes - Viewed (0)