Results 1 - 10 of 23 for _backprop (0.2 sec)
tensorflow/cc/framework/while_gradients.cc
  return result;
}

// The backprop loop counter and main backprop loop run in their own execution
// frame (conceptually, the main forward loop and forward loop counter run
// together in a frame, then the backprop loop counter and backprop loop run
// together in a different frame). This returns the frame name to use for the
// backprop while loops.
Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 8.1K bytes
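A minimal sketch of the naming scheme such a helper might use. This is a hypothetical standalone function, not the verbatim TensorFlow code: it only illustrates that the backprop frame name is derived from the forward loop's frame name, so the two execution frames stay related but distinct.

#include <string>

// Hypothetical helper: derive a distinct execution-frame name for the
// backprop loop counter and backprop loop from the forward frame's name.
std::string BackPropFrameName(const std::string& forward_frame_name) {
  return forward_frame_name + "_backprop";
}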
tensorflow/cc/framework/gradients.cc
  DCHECK(while_ctx != nullptr);

  // Record 'summed_grads' as the backprop input associated with 'exit_node'
  std::map<Node*, Output>& backprops = while_backprops_[while_ctx];
  DCHECK(backprops.find(exit_node) == backprops.end());
  backprops[exit_node] = summed_grads;

  // Wait until we have all exit nodes' backprops collected before processing
  // the while loop.
Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes
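A self-contained sketch of the "collect, then process" pattern the excerpt describes. The types (Node, Output, WhileContext, GradientCollector) are illustrative stand-ins, not the actual TensorFlow classes: gradients for each Exit node are recorded once, and the while loop is only differentiated after every Exit node's backprop has arrived.

#include <cassert>
#include <cstddef>
#include <map>

struct Node {};
struct Output { int value = 0; };
struct WhileContext {
  std::size_t num_exit_nodes = 0;  // how many Exit nodes the loop has
};

class GradientCollector {
 public:
  // Returns true once all exit nodes' backprops have been collected and
  // the while loop is ready to be processed.
  bool RecordBackprop(WhileContext* ctx, Node* exit_node, Output summed_grads) {
    std::map<Node*, Output>& backprops = while_backprops_[ctx];
    assert(backprops.find(exit_node) == backprops.end());  // record only once
    backprops[exit_node] = summed_grads;
    return backprops.size() == ctx->num_exit_nodes;
  }

 private:
  std::map<WhileContext*, std::map<Node*, Output>> while_backprops_;
};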
tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
  // Build new BackPropFilterOp.
  auto loc = backprop.getLoc();
  auto new_backprop = builder.create<TF::Conv2DBackpropFilterOp>(
      loc, new_result_type, input, new_filter_sizes, backprop.getOutBackprop(),
      strides, backprop.getUseCudnnOnGpu(), backprop.getPadding(),
      backprop.getExplicitPaddings(), backprop.getDataFormat(),
      backprop.getDilations());
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 29.3K bytes
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir
// CHECK: %[[RES_PERM:.*]] = "tf.Const"()
// CHECK-SAME: <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
// CHECK: %[[RES_TPOSE:[0-9]*]] = "tf.Transpose"
// CHECK-SAME: (%x_backprop, %[[RES_PERM]])
// CHECK: return %[[RES_TPOSE]]
%x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2 =
    "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2) {
      data_format = "NHWC",
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes
tensorflow/c/experimental/gradients/nn_grad_test.cc
    absl::Span<AbstractTensorHandle*> outputs) {
  AbstractTensorHandle* loss;
  AbstractTensorHandle* backprop;
  TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
      ctx, inputs[0], inputs[1], &loss, &backprop,
      "SparseSoftmaxCrossEntropyWithLogits"));
  // `gradient_checker` only works with model that returns only 1 tensor.
Last Modified: Wed Feb 28 13:53:47 UTC 2024 - 8.3K bytes
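The excerpt cuts off inside the test's model function. A sketch of how the full wrapper plausibly reads, hedged: the function name and everything after the excerpt's last line are assumptions, not the verbatim file. The point it illustrates is real, though: the op returns two tensors, but the gradient checker needs a single output, so only `loss` is returned.

Status SparseSoftmaxCrossEntropyWithLogitsModel(  // name is an assumption
    AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
    absl::Span<AbstractTensorHandle*> outputs) {
  AbstractTensorHandle* loss;
  AbstractTensorHandle* backprop;
  TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
      ctx, inputs[0], inputs[1], &loss, &backprop,
      "SparseSoftmaxCrossEntropyWithLogits"));
  // The gradient checker expects a single output, so return `loss` and
  // release the unused `backprop` handle (tail assumed, not shown above).
  outputs[0] = loss;
  backprop->Unref();
  return absl::OkStatus();
}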
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.h.golden
//
Status SparseSoftmaxCrossEntropyWithLogits(
    AbstractContext* ctx, AbstractTensorHandle* const features,
    AbstractTensorHandle* const labels, AbstractTensorHandle** loss,
    AbstractTensorHandle** backprop, const char* name = nullptr,
    const char* raw_device_name = nullptr);
//
Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 2.9K bytes
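A minimal usage sketch for the declaration above. `ComputeLossAndBackprop` is a hypothetical caller; the context and input handles are assumed to already exist (e.g. from an eager context), and error propagation via TF_RETURN_IF_ERROR matches the surrounding codebase's style.

// Hypothetical caller: compute both outputs, use them, release the handles.
Status ComputeLossAndBackprop(AbstractContext* ctx,
                              AbstractTensorHandle* features,
                              AbstractTensorHandle* labels) {
  AbstractTensorHandle* loss = nullptr;
  AbstractTensorHandle* backprop = nullptr;
  TF_RETURN_IF_ERROR(SparseSoftmaxCrossEntropyWithLogits(
      ctx, features, labels, &loss, &backprop));
  // ... consume `loss` and `backprop` here ...
  loss->Unref();
  backprop->Unref();
  return absl::OkStatus();
}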
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.cc.golden
// Summary:
//
// Description:
Status SparseSoftmaxCrossEntropyWithLogits(
    AbstractContext* ctx, AbstractTensorHandle* const features,
    AbstractTensorHandle* const labels, AbstractTensorHandle** loss,
    AbstractTensorHandle** backprop, const char* name,
    const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(
      op_ptr->Reset("SparseSoftmaxCrossEntropyWithLogits", raw_device_name));
Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 6.5K bytes
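The excerpt stops right after Reset(). The remainder of such a generated wrapper typically follows the generator's usual pattern: set the op name, add inputs in declaration order, execute, and unpack the results. This is a sketch of that pattern, not the golden file's verbatim contents.

  // Assumed continuation, following the generated-ops convention:
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(features));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(labels));
  int num_retvals = 2;
  AbstractTensorHandle* temp_outputs[2];
  Status status = op_ptr->Execute(temp_outputs, &num_retvals);
  *loss = temp_outputs[0];      // first result: per-example loss
  *backprop = temp_outputs[1];  // second result: gradient w.r.t. features
  return status;
}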
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td
// computes loss and backprop of the loss with respect to 'features'.
//
// Softmax cross entropy loss is defined as follows:
//
//   loss = Sum(-labels * Log(Exp(features) / Sum(Exp(features))))
//        = Sum(-labels * LogSoftmax(features))
//
// Computing the gradient of the loss with respect to features gives us,
//
//   backprop = (Exp(features) / Sum(Exp(features))) - labels
Last Modified: Tue Jun 04 13:30:42 UTC 2024 - 24.7K bytes
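The gradient stated in that comment can be verified directly. A short derivation, assuming each row of labels sums to 1 (as one-hot or normalized label distributions do); writing s_i for the softmax of features f and l_i for the labels:

% Per example: L = -\sum_i \ell_i \log s_i, with s_i = e^{f_i} / \sum_k e^{f_k}.
% Using \partial \log s_i / \partial f_j = \delta_{ij} - s_j:
\frac{\partial \mathcal{L}}{\partial f_j}
  = -\sum_i \ell_i \,(\delta_{ij} - s_j)
  = s_j \sum_i \ell_i - \ell_j
  = s_j - \ell_j
  \qquad \text{when } \sum_i \ell_i = 1,

which is exactly backprop = softmax(features) - labels.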
tensorflow/c/eager/tape.h
// any gradients to be computed).
//
// Finally, we start a backprop stack with a set of tape entries for which we
// have all gradients available. This set usually is a subset of the set of
// targets (not all since targets which have outputs in the tape will not have
// gradients available initially).
//
// Then we repeatedly pop an entry from the stack, run its backprop, and update
Last Modified: Tue Apr 02 12:40:29 UTC 2024 - 47.2K bytes
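A toy, self-contained sketch of the stack-driven traversal that comment describes. TapeEntry and RunBackprop are illustrative only (tape.h's real entries carry ops and gradient functions); the sketch just shows the bookkeeping: an entry becomes "ready" once gradients for all of its outputs have arrived, and processing it may make its producers ready in turn.

#include <map>
#include <stack>
#include <vector>

struct TapeEntry {
  int id;
  std::vector<int> input_ids;  // entries this one backprops into
  int missing_output_grads;    // output gradients still outstanding
};

void RunBackprop(std::map<int, TapeEntry>& tape, std::stack<int> ready) {
  // `ready` is seeded with entries whose output gradients are all available.
  while (!ready.empty()) {
    TapeEntry& entry = tape.at(ready.top());
    ready.pop();
    // (Run this entry's backprop function here, producing gradients for
    // its inputs.) Then mark one more output gradient as available on each
    // producer, pushing producers that just became ready.
    for (int producer_id : entry.input_ids) {
      auto it = tape.find(producer_id);
      if (it == tape.end()) continue;  // e.g. a source tensor, not on tape
      if (--it->second.missing_output_grads == 0) ready.push(producer_id);
    }
  }
}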
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir
// CHECK: %[[offset_backprop:.*]] = mhlo.convert %[[red2]] : tensor<8xf32>
// CHECK: %[[x_backprop:.*]] = mhlo.convert %[[mul3]] : tensor<8x8x8x8xf32>
// CHECK: return %[[x_backprop]] : tensor<8x8x8x8xf32>
Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 38.6K bytes