Results 1 - 10 of 34 for _backprop (0.13 sec)
tensorflow/cc/framework/while_gradients.cc
  return result;
}

// The backprop loop counter and main backprop loop run in their own execution
// frame (conceptually, the main forward loop and forward loop counter run
// together in a frame, then the backprop loop counter and backprop loop run
// together in a different frame). This returns the frame name to use for the
// backprop while loops.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 8.1K bytes - Viewed (0)
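For orientation, a minimal Python sketch (assuming TensorFlow 2.x) of the situation this code handles: requesting the gradient of a tf.while_loop is what makes TensorFlow build the backprop loop that runs in the separate frame described above.

    import tensorflow as tf

    x = tf.constant(2.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        # Differentiating through this loop triggers construction of the
        # backprop while loop (and its frame) described in the comment.
        _, y = tf.while_loop(
            cond=lambda i, v: i < 3,
            body=lambda i, v: (i + 1, v * v),
            loop_vars=(tf.constant(0), x))
    print(tape.gradient(y, x).numpy())  # d(x^8)/dx = 8 * x^7 = 1024.0 at x = 2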
tensorflow/cc/framework/gradients.cc
  DCHECK(while_ctx != nullptr);

  // Record 'summed_grads' as the backprop input associated with 'exit_node'
  std::map<Node*, Output>& backprops = while_backprops_[while_ctx];
  DCHECK(backprops.find(exit_node) == backprops.end());
  backprops[exit_node] = summed_grads;

  // Wait until we have all exit nodes' backprops collected before processing
  // the while loop.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0)
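The 'summed_grads' recorded here is the sum of all gradient contributions flowing into a node. A minimal graph-mode sketch (assuming tf.compat.v1) of that summing behavior:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()
    x = tf.compat.v1.placeholder(tf.float32, shape=[])
    # x has two consumers; the gradient builder sums both contributions
    # before recording the result as the backprop input for x.
    y = x * x + 3.0 * x
    (dy_dx,) = tf.compat.v1.gradients(y, x)
    with tf.compat.v1.Session() as sess:
        print(sess.run(dy_dx, feed_dict={x: 2.0}))  # 2*x + 3 = 7.0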
tensorflow/c/experimental/ops/nn_ops.cc
  *loss = temp_outputs[0];
  *backprop = temp_outputs[1];
  return status;
}

// Op: ReluGrad()
// Summary: Computes rectified linear gradients for a Relu operation.
//
// Description:
Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                AbstractTensorHandle* const features,
                AbstractTensorHandle** backprops, const char* name,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 5.9K bytes - Viewed (0)
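The generated op is also exposed in Python as tf.raw_ops.ReluGrad; a small sketch of its semantics:

    import tensorflow as tf

    features = tf.constant([-1.0, 0.0, 2.0])
    gradients = tf.constant([10.0, 10.0, 10.0])  # upstream gradients
    # ReluGrad passes the upstream gradient through where features > 0.
    backprops = tf.raw_ops.ReluGrad(gradients=gradients, features=features)
    print(backprops.numpy())  # [ 0.  0. 10.]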
tensorflow/c/experimental/ops/nn_ops.h
                AbstractTensorHandle** backprop, const char* name = nullptr,
                const char* raw_device_name = nullptr);

// Computes rectified linear gradients for a Relu operation.
Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                AbstractTensorHandle* const features,
                AbstractTensorHandle** backprops, const char* name = nullptr,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 10 19:11:36 UTC 2022 - 2.6K bytes - Viewed (0)
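The same gradient can be cross-checked with a GradientTape (a sketch, assuming eager TensorFlow 2.x):

    import tensorflow as tf

    features = tf.constant([-1.0, 0.0, 2.0])
    with tf.GradientTape() as tape:
        tape.watch(features)
        y = tf.nn.relu(features)
    # Equivalent to ReluGrad with all-ones upstream gradients.
    print(tape.gradient(y, features).numpy())  # [0. 0. 1.]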
pkg/ctrlz/assets/static/js/bootstrap-4.0.0.min.js
...tScrollbar(), t(e._element).trigger(h.HIDDEN)
  })
},
p._removeBackdrop = function () {
  this._backdrop && (t(this._backdrop).remove(), this._backdrop = null)
},
p._showBackdrop = function (e) {
  var n = this,
      i = t(this._element).hasClass(d) ? d : "";
  if (this._isShown && this._config.backdrop) {
    var s = P.supportsTransitionEnd() && i;
    if (this._backdrop = document.createElement("div"),
        this._backdrop.className = u,
        i && t(this._backdrop).addClass(i),
        t(this._backdrop).appendTo(document.body),
        t(this._element).on(h.CLICK_DISMISS, function (t) {
          n._ignor...
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue May 23 17:08:31 UTC 2023 - 47.8K bytes - Viewed (0)
tensorflow/compiler/mlir/tfr/python/test_utils.py
    # compute with op.
    with backprop.GradientTape() as gt:
      for var_ in vars_:
        gt.watch(var_)
      y = compute_op(**op_kwargs)  # uses the op, decomposed by the graph pass.
    grads = gt.gradient(y, vars_)  # uses the registered gradient function.

    # compute with composition
    with backprop.GradientTape() as gt:
      for var_ in vars_:
        gt.watch(var_)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 02 18:32:17 UTC 2023 - 1.8K bytes - Viewed (0)
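A self-contained sketch of the comparison this helper performs; compute_op and compute_composition below are hypothetical stand-ins for the real TFR op and its composition:

    import tensorflow as tf

    def compute_op(x):           # hypothetical stand-in for the registered op
        return tf.nn.relu(x)

    def compute_composition(x):  # hypothetical stand-in for the composition
        return tf.maximum(x, 0.0)

    x = tf.constant([-1.0, 2.0])
    grads = []
    for fn in (compute_op, compute_composition):
        with tf.GradientTape() as gt:
            gt.watch(x)
            y = fn(x)
        grads.append(gt.gradient(y, x))
    # The test asserts the two gradient paths agree.
    tf.debugging.assert_near(grads[0], grads[1])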
tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
  // Build new BackPropFilterOp.
  auto loc = backprop.getLoc();
  auto new_backprop = builder.create<TF::Conv2DBackpropFilterOp>(
      loc, new_result_type, input, new_filter_sizes, backprop.getOutBackprop(),
      strides, backprop.getUseCudnnOnGpu(), backprop.getPadding(),
      backprop.getExplicitPaddings(), backprop.getDataFormat(),
      backprop.getDilations());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 29.3K bytes - Viewed (0)
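The op being rebuilt is callable from Python via tf.raw_ops.Conv2DBackpropFilter; a minimal NHWC sketch (shapes chosen only for illustration):

    import tensorflow as tf

    x = tf.random.normal([1, 8, 8, 3])               # NHWC input
    out_backprop = tf.random.normal([1, 8, 8, 16])   # grad w.r.t. conv output
    filter_grad = tf.raw_ops.Conv2DBackpropFilter(
        input=x,
        filter_sizes=[3, 3, 3, 16],                  # HWIO filter shape
        out_backprop=out_backprop,
        strides=[1, 1, 1, 1],
        padding="SAME",
        data_format="NHWC",
        dilations=[1, 1, 1, 1])
    print(filter_grad.shape)  # (3, 3, 3, 16)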
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_60.mlir
  %arg2: tensor<64xf32>
) -> tensor<1x28x28x64xf32> {

  // CHECK: "tf.FusedBatchNormGradV3"
  // CHECK-SAME: (%[[X_TRANSPOSE:[0-9]*]], %[[Y_TRANSPOSE:[0-9]*]],
  // CHECK-SAME: data_format = "NCHW"

  %x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2 =
    "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2) {
      data_format = "NHWC",
      epsilon = 1.001 : f32,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 21 08:41:18 UTC 2022 - 5.8K bytes - Viewed (0)
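What the checked op computes, from Python (a sketch using tf.raw_ops; the forward FusedBatchNormV3 supplies the reserve spaces its gradient consumes, and the epsilon here is illustrative, not the test's value):

    import tensorflow as tf

    x = tf.random.normal([1, 28, 28, 64])
    scale, offset = tf.ones([64]), tf.zeros([64])
    empty = tf.constant([], tf.float32)  # mean/variance unused in training
    y, _, _, r1, r2, r3 = tf.raw_ops.FusedBatchNormV3(
        x=x, scale=scale, offset=offset, mean=empty, variance=empty,
        epsilon=1e-3, data_format="NHWC", is_training=True)
    x_backprop, scale_backprop, offset_backprop, _, _ = (
        tf.raw_ops.FusedBatchNormGradV3(
            y_backprop=tf.ones_like(y), x=x, scale=scale,
            reserve_space_1=r1, reserve_space_2=r2, reserve_space_3=r3,
            epsilon=1e-3, data_format="NHWC", is_training=True))
    print(x_backprop.shape)  # (1, 28, 28, 64)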
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_gpu_cc_70.mlir
  %arg2: tensor<64xf32>
) -> tensor<1x28x28x64xf32> {

  // CHECK: "tf.FusedBatchNormGradV3"
  // CHECK-SAME: (%[[X_TRANSPOSE:[0-9]*]], %[[Y_TRANSPOSE:[0-9]*]],
  // CHECK-SAME: data_format = "NCHW"

  %x_backprop, %scale_backprop, %offset_backprop, %reserve_1, %reserve_2 =
    "tf.FusedBatchNormGradV3"(%arg0, %arg1, %arg2, %arg2, %arg2, %arg2) {
      data_format = "NHWC",
      epsilon = 1.001 : f32,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 21 08:41:18 UTC 2022 - 8.5K bytes - Viewed (0)
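The rewrite these CHECK lines expect wraps the op in layout transposes; a hand-written sketch of that permutation (assuming the usual NHWC/NCHW perms):

    import tensorflow as tf

    x_nhwc = tf.random.normal([1, 28, 28, 64])
    # The layout pass inserts transposes like these around the op and flips
    # its data_format attribute from "NHWC" to "NCHW".
    x_nchw = tf.transpose(x_nhwc, perm=[0, 3, 1, 2])  # NHWC -> NCHW
    back = tf.transpose(x_nchw, perm=[0, 2, 3, 1])    # NCHW -> NHWC
    print(x_nchw.shape, back.shape)  # (1, 64, 28, 28) (1, 28, 28, 64)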
tensorflow/cc/gradients/nn_grad.cc
  // We multiply the backprop for cost with the gradients - op.output[1].
  // There is no gradient for labels.

  // The outputs of the network are at input index 0.
  auto logits = op.input(0);
  // The "truth" labels are at index 1.
  auto softmax_grad = op.output(1);
  // The loss is the output at index 0, and backprop is the output at index 1.
  auto grad_loss = grad_inputs[0];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0)
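A numeric sketch of the rule those comments describe, using tf.raw_ops.SoftmaxCrossEntropyWithLogits, whose second output is the backprop (softmax(logits) - labels) that gets multiplied by the incoming loss gradient:

    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])
    labels = tf.constant([[1.0, 0.0, 0.0]])
    loss, backprop = tf.raw_ops.SoftmaxCrossEntropyWithLogits(
        features=logits, labels=labels)
    grad_loss = tf.constant([1.0])          # upstream gradient for the loss
    # Gradient w.r.t. logits; labels receive no gradient.
    grad_logits = grad_loss[:, None] * backprop
    print(grad_logits.numpy())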