Results 1 - 10 of 13 for _backprop (0.16 sec)

  1. tensorflow/cc/framework/gradients.cc

      DCHECK(while_ctx != nullptr);
    
      // Record 'summed_grads' as the backprop input associated with 'exit_node'
      std::map<Node*, Output>& backprops = while_backprops_[while_ctx];
      DCHECK(backprops.find(exit_node) == backprops.end());
      backprops[exit_node] = summed_grads;
    
      // Wait until we have all exit nodes' backprops collected before processing
      // the while loop.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 22K bytes
    - Viewed (0)
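
The snippet above shows the bookkeeping contract: each Exit node's summed gradient is recorded exactly once in a per-while-loop map, and the loop is only differentiated after every exit has reported. A minimal standalone sketch of that pattern (the Output, WhileContext, and RecordExitBackprop names below are illustrative assumptions, not TensorFlow's API):

    #include <cassert>
    #include <cstddef>
    #include <map>
    #include <string>

    struct Output { double value = 0.0; };  // stand-in for a gradient tensor

    struct WhileContext {
      std::map<std::string, Output> backprops;  // exit node -> summed grads
      std::size_t num_exits = 0;                // exits this loop is known to have
    };

    // Records 'summed_grads' as the backprop input for 'exit_node'. Returns
    // true once every exit node has reported, i.e. the loop can be processed.
    bool RecordExitBackprop(WhileContext& ctx, const std::string& exit_node,
                            const Output& summed_grads) {
      assert(ctx.backprops.count(exit_node) == 0);  // each exit reports once
      ctx.backprops[exit_node] = summed_grads;
      return ctx.backprops.size() == ctx.num_exits;
    }

    int main() {
      WhileContext ctx;
      ctx.num_exits = 2;
      RecordExitBackprop(ctx, "exit_0", Output{1.0});  // not ready yet
      return RecordExitBackprop(ctx, "exit_1", Output{2.0}) ? 0 : 1;  // ready
    }
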
  2. pkg/ctrlz/assets/static/js/bootstrap-4.0.0.min.js

    tScrollbar(),t(e._element).trigger(h.HIDDEN)})},p._removeBackdrop=function(){this._backdrop&&(t(this._backdrop).remove(),this._backdrop=null)},p._showBackdrop=function(e){var n=this,i=t(this._element).hasClass(d)?d:"";if(this._isShown&&this._config.backdrop){var s=P.supportsTransitionEnd()&&i;if(this._backdrop=document.createElement("div"),this._backdrop.className=u,i&&t(this._backdrop).addClass(i),t(this._backdrop).appendTo(document.body),t(this._element).on(h.CLICK_DISMISS,function(t){n._ignor...
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Tue May 23 17:08:31 UTC 2023
    - 47.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

      // Build new BackPropFilterOp.
      auto loc = backprop.getLoc();
      auto new_backprop = builder.create<TF::Conv2DBackpropFilterOp>(
          loc, new_result_type, input, new_filter_sizes, backprop.getOutBackprop(),
          strides, backprop.getUseCudnnOnGpu(), backprop.getPadding(),
          backprop.getExplicitPaddings(), backprop.getDataFormat(),
          backprop.getDilations());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
    - Viewed (0)
  4. tensorflow/cc/gradients/nn_grad.cc

      // We multiply the backprop for cost with the gradients - op.output[1].
      // There is no gradient for labels.
    
      // The outputs of the network are at input index 0.
      auto logits = op.input(0);
      // The "truth" labels are at index 1.
      auto softmax_grad = op.output(1);
    
      // The loss is the output at index 0, and backprop is the output at index 1.
      auto grad_loss = grad_inputs[0];
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
    - Viewed (0)
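
The comments above describe a chain-rule shortcut: the op already emits backprop = softmax(logits) - labels as its second output, so the gradient w.r.t. logits is just the incoming loss gradient scaled into that tensor, and labels receive no gradient. A hedged scalar sketch with made-up values (plain C++, not the TF Output/Scope API):

    #include <cstdio>
    #include <vector>

    int main() {
      // One example, 3 classes: backprop = softmax(logits) - labels, as
      // emitted by the op's second output (values invented for illustration).
      std::vector<double> softmax_grad = {0.2, -0.5, 0.3};
      double grad_loss = 2.0;  // gradient flowing in from downstream ops

      std::vector<double> grad_logits(softmax_grad.size());
      for (std::size_t i = 0; i < softmax_grad.size(); ++i)
        grad_logits[i] = grad_loss * softmax_grad[i];  // labels get no gradient

      for (double g : grad_logits) std::printf("%g ", g);
      std::printf("\n");
      return 0;
    }
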
  5. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    // computes loss and backprop of the loss with respect to 'features'.
    //
    // Softmax cross entropy loss is defined as follows:
    //
    //  loss = Sum(-labels * Log(Exp(features) / Sum(Exp(features))))
    //  loss = Sum(-labels * LogSoftmax(features))
    //
    // Computing gradient of the loss with respect to features gives us,
    //
    //  backprop = (Exp(features) / Sum(Exp(features))) - labels
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
    - Viewed (0)
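
The loss identities and the backprop formula above can be checked numerically. A small sketch (plain C++, independent of the TableGen lowering) that evaluates loss = Sum(-labels * LogSoftmax(features)) and backprop = Exp(features) / Sum(Exp(features)) - labels for a one-hot example, using the usual max-subtraction for numerical stability:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<double> features = {1.0, 2.0, 3.0};
      std::vector<double> labels   = {0.0, 0.0, 1.0};  // one-hot truth

      // LogSoftmax with max-subtraction so Exp never overflows.
      double max_f = *std::max_element(features.begin(), features.end());
      double sum_exp = 0.0;
      for (double f : features) sum_exp += std::exp(f - max_f);

      double loss = 0.0;
      std::vector<double> backprop(features.size());
      for (std::size_t i = 0; i < features.size(); ++i) {
        double log_softmax = (features[i] - max_f) - std::log(sum_exp);
        loss += -labels[i] * log_softmax;                 // Sum(-labels * LogSoftmax)
        backprop[i] = std::exp(log_softmax) - labels[i];  // softmax - labels
      }

      std::printf("loss = %g\nbackprop =", loss);
      for (double b : backprop) std::printf(" %g", b);
      std::printf("\n");
      return 0;
    }
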
  6. tensorflow/c/eager/tape.h

    // any gradients to be computed).
    //
    // Finally, we start a backprop stack with a set of tape entries for which we
    // have all gradients available. This set usually is a subset of the set of
    // targets (not all since targets which have outputs in the tape will not have
    // gradients available initially).
    //
    // Then we repeatedly pop an entry from the stack, run its backprop, and update
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 12:40:29 UTC 2024
    - 47.2K bytes
    - Viewed (0)
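
The tape.h comment outlines a worklist algorithm: seed a stack with entries whose output gradients are all available, then repeatedly pop an entry, run its backprop, and push ops that just became ready. The sketch below is a simplified, assumption-laden version: every tensor has a single consumer, so an op is ready as soon as its first (and only) output gradient arrives, whereas the real tape counts outstanding output gradients before pushing:

    #include <cstdio>
    #include <map>
    #include <stack>
    #include <string>
    #include <vector>

    // One recorded op: the tensor it produced and the tensors it consumed.
    struct TapeEntry {
      std::string output;
      std::vector<std::string> inputs;
    };

    int main() {
      // Tape for y = mul(add(a, b), c), recorded in execution order.
      std::vector<TapeEntry> tape = {{"t_add", {"a", "b"}},
                                     {"t_mul", {"t_add", "c"}}};

      // Map each tensor to the tape entry that produced it.
      std::map<std::string, int> producer;
      for (int i = 0; i < static_cast<int>(tape.size()); ++i)
        producer[tape[i].output] = i;

      // Seed the backprop stack with the target: d(t_mul)/d(t_mul) = 1 is
      // the one gradient known up front, so t_mul's producer is ready.
      std::map<std::string, double> grads = {{"t_mul", 1.0}};
      std::stack<int> ready;
      ready.push(producer["t_mul"]);

      // Pop an entry, run its backprop (a stand-in that just forwards the
      // output gradient), and push producers whose gradients became ready.
      while (!ready.empty()) {
        const TapeEntry& e = tape[ready.top()];
        ready.pop();
        for (const std::string& in : e.inputs) {
          grads[in] += grads[e.output];
          auto it = producer.find(in);
          if (it != producer.end()) ready.push(it->second);
        }
      }

      for (const auto& kv : grads)
        std::printf("d y / d %s = %g\n", kv.first.c_str(), kv.second);
      return 0;
    }
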
  7. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

        // CHECK: %[[offset_backprop:.*]] = mhlo.convert %[[red2]] : tensor<8xf32>
    
        // CHECK: %[[x_backprop:.*]] = mhlo.convert %[[mul3]] : tensor<8x8x8x8xf32>
        // CHECK: return %[[x_backprop]] : tensor<8x8x8x8xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
    - Viewed (0)
  8. tensorflow/cc/gradients/nn_grad_test.cc

      auto y =
          tensorflow::ops::SoftmaxCrossEntropyWithLogits(scope_, logits, labels);
      // Note the reversal of the backprop and loss orders. Issue #18734 has been
      // opened for this.
      RunTest({logits, labels}, {logits_shape, logits_shape}, {y.backprop, y.loss},
              {logits_shape, loss_shape});
    }
    
    TEST_F(NNGradTest, LogSoftmaxGrad) {
      TensorShape shape({5, 3});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir

        // CHECK: %[[BACKPROP:.*]] = "tf.Conv2DBackpropFilter"
        // CHECK-SAME: strides = [1, 1, 1, 1]
        // CHECK-SAME: (tensor<2x115x115x12xf32>, tensor<4xi32>, tensor<2x112x112x64xf32>) -> tensor<4x4x12x64xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 37.4K bytes
    - Viewed (0)
  10. tensorflow/cc/framework/gradients_test.cc

                                            {dx, dy, dz}, &grad_outputs));
        }
      }
      CompareTestAndExpectedGraphs();
    }
    
    TEST_F(GradientsTest, StackUnstack_StopBackprop) {
      // Tests that backprop stops before calculating gradients for Stack (because
      // only gradients w.r.t. the output of Stack are requested).
      for (const bool expected : {false, true}) {
        const Scope& scope = expected ? scope_expected_ : scope_test_;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 15 15:13:38 UTC 2023
    - 25K bytes
    - Viewed (0)
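
The test comment explains the stopping rule: when the caller requests gradients only w.r.t. the output of Stack, the backward walk terminates once that gradient is delivered, and Stack itself is never differentiated. A toy illustration over a hypothetical chain (names and graph invented for the example):

    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      // Backward edges of a toy graph: loss <- unstack <- stack <- x.
      std::map<std::string, std::vector<std::string>> inputs = {
          {"loss", {"unstack"}}, {"unstack", {"stack"}}, {"stack", {"x"}}};

      // Only d(loss)/d(stack output) is requested, so the walk from "loss"
      // stops as soon as that gradient is produced; "stack" and everything
      // upstream of it never have their gradient functions run.
      std::set<std::string> requested = {"stack"};
      std::vector<std::string> frontier = {"loss"};
      while (!frontier.empty()) {
        std::string op = frontier.back();
        frontier.pop_back();
        std::printf("running gradient function for %s\n", op.c_str());
        for (const std::string& in : inputs[op]) {
          if (requested.count(in))
            std::printf("gradient w.r.t. %s delivered; stopping\n", in.c_str());
          else
            frontier.push_back(in);
        }
      }
      return 0;
    }
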