Results 1 - 6 of 6 for _backprop (0.48 sec)

  1. tensorflow/cc/framework/gradients.cc

      DCHECK(while_ctx != nullptr);
    
      // Record 'summed_grads' as the backprop input associated with 'exit_node'
      std::map<Node*, Output>& backprops = while_backprops_[while_ctx];
      DCHECK(backprops.find(exit_node) == backprops.end());
      backprops[exit_node] = summed_grads;
    
      // Wait until we have all exit nodes' backprops collected before processing
      // the while loop.
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 22K bytes
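
    The snippet shows gradients.cc collecting the summed gradient for each exit
    node of a while loop into a per-loop map, and deferring the loop's backprop
    until every exit node has reported. Below is a minimal standalone sketch of
    that collect-then-fire pattern, with hypothetical Node/Output stand-ins
    rather than the real TensorFlow types:

      #include <cstdio>
      #include <map>
      #include <string>

      // Hypothetical stand-ins for TF's Node* keys and Output gradient values.
      using Node = std::string;
      using Output = double;

      class WhileGradCollector {
       public:
        explicit WhileGradCollector(int num_exit_nodes)
            : num_exit_nodes_(num_exit_nodes) {}

        // Record 'summed_grads' as the backprop input for 'exit_node'. Returns
        // true once every exit node's gradient has arrived, i.e. once the
        // while loop's backprop can actually be processed.
        bool RecordGradient(const Node& exit_node, Output summed_grads) {
          backprops_[exit_node] = summed_grads;  // DCHECKed unique in the real code
          return backprops_.size() == static_cast<size_t>(num_exit_nodes_);
        }

       private:
        int num_exit_nodes_;
        std::map<Node, Output> backprops_;
      };

      int main() {
        WhileGradCollector collector(/*num_exit_nodes=*/2);
        collector.RecordGradient("exit_0", 1.5);          // not ready yet
        if (collector.RecordGradient("exit_1", -0.25)) {  // all exits collected
          std::printf("all exit gradients collected; process while loop\n");
        }
      }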
  2. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

      // Build new BackPropFilterOp.
      auto loc = backprop.getLoc();
      auto new_backprop = builder.create<TF::Conv2DBackpropFilterOp>(
          loc, new_result_type, input, new_filter_sizes, backprop.getOutBackprop(),
          strides, backprop.getUseCudnnOnGpu(), backprop.getPadding(),
          backprop.getExplicitPaddings(), backprop.getDataFormat(),
          backprop.getDilations());
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
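
    The pass rebuilds the Conv2DBackpropFilter op with new filter sizes after a
    space-to-depth transform of its input. For block size b, space-to-depth
    turns an NHWC shape [N, H, W, C] into [N, H/b, W/b, C*b*b]. A small shape
    helper sketching that transform (assumed tf.nn.space_to_depth semantics,
    not code from the pass itself):

      #include <array>
      #include <cassert>
      #include <cstdint>
      #include <cstdio>

      // Compute the NHWC result shape of a space-to-depth transform with the
      // given block size: spatial dims shrink by 'block', depth grows by block^2.
      std::array<int64_t, 4> SpaceToDepthShape(const std::array<int64_t, 4>& nhwc,
                                               int64_t block) {
        assert(nhwc[1] % block == 0 && nhwc[2] % block == 0);
        return {nhwc[0], nhwc[1] / block, nhwc[2] / block,
                nhwc[3] * block * block};
      }

      int main() {
        // [1, 224, 224, 3] with block size 2 -> [1, 112, 112, 12].
        auto out = SpaceToDepthShape({1, 224, 224, 3}, 2);
        std::printf("[%lld, %lld, %lld, %lld]\n", (long long)out[0],
                    (long long)out[1], (long long)out[2], (long long)out[3]);
      }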
  3. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    // computes loss and backprop of the loss with respect to 'features'.
    //
    // Softmax cross entropy loss is defined as follows:
    //
    //  loss = Sum(-labels * Log(Exp(features) / Sum(Exp(features))))
    //  loss = Sum(-labels * LogSoftmax(features))
    //
    // Computing the gradient of the loss with respect to 'features' gives us:
    //
    //  backprop = (Exp(features) / Sum(Exp(features))) - labels
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
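
    The comment's derivation, backprop = softmax(features) - labels, can be
    checked numerically. A minimal sketch in plain C++ (an illustration of the
    formula, not the TableGen lowering itself):

      #include <algorithm>
      #include <cmath>
      #include <cstdio>
      #include <vector>

      // Numerically stable softmax: shift by the max before exponentiating.
      std::vector<double> Softmax(const std::vector<double>& f) {
        double mx = *std::max_element(f.begin(), f.end());
        double sum = 0.0;
        std::vector<double> out(f.size());
        for (size_t i = 0; i < f.size(); ++i) sum += (out[i] = std::exp(f[i] - mx));
        for (double& v : out) v /= sum;
        return out;
      }

      int main() {
        std::vector<double> features = {2.0, 1.0, 0.1};
        std::vector<double> labels = {1.0, 0.0, 0.0};  // one-hot target

        auto p = Softmax(features);
        double loss = 0.0;
        for (size_t i = 0; i < p.size(); ++i)
          loss += -labels[i] * std::log(p[i]);  // loss = Sum(-labels * LogSoftmax)

        // Gradient of the loss w.r.t. features: backprop = softmax - labels.
        for (size_t i = 0; i < p.size(); ++i)
          std::printf("backprop[%zu] = %f\n", i, p[i] - labels[i]);
        std::printf("loss = %f\n", loss);
      }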
  4. tensorflow/c/eager/tape.h

    // any gradients to be computed).
    //
    // Finally, we start a backprop stack with a set of tape entries for which we
    // have all gradients available. This set usually is a subset of the set of
    // targets (not all since targets which have outputs in the tape will not have
    // gradients available initially).
    //
    // Then we repeatedly pop an entry from the stack, run its backprop, and update
    - Last Modified: Tue Apr 02 12:40:29 UTC 2024
    - 47.2K bytes
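
    The comment describes a worklist algorithm: seed a stack with tape entries
    whose output gradients are all available, then repeatedly pop an entry, run
    its backprop, and decrement the pending counts of upstream entries, pushing
    each one once it becomes ready. A minimal sketch of that ready-stack scheme
    over a tiny hypothetical graph (not the tape.h data structures):

      #include <cstdio>
      #include <map>
      #include <stack>
      #include <string>
      #include <vector>

      int main() {
        // Hypothetical graph: each op lists the upstream ops its backprop
        // feeds, and how many output gradients each op is still waiting on.
        std::map<std::string, std::vector<std::string>> upstream = {
            {"loss", {"mul"}}, {"mul", {"x", "w"}}, {"x", {}}, {"w", {}}};
        std::map<std::string, int> missing_grads = {
            {"loss", 0}, {"mul", 1}, {"x", 1}, {"w", 1}};

        // Seed the backprop stack with entries that already have all gradients.
        std::stack<std::string> ready;
        for (const auto& [op, missing] : missing_grads)
          if (missing == 0) ready.push(op);

        // Pop an entry, "run its backprop", and release its upstream entries.
        while (!ready.empty()) {
          std::string op = ready.top();
          ready.pop();
          std::printf("backprop %s\n", op.c_str());
          for (const auto& up : upstream[op])
            if (--missing_grads[up] == 0) ready.push(up);
        }
      }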
  5. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

        // CHECK: %[[offset_backprop:.*]] = mhlo.convert %[[red2]] : tensor<8xf32>
    
        // CHECK: %[[x_backprop:.*]] = mhlo.convert %[[mul3]] : tensor<8x8x8x8xf32>
        // CHECK: return %[[x_backprop]] : tensor<8x8x8x8xf32>
    
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
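
    These are FileCheck directives: %[[offset_backprop:.*]] captures the SSA
    value produced by mhlo.convert so later lines can refer to it, verifying
    the legalized FusedBatchNormGrad output. The offset gradient itself is the
    per-channel sum of the incoming gradient over the N, H, W axes; a small
    NHWC sketch of that reduction (assumed batch-norm gradient identity, not
    the MLIR lowering):

      #include <cstdio>
      #include <vector>

      // offset_backprop[c] = sum over n,h,w of dy[n,h,w,c] for an NHWC tensor.
      std::vector<float> OffsetBackprop(const std::vector<float>& dy,
                                        int n, int h, int w, int c) {
        std::vector<float> out(c, 0.0f);
        for (int i = 0; i < n * h * w; ++i)
          for (int ch = 0; ch < c; ++ch) out[ch] += dy[i * c + ch];
        return out;
      }

      int main() {
        // Tiny [1,2,2,2] gradient tensor; channel sums are 4 and 8.
        std::vector<float> dy = {1, 2, 1, 2, 1, 2, 1, 2};
        auto ob = OffsetBackprop(dy, 1, 2, 2, 2);
        std::printf("offset_backprop = [%f, %f]\n", ob[0], ob[1]);
      }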
  6. tensorflow/c/while_loop_test.cc

      TF_Operation* add =
          Add(params_->body_inputs[0], {one, 0}, params_->body_graph, s_);
      ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
      params_->body_outputs[0] = {add, 0};
    
      ExpectOK();
    
      // Create backprop graph
      TF_Output grad_output;
      TF_AddGradients(graph_, outputs_.data(), outputs_.size(), inputs_.data(), 1,
                      nullptr, s_, &grad_output);
      ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    
    - Last Modified: Thu Apr 11 06:05:56 UTC 2024
    - 15.3K bytes
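
    The test calls TF_AddGradients to append a gradient subgraph: given outputs
    y and inputs x, it emits ops computing dy/dx and returns them via
    grad_output. A minimal sketch of that call shape against a simple y = x * x
    graph (TF C API; error handling trimmed, requires linking libtensorflow):

      #include <cstdio>
      #include "tensorflow/c/c_api.h"

      int main() {
        TF_Graph* graph = TF_NewGraph();
        TF_Status* s = TF_NewStatus();

        // x: a scalar float placeholder.
        TF_OperationDescription* d = TF_NewOperation(graph, "Placeholder", "x");
        TF_SetAttrType(d, "dtype", TF_FLOAT);
        TF_Output x = {TF_FinishOperation(d, s), 0};

        // y = x * x.
        d = TF_NewOperation(graph, "Mul", "y");
        TF_AddInput(d, x);
        TF_AddInput(d, x);
        TF_Output y = {TF_FinishOperation(d, s), 0};

        // Append a gradient subgraph computing dy/dx. Passing dx == nullptr
        // seeds the output gradients with ones, as in the test above.
        TF_Output grad_output;
        TF_AddGradients(graph, &y, 1, &x, 1, /*dx=*/nullptr, s, &grad_output);
        std::printf("gradient added: %s\n",
                    TF_GetCode(s) == TF_OK ? "OK" : TF_Message(s));

        TF_DeleteStatus(s);
        TF_DeleteGraph(graph);
      }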