Results 1 - 7 of 7 for Hevery (0.37 sec)

  1. tensorflow/c/c_api.cc

      } else {
        return ToOperation(iter->second);
      }
    }
    
    TF_Operation* TF_GraphNextOperation(TF_Graph* graph, size_t* pos) {
      if (*pos == 0) {
        // Advance past the first sentinel nodes in every graph (the source & sink).
        *pos += 2;
      } else {
        // Advance to the next node.
        *pos += 1;
      }
    
      mutex_lock l(graph->mu);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
  2. tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem.cc

                                  static_cast<off_t>(offset))};
        if (r > 0) {
          dst += r;
          offset += static_cast<uint64_t>(r);
          n -= r;  // safe as 0 < r <= n so n will never underflow
          read += r;
        } else if (r == 0) {
          TF_SetStatus(status, TF_OUT_OF_RANGE, "Read fewer bytes than requested");
          break;
        } else if (errno == EINTR || errno == EAGAIN) {
          // Retry
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Sun Mar 24 20:08:23 GMT 2024
    - 15.8K bytes
    - Viewed (0)
  3. tensorflow/c/experimental/gradients/math_grad_test.cc

          immediate_execution_ctx_.get(), {x.get()}, UseFunction()));
    }
    
    TEST_P(CppGradients, TestMatMulGrad) {
      // TODO(vnvo2409): Figure out why `gradient_checker` does not work very
      // well with `MatMul` and remove `TestMatMul*` in
      // `mnist_gradients_test` when done.
      GTEST_SKIP();
    
      float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
      int64_t A_dims[] = {3, 3};
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Thu Apr 13 17:32:14 GMT 2023
    - 16.3K bytes
    - Viewed (0)
  4. tensorflow/c/experimental/filesystem/modular_filesystem_test.cc

    //
    // Each filesystem implementation can be provided by DSOs, so we provide the
    // `--dsos` flag to specify a list of shared objects to be loaded in order.
    // If the flag is not used, no shared objects are loaded.
    //
    // Every filesystem provides support for accessing URIs of form
    // `[<scheme>://]<path>` where `<scheme>` is optional (if missing, we are
    // accessing local paths). This test suite tests exactly one scheme for each
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri May 27 20:25:58 GMT 2022
    - 71K bytes
    - Viewed (0)
  5. tensorflow/c/eager/gradients.cc

      // TODO(srbs): It seems like this is used only for performance optimization
      // and not for correctness. The only downside of keeping this 1 seems to be
      // that the gradient accumulation is unbounded and we will never
      // aggressively aggregate accumulated gradients to recover memory.
      // Revisit and fix.
      return 1;
    }
    
    // Consumes references to the tensors in the gradient_tensors list and returns
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
  6. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

          : status_(TF_NewStatus()),
            // If the context's default executor is set to async, re-using that in
            // each thread would cause collectives to deadlock. For consistency we
            // create a new sync executor for every thread.
            //
            // TODO(allenl): We should have an async API that works with the
            // parallel device.
            device_(device),
            executor_(
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
  7. tensorflow/c/eager/parallel_device/parallel_device.cc

        const TFE_OpAttrs* attributes, int expected_max_outputs,
        TF_Status* status) {
      absl::optional<std::vector<MaybeParallelTensorOwned>> result;
      // TODO(allenl): We should remove "TPU" from these op names at the very least,
      // or consider other ways of packing/unpacking parallel tensors.
      if (operation_name == std::string("TPUReplicatedInput")) {
        // Special-cased operation for packing per-device tensors into one parallel
        // tensor.
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 29 22:05:31 GMT 2023
    - 18.3K bytes
    - Viewed (0)
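
Usage sketch for result 1: the excerpt from tensorflow/c/c_api.cc shows how TF_GraphNextOperation skips the source and sink sentinel nodes and then walks the remaining operations. The following is a minimal sketch of how that iterator is typically driven from client code; the Placeholder op is added only so the loop has something to visit and is not part of the excerpt above.

    #include <cstdio>

    #include "tensorflow/c/c_api.h"

    int main() {
      TF_Status* status = TF_NewStatus();
      TF_Graph* graph = TF_NewGraph();

      // Add a single operation so the walk visits more than the sentinels.
      TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", "x");
      TF_SetAttrType(desc, "dtype", TF_FLOAT);
      TF_FinishOperation(desc, status);

      // pos must start at 0; the first call advances past the source & sink
      // sentinels, and nullptr is returned once every operation has been seen.
      size_t pos = 0;
      while (TF_Operation* op = TF_GraphNextOperation(graph, &pos)) {
        std::printf("%s (%s)\n", TF_OperationName(op), TF_OperationOpType(op));
      }

      TF_DeleteGraph(graph);
      TF_DeleteStatus(status);
      return 0;
    }

Because the sentinels occupy the first two positions, the first operation returned corresponds to position 2, which is exactly the `*pos += 2` seen in the excerpt.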
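
Usage sketch for result 2: the excerpt from posix_filesystem.cc is the middle of the plugin's positional read loop, where partial reads are accumulated, a zero return is reported as TF_OUT_OF_RANGE, and EINTR/EAGAIN trigger a retry. Below is a standalone sketch of the same pattern outside the TensorFlow status machinery; the function name read_exact_at is illustrative and not part of the plugin.

    #include <unistd.h>

    #include <cerrno>
    #include <cstdint>

    // Reads up to n bytes at `offset`, retrying on EINTR/EAGAIN and stopping at
    // end-of-file. Returns the number of bytes read, or -1 on a hard error.
    ssize_t read_exact_at(int fd, char* dst, size_t n, uint64_t offset) {
      ssize_t total = 0;
      while (n > 0) {
        ssize_t r = pread(fd, dst, n, static_cast<off_t>(offset));
        if (r > 0) {
          dst += r;
          offset += static_cast<uint64_t>(r);
          n -= static_cast<size_t>(r);  // safe as 0 < r <= n
          total += r;
        } else if (r == 0) {
          break;  // EOF: the caller sees a short read (OUT_OF_RANGE upstream)
        } else if (errno == EINTR || errno == EAGAIN) {
          continue;  // interrupted or temporarily unavailable: retry
        } else {
          return -1;  // hard I/O error
        }
      }
      return total;
    }

A caller that needs exactly n bytes would treat a return value smaller than the request as out of range, mirroring the TF_OUT_OF_RANGE status set in the excerpt.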