Results 1 - 7 of 7 for Hevery (0.19 sec)
tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem.cc
    static_cast<off_t>(offset))};
    if (r > 0) {
      dst += r;
      offset += static_cast<uint64_t>(r);
      n -= r;  // safe as 0 < r <= n so n will never underflow
      read += r;
    } else if (r == 0) {
      TF_SetStatus(status, TF_OUT_OF_RANGE, "Read fewer bytes than requested");
      break;
    } else if (errno == EINTR || errno == EAGAIN) {
      // Retry
C++ - Registered: Tue Apr 23 12:39:09 GMT 2024 - Last Modified: Sun Mar 24 20:08:23 GMT 2024 - 15.8K bytes - Viewed (0)
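The excerpt above is the retry loop of the POSIX plugin's read path: partial reads advance the cursor, a zero-byte result is reported as "fewer bytes than requested", and EINTR/EAGAIN are retried. A minimal self-contained sketch of the same pattern, without the TF_Status plumbing (the ReadExactly name and its return convention are assumptions, not TensorFlow's API):

#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <unistd.h>

// Reads exactly `n` bytes at `offset` with pread(), retrying on transient
// errors. Returns the number of bytes actually read, or -1 on a real error;
// a short result means EOF was hit first (the snippet's TF_OUT_OF_RANGE case).
ssize_t ReadExactly(int fd, uint64_t offset, char* dst, size_t n) {
  size_t total = 0;
  while (n > 0) {
    ssize_t r = pread(fd, dst, n, static_cast<off_t>(offset));
    if (r > 0) {
      dst += r;
      offset += static_cast<uint64_t>(r);
      n -= static_cast<size_t>(r);  // safe: 0 < r <= n, so n never underflows
      total += static_cast<size_t>(r);
    } else if (r == 0) {
      break;  // EOF before n bytes were read
    } else if (errno == EINTR || errno == EAGAIN) {
      continue;  // transient condition: retry the same pread
    } else {
      return -1;  // unrecoverable I/O error
    }
  }
  return static_cast<ssize_t>(total);
}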
tensorflow/c/experimental/gradients/math_grad_test.cc
      immediate_execution_ctx_.get(), {x.get()}, UseFunction()));
}

TEST_P(CppGradients, TestMatMulGrad) {
  // TODO(vnvo2409): Figure out why `gradient_checker` does not work very
  // well with `MatMul` and remove `TestMatMul*` in
  // `mnist_gradients_test` when done.
  GTEST_SKIP();
  float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
  int64_t A_dims[] = {3, 3};
C++ - Registered: Tue Mar 26 12:39:09 GMT 2024 - Last Modified: Thu Apr 13 17:32:14 GMT 2023 - 16.3K bytes - Viewed (0)
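The GTEST_SKIP() call above disables the body of a parameterized test while the TODO is unresolved: the test is reported as skipped, not passed or failed, and nothing after the macro runs. A minimal self-contained sketch of that pattern (the BoolParamTest fixture and UseFunction helper are hypothetical stand-ins for the CppGradients fixture):

#include <gtest/gtest.h>

class BoolParamTest : public ::testing::TestWithParam<bool> {
 protected:
  bool UseFunction() const { return GetParam(); }
};

TEST_P(BoolParamTest, SkippedUntilFixed) {
  // GTEST_SKIP() marks the test as skipped and returns from the test body
  // immediately, so the assertion below never executes.
  GTEST_SKIP();
  EXPECT_TRUE(UseFunction());
}

INSTANTIATE_TEST_SUITE_P(Modes, BoolParamTest, ::testing::Values(false, true));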
tensorflow/c/eager/gradients.cc
  // TODO(srbs): It seems like this is used only for performance optimization
  // and not for correctness. The only downside of keeping this 1 seems to be
  // that the gradient accumulation is unbounded and we will never
  // aggressively aggregate accumulated gradients to recover memory.
  // Revisit and fix.
  return 1;
}

// Consumes references to the tensors in the gradient_tensors list and returns
C++ - Registered: Tue Apr 30 12:39:09 GMT 2024 - Last Modified: Thu Feb 15 09:49:45 GMT 2024 - 19.3K bytes - Viewed (0)
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc
  TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
  EXPECT_EQ(calls, 1);
  // Now advance the clock one second at a time and redo the read. The call
  // count should advance every 3 seconds (i.e. every time the staleness is
  // greater than 2).
  for (int i = 1; i <= 10; i++) {
    env->SetNowSeconds(i + 1);
    TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
    EXPECT_EQ(calls, 1 + i / 3);
  }
C++ - Registered: Tue Apr 23 12:39:09 GMT 2024 - Last Modified: Fri Oct 15 03:16:57 GMT 2021 - 23.2K bytes - Viewed (0)
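To make the arithmetic in that test concrete: with a 2-second staleness bound and the first fetch at t = 1, a re-fetch happens whenever now - fetch_time exceeds 2, i.e. at t = 4, 7 and 10, which is exactly calls == 1 + i/3. A minimal sketch with a hypothetical StaleCache type standing in for RamFileBlockCache and its fake clock:

#include <cassert>
#include <cstdint>

struct StaleCache {
  uint64_t max_staleness = 2;  // seconds a cached block may be reused
  uint64_t fetched_at = 0;     // fake-clock time of the last real fetch
  int calls = 0;               // number of fetches from the backing store

  void Read(uint64_t now) {
    if (calls == 0 || now - fetched_at > max_staleness) {
      ++calls;  // block missing or stale: fetch it again
      fetched_at = now;
    }  // otherwise serve the cached copy
  }
};

int main() {
  StaleCache cache;
  cache.Read(1);  // first read, fake clock at t = 1
  assert(cache.calls == 1);
  for (int i = 1; i <= 10; ++i) {
    cache.Read(i + 1);                 // advance the clock one second per step
    assert(cache.calls == 1 + i / 3);  // re-fetches land at t = 4, 7, 10
  }
  return 0;
}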
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc
        break;
      case FetchState::FINISHED:
        return TF_SetStatus(status, TF_OK, "");
    }
  }
  return TF_SetStatus(
      status, TF_INTERNAL,
      "Control flow should never reach the end of RamFileBlockCache::Fetch.");
}

int64_t RamFileBlockCache::Read(const std::string& filename, size_t offset,
                                size_t n, char* buffer, TF_Status* status) {
  if (n == 0) {
C++ - Registered: Tue Apr 23 12:39:09 GMT 2024 - Last Modified: Thu Jul 16 01:39:09 GMT 2020 - 11.1K bytes - Viewed (0)
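The excerpt shows the tail of a fetch state machine: the loop only exits through a terminal state, and the TF_INTERNAL return after it is a defensive guard for control flow that should be impossible. A minimal sketch of that shape (the Status struct and state transitions are simplified assumptions; the real cache drives FetchState under a mutex and condition variable):

#include <string>

enum class FetchState { CREATED, FETCHING, FINISHED, ERROR };

struct Status {
  bool ok = true;
  std::string message;
};

Status Fetch(FetchState state) {
  // Loop until the block reaches a terminal state; every case either advances
  // the state or returns.
  while (true) {
    switch (state) {
      case FetchState::CREATED:
        state = FetchState::FETCHING;  // start a fetch
        break;
      case FetchState::FETCHING:
        state = FetchState::FINISHED;  // pretend the fetch completed
        break;
      case FetchState::ERROR:
        return {false, "fetch failed"};
      case FetchState::FINISHED:
        return {true, ""};
    }
  }
  // Unreachable, mirroring the TF_INTERNAL guard in the real implementation.
  return {false, "control flow should never reach the end of Fetch"};
}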
tensorflow/c/eager/parallel_device/parallel_device_lib.cc
    : status_(TF_NewStatus()),
      // If the context's default executor is set to async, re-using that in
      // each thread would cause collectives to deadlock. For consistency we
      // create a new sync executor for every thread.
      //
      // TODO(allenl): We should have an async API that works with the
      // parallel device.
      device_(device),
      executor_(
C++ - Registered: Tue Apr 30 12:39:09 GMT 2024 - Last Modified: Fri Feb 09 07:47:20 GMT 2024 - 25.4K bytes - Viewed (1)
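The comment above explains the design choice: each per-device worker thread builds its own synchronous executor instead of sharing the context's default (possibly async) one, so work queued from one thread cannot deadlock another. A minimal sketch of that idea using plain std::thread and a hypothetical Executor type rather than the TFE_Executor API:

#include <thread>
#include <vector>

class Executor {
 public:
  explicit Executor(bool is_async) : is_async_(is_async) {}
  void Run(int op) {
    if (!is_async_) {
      // A sync executor runs the op inline on the calling thread.
      (void)op;
    }
  }

 private:
  bool is_async_;
};

void RunPerDeviceWork(int num_devices) {
  std::vector<std::thread> threads;
  threads.reserve(num_devices);
  for (int device = 0; device < num_devices; ++device) {
    threads.emplace_back([device] {
      // One private sync executor per thread, never shared across threads.
      Executor executor(/*is_async=*/false);
      executor.Run(device);
    });
  }
  for (auto& t : threads) t.join();
}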
tensorflow/c/eager/parallel_device/parallel_device.cc
    const TFE_OpAttrs* attributes, int expected_max_outputs,
    TF_Status* status) {
  absl::optional<std::vector<MaybeParallelTensorOwned>> result;
  // TODO(allenl): We should remove "TPU" from these op names at the very
  // least, or consider other ways of packing/unpacking parallel tensors.
  if (operation_name == std::string("TPUReplicatedInput")) {
    // Special-cased operation for packing per-device tensors into one parallel
    // tensor.
C++ - Registered: Tue Apr 30 12:39:09 GMT 2024 - Last Modified: Wed Mar 29 22:05:31 GMT 2023 - 18.3K bytes - Viewed (0)
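The special case above packs N per-device inputs into a single parallel tensor; the inverse operation (not shown in the excerpt) splits one parallel value back into its per-device components. A minimal sketch of that shape with hypothetical ParallelValue/Pack/Unpack names, not the parallel_device API:

#include <vector>

struct ParallelValue {
  std::vector<float> per_device;  // one component per underlying device
};

// TPUReplicatedInput-style packing: one input per device -> one parallel value.
ParallelValue Pack(const std::vector<float>& per_device_inputs) {
  return ParallelValue{per_device_inputs};
}

// Inverse unpacking: one parallel value -> one output per device.
std::vector<float> Unpack(const ParallelValue& value) {
  return value.per_device;
}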