Results 1 - 4 of 4 for Sync (0.16 sec)

  1. tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem.cc

      if (fflush(posix_file->handle) != 0)
        TF_SetStatusFromIOError(status, errno, posix_file->filename);
    }
    
    static void Sync(const TF_WritableFile* file, TF_Status* status) {
      // For historical reasons, this does the same as `Flush` at the moment.
      // TODO(b/144055243): This should use `fsync`/`sync`.
      Flush(file, status);
    }
    
    static void Close(const TF_WritableFile* file, TF_Status* status) {
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Sun Mar 24 20:08:23 GMT 2024
    - 15.8K bytes
    - Viewed (0)
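The TODO in this excerpt says `Sync` should eventually use `fsync`/`sync` instead of only delegating to `Flush`. A minimal sketch of what that could look like, assuming the `PosixFile` plugin struct (a `FILE*` handle plus the filename) and the `TF_SetStatusFromIOError` helper visible in the excerpt; this is an illustration, not the repository's implementation:

    // Assumes <errno.h>, <stdio.h> (fflush, fileno) and <unistd.h> (fsync).
    static void Sync(const TF_WritableFile* file, TF_Status* status) {
      auto posix_file = static_cast<PosixFile*>(file->plugin_file);
      // fflush only moves data from stdio buffers into the kernel ...
      if (fflush(posix_file->handle) != 0) {
        TF_SetStatusFromIOError(status, errno, posix_file->filename);
        return;
      }
      // ... fsync then forces the kernel to commit it to stable storage,
      // which is the durability guarantee the TODO is asking for.
      if (fsync(fileno(posix_file->handle)) != 0) {
        TF_SetStatusFromIOError(status, errno, posix_file->filename);
        return;
      }
      TF_SetStatus(status, TF_OK, "");
    }

The distinction matters: after `fflush` alone, data can still be lost on power failure; `fsync` is what pushes it through the kernel's page cache to disk.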
  2. tensorflow/c/eager/c_api_experimental.h

    // Causes the calling thread to block till all ops dispatched in this executor
    // have been executed. Note that "execution" here refers to kernel execution /
    // scheduling of copies, etc. Similar to sync execution, it doesn't guarantee
    // that lower level device queues (like GPU streams) have been flushed.
    //
    // This call may not block for execution of ops enqueued concurrently with this
    // call.
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 39.5K bytes
    - Viewed (0)
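In recent versions of this header, the declaration that follows this comment is `TFE_ExecutorWaitForAllPendingNodes`. A minimal sketch of draining the calling thread's executor with it (assumptions: the executor returned by `TFE_ContextGetExecutorForThread` is owned by the context rather than the caller, and error handling is reduced to a log line):

    #include <stdio.h>

    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/eager/c_api_experimental.h"

    // Block until all ops already dispatched on this thread's executor have
    // executed. As the comment above notes, this covers kernel execution and
    // copy scheduling, not lower-level device queues such as GPU streams.
    void DrainThreadExecutor(TFE_Context* ctx) {
      TF_Status* status = TF_NewStatus();
      TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
      TFE_ExecutorWaitForAllPendingNodes(executor, status);
      if (TF_GetCode(status) != TF_OK) {
        fprintf(stderr, "executor wait failed: %s\n", TF_Message(status));
      }
      TF_DeleteStatus(status);
    }

Note the caveat in the excerpt: ops enqueued concurrently with the call may not be waited on, so a caller that needs a full barrier should quiesce producers first.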
  3. tensorflow/c/eager/c_api.cc

            handle->TypeString(), " tensor handle.");
        return nullptr;
      }
      tensorflow::Device* device(handle->device());
      if (device != nullptr) {
        status->status = device->Sync();
        if (!status->status.ok()) {
          return nullptr;
        }
      }
      const tensorflow::Tensor* tensor;
      status->status = handle->Tensor(&tensor);
      if (!status->status.ok()) {
        return nullptr;
      }
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 12 20:00:09 GMT 2024
    - 43.9K bytes
    - Viewed (2)
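The pattern here, syncing a handle's device before reading the tensor behind it, appears to come from the handle-resolution path in this file. A minimal sketch of reaching it through the public C API (hedged: the attribution to `TFE_TensorHandleResolve` is an inference from the excerpt, and setup/teardown are elided):

    #include "tensorflow/c/eager/c_api.h"

    // Copy an eager handle's contents into host memory. Per the excerpt, the
    // runtime syncs the handle's device first, so the returned buffer reflects
    // all work already enqueued for that device.
    TF_Tensor* ResolveToHost(TFE_TensorHandle* handle, TF_Status* status) {
      TF_Tensor* tensor = TFE_TensorHandleResolve(handle, status);
      if (TF_GetCode(status) != TF_OK) return nullptr;
      return tensor;  // Caller frees with TF_DeleteTensor().
    }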
  4. RELEASE.md

    *   `tf.distribute`:
        *   Support added for global sync `BatchNormalization` by using the newly
            added `tf.keras.layers.experimental.SyncBatchNormalization` layer. This
            layer will sync `BatchNormalization` statistics every step across all
            replicas taking part in sync training.
        *   Performance improvements for GPU multi-worker distributed training using
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Apr 29 19:17:57 GMT 2024
    - 727.7K bytes
    - Viewed (8)
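To make the release note concrete: a plain `BatchNormalization` layer normalizes each replica's sub-batch with that replica's own mean and variance, while `SyncBatchNormalization` computes one set of moments over the union of all replicas' sub-batches each step. As a restatement of standard batch-norm statistics (not a formula from the release notes), with replica $r$ holding $n_r$ examples $x_{r,i}$ and $N = \sum_r n_r$:

$$
\mu = \frac{1}{N}\sum_{r}\sum_{i=1}^{n_r} x_{r,i},
\qquad
\sigma^2 = \frac{1}{N}\sum_{r}\sum_{i=1}^{n_r}\bigl(x_{r,i} - \mu\bigr)^2
$$

Every replica then normalizes with the shared $(\mu, \sigma^2)$, so the effective batch size for the statistics is the global batch rather than the per-replica slice.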