Results 1 - 6 of 6 for wait (0.16 sec)

  1. tensorflow/c/eager/c_api_experimental_test.cc

      //
      // 1. Create a variable on `remote_device`, using `ctx_0`.
      TFE_TensorHandle* handle_0 =
          CreateVariable(ctx_0, 1.2, remote_device, /*variable_name=*/"var");
    
      // 2. Wait for `var` to be created and initialized on the worker.
      TF_Status* status = TF_NewStatus();
      TFE_ContextAsyncWait(ctx_0, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TF_DeleteStatus(status);
    
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 03 03:14:26 GMT 2023
    - 31.5K bytes
    - Viewed (1)
  2. tensorflow/c/eager/c_api_test.cc

      // 1. Create a variable on `remote_device`, using `ctx_0`.
      TFE_TensorHandle* handle_0 =
          CreateVariable(ctx_0, 1.2, remote_device, /*variable_name=*/"var2");
    
      // 2. Wait for `var2` to be created and initialized on the worker.
      TF_Status* status = TF_NewStatus();
      TFE_ContextAsyncWait(ctx_0, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TF_DeleteStatus(status);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 03 20:50:20 GMT 2023
    - 94.6K bytes
    - Viewed (1)
  3. RELEASE.md

          (https://www.tensorflow.org/api_docs/python/tf/data/experimental/service)
          to write distributed dataset snapshots. The call is non-blocking and
          returns without waiting for the snapshot to finish. Setting `wait=True` to
          `tf.data.Dataset.load` allows the snapshots to be read while they are
          being written.
    
    ### Bug Fixes and Other Changes
    
    * <SIMILAR TO ABOVE SECTION, BUT FOR OTHER IMPORTANT CHANGES / BUG FIXES>
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Apr 29 19:17:57 GMT 2024
    - 727.7K bytes
    - Viewed (8)
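    The release note above describes the non-blocking distributed snapshot API. A minimal
    sketch of how the write and the `wait=True` read fit together, assuming a tf.data
    service dispatcher is already running at `grpc://localhost:5000` and that the write
    entry point is `tf.data.experimental.distributed_save` (names and signatures may
    differ between TF releases):

      import tensorflow as tf

      DISPATCHER = "grpc://localhost:5000"   # assumed, already-running dispatcher
      SNAPSHOT_PATH = "/tmp/demo_snapshot"   # placeholder path

      ds = tf.data.Dataset.range(1000)

      # Ask the tf.data service workers to write the snapshot. The call is
      # non-blocking and returns before the snapshot is finished.
      tf.data.experimental.distributed_save(ds, SNAPSHOT_PATH, DISPATCHER)

      # With wait=True the reader can consume the snapshot while it is still
      # being written, blocking until elements become available.
      loaded = tf.data.Dataset.load(SNAPSHOT_PATH, wait=True)
      for element in loaded.take(5):
        print(element.numpy())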
  4. ci/devinfra/docker_windows/Dockerfile

            "--quiet", "--wait", "--nocache", \
            "--add", "Microsoft.VisualStudio.Workload.VCTools", \
            "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", \
            "--add", "Microsoft.VisualStudio.Component.Windows10SDK.19041" -Wait; \
        Start-Process -FilePath C:/TEMP/vs_buildtools.exe -ArgumentList "--installPath", "C:/VS", \
            "--quiet", "--wait", "--nocache", "--includeOptional", \
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Fri Aug 18 17:24:20 GMT 2023
    - 13.6K bytes
    - Viewed (0)
  5. configure.py

      proc = subprocess.Popen(
          [environ_cp['PYTHON_BIN_PATH'], paths[0]] + cuda_libraries,
          stdout=subprocess.PIPE,
          env=maybe_encode_env(environ_cp))
    
      if proc.wait():
        # Errors from find_cuda_config.py were sent to stderr.
        print('Asking for detailed CUDA configuration...\n')
        return False
    
      config = dict(
    Python
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 18:25:36 GMT 2024
    - 53.8K bytes
    - Viewed (1)
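    The excerpt runs a CUDA-probing helper script and uses the child's exit status to
    decide whether to fall back to interactive configuration. A standalone sketch of the
    same Popen/wait pattern (the function name and arguments here are placeholders, not
    part of configure.py):

      import subprocess
      import sys

      def run_probe(script_path, *args):
        """Run a helper script, returning its stdout or None on failure."""
        # Capture stdout through a pipe; leave stderr alone so the child's
        # error text still reaches the terminal, as in the excerpt above.
        proc = subprocess.Popen(
            [sys.executable, script_path] + list(args),
            stdout=subprocess.PIPE)
        out = proc.communicate()[0]   # drain the pipe before checking the status
        if proc.wait():               # non-zero exit code means the probe failed
          return None
        return out.decode('utf-8')

      # Illustrative usage:
      #   config_text = run_probe('find_cuda_config.py', 'cuda', 'cudnn')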
  6. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

      StatusPtr first_bad_status(nullptr);
    
      for (const auto& dt : device_threads_) {
        StatusPtr async_wait_status(TF_NewStatus());
        dt->AsyncWait(async_wait_status.get());
        // Prefer non cancelled errors to uncover real failures.
        if (TF_GetCode(async_wait_status.get()) != TF_OK &&
            (first_bad_status == nullptr ||
             TF_GetCode(first_bad_status.get()) == TF_CANCELLED)) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
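    The comment "Prefer non cancelled errors to uncover real failures" reflects the
    aggregation rule used here: when several device threads fail together, CANCELLED
    statuses are usually collateral from some other failure, so the first non-cancelled
    error is the one worth surfacing. An illustrative sketch of that rule in Python (the
    (code, message) tuples stand in for TF_Status objects and are not the TF C API):

      def first_real_error(statuses):
        """Pick the most informative error, preferring non-CANCELLED codes."""
        first_bad = None
        for code, message in statuses:
          if code == 'OK':
            continue
          # Record the first error seen; a CANCELLED placeholder is replaced
          # by any later error, mirroring the C++ condition above.
          if first_bad is None or first_bad[0] == 'CANCELLED':
            first_bad = (code, message)
        return first_bad

      print(first_real_error([
          ('OK', ''),
          ('CANCELLED', 'operation cancelled'),
          ('INVALID_ARGUMENT', 'shape mismatch'),
      ]))  # -> ('INVALID_ARGUMENT', 'shape mismatch')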