Results 41 - 50 of 74 for "dies" (0.13 sec)

  1. tensorflow/c/experimental/next_pluggable_device/c_api.h

    // invokes create_func to create the resource. `delete_func` is needed for
    // ResourceMgr to clean up the resource. `status` will be set. If `status` is
    // not OK, `*result_plugin_resource` will be set as nullptr.
    //
    // Caller does not take ownership of the `plugin_resource`.
    TF_CAPI_EXPORT extern void TF_LookupOrCreatePluginResource(
        TF_OpKernelContext* ctx, const char* container_name,
        const char* plugin_resource_name, void** result_plugin_resource,
    - C
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Wed Dec 20 20:01:06 GMT 2023
    - 7.2K bytes
    - Viewed (0)
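
    The excerpt cuts off before the callback parameters, so the shapes below are assumptions rather than the header's actual signature; this is only a sketch of the create/delete pair the comment describes, with `DeleteMyPluginResource` standing in for the `delete_func` that ResourceMgr uses for cleanup.

        // Hypothetical plugin-side callbacks for the lookup-or-create pattern
        // described in the comment above. Parameter shapes are assumptions;
        // the real trailing parameters are truncated in the excerpt.
        struct MyPluginResource {
          int device_ordinal = 0;
        };

        extern "C" void* CreateMyPluginResource(void* create_args) {
          // Called only when no resource exists under the container/name pair.
          (void)create_args;
          return new MyPluginResource();
        }

        extern "C" void DeleteMyPluginResource(void* resource) {
          // Handed to ResourceMgr, which owns the resource and cleans it up;
          // per the comment, the caller never takes ownership.
          delete static_cast<MyPluginResource*>(resource);
        }
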
  2. tensorflow/c/eager/c_api_experimental.cc

    }
    
    TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx, TF_DataType dtype,
                                      const int64_t* dims, int num_dims,
                                      TF_Status* status) {
      std::vector<int64_t> dimvec(num_dims);
      for (int i = 0; i < num_dims; ++i) {
        dimvec[i] = static_cast<int64_t>(dims[i]);
      }
    
      if (ctx == nullptr) {
        status->status = tensorflow::errors::InvalidArgument("Invalid Context");
    - C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 11 23:52:39 GMT 2024
    - 35.9K bytes
    - Viewed (3)
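
    A minimal usage sketch for the function shown above, assuming only the standard eager C API entry points (`TF_NewStatus`, `TFE_NewContext`, `TF_TensorData`, and their matching delete calls); error handling is abbreviated.

        #include <cstdint>
        #include <cstring>

        #include "tensorflow/c/eager/c_api.h"
        #include "tensorflow/c/eager/c_api_experimental.h"
        #include "tensorflow/c/tf_status.h"

        int main() {
          TF_Status* status = TF_NewStatus();
          TFE_ContextOptions* opts = TFE_NewContextOptions();
          TFE_Context* ctx = TFE_NewContext(opts, status);
          TFE_DeleteContextOptions(opts);
          if (TF_GetCode(status) != TF_OK) {
            TF_DeleteStatus(status);
            return 1;
          }

          // Allocate a 2x2 float tensor on the host and fill it in place.
          const int64_t dims[] = {2, 2};
          TF_Tensor* t =
              TFE_AllocateHostTensor(ctx, TF_FLOAT, dims, /*num_dims=*/2, status);
          if (TF_GetCode(status) == TF_OK) {
            const float values[4] = {1.f, 2.f, 3.f, 4.f};
            std::memcpy(TF_TensorData(t), values, TF_TensorByteSize(t));
            TF_DeleteTensor(t);
          }

          TFE_DeleteContext(ctx);
          TF_DeleteStatus(status);
          return 0;
        }
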
  3. tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc

      tensorflow::AsyncValueTensor* av_tensor =
          tensorflow::AsyncValueTensor::FromTensor(tensor);
      if (av_tensor == nullptr || av_tensor->GetBuffer() == nullptr) {
        return absl::InternalError("Input tensor does not have PjRtBuffer.");
      }
      auto* c_api_buffer =
          dynamic_cast<xla::PjRtCApiBuffer*>(av_tensor->GetBuffer().get());
      if (c_api_buffer == nullptr) {
        return absl::InternalError(
    - C++
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Mon Oct 30 19:20:20 GMT 2023
    - 3.7K bytes
    - Viewed (0)
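
    The snippet's defensive downcast is a general pattern; the sketch below restates it with placeholder types (`Base` and `Derived` are hypothetical, not TensorFlow classes), returning `absl::InternalError` instead of crashing when the runtime type is not the expected one.

        #include "absl/status/status.h"
        #include "absl/status/statusor.h"

        struct Base { virtual ~Base() = default; };
        struct Derived : Base { int payload = 0; };

        // Downcast with dynamic_cast and surface a clear error when the object
        // is missing or not of the expected concrete type.
        absl::StatusOr<Derived*> AsDerived(Base* base) {
          if (base == nullptr) {
            return absl::InternalError("Input does not have a backing object.");
          }
          auto* derived = dynamic_cast<Derived*>(base);
          if (derived == nullptr) {
            return absl::InternalError("Object is not of the expected type.");
          }
          return derived;
        }
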
  4. tensorflow/c/eager/immediate_execution_context.h

      // `memory_releaser` will be called on destruction, and it's responsible for
      // cleaning up the underlying buffer.
      virtual AbstractTensorInterface* CreateTensor(
          DataType dtype, const int64_t* dims, int num_dims, void* data, size_t len,
          MemoryReleaser memory_releaser, void* memory_releaser_arg) = 0;
    
      // Create a handle to wrap and manage a Tensor
      virtual ImmediateExecutionTensorHandle* CreateLocalHandle(
    - C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 06 08:34:00 GMT 2023
    - 12.3K bytes
    - Viewed (0)
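
    The same caller-supplied releaser idea is exposed publicly through `TF_NewTensor` in the C API; the following is a minimal sketch of that counterpart (not of the abstract `CreateTensor` itself), where the deallocator frees the wrapped buffer when the tensor is destroyed.

        #include <cstdint>
        #include <cstdlib>

        #include "tensorflow/c/tf_datatype.h"
        #include "tensorflow/c/tf_tensor.h"

        // Called by TensorFlow when the tensor is destroyed; responsible for
        // cleaning up the underlying buffer, as the comment above describes
        // for `memory_releaser`.
        static void FreeBuffer(void* data, size_t len, void* arg) {
          (void)len;
          (void)arg;
          free(data);
        }

        TF_Tensor* WrapExistingBuffer() {
          const int64_t dims[] = {2, 3};
          const size_t len = 2 * 3 * sizeof(float);
          void* data = malloc(len);  // ownership passes to the tensor
          return TF_NewTensor(TF_FLOAT, dims, /*num_dims=*/2, data, len,
                              &FreeBuffer, /*deallocator_arg=*/nullptr);
        }
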
  5. tensorflow/c/eager/c_api_unified_experimental_internal.h

    // `c_api_unified_experimental.h` header.
    // =============================================================================
    
    // Represents either a MlirTensor or a GraphTensor.
    // This base class does not expose any public methods other than to distinguish
    // which subclass it actually is. The user is responsible to use the right
    // type of AbstractTensor in their context (do not pass an MlirTensor to a
    - C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Nov 13 22:20:40 GMT 2020
    - 5.2K bytes
    - Viewed (0)
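
    A generic sketch of the pattern the comment describes: a base class whose only public surface is a tag identifying the concrete subclass. The names here (`TensorKind`, `TaggedTensor`, and the subclasses) are hypothetical, not the actual classes in this header.

        enum class TensorKind { kMlir, kGraph };

        // The base class exposes nothing but the kind tag; callers check the
        // tag and use the matching subclass in the right context.
        class TaggedTensor {
         public:
          virtual ~TaggedTensor() = default;
          TensorKind kind() const { return kind_; }

         protected:
          explicit TaggedTensor(TensorKind kind) : kind_(kind) {}

         private:
          const TensorKind kind_;
        };

        class MlirLikeTensor : public TaggedTensor {
         public:
          MlirLikeTensor() : TaggedTensor(TensorKind::kMlir) {}
        };

        class GraphLikeTensor : public TaggedTensor {
         public:
          GraphLikeTensor() : TaggedTensor(TensorKind::kGraph) {}
        };
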
  6. tensorflow/c/experimental/filesystem/modular_filesystem_registration.cc

      if (info->plugin_memory_allocate == nullptr)
        return errors::FailedPrecondition(
            "Cannot load filesystem plugin which does not provide "
            "`plugin_memory_allocate`");
    
      if (info->plugin_memory_free == nullptr)
        return errors::FailedPrecondition(
            "Cannot load filesystem plugin which does not provide "
            "`plugin_memory_free`");
    
      return OkStatus();
    }
    
    namespace filesystem_registration {
    - C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 07 22:08:43 GMT 2023
    - 12.8K bytes
    - Viewed (0)
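
    A sketch of the plugin side that the check above guards, assuming the `TF_InitPlugin` entry point and the `TF_FilesystemPluginInfo` fields from `filesystem_interface.h`; the `ops`/`num_schemes` setup a real plugin also needs is only hinted at in a comment.

        #include <cstdlib>

        #include "tensorflow/c/experimental/filesystem/filesystem_interface.h"

        // Memory hooks the registration code above insists on; leaving either
        // null makes the plugin load fail with FailedPrecondition.
        static void* plugin_memory_allocate(size_t size) { return calloc(1, size); }
        static void plugin_memory_free(void* ptr) { free(ptr); }

        // Entry point TensorFlow calls after loading the plugin shared object.
        void TF_InitPlugin(TF_FilesystemPluginInfo* info) {
          info->plugin_memory_allocate = plugin_memory_allocate;
          info->plugin_memory_free = plugin_memory_free;
          // A real plugin would also fill info->ops and info->num_schemes here;
          // those fields are assumed and not shown in the excerpt.
        }
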
  7. ci/official/utilities/setup_macos.sh

      chmod +x "/usr/local/bin/bazel"
    fi
    
    # "TFCI_MACOS_UPGRADE_PYENV_ENABLE" is used to decide if we need to upgrade the
    # Pyenv version. We enable this for macOS x86 builds as the default Pyenv on
    # those VMs does not support installing Python 3.12 and above which we need
    # for running smoke tests in nightly/release wheel builds.
    if [[ "${TFCI_MACOS_UPGRADE_PYENV_ENABLE}" == 1 ]]; then
    - Shell Script
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 15:23:28 GMT 2024
    - 6.2K bytes
    - Viewed (0)
  8. tensorflow/c/experimental/filesystem/modular_filesystem.h

      Status DeleteRecursively(const std::string& dirname, TransactionToken* token,
                               int64_t* undeleted_files,
                               int64_t* undeleted_dirs) override;
      Status DeleteDir(const std::string& dirname,
                       TransactionToken* token) override;
      Status RecursivelyCreateDir(const std::string& dirname,
    - C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Oct 12 08:49:52 GMT 2023
    - 8.9K bytes
    - Viewed (0)
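
    In user code these operations are usually reached through `tensorflow::Env`, which dispatches to whatever filesystem (modular or built-in) is registered for the URI scheme; a minimal sketch, assuming such a registration exists for `dirname`.

        #include <cstdint>
        #include <string>

        #include "tensorflow/core/platform/env.h"

        // Recursively delete a directory via the registered filesystem and
        // report how many entries could not be removed.
        tensorflow::Status CleanUpDirectory(const std::string& dirname) {
          tensorflow::Env* env = tensorflow::Env::Default();
          int64_t undeleted_files = 0;
          int64_t undeleted_dirs = 0;
          return env->DeleteRecursively(dirname, &undeleted_files,
                                        &undeleted_dirs);
        }
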
  9. SECURITY.md

    TensorFlow server (`tf.train.Server`). The TensorFlow server is intended for
    internal communication only. It is not built for use in untrusted environments
    or networks.
    
    For performance reasons, the default TensorFlow server does not include any
    authorization protocol and sends messages unencrypted. It accepts connections
    from anywhere, and executes the graphs it is sent without performing any checks.
    - Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Sun Oct 01 06:06:35 GMT 2023
    - 9.6K bytes
    - Viewed (0)
  10. tensorflow/BUILD

    # with clang. It does not imply that CUDA support has been enabled.
    alias(
        name = "is_cuda_compiler_clang",
        actual = if_oss(
            "@local_config_cuda//:is_cuda_compiler_clang",
            "@local_config_cuda//cuda:TRUE",
        ),
    )
    
    # Config setting that is satisfied when CUDA device code should be compiled
    # with nvcc. It does not imply that CUDA support has been enabled.
    alias(
    - Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 09 18:15:11 GMT 2024
    - 53.4K bytes
    - Viewed (8)