Results 1 - 7 of 7 for tensor (0.17 sec)

  1. tensorflow/c/c_api_test.cc

      EXPECT_TF_META("v", 2, TF_ATTR_TENSOR, -1);
      TF_Tensor* values[2];
      TF_OperationGetAttrTensorList(oper, "v", &values[0], TF_ARRAYSIZE(values),
                                    s_);
      ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    
      const char* tensor_data[] = {&tensor1[0], &tensor2[0]};
      const size_t tensor_size[] = {TF_ARRAYSIZE(tensor1), TF_ARRAYSIZE(tensor2)};
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 96.9K bytes
    - Viewed (3)
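
    The excerpt above checks the metadata of the list-valued attribute "v" and then fetches its tensors. Below is a minimal sketch of the same pattern outside the test harness; it is not taken from the repository and assumes a valid TF_Operation* oper and TF_Status* status plus the headers tensorflow/c/c_api.h and <vector>. Per the C API docs, the caller takes ownership of the returned tensors.

      // Query the attribute's metadata first, then fetch the tensor list.
      TF_AttrMetadata meta = TF_OperationGetAttrMetadata(oper, "v", status);
      if (TF_GetCode(status) == TF_OK && meta.is_list) {
        std::vector<TF_Tensor*> values(meta.list_size);
        TF_OperationGetAttrTensorList(oper, "v", values.data(),
                                      static_cast<int>(values.size()), status);
        if (TF_GetCode(status) == TF_OK) {
          // The caller owns the non-null entries and must release them.
          for (TF_Tensor* t : values) TF_DeleteTensor(t);
        }
      }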
  2. tensorflow/c/eager/c_api_test.cc

      TF_Tensor* tensor =
          TF_AllocateTensor(TF_FLOAT, dims.data(), dims.size(), sizeof(float));
      float tensor_data[] = {1};
      memcpy(TF_TensorData(tensor), tensor_data, TF_TensorByteSize(tensor));
      TFE_TensorHandle* tensor_handle = TFE_NewTensorHandle(tensor, status);
      ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_OpAddInput(op, tensor_handle, status);
      TF_DeleteTensor(tensor);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 03 20:50:20 GMT 2023
    - 94.6K bytes
    - Viewed (1)
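
    The snippet above allocates a tensor, wraps it in a TFE_TensorHandle, and feeds it to an eager op. The following is a hedged, self-contained sketch of that flow (not taken from the test file; it assumes linking against libtensorflow, the headers tensorflow/c/eager/c_api.h and <cstring>, and omits per-call status checks for brevity):

      TF_Status* status = TF_NewStatus();
      TFE_ContextOptions* opts = TFE_NewContextOptions();
      TFE_Context* ctx = TFE_NewContext(opts, status);
      TFE_DeleteContextOptions(opts);

      // A one-element float tensor holding 2.0f, mirroring the snippet above.
      int64_t dims[] = {1};
      TF_Tensor* tensor = TF_AllocateTensor(TF_FLOAT, dims, 1, sizeof(float));
      float tensor_data[] = {2.0f};
      memcpy(TF_TensorData(tensor), tensor_data, TF_TensorByteSize(tensor));
      TFE_TensorHandle* handle = TFE_NewTensorHandle(tensor, status);
      TF_DeleteTensor(tensor);

      // Square the value with the "Mul" op; the dtype attr is inferred from the input.
      TFE_Op* mul = TFE_NewOp(ctx, "Mul", status);
      TFE_OpAddInput(mul, handle, status);
      TFE_OpAddInput(mul, handle, status);
      TFE_TensorHandle* result = nullptr;
      int num_retvals = 1;
      TFE_Execute(mul, &result, &num_retvals, status);

      // Clean up.
      TFE_DeleteOp(mul);
      TFE_DeleteTensorHandle(handle);
      TFE_DeleteTensorHandle(result);
      TFE_DeleteContext(ctx);
      TF_DeleteStatus(status);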
  3. tensorflow/c/c_api_function_test.cc

       *                 v     v
       *                   add
       *                    |
       *                    |
       *                    v
       */
      // Define
      TF_Tensor* tensor_123 = Int32Tensor({1, 2, 3});
      TF_Operation* c = Const(tensor_123, func_graph_, s_, "const_array");
      ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
      TF_Operation* split = Split3(c, func_graph_, s_);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 20 22:08:54 GMT 2023
    - 63.6K bytes
    - Viewed (6)
  4. tensorflow/c/c_api.h

    TF_CAPI_EXPORT extern void TF_SessionRun(
        TF_Session* session,
        // RunOptions
        const TF_Buffer* run_options,
        // Input tensors
        const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs,
        // Output tensors
        const TF_Output* outputs, TF_Tensor** output_values, int noutputs,
        // Target operations
        const TF_Operation* const* target_opers, int ntargets,
        // RunMetadata
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Oct 26 21:08:15 GMT 2023
    - 82.3K bytes
    - Viewed (3)
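
    The declaration above continues with a run_metadata buffer and an output TF_Status*. A hedged sketch of a typical call follows; it is not from c_api.h, and `session`, `input_op`, `output_op`, and `input_tensor` are assumed to have been created earlier (e.g. via TF_NewSession, TF_GraphOperationByName, and TF_AllocateTensor):

      TF_Output inputs[1] = {{input_op, 0}};
      TF_Tensor* input_values[1] = {input_tensor};
      TF_Output outputs[1] = {{output_op, 0}};
      TF_Tensor* output_values[1] = {nullptr};

      TF_Status* status = TF_NewStatus();
      TF_SessionRun(session,
                    /*run_options=*/nullptr,
                    inputs, input_values, 1,
                    outputs, output_values, 1,
                    /*target_opers=*/nullptr, 0,
                    /*run_metadata=*/nullptr,
                    status);
      if (TF_GetCode(status) == TF_OK) {
        // The caller owns the fetched tensor and must release it.
        TF_DeleteTensor(output_values[0]);
      }
      TF_DeleteStatus(status);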
  5. tensorflow/BUILD

            "//tensorflow/core:reader_base",
            "//tensorflow/core:script_ops_op_lib",
            "//tensorflow/distribute/experimental/rpc/kernels:rpc_ops",
            "//tensorflow/dtensor/cc:dtensor_device_cc",
            "//tensorflow/dtensor/cc:tensor_layout",
            "//tensorflow/lite/c:common",
            "//tensorflow/lite/core/api",
            "//tensorflow/lite/delegates/flex:delegate",
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 09 18:15:11 GMT 2024
    - 53.4K bytes
    - Viewed (8)
  6. configure.py

      tf_tensorrt_version = get_from_env_or_user_or_default(
          environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
          _DEFAULT_TENSORRT_VERSION)
      environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
    
    
    def set_tf_nccl_version(environ_cp):
      """Set TF_NCCL_VERSION."""
      if not is_linux():
        raise ValueError('Currently NCCL is only supported on Linux platform.')
    
    Python
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 18:25:36 GMT 2024
    - 53.8K bytes
    - Viewed (1)
  7. .bazelrc

    build:cuda --@local_config_cuda//:enable_cuda
    
    # CUDA: This config refers to building CUDA op kernels with clang.
    build:cuda_clang --config=cuda
    # Enable TensorRT optimizations https://developer.nvidia.com/tensorrt
    build:cuda_clang --config=tensorrt
    build:cuda_clang --action_env=TF_CUDA_CLANG="1"
    build:cuda_clang --@local_config_cuda//:cuda_compiler=clang
    # Select supported compute capabilities (supported graphics cards).
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu May 02 19:34:20 GMT 2024
    - 52.8K bytes
    - Viewed (2)