Results 1 - 10 of 37 for Memcpy (0.14 sec)

  1. tensorflow/c/eager/c_api_test_util.cc

    TFE_TensorHandle* TestScalarTensorHandle(TFE_Context* ctx, float value) {
      float data[] = {value};
      TF_Status* status = TF_NewStatus();
      TF_Tensor* t = TFE_AllocateHostTensor(ctx, TF_FLOAT, nullptr, 0, status);
      memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t));
      TFE_TensorHandle* th = TFE_NewTensorHandleFromTensor(ctx, t, status);
      CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TF_DeleteTensor(t);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 22:37:46 UTC 2024
    - 23.5K bytes
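    A minimal standalone sketch of the pattern above, using only the plain TF C API (TF_AllocateTensor, TF_TensorData, TF_TensorByteSize, all reachable via tensorflow/c/c_api.h); the helper name and the rank-1 shape are illustrative, not taken from c_api_test_util.cc:

      #include <cstdint>
      #include <cstring>
      #include <vector>
      #include "tensorflow/c/c_api.h"

      // Sketch: allocate a rank-1 float tensor and copy host values into it.
      TF_Tensor* MakeVectorTensor(const std::vector<float>& values) {
        const int64_t dims[] = {static_cast<int64_t>(values.size())};
        TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, /*num_dims=*/1,
                                         values.size() * sizeof(float));
        // TF_TensorByteSize(t) equals the byte length requested above, so the
        // copy exactly fills the tensor's buffer.
        std::memcpy(TF_TensorData(t), values.data(), TF_TensorByteSize(t));
        return t;  // caller releases with TF_DeleteTensor
      }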
  2. src/runtime/race/race_darwin_amd64.go

    //go:cgo_import_dynamic madvise madvise ""
    //go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
    //go:cgo_import_dynamic malloc_zones malloc_zones ""
    //go:cgo_import_dynamic memcpy memcpy ""
    //go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
    //go:cgo_import_dynamic mkdir mkdir ""
    //go:cgo_import_dynamic mprotect mprotect ""
    //go:cgo_import_dynamic open open ""
    //go:cgo_import_dynamic pipe pipe ""
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 16 19:29:22 UTC 2023
    - 5.6K bytes
  3. src/runtime/race/race_darwin_arm64.go

    //go:cgo_import_dynamic madvise madvise ""
    //go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
    //go:cgo_import_dynamic malloc_zones malloc_zones ""
    //go:cgo_import_dynamic memcpy memcpy ""
    //go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
    //go:cgo_import_dynamic mkdir mkdir ""
    //go:cgo_import_dynamic mprotect mprotect ""
    //go:cgo_import_dynamic open open ""
    //go:cgo_import_dynamic pipe pipe ""
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 16 19:29:22 UTC 2023
    - 5.6K bytes
  4. tensorflow/compiler/mlir/lite/utils/string_utils.cc

      // big-endian platform.
      memcpy(*buffer, &num_strings, sizeof(int32_t));
    
      // Set offset of strings.
      int32_t start = sizeof(int32_t) * (num_strings + 2);
      for (size_t i = 0; i < offset_.size(); i++) {
        // TODO(b/165919229): This code will need changing if/when we port to a
        // big-endian platform.
        int32_t offset = start + offset_[i];
        memcpy(*buffer + sizeof(int32_t) * (i + 1), &offset, sizeof(int32_t));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 2.9K bytes
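    The writes above produce the TFLite string-tensor buffer layout: a little-endian int32 string count, then num_strings + 1 int32 offsets measured from the start of the buffer (the last one marking the end of the data), then the concatenated string bytes. A self-contained sketch of that layout, assuming the format just described; the function name is illustrative and not part of string_utils.cc:

      #include <cstdint>
      #include <cstring>
      #include <string>
      #include <vector>

      // Sketch: serialize strings as [count][offset_0 .. offset_n][bytes...],
      // with every offset measured from the start of the buffer.
      std::vector<char> PackStrings(const std::vector<std::string>& strings) {
        const int32_t num_strings = static_cast<int32_t>(strings.size());
        const int32_t header = sizeof(int32_t) * (num_strings + 2);
        int32_t total = header;
        for (const auto& s : strings) total += static_cast<int32_t>(s.size());

        std::vector<char> buffer(total);
        std::memcpy(buffer.data(), &num_strings, sizeof(int32_t));

        int32_t offset = header;
        for (int32_t i = 0; i < num_strings; ++i) {
          std::memcpy(buffer.data() + sizeof(int32_t) * (i + 1), &offset,
                      sizeof(int32_t));
          std::memcpy(buffer.data() + offset, strings[i].data(),
                      strings[i].size());
          offset += static_cast<int32_t>(strings[i].size());
        }
        // The final offset marks one past the last string's bytes.
        std::memcpy(buffer.data() + sizeof(int32_t) * (num_strings + 1),
                    &offset, sizeof(int32_t));
        return buffer;
      }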
  5. tensorflow/c/experimental/gradients/grad_test_helper.cc

        ASSERT_EQ(num_elem_numerical, num_elem_analytical);
    
        float* dnumerical = new float[num_elem_numerical]{0};
        memcpy(&dnumerical[0], TF_TensorData(numerical_tensor),
               TF_TensorByteSize(numerical_tensor));
        float* danalytical = new float[num_elem_analytical]{0};
        memcpy(&danalytical[0], TF_TensorData(analytical_tensor),
               TF_TensorByteSize(analytical_tensor));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 5K bytes
  6. tensorflow/c/eager/gradient_checker.cc

      TF_RETURN_IF_ERROR(GetValue(theta, &theta_tensor));
    
      // Get number of elements and fill data.
      int num_elems = TF_TensorElementCount(theta_tensor);
      vector<float> theta_data(num_elems);
      memcpy(theta_data.data(), TF_TensorData(theta_tensor),
             TF_TensorByteSize(theta_tensor));
    
      // Initialize space for the numerical gradient.
      vector<float> dtheta_approx(num_elems);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 15 09:49:45 UTC 2024
    - 7.3K bytes
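    gradient_checker.cc copies theta's values out of the tensor so it can perturb one element at a time and fill dtheta_approx with a finite-difference estimate. Stripped of the TF C API, the underlying arithmetic is the central difference; a generic sketch, where f stands in for the function whose gradient is being checked:

      #include <functional>
      #include <vector>

      // Sketch: central-difference estimate of df/dtheta[i] for each element.
      std::vector<float> NumericalGradient(
          const std::function<float(const std::vector<float>&)>& f,
          std::vector<float> theta, float epsilon = 1e-3f) {
        std::vector<float> dtheta_approx(theta.size());
        for (size_t i = 0; i < theta.size(); ++i) {
          const float saved = theta[i];
          theta[i] = saved + epsilon;
          const float f_plus = f(theta);
          theta[i] = saved - epsilon;
          const float f_minus = f(theta);
          theta[i] = saved;  // restore before moving to the next element
          dtheta_approx[i] = (f_plus - f_minus) / (2.0f * epsilon);
        }
        return dtheta_approx;
      }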
  7. tensorflow/c/experimental/stream_executor/stream_executor_test.cc

        EXPECT_EQ(stream->stream_id, 14);
        std::memcpy(host_dst, device_src->opaque, size);
      };
    
      StreamExecutor* executor = GetExecutor(0);
      TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
      size_t size = sizeof(int);
      int src_data = 34;
      int dst_data = 2;
      DeviceMemoryBase device_src(&src_data, size);
      TF_ASSERT_OK(stream->Memcpy(&dst_data, device_src, size));
      ASSERT_EQ(dst_data, 34);
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 19:54:04 UTC 2024
    - 26.5K bytes
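    This test and the three results that follow all go through stream_executor's Stream::Memcpy, which appears in two overloads in these excerpts: host-to-device with a DeviceMemoryBase* destination and device-to-host with a void* destination, each returning a Status and only enqueuing the copy on the stream. A condensed round trip in that spirit; the function is a sketch, the stream and device buffer are assumed to come from an already initialized StreamExecutor, and the include paths reflect one recent layout of the XLA tree:

      #include "absl/status/status.h"
      #include "xla/stream_executor/device_memory.h"
      #include "xla/stream_executor/stream.h"

      // Sketch: copy one int host -> device -> host on the given stream.
      absl::Status RoundTrip(stream_executor::Stream* stream,
                             stream_executor::DeviceMemoryBase device_buf) {
        int host_src = 34;
        int host_dst = 0;
        // Host -> device: overload taking a DeviceMemoryBase* destination.
        absl::Status s = stream->Memcpy(&device_buf, &host_src, sizeof(int));
        if (!s.ok()) return s;
        // Device -> host: overload taking a void* destination.
        s = stream->Memcpy(&host_dst, device_buf, sizeof(int));
        if (!s.ok()) return s;
        // Memcpy only enqueues work; synchronize before reading host_dst.
        return stream->BlockHostUntilDone();
      }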
  8. tensorflow/compiler/jit/xla_host_recv_device_context.cc

      Status status = XLAShapeToTensorShape(shape_, &tensor_shape);
      if (!status.ok()) {
        done(status);
        return;
      }
    
      *cpu_tensor = Tensor(dtype, tensor_shape);
    
      status = stream_->Memcpy(cpu_tensor->data(), device_memory_base_,
                               device_memory_base_.size());
      if (!status.ok()) {
        done(status);
        return;
      }
      status = stream_->RecordEvent(done_event_.get().get());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 1.9K bytes
  9. tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc

      xla::Shape shape;
      TF_ASSERT_OK(TensorShapeToXLAShape(DT_FLOAT, TensorShape({2, 2}), &shape));
    
      // Copy the cpu_tensor to the GPU first before trying to copy it back.
      TF_ASSERT_OK(
          stream->Memcpy(&gpu_dst, origin_cpu_tensor.data(), gpu_dst.size()));
      TF_ASSERT_OK(stream->BlockHostUntilDone());
    
      TF_ASSERT_OK_AND_ASSIGN(auto se_event, executor->CreateEvent());
      tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 7.2K bytes
  10. tensorflow/compiler/jit/xla_host_send_device_context.cc

    namespace tensorflow {
    
    void XlaHostSendDeviceContext::CopyCPUTensorToDevice(
        const Tensor* cpu_tensor, Device* device, Tensor* device_tensor,
        StatusCallback done, bool sync_dst_compute) const {
      auto status = stream_->Memcpy(device_memory_base_, cpu_tensor->data(),
                                    device_memory_base_->size());
      if (!status.ok()) {
        done(status);
        return;
      }
      status = stream_->RecordEvent(done_event_.get().get());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 1.6K bytes