Results 1 - 5 of 5 for device_index (0.1 sec)
tensorflow/c/eager/parallel_device/parallel_device_lib.cc
      default_cancellation_manager_(absl::make_unique<CancellationManager>()) {
    device_threads_.reserve(devices.size());
    for (int device_index = 0; device_index < devices.size(); ++device_index) {
      device_threads_.emplace_back(new DeviceThread(
          devices[device_index].c_str(), is_async, in_flight_nodes_limit));
    }
  }

  // Necessary for a unique_ptr to a forward-declared type.
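The constructor above fans out one DeviceThread per underlying device. As a rough, self-contained sketch of that one-thread-per-device pattern, here is the same loop shape with a hypothetical Worker class standing in for the real DeviceThread (which additionally manages async op execution and status tracking):

#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <vector>

// Hypothetical stand-in for DeviceThread: owns one thread that would
// process work for a single underlying device.
class Worker {
 public:
  explicit Worker(std::string device_name)
      : device_name_(std::move(device_name)),
        thread_([this] { std::cout << "worker for " << device_name_ << "\n"; }) {}
  ~Worker() { thread_.join(); }

 private:
  std::string device_name_;  // initialized before thread_ starts
  std::thread thread_;
};

int main() {
  std::vector<std::string> devices = {"/device:CPU:0", "/device:CPU:1"};
  // Same shape as the constructor above: reserve, then one worker per device.
  std::vector<std::unique_ptr<Worker>> device_threads;
  device_threads.reserve(devices.size());
  for (int device_index = 0; device_index < static_cast<int>(devices.size());
       ++device_index) {
    device_threads.emplace_back(std::make_unique<Worker>(devices[device_index]));
  }
}  // Each worker joins in ~Worker as the vector is destroyed.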
tensorflow/c/eager/parallel_device/parallel_device_lib.h
      return nullptr;
    }
    TF_DataType datatype_enum(
        static_cast<TF_DataType>(DataTypeToEnum<DataType>().value));
    for (int device_index = 0; device_index < num_underlying_devices();
         ++device_index) {
      auto device_value = absl::make_unique<DataType>();
      *device_value = values[device_index];
      std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> tensor(
          TF_NewTensor(
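This loop wraps one host scalar per device in a TF_Tensor. A minimal sketch of that host-to-TF_Tensor step using TF_AllocateTensor from the stable TensorFlow C API (the snippet itself uses TF_NewTensor with a custom deallocator); ScalarTensor is a hypothetical helper name, and linking against libtensorflow is assumed:

#include <cstdint>
#include <cstring>
#include <memory>

#include "tensorflow/c/c_api.h"

// Wrap a single host int32 value in a TF_Tensor, roughly what the loop
// above does once per underlying device.
std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> ScalarTensor(
    int32_t value) {
  // A scalar has rank 0, so no dims are passed.
  TF_Tensor* t = TF_AllocateTensor(TF_INT32, /*dims=*/nullptr, /*num_dims=*/0,
                                   sizeof(int32_t));
  std::memcpy(TF_TensorData(t), &value, sizeof(int32_t));
  return {t, TF_DeleteTensor};
}

The unique_ptr with a decltype(&TF_DeleteTensor) deleter is the same ownership idiom the header uses, so the tensor is freed even on early returns.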
tensorflow/c/eager/parallel_device/parallel_device.cc
    std::vector<std::string> underlying_devices_vector;
    underlying_devices_vector.reserve(num_underlying_devices);
    for (int device_index = 0; device_index < num_underlying_devices;
         ++device_index) {
      underlying_devices_vector.push_back(underlying_devices[device_index]);
    }
    std::unique_ptr<ParallelDevice> parallel_device(
        new ParallelDevice(underlying_devices_vector));
    *device_info =
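The point of this copy is that the device names arrive over a C API boundary, so they are duplicated into owning std::string storage before the ParallelDevice holds on to them. A sketch of just that boundary copy (CopyDeviceNames is a hypothetical name):

#include <string>
#include <vector>

// Copy a (pointer, count) pair of C strings from a C caller into an owning
// vector, so the result can outlive the caller's buffers.
std::vector<std::string> CopyDeviceNames(const char* const* underlying_devices,
                                         int num_underlying_devices) {
  std::vector<std::string> underlying_devices_vector;
  underlying_devices_vector.reserve(num_underlying_devices);
  for (int device_index = 0; device_index < num_underlying_devices;
       ++device_index) {
    underlying_devices_vector.push_back(underlying_devices[device_index]);
  }
  return underlying_devices_vector;
}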
tensorflow/c/eager/parallel_device/parallel_device_test.cc
    ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    bool has_tpu = false;
    for (int device_index = 0; device_index < TF_DeviceListCount(devices.get());
         ++device_index) {
      std::string device_type =
          TF_DeviceListType(devices.get(), device_index, status.get());
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
      if (device_type == "TPU") {
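The test walks the context's device list by index to detect a TPU. A standalone sketch of the same enumeration with the TensorFlow C eager API, minus the gtest assertions (assumes libtensorflow at link time; error handling kept minimal):

#include <cstdio>
#include <string>

#include "tensorflow/c/eager/c_api.h"

int main() {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  TFE_DeleteContextOptions(opts);
  if (TF_GetCode(status) != TF_OK) {
    std::fprintf(stderr, "%s\n", TF_Message(status));
    return 1;
  }

  // Same loop as the test: index over the device list, checking each type.
  TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
  bool has_tpu = false;
  for (int device_index = 0; device_index < TF_DeviceListCount(devices);
       ++device_index) {
    std::string device_type = TF_DeviceListType(devices, device_index, status);
    if (device_type == "TPU") has_tpu = true;
  }
  std::printf("TPU available: %s\n", has_tpu ? "yes" : "no");

  TF_DeleteDeviceList(devices);
  TFE_DeleteContext(ctx);
  TF_DeleteStatus(status);
}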
RELEASE.md
    treated as tensors, so if you still want them to be treated that way, you
    need to wrap them with `tf.convert_to_tensor`.
*   No lowering on gradient case op when input is `DeviceIndex` op.
*   Extend the ragged version of `tf.gather` to support `batch_dims` and
    `axis` args.
*   Update `tf.map_fn` to support RaggedTensors and SparseTensors.