Results 1 - 4 of 4 for device_index (0.08 sec)

  1. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

          default_cancellation_manager_(absl::make_unique<CancellationManager>()) {
      device_threads_.reserve(devices.size());
      for (int device_index = 0; device_index < devices.size(); ++device_index) {
        device_threads_.emplace_back(new DeviceThread(
            devices[device_index].c_str(), is_async, in_flight_nodes_limit));
      }
    }
    
    // Necessary for a unique_ptr to a forward-declared type.
    - Last Modified: Mon Oct 21 04:14:14 UTC 2024
    - 25.9K bytes
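
The loop above spawns one DeviceThread per underlying device and keeps each behind a std::unique_ptr. Below is a minimal standalone sketch of that one-worker-per-device pattern; DeviceWorker is a hypothetical stand-in for TensorFlow's DeviceThread, not a real TensorFlow type.

    // Minimal sketch of the one-worker-per-device pattern in
    // parallel_device_lib.cc. DeviceWorker is a hypothetical stand-in for
    // TensorFlow's DeviceThread.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <thread>
    #include <utility>
    #include <vector>

    class DeviceWorker {
     public:
      explicit DeviceWorker(std::string device_name)
          : device_name_(std::move(device_name)), thread_([this] { Run(); }) {}
      // Joining in the destructor means destroying the vector below waits
      // for every worker to finish.
      ~DeviceWorker() { thread_.join(); }

     private:
      void Run() {
        // The real DeviceThread services a queue of ops; here we only log.
        std::cout << "worker started for " << device_name_ << "\n";
      }

      std::string device_name_;  // declared before thread_, so Run() sees it initialized
      std::thread thread_;
    };

    int main() {
      const std::vector<std::string> devices = {"/job:localhost/device:CPU:0",
                                                "/job:localhost/device:CPU:1"};
      std::vector<std::unique_ptr<DeviceWorker>> device_threads;
      device_threads.reserve(devices.size());
      for (int device_index = 0; device_index < static_cast<int>(devices.size());
           ++device_index) {
        device_threads.emplace_back(new DeviceWorker(devices[device_index]));
      }
    }

Storing unique_ptrs rather than copies keeps each worker at a stable address for the lifetime of the vector, which is why the original reserves the vector and emplaces raw news into it.
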
  2. tensorflow/c/eager/parallel_device/parallel_device_lib.h

        return nullptr;
      }
      TF_DataType datatype_enum(
          static_cast<TF_DataType>(DataTypeToEnum<DataType>().value));
      for (int device_index = 0; device_index < num_underlying_devices();
           ++device_index) {
        auto device_value = absl::make_unique<DataType>();
        *device_value = values[device_index];
        std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> tensor(
            TF_NewTensor(
    - Last Modified: Mon Oct 21 04:14:14 UTC 2024
    - 13.1K bytes
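
The idiom to note in this snippet is std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)>: a smart pointer whose deleter is the C API's destroy function. Here is a minimal standalone sketch of the same idiom, using hypothetical FakeTensor, MakeTensor, and DeleteTensor stand-ins for TF_Tensor, TF_NewTensor, and TF_DeleteTensor.

    // Minimal sketch of the unique_ptr-with-C-deleter idiom used for
    // TF_Tensor above. FakeTensor, MakeTensor, and DeleteTensor are
    // hypothetical stand-ins, not TensorFlow symbols.
    #include <cstdio>
    #include <memory>
    #include <utility>
    #include <vector>

    struct FakeTensor {
      float value;
    };

    FakeTensor* MakeTensor(float value) { return new FakeTensor{value}; }
    void DeleteTensor(FakeTensor* t) { delete t; }

    // The deleter type is a plain function pointer, exactly as in the
    // header snippet's decltype(&TF_DeleteTensor).
    using TensorPtr = std::unique_ptr<FakeTensor, decltype(&DeleteTensor)>;

    int main() {
      const std::vector<float> values = {1.0f, 2.0f};  // one value per device
      std::vector<TensorPtr> per_device;
      per_device.reserve(values.size());
      for (int device_index = 0; device_index < static_cast<int>(values.size());
           ++device_index) {
        TensorPtr tensor(MakeTensor(values[device_index]), &DeleteTensor);
        std::printf("device %d holds %.1f\n", device_index, tensor->value);
        per_device.push_back(std::move(tensor));
      }
    }  // every FakeTensor is released through DeleteTensor here

The deleter must be passed at construction: with a function-pointer deleter type, std::unique_ptr cannot be constructed from the raw pointer alone.
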
  3. tensorflow/c/eager/parallel_device/parallel_device.cc

      std::vector<std::string> underlying_devices_vector;
      underlying_devices_vector.reserve(num_underlying_devices);
      for (int device_index = 0; device_index < num_underlying_devices;
           ++device_index) {
        underlying_devices_vector.push_back(underlying_devices[device_index]);
      }
      std::unique_ptr<ParallelDevice> parallel_device(
          new ParallelDevice(underlying_devices_vector));
      *device_info =
    - Last Modified: Mon Oct 21 04:14:14 UTC 2024
    - 18.3K bytes
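
Here the C API boundary hands over the device names as a raw const char* const* array plus a count, and the loop copies them into an owned std::vector<std::string>. A minimal standalone sketch of that copy follows; RegisterFake is a hypothetical entry point, not the real registration function.

    // Minimal sketch of copying a C-string array across the C API boundary,
    // as in parallel_device.cc. RegisterFake is a hypothetical entry point.
    #include <iostream>
    #include <string>
    #include <vector>

    void RegisterFake(const char* const* underlying_devices,
                      int num_underlying_devices) {
      std::vector<std::string> underlying_devices_vector;
      underlying_devices_vector.reserve(num_underlying_devices);
      // Same loop as the snippet: copy each C string into an owned
      // std::string, since the caller's buffers need not outlive this call.
      for (int device_index = 0; device_index < num_underlying_devices;
           ++device_index) {
        underlying_devices_vector.push_back(underlying_devices[device_index]);
      }
      for (const std::string& name : underlying_devices_vector) {
        std::cout << name << "\n";
      }
    }

    int main() {
      const char* devices[] = {"/job:localhost/device:CPU:0",
                               "/job:localhost/device:CPU:1"};
      RegisterFake(devices, 2);
    }
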
  4. RELEASE.md

            treated as tensors, so if you still want them to be treated that way,
            you need to wrap them with `tf.convert_to_tensor`.
        *   No lowering on gradient case op when input is `DeviceIndex` op.
        *   Extend the ragged version of `tf.gather` to support `batch_dims` and
            `axis` args.
        *   Update `tf.map_fn` to support RaggedTensors and SparseTensors.
    - Last Modified: Tue Oct 22 14:33:53 UTC 2024
    - 735.3K bytes