- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 3,466 for Devices (0.12 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_device_helper.cc
// (NVIDIA compute capability >= 7.0). bool CanUseTensorCores(const RuntimeDevices &devices) { auto has_tensor_cores = [&](const DeviceNameUtils::ParsedName &device) { auto md = devices.GetGpuDeviceMetadata(device); return md ? md->getCcMajor() >= 7 : false; }; return llvm::all_of( llvm::make_filter_range(devices.device_names(), IsGpuDevice), has_tensor_cores); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 21 08:41:18 UTC 2022 - 2.4K bytes - Viewed (0) -
pkg/kubelet/apis/podresources/server_v1alpha1_test.go
pods []*v1.Pod devices []*podresourcesv1.ContainerDevices expectedResponse *v1alpha1.ListPodResourcesResponse }{ { desc: "no pods", pods: []*v1.Pod{}, devices: []*podresourcesv1.ContainerDevices{}, expectedResponse: &v1alpha1.ListPodResourcesResponse{}, }, { desc: "pod without devices", pods: []*v1.Pod{ {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Mar 07 08:12:16 UTC 2024 - 3.9K bytes - Viewed (0) -
pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go
func New(devEntries []PodDevicesEntry, devices map[string][]string) DeviceManagerCheckpoint { return newV2(devEntries, devices) } func newV2(devEntries []PodDevicesEntry, devices map[string][]string) DeviceManagerCheckpoint { return &Data{ Data: checkpointData{ PodDeviceEntries: devEntries, RegisteredDevices: devices, }, } } // MarshalCheckpoint returns marshalled data
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Apr 15 12:01:56 UTC 2024 - 3.2K bytes - Viewed (0) -
pkg/kubelet/cm/devicemanager/types.go
// owning device plugin to allow setup procedures to take place, and for // the device plugin to provide runtime settings to use the device // (environment variables, mount points and device files). Allocate(pod *v1.Pod, container *v1.Container) error // UpdatePluginResources updates node resources based on devices already // allocated to pods. The node object is provided for the device manager to
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Sep 27 13:02:15 UTC 2023 - 5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_gpu_device.cc
auto device = std::make_unique<XlaDevice>(session_options, options); Status status = device->UseAcceleratorDeviceInfo(); if (!status.ok()) { LOG(INFO) << "Ignoring visible " << DEVICE_GPU_XLA_JIT << " device. Device number is " << i << ", reason: " << status; continue; } devices->push_back(std::move(device)); } return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h
// A TPU device for execution alongside its associated host CPU device. struct TPUDeviceAndHost { TPUDeviceAndHost() = default; TPUDeviceAndHost(llvm::StringRef device, llvm::StringRef host) : device(device), host(host) {} std::string device; std::string host; }; // TPU devices to be used for execution (e.g. devices for TPUExecute ops) and
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 11.3K bytes - Viewed (0) -
pkg/volume/util/device_util_linux_test.go
devices, err := mockDeviceUtil.FindDevicesForISCSILun("target1", 1) if err != nil { t.Fatalf("error getting devices for lun: %v", err) } if devices == nil { t.Fatal("no devices returned") } if len(devices) != 1 { t.Fatalf("wrong number of devices: %d", len(devices)) } if devices[0] != "sda" { t.Fatalf("incorrect device %v", devices) }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Aug 24 19:47:49 UTC 2021 - 8K bytes - Viewed (0) -
pkg/kubelet/cm/devicemanager/topology_hints.go
// request size and the list of available devices. deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.Set[string]{}, requested) } return deviceHints } func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool { // If any device has Topology NUMANodes available, we assume they care about alignment. for _, device := range m.allDevices[resource] {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat Jan 27 02:10:25 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc
// cannot be run on other devices): // // We will try: // 1) If we can do some mathematically equivalent transformation so this // subgraph can be run on other devices. // 2) We will also apply device-specific optimizations as well, that includes // maybe tensor layout transformation, device specific fusion, etc. class AlternativeSubgraphPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cpu_device.cc
public: Status ListPhysicalDevices(std::vector<string>* devices) override; Status CreateDevices(const SessionOptions& options, const string& name_prefix, std::vector<std::unique_ptr<Device>>* devices) override; }; Status XlaCpuDeviceFactory::ListPhysicalDevices(std::vector<string>* devices) { XlaDeviceFlags* flags = GetXlaDeviceFlags();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.5K bytes - Viewed (0)