- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 376 for r2devices (0.18 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc
lhs_device_attr.getValue() == rhs_device_attr.getValue()); }; // Check if tf_executor.NextIteration.Source/tf_executor.NextIteration.Sink // pair has matching devices or no devices. if (auto source = llvm::dyn_cast<tf_executor::NextIterationSourceOp>(op)) { return ops_have_same_device(source, source.GetSink()); } else if (auto sink = llvm::dyn_cast<tf_executor::NextIterationSinkOp>(op)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
pkg/kubelet/cm/util/cdi/cdi.go
} return annotationPrefix + name, nil } // annotationValue returns an annotation value for the given devices. func annotationValue(devices []string) (string, error) { value, sep := "", "" for _, d := range devices { if _, _, _, err := parseQualifiedName(d); err != nil { return "", err } value += sep + d sep = "," } return value, nil }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jul 11 09:48:24 UTC 2023 - 9.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir
} // ----- // Tests collecting compilation and execution devices results in an error. module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU:0"]} { func.func @bad_devices() { // expected-error@+1 {{error in fetching TPU compilation/execution devices: no TPU_SYSTEM devices found}}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 172.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_variable_runtime_reformatting.cc
if (execute_arg_to_outer_args.empty()) return false; // Extract the replicated devices. auto devices_attr = replicate.getDevices(); if (!devices_attr) return false; auto device_map = devices_attr.value(); llvm::SmallDenseMap<llvm::StringRef, llvm::SmallVector<StringRef, 4>> devices; devices.reserve(device_map.size()); for (auto it : device_map) { auto device_alias = it.getName().strref();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.9K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test_helper.cc
} // Call AddDevices to register the XLA devices. // // It may be worth refactoring out XlaOpRegistry::RegisterCompilationDevice to // make this more direct, but probably not worth it solely for this test. std::vector<std::unique_ptr<Device>> devices; TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(session_options, "", &devices)); GraphOptimizationPassOptions opt_options; opt_options.graph = graph;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 09 19:51:48 UTC 2023 - 3.1K bytes - Viewed (0) -
pkg/util/procfs/procfs_linux.go
if len(entries) == 3 && entries[1] == "devices" { return strings.TrimSpace(entries[2]), nil } } return "", fmt.Errorf("could not find devices cgroup location") } // GetFullContainerName gets the container name given the root process id of the container. // E.g. if the devices cgroup for the container is stored in /sys/fs/cgroup/devices/docker/nginx,
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Jan 16 09:22:35 UTC 2023 - 4.1K bytes - Viewed (0) -
tensorflow/c/eager/parallel_device/parallel_device_lib.h
// A parallel tensor with scalar integers numbering component devices. std::unique_ptr<ParallelTensor> DeviceIDs(TFE_Context* context, TF_Status* status) const; // The number of devices operations run on. size_t num_underlying_devices() const { return underlying_devices_.size(); } // The devices operations run on. const std::vector<std::string>& underlying_devices() const {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 25 15:21:13 UTC 2023 - 12.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
// If padded_shape_fn is empty, a default implementation that returns // the logical on-device shape without padding is used. PaddedShapeFn padded_shape_fn; // Set of devices to use. This controls which of the devices on the given // platform will have resources allocated. For GPUs this will be // filled from visible_gpu_devices list from session configuration. std::optional<std::set<int>> allowed_devices;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_colocate_composite_resource_ops.mlir
// CHECK-SAME: (%[[ARG0]] as %[[RI_0:[a-z0-9]*]]: tensor<*x!tf_type.resource<tensor<4xf32>>>) tf_device.replicate(%arg0 as %arg1: tensor<*x!tf_type.resource<tensor<4xf32>>>) { _mirrored_variable_indices = [0], devices = {TPU_REPLICATED_CORE_0 = ["/job:worker/replica:0/task:0/device:TPU:0", "/job:worker/replica:0/task:0/device:TPU:1"]}, n = 2 : i32} { // CHECK: %[[RESOURCE_OUT:.*]] = "tf_device.launch"()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h
// // Input: // Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices. // xla_device_type - The device type that is being targeted. // Output: // Tensorflow Dialect MLIR with Runtime specific ops. All tf_device.cluster // ops are removed. Physical devices are assigned to ops instead of virtual // devices. tensorflow::Status RunLowerClusterToRuntimeOpsPassPipeline(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 21:47:17 UTC 2023 - 2.3K bytes - Viewed (0)