- Sort by: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 2,948 for Devices (0.11 sec)
-
tensorflow/compiler/jit/xla_platform_info.h
// configuring the persistor used in the DeviceCompiler. Please note that // non-XLA devices aren't supported yet. This is because: // 1. PjRtClient doesn't support data transfer for non-XLA devices yet // 2. Fetching the PjRtClient for non-XLA devices is also not supported yet Status GetOrCreatePjRtDeviceCompilerAndProfiler( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h
// such as TPUExecute or XlaExecute depending on the device type and specific // host runtime. Also does some optimization. Will return an error if it fails. // The output Runtime ops depends on both Device Type and Runtime Host. // // Input: // Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices. // xla_device_type - The device type that is being targeted. // Output:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 21:47:17 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/xla_rewrite.mlir
// RUN: tf-opt %s -split-input-file -tf-xla-rewrite | FileCheck %s module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:GPU:0"]} { // CHECK-LABEL: func.func @convert_cluster_func func.func @convert_cluster_func(%arg0: tensor<i32>) -> tensor<i32> { // CHECK: "tf.XlaLaunch"(%arg0) <{function = @func, operandSegmentSizes = array<i32: 0, 1, 0>}> : (tensor<i32>) -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_variable_runtime_reformatting.cc
device_list_for_alias.reserve(device_list.size()); for (auto device : device_list) device_list_for_alias.emplace_back( mlir::cast<StringAttr>(device).getValue()); devices.insert({device_alias, device_list_for_alias}); } OpBuilder builder(replicate); builder.setInsertionPoint(while_op); // Create per-device variables for formatting state, and add them to the while // loop.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/spmd.mlir
func.func @main(%arg0: tensor<*xf32> {tf.device = "/job:localhost/replica:0/task:0/device:CPU:0"}) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 12 04:22:33 UTC 2023 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tpu_cluster_util.cc
std::function<WalkResult(Operation*, tf_device::ClusterOp, std::optional<std::string>)> callback) { mlir::TF::RuntimeDevices devices; if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return failure(); const CallGraph call_graph(module); // symbol_table caches callees in the CallGraph. SymbolTableCollection symbol_table;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 04:50:13 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/jit/test_util.cc
} Device* DeviceSetup::GetDevice(const string& device_name) { if (device_mgr_ == nullptr) { return nullptr; } string full_device_name = absl::StrCat( "/job:localhost/replica:0/task:0/device:", device_name, ":0"); Device* device; TF_CHECK_OK(device_mgr_->LookupDevice(full_device_name, &device)); return device; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 11:36:41 UTC 2024 - 3.7K bytes - Viewed (0) -
pkg/util/procfs/procfs_linux.go
if len(entries) == 3 && entries[1] == "devices" { return strings.TrimSpace(entries[2]), nil } } return "", fmt.Errorf("could not find devices cgroup location") } // GetFullContainerName gets the container name given the root process id of the container. // E.g. if the devices cgroup for the container is stored in /sys/fs/cgroup/devices/docker/nginx,
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Jan 16 09:22:35 UTC 2023 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
// The name of the compilation device (e.g., "XLA_CPU_JIT"); string compilation_device_name; // If 'use_multiple_streams' is true, we create separate streams for // compute, host-to-device, and device-to-host communication. bool use_multiple_streams = false; // If true, the XLA devices with the same device ordinal will share the same // compute stream. Otherwise each XLA device will have its own compute
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu-variable-runtime-reformatting.mlir
%arg2: !tf_res_md_f32 {tf.device = "/device:TPU:0"}, %arg3: !tf_res_md_f32 {tf.device = "/device:TPU:1"}) { %0 = "tf.Const"() {value = dense<100> : tensor<i32>} : () -> tensor<i32> // CHECK: %[[STATE0:.*]] = "tf.VarHandleOp"() // CHECK-SAME: device = "/device:TPU:0" // CHECK: %[[STATE1:.*]] = "tf.VarHandleOp"() // CHECK-SAME: device = "/device:TPU:1"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 25.4K bytes - Viewed (0)