- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 847 for device_1 (0.18 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/tpu_colocate_composite_resource_ops.mlir
tf_device.return }) {device = "TPU_REPLICATED_CORE_0"} : () -> () "tf_device.launch"() ({ // CHECK: "tf.B"(%[[RESOURCE_OUT]]) "tf.B"(%1) : (tensor<4xf32>) -> () tf_device.return }) {device = "TPU_REPLICATED_CORE_0"} : () -> () tf_device.return } func.return } // Tests AssignVariable op using composite device resource is wrapped inside // tf_device.Cluster.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h
// such as TPUExecute or XlaExecute depending on the device type and specific // host runtime. Also does some optimization. Will return an error if it fails. // The output Runtime ops depends on both Device Type and Runtime Host. // // Input: // Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices. // xla_device_type - The device type that is being targeted. // Output:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 21:47:17 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_device_helper.h
class RuntimeDevices; // Returns true if at least one GPU device is available at runtime. bool CanUseGpuDevice(const RuntimeDevices &devices); // Returns true if all of the GPUs available at runtime support TensorCores // (NVIDIA compute capability >= 7.0). bool CanUseTensorCores(const RuntimeDevices &devices); // Returns true if operation does not have explicit device placement that would // prevent it from running on GPU device.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Nov 12 21:57:12 UTC 2021 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
session_options, "/job:localhost/replica:0/task:0", &devices)); if (devices.empty()) { return errors::NotFound( "Failed to create a CPU device for EncapsulateSubgraphsPass"); } std::unique_ptr<DeviceMgr> device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices)); const auto* config = &options.session_options->config;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 51K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/ifrt/rewrite_cluster_to_ifrt_call.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Feb 17 07:28:40 UTC 2024 - 9K bytes - Viewed (0) -
tensorflow/c/eager/parallel_device/parallel_device_test.cc
TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList); ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get()); bool has_tpu = false; for (int device_index = 0; device_index < TF_DeviceListCount(devices.get()); ++device_index) { std::string device_type = TF_DeviceListType(devices.get(), device_index, status.get());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 08 23:47:35 UTC 2021 - 29.3K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
// `enabled_for_gpu_` below. bool enabled_for_all_; // If true, enable Device API (PjRt) for TF GPU device. This is a helper // flag so that individual tests can turn on PjRt for GPU specifically. // Once the rollout to GPU is complete, this flag can be deprecated. bool enabled_for_gpu_; private: // Devices for which using Device API (PjRt) is allowed in the XlaLaunch op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.h
} // Returns a device-to-device stream, in round-robin fashion. se::Stream* GetDeviceToDeviceStream(); Status ThenExecute(Device* device, stream_executor::Stream* stream, std::function<void()> func) override; private: bool UseMultipleStreams() const { return stream_ != host_to_device_stream_; } // The main compute stream of the device, used to synchronize the transfer
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/spmd.mlir
func.func @main(%arg0: tensor<*xf32> {tf.device = "/job:localhost/replica:0/task:0/device:CPU:0"}) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 12 04:22:33 UTC 2023 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc
if (!device_mgr_) return failure(); // Type of params_.device is DeviceBase* so store it as Device* to access // derived class method. device_ = device_mgr_->ListDevices().front(); params_.device = device_; params_.resource_manager = device_->resource_manager(); // Resources are cleared at the time of device manager destruction so pass // no-op cleanup function.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 18.9K bytes - Viewed (0)