- Sort Score
- Results per page 10 results
- Languages All
Results 51 - 60 of 368 for Advice (0.15 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/launch_outlining.mlir
// CHECK: %[[LAUNCH_OUTPUT:[0-9]*]] = "tf_device.launch_func"(%[[A_OUTPUT]]) <{device = "/device:test_device:0", func = @[[LAUNCH:.*]]}> %3 = "tf_device.launch"() ({ %4 = "tf.B"(%2) : (tensor<?xi32>) -> tensor<?xi32> tf_device.return %4 : tensor<?xi32> }) {device = "/device:test_device:0"} : () -> tensor<?xi32> // CHECK: tf_executor.yield %[[LAUNCH_OUTPUT]] tf_executor.yield %3 : tensor<?xi32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/rewrite_util.cc
namespace mlir { namespace TF { namespace { const char kDeviceAttr[] = "device"; const char kDeviceGpu[] = "GPU"; std::optional<std::string> GetOpDevice(mlir::Operation *op) { mlir::StringAttr device = op->getAttrOfType<mlir::StringAttr>(kDeviceAttr); if (!device || device.getValue().empty()) { return std::nullopt; } tensorflow::DeviceNameUtils::ParsedName parsed_name;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 19:47:48 UTC 2024 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_base_device.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/jit/device_util.h
class DeviceInfoCache { public: bool IsGpu(DeviceId device) const { return is_gpu_[device.id()]; } bool IsCpu(DeviceId device) const { return is_cpu_[device.id()]; } absl::string_view GetNameFor(DeviceId device) const { return names_[device.id()]; } absl::StatusOr<DeviceId> GetIdFor(absl::string_view name);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 17:18:31 UTC 2024 - 7.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/gpu_ops.td
Op<TFRT_GPU_Dialect, mnemonic, traits> { } // TODO(b/260267885): We may add a device argument when we want to support // GPU MIG. def TransferToDeviceOp: Gpu_Op<"transfer_to_device"> { let summary = "Transfer a CPU tensor to device."; let description = [{ Transfer a CPU tensor to device. Example: %device_tensor = gpurt.transfer_to_device %cpu_tensor }];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 15:01:21 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/jit/device_util.cc
}; devices.ForEach([&](jit::DeviceId device) { if (device_info_cache.IsGpu(device)) { if (maybe_gpu_device) { multiple_gpu_devices = is_multiple_devices(device, &maybe_gpu_device); if (multiple_gpu_devices) return false; } else { maybe_gpu_device = device; } } else if (device_info_cache.IsCpu(device)) { if (maybe_cpu_device) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_tail_with_tobool_op.mlir
%1 = "tf.Rank"(%0) {_tpu_replicate = "cluster", device = ""} : (tensor<*xi1>) -> tensor<*xi32> %2 = "tf.Range"(%cst_0, %1, %cst_1) {_tpu_replicate = "cluster", _xla_outside_compilation = "0", device = ""} : (tensor<i32>, tensor<*xi32>, tensor<i32>) -> tensor<*xi32> %3 = "tf.All"(%0, %2) {_tpu_replicate = "cluster", _xla_outside_compilation = "0", device = "", keep_dims = false} : (tensor<*xi1>, tensor<*xi32>) -> tensor<*xi1>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/colocate_tpu_copy_with_dynamic_shape.mlir
%3 = builtin.unrealized_conversion_cast to tensor<i32> // CHECK: TPUCopyWithDynamicShape{{.*}}device = "foobar" %4, %5 = "tf.TPUCopyWithDynamicShape"(%0, %1, %2, %3) {operandSegmentSizes = array<i32: 2, 2>} : (tensor<2048xi32>, tensor<2048xi32>, tensor<i32>, tensor<i32>) -> (tensor<2048xi32>, tensor<2048xi32>) "tf.TPUExecute"(%4, %arg0) {device = "foobar"} : (tensor<2048xi32>, tensor<!tf_type.string>) -> () return } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 23 00:30:27 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/convert_launch_func_to_tf_call.mlir
// CHECK-SAME: device = "/device:test_device:0" %3 = "tf_device.launch_func"(%2) {device = "/device:test_device:0", func = @_func} : (tensor<?xf32>) -> tensor<?xf32> // CHECK: %[[CALL_OUTPUT_1:[0-9]*]] = "tf.PartitionedCall"(%[[CALL_OUTPUT_0]]) // CHECK-SAME: f = @_func // CHECK-SAME: device = "/device:test_device:1"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.td
def TFXLADeviceSpecificTransforms : Pass<"tfxla-device-specific-transforms", "mlir::func::FuncOp"> { let summary = "Transforms ops that require device context into device independent TF Ops."; let description = [{"Transforms device specific ops into device independent" "ops."}]; let options = [ Option<"device_type_", "device-type", "std::string",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 17:44:14 UTC 2024 - 4.3K bytes - Viewed (0)