- Sort Score
- Results per page: 10
- Languages All
Results 11 - 17 of 17 for RuntimeDevices (0.24 sec)
-
tensorflow/compiler/mlir/tensorflow/utils/tpu_cluster_util.cc
bool pass_host_device, ModuleOp module, std::function<WalkResult(Operation*, tf_device::ClusterOp, std::optional<std::string>)> callback) { mlir::TF::RuntimeDevices devices; if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return failure(); const CallGraph call_graph(module); // symbol_table caches callees in the CallGraph. SymbolTableCollection symbol_table;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 04:50:13 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc
} LogicalResult GetTpuDeviceAssignment( ClusterOp cluster, ReplicateOp replicate, mlir::ModuleOp module, absl::StatusOr<TPUDeviceAssignment>& status_or_tpu_device_assignment) { mlir::TF::RuntimeDevices devices; if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return failure(); uint32_t num_replicas = replicate.getN(); auto num_cores_per_replica_attr = cluster->getAttrOfType<mlir::IntegerAttr>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 18:52:07 UTC 2024 - 13.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
} return success(); } LogicalResult BiasAddOp::UpdateDataFormat(StringRef data_format) { return ::mlir::TF::UpdateDataFormat(data_format, this); } StringRef BiasAddOp::GetOptimalLayout(const RuntimeDevices& devices) { // Keep current data format if no GPUs are available or if explicit placement // does not allow to use GPU for this operation. if (!CanUseGpuDevice(devices) || !CanUseGpuDevice(getOperation()))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
using Permutation = SmallVector<int64_t, 4>; void LayoutAssignmentPass::runOnOperation() { func::FuncOp func = getOperation(); // Get runtime devices information from the closest parent module. RuntimeDevices devices; if (failed(::tensorflow::GetDevicesFromOp(func->getParentOfType<ModuleOp>(), &devices))) return signalPassFailure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc
new_parallel_execute, builder); if (failed(result)) return failure(); return TF::RemoveSingletonParallelExecuteOp(new_parallel_execute, builder); } void TPURewritePass::runOnOperation() { TF::RuntimeDevices devices; if (failed(tensorflow::GetDevicesFromOp(getOperation(), &devices))) return signalPassFailure(); // Collect compilation results.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 29.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
// Get runtime devices information from the closest parent module. auto module = getOperation(); if (failed(CheckPreconditions(module))) signalPassFailure(); mlir::TF::RuntimeDevices devices; if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return signalPassFailure(); llvm::SmallVector<mlir::tf_device::ParallelExecuteOp, 4> tmp_parallel_execute_ops;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
// TF_LayoutSensitiveInterface: SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; } SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; } StringRef GetOptimalLayout(const RuntimeDevices& devices); LogicalResult UpdateDataFormat(StringRef data_format); }]; let hasVerifier = 1; } def TF_BiasAddGradOp : TF_Op<"BiasAddGrad", [Pure]> { let summary = [{
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0)