- Sort Score
- Results per page: 10
- Languages All
Results 121 - 130 of 718 for divides (0.39 sec)
-
src/math/bits/bits_errors_bootstrap.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 592 bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
// If padded_shape_fn is empty, a default implementation that returns // the logical on-device shape without padding is used. PaddedShapeFn padded_shape_fn; // Set of devices to use. This controls which of the devices on the given // platform will have resources allocated. For GPUs this will be // filled from visible_gpu_devices list from session configuration. std::optional<std::set<int>> allowed_devices;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test_helper.cc
} // Call AddDevices to register the XLA devices. // // It may be worth refactoring out XlaOpRegistry::RegisterCompilationDevice to // make this more direct, but probably not worth it solely for this test. std::vector<std::unique_ptr<Device>> devices; TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(session_options, "", &devices)); GraphOptimizationPassOptions opt_options; opt_options.graph = graph;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 09 19:51:48 UTC 2023 - 3.1K bytes - Viewed (0) -
tensorflow/c/eager/parallel_device/parallel_device_lib.h
// A parallel tensor with scalar integers numbering component devices. std::unique_ptr<ParallelTensor> DeviceIDs(TFE_Context* context, TF_Status* status) const; // The number of devices operations run on. size_t num_underlying_devices() const { return underlying_devices_.size(); } // The devices operations run on. const std::vector<std::string>& underlying_devices() const {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 25 15:21:13 UTC 2023 - 12.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h
// // Input: // Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices. // xla_device_type - The device type that is being targeted. // Output: // Tensorflow Dialect MLIR with Runtime specific ops. All tf_device.cluster // ops are removed. Physical devices are assigned to ops instead of virtual // devices. tensorflow::Status RunLowerClusterToRuntimeOpsPassPipeline(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 21:47:17 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.h
// configuring the persistor used in the DeviceCompiler. Please note that // non-XLA devices aren't supported yet. This is because: // 1. PjRtClient doesn't support data transfer for non-XLA devices yet // 2. Fetching the PjRtClient for non-XLA devices is also not supported yet Status GetOrCreatePjRtDeviceCompilerAndProfiler( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_colocate_composite_resource_ops.mlir
// CHECK-SAME: (%[[ARG0]] as %[[RI_0:[a-z0-9]*]]: tensor<*x!tf_type.resource<tensor<4xf32>>>) tf_device.replicate(%arg0 as %arg1: tensor<*x!tf_type.resource<tensor<4xf32>>>) { _mirrored_variable_indices = [0], devices = {TPU_REPLICATED_CORE_0 = ["/job:worker/replica:0/task:0/device:TPU:0", "/job:worker/replica:0/task:0/device:TPU:1"]}, n = 2 : i32} { // CHECK: %[[RESOURCE_OUT:.*]] = "tf_device.launch"()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/jit/device_context_test.cc
auto device_factory = DeviceFactory::GetFactory(device_type); SessionOptions options; std::vector<std::unique_ptr<Device>> devices; Status s = device_factory->CreateDevices( options, "/job:worker/replica:0/task:0", &devices); device_ = std::move(devices[0]); tensorflow::AllocatorAttributes host_alloc_attr; host_alloc_attr.set_on_host(true);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tpu_cluster_util.cc
std::function<WalkResult(Operation*, tf_device::ClusterOp, std::optional<std::string>)> callback) { mlir::TF::RuntimeDevices devices; if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return failure(); const CallGraph call_graph(module); // symbol_table caches callees in the CallGraph. SymbolTableCollection symbol_table;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 04:50:13 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc
return tensorflow::WrapOpInLaunch(builder, compile_op.getLoc(), compile_op, compilation_device); } // Assigns explicit devices to replicate op. An aliased device is created per // core, and all replica devices per core are grouped together. void AssignDevicesToReplicate( tf_device::ReplicateOp replicate, llvm::ArrayRef<llvm::SmallVector<tensorflow::TPUDeviceAndHost, 8>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 29.7K bytes - Viewed (0)