
Results 41 - 50 of 1,897 for Devices (0.14 sec)

  1. tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc

      static const char* const module_str =
          R"(
    module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:GPU:0"]} {
      func.func @convert_cluster_func(%arg0: tensor<i32>) -> () {
        %2 = "tf_device.parallel_execute"() ({
    
          %3 = "tf_device.cluster_func"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0", func = @func} : (tensor<i32>) -> tensor<i32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 4.3K bytes
    - Viewed (0)
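
    The tf.devices attribute in this snippet lists fully qualified device names of the form /job:worker/replica:0/task:0/device:GPU:0. As a minimal sketch of that naming scheme only (TensorFlow's real parser is DeviceNameUtils::ParseFullName; the helper below is hypothetical):

        #include <iostream>
        #include <sstream>
        #include <string>
        #include <utility>
        #include <vector>

        // Hypothetical illustration: split a full device name into key/value
        // segments. "device:GPU:0" keeps "GPU:0" as the value of key "device".
        std::vector<std::pair<std::string, std::string>> ParseDeviceName(
            const std::string& name) {
          std::vector<std::pair<std::string, std::string>> parts;
          std::stringstream ss(name);
          std::string segment;
          while (std::getline(ss, segment, '/')) {
            if (segment.empty()) continue;
            auto colon = segment.find(':');
            parts.emplace_back(
                segment.substr(0, colon),
                colon == std::string::npos ? "" : segment.substr(colon + 1));
          }
          return parts;
        }

        int main() {
          for (const auto& [key, value] :
               ParseDeviceName("/job:worker/replica:0/task:0/device:GPU:0")) {
            std::cout << key << " = " << value << "\n";  // job = worker, ...
          }
        }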
  2. tensorflow/compiler/jit/flags.cc

                "Switch a device into 'on-demand' mode, where instead of "
                "autoclustering ops are compiled one by one just-in-time."),
    
           Flag("tf_xla_enable_xla_devices",
                &device_flags->tf_xla_enable_xla_devices,
                "Generate XLA_* devices, where placing a computation on such a "
                "device"
                "forces compilation by XLA. Deprecated."),
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 24.5K bytes
    - Viewed (0)
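
    Each Flag entry above binds a command-line name to a field of a flags struct plus a help string. A minimal self-contained sketch of that registration pattern (the BoolFlag type below is a hypothetical stand-in, not TensorFlow's Flag class):

        #include <iostream>
        #include <string>
        #include <vector>

        // Each flag couples a name, a pointer to the backing field, and help text.
        struct BoolFlag {
          std::string name;
          bool* target;  // backing field in some flags struct
          std::string help;
        };

        struct DeviceFlags {
          bool tf_xla_compile_on_demand = false;
          bool tf_xla_enable_xla_devices = false;
        };

        int main() {
          DeviceFlags device_flags;
          std::vector<BoolFlag> flags = {
              {"tf_xla_compile_on_demand", &device_flags.tf_xla_compile_on_demand,
               "Switch a device into 'on-demand' mode."},
              {"tf_xla_enable_xla_devices", &device_flags.tf_xla_enable_xla_devices,
               "Generate XLA_* devices. Deprecated."},
          };
          // As if --tf_xla_enable_xla_devices had been parsed on the command line:
          *flags[1].target = true;
          std::cout << device_flags.tf_xla_enable_xla_devices << "\n";  // prints 1
        }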
  3. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

      }
    }
    
    ParallelDevice::ParallelDevice(const std::vector<std::string>& devices,
                                   bool is_async, int in_flight_nodes_limit)
        : underlying_devices_(devices),
          default_cancellation_manager_(absl::make_unique<CancellationManager>()) {
      device_threads_.reserve(devices.size());
      for (int device_index = 0; device_index < devices.size(); ++device_index) {
        device_threads_.emplace_back(new DeviceThread(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 07:47:20 UTC 2024
    - 25.4K bytes
    - Viewed (0)
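
    The constructor above reserves one DeviceThread per underlying device. A hypothetical simplification of that one-thread-per-device pattern, using std::thread in place of the real DeviceThread:

        #include <iostream>
        #include <memory>
        #include <string>
        #include <thread>
        #include <vector>

        // Each DeviceThread owns a worker bound to one device name.
        class DeviceThread {
         public:
          explicit DeviceThread(std::string device)
              : device_(std::move(device)),
                thread_([this] { std::cout << "worker for " << device_ << "\n"; }) {}
          ~DeviceThread() { thread_.join(); }

         private:
          std::string device_;  // initialized before thread_ starts using it
          std::thread thread_;
        };

        int main() {
          std::vector<std::string> devices = {"/device:GPU:0", "/device:GPU:1"};
          std::vector<std::unique_ptr<DeviceThread>> device_threads;
          device_threads.reserve(devices.size());
          for (const std::string& device : devices) {
            device_threads.emplace_back(std::make_unique<DeviceThread>(device));
          }
        }  // destructors join the workers before main returns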
  4. tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc

                                      std::unique_ptr<Graph>* result) {
      std::vector<std::unique_ptr<Device>> devices;
      devices.push_back(FakeDevice::Make(kDeviceName, DEVICE_GPU));
      devices.push_back(FakeDevice::Make(kHostName, DEVICE_CPU));
    
      std::unique_ptr<DeviceSet> device_set(new DeviceSet());
      for (auto& device : devices) {
        device_set->AddDevice(device.get());
      }
    
      auto graph = std::make_unique<Graph>(OpRegistry::Global());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 18.3K bytes
    - Viewed (0)
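
    The test fixture above splits ownership from membership: a vector of unique_ptr owns the devices while the DeviceSet only holds raw pointers into it. A self-contained sketch of that split, with hypothetical minimal Device and DeviceSet types:

        #include <iostream>
        #include <memory>
        #include <string>
        #include <vector>

        struct Device {
          std::string name;
          std::string type;  // e.g. "CPU" or "GPU"
        };

        class DeviceSet {
         public:
          void AddDevice(Device* device) { devices_.push_back(device); }
          const std::vector<Device*>& devices() const { return devices_; }

         private:
          std::vector<Device*> devices_;  // non-owning
        };

        int main() {
          std::vector<std::unique_ptr<Device>> devices;
          devices.push_back(std::make_unique<Device>(Device{"/device:GPU:0", "GPU"}));
          devices.push_back(std::make_unique<Device>(Device{"/device:CPU:0", "CPU"}));

          DeviceSet device_set;
          for (auto& device : devices) device_set.AddDevice(device.get());
          std::cout << device_set.devices().size() << " devices\n";  // 2 devices
        }

    The owning vector must outlive the DeviceSet, since the set's pointers dangle once the unique_ptrs are destroyed.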
  5. tensorflow/compiler/jit/mark_for_compilation_pass.cc

        const Cluster& cluster_a, const Cluster& cluster_b) {
      DeviceSet devices = cluster_a.devices();
      devices.UnionWith(cluster_b.devices());
    
      TF_ASSIGN_OR_RETURN(
          std::optional<jit::DeviceId> maybe_chosen_device,
          MaybePickDeviceForXla(device_info_cache_, devices,
                                /*allow_mixing_unknown_and_cpu=*/false));
      if (!maybe_chosen_device.has_value()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
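
    The pass above merges two clusters' device sets and then asks whether a single XLA device can serve the union, bailing out when none can. A sketch of that merge-then-pick step, with std::set standing in for DeviceSet and a deliberately simplified picking rule (the real MaybePickDeviceForXla applies richer compatibility checks):

        #include <iostream>
        #include <optional>
        #include <set>
        #include <string>

        // Hypothetical rule for the sketch: succeed only if the union names
        // exactly one device.
        std::optional<std::string> MaybePickDevice(std::set<std::string> a,
                                                   const std::set<std::string>& b) {
          a.insert(b.begin(), b.end());          // devices.UnionWith(...)
          if (a.size() == 1) return *a.begin();  // unambiguous choice
          return std::nullopt;                   // caller rejects the cluster merge
        }

        int main() {
          std::optional<std::string> chosen =
              MaybePickDevice({"/device:GPU:0"}, {"/device:GPU:0"});
          std::cout << (chosen ? *chosen : "no device") << "\n";
        }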
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

    LogicalResult GetTpuDeviceAssignment(
        ClusterOp cluster, ReplicateOp replicate, mlir::ModuleOp module,
        absl::StatusOr<TPUDeviceAssignment>& status_or_tpu_device_assignment) {
      mlir::TF::RuntimeDevices devices;
      if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return failure();
    
      uint32_t num_replicas = replicate.getN();
    
      auto num_cores_per_replica_attr = cluster->getAttrOfType<mlir::IntegerAttr>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 13.9K bytes
    - Viewed (0)
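
    The function above reads num_replicas from the replicate op and num_cores_per_replica from a cluster attribute before building a TPU device assignment. As an illustration of the shape of such an assignment only (the row-major flattening below is an assumption of the sketch, not the real assignment algorithm):

        #include <cstdint>
        #include <iostream>
        #include <vector>

        // num_replicas rows of num_cores_per_replica logical cores, flattened
        // onto a device list in row-major order.
        std::vector<std::vector<int>> AssignCores(uint32_t num_replicas,
                                                  uint32_t num_cores_per_replica) {
          std::vector<std::vector<int>> assignment(num_replicas);
          for (uint32_t r = 0; r < num_replicas; ++r)
            for (uint32_t c = 0; c < num_cores_per_replica; ++c)
              assignment[r].push_back(r * num_cores_per_replica + c);
          return assignment;
        }

        int main() {
          auto assignment =
              AssignCores(/*num_replicas=*/2, /*num_cores_per_replica=*/2);
          std::cout << "replica 1, core 0 -> device " << assignment[1][0]
                    << "\n";  // 2
        }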
  7. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline_refvar.mlir

    // CHECK-NEXT: [[o_chain:%.*]], [[o:%.*]] = tfrt_fallback_async.executeop.seq([[in_chain]]) key(0) cost({{.*}}) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.VarHandleOp"()
    // CHECK-NEXT: [[o_chain_0:%.*]], [[o1:%.*]] = tfrt_fallback_async.executeop.seq([[in_chain]]) key(1) cost({{.*}}) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.ReadVariableOp"([[o]]) {dtype = f32} : 1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  8. pkg/volume/util/hostutil/hostutil.go

    )
    
    // HostUtils defines the set of methods for interacting with paths on a host.
    type HostUtils interface {
    	// DeviceOpened determines if the device (e.g. /dev/sdc) is in use elsewhere
    	// on the system, i.e. still mounted.
    	DeviceOpened(pathname string) (bool, error)
    	// PathIsDevice determines if a path is a device.
    	PathIsDevice(pathname string) (bool, error)
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Feb 28 13:38:40 UTC 2024
    - 4.2K bytes
    - Viewed (0)
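
    PathIsDevice above asks whether a path names a device node. A POSIX C++ analogue, assuming a Linux-like host: stat the path and check for block- or character-device mode bits. (DeviceOpened, which checks whether the device is still mounted elsewhere, needs mount-table inspection and is not shown.)

        #include <iostream>
        #include <sys/stat.h>

        bool PathIsDevice(const char* pathname) {
          struct stat st;
          if (stat(pathname, &st) != 0) return false;  // missing path: not a device
          return S_ISBLK(st.st_mode) || S_ISCHR(st.st_mode);
        }

        int main() {
          std::cout << PathIsDevice("/dev/null") << "\n";  // 1 on a typical Linux host
        }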
  9. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h

    // "replicate_on_last_tile_dim" and "last_tile_dims" should be deducted from the
    // real Tensor dimensions when tiled.
    // For example:
    // f32[8,512](sharding={devices=[1,1,2]0,1 last_tile_dims={REPLICATED}})
    // also means a replicated tensor over all devices.
    //
    // See xla_data.proto for detailed explanations on the fields.
    int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 28 22:18:34 UTC 2024
    - 6K bytes
    - Viewed (0)
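
    The comment above says the trailing tile dimensions that only encode replication are deducted from the real tensor rank. A simplified stand-in for GetDimsFromXLAShardingTiled (which reads xla::OpSharding instead; the arithmetic below is an assumption based on the comment):

        #include <cstddef>
        #include <iostream>
        #include <vector>

        int TiledDims(const std::vector<int>& tile_assignment_dims,
                      size_t num_last_tile_dims, bool replicate_on_last_tile_dim) {
          int dims = static_cast<int>(tile_assignment_dims.size());
          dims -= static_cast<int>(num_last_tile_dims);  // deduct last_tile_dims
          if (replicate_on_last_tile_dim) --dims;        // deduct the replica dim
          return dims;
        }

        int main() {
          // devices=[1,1,2] with last_tile_dims={REPLICATED}: 3 - 1 = 2 real dims,
          // and the 2-way split is pure replication, as in the f32[8,512] example.
          std::cout << TiledDims({1, 1, 2}, /*num_last_tile_dims=*/1,
                                 /*replicate_on_last_tile_dim=*/false)
                    << "\n";  // 2
        }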
  10. src/internal/runtime/atomic/sys_linux_arm.s

    // Use of this source code is governed by a BSD-style
    // license that can be found in the LICENSE file.
    
    #include "textflag.h"
    
    // Linux/ARM atomic operations.
    
    // Because there is so much variation in ARM devices,
    // the Linux kernel provides an appropriate compare-and-swap
    // implementation at address 0xffff0fc0.  Caller sets:
    //	R0 = old value
    //	R1 = new value
    //	R2 = addr
    //	LR = return address
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 2.8K bytes
    - Viewed (0)
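
    The comment above describes the Linux kernel's compare-and-swap user helper at address 0xffff0fc0 (R0 = old value, R1 = new value, R2 = address; returns zero on success). A hedged C++ sketch of calling it; this works only on 32-bit Linux/ARM with kuser helpers enabled, and the integer-to-function-pointer cast is platform-specific:

        #include <cstdint>
        #include <iostream>

        using KernelCmpxchg = int (*)(int32_t oldval, int32_t newval,
                                      volatile int32_t* ptr);
        // Fixed address published by the kernel's kuser helper ABI.
        const KernelCmpxchg kernel_cmpxchg =
            reinterpret_cast<KernelCmpxchg>(0xffff0fc0);

        int main() {
        #if defined(__linux__) && defined(__arm__)
          volatile int32_t value = 41;
          // Retry: the helper reports failure if *ptr != oldval or the
          // underlying exclusive store fails under contention.
          while (kernel_cmpxchg(41, 42, &value) != 0) {
          }
          std::cout << value << "\n";  // 42
        #else
          std::cout << "kuser cmpxchg helper exists only on 32-bit Linux/ARM\n";
        #endif
        }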