Results 11 - 20 of 137 for devices (0.11 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/replicate_to_island.cc

          op->erase();
          return WalkResult::advance();
        }
    
        if (!devices.has_value()) return WalkResult::advance();
    
        // Map aliased devices to explicit devices based on replica.
        if (auto launch = dyn_cast<tf_device::LaunchOp>(op))
          if (auto device_by_replica = devices.value().get(launch.getDevice()))
            launch->setAttr(
                kDeviceAttr,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jul 24 21:01:40 UTC 2023
    - 16.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/replicate_to_island.mlir

    // CHECK: "tf.opA"
    // device = "CORE_0"
    // CHECK: _parallel_execution_ids = "r0:1"
    
    
    // Tests devices are not remapped if device is not in replicate devices.
    // CHECK-LABEL: func @no_override_device
    func.func @no_override_device() {
      tf_executor.graph {
        %0 = tf_executor.island {
          tf_device.replicate {n = 2 : i32, devices = {CORE_0 = ["/CPU:0", "/GPU:1"]}} {
            "tf_device.launch"() ({
              "tf.opA"() : () -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 15.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td

    The region held by this operation represents a computation that is replicated
    across multiple devices. The number of replications is based on the `n`
    attribute. Explicit devices can be populated in the `devices` attribute, which
    must be a mapping from a device alias to a list of explicit or aliased device
    names from the outer scope. The device name map specifies the devices on which
    the replicated ops inside tf_device.replicate will be executed.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 23:53:20 UTC 2024
    - 14.8K bytes
    - Viewed (0)
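    A minimal, standalone sketch (plain C++ containers, not the TensorFlow/MLIR
    API) of the `devices`-attribute semantics described in result 3 and exercised
    in results 1 and 2: each device alias maps to one concrete device per replica,
    and an op referring to an alias resolves to the device of its own replica.
    The container types, function, and device strings are illustrative only.

        #include <iostream>
        #include <map>
        #include <string>
        #include <vector>

        int main() {
          // Illustrative alias map, mirroring result 2's test case:
          //   devices = {CORE_0 = ["/CPU:0", "/GPU:1"]} with n = 2.
          // Each alias maps to one concrete device per replica.
          const std::map<std::string, std::vector<std::string>> devices = {
              {"CORE_0", {"/CPU:0", "/GPU:1"}}};
          const int n = 2;  // number of replications

          // Resolving an aliased device for a given replica picks the replica-th
          // entry, which is what the replicate_to_island pass in result 1 does
          // when it rewrites launch ops with explicit devices.
          for (int replica = 0; replica < n; ++replica) {
            std::cout << "replica " << replica << ": CORE_0 -> "
                      << devices.at("CORE_0")[replica] << "\n";
          }
          return 0;
        }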
  4. pkg/volume/fc/fc_util.go

    			return err
    		}
    	} else {
    		// Add single devicepath to devices
    		devices = append(devices, dstPath)
    	}
    	klog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices)
    	var lastErr error
    	for _, device := range devices {
    		err := util.detachFCDisk(c.io, c.exec, device)
    		if err != nil {
    			klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Sep 16 11:12:06 UTC 2022
    - 12.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc

     private:
      void runOnOperation() override;
    
      // Given a func and targeted devices, we will try to clone the func and
      // transform/optimize for those devices.
      // This will only happen if the whole subgraph can be supported by the target
      // or can be supported after some transformations.
      void GetAlternativeGraphForFunc(ArrayRef<std::string> devices,
                                      func::FuncOp func, ModuleOp module,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 12.3K bytes
    - Viewed (0)
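    A schematic, standalone sketch of the flow the comment in result 5 describes:
    an alternative subgraph is cloned and optimized for a target device only when
    the whole subgraph is supported, either directly or after some transformations.
    The predicates and device names below are hypothetical stand-ins; the real TAC
    checks operate on MLIR func::FuncOp and ModuleOp.

        #include <iostream>
        #include <string>
        #include <vector>

        // Hypothetical support queries standing in for the real TAC checks.
        bool WholeSubgraphSupported(const std::string& device) { return device == "GPU"; }
        bool SupportedAfterTransform(const std::string& device) { return device == "CPU"; }

        int main() {
          const std::vector<std::string> devices = {"GPU", "CPU", "DSP"};
          for (const std::string& device : devices) {
            // Clone and specialize only when the whole subgraph is supported by
            // the target, directly or after some transformations.
            if (WholeSubgraphSupported(device) || SupportedAfterTransform(device)) {
              std::cout << "clone and optimize an alternative subgraph for " << device << "\n";
            } else {
              std::cout << "skip " << device << ": whole subgraph not supported\n";
            }
          }
          return 0;
        }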
  6. tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc

                lhs_device_attr.getValue() == rhs_device_attr.getValue());
      };
    
      // Check if tf_executor.NextIteration.Source/tf_executor.NextIteration.Sink
      // pair has matching devices or no devices.
      if (auto source = llvm::dyn_cast<tf_executor::NextIterationSourceOp>(op)) {
        return ops_have_same_device(source, source.GetSink());
      } else if (auto sink = llvm::dyn_cast<tf_executor::NextIterationSinkOp>(op)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11K bytes
    - Viewed (0)
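    A small standalone sketch of the "matching devices or no devices" check that
    result 6 applies to tf_executor.NextIteration.Source/Sink pairs, with
    std::optional standing in for a possibly-absent device attribute. The function
    name and device strings are illustrative, not the pass's actual helpers.

        #include <iostream>
        #include <optional>
        #include <string>

        // True when both ops carry the same device, or neither carries one.
        bool OpsHaveSameDevice(const std::optional<std::string>& lhs,
                               const std::optional<std::string>& rhs) {
          if (!lhs) return !rhs.has_value();          // both unassigned
          return rhs.has_value() && *lhs == *rhs;     // both assigned, identical
        }

        int main() {
          std::cout << OpsHaveSameDevice("/device:TPU:0", "/device:TPU:0") << "\n";  // 1
          std::cout << OpsHaveSameDevice(std::nullopt, "/device:TPU:0") << "\n";     // 0
          std::cout << OpsHaveSameDevice(std::nullopt, std::nullopt) << "\n";        // 1
          return 0;
        }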
  7. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_variable_runtime_reformatting.cc

      if (execute_arg_to_outer_args.empty()) return false;
    
      // Extract the replicated devices.
      auto devices_attr = replicate.getDevices();
      if (!devices_attr) return false;
    
      auto device_map = devices_attr.value();
      llvm::SmallDenseMap<llvm::StringRef, llvm::SmallVector<StringRef, 4>> devices;
      devices.reserve(device_map.size());
    
      for (auto it : device_map) {
        auto device_alias = it.getName().strref();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_device.h

        // If padded_shape_fn is empty, a default implementation that returns
        // the logical on-device shape without padding is used.
        PaddedShapeFn padded_shape_fn;
    
        // Set of devices to use. This controls which of the devices on the given
        // platform will have resources allocated. For GPUs this will be
        // filled from visible_gpu_devices list from session configuration.
        std::optional<std::set<int>> allowed_devices;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 13.4K bytes
    - Viewed (0)
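    A hedged sketch of how the allowed_devices option in result 8 constrains which
    devices get resources. The enclosing struct name is not shown in the snippet,
    so a stand-in struct is used here; only the std::optional<std::set<int>> field
    mirrors the header.

        #include <iostream>
        #include <optional>
        #include <set>

        // Illustrative stand-in for the options struct declared in xla_device.h.
        struct DeviceCreationOptions {
          std::optional<std::set<int>> allowed_devices;
        };

        int main() {
          DeviceCreationOptions options;
          // Restrict resource allocation to devices 0 and 1 on the platform; for
          // GPUs this corresponds to the visible_gpu_devices list mentioned in
          // the comment above.
          options.allowed_devices = std::set<int>{0, 1};

          // Leaving the optional unset means every device on the platform is
          // eligible for resource allocation.
          if (options.allowed_devices) {
            std::cout << "allowed devices:";
            for (int id : *options.allowed_devices) std::cout << " " << id;
            std::cout << "\n";
          }
          return 0;
        }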
  9. tensorflow/c/eager/parallel_device/parallel_device_lib.h

      // A parallel tensor with scalar integers numbering component devices.
      std::unique_ptr<ParallelTensor> DeviceIDs(TFE_Context* context,
                                                TF_Status* status) const;
    
      // The number of devices operations run on.
      size_t num_underlying_devices() const { return underlying_devices_.size(); }
    
      // The devices operations run on.
      const std::vector<std::string>& underlying_devices() const {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 25 15:21:13 UTC 2023
    - 12.9K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc

      return tensorflow::WrapOpInLaunch(builder, compile_op.getLoc(), compile_op,
                                        compilation_device);
    }
    
    // Assigns explicit devices to replicate op. An aliased device is created per
    // core, and all replica devices per core are grouped together.
    void AssignDevicesToReplicate(
        tf_device::ReplicateOp replicate,
        llvm::ArrayRef<llvm::SmallVector<tensorflow::TPUDeviceAndHost, 8>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 29.7K bytes
    - Viewed (0)
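    A standalone sketch (not the tpu_rewrite pass itself) of the grouping the
    comment in result 10 describes: one aliased device is created per logical
    core, and its device list collects that core's device from every replica.
    The alias names and device strings below are illustrative.

        #include <cstddef>
        #include <iostream>
        #include <map>
        #include <string>
        #include <vector>

        int main() {
          // devices[replica][core]: 2 replicas, 2 logical cores per replica.
          const std::vector<std::vector<std::string>> devices = {
              {"/TPU:0", "/TPU:1"},   // replica 0
              {"/TPU:2", "/TPU:3"}};  // replica 1

          // One alias per core; each alias's list gathers that core's device
          // from all replicas.
          std::map<std::string, std::vector<std::string>> devices_by_alias;
          const std::size_t num_cores = devices.front().size();
          for (std::size_t core = 0; core < num_cores; ++core) {
            const std::string alias = "CORE_" + std::to_string(core);  // illustrative alias
            for (const auto& replica_devices : devices)
              devices_by_alias[alias].push_back(replica_devices[core]);
          }

          for (const auto& [alias, devs] : devices_by_alias) {
            std::cout << alias << " =";
            for (const auto& d : devs) std::cout << " " << d;
            std::cout << "\n";
          }
          return 0;
        }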