Results 31 - 40 of 805 for Devices (0.13 sec)

  1. tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc

                                      std::unique_ptr<Graph>* result) {
      std::vector<std::unique_ptr<Device>> devices;
      devices.push_back(FakeDevice::Make(kDeviceName, DEVICE_GPU));
      devices.push_back(FakeDevice::Make(kHostName, DEVICE_CPU));
    
      std::unique_ptr<DeviceSet> device_set(new DeviceSet());
      for (auto& device : devices) {
        device_set->AddDevice(device.get());
      }
    
      auto graph = std::make_unique<Graph>(OpRegistry::Global());
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 18.3K bytes - Viewed (0)
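    The excerpt shows a common C++ ownership split: the std::vector of std::unique_ptr owns the devices, while the DeviceSet holds only raw pointers obtained via get(). A minimal standalone sketch of the same pattern (Device and DeviceSet below are simplified stand-ins, not the TensorFlow classes):

      #include <cstddef>
      #include <memory>
      #include <string>
      #include <vector>

      // Simplified stand-ins for the TensorFlow types in the excerpt above.
      struct Device {
        std::string name;
        std::string type;
      };

      // Non-owning view over devices whose lifetime is managed elsewhere.
      class DeviceSet {
       public:
        void AddDevice(Device* device) { devices_.push_back(device); }
        std::size_t size() const { return devices_.size(); }

       private:
        std::vector<Device*> devices_;  // raw pointers: no ownership
      };

      int main() {
        // The vector owns the devices for the duration of the test.
        std::vector<std::unique_ptr<Device>> devices;
        devices.push_back(std::make_unique<Device>(Device{"/device:GPU:0", "GPU"}));
        devices.push_back(std::make_unique<Device>(Device{"/device:CPU:0", "CPU"}));

        // The set merely references the devices, so it must not outlive them.
        DeviceSet device_set;
        for (auto& device : devices) device_set.AddDevice(device.get());
        return device_set.size() == 2 ? 0 : 1;
      }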
  2. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

    LogicalResult GetTpuDeviceAssignment(
        ClusterOp cluster, ReplicateOp replicate, mlir::ModuleOp module,
        absl::StatusOr<TPUDeviceAssignment>& status_or_tpu_device_assignment) {
      mlir::TF::RuntimeDevices devices;
      if (failed(tensorflow::GetDevicesFromOp(module, &devices))) return failure();
    
      uint32_t num_replicas = replicate.getN();
    
      auto num_cores_per_replica_attr = cluster->getAttrOfType<mlir::IntegerAttr>(
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 18:52:07 UTC 2024 - 13.9K bytes - Viewed (0)
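    The `if (failed(...)) return failure();` lines use MLIR's LogicalResult idiom: a lightweight success/failure value propagated up the call chain instead of exceptions. A hand-rolled sketch of the idea (this LogicalResult is illustrative, not mlir::LogicalResult):

      #include <cstdio>

      // Hand-rolled stand-in for mlir::LogicalResult: success or failure, no payload.
      class LogicalResult {
       public:
        static LogicalResult success() { return LogicalResult(true); }
        static LogicalResult failure() { return LogicalResult(false); }
        bool succeeded() const { return ok_; }

       private:
        explicit LogicalResult(bool ok) : ok_(ok) {}
        bool ok_;
      };

      LogicalResult success() { return LogicalResult::success(); }
      LogicalResult failure() { return LogicalResult::failure(); }
      bool failed(LogicalResult r) { return !r.succeeded(); }

      LogicalResult getDevices(bool have_devices) {
        return have_devices ? success() : failure();
      }

      LogicalResult getTpuDeviceAssignment(bool have_devices) {
        // Propagate failure to the caller instead of throwing.
        if (failed(getDevices(have_devices))) return failure();
        return success();
      }

      int main() {
        std::printf("with devices ok: %d\n", !failed(getTpuDeviceAssignment(true)));
        std::printf("without devices ok: %d\n", !failed(getTpuDeviceAssignment(false)));
        return 0;
      }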
  3. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

      func::FuncOp func = getOperation();
    
      // Get runtime devices information from the closest parent module.
      RuntimeDevices devices;
      if (failed(::tensorflow::GetDevicesFromOp(func->getParentOfType<ModuleOp>(),
                                                &devices)))
        return signalPassFailure();
    
      // If there is no runtime device information and data format is not explicitly
      // forced, there is nothing to do.
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0)
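    `getParentOfType<ModuleOp>()` climbs the IR's parent links until it reaches an enclosing module. Stripped of MLIR, that is just an upward walk; a sketch with a hypothetical Node type:

      #include <cstdio>

      enum class Kind { Module, Func, Block };

      // Hypothetical IR node with a parent link, standing in for MLIR operations.
      struct Node {
        Kind kind;
        Node* parent;  // nullptr at the root
      };

      // Walk upward until a node of the requested kind is found, like
      // getParentOfType<ModuleOp>() in the excerpt above.
      Node* getParentOfKind(Node* node, Kind kind) {
        for (Node* p = node->parent; p != nullptr; p = p->parent)
          if (p->kind == kind) return p;
        return nullptr;
      }

      int main() {
        Node module{Kind::Module, nullptr};
        Node func{Kind::Func, &module};
        Node block{Kind::Block, &func};
        std::printf("found module: %d\n",
                    getParentOfKind(&block, Kind::Module) == &module);
        return 0;
      }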
  4. tensorflow/compiler/mlir/tensorflow/tests/replicate_invariant_op_hoisting.mlir

      %0:8 = tf_device.replicate([%arg0, %arg1] as %ri: tensor<*xf32>) {devices = {TPU_REPLICATED_CORE_0 = ["/device:TPU:0", "/device:TPU:1"]}, n = 2: i32} {
        %1 = "tf.Shape"(%ri) {device = "", T = "tfdtype$DT_FLOAT", out_type = "tfdtype$DT_INT32"} : (tensor<*xf32>) -> tensor<?xi32>
        %2 = "tf.opA"(%1) {device = "TPU_REPLICATED_CORE_0"} : (tensor<?xi32>) -> tensor<*xi32>
        %3 = "tf_device.launch"() ({
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 11.9K bytes - Viewed (0)
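    The test targets replicate-invariant op hoisting: an op inside tf_device.replicate whose result is identical for every replica can be computed once outside the replicated region, much like loop-invariant code motion. A scalar before/after sketch (plain C++, not the pass itself):

      #include <cstdio>
      #include <vector>

      // Before hoisting: the "shape" of the replica-invariant input is
      // recomputed once per replica, even though it never changes.
      int sumPerReplica(const std::vector<int>& input, int num_replicas) {
        int total = 0;
        for (int r = 0; r < num_replicas; ++r) {
          int shape = static_cast<int>(input.size());  // same value every iteration
          total += shape;
        }
        return total;
      }

      // After hoisting: the invariant computation moves outside the replicated
      // region and its result is shared by all replicas.
      int sumPerReplicaHoisted(const std::vector<int>& input, int num_replicas) {
        int shape = static_cast<int>(input.size());  // computed once
        int total = 0;
        for (int r = 0; r < num_replicas; ++r) total += shape;
        return total;
      }

      int main() {
        std::vector<int> input{1, 2, 3};
        std::printf("%d %d\n", sumPerReplica(input, 2), sumPerReplicaHoisted(input, 2));
        return 0;
      }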
  5. tensorflow/compiler/jit/xla_launch_util_gpu_test.cc

        // Set flag to use PJRT for device compilation and execution.
        auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
        rollout_config.enabled_for_xla_launch_ = true;
        rollout_config.enabled_for_compile_on_demand_ = true;
        rollout_config.enabled_for_gpu_ = true;
    
        // Set flag to enable using XLA devices. PJRT currently is only supported
        // for XLA devices.
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0)
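    The flags stage a rollout: the PJRT path is taken only when the matching booleans are enabled. A generic sketch of this gating style (the struct and field names are illustrative, not TensorFlow's GetXlaOpsCommonFlags API):

      #include <cstdio>

      // Illustrative rollout configuration; mirrors the shape of the flags in
      // the excerpt, not TensorFlow's actual struct.
      struct RolloutConfig {
        bool enabled_for_xla_launch = false;
        bool enabled_for_compile_on_demand = false;
        bool enabled_for_gpu = false;
      };

      // Pick an execution path based on the staged flags.
      const char* ChooseRuntime(const RolloutConfig& cfg, bool is_gpu) {
        if (cfg.enabled_for_xla_launch && (!is_gpu || cfg.enabled_for_gpu))
          return "PJRT";
        return "legacy";
      }

      int main() {
        RolloutConfig cfg;
        cfg.enabled_for_xla_launch = true;
        cfg.enabled_for_gpu = true;
        std::printf("%s\n", ChooseRuntime(cfg, /*is_gpu=*/true));  // prints PJRT
        return 0;
      }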
  6. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

        const auto sharding_type = sharding.type();
        if (sharding_type == xla::OpSharding::OTHER) {
          for (const auto& device : sharding.tile_assignment_devices()) {
            CHECK(device >= 0 && device < input_mappings.size());
            input_mappings[device].push_back(idx);
          }
        } else if (sharding_type == xla::OpSharding::REPLICATED) {
          for (auto& input : input_mappings) input.push_back(idx);
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 21:28:13 UTC 2024 - 34K bytes - Viewed (0)
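    The branch distinguishes two sharding kinds: OTHER (tiled) routes input idx only to the devices named in the tile assignment, while REPLICATED routes it to every device. A self-contained sketch of building that mapping (simplified; no xla::OpSharding involved):

      #include <cassert>
      #include <cstdio>
      #include <vector>

      enum class ShardingType { kOther, kReplicated };

      // Append input `idx` to the per-device input lists, mimicking the branch
      // in the excerpt above.
      void MapInput(ShardingType type, const std::vector<int>& tile_devices,
                    int idx, std::vector<std::vector<int>>& input_mappings) {
        if (type == ShardingType::kOther) {
          // Tiled: only the devices in the tile assignment receive the input.
          for (int device : tile_devices) {
            assert(device >= 0 && device < static_cast<int>(input_mappings.size()));
            input_mappings[device].push_back(idx);
          }
        } else {
          // Replicated: every device receives the input.
          for (auto& input : input_mappings) input.push_back(idx);
        }
      }

      int main() {
        std::vector<std::vector<int>> input_mappings(4);
        MapInput(ShardingType::kOther, {0, 2}, /*idx=*/7, input_mappings);
        MapInput(ShardingType::kReplicated, {}, /*idx=*/8, input_mappings);
        std::printf("device 0 inputs: %zu\n", input_mappings[0].size());  // 2
        std::printf("device 1 inputs: %zu\n", input_mappings[1].size());  // 1
        return 0;
      }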
  7. tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc

    std::string GetHost(Operation *op) {
      std::string device = "";
      if (StringAttr attr = op->getAttrOfType<StringAttr>(kDeviceAttr)) {
        device = attr.getValue().str();
      }
      return GetHost(device);
    }
    
    // The device is considered to be on the localhost iff one of the following is
    // true:
    // 1) None of the job/replica/task is specified in the device name.
    // 2) The job/replica/task in the device name are explicitly specified as
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 13.4K bytes - Viewed (0)
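    GetHost reduces a fully qualified TensorFlow device name such as /job:worker/replica:0/task:1/device:CPU:0 to its job/replica/task prefix, which identifies the host; an empty result is treated as localhost. A rough string-based sketch (the real code parses device names properly; this version is only illustrative):

      #include <cstdio>
      #include <string>

      // Illustrative: return the job/replica/task prefix of a device name,
      // i.e. everything before "/device:". An empty result means no host was
      // specified, which the pass treats as localhost.
      std::string GetHost(const std::string& device) {
        std::string::size_type pos = device.find("/device:");
        if (pos == std::string::npos) return "";
        return device.substr(0, pos);
      }

      int main() {
        std::printf("[%s]\n",
                    GetHost("/job:worker/replica:0/task:1/device:CPU:0").c_str());
        // prints [/job:worker/replica:0/task:1]
        std::printf("[%s]\n", GetHost("").c_str());  // prints [] -> localhost
        return 0;
      }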
  8. tensorflow/c/eager/c_api_experimental.cc

      // in an initialized context.
      for (auto d = devices.begin(); d != devices.end();) {
        if (absl::StrContains(d->get()->name(), "CPU:0")) {
          d = devices.erase(d);
        } else {
          ++d;
        }
      }
    
      status->status = tensorflow::unwrap(ctx)->AddDevices(std::move(devices));
    }
    
    void TFE_InsertConfigKeyValue(TFE_Context* ctx, const char* key,
                                  const char* value, TF_Status* status) {
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 11 23:52:39 UTC 2024 - 35.9K bytes - Viewed (0)
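    The loop is the standard erase-while-iterating idiom: vector::erase invalidates the erased iterator but returns the next valid one, so the iterator advances only in the branch that does not erase. A minimal reproduction, with the C++20 std::erase_if equivalent noted in a comment:

      #include <cstdio>
      #include <string>
      #include <vector>

      int main() {
        std::vector<std::string> devices{"GPU:0", "CPU:0", "GPU:1"};

        // Erase-while-iterating: advance only when nothing was erased, because
        // erase() invalidates the current iterator and returns the next one.
        for (auto d = devices.begin(); d != devices.end();) {
          if (d->find("CPU:0") != std::string::npos) {
            d = devices.erase(d);
          } else {
            ++d;
          }
        }

        // C++20 one-liner with the same effect:
        // std::erase_if(devices, [](const std::string& n) {
        //   return n.find("CPU:0") != std::string::npos;
        // });

        for (const auto& name : devices) std::printf("%s\n", name.c_str());
        return 0;
      }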
  9. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_collective.cc

                                    DenseIntElementsAttr replica_groups,
                                    Operation* op) {
      // Use special group_key 0 to represent "all available devices". This
      // shall resolve to a DeviceAssignment that includes all devices intended for
      // replica_groups.
      IntegerAttr group_size = builder.getI32IntegerAttr(replica_groups.size());
      IntegerAttr group_key = builder.getI32IntegerAttr(0);
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16K bytes - Viewed (0)
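    In TensorFlow collectives, a (group_key, group_size) pair identifies which devices participate in an op, and the excerpt reserves group_key 0 for "all available devices". A small illustrative sketch of grouping devices by key (plain C++, not the TF collective runtime):

      #include <cstdint>
      #include <cstdio>
      #include <map>
      #include <string>
      #include <vector>

      // Illustrative registry: devices that announce the same group_key form
      // one collective group; key 0 stands for "all available devices".
      int main() {
        std::map<int32_t, std::vector<std::string>> groups;
        groups[0] = {"/device:TPU:0", "/device:TPU:1", "/device:TPU:2",
                     "/device:TPU:3"};                   // key 0: everyone
        groups[1] = {"/device:TPU:0", "/device:TPU:1"};  // a smaller subgroup

        for (const auto& [group_key, members] : groups) {
          // group_size must match the participant count for the op to resolve.
          std::printf("group_key=%d group_size=%zu\n", group_key, members.size());
        }
        return 0;
      }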
  10. tensorflow/compiler/mlir/tensorflow/tests/tpu-dynamic-layout-pass.mlir

      // CHECK-DAG: %[[COPY1:.*]] = "tf.TPUCopyWithLayout"(%[[ITER0]]#1, %[[LAYOUT1]]) {device = "/device:TPU:0"}
      // CHECK-DAG: %[[COPY2:.*]] = "tf.TPUCopyWithLayout"(%[[ITER1]]#0, %[[LAYOUT0]]) {device = "/device:TPU:1"}
      // CHECK-DAG: %[[COPY3:.*]] = "tf.TPUCopyWithLayout"(%[[ITER1]]#1, %[[LAYOUT1]]) {device = "/device:TPU:1"}
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 29.6K bytes - Viewed (0)
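    CHECK-DAG lets FileCheck match the listed lines in any order, which fits the three per-device copies above since the compiler may emit them in any sequence. A tiny C++ stand-in for that any-order matching (simplified: real FileCheck also tracks match positions):

      #include <algorithm>
      #include <cstdio>
      #include <string>
      #include <vector>

      // Stand-in for FileCheck's CHECK-DAG: every pattern must occur somewhere
      // in the output, but the order of the patterns does not matter.
      bool CheckDag(const std::vector<std::string>& output,
                    const std::vector<std::string>& patterns) {
        for (const auto& pattern : patterns) {
          auto hit = std::find_if(output.begin(), output.end(),
                                  [&](const std::string& line) {
                                    return line.find(pattern) != std::string::npos;
                                  });
          if (hit == output.end()) return false;  // pattern never matched
        }
        return true;
      }

      int main() {
        std::vector<std::string> output{
            "tf.TPUCopyWithLayout ... device = \"/device:TPU:1\"",
            "tf.TPUCopyWithLayout ... device = \"/device:TPU:0\""};
        std::vector<std::string> patterns{"/device:TPU:0", "/device:TPU:1"};
        std::printf("matched: %d\n", CheckDag(output, patterns));  // matched: 1
        return 0;
      }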