Results 31 - 40 of 268 for r2devices (0.25 sec)

  1. tensorflow/compiler/jit/xla_device.h

        // If padded_shape_fn is empty, a default implementation that returns
        // the logical on-device shape without padding is used.
        PaddedShapeFn padded_shape_fn;
    
        // Set of devices to use. This controls which of the devices on the given
        // platform will have resources allocated. For GPUs this will be
        // filled from visible_gpu_devices list from session configuration.
        std::optional<std::set<int>> allowed_devices;
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 13.4K bytes
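
    The excerpt above documents two optional fields of the device options declared in xla_device.h. Below is a minimal sketch of how they might be set, assuming the enclosing struct is tensorflow::XlaDevice::Options (the struct name is not visible in the excerpt) and that the platform and name fields it also requires are filled in elsewhere; include paths follow the usual TensorFlow layout and may vary by version.

        #include <optional>
        #include <set>

        #include "tensorflow/compiler/jit/xla_device.h"

        // Sketch only: restrict resource allocation to two visible devices and
        // rely on the default shape implementation.
        tensorflow::XlaDevice::Options SketchDeviceOptions() {
          tensorflow::XlaDevice::Options options;
          // Leaving padded_shape_fn empty falls back to the default that returns
          // the logical on-device shape without padding (see the comment above).
          options.allowed_devices = std::set<int>{0, 1};  // e.g. two visible GPUs
          return options;
        }
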
  2. tensorflow/compiler/mlir/tensorflow/tests/tpu_colocate_composite_resource_ops.mlir

      // CHECK-SAME: (%[[ARG0]] as %[[RI_0:[a-z0-9]*]]: tensor<*x!tf_type.resource<tensor<4xf32>>>)
      tf_device.replicate(%arg0 as %arg1: tensor<*x!tf_type.resource<tensor<4xf32>>>) {
        _mirrored_variable_indices = [0],
        devices = {TPU_REPLICATED_CORE_0 = ["/job:worker/replica:0/task:0/device:TPU:0", "/job:worker/replica:0/task:0/device:TPU:1"]},
        n = 2 : i32} {
         // CHECK:      %[[RESOURCE_OUT:.*]] = "tf_device.launch"()
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 6.3K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h

    //
    // Input:
    //     Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices.
    //     xla_device_type - The device type that is being targeted.
    // Output:
    //     Tensorflow Dialect MLIR with Runtime specific ops. All tf_device.cluster
    //     ops are removed. Physical devices are assigned to ops instead of virtual
    //     devices.
    tensorflow::Status RunLowerClusterToRuntimeOpsPassPipeline(
    - Last Modified: Tue Oct 31 21:47:17 UTC 2023
    - 2.3K bytes
  4. tensorflow/compiler/jit/xla_platform_info.h

    // configuring the persistor used in the DeviceCompiler. Please note that
    // non-XLA devices aren't supported yet. This is because:
    // 1. PjRtClient doesn't support data transfer for non-XLA devices yet
    // 2. Fetching the PjRtClient for non-XLA devices is also not supported yet
    Status GetOrCreatePjRtDeviceCompilerAndProfiler(
        const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 7.2K bytes
  5. tensorflow/compiler/jit/device_context_test.cc

        auto device_factory = DeviceFactory::GetFactory(device_type);
        SessionOptions options;
        std::vector<std::unique_ptr<Device>> devices;
        Status s = device_factory->CreateDevices(
            options, "/job:worker/replica:0/task:0", &devices);
        device_ = std::move(devices[0]);
    
        tensorflow::AllocatorAttributes host_alloc_attr;
        host_alloc_attr.set_on_host(true);
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.7K bytes
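
    The test fragment above follows a common pattern: look up the registered DeviceFactory for a device type, create devices under a job/task name prefix, and keep the first one. A condensed sketch of that pattern is below; the helper name CreateFirstDevice is illustrative only, and the include paths may differ between TensorFlow versions.

        #include <memory>
        #include <string>
        #include <utility>
        #include <vector>

        #include "tensorflow/core/common_runtime/device.h"
        #include "tensorflow/core/common_runtime/device_factory.h"
        #include "tensorflow/core/platform/errors.h"
        #include "tensorflow/core/public/session_options.h"

        // Sketch: create all devices of `device_type` under the given job/task
        // prefix and hand back the first one, mirroring the test above.
        tensorflow::Status CreateFirstDevice(
            const std::string& device_type,
            std::unique_ptr<tensorflow::Device>* out) {
          tensorflow::DeviceFactory* factory =
              tensorflow::DeviceFactory::GetFactory(device_type);
          if (factory == nullptr) {
            return tensorflow::errors::NotFound("No factory registered for ",
                                                device_type);
          }
          tensorflow::SessionOptions options;
          std::vector<std::unique_ptr<tensorflow::Device>> devices;
          TF_RETURN_IF_ERROR(factory->CreateDevices(
              options, "/job:worker/replica:0/task:0", &devices));
          if (devices.empty()) {
            return tensorflow::errors::NotFound("Factory created no devices for ",
                                                device_type);
          }
          *out = std::move(devices[0]);
          return tensorflow::OkStatus();
        }
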
  6. tensorflow/compiler/mlir/tfrt/tests/runtime_lowering_tpu.mlir

    // RUN: tf-tfrt-opt -tfrt-lower-cluster-to-runtime-ops-tpu -split-input-file -verify-diagnostics %s | FileCheck %s
    
    module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:worker/replica:0/task:0/device:TPU:0"]} {
    
      // CHECK-LABEL: @converts_cluster
      func.func @converts_cluster() {
        // CHECK: %0:2 = "tf_device.launch"() <{{.*}}> ({
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 1.6K bytes
  7. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc

      return tensorflow::WrapOpInLaunch(builder, compile_op.getLoc(), compile_op,
                                        compilation_device);
    }
    
    // Assigns explicit devices to replicate op. An aliased device is created per
    // core, and all replica devices per core are grouped together.
    void AssignDevicesToReplicate(
        tf_device::ReplicateOp replicate,
        llvm::ArrayRef<llvm::SmallVector<tensorflow::TPUDeviceAndHost, 8>>
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 29.7K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/spmd.mlir

    module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU:2", "/job:localhost/replica:0/task:0/device:TPU:3", "/job:localhost/replica:0/task:0/device:TPU:4", "/job:localhost/replica:0/task:0/device:TPU:5", "/job:localhost/replica:0/task:0/device:TPU:6",...
    - Last Modified: Tue Dec 12 04:22:33 UTC 2023
    - 1.5K bytes
  9. tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc

      auto& side_effect_analysis = getAnalysis<mlir::TF::SideEffectAnalysis>();
      // Get runtime devices information from the closest parent module.
      auto module = getOperation();
      mlir::TF::RuntimeDevices devices;
      if (failed(tensorflow::GetDevicesFromOp(module, &devices)))
        return signalPassFailure();
    
      OpBuilder builder(&getContext());
      llvm::SmallVector<mlir::tf_device::ClusterOp, 4> clusters;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.6K bytes
  10. tensorflow/compiler/jit/test_util.cc

      for (const auto& device_name : device_names) {
        device_count->insert({device_name, 1});
      }
    
      std::vector<std::unique_ptr<Device>> devices;
      TF_CHECK_OK(DeviceFactory::AddDevices(
          options, "/job:localhost/replica:0/task:0", &devices));
      device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
    
      OptimizerOptions opts;
      lib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
    - Last Modified: Fri Feb 09 11:36:41 UTC 2024
    - 3.7K bytes
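
    In contrast to the per-factory call in result 5, the helper above registers devices through DeviceFactory::AddDevices and hands ownership to a StaticDeviceMgr. Here is a short sketch of that setup, assuming a single CPU device is requested via the session's device_count map; the helper name MakeDeviceMgr and the counts are illustrative.

        #include <memory>
        #include <utility>
        #include <vector>

        #include "tensorflow/core/common_runtime/device_factory.h"
        #include "tensorflow/core/common_runtime/device_mgr.h"
        #include "tensorflow/core/platform/status.h"
        #include "tensorflow/core/public/session_options.h"

        // Sketch: request one CPU device, register it under the localhost job
        // prefix, and wrap the created devices in a StaticDeviceMgr that owns them.
        std::unique_ptr<tensorflow::StaticDeviceMgr> MakeDeviceMgr() {
          tensorflow::SessionOptions options;
          (*options.config.mutable_device_count())["CPU"] = 1;

          std::vector<std::unique_ptr<tensorflow::Device>> devices;
          TF_CHECK_OK(tensorflow::DeviceFactory::AddDevices(
              options, "/job:localhost/replica:0/task:0", &devices));

          return std::make_unique<tensorflow::StaticDeviceMgr>(std::move(devices));
        }
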