Results 81 - 90 of 264 for r2devices (0.21 sec)

  1. pkg/kubelet/volumemanager/reconciler/reconciler.go

    	rc.mountOrAttachVolumes()
    
    	// Unmount volumes only when DSW and ASW are fully populated, to prevent unmounting a volume
    	// that is still needed but has not yet reached the DSW.
    	if readyToUnmount {
    		// Ensure devices that should be detached/unmounted are detached/unmounted.
    		rc.unmountDetachDevices()
    
    		// Clean up any orphan volumes that failed reconstruction.
    		rc.cleanOrphanVolumes()
    	}
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue May 21 10:23:12 UTC 2024
    - 2.1K bytes
    - Viewed (0)
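
    A minimal Go sketch of the unmount gating shown in this excerpt; worldState
    and reconcile are hypothetical names, and only the populated flag is modeled:

        package main

        import "fmt"

        // worldState stands in for the kubelet's desired/actual state-of-world
        // caches (DSW/ASW); only the populated flag matters for the gate.
        type worldState struct{ populated bool }

        // reconcile mirrors the gating above: mounting always runs, but
        // unmounting waits until both caches are fully populated, so a volume
        // that has not yet reached the DSW is never unmounted by mistake.
        func reconcile(dsw, asw worldState) {
        	fmt.Println("mountOrAttachVolumes: always runs")
        	if readyToUnmount := dsw.populated && asw.populated; readyToUnmount {
        		fmt.Println("unmountDetachDevices: detach/unmount stale devices")
        		fmt.Println("cleanOrphanVolumes: drop volumes that failed reconstruction")
        	}
        }

        func main() {
        	reconcile(worldState{false}, worldState{true}) // skips unmount
        	reconcile(worldState{true}, worldState{true})  // performs unmount
        }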
  2. tensorflow/compiler/mlir/tensorflow/tests/tpu-annotate-dynamic-shape-inputs.mlir

    // RUN: tf-opt -split-input-file -verify-diagnostics -tf-tpu-annotate-dynamic-shape-inputs %s | FileCheck %s
    
    // Test that the inputs of the cluster func are annotated as dynamically shaped.
    
    module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:worker/replica:0/task:0/device:TPU:0"]} {
       func.func @main(
          %arg0: tensor<2048xi64> {tf.device = "/job:localhost/replica:0/task:0/device:CPU:0"},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 2.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/tpu-multiple-while-body-func.mlir

        "func.return"(%2) : (tensor<*xi1>) -> ()
      }) {sym_name = "main_while_cond_4225140", sym_visibility = "private", tf._input_shapes = [#tf_type.shape<>], function_type = (tensor<i32>) -> tensor<*xi1>} : () -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 21:23:47 UTC 2024
    - 2.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu_tail_with_tobool_op.mlir

    // host-device communication.
    // CHECK: tf._TPUCompileMlir
    // CHECK-SAME: tf.Rank
    // CHECK-NOT: tf._XlaHostComputeMlir
    // CHECK-NOT: tf._XlaRecvAtHost
    // CHECK-NOT: tf._XlaSendFromHost
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 21:23:47 UTC 2024
    - 2.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

          location, concat_dim_type, concat_dimension_attr);
    
      // Correctly set the output shape of the concat op when the output shape
      // is statically known. Since the shape of the TPUExecute op must be the
      // same across logical devices, we refer to the shape of the 0th logical
      // device's computation output.
      mlir::Type output_type;
      auto input_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
    
      if (input_type.hasRank()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
    - Viewed (0)
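
    A hedged Go sketch of the shape logic the comment above describes: when
    every input shape is statically known, the concat output copies the first
    input's dimensions except the concat dimension, which sums across inputs
    (function and variable names are illustrative):

        package main

        import "fmt"

        // concatShape computes the statically known output shape of a concat
        // over inputs of known rank: every dimension matches the first input,
        // except concatDim, which is the sum of the inputs' sizes there.
        func concatShape(inputs [][]int64, concatDim int) []int64 {
        	out := append([]int64(nil), inputs[0]...)
        	out[concatDim] = 0
        	for _, shape := range inputs {
        		out[concatDim] += shape[concatDim]
        	}
        	return out
        }

        func main() {
        	// Two per-device outputs of shape [2, 3] concatenated on dim 0 -> [4, 3].
        	fmt.Println(concatShape([][]int64{{2, 3}, {2, 3}}, 0))
        }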
  6. okhttp/src/main/kotlin/okhttp3/internal/platform/android/StandardAndroidSocketAdapter.kt

    import okhttp3.internal.readFieldOrNull
    
    /**
     * Base Android reflection-based SocketAdapter for the built-in Android SSLSocket.
     *
     * It's assumed to always be present with known class names on Android devices, so we build
     * optimistically via [buildIfSupported]. But it also doesn't assume a compile-time API.
     */
    class StandardAndroidSocketAdapter(
      sslSocketClass: Class<in SSLSocket>,
    Registered: Sun Jun 16 04:42:17 UTC 2024
    - Last Modified: Mon Jan 08 01:13:22 UTC 2024
    - 2.7K bytes
    - Viewed (0)
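
    The doc comment above describes optimistic construction: assume the
    platform classes exist, but fail soft when they do not. A small Go analog
    of that buildIfSupported pattern (all names, including the class-name
    constant, are assumptions for illustration):

        package main

        import "fmt"

        // adapter is a stand-in for the reflection-based socket adapter.
        type adapter struct{ className string }

        // buildIfSupported constructs the adapter optimistically: if the
        // expected platform class cannot be found, it returns nil instead of
        // failing hard, so callers simply skip the adapter at runtime.
        func buildIfSupported(classPresent func(string) bool) *adapter {
        	const className = "com.android.org.conscrypt.OpenSSLSocketImpl" // assumed name
        	if !classPresent(className) {
        		return nil
        	}
        	return &adapter{className: className}
        }

        func main() {
        	lookup := func(string) bool { return true } // pretend the class exists
        	if a := buildIfSupported(lookup); a != nil {
        		fmt.Println("using adapter for", a.className)
        	}
        }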
  7. tensorflow/compiler/mlir/tensorflow/tests/replicate_invariant_op_hoisting.mlir

    func.func @do_not_hoist_ops_with_virtual_device(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) {
      %0:8 = tf_device.replicate([%arg0, %arg1] as %ri: tensor<*xf32>) {devices = {TPU_REPLICATED_CORE_0 = ["/device:TPU:0", "/device:TPU:1"]}, n = 2: i32} {
        %1 = "tf.Shape"(%ri) {device = "", T = "tfdtype$DT_FLOAT", out_type = "tfdtype$DT_INT32"} : (tensor<*xf32>) -> tensor<?xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 11.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_launch_util_gpu_test.cc

        rollout_config.enabled_for_xla_launch_ = true;
        rollout_config.enabled_for_compile_on_demand_ = true;
        rollout_config.enabled_for_gpu_ = true;
    
    // Set flag to enable using XLA devices. PJRT is currently supported only
    // for XLA devices.
        GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
    
        // Add and setup the GPU device.
        auto device_type = DeviceType(DEVICE_GPU);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 10K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc

      llvm::SmallVector<Value, 4> inputs;
      // The result values of the function.
      llvm::SmallVector<Value, 4> results;
      // The devices of the input values. It should have the same size as inputs.
      llvm::SmallVector<std::string, 4> input_devices;
      // The devices of the result values. It should have the same size as results.
      llvm::SmallVector<std::string, 4> result_devices;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 13.4K bytes
    - Viewed (0)
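
    The fields above keep device lists parallel to value lists. A hypothetical
    Go analog of that layout, with a helper that preserves the size invariant
    the comments call out:

        package main

        import "fmt"

        // funcMetadata mirrors the parallel-vector layout: inputDevices[i] is
        // the device of inputs[i], and likewise for results/resultDevices.
        type funcMetadata struct {
        	inputs        []string // stand-ins for the function's input values
        	results       []string // stand-ins for the function's result values
        	inputDevices  []string // must stay the same length as inputs
        	resultDevices []string // must stay the same length as results
        }

        // addInput grows both parallel slices together, keeping the invariant.
        func (m *funcMetadata) addInput(value, device string) {
        	m.inputs = append(m.inputs, value)
        	m.inputDevices = append(m.inputDevices, device)
        }

        func main() {
        	var m funcMetadata
        	m.addInput("%arg0", "/job:worker/replica:0/task:0/device:CPU:0")
        	fmt.Println(len(m.inputs) == len(m.inputDevices)) // true
        }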
  10. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_collective.cc

                                    DenseIntElementsAttr replica_groups,
                                    Operation* op) {
      // Use special group_key 0 to represent "all available devices". This
      // shall resolve to a DeviceAssignment that includes all devices intended for
      // replica_groups.
      IntegerAttr group_size = builder.getI32IntegerAttr(replica_groups.size());
      IntegerAttr group_key = builder.getI32IntegerAttr(0);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 16K bytes
    - Viewed (0)
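
    A sketch of the attribute choice in this snippet: group_key 0 is the
    special value meaning "all available devices", and group_size comes from
    the flattened element count of the replica groups (the Go names are
    illustrative, standing in for the MLIR attribute builders):

        package main

        import "fmt"

        // groupAttrs returns the collective's group_size and group_key. The
        // size counts every element across the replica groups, and group_key 0
        // asks for a DeviceAssignment covering all available devices.
        func groupAttrs(replicaGroups [][]int32) (groupSize, groupKey int32) {
        	for _, g := range replicaGroups {
        		groupSize += int32(len(g))
        	}
        	return groupSize, 0
        }

        func main() {
        	size, key := groupAttrs([][]int32{{0, 1}, {2, 3}})
        	fmt.Printf("group_size=%d group_key=%d\n", size, key)
        }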