Results 41 - 50 of 3,466 for Devices (0.16 sec)

  1. README.md

    jcifs-ng will be the right choice for many users.
    However, there are a lot of SMB devices in the world, and some of them only work with the old jcifs library.
    If you need to support many SMB devices, the CodeLibs jcifs library will be helpful (see the usage sketch after this entry).
    For example, since [Fess](https://github.com/codelibs/fess) needs to support many SMB devices, it uses this library.
    Registered: Wed Jun 12 15:45:55 UTC 2024
    - Last Modified: Wed May 10 09:29:34 UTC 2023
    - 1.5K bytes
    - Viewed (0)
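    A minimal, hypothetical sketch of what "supporting an SMB device" can look like with the classic jcifs 1.x-style API that the CodeLibs fork is derived from. The host, share, and credentials below are placeholders, and exact class names and constructors vary between jcifs forks and versions:

    import jcifs.smb.NtlmPasswordAuthentication;
    import jcifs.smb.SmbFile;

    public class SmbListExample {
        public static void main(String[] args) throws Exception {
            // Placeholder credentials for the SMB device (domain, user, password).
            NtlmPasswordAuthentication auth =
                    new NtlmPasswordAuthentication("WORKGROUP", "user", "password");
            // Placeholder SMB URL; the trailing slash marks a directory/share root.
            SmbFile share = new SmbFile("smb://fileserver/shared/", auth);
            // List the entries in the share to confirm the device is reachable.
            for (SmbFile f : share.listFiles()) {
                System.out.println(f.getName());
            }
        }
    }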
  2. android/guava/src/com/google/common/base/Ascii.java

       *
       * @since 8.0
       */
      public static final byte DLE = 16;
    
      /**
       * Device Control 1. Characters for the control of ancillary devices associated with data
       * processing or telecommunication systems, more especially switching devices "on" or "off." (If a
       * single "stop" control is required to interrupt or turn off ancillary devices, DC4 is the
       * preferred assignment.)
       *
       * @since 8.0
       */
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Mon Jul 19 15:43:07 UTC 2021
    - 21.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td

                    {n = 2 : i32,
                     devices = {DEVICE_ALIAS_0 = ["/DEVICE:0", "/DEVICE:1"],
                                DEVICE_ALIAS_1 = ["/DEVICE:2", "/DEVICE:3"]}} {
      // Inside the region, %0, %2, %4, and %6 correspond to
      // "/DEVICE:0"/"/DEVICE:2" and %1, %3, %5, and %7 correspond to
      // "/DEVICE:1"/"/DEVICE:3", depending on which device alias is used.
      %k = "tf_device.launch"() ( {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 23:53:20 UTC 2024
    - 14.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/replica_id_to_device_ordinal.mlir

    // RUN: tf-opt -split-input-file -verify-diagnostics %s -tf-replica-id-to-device-ordinal | FileCheck %s
    
    
    // Tests device ordinal is set correctly for multiple devices.
    // CHECK-LABEL: func @device_ordinal_attr_added_multiple_devices
    module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:worker/replica:0/task:0/device:TPU:0", "/job:worker/replica:0/task:0/device:TPU:1"]} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 4.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/tests/tpu_colocate_composite_resource_ops.mlir

           tf_device.return
        }) {device = "TPU_REPLICATED_CORE_0"} : () -> ()
        "tf_device.launch"() ({
          // CHECK:  "tf.B"(%[[RESOURCE_OUT]])
          "tf.B"(%1) : (tensor<4xf32>) -> ()
           tf_device.return
        }) {device = "TPU_REPLICATED_CORE_0"} : () -> ()
        tf_device.return
      }
      func.return
    }
    
    // Tests that an AssignVariable op using a composite device resource is wrapped
    // inside tf_device.Cluster.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 6.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/mark_for_compilation_pass_test_helper.cc

      }
    
      // Call AddDevices to register the XLA devices.
      //
      // It may be worth refactoring out XlaOpRegistry::RegisterCompilationDevice to
      // make this more direct, but probably not worth it solely for this test.
      std::vector<std::unique_ptr<Device>> devices;
      TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(session_options, "", &devices));
    
      GraphOptimizationPassOptions opt_options;
      opt_options.graph = graph;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 09 19:51:48 UTC 2023
    - 3.1K bytes
    - Viewed (0)
  7. pkg/kubelet/cm/dra/claiminfo_test.go

    							"vendor.com/device=device1",
    							"vendor.com/device=device2",
    						},
    						"test-plugin2": {
    							"vendor.com/device=device1",
    							"vendor.com/device=device2",
    						},
    					},
    				},
    			},
    			expectedResult: []kubecontainer.CDIDevice{
    				{
    					Name: "vendor.com/device=device1",
    				},
    				{
    					Name: "vendor.com/device=device1",
    				},
    				{
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri May 03 13:30:31 UTC 2024
    - 21K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_platform_info.h

    // configuring the persistor used in the DeviceCompiler. Please note that
    // non-XLA devices aren't supported yet. This is because:
    // 1. PjRtClient doesn't support data transfer for non-XLA devices yet
    // 2. Fetching the PjRtClient for non-XLA devices is also not supported yet
    Status GetOrCreatePjRtDeviceCompilerAndProfiler(
        const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h

    // such as TPUExecute or XlaExecute depending on the device type and specific
    // host runtime. Also does some optimization. Will return an error if it fails.
    // The output Runtime ops depend on both Device Type and Runtime Host.
    //
    // Input:
    //     Tensorflow Dialect MLIR with tf_device.cluster ops and virtual devices.
    //     xla_device_type - The device type that is being targeted.
    // Output:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 21:47:17 UTC 2023
    - 2.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/xla_rewrite.mlir

    // RUN: tf-opt %s -split-input-file -tf-xla-rewrite | FileCheck %s
    
    
    module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:GPU:0"]} {
      // CHECK-LABEL: func.func @convert_cluster_func
      func.func @convert_cluster_func(%arg0: tensor<i32>) -> tensor<i32> {
        // CHECK: "tf.XlaLaunch"(%arg0) <{function = @func, operandSegmentSizes = array<i32: 0, 1, 0>}> : (tensor<i32>) -> tensor<i32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 2.8K bytes
    - Viewed (0)