Results 1 - 10 of 380 for kDevice (0.21 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc

    // `{ tac.device = "GPU", tac.inference_type = "FLOAT"}` to a function
    // with the matching attributes. Assumed is that device type "CPU"
    // is the only device that is allowed to call other devices. I.e. ancestors of a
    // "CPU" `Operation` may only `Operations` without a device or other "CPU"
    // `Operations`. Implied is that "CPU" ops may contain subgraphs of different
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.4K bytes
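
    The comment above explains how this TAC pass raises ops that share `tac.device` and `tac.inference_type` attributes into functions carrying the same attributes. As a minimal sketch (not code from this file), reading that device annotation with the MLIR C++ API could look like the following; the helper name `GetTacDevice` is hypothetical, and only the `tac.device` attribute key is taken from the snippet.

    // Hypothetical helper, not part of raise_target_subgraphs.cc: returns the
    // `tac.device` string attribute of an op, if present.
    #include <optional>
    #include <string>

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Operation.h"

    std::optional<std::string> GetTacDevice(mlir::Operation* op) {
      // Ops without a device annotation (the device-less case the comment
      // above allows as ancestors of "CPU" ops) return nullopt.
      auto device = op->getAttrOfType<mlir::StringAttr>("tac.device");
      if (!device) return std::nullopt;
      return device.getValue().str();
    }
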
  2. tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc

      for (const auto& device : devices) {
        if (inference_type == QUANTIZED_INT8) {
          all_device_inference_types.push_back({device, QUANTIZED_INT8});
        } else if (inference_type == QUANTIZED_UINT8) {
          all_device_inference_types.push_back({device, QUANTIZED_UINT8});
        }
    
        // We will always enable float.
        all_device_inference_types.push_back({device, FLOAT});
      }
    
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 12.3K bytes
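
    The loop above enumerates one (device, inference type) entry per requested quantized type and always appends a FLOAT fallback for every device. Below is a self-contained sketch of the same enumeration using stand-in types; `InferenceDeviceType`, `AllDeviceInferenceTypes`, and the enum values are illustrative names, not the TAC definitions.

    // Stand-in types; names are illustrative, not the TAC definitions.
    #include <string>
    #include <vector>

    enum InferenceType { FLOAT, QUANTIZED_INT8, QUANTIZED_UINT8 };

    struct InferenceDeviceType {
      std::string hardware;
      InferenceType inference_type;
    };

    // Mirrors the loop above: keep the requested quantized type (if any) and
    // always add a FLOAT entry for each device.
    std::vector<InferenceDeviceType> AllDeviceInferenceTypes(
        const std::vector<std::string>& devices, InferenceType inference_type) {
      std::vector<InferenceDeviceType> result;
      for (const auto& device : devices) {
        if (inference_type == QUANTIZED_INT8) {
          result.push_back({device, QUANTIZED_INT8});
        } else if (inference_type == QUANTIZED_UINT8) {
          result.push_back({device, QUANTIZED_UINT8});
        }
        result.push_back({device, FLOAT});  // Float is always enabled.
      }
      // e.g. devices = {"GPU", "CPU"} with QUANTIZED_INT8 yields
      // {GPU, INT8}, {GPU, FLOAT}, {CPU, INT8}, {CPU, FLOAT}.
      return result;
    }
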
  3. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_sequencing.cc

          // TODO(bfontain): Check for other attributes.
          replicated_output->setAttr(kDevice, builder.getStringAttr(""));
          TF::TPUReplicatedInputOp input = builder.create<TF::TPUReplicatedInputOp>(
              op->getLoc(), result.getType(), replicated_output.getResults());
          input->setAttr(kDevice, builder.getStringAttr(""));
          mlir::Value new_value = input.getOutput();
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 39.4K bytes
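
    The snippet sets the `kDevice` attribute of the newly created replicated input/output ops to an empty string, i.e. it clears any device assignment. A minimal sketch of that pattern follows; it assumes `kDevice` names the usual `device` attribute, which is an assumption since the constant's value is not shown in the result.

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/Operation.h"

    // Assumption: kDevice names the "device" attribute; the real constant is
    // defined elsewhere in the pass and its value is not shown above.
    constexpr char kDevice[] = "device";

    // Clears the device assignment on `op`, as the snippet does for the newly
    // created TPUReplicatedInput/TPUReplicatedOutput ops.
    void ClearDeviceAttr(mlir::Operation* op, mlir::OpBuilder& builder) {
      op->setAttr(kDevice, builder.getStringAttr(""));
    }
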
  4. tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc

              // Set interface_name & target to the call_op as well.
              new_call->setAttr(kInterfaceNameAttr,
                                builder->getStringAttr(interface_name));
              new_call->setAttr(
                  kDevice,
                  builder->getStringAttr(preferred_inference_device_type.hardware));
              new_call->setAttr(
                  kInferenceType,
                  builder->getStringAttr(GetInferenceString(
    - Last Modified: Thu Nov 24 15:10:02 UTC 2022
    - 19.7K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/resource-device-inference.mlir

    // RUN: tf-opt -split-input-file -verify-diagnostics -tf-resource-device-inference %s | FileCheck %s
    
    !tf_res = tensor<*x!tf_type.resource<tensor<32xf32>>>
    
    // Tests that the pass can correctly propagate device attributes inside the same
    // function.
    
    // CHECK-LABEL: func @propagate_in_function
    func.func @propagate_in_function(
      %arg0: !tf_res {tf.device = "/TPU:0"},
      %arg1: !tf_res {tf.device = "/TPU:1"}) {
      tf_executor.graph {
    - Last Modified: Tue May 17 16:01:45 UTC 2022
    - 18.2K bytes
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    // RUN: tac-opt-all-backends -tfl-device-transform-gpu %s -split-input-file -verify-diagnostics | FileCheck %s
    
    func.func @pack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %0 : tensor<2x1xf32>
    }
    
    // CHECK:   func @pack(%[[VAL_0:.*]]: tensor<1xf32>, %[[VAL_1:.*]]: tensor<1xf32>) -> tensor<2x1xf32> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
  7. tensorflow/c/eager/custom_device_test.cc

      ASSERT_FALSE(arrived);
      TFE_TensorHandle* hdevice =
          TFE_TensorHandleCopyToDevice(hcpu, context, name, status.get());
      ASSERT_TRUE(arrived);
      ASSERT_FALSE(executed);
      ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
      std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> matmul(
          MatMulOp(context, hcpu, hdevice), TFE_DeleteOp);
      TFE_OpSetDevice(matmul.get(), name, status.get());
    - Last Modified: Thu Aug 27 23:39:24 UTC 2020
    - 18.4K bytes
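
    This test drives the eager C API: a tensor handle is copied onto a named (custom) device and an op is then pinned to that device. The sketch below repeats those two calls in isolation, assuming an already initialized context, an existing CPU handle, and a registered device name as in the test.

    // Sketch only: assumes an initialized TFE_Context*, an existing CPU tensor
    // handle, and a device name registered as in the test above.
    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/tf_status.h"

    void CopyAndPin(TFE_Context* ctx, TFE_TensorHandle* hcpu,
                    const char* device_name) {
      TF_Status* status = TF_NewStatus();
      // Copy the CPU handle onto the named device.
      TFE_TensorHandle* hdevice =
          TFE_TensorHandleCopyToDevice(hcpu, ctx, device_name, status);
      if (TF_GetCode(status) == TF_OK) {
        // Pin a MatMul op to the same device before it would be executed.
        TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
        TFE_OpSetDevice(matmul, device_name, status);
        TFE_DeleteOp(matmul);
      }
      if (hdevice != nullptr) TFE_DeleteTensorHandle(hdevice);
      TF_DeleteStatus(status);
    }
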
  8. tensorflow/compiler/jit/xla_device_ops.h

                                                                                   \
      REGISTER_KERNEL_BUILDER(                                                     \
          Name("VarHandleOp").Device(DEVICE).HostMemory("resource"), VarHandleOp); \
      REGISTER_KERNEL_BUILDER(                                                     \
          Name("_VarHandlesOp").Device(DEVICE).HostMemory("resources"),            \
    - Last Modified: Tue Nov 23 19:28:25 UTC 2021
    - 17.1K bytes
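
    The macro above registers kernels for a given device while forcing the "resource" argument to stay in host memory. The sketch below shows the same registration pattern for a hypothetical op; `ExampleOp` and `ExampleOpKernel` are illustrative and not part of xla_device_ops.h.

    // Illustrative only: "ExampleOp" and ExampleOpKernel are hypothetical and
    // not part of xla_device_ops.h.
    #include "tensorflow/core/framework/op_kernel.h"

    namespace tensorflow {

    class ExampleOpKernel : public OpKernel {
     public:
      explicit ExampleOpKernel(OpKernelConstruction* ctx) : OpKernel(ctx) {}
      // No-op compute; a real kernel would read inputs and produce outputs.
      void Compute(OpKernelContext* ctx) override {}
    };

    // Register the kernel for CPU and keep the "resource" argument in host
    // memory, mirroring the VarHandleOp registration above.
    REGISTER_KERNEL_BUILDER(
        Name("ExampleOp").Device(DEVICE_CPU).HostMemory("resource"),
        ExampleOpKernel);

    }  // namespace tensorflow
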
  9. tensorflow/compiler/mlir/tf2xla/api/v2/testdata/outside_compilation.mlir

          %control_31 = tf_executor.island wraps "tf.NoOp"() {device = "/device:CPU:0"} : () -> ()
          %outputs_32, %control_33 = tf_executor.island wraps "tf.Const"() {device = "/device:CPU:0", value = dense<4> : tensor<i32>} : () -> tensor<i32>
          %control_34 = tf_executor.island wraps "tf.NoOp"() {device = "/device:CPU:0"} : () -> ()
    - Last Modified: Thu Oct 19 20:19:45 UTC 2023
    - 21.9K bytes
  10. tensorflow/compiler/jit/xla_platform_info_test.cc

      device_setup_.AddDevicesAndSetUp({DEVICE_GPU});
      Device* device = device_setup_.GetDevice(DEVICE_GPU);
      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
    
      ResourceMgr resource_mgr("");
      OpKernelContext::Params params;
      params.resource_manager = &resource_mgr;
      params.device = device;
      OpKernelContext ctx(&params, 0);
    
      PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
    - Last Modified: Sun Jan 14 15:17:12 UTC 2024
    - 13.6K bytes