Results 11 - 20 of 1,177 for kDevice (0.12 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/resource-device-inference.mlir

    // RUN: tf-opt -split-input-file -verify-diagnostics -tf-resource-device-inference %s | FileCheck %s
    
    !tf_res = tensor<*x!tf_type.resource<tensor<32xf32>>>
    
    // Tests that the pass can correctly propagate device attributes inside the same
    // function.
    
    // CHECK-LABEL: func @propagate_in_function
    func.func @propagate_in_function(
      %arg0: !tf_res {tf.device = "/TPU:0"},
      %arg1: !tf_res {tf.device = "/TPU:1"}) {
      tf_executor.graph {
    - Last Modified: Tue May 17 16:01:45 UTC 2022
    - 18.2K bytes
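    For intuition about what the pass checks, the snippet below is a loose Python-level
    analogue (a sketch of the concept, not the pass itself): a resource variable pinned
    to one device, whose reads end up placed on that same device.

    ```python
    import tensorflow as tf

    # Create a resource variable pinned to a specific device.
    with tf.device("/CPU:0"):
        v = tf.Variable(tf.zeros([32], tf.float32))

    # The variable handle records its device, and reads of the resource are
    # placed on that same device; propagating this through a function body is
    # what the MLIR pass above does at compile time.
    print(v.device)               # .../device:CPU:0
    print(v.read_value().device)  # .../device:CPU:0
    ```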
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/device-transform-nnapi.mlir

    // RUN: tac-translate -input-mlir -output-mlir -device-specs=NNAPI %s -o - 2>&1 | FileCheck %s
    
    module {
      // CHECK-LABEL: main
      func.func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
        %0 = "tfl.squared_difference"(%arg0, %arg1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
        func.return %0 : tensor<4xf32>
        // CHECK:  [[VAL_0:%.*]] = tfl.sub %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<4xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.2K bytes
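    The CHECK line above only shows the first op of the rewrite. As a point of
    reference, squared_difference is just a subtract followed by a multiply, which
    the short Python check below (my own illustration, not part of the test) confirms:

    ```python
    import tensorflow as tf

    a = tf.constant([1.0, 2.0, 3.0, 4.0])
    b = tf.constant([4.0, 3.0, 2.0, 1.0])

    # squared_difference(a, b) computes (a - b) * (a - b), so lowering it into a
    # subtract followed by a multiply produces identical results.
    diff = a - b
    print(bool(tf.reduce_all(tf.math.squared_difference(a, b) == diff * diff)))  # True
    ```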
  3. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/device-arg-retval-attr.mlir

    // Verify arg/ret attributes are exported as device assignment for arg/retval
    // nodes.
    
    module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 121 : i32}} {
      func.func @main(%arg0: tensor<*xf32> {tf.device = "/CPU:0"}, %arg1: tensor<2x4x6x8xi32>) -> (tensor<*xf32>, tensor<2x4x6x8xi32> {tf.device = "/CPU:1"})
    - Last Modified: Fri Mar 25 12:28:56 UTC 2022
    - 1.8K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/device-arg-retval-attr.pbtxt

    # Verify arg and ret devices are added as arg and ret attributes.
    
    # CHECK-LABEL: func @main
    # CHECK-SAME:  (%[[ARG_0:[a-z0-9]+]]: tensor<*xf32> {tf.device = "/CPU:0"}, %[[ARG_1:[a-z0-9]+]]: tensor<2x4x6x8xi32>) -> (tensor<*xf32>, tensor<*xi32> {tf.device = "/CPU:1"})
    
    node {
      name: "args_0"
      op: "_Arg"
      device: "/CPU:0"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "index"
        value {
    - Last Modified: Mon Dec 07 17:45:22 UTC 2020
    - 1.6K bytes
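    For a concrete feel of where these device strings come from, the hypothetical
    TF2 Python snippet below (my own sketch, not part of the test) traces a small
    function with part of it pinned to /CPU:0 and prints the device field that ends
    up on the GraphDef nodes, the same field carried by the _Arg node above.

    ```python
    import tensorflow as tf

    @tf.function
    def f(x):
        # Pin the computation to a device; the placement is recorded in the
        # `device` field of the corresponding GraphDef nodes (the exact string
        # form may vary, e.g. "/device:CPU:0").
        with tf.device("/CPU:0"):
            return x + 1.0

    graph_def = f.get_concrete_function(
        tf.TensorSpec([None], tf.float32)).graph.as_graph_def()
    for node in graph_def.node:
        print(node.name, node.op, node.device)
    ```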
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    // RUN: tac-opt-all-backends -tfl-device-transform-gpu %s -split-input-file -verify-diagnostics | FileCheck %s
    
    func.func @pack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %0 : tensor<2x1xf32>
    }
    
    // CHECK:   func @pack(%[[VAL_0:.*]]: tensor<1xf32>, %[[VAL_1:.*]]: tensor<1xf32>) -> tensor<2x1xf32> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
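    For context, packing two tensor<1xf32> values along axis 0 (as in @pack above) is
    the same computation as a concat followed by a reshape, the kind of form a GPU
    lowering tends to prefer (treat the exact rewrite as an assumption here). A quick
    Python check of the equivalence:

    ```python
    import tensorflow as tf

    x = tf.constant([1.0])
    y = tf.constant([2.0])

    # Stacking along axis 0 yields a [2, 1] tensor, identical to concatenating
    # the two length-1 vectors and reshaping the result.
    packed = tf.stack([x, y], axis=0)
    via_concat = tf.reshape(tf.concat([x, y], axis=0), [2, 1])
    print(bool(tf.reduce_all(packed == via_concat)))  # True
    ```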
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-nnapi.mlir

    // RUN: tac-opt-all-backends -tfl-device-transform-nnapi %s -split-input-file -verify-diagnostics | FileCheck %s
    
    func.func @mean_4d_keepdim(%arg0: tensor<1x48x48x512xf32>) -> tensor<1x1x1x512xf32> {
      %cst = arith.constant dense<[1, 2]> : tensor<2xi32>
      %0 = "tfl.mean"(%arg0, %cst) {keep_dims = true} : (tensor<1x48x48x512xf32>, tensor<2xi32>) -> tensor<1x1x1x512xf32>
      func.return %0 : tensor<1x1x1x512xf32>
    }
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.9K bytes
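    One way to read @mean_4d_keepdim: a spatial mean with keep_dims = true over the
    full 48x48 window is the same computation as an average pool with that window,
    a form that NNAPI handles well (the exact rewrite performed by the pass is an
    assumption here). A quick Python check:

    ```python
    import tensorflow as tf

    x = tf.random.uniform([1, 48, 48, 512])

    # Mean over the spatial axes with keepdims, as in the tfl.mean test above.
    mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)  # shape [1, 1, 1, 512]

    # The same result computed as an average pool over the full 48x48 window.
    pooled = tf.nn.avg_pool2d(x, ksize=[48, 48], strides=[1, 1], padding="VALID")

    print(float(tf.reduce_max(tf.abs(mean - pooled))))  # ~0, up to float rounding
    ```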
  7. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/graph-device-retval.pbtxt

    - Last Modified: Wed Nov 11 19:14:04 UTC 2020
    - 1.5K bytes
  8. tensorflow/c/eager/custom_device_test.cc

      ASSERT_FALSE(arrived);
      TFE_TensorHandle* hdevice =
          TFE_TensorHandleCopyToDevice(hcpu, context, name, status.get());
      ASSERT_TRUE(arrived);
      ASSERT_FALSE(executed);
      ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
      std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> matmul(
          MatMulOp(context, hcpu, hdevice), TFE_DeleteOp);
      TFE_OpSetDevice(matmul.get(), name, status.get());
    - Last Modified: Thu Aug 27 23:39:24 UTC 2020
    - 18.4K bytes
  9. tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h

    // Creates a pass that extracts outside compilation (Host ops inside device
    // cluster) at head/tail of Device cluster to run before/after XLA computation.
    std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
    CreateExtractHeadTailOutsideCompilationPass();
    
    // Creates a pass that extracts outside compilation (Host ops inside device
    // cluster) ops to a separate parallel_execute region to run on CPU.
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 3.5K bytes
  10. tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md

        needed to apply space to depth optimization for a model that needs to load a
        pre-trained checkpoint. This transform can be done on the host or TPU device
        based on the cost. As the size of the kernel is relatively small, this won't
        add noticeable cost to TPU device time. Below is the logic to transform the
        kernel of shape [7, 7, 3, 64] to [4, 4, 12, 64].
    
        ```python
        conv0 = tf.compat.v1.layers.Conv2D(
    - Last Modified: Sat Oct 24 02:51:43 UTC 2020
    - 8.3K bytes
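    The quoted snippet cuts off before the transform itself, so below is a standalone
    NumPy sketch of the shape manipulation the text describes: pad the 7x7 kernel to
    8x8, then fold each 2x2 spatial block into the input-channel dimension. The
    padding side and transpose order are my assumptions, not necessarily what the
    doc's code does.

    ```python
    import numpy as np

    block = 2
    kernel = np.random.randn(7, 7, 3, 64).astype(np.float32)  # [H, W, Cin, Cout]

    # Pad the spatial dims from 7x7 to 8x8 so they divide evenly by the block size.
    padded = np.pad(kernel, ((1, 0), (1, 0), (0, 0), (0, 0)))

    # Split each spatial dim into (blocks, block) and fold the per-block offsets
    # into the input-channel dim; output channels stay untouched.
    h, w, cin, cout = padded.shape
    folded = (padded
              .reshape(h // block, block, w // block, block, cin, cout)
              .transpose(0, 2, 1, 3, 4, 5)
              .reshape(h // block, w // block, block * block * cin, cout))

    print(folded.shape)  # (4, 4, 12, 64)
    ```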