Results 51 - 60 of 1,105 for Levine (0.12 sec)

  1. tensorflow/compiler/mlir/tfrt/tests/fuse_tpu_compile_and_execute_ops.mlir

      %0 = "tf.ReadVariableOp"(%arg1) {device = "/CPU:0"} : (tensor<*x!tf_type.resource>) -> tensor<*xi32>
      %1 = "tf.Shape"(%arg0) {device = "/CPU:0"} : (tensor<*xi32>) -> tensor<?xi64>
      %2 = "tf.Shape"(%0) {device = "/CPU:0"} : (tensor<*xi32>) -> tensor<?xi64>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir

        %5 = "tf.Identity"(%arg4) {device = ""} : (tensor<1024x1024xf32>) -> tensor<1024x1024xf32>
        %6 = "tf.MatMul"(%arg3, %5) {device = "", transpose_a = false, transpose_b = false} : (tensor<1x1024xf32>, tensor<1024x1024xf32>) -> tensor<1x1024xf32>
        %7 = "tf.AddV2"(%2, %6) {device = ""} : (tensor<1x1024xf32>, tensor<1x1024xf32>) -> tensor<1x1024xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 42K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tf2xla/internal/passes/mark_ops_for_outside_compilation.cc

    auto* auto_outside_compilation_gauge =
        tensorflow::monitoring::Gauge<bool, 0>::New(
            "/tensorflow/core/use_auto_outside_compilation",
            "Tracks if auto outside compilation is enabled");
    
    #define GEN_PASS_DEF_MARKOPSFOROUTSIDECOMPILATIONPASS
    #include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h.inc"
    
    struct MarkOpsForOutsideCompilation
        : public impl::MarkOpsForOutsideCompilationPassBase<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21.4K bytes
    - Viewed (0)
  4. platforms/documentation/docs/src/snippets/native-binaries/cunit/groovy/libs/cunit/2.1-2/include/CUnit/CUnit.h

      #define CU_TRUE 1
    #endif
    
    #ifndef CU_FALSE
      /** Boolean FALSE for CUnit use. */
      #define CU_FALSE 0
    #endif
    
    #ifndef CU_UNREFERENCED_PARAMETER
      /** Consistent approach to referencing unused parameters. */
      #define CU_UNREFERENCED_PARAMETER(x) (void)x
    #endif
    
    #ifndef CU_MAX
    #  define CU_MAX(a,b) (((a) >= (b)) ? (a) : (b))
    #endif
    
    #ifndef CU_MIN
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Mon Nov 27 17:53:42 UTC 2023
    - 18.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        func.return %0 : tensor<2x100xf32>
      }
      func.func @func_0_GPU_FLOAT(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>, %arg2: tensor<100xf32>) -> tensor<100xf32> attributes {tac.cost = 4.000000e+01 : f32, tac.device = "GPU", tac.inference_type = "FLOAT", tac.interface_name = "func_0"} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  6. tensorflow/c/kernels.h

    // Returns the Device ID of the device that the context possesses. Returns the
    // PlatformDeviceId if a mapping between TfDeviceId and PlatformDeviceId
    // is set; otherwise returns the id in the device name. Please refer to
    // tensorflow/tsl/framework/device_id.h for more details.
    // For mobile or slim build, returns the id in the device name.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 09 22:46:22 UTC 2024
    - 24.6K bytes
    - Viewed (0)
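    The excerpt above documents a lookup order for the device ID: return the PlatformDeviceId when a TfDeviceId-to-PlatformDeviceId mapping is set, otherwise fall back to the numeric id embedded in the device name. Below is a minimal standalone sketch of that fallback logic; the names get_device_id and parse_id_from_name are illustrative stand-ins, not the actual tensorflow/c/kernels.h API.

    // Illustrative sketch (hypothetical names): prefer the mapped PlatformDeviceId,
    // otherwise parse the trailing ":<id>" from the device name, as described in
    // the kernels.h comment quoted above.
    #include <cstdlib>
    #include <iostream>
    #include <optional>
    #include <string>

    // Device names end in ":<id>", e.g. "/job:localhost/replica:0/task:0/device:GPU:0".
    int parse_id_from_name(const std::string& device_name) {
      auto pos = device_name.rfind(':');
      return pos == std::string::npos ? -1
                                      : std::atoi(device_name.c_str() + pos + 1);
    }

    int get_device_id(std::optional<int> platform_device_id,
                      const std::string& device_name) {
      if (platform_device_id.has_value()) return *platform_device_id;  // mapping set
      return parse_id_from_name(device_name);                          // fallback
    }

    int main() {
      std::cout << get_device_id(3, "/job:localhost/replica:0/task:0/device:GPU:0")
                << "\n";                                                // prints 3
      std::cout << get_device_id(std::nullopt, "/device:GPU:1") << "\n";  // prints 1
    }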
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu_device_propagation.mlir

    // CHECK-SAME: ({{%.+}}: tensor<i64> {tf.device = "/job:localhost/replica:0/task:0/device:TPU:0"}, {{%.+}}: tensor<i32> {tf.device = "/job:localhost/replica:0/task:0/device:TPU:0"})
    // CHECK-SAME: -> (tensor<i64> {tf.device = "/job:localhost/replica:0/task:0/device:TPU:0"}, tensor<i32> {tf.device = "/job:localhost/replica:0/task:0/device:TPU:0"})
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 19K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc

          task_and_device = {task, device};
        }
      }
    
      return topology;
    }
    
    // Determine execution devices when topology and device assignment are defined.
    // With a topology device coordinate to task and device mapping, device
    // assignment device coordinates can then be mapped to task and device for TPU
    // devices. The device assignment array is also validated.
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:10:40 UTC 2024
    - 32.8K bytes
    - Viewed (0)
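    The comment in this excerpt describes mapping device assignment coordinates to (task, device) pairs through the topology, validating the assignment array along the way. The sketch below illustrates that idea with simplified stand-in types; it is not the actual code or data layout in tpu_rewrite_device_util.cc.

    // Illustrative sketch: look up each device assignment coordinate in a
    // topology (coordinate -> {task, device}) and reject coordinates that the
    // topology does not contain. Types are simplified stand-ins.
    #include <array>
    #include <iostream>
    #include <map>
    #include <optional>
    #include <vector>

    struct TaskAndDevice { int task; int device; };
    using Coord = std::array<int, 4>;  // e.g. (x, y, z, core)

    std::optional<std::vector<TaskAndDevice>> AssignDevices(
        const std::map<Coord, TaskAndDevice>& topology,
        const std::vector<Coord>& device_assignment) {
      std::vector<TaskAndDevice> result;
      for (const Coord& c : device_assignment) {
        auto it = topology.find(c);
        if (it == topology.end()) return std::nullopt;  // invalid coordinate
        result.push_back(it->second);
      }
      return result;
    }

    int main() {
      std::map<Coord, TaskAndDevice> topology;
      topology[Coord{0, 0, 0, 0}] = TaskAndDevice{0, 0};
      topology[Coord{1, 0, 0, 0}] = TaskAndDevice{0, 1};
      std::vector<Coord> assignment = {Coord{1, 0, 0, 0}, Coord{0, 0, 0, 0}};
      auto devices = AssignDevices(topology, assignment);
      if (devices) {
        for (const auto& d : *devices)
          std::cout << "task " << d.task << " device " << d.device << "\n";
      }
    }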
  9. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

        %0 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : tensor<1xf32>
        %1 = tfl.mul %0, %arg2 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : tensor<1xf32>
        func.return %1 : tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/xla_platform_info_test.cc

      device_setup_.AddDevicesAndSetUp({DEVICE_GPU});
      Device* device = device_setup_.GetDevice(DEVICE_GPU);
      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
    
      ResourceMgr resource_mgr("");
      OpKernelContext::Params params;
      params.resource_manager = &resource_mgr;
      params.device = device;
      OpKernelContext ctx(&params, 0);
    
      PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Jan 14 15:17:12 UTC 2024
    - 13.6K bytes
    - Viewed (0)