Results 31 - 40 of 2,196 for Levine (0.19 sec)

  1. tensorflow/c/eager/parallel_device/parallel_device.h

    // per device in `underlying_devices`. Implicit copies off of the device throw
    // an error.
    //
    // All component tensors must have the same dtype. Currently they must also have
    // the same shape, although this requirement may be relaxed in the future.
    //
    // `device_name` must not name an existing physical or custom device (see
    // the documentation for TFE_RegisterCustomDevice for more information).
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 04 21:49:16 UTC 2020
    - 2.9K bytes
    - Viewed (0)
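    Note: the excerpt above describes the parallel custom device this header exposes, where one logical tensor is backed by one component tensor per entry in underlying_devices. A minimal registration sketch, assuming the AllocateParallelDevice helper declared in this header together with TFE_RegisterCustomDevice from the C eager API (exact signatures can differ between TensorFlow versions):

    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/eager/c_api_experimental.h"
    #include "tensorflow/c/eager/parallel_device/parallel_device.h"

    // Registers a parallel device backed by two GPUs. Implicit copies off of
    // the resulting device raise an error, as noted in the excerpt above.
    // Device names here are illustrative.
    void RegisterTwoGpuParallelDevice(TFE_Context* ctx, TF_Status* status) {
      const char* underlying[] = {"/job:localhost/replica:0/task:0/device:GPU:0",
                                  "/job:localhost/replica:0/task:0/device:GPU:1"};
      // `device_name` must not collide with an existing physical or custom device.
      const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
      TFE_CustomDevice device;
      void* device_info;
      tensorflow::parallel_device::AllocateParallelDevice(
          device_name, underlying, /*num_underlying_devices=*/2, &device,
          &device_info);
      TFE_RegisterCustomDevice(ctx, device, device_name, device_info, status);
    }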
  2. tensorflow/compiler/mlir/tensorflow/transforms/colocate_tpu_copy_with_dynamic_shape.cc

          auto device = op->getAttrOfType<StringAttr>(kDevice);
          for (auto *operand : operands)
            propagateIfChanged(operand, operand->SetDevice(device));
        } else {
          // Propagate device through other ops. These ops might have their
          // own device annotation, but that's fine. We only care about
          // where the TPUExecute ops live.
          StringAttr device;
          for (const Device *d : results) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Aug 23 00:30:27 UTC 2023
    - 5.2K bytes
    - Viewed (0)
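    Note: the excerpt above propagates device attributes through intervening ops so that copies end up wherever the TPUExecute op lives. A simplified, standalone model of that backward propagation (it does not use the MLIR dataflow framework; the types and names below are illustrative):

    #include <optional>
    #include <string>
    #include <vector>

    // An op either has a known device or inherits one from the ops that
    // consume its results, mirroring the "only care about where the
    // TPUExecute ops live" comment above.
    struct Op {
      std::string name;
      std::optional<std::string> device;  // resolved device, if known
      std::vector<Op*> users;             // ops consuming this op's results
    };

    // Tiny fixed-point loop: keep pulling devices from consumers until
    // nothing changes.
    void PropagateDevices(std::vector<Op*>& ops) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (Op* op : ops) {
          if (op->device) continue;
          for (Op* user : op->users) {
            if (user->device) {
              op->device = user->device;
              changed = true;
              break;
            }
          }
        }
      }
    }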
  3. tensorflow/compiler/mlir/tensorflow/transforms/resource_analyzer_test_pass.cc

    };
    
    // A set of values that identifies a resource.
    struct ResourceKey {
      StringRef device;
      StringRef container;
      StringRef shared_name;
    };
    
    ResourceKey GetResourceKey(TF::VarHandleOp var_handle_op) {
      ResourceKey resource_key;
    
      if (auto attr = var_handle_op->getAttrOfType<StringAttr>("device")) {
        resource_key.device = attr.getValue();
      }
    
      resource_key.container = var_handle_op.getContainer();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 03 00:21:29 UTC 2023
    - 3K bytes
    - Viewed (0)
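    Note: the excerpt above identifies a resource by its (device, container, shared_name) triple. A self-contained illustration of why that works as a key: making the triple hashable lets aliasing variable handles be grouped together (the names below are hypothetical, not the pass's API):

    #include <cstddef>
    #include <string>
    #include <tuple>
    #include <unordered_map>

    struct ResourceKey {
      std::string device;
      std::string container;
      std::string shared_name;

      bool operator==(const ResourceKey& other) const {
        return std::tie(device, container, shared_name) ==
               std::tie(other.device, other.container, other.shared_name);
      }
    };

    struct ResourceKeyHash {
      std::size_t operator()(const ResourceKey& k) const {
        std::hash<std::string> h;
        return h(k.device) ^ (h(k.container) << 1) ^ (h(k.shared_name) << 2);
      }
    };

    // Two handles with equal keys refer to the same underlying resource:
    // std::unordered_map<ResourceKey, int, ResourceKeyHash> handles_per_resource;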
  4. tensorflow/compiler/mlir/tfrt/ir/gpu_ops.td

        Op<TFRT_GPU_Dialect, mnemonic, traits> {
    }
    
    // TODO(b/260267885): We may add a device argument when we want to support
    // GPU MIG.
    def TransferToDeviceOp: Gpu_Op<"transfer_to_device"> {
      let summary = "Transfer a CPU tensor to device.";
    
      let description = [{
        Transfer a CPU tensor to device.
    
        Example:
          %device_tensor = gpurt.transfer_to_device %cpu_tensor
      }];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 15:01:21 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_cleanup_attributes.cc

          // This attribute is used for op colocation. Since all ops are located
          // on a single device cluster, this private attribute is no longer
          // needed.
          op->removeAttr(kClassAttr);
          if (auto attr = op->getAttrOfType<StringAttr>(kDeviceAttr)) {
            // Preserve device attribute if the op is placed on a replicated core
            // device. Device attribute is used to infer the appropriate sharding
            // within TPUs for this op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 23:50:19 UTC 2022
    - 3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/constant_op_device_assignment.cc

    ==============================================================================*/
    
    // This pass adds the device attribute to every tf.Const op based on the device
    // attribute of the operations that read its result. If the result of a tf.Const
    // op is read by operations placed on multiple devices, then the pass will
    // replicate the tf.Const op once for each device.
    
    #include "mlir/IR/Builders.h"
    #include "mlir/Pass/Pass.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 23:50:19 UTC 2022
    - 3.3K bytes
    - Viewed (0)
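    Note: the excerpt above states the pass's contract: every tf.Const gets the device of its readers, and a constant read from several devices is replicated once per device. A simplified standalone model of that replication logic (illustrative only; the real pass rewrites MLIR operations):

    #include <map>
    #include <string>
    #include <vector>

    struct ConstNode {
      std::string device;                          // empty until assigned
      std::vector<std::string> consumer_devices;   // devices of the readers
    };

    // Returns one copy of the constant per distinct consumer device, each
    // copy placed on that device and serving only the readers there.
    std::vector<ConstNode> AssignAndReplicate(const ConstNode& constant) {
      std::map<std::string, ConstNode> per_device;
      for (const std::string& d : constant.consumer_devices) {
        ConstNode& copy = per_device[d];
        copy.device = d;
        copy.consumer_devices.push_back(d);
      }
      std::vector<ConstNode> result;
      for (auto& entry : per_device) result.push_back(entry.second);
      return result;
    }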
  7. tensorflow/compiler/jit/pjrt_base_device.h

        void operator=(const Metadata&) = delete;
      };
    
      struct Options {
        // The device name's prefix (e.g., "/task:7")
        std::string device_name_prefix;
    
        // The name of the device (e.g., "TPU")
        std::string device_name;
    
        // The index of the device.
        int device_ordinal = -1;
    
        // The name of the compilation device, also referred to as jit_device_type.
        // (e.g., "XLA_CPU_JIT");
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 4K bytes
    - Viewed (0)
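    Note: the Options fields above compose into a fully qualified TensorFlow device name of the form <prefix>/device:<name>:<ordinal>, e.g. "/task:7", "TPU", and 0 give "/task:7/device:TPU:0". A small sketch of that composition (the helper name is hypothetical and not part of this header):

    #include <string>

    // Builds the full device name from the Options fields shown above.
    std::string FullDeviceName(const std::string& device_name_prefix,
                               const std::string& device_name,
                               int device_ordinal) {
      return device_name_prefix + "/device:" + device_name + ":" +
             std::to_string(device_ordinal);
    }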
  8. tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops_pass.cc

      // device cluster ops.
      for (auto func : module.getOps<func::FuncOp>()) {
        for (auto user : symbol_map.getUsers(func)) {
          // Populate caller-callee map.
          if (func::FuncOp caller = user->getParentOfType<func::FuncOp>())
            caller_callee_map[caller].insert(func);
          // Initialize function worklist with functions referenced in device
          // cluster.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Sep 08 20:01:13 UTC 2023
    - 8K bytes
    - Viewed (0)
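    Note: the excerpt above builds a caller-to-callee map and seeds a worklist with the functions referenced inside device clusters. A toy model of that traversal pattern, processing every transitively reachable callee exactly once (illustrative only; the real pass walks MLIR func::FuncOp symbols):

    #include <map>
    #include <queue>
    #include <set>
    #include <string>

    void ProcessReachable(
        const std::map<std::string, std::set<std::string>>& caller_callee_map,
        std::queue<std::string> worklist /* seeded from device clusters */) {
      std::set<std::string> visited;
      while (!worklist.empty()) {
        std::string fn = worklist.front();
        worklist.pop();
        if (!visited.insert(fn).second) continue;  // already handled
        // ... process resource ops inside `fn` here ...
        auto it = caller_callee_map.find(fn);
        if (it == caller_callee_map.end()) continue;
        for (const std::string& callee : it->second) worklist.push(callee);
      }
    }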
  9. tensorflow/c/experimental/pluggable_profiler/pluggable_profiler_internal.h

    #include "tensorflow/core/profiler/protobuf/xplane.pb.h"
    #include "tsl/profiler/protobuf/profiler_options.pb.h"
    
    namespace tensorflow {
    namespace profiler {
    
    // Plugin initialization function that a device plugin must define. Returns
    // a TF_Status output specifying whether the initialization is successful.
    using TFInitProfilerFn = void (*)(TF_ProfilerRegistrationParams* const,
                                      TF_Status* const);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 1.7K bytes
    - Viewed (0)
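    Note: TFInitProfilerFn above is the initialization entry point a profiler plugin must define. A minimal shape for such a function, conventionally named TF_InitProfiler (the body is a placeholder; a real plugin fills params with its profiler callbacks as defined in pluggable_profiler.h):

    #include "tensorflow/c/experimental/pluggable_profiler/pluggable_profiler.h"
    #include "tensorflow/c/tf_status.h"

    extern "C" void TF_InitProfiler(TF_ProfilerRegistrationParams* const params,
                                    TF_Status* const status) {
      // ... populate `params` with the plugin's profiler callbacks here ...
      TF_SetStatus(status, TF_OK, "");  // report successful initialization
    }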
  10. tensorflow/compiler/jit/xla_compiler_options_util.h

    limitations under the License.
    ==============================================================================*/
    
    #ifndef TENSORFLOW_COMPILER_JIT_XLA_COMPILER_OPTIONS_UTIL_H_
    #define TENSORFLOW_COMPILER_JIT_XLA_COMPILER_OPTIONS_UTIL_H_
    
    #include "tensorflow/compiler/jit/device_compiler.h"
    #include "tensorflow/compiler/jit/xla_platform_info.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Dec 29 01:41:20 UTC 2023
    - 2.7K bytes
    - Viewed (0)