Results 21 - 30 of 195 for Advice (0.11 sec)

  1. tensorflow/compiler/mlir/lite/tests/legalize-tf-hashtables.mlir

      %0 = "tf.HashTableV2"() {container = "", device = "", key_dtype = !tf_type.string, shared_name = "hash_table_1dd4fef4-646d-491f-a3a8-bf5334f45813", use_node_name_sharing = false, value_dtype = i64} : () -> tensor<!tf_type.resource>
      "tf.LookupTableImportV2"(%0, %cst, %cst_1) {device = ""} : (tensor<!tf_type.resource>, tensor<3x!tf_type.string>, tensor<3xi64>) -> ()
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9.4K bytes
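    The two ops in this excerpt create an empty string-to-int64 table resource and bulk-initialize it from parallel key/value tensors. As a rough, hypothetical illustration of that behavior only (not TensorFlow's actual implementation), the C++ sketch below uses std::unordered_map as a stand-in for the table resource; the keys and values are made up.

      // Hypothetical sketch of what the tf.HashTableV2 / tf.LookupTableImportV2
      // pair above models; std::unordered_map stands in for the table resource.
      #include <cstdint>
      #include <iostream>
      #include <string>
      #include <unordered_map>
      #include <vector>

      int main() {
        // "HashTableV2": create the empty table (string keys, i64 values).
        std::unordered_map<std::string, int64_t> table;

        // "LookupTableImportV2": import parallel key and value tensors of size 3.
        const std::vector<std::string> keys = {"a", "b", "c"};
        const std::vector<int64_t> values = {10, 20, 30};
        for (size_t i = 0; i < keys.size(); ++i) table[keys[i]] = values[i];

        std::cout << table.at("b") << "\n";  // prints 20
        return 0;
      }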
  2. tensorflow/compiler/mlir/tensorflow/transforms/rewrite_util.cc

    namespace mlir {
    namespace TF {
    
    namespace {
    
    const char kDeviceAttr[] = "device";
    const char kDeviceGpu[] = "GPU";
    
    std::optional<std::string> GetOpDevice(mlir::Operation *op) {
      mlir::StringAttr device = op->getAttrOfType<mlir::StringAttr>(kDeviceAttr);
      if (!device || device.getValue().empty()) {
        return std::nullopt;
      }
      tensorflow::DeviceNameUtils::ParsedName parsed_name;
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 2.9K bytes
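    The excerpt cuts off just before the parsed device name is used. As a self-contained, hypothetical sketch of the same "optional device attribute" pattern (it does not use the real MLIR API; Op and GetDevice are stand-ins), the snippet below treats a missing or empty "device" attribute as std::nullopt, like GetOpDevice does.

      // Hypothetical stand-in for the pattern in rewrite_util.cc: an absent or
      // empty "device" attribute yields std::nullopt.
      #include <iostream>
      #include <map>
      #include <optional>
      #include <string>

      struct Op {
        std::map<std::string, std::string> attrs;  // stand-in for MLIR attributes
      };

      std::optional<std::string> GetDevice(const Op& op) {
        auto it = op.attrs.find("device");
        if (it == op.attrs.end() || it->second.empty()) return std::nullopt;
        return it->second;
      }

      int main() {
        Op gpu_op{{{"device", "/job:localhost/replica:0/task:0/device:GPU:0"}}};
        Op placeless_op{{{"device", ""}}};
        std::cout << GetDevice(gpu_op).value_or("<none>") << "\n";       // GPU name
        std::cout << GetDevice(placeless_op).value_or("<none>") << "\n"; // <none>
        return 0;
      }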
  3. tensorflow/compiler/jit/pjrt_base_device.cc

            "Cannot get device metadata from non-PJRT device \"", device->name(),
            "\". GetMetadata must only be called on a device derived from "
            "PjRtBaseDevice. Either an internal bug has been triggered, or an "
            "XLA-specific op has been placed on the wrong device.");
      }
      return &pjrt_device->metadata_;
    }
    
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 2.5K bytes
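    The error message in this excerpt comes from a checked downcast: metadata lives only on PJRT-backed devices, so the generic device pointer must be verified before use. The sketch below is a simplified, hypothetical rendering of that pattern; the Device, PjRtBaseDevice, and Metadata types here are stand-ins, not the real TensorFlow classes.

      // Hypothetical checked-downcast sketch: GetMetadata succeeds only for
      // devices derived from the PJRT base class.
      #include <iostream>
      #include <string>

      struct Metadata { std::string platform = "cuda"; };

      struct Device {
        virtual ~Device() = default;
        virtual std::string name() const = 0;
      };

      struct PjRtBaseDevice : Device {
        std::string name() const override { return "/device:GPU:0"; }
        Metadata metadata_;
      };

      struct PlainDevice : Device {
        std::string name() const override { return "/device:CPU:0"; }
      };

      const Metadata* GetMetadata(const Device& device) {
        const auto* pjrt_device = dynamic_cast<const PjRtBaseDevice*>(&device);
        if (pjrt_device == nullptr) {
          std::cerr << "Cannot get device metadata from non-PJRT device \""
                    << device.name() << "\"\n";
          return nullptr;
        }
        return &pjrt_device->metadata_;
      }

      int main() {
        PjRtBaseDevice gpu;
        PlainDevice cpu;
        std::cout << (GetMetadata(gpu) ? "ok" : "error") << "\n";  // ok
        std::cout << (GetMetadata(cpu) ? "ok" : "error") << "\n";  // error
        return 0;
      }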
  4. tensorflow/compiler/jit/device_util.h

    class DeviceInfoCache {
     public:
      bool IsGpu(DeviceId device) const { return is_gpu_[device.id()]; }
      bool IsCpu(DeviceId device) const { return is_cpu_[device.id()]; }
    
      absl::string_view GetNameFor(DeviceId device) const {
        return names_[device.id()];
      }
    
      absl::StatusOr<DeviceId> GetIdFor(absl::string_view name);
    
    - Last Modified: Wed May 15 17:18:31 UTC 2024
    - 7.1K bytes
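    The header excerpt shows a cache that interns device names into dense ids so that GPU/CPU checks become simple indexed lookups. Below is a deliberately simplified, hypothetical sketch of that idea; the real DeviceInfoCache uses absl types and TensorFlow's device-name parsing rather than the substring check used here.

      // Hypothetical, simplified device-info cache: names are interned to dense
      // ids and classified once, so IsGpu/IsCpu are plain vector lookups.
      #include <iostream>
      #include <string>
      #include <unordered_map>
      #include <vector>

      class DeviceInfoCache {
       public:
        int GetIdFor(const std::string& name) {
          auto [it, inserted] = ids_.emplace(name, static_cast<int>(names_.size()));
          if (inserted) {
            names_.push_back(name);
            is_gpu_.push_back(name.find("GPU") != std::string::npos);
            is_cpu_.push_back(name.find("CPU") != std::string::npos);
          }
          return it->second;
        }

        bool IsGpu(int id) const { return is_gpu_[id]; }
        bool IsCpu(int id) const { return is_cpu_[id]; }
        const std::string& GetNameFor(int id) const { return names_[id]; }

       private:
        std::unordered_map<std::string, int> ids_;
        std::vector<std::string> names_;
        std::vector<bool> is_gpu_, is_cpu_;
      };

      int main() {
        DeviceInfoCache cache;
        int gpu = cache.GetIdFor("/job:localhost/replica:0/task:0/device:GPU:0");
        int cpu = cache.GetIdFor("/job:localhost/replica:0/task:0/device:CPU:0");
        std::cout << cache.IsGpu(gpu) << " " << cache.IsCpu(cpu) << "\n";  // 1 1
        std::cout << cache.GetNameFor(gpu) << "\n";
        return 0;
      }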
  5. tensorflow/compiler/mlir/tfrt/ir/gpu_ops.td

        Op<TFRT_GPU_Dialect, mnemonic, traits> {
    }
    
    // TODO(b/260267885): We may add a device argument when we want to support
    // GPU MIG.
    def TransferToDeviceOp: Gpu_Op<"transfer_to_device"> {
      let summary = "Transfer a CPU tensor to device.";
    
      let description = [{
        Transfer a CPU tensor to device.
    
        Example:
          %device_tensor = gpurt.transfer_to_device %cpu_tensor
      }];
    
    - Last Modified: Thu Apr 18 15:01:21 UTC 2024
    - 3.3K bytes
  6. tensorflow/compiler/jit/device_util.cc

      };
    
      devices.ForEach([&](jit::DeviceId device) {
        if (device_info_cache.IsGpu(device)) {
          if (maybe_gpu_device) {
            multiple_gpu_devices = is_multiple_devices(device, &maybe_gpu_device);
            if (multiple_gpu_devices) return false;
          } else {
            maybe_gpu_device = device;
          }
        } else if (device_info_cache.IsCpu(device)) {
          if (maybe_cpu_device) {
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 7.8K bytes
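    The loop in this excerpt scans a device set, keeps at most one GPU and one CPU candidate, and aborts as soon as two distinct GPU devices are seen. The sketch below is a hypothetical, standalone rendering of that selection logic; plain ints replace jit::DeviceId and a bool vector replaces DeviceInfoCache.

      // Hypothetical sketch of the "pick a single GPU/CPU device or flag a
      // conflict" loop excerpted from device_util.cc.
      #include <iostream>
      #include <optional>
      #include <vector>

      struct Pick {
        std::optional<int> gpu, cpu;
        bool multiple_gpus = false;
      };

      Pick PickDevices(const std::vector<int>& devices,
                       const std::vector<bool>& is_gpu) {
        Pick pick;
        for (int device : devices) {
          if (is_gpu[device]) {
            if (pick.gpu && *pick.gpu != device) {
              pick.multiple_gpus = true;  // mirrors the early `return false`
              break;
            }
            pick.gpu = device;
          } else if (!pick.cpu) {
            pick.cpu = device;
          }
        }
        return pick;
      }

      int main() {
        // Devices 0 and 2 are GPUs, device 1 is a CPU.
        Pick pick = PickDevices({0, 1, 2}, {true, false, true});
        std::cout << pick.multiple_gpus << "\n";  // 1: two distinct GPUs were seen
        return 0;
      }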
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu_tail_with_tobool_op.mlir

        %1 = "tf.Rank"(%0) {_tpu_replicate = "cluster", device = ""} : (tensor<*xi1>) -> tensor<*xi32>
        %2 = "tf.Range"(%cst_0, %1, %cst_1) {_tpu_replicate = "cluster", _xla_outside_compilation = "0", device = ""} : (tensor<i32>, tensor<*xi32>, tensor<i32>) -> tensor<*xi32>
        %3 = "tf.All"(%0, %2) {_tpu_replicate = "cluster", _xla_outside_compilation = "0", device = "", keep_dims = false} : (tensor<*xi1>, tensor<*xi32>) -> tensor<*xi1>
    - Last Modified: Wed Mar 13 21:23:47 UTC 2024
    - 2.8K bytes
  8. tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.td

    def TFXLADeviceSpecificTransforms : Pass<"tfxla-device-specific-transforms",
                                                "mlir::func::FuncOp"> {
      let summary = "Transforms ops that require device context into device independent TF Ops.";
    
      let description = [{"Transforms device specific ops into device independent"
                        "ops."}];
    
      let options = [
        Option<"device_type_", "device-type", "std::string",
    - Last Modified: Thu Mar 21 17:44:14 UTC 2024
    - 4.3K bytes
  9. tensorflow/compiler/jit/xla_host_recv_device_context.h

                                 StringPiece tensor_name, Device* device,
                                 Tensor* cpu_tensor, StatusCallback done) override;
    
      void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
                                  Tensor* output_tensor,
                                  StatusCallback done) const override {
        done(errors::Internal("device->device copy not implemented."));
      }
    
     private:
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.9K bytes
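    The CopyTensorInSameDevice override in this excerpt reports its result through a StatusCallback rather than a return value, which is how asynchronous device contexts signal completion or failure. The sketch below is a hypothetical, stripped-down illustration of that callback pattern; the Status type here is a stand-in, not TensorFlow's.

      // Hypothetical status-callback sketch: the copy routine reports failure
      // through the callback instead of returning a status.
      #include <functional>
      #include <iostream>
      #include <string>

      struct Status {
        bool ok = true;
        std::string message;
      };

      using StatusCallback = std::function<void(const Status&)>;

      // Mirrors the excerpt: device-to-device copies are simply unimplemented.
      void CopyTensorInSameDevice(StatusCallback done) {
        done(Status{false, "device->device copy not implemented."});
      }

      int main() {
        CopyTensorInSameDevice([](const Status& s) {
          std::cout << (s.ok ? std::string("ok") : "error: " + s.message) << "\n";
        });
        return 0;
      }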
  10. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir

    // CHECK-NEXT: [[o5_chain:%.*]], [[o5:%.*]] = tfrt_fallback_async.executeop.seq([[in_chain]]) key({{[0-9]+}}) cost({{.*}}) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.ReadVariableOp"([[arg4]])
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 7.7K bytes