Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 3 of 3 for on_device_shape (0.18 sec)

  1. tensorflow/compiler/jit/xla_tpu_device.cc

            "XlaTensor is expected to have device memory allocated when "
            "computing padded shape");
      }
    
      const xla::Shape& on_device_shape =
          xla_tensor->shaped_buffer().on_device_shape();
    
      StatusHelper status;
      ApiConverter::StackHelper<XLA_Shape> se_shape(on_device_shape);
      ApiConverter::StackHelper<XLA_Shape> tpu_shape;
      stream_executor::tpu::ExecutorApiFn()->XlaShapeToTpuPaddedShapeFn(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_launch_util.cc

      // Computation output should always be a tuple.
      VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();
      VLOG(2) << "Result tuple shape (on device): "
              << output.on_device_shape().DebugString();
      CHECK_EQ(ctx->num_outputs(), compilation_result->outputs.size());
    
      // If the on-host-shape isn't a tuple, create a new single-element tuple
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_device.cc

      if (xla_tensor == nullptr) {
        return TensorShapeToXLAShape(tensor.dtype(), tensor.shape(), shape);
      }
    
      const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();
      *shape = shaped_buffer.on_device_shape();
      return absl::OkStatus();
    }
    
    // Caches a XlaDeviceAllocator per <backend, device ordinal> pair. A
    // XlaDeviceAllocator is created on demand and is associated with a
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
Back to top