Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for xla_tensor (0.12 sec)

  1. tensorflow/compiler/jit/xla_device_context.cc

      }
    
      XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
      xla_tensor->WaitForDefinitionEventOnStream(device_to_host_stream.get());
    
      // Transfer manager requires the shape of the shaped buffer to be the same as
      // literal shape except for the layout.  Set the literal to use xla_tensor's
      // shape as it is derived from the cpu_tensor's shape using
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_tpu_device.cc

      const tensorflow::XlaTensor* xla_tensor =
          tensorflow::XlaTensor::FromTensor(&tensor);
      if (xla_tensor == nullptr) {
        return errors::InvalidArgument(
            "Expected an XlaTensor when computing padded shape");
      }
    
      if (!xla_tensor->has_shaped_buffer()) {
        return errors::InvalidArgument(
            "XlaTensor is expected to have device memory allocated when "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_launch_util.cc

            ctx->allocate_temp(output_dtype, output_shape, &output_tensor));
        if (output_tensor.TotalBytes() > 0) {
          XlaTensor* xla_tensor = XlaTensor::FromTensor(&output_tensor);
          TF_RET_CHECK(xla_tensor);
          xla_tensor->set_shaped_buffer(output.TakeSubTree({output_num}));
          if (use_multiple_streams) {
            xla_tensor->ResetDefinitionEvent(definition_event, stream);
          }
        }
        return output_tensor;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/BUILD

        ] + if_static([
            "//tensorflow/core/common_runtime:copy_tensor",
            ":jit_compilation_passes",
        ]),
        alwayslink = 1,
    )
    
    cc_library(
        name = "xla_tensor",
        srcs = ["xla_tensor.cc"],
        hdrs = ["xla_tensor.h"],
        visibility = [":friends"],
        deps = [
            "//tensorflow/compiler/tf2xla:common",
            "//tensorflow/core:core_cpu_internal",
            "//tensorflow/core:framework",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 00:41:19 UTC 2024
    - 61.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_device.cc

    // Computes the on-device (padded) XLA shape for `tensor`, writing it to
    // `*shape`. Default implementation of a device's PaddedShapeFn.
    //
    // - If `tensor` is not backed by an XlaTensor (FromTensor returns null),
    //   the shape is derived directly from the tensor's dtype and host shape.
    // - Otherwise the shape is taken from the XlaTensor's shaped buffer, whose
    //   on-device shape already reflects any device-specific padding/layout.
    Status DefaultPaddedShapeFn(const Tensor& tensor, xla::Shape* shape) {
      const tensorflow::XlaTensor* xla_tensor =
          tensorflow::XlaTensor::FromTensor(&tensor);
      if (xla_tensor == nullptr) {
        // Plain (non-XLA) tensor: no device buffer to consult; convert the
        // host TensorShape/dtype to an XLA shape instead.
        return TensorShapeToXLAShape(tensor.dtype(), tensor.shape(), shape);
      }

      // NOTE(review): this assumes the XlaTensor already has a shaped buffer
      // allocated (shaped_buffer() is unchecked here, unlike the
      // has_shaped_buffer() guard seen in xla_tpu_device.cc) — confirm callers
      // guarantee that.
      const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();
      *shape = shaped_buffer.on_device_shape();
      return absl::OkStatus();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_platform_info.cc

        //
        // Importantly we can't use ctx->device()->GetAllocator() as the allocator
        // (which xla_allocator above uses) as on an XlaDevice, this is a dummy
        // allocator that returns XlaTensor objects. The XlaCompiler needs a real
        // allocator to allocate real buffers.
        platform_id = xla_device_metadata->platform()->id();
        custom_allocator =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
    - Viewed (0)
Back to top