- Sort Score
- Results per page: 10
- Languages All
Results 1 - 3 of 3 for GetCompilerIr (1.07 sec)
-
tensorflow/compiler/jit/get_compiler_ir.h
// Returns the IR format of the selected stage for a given function `func_name` // using library runtime `runtime` on a device `dev` with given // `input_arg_shape_and_dtype` and `input_handles`. absl::StatusOr<std::string> GetCompilerIr( IrExportStage stage, ProcessFunctionLibraryRuntime* pflr, absl::string_view func_name, Device* dev, EagerContext* context, absl::Span<const ArgShapeAndDType> input_arg_shape_and_dtype,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
stage == IrExportStage::HLO_SERIALIZED; }; // TODO(b/238830423): support GetCompilerIr on TFRT TPU device for stages // that requires compilation from HLO to executable. if (device_type != DEVICE_CPU && stream == nullptr && !is_tfrt_tpu_supported_stage(stage)) { return absl::InternalError( "GetCompilerIr with requested stage is not supported on this device."); } return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
} // TFRT-TPU is used if device type is `DEVICE_TPU` and platform_info does not // have `xla_device_metadata`. This is used for TFRT-TPU when // BuildXlaDeviceCompiler() is called in GetCompilerIr(). Currently only // lowering to HLO is needed there and xla::LocalClient doesn't support // building the executable for TFRT-TPU and hence, is set to nullptr here. if (platform_info.device_type() == DEVICE_TPU) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0)