- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 11 for xla_compile (0.16 sec)
-
tensorflow/compiler/jit/kernels/xla_ops.cc
OP_REQUIRES_OK(ctx, args_and_variables_snapshot.status()); const std::vector<XlaCompiler::Argument>& args = args_and_variables_snapshot->first; variables_snapshot = std::move(args_and_variables_snapshot->second); // Do not alias resource updates as locking variables in XlaCompile and // unlocking them in XlaRun may lead to deadlocks. Status status; if (use_pjrt) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc
tensorflow::OpRegistry::Global(), flib); tensorflow::XlaCompiler::Options options; options.device_type = tensorflow::DeviceType(kArbitraryDeviceName); options.client = client; options.flib_def = flib_def.get(); tensorflow::XlaCompiler compiler(options); std::unique_ptr<tensorflow::Graph> graph_copy(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc
#include "absl/strings/str_format.h" #include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h" #include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/client/client_library.h" #include "xla/stream_executor/platform_manager.h" #include "tensorflow/core/lib/monitoring/cell_reader.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 23:59:33 UTC 2024 - 16.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
bool is_updated_resource_variable = is_resource_variable && absl::c_any_of(compilation_result->resource_updates, [&](const XlaCompiler::ResourceUpdate& update) { // XlaCompiler records `arg_num` (instead of kernel // parameters) in `resource_updates`. return update.input_index == arg_num &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
} // Allow using Device API (PjRt) for `device_type` in the XlaCompile and // XlaRun ops. Please note that `enabled_for_compile_and_run_` needs to be // true in addition to the `device_type` being allowed in order to use the // Device API for single device compilation and execution in the XlaCompile // and XlaRun ops. void AllowForDeviceInXlaCompileAndRun(const DeviceType& device_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h" #include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h" #include "tensorflow/compiler/tf2xla/layout_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/client/compile_only_client.h" #include "xla/hlo/ir/hlo_input_output_alias_config.h" #include "xla/mlir_hlo/mhlo/IR/register.h" #include "xla/shape.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
// operations). Such a cluster can fail compilation (in way that // MarkForCompilation could not have detected) if the CPU JIT is not // linked in. // // So bail out of _XlaCompile in this case, and let the executor handle // the situation for us. const Status& status = compiler_for_platform.status(); if (status.code() == error::NOT_FOUND) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc
} return root_tuple; } LogicalResult Tf2XlaRewriter::PrepareParams() { // XlaCompiler within the context is only used by the functional ops to // compile functions. We are not handling those at the moment so // XlaCompiler is not required. context_ = new tensorflow::XlaContext(/*compiler=*/nullptr, &xla_builder_, /*graph=*/nullptr);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 18.9K bytes - Viewed (0) -
tensorflow/compiler/aot/codegen.cc
Status GenResultMethods(const tf2xla::Config& config, const xla::ProgramShapeProto& ps, string* methods) { if (ps.result().element_type() != xla::TUPLE) { // The XlaCompiler we use to build the xla computation always generates a // tuple result, and we rely on this to simplify code generation. return errors::Internal("codegen requires the XLA result to be a tuple"); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 36.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
effects.emplace_back(MemoryEffects::Write::get(), ResourceEffects::_XlaRun::get()); // Conservatively mark resource handles as read and write, as without // analyzing _XlaCompile, there is not sufficient information to determine // effects on resources. for (Value value : getArgs()) { MarkResourceAsReadAndWrite(value, effects); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0)