- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 60 for compilation_result (0.58 sec)
-
tensorflow/compiler/jit/device_compilation_cache_test.cc
auto compilation_result2 = std::make_unique<XlaCompiler::CompilationResult>(); auto executable1 = std::make_unique<FakeExecutable>("foo_exe"); auto executable2 = std::make_unique<FakeExecutable>("bar_exe"); cache->Store(key1, DeviceCompileState::kCompiled, errors::InvalidArgument("Invalid argument."), std::move(compilation_result1), std::move(executable1));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compiler_test.cc
const XlaCompiler::CompilationResult* compilation_result = nullptr; xla::LocalExecutable* xla_executable = nullptr; TF_EXPECT_OK(xla_device_compiler->CompileIfNeeded( options, fn, args, XlaCompiler::CompileOptions{}, DeviceCompileMode::kStrict, profiler_, &compilation_result, &xla_executable)); EXPECT_TRUE(compilation_result != nullptr); EXPECT_TRUE(xla_executable != nullptr);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compilation_cache.h
executable.get()); int64_t hlo_module_size = 0; if (compilation_result != nullptr && compilation_result->computation != nullptr) { hlo_module_size = compilation_result->computation->proto().ByteSizeLong(); } return absl::StrCat(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 12 08:49:52 UTC 2023 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
} void RunInThreadPoolIfCollectivesPresent( const XlaCompiler::CompilationResult& compilation_result, std::function<void()> execution_fn) { // If we are using collectives, we need to run in a separate threadpool. if (compilation_result.collective_info.has_value()) { GetOrCreateThreadPoolForCollective(*compilation_result.collective_info) ->Schedule(execution_fn); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util_test.cc
DeviceCompileMode::kStrict, /*has_ref_vars=*/true, /*may_alias_resource_update=*/true, device_setup.flr(), &resource_mgr, &compilation_result, &pjrt_client, &pjrt_executable)); EXPECT_TRUE(compilation_result != nullptr); EXPECT_TRUE(pjrt_executable != nullptr); EXPECT_TRUE(pjrt_client != nullptr); } TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutableWithOpKernelContext) { DeviceSetup device_setup;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 21 23:21:57 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
OpKernelContext* ctx, const XlaCompiler::CompilationResult& compilation_result, int missing_ctx_input_prefix) { std::vector<VariableInfo> out; out.reserve(compilation_result.resource_updates.size()); for (int i = 0; i < compilation_result.resource_updates.size(); ++i) { const XlaCompiler::ResourceUpdate& write = compilation_result.resource_updates[i];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc
arg_shapes, arg_core_mapping, per_core_arg_shapes, client, compilation_result.get())); DumpHloCompilationResult("legalize_tf_fallback.hlo", compilation_result.get()) .IgnoreError(); return *compilation_result; } auto combined_bridge_status = internal::LegalizeTfToHlo(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 00:40:46 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util.cc
const std::vector<XlaCompiler::Argument>& args, DeviceCompileMode compile_mode, bool has_ref_vars, bool may_alias_resource_update, FunctionLibraryRuntime* flr, ResourceMgr* rm, const XlaCompiler::CompilationResult** compilation_result, xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) { PjRtDeviceCompiler* pjrt_device_compiler; DeviceCompilationProfiler* profiler;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compiler_disable_test.cc
} DisableXlaCompilation(); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); DeviceType device_type = DeviceType(DEVICE_CPU_XLA_JIT); const XlaCompiler::CompilationResult* compilation_result; xla::LocalExecutable* executable; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.h
const XlaCompiler::CompilationResult* compilation_result, int output_num); // Converts input tensors and variables which are parameters of the // XlaComputation into PjRtBuffers to be fed as input to the // PjRtLoadedExecutable. // // Assumes that the first `num_missing_prefix_ctx_inputs` inputs to the // compilation_result are missing in `inputs` and adjusts indexing into `inputs` // accordingly.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes - Viewed (0)