- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 16 for EagerContext (0.16 sec)
-
tensorflow/compiler/jit/get_compiler_ir.h
#include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/core/platform/statusor.h" namespace tensorflow { class ProcessFunctionLibraryRuntime; class Device; class Tensor; class TensorHandle; class EagerContext; enum class IrExportStage { HLO, HLO_NO_METADATA, HLO_SERIALIZED, OPTIMIZED_HLO, OPTIMIZED_HLO_SERIALIZED, OPTIMIZED_HLO_PROTO_SERIALIZED, OPTIMIZED_HLO_DOT };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
} return absl::OkStatus(); } absl::StatusOr<std::vector<XlaCompiler::Argument>> PrepareXlaCompilerArgs( FunctionLibraryRuntime* flr, const NameAttrList& function, EagerContext* context, Device* dev, absl::Span<const ArgShapeAndDType> input_arg_shape_and_dtype, absl::Span<const TensorHandle* const> input_handles, CompilerArgSource compiler_arg_source) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/c/eager/c_api.cc
auto r = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>( new tensorflow::IntraProcessRendezvous(device_mgr.get())); tensorflow::EagerContext* eager_context = new tensorflow::EagerContext( opts->session_options.options, static_cast<tensorflow::ContextDevicePlacementPolicy>( opts->device_placement_policy), opts->async, device_mgr.release(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/integration/node_expansion_pass.cc
LOG_FIRST_N(INFO, 1) << "Run Node Expansion Passes"; // Get the FunctionDef and insert that into the context const NodeDef& ndef = orig_op->MutableAttrs()->BuildNodeDef(); auto& ctx = orig_op->EagerContext(); Fprint128 cache_key = orig_op->MutableAttrs()->CacheKey(orig_op->DeviceName()); // Include soft placement policy in cache key since the placement strategy // can change and thus affect which kernel is picked.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Feb 25 16:22:36 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/saved_variable_loading_test.cc
options, "/job:localhost/replica:0/task:0", &devices)); device_mgr_ = absl::make_unique<StaticDeviceMgr>(std::move(devices)); ctx_ = testing::CreateTestingEagerContext(device_mgr_.get()); } EagerContext* context() { return ctx_.get(); } private: std::unique_ptr<StaticDeviceMgr> device_mgr_; EagerContextPtr ctx_; }; // Sanity check that constructing a tensorflow::Variable from a SavedVariable
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 27 09:34:33 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/ops/restore_ops_test.cc
} class RestoreOpsTest : public ::testing::Test { public: RestoreOpsTest() : device_mgr_(testing::CreateTestingDeviceMgr()), ctx_(testing::CreateTestingEagerContext(device_mgr_.get())) {} EagerContext* context() { return ctx_.get(); } private: std::unique_ptr<StaticDeviceMgr> device_mgr_; EagerContextPtr ctx_; }; // One way of obtaining a checkpoint's tensor names is:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 14 19:16:58 UTC 2023 - 4.2K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/test_utils.cc
return std::make_unique<StaticDeviceMgr>( DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0")); } EagerContextPtr CreateTestingEagerContext(DeviceMgr* device_mgr) { return EagerContextPtr(new EagerContext( SessionOptions(), tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, /* async= */ false, device_mgr, /* device_mgr_owned= */ false, /* rendezvous= */ nullptr,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Sep 08 20:13:32 UTC 2023 - 5.8K bytes - Viewed (0) -
tensorflow/c/c_api_experimental.cc
auto* gpu_options = config.mutable_gpu_options(); gpu_options->set_allow_growth(gpu_memory_allow_growth); (*config.mutable_device_count())["CPU"] = num_cpu_devices; // TODO(b/113217601): This is needed for EagerContext::runner_ to use a // threadpool, so that we avoid the possibility of running the runner_ in the // threadpool of GPU event mgr, as that can trigger more callbacks to be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 03:35:10 UTC 2024 - 29.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_executor.cc
// Build an ExecutionContext from the HostContext. auto builder = RequestContextBuilder(host_context_.get(), &resource_context_); // Get tensorflow::EagerContext for the kernel fallback. auto* eager_context_resource = resource_context_ .GetOrCreateResource<tensorflow::tfd::EagerContextResource>( tensorflow::tfd::kEagerContextResourceName);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 00:18:59 UTC 2024 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_ops.td
let summary = "eager context initialization operation"; let description = [{ The "tfd.init_eager_context" operation takes an input chain, creates and initializes the TF EagerContext and returns an output chain. Example: %c1 = "tfd.init_eager_context"(%c0): (!tfrt.chain) -> !tfrt.chain }]; let arguments = (ins TFRT_ChainType); let results = (outs TFRT_ChainType); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 23 19:35:12 UTC 2023 - 5.9K bytes - Viewed (0)