- Sort: Score
- Results: 10 per page
- Languages: All
Results 1 - 6 of 6 for __xla_ (0.09 sec)
-
tensorflow/compiler/aot/tfcompile.bzl
# Rule that runs tfcompile to produce the header and object file. header_file = name + ".h" # The XLA backends morph kernel name prefix __ that is not in the form of # __xla_. ep = ("__xla_" + native.package_name() + "__" + name).replace("/", "_") if type(tfcompile_flags) == type(""): flags = tfcompile_flags else: flags = " ".join([
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 19:18:08 UTC 2024 - 21.8K bytes - Viewed (0) -
tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc
EXPECT_NE(add_node, nullptr); add_node->AddAttr(kXlaConnectedToXlaComputationAttrName, "cluster"); add_node->AddAttr(kXlaConnectedFromXlaComputationAttrName, "cluster"); RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", ""); std::vector<OutputTensor> arg_source_tensors; NodeDef call_node_def; call_node_def.set_op("0"); TF_CHECK_OK( rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 41K bytes - Viewed (0) -
tensorflow/compiler/jit/build_xla_ops_pass_test.cc
#include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { class BuildXlaOpsTest : public ::testing::Test { protected: void SetUp() override { // This is needed to register the XLA_* devices. CHECK(DeviceFactory::AddDevices( SessionOptions(), "/job:localhost/replica:0/task:0", &devices_) .ok()); } private: std::vector<std::unique_ptr<Device>> devices_; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
bool tf_xla_compile_on_demand; // Enables "XLA" devices if this flag is set. bool tf_xla_enable_xla_devices; }; // Flags common to the _Xla* ops and their kernels. struct XlaOpsCommonFlags { // If true, _XlaCompile always refuses to compile the cluster, which means the // XLA clusters always run in the TF executor. Defaults to false.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.cc
"autoclustering ops are compiled one by one just-in-time."), Flag("tf_xla_enable_xla_devices", &device_flags->tf_xla_enable_xla_devices, "Generate XLA_* devices, where placing a computation on such a " "device" "forces compilation by XLA. Deprecated."), Flag("tf_xla_always_defer_compilation",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
// second XLA cluster nor the output from the first XLA cluster are supported // because of (2). // // TODO(b/113100872): This can be fixed if the TensorFlow representation for // TensorArray and Stack on the XLA_{C|G}PU devices were the same in XLA; then // (2) would no longer hold. if (n.assigned_device_name().empty()) { *ignore = false; return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0)