- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 28 for XlaLaunch (0.26 sec)
-
tensorflow/compiler/jit/xla_device_ops.h
namespace tensorflow { // Dummy OpKernel, used for kernels assigned to an XLA device that should be // compiled. Should never be called at runtime since such ops should be // rewritten to a XlaLaunch op. If it is called, it means the placer placed an // operator on an XLA device but the compiler did not compile it. class XlaDeviceDummyOp : public OpKernel { public: explicit XlaDeviceDummyOp(OpKernelConstruction* ctx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 23 19:28:25 UTC 2021 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_util.h
const NodeDef& node_def, absl::Span<const XlaArgument> args, absl::Span<const DataType> result_types); // Checks if single device compilation and execution with PJRT is enabled for // `device_type` in either the XlaLaunch op or the XlaCompileOnDemand op. bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type); // Gets the resource name of the PjRt DeviceCompiler for `device_type`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.h
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { // An OpKernel that compiles an op to an XLA computation and runs it. Unlike // XlaLaunch this doesn't rely on any rewrites of the graphdef - it will run a // vanilla TensorFlow op as long as the bridge supports it. class XlaCompileOnDemandOp : public OpKernel { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/jit/jit_compilation_pass_registration.cc
#include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { // PRE_PLACEMENT passes: // EncapsulateXlaComputationsPass rewrites computations generated by the // xla.compile() Python code into XlaLaunch nodes. REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 36, EncapsulateXlaComputationsPass); // from // tensorflow/compiler/tf2xla/functionalize_control_flow_pass_registration.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 11 21:53:08 UTC 2023 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
public: // Allow using Device API (PjRt) for `device_type` in the XlaLaunch op. // Please note that `enabled_for_xla_launch_` needs to be true in addition // to the `device_type` being allowed in order to use the Device API for // single device compilation and execution in the XlaLaunch op. void AllowForDeviceInXlaLaunch(const DeviceType& device_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_subgraphs_pass.h
std::unique_ptr<Graph>* graph_out, FunctionLibraryDefinition* library); // The attribute that marks function calls produced by the encapsulate // subgraphs pass and that should in turn be compiled via XlaLaunch operators. extern const char* const kXlaCompiledKernelAttr; // Does `node` have the kXlaCompiledKernelAttr attribute? bool IsXlaCompiledKernel(const Node& node);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 12 03:59:36 UTC 2022 - 4.9K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
if (ctx->has_input(i) || ctx->has_input(++i)) { ctx->set_output(0, ctx->input(i)); } } REGISTER_KERNEL_BUILDER(Name("XlaLaunch").Device(DEVICE_CPU), XlaLocalLaunchOp); REGISTER_KERNEL_BUILDER(Name("XlaLaunchV2").Device(DEVICE_CPU), XlaLaunchV2Op); REGISTER_KERNEL_BUILDER(Name("XlaLaunch") .Device(DEVICE_GPU) .HostMemory("constants")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/side-effect-analysis-test.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 20 04:39:18 UTC 2023 - 129.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir
%unused = "tf.TestAsyncIdentity"(%x) {__op_key = 0: i32, T = i32} : (tensor<i32>) -> tensor<i32> // CHECK: mlrt.await_all_control [[unused]] return %x : tensor<i32> } // ----- // Test for XlaLaunch func.func private @xla_func_0(%arg0: tensor<1x3xf32>, %arg1: tensor<1x3xf32>) -> tensor<1x3xf32> attributes {tf._XlaMustCompile = true, tf._noinline = true, tf._original_func_name = "should_not_be_used"} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 20:44:15 UTC 2024 - 24.7K bytes - Viewed (0)