- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 28 for XlaLaunch (0.11 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/xla_rewrite.mlir
// CHECK: "tf.XlaLaunch"(%arg1, %arg0) <{function = @func_with_resources, operandSegmentSizes = array<i32: 0, 1, 1>}> : (tensor<i32>, tensor<!tf_type.resource>) -> tensor<i32> %0 = "tf_device.cluster_func"(%arg0, %arg1) {func = @func_with_resources} : (tensor<!tf_type.resource>, tensor<i32>) -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_xla_computations_pass.h
// functions contain the computations to be passed to XlaLaunch. During // encapsulation, we sort the arguments into the order expected by // XlaLaunch. static Status Encapsulate(std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def); // b) we rewrite the function calls generated in phase (a) into XlaLaunch // operators. We also convert the XlaClusterOutput output nodes of the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_side_effects.h
}; struct GlobalIterId : public ::mlir::SideEffects::Resource::Base<GlobalIterId> { StringRef getName() final { return "GlobalIterId"; } }; struct XlaLaunch : public ::mlir::SideEffects::Resource::Base<XlaLaunch> { StringRef getName() final { return "XlaLaunch"; } }; struct WriteTrainingPredictions : public ::mlir::SideEffects::Resource::Base<WriteTrainingPredictions> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 26 18:45:40 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc
// the arguments into the order expected by XlaLaunch computations: // 1) arguments // 2) resource variable arguments // See the documentation of EncapsulateSubgraphsInFunctions for the meaning // of the arguments. // // TODO(b/113166435): Ordering constraints on XlaLaunch op can be relaxed. Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 15.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.h
// xla_device_metadata_ lives in the tensorflow::DeviceBase in which the // XlaLaunch/_XlaCompile/_XlaRun op is placed and thus does not die before the // XlaLaunch/_XlaCompile/_XlaRun OpKernel. const XlaDevice::Metadata* xla_device_metadata_; // pjrt_device_metadata_ lives in tensorflow::PjRtBaseDevice in which the // XlaLaunch/XlaCompileOnDemand op is placed and thus does not die before the // op kernel.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/runtime_lowering_gpu.mlir
// CHECK-LABEL: @converts_cluster func.func @converts_cluster() { // CHECK: "tf.XlaLaunch"()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 13 17:41:44 UTC 2023 - 840 bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/xla_launch_xla_reduce_window.mlir
%1 = "tf.ReadVariableOp"(%0) {device = "/device:CPU:0"} : (tensor<!tf_type.resource<tensor<f32>>>) -> tensor<f32> %2 = "tf.XlaLaunch"(%arg0, %1) {_noinline = true, _xla_compile_device_type = "GPU", device = "/device:GPU:0", function = @xla_func_0, operandSegmentSizes = array<i32: 0, 2, 0>} : (tensor<7xf32>, tensor<f32>) -> tensor<10xf32> func.return %2 : tensor<10xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 14 15:35:49 UTC 2023 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td
This pass rewrites `tf.PartitionedCall` and `tf.StatefulPartitionedCall` operations with `_xla_compile_device_type` attribute in a `tf_device.cluster` into `tf.XlaLaunch` operations. This makes the attached function execute with XLA. `tf.XlaLaunch` requires resource-type arguments come at the end, so this pass rewrites the called function if necessary. This pass assumes there are no nested `tf_device.cluster`s so we don't end
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/jit/ops/xla_ops.cc
#include "absl/status/status.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::InferenceContext; REGISTER_OP("XlaLaunch") .Input("constants: Tconstants") .Attr("Tconstants: list(type) >= 0") .Input("args: Targs") .Attr("Targs: list(type) >= 0") .Input("resources: Nresources * resource")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 09:08:06 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/xla_launch.mlir
%1 = "tf.ReadVariableOp"(%0) {device = "/device:CPU:0"} : (tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32> %2 = "tf.XlaLaunch"(%arg0, %1) {_noinline = true, _xla_compile_device_type = "GPU", device = "/device:GPU:0", function = @xla_func_0, operandSegmentSizes = array<i32: 0, 2, 0>} : (tensor<1x3xf32>, tensor<1x3xf32>) -> tensor<1x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 14 15:35:49 UTC 2023 - 1.6K bytes - Viewed (0)