Results 1 - 10 of 13 for XlaLaunch (0.14 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/xla_rewrite.mlir

        // CHECK: "tf.XlaLaunch"(%arg1, %arg0) <{function = @func_with_resources, operandSegmentSizes = array<i32: 0, 1, 1>}> : (tensor<i32>, tensor<!tf_type.resource>) -> tensor<i32>
        %0 = "tf_device.cluster_func"(%arg0, %arg1) {func = @func_with_resources} : (tensor<!tf_type.resource>, tensor<i32>) -> tensor<i32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 2.8K bytes
  2. tensorflow/compiler/jit/encapsulate_xla_computations_pass.h

      //    functions contain the computations to be passed to XlaLaunch. During
      //    encapsulation, we sort the arguments into the order expected by
      //    XlaLaunch.
      static Status Encapsulate(std::unique_ptr<Graph>* graph,
                                FunctionLibraryDefinition* flib_def);
    
      // b) we rewrite the function calls generated in phase (a) into XlaLaunch
      //    operators. We also convert the XlaClusterOutput output nodes of the
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 3.6K bytes
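
A hedged sketch of driving the two phases described in the header comments above, assuming the class in encapsulate_xla_computations_pass.h is EncapsulateXlaComputationsPass and that phase (b) is exposed as a BuildXlaLaunchOps entry point (an assumption; only Encapsulate's signature appears in the snippet):

      #include <memory>
      #include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h"

      tensorflow::Status EncapsulateAndRewrite(
          std::unique_ptr<tensorflow::Graph>* graph,
          tensorflow::FunctionLibraryDefinition* flib_def) {
        // Phase (a): pull each XLA cluster into a function in `flib_def`,
        // sorting arguments into the order XlaLaunch expects.
        tensorflow::Status status =
            tensorflow::EncapsulateXlaComputationsPass::Encapsulate(graph,
                                                                    flib_def);
        if (!status.ok()) return status;
        // Phase (b): rewrite the generated function calls into XlaLaunch
        // operators (entry-point name assumed, see note above).
        return tensorflow::EncapsulateXlaComputationsPass::BuildXlaLaunchOps(
            graph->get());
      }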
  3. tensorflow/compiler/mlir/tensorflow/ir/tf_side_effects.h

    };
    
    struct GlobalIterId : public ::mlir::SideEffects::Resource::Base<GlobalIterId> {
      StringRef getName() final { return "GlobalIterId"; }
    };
    
    struct XlaLaunch : public ::mlir::SideEffects::Resource::Base<XlaLaunch> {
      StringRef getName() final { return "XlaLaunch"; }
    };
    
    struct WriteTrainingPredictions
        : public ::mlir::SideEffects::Resource::Base<WriteTrainingPredictions> {
    - Last Modified: Tue Dec 26 18:45:40 UTC 2023
    - 4.7K bytes
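
The snippet above shows the CRTP pattern tf_side_effects.h uses for side-effect resources: each struct names itself as the template argument of the base and returns a unique name. A minimal sketch of declaring one more resource in the same style ("MyPassResource" is a hypothetical name, not part of the header):

      // Assumes the same context as tf_side_effects.h: the MLIR
      // SideEffects CRTP base and an in-scope StringRef.
      struct MyPassResource
          : public ::mlir::SideEffects::Resource::Base<MyPassResource> {
        StringRef getName() final { return "MyPassResource"; }
      };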
  4. tensorflow/compiler/jit/xla_platform_info.h

      // xla_device_metadata_ lives in the tensorflow::DeviceBase in which the
      // XlaLaunch/_XlaCompile/_XlaRun op is placed and thus does not die before the
      // XlaLaunch/_XlaCompile/_XlaRun OpKernel.
      const XlaDevice::Metadata* xla_device_metadata_;
    
      // pjrt_device_metadata_ lives in tensorflow::PjRtBaseDevice in which the
      // XlaLaunch/XlaCompileOnDemand op is placed and thus does not die before the
      // op kernel.
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 7.2K bytes
  5. tensorflow/compiler/mlir/tfrt/tests/runtime_lowering_gpu.mlir

      // CHECK-LABEL: @converts_cluster
      func.func @converts_cluster() {
        // CHECK: "tf.XlaLaunch"()
    - Last Modified: Fri Oct 13 17:41:44 UTC 2023
    - 840 bytes
  6. tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/xla_launch_xla_reduce_window.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/device:CPU:0"} : (tensor<!tf_type.resource<tensor<f32>>>) -> tensor<f32>
      %2 = "tf.XlaLaunch"(%arg0, %1) {_noinline = true, _xla_compile_device_type = "GPU", device = "/device:GPU:0", function = @xla_func_0, operandSegmentSizes = array<i32: 0, 2, 0>} : (tensor<7xf32>, tensor<f32>) -> tensor<10xf32>
      func.return %2 : tensor<10xf32>
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 1.6K bytes
  7. tensorflow/compiler/jit/ops/xla_ops.cc

    #include "absl/status/status.h"
    #include "tensorflow/core/framework/op.h"
    #include "tensorflow/core/framework/shape_inference.h"
    
    namespace tensorflow {
    
    using shape_inference::InferenceContext;
    
    REGISTER_OP("XlaLaunch")
        .Input("constants: Tconstants")
        .Attr("Tconstants: list(type) >= 0")
        .Input("args: Targs")
        .Attr("Targs: list(type) >= 0")
        .Input("resources: Nresources * resource")
    - Last Modified: Sat Apr 06 09:08:06 UTC 2024
    - 4.5K bytes
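
The registration above uses TensorFlow's op-registration DSL: a "list(type)" attr declares a variadic input of mixed types, and "Nresources * resource" declares N inputs of a single type. A minimal sketch of the same DSL for a hypothetical op ("SketchLaunch" and its signature are illustrative, not part of TensorFlow):

      #include "tensorflow/core/framework/op.h"
      #include "tensorflow/core/framework/shape_inference.h"

      REGISTER_OP("SketchLaunch")
          .Input("args: Targs")                       // variadic, mixed types
          .Attr("Targs: list(type) >= 0")
          .Input("resources: Nresources * resource")  // N inputs of one type
          .Attr("Nresources: int >= 0")
          .Output("results: Tresults")
          .Attr("Tresults: list(type) >= 0")
          .SetIsStateful()
          .SetShapeFn(tensorflow::shape_inference::UnknownShape);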
  8. tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/xla_launch.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/device:CPU:0"} : (tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
      %2 = "tf.XlaLaunch"(%arg0, %1) {_noinline = true, _xla_compile_device_type = "GPU", device = "/device:GPU:0", function = @xla_func_0, operandSegmentSizes = array<i32: 0, 2, 0>} : (tensor<1x3xf32>, tensor<1x3xf32>) -> tensor<1x3xf32>
    - Last Modified: Mon Aug 14 15:35:49 UTC 2023
    - 1.6K bytes
  9. tensorflow/compiler/jit/xla_compile_util.h

        const NodeDef& node_def, absl::Span<const XlaArgument> args,
        absl::Span<const DataType> result_types);
    
    // Checks if single device compilation and execution with PJRT is enabled for
    // `device_type` in either the XlaLaunch op or the XlaCompileOnDemand op.
    bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type);
    
    // Gets the resource name of the PjRt DeviceCompiler for `device_type`.
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 2.4K bytes
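
A hedged usage sketch of the query declared above (the DeviceType string constructor is standard TensorFlow; wrapping the call in a helper is illustrative):

      #include "tensorflow/compiler/jit/xla_compile_util.h"
      #include "tensorflow/core/framework/types.h"

      // Returns true when single-device compilation/execution for GPU goes
      // through PJRT, per the declaration in xla_compile_util.h.
      bool UsesPjRtForGpu() {
        return tensorflow::UsePjRtForSingleDeviceCompilation(
            tensorflow::DeviceType("GPU"));
      }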
  10. tensorflow/compiler/jit/xla_compile_on_demand_op.h

    #include "tensorflow/core/framework/types.h"
    #include "tensorflow/core/lib/core/status.h"
    
    namespace tensorflow {
    
    // An OpKernel that compiles an op to an XLA computation and runs it. Unlike
    // XlaLaunch this doesn't rely on any rewrites of the graphdef - it will run a
    // vanilla TensorFlow op as long as the bridge supports it.
    class XlaCompileOnDemandOp : public OpKernel {
     public:
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.2K bytes
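
XlaCompileOnDemandOp subclasses OpKernel, so it plugs into the normal kernel dispatch path rather than requiring graph rewrites. A minimal sketch of that interface ("SketchOp" and its body are illustrative; the real kernel compiles the op with XLA instead of forwarding inputs):

      #include "tensorflow/core/framework/op_kernel.h"

      class SketchOp : public tensorflow::OpKernel {
       public:
        explicit SketchOp(tensorflow::OpKernelConstruction* ctx)
            : tensorflow::OpKernel(ctx) {}

        // Compute() is the hook XlaCompileOnDemandOp overrides to compile
        // and run the op; this stand-in just forwards its first input.
        void Compute(tensorflow::OpKernelContext* ctx) override {
          ctx->set_output(0, ctx->input(0));
        }
      };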