Results 21 - 28 of 28 for XlaLaunch (0.17 sec)

  1. tensorflow/compiler/jit/xla_launch_util.h

                                  int device_ordinal, bool allocate_xla_tensors,
                                  bool use_multiple_streams);
    
      // Builds a XlaCompiler::Argument vector from the arguments to an XlaLaunch
      // op.
      // Precondition: variables in `variable_args` are locked.
      static absl::StatusOr<std::vector<XlaCompiler::Argument>>
      BuildXlaCompilerArguments(absl::Span<int const> must_be_constant_idxs,
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 11.8K bytes
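
    The precondition in the comment above fixes a call order for this helper: the resource variables must be locked before the argument vector is built. A minimal sketch of that order follows; it assumes (the excerpt does not show this) that the enclosing class is XlaComputationLaunchContext and that the parameters truncated after `must_be_constant_idxs` are the op's input tensors, the locked variable snapshots, and the device.

      // Sketch only: everything after must_be_constant_idxs is an assumption,
      // since the excerpt truncates the signature. constant_arg_indices,
      // inputs, and device are assumed to be in scope.
      std::vector<VariableInfo> variable_infos;  // snapshots of the resource variables
      TF_RETURN_IF_ERROR(LockVariables(absl::MakeSpan(variable_infos)));  // precondition above
      TF_ASSIGN_OR_RETURN(
          std::vector<XlaCompiler::Argument> args,
          XlaComputationLaunchContext::BuildXlaCompilerArguments(
              constant_arg_indices, inputs, variable_infos, device));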
  2. tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc

      auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
      auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
    
      NameAttrList function;
      function.set_name("launch0");
      auto launch = ops::XlaLaunch(
          scope.WithOpName("launch0").WithDevice("/gpu:0"),
          std::initializer_list<Input>{}, std::initializer_list<Input>{a, b, c, d},
          std::initializer_list<Input>{u, v, w},
    - Last Modified: Mon Oct 16 18:03:15 UTC 2023
    - 14.7K bytes
  3. tensorflow/compiler/jit/compilability_check_util.h

        const tensorflow::FunctionBody* fbody,
        absl::Span<int const> constant_arg_indices,
        absl::Span<int const> resource_arg_indices);
    
    // Returns output memory types.
    //
    // XlaLaunch kernel keeps all outputs (including constants, which it copies),
    // in device memory except for resources.
    tensorflow::MemoryTypeVector GetOutputMemoryTypes(
        const tensorflow::FunctionBody* fbody);
    
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 14.9K bytes
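
    The comment above states the placement rule: an XlaLaunch kernel keeps every output in device memory except resource outputs. A small sketch of querying that rule through GetOutputMemoryTypes; the surrounding loop, the `fbody` variable, and the logging are illustrative assumptions, and only the declared function itself is taken from the header.

      // Sketch only: fbody is assumed to be a FunctionBody* obtained from the
      // function library; GetOutputMemoryTypes is the function declared above.
      tensorflow::MemoryTypeVector out_types = GetOutputMemoryTypes(fbody);
      for (size_t i = 0; i < out_types.size(); ++i) {
        if (out_types[i] == tensorflow::HOST_MEMORY) {
          // Per the comment above, only resource (DT_RESOURCE) outputs land here.
          VLOG(2) << "output " << i << " stays in host memory";
        } else {
          VLOG(2) << "output " << i << " is kept in device memory";
        }
      }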
  4. tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td

        with `_xla_compile_device_type` attribute into a `tf_device.cluster`.
        Notice this pass will only rewrite the outermost call if there are nested
        calls to avoid nested `tf.XlaLaunch` operations from being created later.
    
        For example, the following code
    
        ```mlir
        func.func @main() -> tensor<i32> {
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 19.8K bytes
  5. tensorflow/compiler/mlir/tfrt/tests/hoist_invariant_ops.mlir

      attributes {tf_saved_model.exported_names = ["main"]} {
      %0 = "tf.VarHandleOp"() {device = "/device:CPU:0", container = "", shared_name = "variable"} : () -> tensor<!tf_type.resource<tensor<1x3xf32>>>
      %1 = "tf.XlaLaunch"(%arg0, %0) {device = "/device:GPU:0", function = @xla_func, operandSegmentSizes = array<i32: 0, 2, 0>} : (tensor<1x3xf32>, tensor<!tf_type.resource<tensor<1x3xf32>>>) -> tensor<*xf32>
      func.return  %1 : tensor<*xf32>
    
    }
    
    - Last Modified: Mon Apr 01 23:54:14 UTC 2024
    - 18.3K bytes
  6. tensorflow/compiler/mlir/tensorflow/transforms/passes.h

    // parent region.
    std::unique_ptr<OperationPass<ModuleOp>> CreateXlaInlineDeviceOpsPass();
    
    // Creates a pass that rewrites partitioned calls with `_xla_compile_device
    // type` with `tf.XlaLaunch` ops.
    std::unique_ptr<OperationPass<ModuleOp>> CreateXlaRewritePass();
    
    // Create a pass that validates the input graph to the CPU/GPU bridge.
    std::unique_ptr<OperationPass<ModuleOp>> CreateXlaValidateInputsPass();
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 31.8K bytes
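
    These factory functions follow the usual MLIR pattern: each returns a pass that is added to a PassManager and run over a module. A minimal sketch of wiring the rewrite pass into a pipeline; the namespace qualifier is omitted and the surrounding setup (context, module) is assumed, since the header excerpt shows only the declarations.

      // Sketch only: namespace qualifiers are omitted because the excerpt does
      // not show which namespace these factories live in; `module` is assumed
      // to be an mlir::ModuleOp loaded elsewhere.
      mlir::MLIRContext context;
      mlir::PassManager pm(&context);
      // Rewrites partitioned calls tagged with _xla_compile_device_type into
      // tf.XlaLaunch ops, per the comment in the header above.
      pm.addPass(CreateXlaRewritePass());
      if (mlir::failed(pm.run(module))) {
        // Pipeline failed; report the diagnostic and bail out.
      }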
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

                           ResourceEffects::XlaLaunch::get());
    
      // Conservatively mark resource handles as read and write, as without
      // analyzing XlaLaunch, there is not sufficient information to determine
      // effects on resources.
      for (Value value : getArgs()) {
        MarkResourceAsReadAndWrite(value, effects);
      }
    }
    
    // For `XlaLaunch` ops the `device` attribute corresponds to the resource
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
  8. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      );
    
      TF_DerivedOperandTypeAttr K = TF_DerivedOperandTypeAttr<0>;
      TF_DerivedOperandTypeAttr V = TF_DerivedOperandTypeAttr<1>;
    }
    
    def TF_XlaLaunchOp : TF_Op<"XlaLaunch", [AttrSizedOperandSegments, DeclareOpInterfaceMethods<MemoryEffectsOpInterface>, DeclareOpInterfaceMethods<TF_GetResourceInstanceInterface>]> {
      let summary = "XLA Launch Op. For use by the XLA JIT only.";
    
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes