Results 1 - 6 of 6 for XlaClusterOutput (0.15 sec)

  1. tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc

      scope.graph()->AddEdge(w.node(), 0, launch, 6);
    
      auto out0 =
          ops::XlaClusterOutput(scope.WithOpName("Out0"), Output(launch, 0));
      auto out1 =
          ops::XlaClusterOutput(scope.WithOpName("Out1"), Output(launch, 1));
      auto out2 =
          ops::XlaClusterOutput(scope.WithOpName("Out2"), Output(launch, 2));
      auto out3 =
          ops::XlaClusterOutput(scope.WithOpName("Out3"), Output(launch, 3));
    
    - Last Modified: Mon Oct 16 18:03:15 UTC 2023
    - 14.7K bytes
  2. tensorflow/compiler/jit/encapsulate_xla_computations_pass.h

    // XlaLaunch nodes.
    //
    // xla.compile() does two main things:
    // a) marks operators that make up an XLA computation with the attribute
    //    _xla_compile_id=XYZ, where XYZ is a unique key.
    // b) adds XlaClusterOutput nodes to represent outputs of the computation.
    //    These nodes are not marked with the _xla_compile_id attribute.
    
    #ifndef TENSORFLOW_COMPILER_JIT_ENCAPSULATE_XLA_COMPUTATIONS_PASS_H_
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 3.6K bytes
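    The header comment above describes a two-part contract: operators inside the computation carry the _xla_compile_id attribute, while the XlaClusterOutput nodes that forward results to ordinary consumers stay unmarked. Below is a minimal hand-built sketch of that wiring, modeled loosely on the test excerpt in result 1. It assumes the XlaClusterOutput registration from xla_ops.cc is linked into the binary, uses NodeBuilder rather than the generated C++ op wrapper, and the node names, the toy Add computation, and the "cluster_0" key are illustrative assumptions, not anything the pass itself produces.

      #include "tensorflow/cc/framework/scope.h"
      #include "tensorflow/cc/ops/standard_ops.h"
      #include "tensorflow/core/graph/node_builder.h"

      namespace tensorflow {

      // Illustrative sketch only: builds a tiny graph in the shape the header
      // comment describes. Assumes the XlaClusterOutput op registration
      // (tensorflow/compiler/jit/ops/xla_ops.cc) is linked into the binary.
      void BuildMarkedClusterSketch() {
        Scope scope = Scope::NewRootScope();

        // (a) An operator belonging to the XLA computation is tagged with the
        //     _xla_compile_id attribute ("cluster_0" is a made-up key).
        auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
        auto add = ops::Add(scope.WithOpName("Add"), a, a);
        add.node()->AddAttr("_xla_compile_id", "cluster_0");

        // (b) An XlaClusterOutput node forwards the result to ordinary
        //     consumers and is deliberately left without that attribute.
        Node* out = nullptr;
        TF_CHECK_OK(NodeBuilder("Out", "XlaClusterOutput")
                        .Input(add.node(), 0)
                        .Finalize(scope.graph(), &out));
      }

      }  // namespace tensorflow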
  3. tensorflow/compiler/jit/ops/xla_ops_grad.py

    # limitations under the License.
    # ==============================================================================
    
    from tensorflow.python.framework import ops
    
    
    @ops.RegisterGradient("XlaClusterOutput")
    def _XlaClusterOutputGrad(_, grad):
      del grad  # unused
      raise RuntimeError("Gradient computation of graph in xla.compile() is "
                         "prohibited because it can cause performance degradation."
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 1.1K bytes
  4. tensorflow/compiler/jit/ops/xla_ops.cc

        .Attr("Tresults: list(type) >= 0")
        .Attr("constants: list(int) >= 0")
        .Attr("resources: list(int) >= 0")
        .Attr("function: func")
        .Doc("XLA Launch Op. For use by the XLA JIT only.");
    
    REGISTER_OP("XlaClusterOutput")
        .Input("input: T")
        // Note: when replication is supported, this op will have N outputs.
        .Output("outputs: T")
        .Attr("T: type")
        .SetShapeFn([](InferenceContext* c) {
    - Last Modified: Sat Apr 06 09:08:06 UTC 2024
    - 4.5K bytes
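    The snippet above is cut off inside the SetShapeFn lambda, so the shape function's body is not shown. As a rough sketch only, assuming the shape function simply forwards the input shape (a guess, not the file's actual code), a pass-through shape function in this registration style could look like the following; the op name ExamplePassThrough is made up so as not to restate the real XlaClusterOutput registration.

      #include "tensorflow/core/framework/op.h"
      #include "tensorflow/core/framework/shape_inference.h"

      namespace tensorflow {

      using shape_inference::InferenceContext;

      // Hypothetical op used only to illustrate a pass-through shape function.
      REGISTER_OP("ExamplePassThrough")
          .Input("input: T")
          .Output("outputs: T")
          .Attr("T: type")
          .SetShapeFn([](InferenceContext* c) {
            // Forward the (possibly unknown) input shape to the single output.
            c->set_output(0, c->input(0));
            return OkStatus();
          });

      }  // namespace tensorflow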
  5. tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc

    #include "tensorflow/core/platform/statusor.h"
    #include "tensorflow/core/util/dump_graph.h"
    
    namespace tensorflow {
    
    namespace {
    
    const char* const kXlaClusterOutput = "XlaClusterOutput";
    
    bool IsCpuGpuCompile(const Graph* graph) {
      for (Node* n : graph->nodes()) {
        string name;
        // Only consider nodes being compiled.
        if (!TryGetNodeAttr(n->attrs(), kXlaClusterIdAttr, &name)) continue;
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 15.1K bytes
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      );
    
      TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
      TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
    }
    
    def TF_XlaClusterOutputOp : TF_Op<"XlaClusterOutput", [Pure, TF_NoConstantFold]> {
      let summary = [{
    Operator that connects the output of an XLA computation to other consumer graph nodes.
      }];
    
      let arguments = (ins
        TF_Tensor:$input
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes