Results 1 - 8 of 8 for _XlaRun (0.24 sec)

  1. tensorflow/compiler/jit/build_xla_ops_pass_test.cc

      TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
    
      Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
      ASSERT_NE(write_op_new, nullptr);
      EXPECT_THAT(write_op_new, NodeWith(CtrlDeps(NodeWith(Op("_XlaRun")))));
    }
    
    TEST_F(BuildXlaOpsTest, CleanFailureOnBogusAttr) {
      Scope root = Scope::NewRootScope().ExitOnError();
    
      FunctionDefLibrary fdef_lib =
          CreateFunctionDefLibWithConstFunction("cluster_0");
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 12.2K bytes
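    The matcher expression in this test asserts that the rewritten write op picked up a control dependency on an `_XlaRun` node. As a point of reference, the same property can be checked with the plain Graph API; the helper below is a hypothetical sketch (the name `HasControlDepOnOp` is not from the TensorFlow sources) of what `NodeWith(CtrlDeps(NodeWith(Op("_XlaRun"))))` verifies.

    #include <string>

    #include "tensorflow/core/graph/graph.h"

    namespace tensorflow {

    // Returns true if `node` has an incoming control edge whose source node's
    // op type equals `op_type` (e.g. "_XlaRun").
    bool HasControlDepOnOp(const Node* node, const std::string& op_type) {
      for (const Edge* e : node->in_edges()) {
        if (e->IsControlEdge() && e->src()->type_string() == op_type) {
          return true;
        }
      }
      return false;
    }

    }  // namespace tensorflow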
  2. tensorflow/compiler/jit/ops/xla_ops.cc

       `compilation_successful` is always true.
    )");
    
    REGISTER_OP("_XlaRun")
        .Input("args: Targs")
        .Attr("Targs: list(type) >= 0")
        .Output("results: Tresults")
        .Attr("Tresults: list(type) >= 0")
        .Input("key: string")
        // XLA random-number generation ops are stateful.
        // TODO(phawkins): create stateful and non-stateful variants of _XlaRun.
        .SetIsStateful()
        .Doc(R"(XLA Run Op. For use by the XLA JIT only.
    - Last Modified: Sat Apr 06 09:08:06 UTC 2024
    - 4.5K bytes
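    The registration above is the complete interface of `_XlaRun`: a variadic list of arguments whose types are carried in the `Targs` attr, the string `key` produced by `_XlaCompile`, and a variadic list of results typed by `Tresults`. For illustration, here is a minimal sketch of the same registration pattern for a hypothetical op `_MyVariadicRun` (the name and the `UnknownShape` shape function are assumptions, not part of the snippet above).

    #include "tensorflow/core/framework/common_shape_fns.h"
    #include "tensorflow/core/framework/op.h"

    // Hypothetical op mirroring the _XlaRun registration pattern: variadic
    // inputs/outputs typed via list(type) attrs plus a string lookup key.
    REGISTER_OP("_MyVariadicRun")
        .Input("args: Targs")
        .Attr("Targs: list(type) >= 0")
        .Output("results: Tresults")
        .Attr("Tresults: list(type) >= 0")
        .Input("key: string")
        // Marked stateful conservatively, as the real op is.
        .SetIsStateful()
        .SetShapeFn(tensorflow::shape_inference::UnknownShape)
        .Doc("Hypothetical example op; not part of TensorFlow.");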
  3. tensorflow/compiler/jit/xla_platform_info.h

     private:
      DeviceType device_type_;
      se::Platform::Id platform_id_;
    
      // xla_device_metadata_ lives in the tensorflow::DeviceBase in which the
      // XlaLaunch/_XlaCompile/_XlaRun op is placed and thus does not die before the
      // XlaLaunch/_XlaCompile/_XlaRun OpKernel.
      const XlaDevice::Metadata* xla_device_metadata_;
    
      // pjrt_device_metadata_ lives in tensorflow::PjRtBaseDevice in which the
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 7.2K bytes
  4. tensorflow/compiler/jit/build_xla_ops_pass.cc

        //   xla_run_outputs = _XlaRun(..., key=use_xla_run)
        //   outputs = Merge(tf_call_outputs, xla_run_outputs).
        ops::Switch s(root.WithOpName("predicated_compilation_key"),
                      xla_compile.key, xla_compile.compilation_successful);
        Output predicated_compilation_key = s.output_true;
        Output inverse_predicated_compilation_key = s.output_false;
    
        ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args,
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes
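    This pass builds the classic Switch/Merge fallback: the compilation key is routed through a `Switch` on `compilation_successful`, so either the `_XlaRun` branch or the TF fallback branch is alive, and a `Merge` forwards whichever branch ran. The standalone sketch below (hypothetical graph, standard `cc/ops` only, no `_XlaRun`) shows the same Switch/Merge shape in isolation.

    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/standard_ops.h"
    #include "tensorflow/core/framework/graph.pb.h"

    int main() {
      namespace tf = tensorflow;
      namespace ops = tensorflow::ops;

      tf::Scope root = tf::Scope::NewRootScope().ExitOnError();

      tf::Output data = ops::Const(root.WithOpName("data"), 42.0f);
      tf::Output pred = ops::Const(root.WithOpName("pred"), true);

      // Switch routes `data` to output_true when `pred` is true, else to
      // output_false; only one branch is alive at runtime.
      ops::Switch sw(root.WithOpName("switch"), data, pred);

      // Each branch consumes only the output that is alive in its case.
      tf::Output on_true =
          ops::Identity(root.WithOpName("true_branch"), sw.output_true);
      tf::Output on_false =
          ops::Identity(root.WithOpName("false_branch"), sw.output_false);

      // Merge forwards the value of whichever branch actually executed.
      ops::Merge merged(root.WithOpName("merge"), {on_true, on_false});

      tf::GraphDef gdef;
      TF_CHECK_OK(root.ToGraphDef(&gdef));
      return 0;
    }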
  5. tensorflow/compiler/jit/kernels/xla_ops.cc

                            XlaCompileOp);
    
    REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE_CPU), XlaRunOp);
    REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE_GPU).HostMemory("key"),
                            XlaRunOp);
    REGISTER_KERNEL_BUILDER(
        Name("_XlaRun").Device(DEVICE_DEFAULT).HostMemory("key"), XlaRunOp);
    
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
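    These registrations bind one `XlaRunOp` kernel class to several device types and pin the `key` input to host memory so the kernel can always read it on the CPU. A minimal sketch of the same pattern follows, using the hypothetical `_MyVariadicRun` op and kernel names rather than the real `XlaRunOp`.

    #include "tensorflow/core/framework/op_kernel.h"

    namespace tensorflow {

    // Hypothetical kernel; the real XlaRunOp looks up and runs the compiled
    // executable identified by the host-resident "key" input.
    class MyRunOp : public OpKernel {
     public:
      explicit MyRunOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
      void Compute(OpKernelContext* ctx) override {
        // "key" is pinned to host memory on non-CPU devices via
        // HostMemory("key"), so it can be read here without a device copy.
      }
    };

    REGISTER_KERNEL_BUILDER(Name("_MyVariadicRun").Device(DEVICE_CPU), MyRunOp);
    REGISTER_KERNEL_BUILDER(
        Name("_MyVariadicRun").Device(DEVICE_GPU).HostMemory("key"), MyRunOp);

    }  // namespace tensorflow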
  6. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    namespace tensorflow {
    
    namespace {
    using DeadnessPredicate = DeadnessAnalysis::DeadnessPredicate;
    using jit::DeviceId;
    using jit::DeviceSet;
    
    // The clusters we create here are eventually lowered into an
    // _XlaCompile/_XlaRun pair with a TF executor "fallback" that uses the
    // PartitionedCall op to execute the cluster in the regular graph executor if
    // need be.  PartitionedCall, however, reruns the entire TF graph optimization
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

            &effects) {
      effects.reserve(2 * getArgs().size() + 1);
      effects.emplace_back(MemoryEffects::Write::get(),
                           ResourceEffects::_XlaRun::get());
    
      // Conservatively mark resource handles as read and write, as without
      // analyzing _XlaCompile, there is not sufficient information to determine
      // effects on resources.
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
  8. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      let summary = "XLA Compile Op. For use by the XLA JIT only.";
    
      let description = [{
    Compiles a TensorFlow function into an XLA LocalExecutable and returns a key
    that _XlaRun can use to look up the LocalExecutable and execute it.
      }];
    
      let arguments = (ins
        Variadic<TF_Tensor>:$constants,
        Variadic<TF_Tensor>:$args,
        Variadic<TF_ResourceTensor>:$resources,
    
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes