Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for _XlaCompile (0.27 sec)

  1. tensorflow/compiler/jit/build_xla_ops_pass_test.cc

      call->AddAttr(kXlaHasReferenceVarsAttr, false);
    
      Node* write_op = MakeWrite(root, Output(call), "write_result");
      write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
    
      auto xla_compile = NodeWith(Op("_XlaCompile"), Attr("must_compile", false));
      auto predicated_compilation_key =
          NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile)));
      auto xla_run =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 12.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/build_xla_ops_pass.cc

                       .NewSubScope(n->name())
                       .WithDevice(n->requested_device())
                       .WithAssignedDevice(device_name_str);
    
      ops::_XlaCompile xla_compile(root.WithOpName("xla_compile"),
                                   /*constants=*/cluster_info.constant_inputs,
                                   /*args=*/cluster_info.non_constant_inputs,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/flags.h

    };
    
    // Flags common to the _Xla* ops and their kernels.
    struct XlaOpsCommonFlags {
      // If true, _XlaCompile always refuses to compile the cluster, which means the
      // XLA clusters always run in the TF executor.  Defaults to false.
      bool tf_xla_always_defer_compilation;
      // If true, _XlaCompile compiles the cluster asynchronously with respect to
      // the main execution. The fallback path is taken while compilation happens.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 14.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_device_ops.h

                                  .HostMemory("resources"),   \
                              KERNEL);
    
    #define REGISTER_XLA_COMPILE_KERNEL(DEVICE, KERNEL, TYPES)          \
      REGISTER_KERNEL_BUILDER(Name("_XlaCompile")                       \
                                  .Device(DEVICE)                       \
                                  .HostMemory("constants")              \
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Nov 23 19:28:25 UTC 2021
    - 17.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/kernels/xla_ops.cc

                                .HostMemory("constants")
                                .HostMemory("resources"),
                            XlaLocalLaunchOp);
    
    REGISTER_KERNEL_BUILDER(Name("_XlaCompile").Device(DEVICE_CPU), XlaCompileOp);
    REGISTER_KERNEL_BUILDER(Name("_XlaCompile")
                                .Device(DEVICE_GPU)
                                .HostMemory("constants")
                                .HostMemory("key")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_platform_info.cc

        // operations).  Such a cluster can fail compilation (in way that
        // MarkForCompilation could not have detected) if the CPU JIT is not
        // linked in.
        //
        // So bail out of _XlaCompile in this case, and let the executor handle
        // the situation for us.
        const Status& status = compiler_for_platform.status();
        if (status.code() == error::NOT_FOUND) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
    - Viewed (0)
Back to top