Results 11 - 14 of 14 for platform_info (0.16 sec)

  1. tensorflow/compiler/jit/pjrt_compile_util.h

    // used for compilation is output in `client`. The PJRT executable is output in
    // `executable`.
    Status CompileToPjRtLoadedExecutable(
        const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
        const NameAttrList& function,
        const std::vector<XlaCompiler::Argument>& args,
        DeviceCompileMode compile_mode, bool has_ref_vars,
        bool may_alias_resource_update,
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 2.7K bytes
    - Viewed (0)
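    The tail of this signature is cut off in the excerpt, so the sketch below is a
    hedged guess at a call site: the three output parameters (`compilation_result`,
    `client`, `executable`) are assumptions inferred from the doc comment, and
    `platform_info_`, `function_`, and `args` are assumed to come from the
    surrounding kernel.

        // Hedged sketch, not the actual call site. Out-params are assumed.
        const XlaCompiler::CompilationResult* compilation_result = nullptr;
        xla::PjRtClient* client = nullptr;                // assumed out-param
        xla::PjRtLoadedExecutable* executable = nullptr;  // assumed out-param
        OP_REQUIRES_OK(ctx, CompileToPjRtLoadedExecutable(
                                *ctx, platform_info_, function_, args,
                                DeviceCompileMode::kStrict, /*has_ref_vars=*/true,
                                /*may_alias_resource_update=*/false,
                                &compilation_result, &client, &executable));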
  2. tensorflow/compiler/jit/xla_compile_on_demand_op.cc

          GetAllocator(ctx->device(), stream, platform_info_);
      se::DeviceMemoryAllocator* allocator = allocator_ptr.get();
      XlaComputationLaunchContext launch_context(
          client, allocator, client->default_device_ordinal(),
          /*allocate_xla_tensors=*/platform_info_.xla_device_metadata() != nullptr,
          platform_info_.xla_device_metadata()
              ? platform_info_.xla_device_metadata()->UseMultipleStreams()
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 29 08:39:39 UTC 2024
    - 13.4K bytes
    - Viewed (0)
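    The excerpt cuts off the allocator's declaration at the top and the final
    constructor argument at the bottom; below is a hedged reconstruction of the
    full sequence, with the truncated pieces (the `allocator_ptr` declaration and
    the `: false` fallback) filled in as assumptions.

        // Pick the allocator that matches the device described by platform_info_.
        std::shared_ptr<se::DeviceMemoryAllocator> allocator_ptr =
            GetAllocator(ctx->device(), stream, platform_info_);
        se::DeviceMemoryAllocator* allocator = allocator_ptr.get();
        // XLA tensors and multi-stream execution are only enabled when the
        // device carries XLA metadata.
        XlaComputationLaunchContext launch_context(
            client, allocator, client->default_device_ordinal(),
            /*allocate_xla_tensors=*/platform_info_.xla_device_metadata() != nullptr,
            /*use_multiple_streams=*/platform_info_.xla_device_metadata()
                ? platform_info_.xla_device_metadata()->UseMultipleStreams()
                : false);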
  3. tensorflow/compiler/jit/kernels/xla_ops.h

      // Indexes of compile-time constant inputs
      const std::vector<int> constants_;
      // Indexes of resource inputs
      const std::vector<int> resources_;
    
      const NameAttrList function_;
      const XlaPlatformInfo platform_info_;
    
      bool has_ref_vars_;
    };
    
    // XlaLocalLaunchOp is used to replace a region of the TensorFlow graph
    // which will be compiled and executed using XLA.  The XlaLocalLaunchOp is
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 16 23:44:26 UTC 2023
    - 4.8K bytes
    - Viewed (0)
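    These members show the construction pattern shared by the XLA launch kernels;
    the sketch below is a hypothetical, minimal kernel (the class name is
    illustrative, not the actual XlaLocalLaunchOp) that combines this member
    layout with the constructor idiom from result 4.

        #include <vector>

        #include "tensorflow/compiler/jit/xla_platform_info.h"
        #include "tensorflow/core/framework/op_kernel.h"

        // Hypothetical kernel, for illustration only.
        class MyXlaLaunchLikeOp : public tensorflow::OpKernel {
         public:
          explicit MyXlaLaunchLikeOp(tensorflow::OpKernelConstruction* ctx)
              : tensorflow::OpKernel(ctx),
                // Capture the device's XLA platform description once.
                platform_info_(
                    tensorflow::XlaPlatformInfoFromDevice(ctx->device())) {}

          void Compute(tensorflow::OpKernelContext* ctx) override {
            // Compile `function_` with XLA and launch it (details elided).
          }

         private:
          const std::vector<int> constants_;  // compile-time constant input indexes
          const std::vector<int> resources_;  // resource input indexes
          const tensorflow::NameAttrList function_;
          const tensorflow::XlaPlatformInfo platform_info_;
        };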
  4. tensorflow/compiler/jit/xla_compile_on_demand_op.h

    // vanilla TensorFlow op as long as the bridge supports it.
    class XlaCompileOnDemandOp : public OpKernel {
     public:
      explicit XlaCompileOnDemandOp(OpKernelConstruction* ctx)
          : OpKernel(ctx),
            platform_info_(XlaPlatformInfoFromDevice(ctx->device())) {}
      void Compute(OpKernelContext* ctx) override;
    
     private:
      Status Compile(const std::vector<XlaCompiler::Argument>& args,
                     OpKernelContext* ctx,
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.2K bytes
    - Viewed (0)
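    Because `platform_info_` is captured from the device once, at construction,
    every `Compute` call can reuse it without re-querying the device. A hedged
    sketch of that flow; the argument-building step and `Compile`'s trailing
    parameters are truncated above, so this body is illustrative only.

        void XlaCompileOnDemandOp::Compute(OpKernelContext* ctx) {
          // Build XLA compile-time argument descriptions from runtime inputs.
          std::vector<XlaCompiler::Argument> args;
          // ... populate `args` from ctx->input(i) (helper not shown) ...

          // platform_info_ was initialized in the constructor, so compilation
          // sees a stable description of the device across calls.
          OP_REQUIRES_OK(ctx, Compile(args, ctx /*, ...out-params elided... */));

          // ... run the resulting executable and set outputs (not shown) ...
        }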