Results 11 - 14 of 14 for platform_info_ (0.22 sec)

  1. tensorflow/compiler/jit/get_compiler_ir.cc

                                 compiler_arg_source));
    
      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(dev);
      auto compilation_device_type = platform_info.device_type();
      if (platform_info.device_type() != DEVICE_TPU) {
        TF_ASSIGN_OR_RETURN(compilation_device_type,
                            GetCompilationDeviceType(platform_info.device_type()));
      }
    
      XlaDeviceCompiler* xla_device_compiler;
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 19K bytes
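The excerpt above resolves the DeviceType that XLA compiles for from a device's XlaPlatformInfo. A minimal sketch of that pattern, assuming the surrounding function provides a valid DeviceBase* dev; the helper name ResolveCompilationDeviceType is hypothetical:

    // Hypothetical helper mirroring the excerpt: TPU devices compile for
    // their own device type; any other platform is mapped to an XLA
    // compilation device type via GetCompilationDeviceType.
    #include "absl/status/statusor.h"
    #include "tensorflow/compiler/jit/xla_platform_info.h"
    #include "tensorflow/core/framework/types.h"

    namespace tensorflow {

    absl::StatusOr<DeviceType> ResolveCompilationDeviceType(DeviceBase* dev) {
      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(dev);
      if (platform_info.device_type() != DEVICE_TPU) {
        return GetCompilationDeviceType(platform_info.device_type());
      }
      return platform_info.device_type();
    }

    }  // namespace tensorflow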
  2. tensorflow/compiler/jit/xla_compiler_options_util.h

        const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>&
            xla_device_compiler,
        const FunctionLibraryRuntime& function_library, DeviceBase* device,
        se::Stream* stream, const XlaPlatformInfo& platform_info,
        bool has_ref_vars);
    
    // Returns created options for XLA compiler when TFRT-TPU is used.
    XlaCompiler::Options GenerateCompilerOptionsForTfrtTpu(
        const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>&
    - Last Modified: Fri Dec 29 01:41:20 UTC 2023
    - 2.7K bytes
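The declaration shown above is truncated before its name; in the TensorFlow source for this header it is GenerateCompilerOptions, the non-TFRT-TPU counterpart of GenerateCompilerOptionsForTfrtTpu. A hedged sketch of a call site, where every variable name is a placeholder assumed to exist in the surrounding code:

    // Sketch of a call site; all inputs below are assumptions.
    #include "tensorflow/compiler/jit/xla_compiler_options_util.h"

    XlaCompiler::Options options = GenerateCompilerOptions(
        *xla_device_compiler,  // DeviceCompiler<xla::LocalExecutable, xla::LocalClient>
        *function_library,     // FunctionLibraryRuntime
        device,                // DeviceBase* for the target device
        stream,                // se::Stream* associated with that device
        platform_info,         // XlaPlatformInfo from XlaPlatformInfoFromDevice
        /*has_ref_vars=*/true);  // whether the cluster reads reference variables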
  3. tensorflow/compiler/jit/pjrt_compile_util_test.cc

      TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFuntionAddXY("foo"));
      device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef);
    
      Device* device = device_setup.GetDevice(DEVICE_GPU);
      const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
    
      NameAttrList function;
      function.set_name("foo");
    
      ResourceMgr resource_mgr("");
    
      const XlaCompiler::CompilationResult* compilation_result = nullptr;
    - Last Modified: Mon Aug 21 23:21:57 UTC 2023
    - 4.7K bytes
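The test excerpt stops right after declaring compilation_result. Before compiling, a test like this also needs XlaCompiler::Arguments describing the function's inputs; a minimal sketch for a two-input function such as "foo" (the dtype and shape here are illustrative, not taken from the excerpt):

    // Two parameter arguments; kind, type, and shape are the minimum the
    // compiler needs to know about each input.
    std::vector<XlaCompiler::Argument> args(2);
    args[0].kind = XlaCompiler::Argument::kParameter;
    args[0].type = DT_INT32;
    args[0].shape = TensorShape({2});
    args[1] = args[0];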
  4. tensorflow/compiler/jit/pjrt_compile_util.h

    // used for compilation is output in `client`. The PJRT executable is output in
    // `executable`.
    Status CompileToPjRtLoadedExecutable(
        const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
        const NameAttrList& function,
        const std::vector<XlaCompiler::Argument>& args,
        DeviceCompileMode compile_mode, bool has_ref_vars,
        bool may_alias_resource_update,
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 2.7K bytes
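Putting results 3 and 4 together, a call could look like the sketch below. The excerpt truncates the parameter list after may_alias_resource_update; the trailing output pointers here follow the doc comment above (compilation result, PJRT client, loaded executable), but their exact order and types are an assumption, not shown in the excerpt:

    // Hedged sketch: compile `function` for the device behind `ctx`
    // (an OpKernelContext&). On success the PJRT client used for
    // compilation and the loaded executable come back through the
    // output pointers, per the doc comment above.
    const XlaCompiler::CompilationResult* compilation_result = nullptr;
    xla::PjRtClient* pjrt_client = nullptr;                // assumed output type
    xla::PjRtLoadedExecutable* pjrt_executable = nullptr;  // assumed output type
    TF_RETURN_IF_ERROR(CompileToPjRtLoadedExecutable(
        ctx, platform_info, function, args,
        DeviceCompileMode::kStrict, /*has_ref_vars=*/true,
        /*may_alias_resource_update=*/true,
        &compilation_result, &pjrt_client, &pjrt_executable));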