Results 1 - 6 of 6 for platform_info_ (0.19 sec)

  1. tensorflow/compiler/jit/kernels/xla_ops.cc

          function_(function),
          platform_info_(XlaPlatformInfoFromDevice(ctx->device())),
          has_ref_vars_(has_ref_vars) {}
    
    void XlaLocalLaunchBase::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
      VLOG(1) << "XlaLocalLaunchOpBase::Compute "
              << Canonicalize(function_.name(), AttrSlice(&function_.attr()));
      xla_launch_counter->GetCell(platform_info_.device_type().type_string())
          ->IncrementBy(1);
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
  2. tensorflow/compiler/jit/xla_compile_on_demand_op.cc

          GetAllocator(ctx->device(), stream, platform_info_);
      se::DeviceMemoryAllocator* allocator = allocator_ptr.get();
      XlaComputationLaunchContext launch_context(
          client, allocator, client->default_device_ordinal(),
          /*allocate_xla_tensors=*/platform_info_.xla_device_metadata() != nullptr,
          platform_info_.xla_device_metadata()
              ? platform_info_.xla_device_metadata()->UseMultipleStreams()
    - Last Modified: Thu Feb 29 08:39:39 UTC 2024
    - 13.4K bytes
  3. tensorflow/compiler/jit/xla_platform_info.cc

    Status GetCompilationDeviceTypeAndPjRtClient(
        const XlaPlatformInfo& platform_info, FunctionLibraryRuntime* flr,
        DeviceType* compilation_device_type, xla::PjRtClient** pjrt_client) {
      DeviceType device_type = platform_info.device_type();
    
      if (platform_info.xla_device_metadata()) {
        VLOG(2) << "Building PjRtDeviceCompiler using "
                   "platform_info.xla_device_metadata().";
    
        *compilation_device_type =
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
  4. tensorflow/compiler/jit/xla_platform_info_test.cc

      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
    
      TF_ASSERT_OK_AND_ASSIGN(
          DeviceType compilation_device_type,
          GetCompilationDeviceType(platform_info.device_type()));
    
      XlaDeviceCompiler* xla_device_compiler = nullptr;
      TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
                                          platform_info, compilation_device_type,
    - Last Modified: Sun Jan 14 15:17:12 UTC 2024
    - 13.6K bytes
  5. tensorflow/compiler/jit/xla_compiler_options_util_test.cc

      std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
      XlaPlatformInfo platform_info(
          compilation_device_type, platform_id, xla_device_metadata.get(),
          /*pjrt_device_metadata=*/nullptr, custom_allocator);
    
      XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
          *device_setup_.flr(), device, platform_info,
          /*pjrt_device_compiler=*/nullptr);
    
    - Last Modified: Fri Dec 29 01:41:20 UTC 2023
    - 14.8K bytes
  6. tensorflow/compiler/jit/get_compiler_ir.cc

                                 compiler_arg_source));
    
      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(dev);
      auto compilation_device_type = platform_info.device_type();
      if (platform_info.device_type() != DEVICE_TPU) {
        TF_ASSIGN_OR_RETURN(compilation_device_type,
                            GetCompilationDeviceType(platform_info.device_type()));
      }
    
      XlaDeviceCompiler* xla_device_compiler;
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 19K bytes
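
Every hit above follows the same two-step pattern: derive an XlaPlatformInfo from the current device with XlaPlatformInfoFromDevice, then map its device_type() to the type XLA should compile for via GetCompilationDeviceType. The sketch below condenses that pattern; it is not taken from any of the listed files, the helper name CompilationDeviceTypeFor is hypothetical, and the includes and the absl::StatusOr return type are assumptions about TensorFlow's internal JIT tree.

    // Minimal sketch only: the helper name is hypothetical and the includes
    // are assumptions about TensorFlow's internal JIT headers.
    #include "absl/status/statusor.h"
    #include "tensorflow/compiler/jit/xla_platform_info.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/framework/types.h"
    #include "tensorflow/core/platform/statusor.h"  // TF_ASSIGN_OR_RETURN

    namespace tensorflow {

    // Resolve the device type XLA should compile for, given the device a
    // kernel runs on -- the pattern shared by get_compiler_ir.cc and the
    // tests above.
    inline absl::StatusOr<DeviceType> CompilationDeviceTypeFor(
        DeviceBase* device) {
      // Query the platform info for the device, then map its device type to
      // the compilation device type.
      XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
      TF_ASSIGN_OR_RETURN(DeviceType compilation_device_type,
                          GetCompilationDeviceType(platform_info.device_type()));
      return compilation_device_type;
    }

    }  // namespace tensorflow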