Results 21 - 30 of 232 for _xla (0.04 sec)

  1. tensorflow/compiler/jit/xla_compiler_options_util.h

    #include "tensorflow/compiler/jit/xla_platform_info.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/client/local_client.h"
    #include "xla/pjrt/pjrt_client.h"
    
    namespace tensorflow {
    
    // Returns created options for the XLA compiler.
    XlaCompiler::Options GenerateCompilerOptions(
        const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>&
            xla_device_compiler,
    - Last Modified: Fri Dec 29 01:41:20 UTC 2023
    - 2.7K bytes
  2. tensorflow/c/experimental/stream_executor/BUILD

            "@local_tsl//tsl/platform:status",
            "@local_xla//xla/stream_executor",
            "@local_xla//xla/stream_executor:executor_cache",
            "@local_xla//xla/stream_executor:host_memory_allocation",
            "@local_xla//xla/stream_executor:memory_allocation",
            "@local_xla//xla/stream_executor:platform",
            "@local_xla//xla/stream_executor:stream_executor_common",
        ],
    )
    
    cc_library(
    - Last Modified: Sat Jun 08 00:27:07 UTC 2024
    - 3.1K bytes
  3. tensorflow/compiler/jit/device_compilation_cache.h

    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/client/local_client.h"
    #include "xla/pjrt/pjrt_client.h"
    #include "tensorflow/core/platform/mutex.h"
    
    namespace tensorflow {
    namespace device_compilation_cache_internal {
    template <typename ExecutableType>
    int64_t ExecutableSize(const ExecutableType* executable) {
      return 0;
    }
    
    template <>
    inline int64_t ExecutableSize<xla::LocalExecutable>(
    - Last Modified: Thu Oct 12 08:49:52 UTC 2023
    - 8.9K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/xla_sharding_util_test.cc

    #include <string>
    
    #include <gtest/gtest.h>
    #include "mlir/Support/LogicalResult.h"  // from @llvm-project
    #include "xla/xla_data.pb.h"
    
    inline constexpr llvm::StringRef kXlaShardingAttrName = "_XlaSharding";
    
    namespace tensorflow {
    namespace {
    
    TEST(DecodeShardingAttributeTest, CheckInvalidString) {
      xla::OpSharding sharding;
      EXPECT_TRUE(DecodeShardingAttribute("", sharding).succeeded());
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 1.8K bytes
  5. tensorflow/compiler/jit/pjrt_compile_util.cc

    #include "tensorflow/core/platform/status.h"
    #include "tsl/platform/errors.h"
    #include "tsl/platform/statusor.h"
    
    namespace tensorflow {
    
    using PjRtDeviceCompiler =
        DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
    
    Status CompileToPjRtLoadedExecutable(
        const DeviceBase* device, const XlaPlatformInfo& platform_info,
        const NameAttrList& function,
        const std::vector<XlaCompiler::Argument>& args,
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.8K bytes
  6. tensorflow/compiler/jit/xla_host_send_device_context.h

    #ifndef TENSORFLOW_COMPILER_JIT_XLA_HOST_SEND_DEVICE_CONTEXT_H_
    #define TENSORFLOW_COMPILER_JIT_XLA_HOST_SEND_DEVICE_CONTEXT_H_
    
    #include "xla/shape.h"
    #include "xla/stream_executor/device_memory.h"
    #include "xla/stream_executor/stream.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tfrt/concurrency/async_value_ref.h"  // from @tf_runtime
    
    namespace tensorflow {
    
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.7K bytes
  7. tensorflow/compiler/jit/pjrt_device_compiler_client.cc

    #include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
    
    #include <memory>
    #include <string>
    #include <utility>
    
    namespace tensorflow {
    
    xla::CompileOptions GetPjRtCompileOptions(
        const XlaCompiler::Options& options,
        const XlaCompiler::CompilationResult& result) {
      xla::CompileOptions pjrt_compile_options;
      pjrt_compile_options.argument_layouts = result.xla_input_shapes;
      pjrt_compile_options.executable_build_options =
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 3.6K bytes
  8. tensorflow/compiler/jit/kernels/BUILD

        "@local_xla//xla:executable_run_options",
        "@local_xla//xla:status_macros",
        "@local_xla//xla:statusor",
        "@local_xla//xla/client:client_library",
        "@local_xla//xla/client:local_client",
        "@local_xla//xla/service:compiler",
        "@local_xla//xla/service/gpu:gpu_executable_run_options",
        "//tensorflow/core:core_cpu_internal",
        "//tensorflow/core:framework",
        "//tensorflow/core:lib",
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3K bytes
  9. tensorflow/compiler/jit/xla_activity.proto

        int32 count = 2;
      }
    
      // Describes a single XLA cluster.
      //
      // Next ID: 4
      message Cluster {
        string name = 1;
    
        // The number of nodes in the cluster.
        int32 size = 2;
    
        // A histogram of the TF operations in this cluster.
        repeated OpAndCount op_histogram = 3;
      }
    
      // The number of nodes in the graph that are not inside an XLA cluster.
      int32 unclustered_node_count = 1;
    
    - Last Modified: Tue Mar 15 03:11:33 UTC 2022
    - 3.6K bytes
  10. tensorflow/compiler/jit/extract_outside_compilation_pass.h

    // xla_cluster_name: XLA cluster name for this XLA computation. We need it
    //   because XLA cluster name might be different from `func_name`.
    // func_name_attrs: they will be used to instantiate the XLA computation func.
    // new_func_name: new function name for rewritten XLA computation func.
    // host_compute_core: mapping from outside compilation cluster name to XLA
    //   device assignment.
    // fld: FunctionLibraryDefinition object.
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.3K bytes