Results 41 - 50 of 232 for _xla (0.04 sec)

  1. tensorflow/compiler/jit/xla_gpu_device.cc

    // Registers the XLA_GPU device, which is an XlaDevice instantiation that runs
    // operators using XLA via the XLA "CUDA" or "ROCM" (GPU) backend.
    
    #include <array>
    #include <set>
    
    #include "absl/memory/memory.h"
    #include "absl/strings/numbers.h"
    #include "absl/strings/str_split.h"
    #include "tensorflow/compiler/jit/defs.h"
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 6.6K bytes
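    The comment in this excerpt explains that the file registers the XLA_GPU device. As a
    rough illustration of that registration pattern (a minimal sketch assuming the usual
    tensorflow::DeviceFactory interface and the REGISTER_LOCAL_DEVICE_FACTORY macro; the
    class name and method bodies below are placeholders, not the actual contents of
    xla_gpu_device.cc):

    // Sketch only: a device factory registered under the "XLA_GPU" device type.
    // Real code would construct XlaDevice instances backed by the CUDA/ROCm backend.
    #include <memory>
    #include <vector>

    #include "tensorflow/core/framework/device_factory.h"
    #include "tensorflow/core/public/session_options.h"

    namespace tensorflow {

    class ExampleXlaGpuDeviceFactory : public DeviceFactory {
     public:
      Status ListPhysicalDevices(std::vector<string>* devices) override {
        // A real factory would enumerate the visible GPUs here.
        devices->push_back("/physical_device:XLA_GPU:0");
        return OkStatus();
      }

      Status CreateDevices(const SessionOptions& options, const string& name_prefix,
                           std::vector<std::unique_ptr<Device>>* devices) override {
        // Device construction omitted in this sketch.
        return OkStatus();
      }
    };

    // Makes the factory discoverable under the "XLA_GPU" device type.
    REGISTER_LOCAL_DEVICE_FACTORY("XLA_GPU", ExampleXlaGpuDeviceFactory);

    }  // namespace tensorflow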
  2. tensorflow/compiler/jit/encapsulate_util.h

            func_name_attrs(func_name_attrs),
            node(node),
            host_compute_core(host_compute_core) {}
      // XLA cluster name. It might be different from `func_name`.
      const string cluster_name;
      // Name and attributes of XLA computation function.
      const NameAttrList func_name_attrs;
      // The XLA computation node in the graph.
      Node* node;
      // A mapping from outside compilation cluster name to its device assignment.
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 7.4K bytes
  3. tensorflow/compiler/jit/xla_tensor.h

      // REQUIRES: has_shaped_buffer()
      const xla::ShapedBuffer& shaped_buffer() const {
        CHECK(has_shaped_buffer());
        return *shaped_buffer_;
      }
      xla::ShapedBuffer& shaped_buffer() {
        CHECK(has_shaped_buffer());
        return *shaped_buffer_;
      }
      // Mutates the XlaTensor to set the ShapedBuffer.
      void set_shaped_buffer(xla::ScopedShapedBuffer shaped_buffer) {
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 4.7K bytes
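    The two shaped_buffer() overloads above are a checked-accessor pattern: assert the
    precondition (has_shaped_buffer()), then dereference the stored value. A self-contained
    sketch of the same pattern, with assert standing in for CHECK and a hypothetical Buffer
    type standing in for xla::ShapedBuffer:

    // Checked-accessor pattern from xla_tensor.h, shown with placeholder types.
    #include <cassert>
    #include <memory>
    #include <string>
    #include <utility>

    struct Buffer {
      std::string data;
    };

    class TensorLike {
     public:
      bool has_buffer() const { return buffer_ != nullptr; }

      // REQUIRES: has_buffer(); both overloads assert before dereferencing.
      const Buffer& buffer() const {
        assert(has_buffer());
        return *buffer_;
      }
      Buffer& buffer() {
        assert(has_buffer());
        return *buffer_;
      }

      // Mirrors set_shaped_buffer(): take by value, then move into owned storage.
      void set_buffer(Buffer b) { buffer_ = std::make_unique<Buffer>(std::move(b)); }

     private:
      std::unique_ptr<Buffer> buffer_;
    };

    int main() {
      TensorLike t;
      t.set_buffer(Buffer{"payload"});
      return t.has_buffer() && t.buffer().data == "payload" ? 0 : 1;
    }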
  4. tensorflow/compiler/mlir/lite/python/jax_to_tfl_flatbuffer.cc

    #include "tensorflow/compiler/mlir/lite/transforms/passes.h"
    #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"
    #include "xla/service/hlo.pb.h"
    #include "xla/service/hlo_parser.h"
    #include "xla/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
    #include "tensorflow/core/framework/graph.pb.h"
    #include "tensorflow/core/framework/graph_debug_info.pb.h"
    #include "tensorflow/core/framework/types.pb.h"
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 8K bytes
  5. tensorflow/compiler/aot/compile.h

    #define TENSORFLOW_COMPILER_AOT_COMPILE_H_
    
    #include <memory>
    #include <string>
    
    #include "tensorflow/compiler/aot/flags.h"
    #include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
    #include "xla/service/cpu/cpu_compiler.h"
    #include "xla/xla_data.pb.h"
    #include "tensorflow/core/framework/graph.pb.h"
    #include "tensorflow/core/platform/status.h"
    
    namespace tensorflow {
    namespace tfcompile {
    
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 2.1K bytes
  6. tensorflow/compiler/jit/resource_operation_safety_analysis.h

    #ifndef TENSORFLOW_COMPILER_JIT_RESOURCE_OPERATION_SAFETY_ANALYSIS_H_
    #define TENSORFLOW_COMPILER_JIT_RESOURCE_OPERATION_SAFETY_ANALYSIS_H_
    
    #include "xla/service/graphcycles/graphcycles.h"
    #include "tensorflow/core/framework/function.h"
    #include "tensorflow/core/graph/graph.h"
    
    namespace tensorflow {
    // An XLA cluster hoists all resource reads to the beginning of the cluster
    // execution and all the resource writes to the end.  This means it cannot
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.2K bytes
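    The comment above states the constraint that motivates this analysis: because a
    cluster hoists every resource read to the start and sinks every write to the end, a
    read that originally followed a write inside the cluster would observe a stale value.
    A small standalone illustration of that reordering hazard (plain C++, with an int
    standing in for a resource variable; not TensorFlow code):

    // Why a write-then-read pair cannot live in one cluster: hoisting the read
    // above the write changes the value the read observes.
    #include <iostream>

    int main() {
      // Original op order: write, then read.
      int resource = 0;
      resource = 42;                 // resource write
      int read_in_order = resource;  // read sees 42

      // Clustered order: reads hoisted to the beginning, writes sunk to the end.
      int resource2 = 0;
      int read_hoisted = resource2;  // hoisted read sees the stale value 0
      resource2 = 42;                // write executes afterwards

      std::cout << "in-order read: " << read_in_order
                << ", hoisted read: " << read_hoisted << "\n";
      return 0;
    }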
  7. tensorflow/compiler/mlir/tf2xla/internal/utils/BUILD

            "@llvm-project//mlir:FuncDialect",
            "@llvm-project//mlir:IR",
            "@local_tsl//tsl/platform:errors",
            "@local_xla//xla:shape_util",
            "@local_xla//xla/mlir_hlo:hlo_dialect_registration",
            "@local_xla//xla/translate/mhlo_to_hlo:type_to_shape",
        ],
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 2.7K bytes
  8. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc

    #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/client/client_library.h"
    #include "xla/client/compile_only_client.h"
    #include "xla/stream_executor/host/host_platform_id.h"
    #include "xla/stream_executor/platform_manager.h"
    #include "tensorflow/core/common_runtime/graph_constructor.h"
    #include "tensorflow/core/framework/function.h"
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 7.8K bytes
  9. tensorflow/compiler/jit/xla_tensor.cc

    }
    
    Status XlaTensor::AllocateShapedBuffer(DataType dtype,
                                           const xla::Shape& on_device_shape,
                                           xla::LocalClient* client,
                                           int device_ordinal) {
      xla::Shape on_host_shape =
          xla::ShapeUtil::DeviceShapeToHostShape(on_device_shape);
      xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape,
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 4.5K bytes
  10. tensorflow/compiler/jit/pjrt_compile_util.h

        const std::vector<XlaCompiler::Argument>& args,
        DeviceCompileMode compile_mode, bool has_ref_vars,
        bool may_alias_resource_update,
        const XlaCompiler::CompilationResult** compilation_result,
        xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable);
    
    // Similar to the above function, but it does not take an OpKernelContext.
    // Instead, it takes the following arguments that are obtained from
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 2.7K bytes