
Results 31 - 40 of 428 for _xla (0.08 sec)

  1. tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc

    #include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
    #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
    #include "xla/client/xla_builder.h"
    #include "xla/client/xla_computation.h"
    #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
    #include "xla/shape_util.h"
    #include "xla/xla_data.pb.h"
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tsl/lib/core/status_test_util.h"
    #include "tsl/platform/errors.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:16:07 UTC 2024
    - 11.7K bytes
    - Viewed (0)
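
    The includes in the tf2xla_rewriter_test.cc entry above (xla_builder.h, xla_computation.h, shape_util.h, xla_data.pb.h) are the client-side builder API. A minimal standalone sketch, not taken from that test file, of how an xla::XlaComputation is typically built with xla::XlaBuilder:

    // Minimal sketch: build a small xla::XlaComputation with xla::XlaBuilder.
    #include <iostream>

    #include "xla/client/xla_builder.h"
    #include "xla/client/xla_computation.h"
    #include "xla/shape_util.h"
    #include "xla/xla_data.pb.h"

    int main() {
      xla::XlaBuilder builder("add_f32");
      // A 2x2 f32 parameter shape; ShapeUtil and the F32 enum come from
      // shape_util.h and xla_data.pb.h respectively.
      xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 2});
      xla::XlaOp x = xla::Parameter(&builder, 0, shape, "x");
      xla::XlaOp y = xla::Parameter(&builder, 1, shape, "y");
      xla::Add(x, y);  // The last op built becomes the computation root.
      auto computation = builder.Build();  // StatusOr<xla::XlaComputation>
      if (!computation.ok()) {
        std::cerr << computation.status().ToString() << std::endl;
        return 1;
      }
      std::cout << computation->proto().name() << std::endl;
      return 0;
    }
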
  2. tensorflow/compiler/jit/device_compiler_disable_test.cc

      }
    
      DisableXlaCompilation();
    
      xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
      DeviceType device_type = DeviceType(DEVICE_CPU_XLA_JIT);
    
      const XlaCompiler::CompilationResult* compilation_result;
      xla::LocalExecutable* executable;
    
      using XlaDeviceExecutablePersistor =
          DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_device_compiler_client.h

    #include "tensorflow/compiler/jit/device_compiler_client.h"
    #include "xla/client/local_client.h"
    
    namespace tensorflow {
    
    class XlaDeviceCompilerClient
        : public DeviceCompilerClient<xla::LocalExecutable, xla::LocalClient> {
     public:
      explicit XlaDeviceCompilerClient(xla::LocalClient* client)
          : client_(client) {}
    
      absl::StatusOr<std::unique_ptr<xla::LocalExecutable>> BuildExecutable(
          const XlaCompiler::Options& options,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 2.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_device_context.h

    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/client/global_data.h"
    #include "xla/client/local_client.h"
    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/lib/core/status.h"
    
    namespace tensorflow {
    
    // The allocator used for Tensors assigned to the XLA device. The allocator
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.1K bytes
    - Viewed (0)
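
    The comment in the xla_device_context.h entry above refers to the allocator used for tensors placed on the XLA device. A hedged sketch of the tensorflow::Allocator interface that comment points at, using a hypothetical delegating wrapper rather than the actual XLA device allocator:

    // Hypothetical wrapper illustrating the tensorflow::Allocator interface;
    // this is not the allocator declared in xla_device_context.h.
    #include <string>

    #include "tensorflow/core/framework/allocator.h"

    class ForwardingAllocator : public tensorflow::Allocator {
     public:
      explicit ForwardingAllocator(tensorflow::Allocator* base) : base_(base) {}

      std::string Name() override { return "forwarding_" + base_->Name(); }

      void* AllocateRaw(size_t alignment, size_t num_bytes) override {
        // A device allocator would hand out device memory here; this sketch
        // simply delegates to the wrapped allocator.
        return base_->AllocateRaw(alignment, num_bytes);
      }

      void DeallocateRaw(void* ptr) override { base_->DeallocateRaw(ptr); }

     private:
      tensorflow::Allocator* base_;  // Not owned.
    };
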
  5. tensorflow/compiler/jit/pjrt_device_compiler_client.h

    #include "tensorflow/compiler/jit/device_compiler_client.h"
    #include "xla/pjrt/pjrt_client.h"
    
    namespace tensorflow {
    
    // Calls into PjRtClient to provide functionality for building, serializing and
    // loading PjRtLoadedExecutables.
    class PjRtDeviceCompilerClient
        : public DeviceCompilerClient<xla::PjRtLoadedExecutable, xla::PjRtClient> {
     public:
      explicit PjRtDeviceCompilerClient(xla::PjRtClient* client)
          : client_(client) {}
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 3.4K bytes
    - Viewed (0)
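
    The xla_device_compiler_client.h and pjrt_device_compiler_client.h entries above show the same DeviceCompilerClient interface instantiated for two backends: <xla::LocalExecutable, xla::LocalClient> for the classic XLA client path and <xla::PjRtLoadedExecutable, xla::PjRtClient> for PjRt. A reduced sketch of that two-type parameterization; all names here are hypothetical stand-ins, not TensorFlow's classes:

    // Reduced sketch of the executable/client parameterization seen above.
    #include <memory>
    #include <string>

    #include "absl/status/statusor.h"

    template <typename ExecutableType, typename ClientType>
    class CompilerClientSketch {
     public:
      explicit CompilerClientSketch(ClientType* client) : client_(client) {}
      virtual ~CompilerClientSketch() = default;

      // Builds an executable for an already-compiled program. The real
      // interface takes XlaCompiler::Options and a CompilationResult; a
      // string stands in for both here.
      virtual absl::StatusOr<std::unique_ptr<ExecutableType>> BuildExecutable(
          const std::string& compiled_program) = 0;

     protected:
      ClientType* client_;  // Not owned, mirroring the snippets above.
    };

    // One subclass per backend, e.g. an instantiation over
    // <xla::LocalExecutable, xla::LocalClient> and another over
    // <xla::PjRtLoadedExecutable, xla::PjRtClient>.
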
  6. tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.cc

        return absl::OkStatus();
      }
    
      TF_ASSIGN_OR_RETURN(
          auto hlo_module_config,
          xla::HloModule::CreateModuleConfigFromProto(
              compilation_result->computation->proto(), xla::DebugOptions()));
    
      TF_ASSIGN_OR_RETURN(
          std::unique_ptr<xla::HloModule> hlo_module,
          xla::HloModule::CreateFromProto(compilation_result->computation->proto(),
                                          hlo_module_config));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 00:40:46 UTC 2024
    - 6.8K bytes
    - Viewed (0)
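
    The legalize_tf.cc snippet above round-trips the compiled computation's proto back into an xla::HloModule. A hedged standalone version of that round-trip, with header paths assumed from the 2024 XLA source layout and plain status checks in place of TF_ASSIGN_OR_RETURN:

    // Sketch: rebuild an xla::HloModule from a compiled computation's proto
    // so it can be inspected or printed. Header paths are assumptions.
    #include <memory>

    #include "absl/status/statusor.h"
    #include "xla/client/xla_computation.h"
    #include "xla/hlo/ir/hlo_module.h"
    #include "xla/service/hlo_module_config.h"
    #include "xla/xla.pb.h"

    absl::StatusOr<std::unique_ptr<xla::HloModule>> RebuildHloModule(
        const xla::XlaComputation& computation) {
      // Same two calls as in the snippet above, with explicit status checks.
      auto config = xla::HloModule::CreateModuleConfigFromProto(
          computation.proto(), xla::DebugOptions());
      if (!config.ok()) return config.status();
      return xla::HloModule::CreateFromProto(computation.proto(), *config);
    }
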
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tf_xla_op_to_tf_op.cc

        // This is the argument used to refer to the pass in
        // the textual format (on the commandline for example).
        return "quant-convert-tf-xla-op-to-tf-op";
      }
    
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Apply converting Tensorflow Xla ops to non-xla ops.";
      }
    
      void getDependentDialects(DialectRegistry& registry) const override {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 13.2K bytes
    - Viewed (0)
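
    The convert_tf_xla_op_to_tf_op.cc snippet above shows the three standard MLIR pass hooks: getArgument() names the pass on the command line, getDescription() gives the one-line summary, and getDependentDialects() registers the dialects whose ops the pass may create. A hypothetical skeleton using those hooks; this is not the quantization pass itself:

    // Hypothetical no-op pass illustrating the hooks seen in the snippet.
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/Pass.h"

    namespace {

    class ExampleNoOpPass
        : public mlir::PassWrapper<ExampleNoOpPass,
                                   mlir::OperationPass<mlir::ModuleOp>> {
     public:
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ExampleNoOpPass)

      llvm::StringRef getArgument() const final { return "example-noop"; }

      llvm::StringRef getDescription() const final {
        return "Illustrative no-op pass; performs no rewrites.";
      }

      void getDependentDialects(mlir::DialectRegistry& registry) const override {
        // A real pass would call registry.insert<SomeDialect>() here for
        // every dialect it creates ops from.
      }

      void runOnOperation() override {}  // Intentionally a no-op.
    };

    }  // namespace
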
  8. tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc

    #include "tensorflow/compiler/tf2xla/layout_util.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "tensorflow/compiler/tf2xla/xla_helpers.h"
    #include "xla/mlir_hlo/mhlo/IR/register.h"
    #include "xla/shape.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/platform/errors.h"
    #include "tensorflow/core/platform/profile_utils/cpu_utils.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 20:29:34 UTC 2024
    - 6.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/jit/xla_compiler_options_util_test.cc

    namespace {
    using XlaDeviceCompiler =
        DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
    using XlaDeviceExecutablePersistor =
        DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
    using PjRtDeviceCompiler =
        DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
    using PjRtDeviceExecutablePersistor =
        DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Dec 29 01:41:20 UTC 2023
    - 14.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/xla_device_compiler_client.cc

    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>
    
    #include "xla/client/local_client.h"
    
    namespace tensorflow {
    namespace {
    std::vector<const xla::Shape*> GetShapePointers(
        absl::Span<const xla::Shape> shapes) {
      std::vector<const xla::Shape*> shape_ptrs;
      shape_ptrs.reserve(shapes.size());
      for (const auto& shape : shapes) {
        shape_ptrs.push_back(&shape);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 4.1K bytes
    - Viewed (0)
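
    The GetShapePointers helper in the xla_device_compiler_client.cc snippet above is a general pattern: collect stable pointers to elements of an existing container, reserving the pointer vector up front. A self-contained generic sketch of the same pattern, without xla::Shape:

    // Generic restatement of the GetShapePointers pattern: the pointed-to
    // elements live in the caller's container, which must outlive the result.
    #include <cstdio>
    #include <vector>

    template <typename T>
    std::vector<const T*> GetPointers(const std::vector<T>& values) {
      std::vector<const T*> ptrs;
      ptrs.reserve(values.size());
      for (const T& value : values) {
        ptrs.push_back(&value);
      }
      return ptrs;
    }

    int main() {
      std::vector<int> values = {1, 2, 3};
      for (const int* p : GetPointers(values)) {
        std::printf("%d\n", *p);
      }
      return 0;
    }
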