Results 81 - 90 of 309 for _xla (0.06 sec)

  1. tensorflow/compiler/jit/device_executable_persistor.h

    #include "tensorflow/compiler/jit/xla_compilation_cache.pb.h"
    #include "tensorflow/compiler/jit/xla_device_compiler_client.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/pjrt/pjrt_client.h"
    #include "xla/service/hlo.pb.h"
    #include "xla/util.h"
    #include "tensorflow/core/framework/device.h"
    #include "tensorflow/core/lib/strings/proto_serialization.h"
    #include "tensorflow/core/platform/path.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 17.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_device_context.cc

    #include "tensorflow/compiler/tf2xla/xla_helpers.h"
    #include "xla/util.h"
    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor_reference.h"
    #include "tsl/platform/statusor.h"
    
    namespace tensorflow {
    
    // The allocator used for Tensors assigned to the XLA device.
    XlaDeviceAllocator::XlaDeviceAllocator(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc

    #include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "tensorflow/compiler/tf2xla/xla_helpers.h"
    #include "xla/client/client_library.h"
    #include "xla/shape.h"
    #include "xla/stream_executor/platform.h"
    #include "xla/stream_executor/platform_manager.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/framework/types.pb.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 20:29:34 UTC 2024
    - 6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tf2xla/transforms/passes.h

        bool prefer_tf2xla = false);
    
    // Legalizes TF/XLA communication ops (TF dialect) to HLO dialect communication
    // ops.
    std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeTFCommunicationPass();
    
    // Legalizes TF/XLA collective ops (TF dialect) to HLO dialect collective
    // ops.
    std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeTFCollectivePass();
    
    // Verifies that the TF/XLA ops have all been lowered to MHLO.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 4.8K bytes
    - Viewed (0)
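    The pass-creation functions in result 4 plug into a standard MLIR pass pipeline. The sketch below is illustrative only: it assumes the passes are declared in the mlir::mhlo namespace (not visible in the snippet) and that a ModuleOp has already been parsed.

      // Minimal sketch (assumptions noted above): run the two legalization
      // passes from passes.h over an already-loaded module.
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/Pass/PassManager.h"
      #include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"

      mlir::LogicalResult LegalizeCommunicationAndCollectives(mlir::ModuleOp module) {
        mlir::PassManager pm(module.getContext());
        // Lower TF/XLA communication ops (TF dialect) to HLO dialect ops.
        pm.addPass(mlir::mhlo::CreateLegalizeTFCommunicationPass());  // namespace assumed
        // Lower TF/XLA collective ops (TF dialect) to HLO dialect ops.
        pm.addPass(mlir::mhlo::CreateLegalizeTFCollectivePass());     // namespace assumed
        return pm.run(module);
      }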
  5. tensorflow/c/experimental/next_pluggable_device/c_api.cc

    #include "tensorflow/compiler/jit/variable_info.h"
    #include "tensorflow/compiler/jit/variable_info_util.h"
    #include "xla/pjrt/c/pjrt_c_api.h"
    #include "xla/pjrt/c/pjrt_c_api_helpers.h"
    #include "xla/pjrt/pjrt_c_api_client.h"
    #include "xla/pjrt/pjrt_client.h"
    #include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
    #include "tensorflow/core/common_runtime/next_pluggable_device/plugin_resource.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 05:48:24 UTC 2024
    - 13.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h

    #include "mlir/Pass/Pass.h"  // from @llvm-project
    #include "tensorflow/compiler/tf2xla/xla_helpers.h"
    #include "xla/client/compile_only_client.h"
    #include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
    #include "tsl/platform/statusor.h"
    
    namespace tensorflow {
    namespace tf2xla {
    namespace internal {
    
    // Legalize the given MLIR module to XLA HLO using a combination of the MLIR
    // Bridge and XlaBuilder
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 20:29:34 UTC 2024
    - 2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc

        std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
        std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
        std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
        xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result) {
      LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
                              "Combined MLIR Tf2Xla Bridge.";
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 20:29:34 UTC 2024
    - 3.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc

          llvm::cl::desc("Choose target opset."),
          llvm::cl::values(
              clEnumValN(OpSet::TF, "TF",
                         "Uses TF ops that mimic quantization behavior"),
              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops"))};
    };
    
    llvm::StringRef InsertQuantizedFunctionsPass::GetFunctionLibrary(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 8.7K bytes
    - Viewed (0)
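    The option in result 8 uses the LLVM CommandLine enum pattern. Below is a hedged, standalone equivalent of that pattern; the flag name, the OpSet enum, and main() are made up for illustration and are not part of the TensorFlow pass, which declares this as an MLIR pass option.

      // Standalone sketch of the clEnumValN enum-option pattern (illustrative names).
      #include "llvm/Support/CommandLine.h"

      enum class OpSet { TF, XLA, UNIFORM_QUANTIZED };

      static llvm::cl::opt<OpSet> target_opset(
          "target-opset", llvm::cl::desc("Choose target opset."),
          llvm::cl::init(OpSet::TF),
          llvm::cl::values(
              clEnumValN(OpSet::TF, "TF",
                         "Uses TF ops that mimic quantization behavior"),
              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops")));

      int main(int argc, char** argv) {
        // Parse --target-opset=TF|XLA|UNIFORM_QUANTIZED from the command line.
        llvm::cl::ParseCommandLineOptions(argc, argv);
        return 0;
      }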
  9. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util_test.cc

    }
    
    absl::Status BuildHloFromGraph(Graph& graph, bool use_output_shapes) {
      xla::XlaBuilder builder(
          ::testing::UnitTest::GetInstance()->current_test_info()->name());
      mlir::MLIRContext mlir_context;
      llvm::SmallVector<xla::XlaOp, 4> xla_params;
      std::vector<xla::XlaOp> returns(1);
      return BuildHloFromGraph(graph, builder, mlir_context, xla_params, returns,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 25 19:54:38 UTC 2024
    - 9.7K bytes
    - Viewed (0)
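    Result 9 builds HLO through an xla::XlaBuilder. As a rough, self-contained illustration of that builder API (the shape, names, and header paths here are assumptions, not taken from the test):

      // Sketch: build a tiny f32[2,2] computation; the last op added becomes
      // the root of the computation when Build() is called.
      #include "absl/status/statusor.h"
      #include "xla/client/xla_builder.h"
      #include "xla/shape_util.h"

      absl::StatusOr<xla::XlaComputation> BuildAddOne() {
        xla::XlaBuilder builder("add_one");
        xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 2});
        xla::XlaOp x = xla::Parameter(&builder, /*parameter_number=*/0, shape, "x");
        xla::XlaOp one = xla::ConstantR0<float>(&builder, 1.0f);
        xla::Add(x, one);  // the scalar operand is implicitly broadcast
        return builder.Build();
      }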
  10. tensorflow/compiler/jit/tests/device_compiler_test_helper.h

      }
    
      JitCompilationListener* listener() const { return listener_; }
    
      // Returns a test graph that will split into two XLA clusters (due to a node
      // with _XlaCompile = false).
      GraphDef GetTestGraph(const PartialTensorShape& input_shape);
    
      // Runs the graph using specified batch size both with and without XLA JIT
      // compilation. Returns an error if the results between the two do not match.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 08:24:16 UTC 2024
    - 3.6K bytes
    - Viewed (0)
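    The comment in result 10 mentions forcing a cluster split with a node carrying _XlaCompile = false. A minimal sketch of setting that attribute on a NodeDef follows; the helper name is hypothetical and not part of the test helper above.

      // Sketch: mark one node so XLA clustering will not compile it, using the
      // _XlaCompile attribute referenced in the snippet above.
      #include "tensorflow/core/framework/attr_value.pb.h"
      #include "tensorflow/core/framework/node_def.pb.h"

      void DisableXlaCompileOnNode(tensorflow::NodeDef* node) {
        tensorflow::AttrValue attr;
        attr.set_b(false);
        (*node->mutable_attr())["_XlaCompile"] = attr;
      }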