- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 54 for HLO (0.22 sec)
-
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h
namespace internal { // Compiles a serialized MLIR module and returns a serialized MLIR module of the // result of running all the MLIR Bridge passes. If compile_to_xla_hlo is true // then those passes include all the Legalization to XLA HLO which is returned // in the compilation_result. absl::StatusOr<std::string> CompileFromMlirToXlaHlo( bool lower_to_xla_hlo, const tpu::MlirToHloArgs& computation,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/composite_op_round_trip.mlir
// RUN: tf_tfl_translate --enable-hlo-to-tf-conversion --input-mlir %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s --check-prefix=CHECK-ROUNDTRIP module { func.func public @main( %arg0: tensor<i64>) -> tensor<i64> { %0 = func.call @test_add_roundtrip(%arg0) : (tensor<i64>) -> tensor<i64> return %0 : tensor<i64> } // CHECK-LABEL: func.func private @test_add_roundtrip
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 03:40:50 UTC 2024 - 1.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
// Whether to enable the hlo/stablehlo to tf conversion. This also supports // the case where a saved model contains both TF module and serialized // StableHLO module. bool enable_hlo_to_tf_conversion = false; // Whether to disable the direct hlo/stablehlo to Tensorflow Lite conversion. // // This prevents from directly converting from HLO to TFLite without going
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:05:30 UTC 2024 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc
std::vector<ShardingAndIndex>* arg_core_mapping, std::vector<std::vector<xla::Shape>>* per_core_arg_shapes) { LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using MLIR tf2xla bridge in " "the op by op fallback mode. This is Phase 2 of the TF2XLA Bridge. " "Old (non-MLIR) bridge may be used in case of unsupported feature "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h
const std::string& device_type); ~Tf2XlaRewriter(); // Compiles the given Operation with XlaBuilder and imports the generated HLO // via the HLO -> MHLO importer. absl::StatusOr<mhlo::TupleOp> CompileWithHloImporter( tensorflow::OpKernelContext& op_context); // Import the given XlaComputation into the parent module. Returns the given
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf.cc
if (tf2xla_fallback_device_type) { // Add TF->HLO legalization patterns via TF2XLA fallback. PopulateLegalizeTfWithTf2XlaPatterns(tf2xla_fallback_device_type.value(), patterns, context, converter, prefer_tf2xla); } // Populate with CHLO->HLO lowerings to account for TF ops legalized to // CHLO first.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/mlprogram_util.cc
#include "tensorflow/compiler/mlir/tensorflow/transforms/mlprogram.h" namespace tensorflow { void RegisterMlProgramPasses() { mlir::registerPassPipeline( "tf-lower-to-mlprogram-and-hlo", "Lower TF to ml_program + mhlo", [](mlir::OpPassManager& pm, llvm::StringRef options, llvm::function_ref<mlir::LogicalResult(const llvm::Twine&)> errorHandler) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 22:13:50 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo_custom_call.mlir
// RUN: odml-to-stablehlo-opt %s -tfl-legalize-hlo -split-input-file | FileCheck %s --dump-input=fail // CHECK-LABEL: mhlo_custom_call_test__legalize_string_backend_config func.func @mhlo_custom_call_test__legalize_string_backend_config(%arg0: tensor<1x4xf32>) -> tensor<1x8xf32> { %0 = mhlo.custom_call @custom_call.my_custom_op(%arg0) { api_version = 1 : i32, backend_config = "this_is_a_test_string" } : (tensor<1x4xf32>) -> (tensor<1x8xf32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td
"mlir::TFL::TFLDialect", ]; } def LegalizeHloToTfPass : Pass<"tf-legalize-hlo", "ModuleOp"> { let summary = "Legalize from MHLO to the TF dialect"; let dependentDialects = ["TF::TensorFlowDialect"]; let constructor = "mlir::odml::CreateLegalizeHloToTfPass()"; } def LegalizeHloToTfLitePass : Pass<"tfl-legalize-hlo", "mlir::ModuleOp"> { let summary = "Legalize from MHLO to the TFLite dialect";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h
#include "tensorflow/core/tpu/kernels/tpu_compile.pb.h" #include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h" namespace tensorflow { namespace tf2xla { namespace v1 { // Compiles the given Tensorflow graph into xla::HLO. The result is in // compilation_result. If the input computation is in MLIR, it will be // converted to a Tensorflow graph. Otherwise, the graph compiler will be run. absl::Status CompileTensorflowGraphToHlo(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:08:57 UTC 2024 - 2.1K bytes - Viewed (0)