- Sort: Score
- Results per page: 10
- Languages: All
Results 91 - 100 of 435 for ModuleOp (0.28 sec)
-
tensorflow/compiler/mlir/lite/experimental/tac/tflite_import_export.h
// Whether the input file is an MLIR not tflite file. bool input_mlir = false; }; explicit TfLiteImporter(const Options& options) : options_(options) {} absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> Import() override; private: Options options_; mlir::MLIRContext context_; llvm::SourceMgr source_mgr_; std::unique_ptr<mlir::SourceMgrDiagnosticHandler> source_mgr_handler_; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/inference/inference_metrics_pass.cc
: public impl::InferenceMetricsPassBase<InferenceMetricsPass> { public: void runOnOperation() override; }; void InferenceMetricsPass::runOnOperation() { bool has_tpu_partitioned_call = false; ModuleOp module = getOperation(); for (auto func_op : module.getOps<func::FuncOp>()) { func_op->walk( [&](TF::TPUPartitionedCallOp op) { has_tpu_partitioned_call = true; });
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 05 21:24:51 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc
// Convert a TPU model to be compatible on CPU by rewriting/removing TPU ops. class ConvertTpuModelToCpuPass : public PassWrapper<ConvertTpuModelToCpuPass, OperationPass<ModuleOp>> { public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ConvertTpuModelToCpuPass) explicit ConvertTpuModelToCpuPass() = default; StringRef getArgument() const final {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/cluster_tf_test.cc
mlir_module_ = mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_); if (!mlir_module_) { return absl::Status( absl::StatusCode::kNotFound, absl::StrCat("Could not find MLIR module at ", mlir_module_path)); } return absl::OkStatus(); } DialectRegistry registry_; MLIRContext context_; OwningOpRef<mlir::ModuleOp> mlir_module_; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:44:37 UTC 2024 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_to_vhlo.cc
// vhlo.op %1 : vhlo.tensor<...> // ==> // vhlo.op %1 : tensor<...> // // TODO: There's likely a way to make MLIR manage the unrealized cast // conversions using a specific rewriter. LogicalResult ApplyTypeConverter(ModuleOp op, TypeConverter &converter) { IRRewriter rewriter(op->getContext()); op->walk([&](Operation *op) { if (op->getDialect()->getNamespace() != "vhlo") return; // Convert operands
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 19:48:51 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h
#include "tsl/platform/statusor.h" namespace mlir { namespace mhlo { namespace test { // Given a raw string, return a ModuleOp that can be used with the given // MLIRContext. absl::StatusOr<OwningOpRef<ModuleOp>> GetMlirModuleFromString( absl::string_view module_string, MLIRContext* mlir_context); } // namespace test } // namespace mhlo } // namespace mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.cc
#include "tsl/platform/errors.h" namespace tensorflow { namespace tf2xla { namespace internal { namespace { constexpr char kEntryFuncName[] = "main"; absl::Status SetupArguments(mlir::ModuleOp module, std::vector<TensorShape>& arg_shapes, tpu::TPUCompileMetadataProto& metadata_proto) { auto main_fn = module.lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 23:59:33 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/tf_xla_mlir_translate.cc
mlir::OwningOpRef<mlir::ModuleOp> module_ref; auto status = DeserializeMlirModule(str_attr.getValue().str(), context, &module_ref); if (!status.ok()) { LOG(ERROR) << status; return nullptr; } return module_ref; } static mlir::LogicalResult MlirModuleToSerializedMlirStringAttrTranslate( mlir::ModuleOp module_op, llvm::raw_ostream& output) { output << "\"";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h
// _xla_compile_device_type=TPU. bool IsSupportedByReplicatedBridge(mlir::ModuleOp module); // Check if an MLIR module contains TPUPartitionedCall op. If so, we define // such graph as an inference graph. Otherwise, it is non inference graph. bool HasTPUPartitionedCallOpInModule(mlir::ModuleOp module); // Check if a graph contains TPUPartitionedCall op, including its reachable
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 16:33:22 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/tac_filter.cc
TacFilter; using ::third_party::tensorflow::compiler::mlir::lite::experimental::tac:: TacFilters; class TacFilterPass : public PassWrapper<TacFilterPass, OperationPass<ModuleOp>> { public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TacFilterPass) TacFilterPass() = default; TacFilterPass(const TacFilterPass& other) { this->tac_filters_ = other.tac_filters_; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0)