- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 22 for getNumRegions (0.19 sec)
-
tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.cc
<< "Cannot find function " << sym.getRootReference(); } callees.push_back(callee); } return success(); } bool HasSingleBlock(func::FuncOp func) { return func->getNumRegions() == 1 && func.getBody().hasOneBlock(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
output_types, candidate_op->getAttrs()); for (int i = 0; i < candidate_op->getNumRegions(); ++i) { new_state.addRegion(); } Operation* quantized_op = rewriter.create(new_state); if (candidate_op->getNumRegions() != 0) { for (const auto& indexed_regions : llvm::enumerate(candidate_op->getRegions())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/remove_unused_arguments.cc
// This is a lengthy bit of code, since it has to recreate the operation. // TODO(kramm): Move this under utils/ somewhere. void EraseResults(Operation* op, llvm::BitVector erase) { assert(!op->getNumRegions()); std::vector<Type> result_types; for (auto result : op->getResults()) { if (!erase[result.getResultNumber()]) { result_types.push_back(result.getType()); } } OpBuilder builder(op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/merge_duplicate_resource_ops.cc
// Gets the GraphOp from the function op. Returns an empty op iff it doesn't // exist. // TODO(b/284222084): Move executor dialect utilities to a new library. GraphOp GetGraphOpFromFuncOp(func::FuncOp func_op) { if (func_op->getNumRegions() == 0 || func_op.getBody().empty()) return {}; auto graph_op_range = func_op.front().without_terminator(); if (llvm::hasSingleElement(graph_op_range)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 26 04:26:16 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc
// such AssignVariableOp are also pruned. void SinkResourceWritesIntoParallelExecute( tf_device::ParallelExecuteOp parallel_execute) { bool rewrite = false; const int num_regions = parallel_execute.getNumRegions(); llvm::SmallVector<Value, 4> results_to_remap; // Go through each region and find AssignVariableOps that can be moved into // the parallel_execute region. Result indices by region index are collected,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 06 04:46:18 UTC 2022 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.cc
// could be folded should have a custom folder instead of relying on the // TensorFlow folding hook. if (inst == nullptr || inst->getNumResults() == 0 || inst->hasTrait<OpTrait::TF::NoConstantFold>() || inst->getNumRegions() != 0 || !isMemoryEffectFree(inst)) { return false; } // If any of the result types are variants, don't try to constant fold them. // This creates opaque variant constants which lose information and would
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/merge_save_function_ops_to_main.cc
} void runOnOperation() override; }; // Returns true iff func_op has either no Region or the body has no Blocks. bool IsFuncOpEmpty(func::FuncOp func_op) { return func_op->getNumRegions() == 0 || func_op.getBody().empty(); } // Gets the GraphOp from the function op. Returns an empty op iff it doesn't // exist. GraphOp GetGraphOpFromFuncOp(func::FuncOp func_op) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
output_types, quantizing_op->getAttrs()); for (int i = 0; i < quantizing_op->getNumRegions(); ++i) { new_state.addRegion(); } Operation* quantized_op = rewriter.create(new_state); if (quantizing_op->getNumRegions() != 0) { for (const auto& indexed_regions : llvm::enumerate(quantizing_op->getRegions())) { IRMapping mapping;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc
} } } else if (isa<tf_device::LaunchOp, tf_device::ClusterOp, tf_executor::IslandOp, tf_executor::GraphOp>(op) && op->getNumRegions() == 1) { Region& region = op->getRegion(0); const auto& body_info = backtrack_analysis.GetAnalysisForRegion(region); for (auto result : filter_resources(op->getResults())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 09:04:13 UTC 2024 - 28.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/parallel_execute_to_islands.cc
llvm::SmallVectorImpl<tf_executor::IslandOp>& executes, bool legacy_graph_export, int parallel_group_idx) { const int num_regions = parallel_execute_op.getOperation()->getNumRegions(); executes.reserve(num_regions); for (int i : llvm::seq<int>(0, num_regions)) { Block& execute_block = parallel_execute_op.GetRegionBlockWithIndex(i);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jan 19 19:47:16 UTC 2023 - 11.1K bytes - Viewed (0)