- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 54 for getTerminator (0.18 sec)
-
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
arg.replaceAllUsesWith(new_arg); arg.dropAllUses(); bb.eraseArgument(0); } // Edit the return ops and remove the dequantize ops in place. auto* terminator = bb.getTerminator(); int num_return_operands = terminator->getNumOperands(); llvm::SmallVector<Type, 4> output_types; output_types.reserve(num_return_operands); for (int i = 0; i != num_return_operands; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/promote_resources_to_args.cc
func::FuncOp function, llvm::ArrayRef<std::string> var_handle_shared_names) { Block& block = function.front(); auto return_op = llvm::dyn_cast_or_null<func::ReturnOp>(block.getTerminator()); if (!return_op) return function.emitError() << "expects function '" << function.getName() << "' to have a MLIR ReturnOp";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
int new_num_operands = old_num_operands; auto &body_block = while_op.getBody().front(); auto &cond_block = while_op.getCond().front(); auto &body_yield = *body_block.getTerminator(); auto &cond_yield = *cond_block.getTerminator(); bool cond_forwards_args = cond_yield.getOperands().size() > 1; // Bit mask indicating which operands will be removed.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_communication.cc
ops_to_visit.push_back( {/*region_idx=*/std::nullopt, block_token, &region.front().front()}); return; } RewriteControlFlowTerminator(builder, region.front().getTerminator(), block_token, /*flatten_tuple=*/true); } // For mlir::IfOp or mlir::CaseOp, replace the use of their region's block // argument (of type token) with 'implicit_operand'.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 40.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc
// LiveOuts keeps track of live values at the output of some op. The updates // must be made in a reverse, bottom-up manner. const auto result_values = main_func_block.getTerminator()->getOperands(); LiveOuts liveouts(result_values); // Copy ops to iterate because we will be modifying the block during // iteration. The ordering should be reversed because liveness analysis is a
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc
ret_operands.push_back(operand->getValue()); } builder_.create<func::ReturnOp>(func_.getLoc(), ret_operands); auto arg_types = body.getArgumentTypes(); auto result_types = body.getTerminator()->getOperandTypes(); func_.setType(FunctionType::get(func_.getContext(), arg_types, result_types)); *f = new MlirFunction(std::move(context_), std::move(module_), func_); return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
}; // Updates func argument type to have the updated input shape. void UpdateFuncType(func::FuncOp func) { auto arg_types = func.front().getArgumentTypes(); auto result_types = func.front().getTerminator()->getOperandTypes(); func.setType(FunctionType::get(func.getContext(), arg_types, result_types)); } void HandleFuncOp(Operation* op) { auto func = llvm::cast<func::FuncOp>(op); UpdateFuncType(func); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 29.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/convert_control_to_data_outputs.cc
graph_op->replaceAllUsesWith( new_graph_op->getResults().drop_back(num_resources)); graph_op.erase(); func::ReturnOp return_op = cast<func::ReturnOp>(block.getTerminator()); int num_old_arguments = return_op.getNumOperands(); return_op->insertOperands( num_old_arguments, new_graph_op.getResults().slice(num_old_arguments, num_resources)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// Add i32 -> i8 requantization. UniformQuantizeOp uniform_quant_op = rewriter.create<UniformQuantizeOp>( op.getLoc(), func_result_type, op.getResults()); cast<func::ReturnOp>(entry_func_op.getBody().front().getTerminator()) .setOperand(0, uniform_quant_op); } template <typename GemmStyleOp> // Creates a quantized bias pattern for static and dynamic shape case // and sets the quantized bias as the return op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc
if (auto aliasing_output = main_func.getArgAttrOfType<mlir::IntegerAttr>( i, "tf.aliasing_output")) output_to_input_alias[aliasing_output.getInt()] = i; auto return_op = main_func.begin()->getTerminator(); for (const auto& type_and_idx : llvm::enumerate(func_type.getResults())) { size_t idx = type_and_idx.index(); auto result_ty = mlir::cast<mlir::RankedTensorType>(type_and_idx.value());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 45.3K bytes - Viewed (0)