- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 218 for getOperands (0.65 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
SmallVector<TransposeOp, 2>* transpose_ops) { for (auto it = transpose_ops->begin(); it != transpose_ops->end(); ++it) { auto tranpose_op = *it; for (auto tranpose_operand : tranpose_op.getOperands()) { auto ranked_tranpose_type = mlir::dyn_cast_or_null<RankedTensorType>(tranpose_operand.getType()); if (!ranked_tranpose_type) continue; if (ranked_tranpose_type.getRank() == permutation.size() &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.cc
if (llvm::isa<mlir::TF::TPUPartitionedOutputV2Op>(user)) { assert(user->use_empty()); user->erase(); } } } for (auto operand : cluster.getOperands()) { mlir::Operation* def = operand.getDefiningOp(); if (operand.hasOneUse() && llvm::isa_and_nonnull<mlir::TF::TPUPartitionedInputV2Op>(def)) { operand.dropAllUses(); def->erase();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 13 03:57:18 UTC 2023 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
auto new_operands = llvm::to_vector<8>(caller->getOperands()); llvm::SmallVector<int64_t, 8> changed_indices; // Find the operands to change, and create the loads. for (auto& entry : arg_data_type_and_updated_output_index) { int64_t index = entry.getFirst(); Type new_type = entry.getSecond().first; int64_t updated_index = entry.getSecond().second; auto operand = caller->getOperand(index); builder.setInsertionPoint(caller);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
// PartitionedCallOp does not have kQuantTraitAttrName, and therefore won't // get quantized. auto new_call_op = rewriter.create<TF::PartitionedCallOp>( call_op.getLoc(), call_op.getResultTypes(), call_op.getOperands(), FlatSymbolRefAttr::get(new_ref_func_name)); return new_call_op; } Operation *DuplicateOp(TF::XlaCallModuleOp call_op, PatternRewriter &rewriter, const StringAttr &new_ref_func_name) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// inputs. SmallVector<Value, 4> inputs; inputs.reserve(op_with_region->getNumOperands()); for (Value operand : op_with_region->getOperands()) { const Type operand_type = operand.getType(); if (mlir::isa<NoneType>(operand_type)) { inputs.push_back(operand); continue; } const Type element_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_custom_call_to_composite.cc
auto decomposition = mlir::cast<FlatSymbolRefAttr>(calledComputations[0]); auto composite = rewriter.create<mlir::stablehlo::CompositeOp>( op.getLoc(), op.getResultTypes(), op.getOperands(), name.str(), attrs, decomposition.getValue()); rewriter.replaceOp(op, composite.getResults()); return success(); } }; struct LegalizeStablehloCustomCallToCompositePass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_with_tf2xla.cc
// while creating the ops so make sure there aren't any type changes between // the original op operands and the operands during the conversion. for (auto&& [old_val, new_val] : llvm::zip(op->getOperands(), operands)) { if (old_val.getType() != new_val.getType()) return failure(); } auto abstractOp = op->getRegisteredInfo(); if (!abstractOp) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 9.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/raise_custom_ops.cc
for (auto *op : custom_ops) { builder.setInsertionPoint(op); Location loc = op->getLoc(); auto custom_op = builder.create<CustomTfOp>(loc, op->getResultTypes(), op->getOperands()); Region region; Block *new_block = new Block; region.push_back(new_block); builder.setInsertionPointToEnd(&region.front()); Operation *inner_op = builder.clone(*op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 17 07:31:01 UTC 2023 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
func_op.getRegion().getBlocks().front().getTerminator()); if (!return_op) return failure(); auto req_op = llvm::dyn_cast_or_null<mlir::stablehlo::UniformQuantizeOp>( return_op.getOperands()[0].getDefiningOp()); if (!req_op) return failure(); // Create a new func.call op with f32 output. auto new_call_op = call_op.clone(); new_call_op->getResult(0).setType(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
OpFoldResult LeakyReluOp::fold(FoldAdaptor adaptor) { auto operands = adaptor.getOperands(); assert(operands.size() == 1 && "leaky relu has one operand"); // leaky_relu(x, alpha: 1) -> x if (getAlpha().convertToFloat() == 1.0f && getOperand().getType() == getType()) return getOperand(); auto calculate = [&](FloatAttr arg) { APFloat val = arg.getValue();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0)