- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 108 for getOperands (0.2 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc
liveouts.snapshot_previous_state(); return; } reverse_subgraph.insert(op); defined_values.insert(op->getResults().begin(), op->getResults().end()); operands.insert(op->getOperands().begin(), op->getOperands().end()); }; for (Operation* op : reverse_main_func_block_ops) { if (!ops_to_add.contains(op)) continue; // When hitting a non-StableHLO op, i.e. tf.CustomAggregatorOp, start
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc
return std::nullopt; for (auto [arg, operand] : llvm::zip(block.getArguments(), call.getOperands())) { if (arg != operand) return std::nullopt; } for (auto [ret, operand] : llvm::zip(call.getResults(), yield.getOperands())) { if (ret != operand) return std::nullopt; } SymbolRefAttr symbol = call.getCallableForCallee().get<SymbolRefAttr>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc
auto old_body_ret = body.front().getTerminator(); auto new_retvals = llvm::to_vector<8>(old_body_ret->getOperands()); for (int64_t i = 0; i < while_op.getNumResults(); ++i) { if (!ta_arg_buffer_type(i)) continue; auto retval = old_body_ret->getOperand(i); auto arg = retval.dyn_cast<BlockArgument>(); if (!arg) { return while_op.emitOpError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 02 20:41:19 UTC 2023 - 40.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc
if (!graph) return false; Operation* terminator = block.getTerminator(); if (graph.getNumResults() != terminator->getNumOperands()) return false; for (auto result : llvm::zip(graph.getResults(), terminator->getOperands())) if (std::get<0>(result) != std::get<1>(result)) return false; return true; } // Checks if an operation of the tf_executor dialect can have TPU devices // propagated through.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/parallel_execute_to_islands.cc
Operation* terminator = execute_block.getTerminator(); builder->setInsertionPoint(terminator); auto yield = builder->create<tf_executor::YieldOp>( terminator->getLoc(), terminator->getOperands()); terminator->erase(); // Create new island for each region. builder->setInsertionPoint(island_op); auto execute_island = builder->create<tf_executor::IslandOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jan 19 19:47:16 UTC 2023 - 11.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.cc
has_resource_result = true; int result_idx = result.getResultNumber(); Value ret0 = op->getRegion(0).front().getTerminator()->getOperand(result_idx); for (Region &region : op->getRegions().drop_front()) { Value ret = region.front().getTerminator()->getOperand(result_idx); if (ret != ret0) { return op->emitError("Result #") << result_idx
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.cc
for (auto operand_and_idx : llvm::enumerate(op->getOperands())) TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(operand_and_idx.value(), dst_node, operand_and_idx.index())); operand_offset = op->getNumOperands(); } // For all other ops (including tf_executor.island), add remaining edges. for (auto operand_and_idx : llvm::enumerate(inst->getOperands())) TF_RETURN_IF_ERROR(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 35.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc
for (auto operand_and_idx : llvm::enumerate(op->getOperands())) TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(operand_and_idx.value(), dst_node, operand_and_idx.index())); operand_offset = op->getNumOperands(); } // For all other ops (including tf_executor.island), add remaining edges. for (auto operand_and_idx : llvm::enumerate(inst->getOperands())) TF_RETURN_IF_ERROR(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 23:04:51 UTC 2024 - 35.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
SmallVector<TransposeOp, 2>* transpose_ops) { for (auto it = transpose_ops->begin(); it != transpose_ops->end(); ++it) { auto tranpose_op = *it; for (auto tranpose_operand : tranpose_op.getOperands()) { auto ranked_tranpose_type = mlir::dyn_cast_or_null<RankedTensorType>(tranpose_operand.getType()); if (!ranked_tranpose_type) continue; if (ranked_tranpose_type.getRank() == permutation.size() &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
// PartitionedCallOp does not have kQuantTraitAttrName, and therefore won't // get quantized. auto new_call_op = rewriter.create<TF::PartitionedCallOp>( call_op.getLoc(), call_op.getResultTypes(), call_op.getOperands(), FlatSymbolRefAttr::get(new_ref_func_name)); return new_call_op; } Operation *DuplicateOp(TF::XlaCallModuleOp call_op, PatternRewriter &rewriter, const StringAttr &new_ref_func_name) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0)