- Sort by: Score
- Results per page: 10
- Languages All
Results 111 - 120 of 218 for getOperands (0.16 sec)
-
tensorflow/compiler/mlir/lite/utils/utils.h
using mlir::Operation; using mlir::ShapedType; using mlir::Value; // Returns true if all tensor value in `values` has static shape and same shape. inline bool OpHasSameStaticShapes(Operation* op) { auto values = op->getOperands(); int operand_num = 0; ArrayRef<int64_t> shape; for (Value value : values) { auto shaped_type = value.getType().dyn_cast<ShapedType>(); if (!shaped_type || !shaped_type.hasStaticShape()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
llvm::Twine(calib_opts_.calibration_method())) .str()); } } else { // Quantize output of fully quantizable composite functions. for (Value input : op->getOperands()) { auto defining_op = input.getDefiningOp(); std::optional<StringRef> composite_function_name = GetCompsiteFunctionName(defining_op); if (!composite_function_name.has_value()) continue;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc
} rewriter.setInsertionPoint(cloned_func.getBody().front().getTerminator()); rewriter.create<TF::_XlaSendFromHostOp>( func.getLoc(), cloned_func.getBody().front().getTerminator()->getOperands(), /*dynamic_key=*/dynamic_key, op.getRecvKeyAttr(), /*device_ordinal=*/rewriter.getI64IntegerAttr(0), rewriter.getStringAttr("TPU")); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
// The current implementation is based on the number of operands. static bool PreferResultScale(Operation* op) { int float_operands = 0; for (auto operand : op->getOperands()) { if (auto operand_type = dyn_cast<ShapedType>(operand.getType())) { if (isa<FloatType>(operand_type.getElementType())) { if (++float_operands > 1) return true; } } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc
// UniformQuantized Ops are considered legal if its qint operands and // results are connected to TF CastOp. return op && llvm::all_of(op->getResults(), IsQintValueQintToIntCast) && llvm::all_of(op->getOperands(), IsQintValueDefinedByIntToQintCast); } bool IsCastOpLegal(TF::CastOp cast_op) { // Consider qint <-> qint casts illegal. if (IsIllegalType(cast_op.getSrcT()) && IsIllegalType(cast_op.getDstT())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
std::string custom_option_buffer; CreateFlexOpCustomOptions(op_name, node_def_str, custom_option_buffer); auto flex_op = builder.create<TFL::CustomOp>( op->getLoc(), op->getResultTypes(), op->getOperands(), flex_op_name, CustomOptionForFlexOp(&builder, custom_option_buffer)); op->replaceAllUsesWith(flex_op); op->erase(); return true; } // Sets the "no_fallback" attribute.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_device_inference.cc
auto walk_res = func_op.walk([&](Operation* op) { if (auto while_op = dyn_cast<WhileOp>(op)) { if (failed(propagate_operands_to_callee_arguments( while_op, while_op.getOperands(), {while_op.body_function(), while_op.cond_function()}, func_res))) return WalkResult::interrupt(); } else if (auto if_op = dyn_cast<IfOp>(op)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 03:47:00 UTC 2023 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc
if (num_cores_per_replica != 1) return success(); llvm::SetVector<Value> bcasts; cluster->walk([&](Operation* op) { if (op == cluster) return WalkResult::advance(); for (auto operand : op->getOperands()) { Operation* scope = operand.getParentBlock()->getParentOp(); if (scope->isProperAncestor(replicate)) { bcasts.insert(operand); } } return WalkResult::advance(); });
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 13 18:52:07 UTC 2024 - 13.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
return RefineTypeForPassThroughOperands( op, iter_sink.getOperands().drop_front().take_front(), iter_source.getResults()); } if (auto launch_op = dyn_cast<tf_device::LaunchOp>(op)) { auto terminator = launch_op.GetBody().getTerminator(); return RefineTypeForPassThroughOperands(op, terminator->getOperands(), op->getResults()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
// func.walk([&](quantfork::QuantizeCastOp q_op) { // If up with end up with auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>( q_op.getOperand().getDefiningOp()); if (!dq_op) { return; } auto dq_arg = dq_op.getOperand(); if (!dq_arg.hasOneUse()) { // The initial quantization is used someplace else ... so it might be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0)