- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 2,255 for OP (0.02 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_traits.h
if (argumentOp && op->getName() == argumentOp->getName()) { // Replace the outer operation output with the inner operation. return op->getOperand(0); } } else if (op->getOperand(0) == op->getOperand(1)) { return op->getOperand(0); } return {}; } inline LogicalResult verifyIsInvolution(Operation* op) { // TODO(b/246518997): Add back check for no side effects on operation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewriteARM64latelower.go
// result: x for { x := v_0 if x.Op != OpARM64Equal { break } v.copyOf(x) return true } // match: (MOVBUreg x:(NotEqual _)) // result: x for { x := v_0 if x.Op != OpARM64NotEqual { break } v.copyOf(x) return true } // match: (MOVBUreg x:(LessThan _)) // result: x for { x := v_0 if x.Op != OpARM64LessThan { break }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/cpu_hardware.cc
#define TAC_REGISTER_CPU_OP(Op, Create) \ TargetHardwareOpRegistration<CpuHardware, Op> Op##_CpuHardware_hardware( \ Create); // Operation costs on CPU // Currently used for these ops: // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected class CpuConvOp : public TargetHardwareOperation { double GetOpCost(mlir::Operation* op) const override { float cost = 0.0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/device_attribute_to_launch.cc
builder.create<tf_device::ReturnOp>(op->getLoc(), op->getResults()) .getOperation(); MLIRContext* context = launch_op.getContext(); op->removeAttr(StringAttr::get(context, kDeviceAttr)); op->moveBefore(return_op); } void DeviceAttributeToLaunch::runOnOperation() { const Dialect* tf_dialect = getContext().getLoadedDialect("tf"); getOperation().walk([&](Operation* op) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 04 00:59:46 UTC 2022 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc
void runOnOperation() override; }; // Returns whether op requires `device_ordinal` attribute. bool RequiresDeviceOrdinalAttribute(Operation* op) { return (llvm::isa<TF::EnqueueTPUEmbeddingSparseTensorBatchOp, TF::EnqueueTPUEmbeddingRaggedTensorBatchOp, TF::EnqueueTPUEmbeddingArbitraryTensorBatchOp>(op) && op->hasAttr(kDeviceOrdinalAttr) && op->hasAttr(kReplicaIdAttr)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
// Checks if an op is inside a lifted function. // If the given op pointer is a nullptr, returns false. bool IsInLiftedFunc(Operation* op); // Checks if the op is inside a StableHLO op with region. // If the given op pointer is a nullptr, returns false. bool IsInStableHloOpRegion(Operation* op); // Checks if a given einsum op is supported for XlaDotV2 quantization.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/README.md
new op is presented as a single node in the graph, thus optimization passes and kernels can easily be specialized to this op for better performance. * *Automatic shape/type inference support*: No shape functions are required for the new op; * *Automatic gradient support (WIP)*: The user doesn't need to author a gradient function of the op for training. ### Use Cases
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
// quantization. For dynamic range quantizable ops, it refers to the op // specification for checking the support. For custom ops, it checks the // provided map. bool hasInt8QuantizableOperandAt(Operation* op, int operand_index) const { if (visited_nonquantizable_ops_->contains(op)) { return false; } if (auto custom_op = llvm::dyn_cast_or_null<CustomOp>(op)) { std::string op_name = custom_op.getCustomCode().str();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc
return true; }; return RewriteOp(op, rewriter, get_broadcasted_shape); } template <typename Op> LogicalResult ConvertResultsBroadcastableShapeOp::RewriteEqOp( Operation* op, PatternRewriter& rewriter) const { auto eq_op = llvm::dyn_cast_or_null<Op>(op); if (eq_op && eq_op.getIncompatibleShapeError()) return RewriteOp(op, rewriter, OpTrait::util::getBroadcastedShape); return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc
tensorflow::tpu::TPUCompileMetadataProto* metadata) { auto input_shardings = op->getAttrOfType<ArrayAttr>(tensorflow::kInputShardingAttr); if (!input_shardings) return op.emitOpError( CreateMissingAttributeMsg(tensorflow::kInputShardingAttr)); if (input_shardings.size() != op.getNumOperands()) return op.emitOpError( llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kInputShardingAttr,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.3K bytes - Viewed (0)