- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for LogOp (1.04 sec)
-
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc
TAC_REGISTER_GPU_OP(CosOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(DivOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(ExpOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(HardSwishOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(LogOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(LogisticOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(MaxPool2DOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(MirrorPadOp, CreateBasicOpNoCost);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
AddV2Op::getOperationName(), DivOp::getOperationName(), SubOp::getOperationName(), LogOp::getOperationName(), Log1pOp::getOperationName(), IsInfOp::getOperationName(), MulOp::getOperationName(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc
TypeID::get<TF::LeftShiftOp>(), TypeID::get<TF::LessOp>(), TypeID::get<TF::ListDiffOp>(), TypeID::get<TF::LogicalAndOp>(), TypeID::get<TF::LogicalNotOp>(), TypeID::get<TF::LogOp>(), TypeID::get<TF::LowerBoundOp>(), TypeID::get<TF::MakeUniqueOp>(), TypeID::get<TF::MatMulOp>(), TypeID::get<TF::MatrixDiagV3Op>(), TypeID::get<TF::MatrixInverseOp>(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 21.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
return setOperand(0, callee.get<Value>()); } //===----------------------------------------------------------------------===// // LogOp //===----------------------------------------------------------------------===// void LogOp::getCanonicalizationPatterns(RewritePatternSet& results, MLIRContext* context) { results.add<LogOfSoftmax, LogToLog1p>(context); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
}; return ConstFoldUnaryOp(result_type, operands[0], compute); } //===----------------------------------------------------------------------===// // LogOp //===----------------------------------------------------------------------===// OpFoldResult LogOp::fold(FoldAdaptor adaptor) { auto operands = adaptor.getOperands(); Type result_type = getType(); // Only constant fold for tensor of f32 is implemented.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
PatternRewriter &rewriter) const override { if (sub_op.getFusedActivationFunction() != "NONE") { return failure(); } auto log_op = dyn_cast_or_null<TFL::LogOp>(sub_op.getRhs().getDefiningOp()); if (!log_op || !log_op->hasOneUse()) { return failure(); } auto sum_op = dyn_cast_or_null<TFL::SumOp>(log_op.getX().getDefiningOp());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/asm7.go
o1 = c.opxrrr(p, p.As, rt, r, obj.REG_NONE, true) o1 |= c.encRegShiftOrExt(p, &p.From, p.From.Reg) /* includes reg, op, etc */ } else { o1 = c.opxrrr(p, p.As, rt, r, rf, false) } case 28: /* logop $vcon, [R], R (64 bit literal) */ if p.Reg == REGTMP { c.ctxt.Diag("cannot use REGTMP as source: %v\n", p) } o := uint32(0) num := uint8(0) cls := int(p.From.Class) if isANDWop(p.As) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 201.1K bytes - Viewed (0)