- Sort: Score
- Results: 10
- Languages: All
Results 1 - 10 of 10 for MULTIPLY (0.41 sec)
-
src/cmd/internal/obj/s390x/asmz.go
op_VME uint32 = 0xE7A6 // VRR-c VECTOR MULTIPLY EVEN op_VMH uint32 = 0xE7A3 // VRR-c VECTOR MULTIPLY HIGH op_VMLE uint32 = 0xE7A4 // VRR-c VECTOR MULTIPLY EVEN LOGICAL op_VMLH uint32 = 0xE7A1 // VRR-c VECTOR MULTIPLY HIGH LOGICAL op_VMLO uint32 = 0xE7A5 // VRR-c VECTOR MULTIPLY ODD LOGICAL op_VML uint32 = 0xE7A2 // VRR-c VECTOR MULTIPLY LOW op_VMO uint32 = 0xE7A7 // VRR-c VECTOR MULTIPLY ODD
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 17:46:09 UTC 2024 - 176.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/generic.rules
(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0]) // Unsigned divide, not a power of 2. Strength reduce to a multiply. // For 8-bit divides, we just do a direct 9-bit by 8-bit multiply. (Div8u x (Const8 [c])) && umagicOK8(c) => (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)])
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 22:21:05 UTC 2024 - 135.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
// Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x. {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). Returns uint64(x), and flags set to overflow if uint64(x) != x.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Aug 04 16:40:24 UTC 2023 - 98K bytes - Viewed (1) -
src/cmd/internal/obj/ppc64/asm9.go
/* Vector multiply */ {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ /* Vector rotate */
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 13:55:28 UTC 2024 - 156.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
int64_t L2NormalizationOp::GetArithmeticCount(Operation* op) { int64_t count; // Computing the squared L2 norm is N multiply-adds so 2N ops, // then the single inverse-sqrt is negligible, then we multiply each // value by the resulting multiplier, so an extra N ops. count 3N ops. if (ArithmeticCountUtilHelper::GetFirstOutputCount(op, &count)) { return 3 * count; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
LogicalResult matchAndRewrite(TFL::MulOp mul_op, PatternRewriter &rewriter) const override { // If we are broadcasting on the lhs then don't fold the multiply as it // would increase the amount of compute done by the fully connected op. if (mul_op.getLhs().getType() != mul_op.getType()) return failure(); // Mul. DenseElementsAttr cst;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
mlir::cast<ShapedType>(dot_op.getResult().getType()), dot_op.getLoc()); } // Converts mhlo.dot to tf.BatchMatMul. Reshape or Transpose ops will also be // inserted to convert to well-formed matrix multiply. Value ConvertDotGeneralOp(PatternRewriter& rewriter, Operation* old_op) { auto dot_general_op = cast<mhlo::DotGeneralOp>(old_op); return ConvertDot( rewriter, dot_general_op.getLhs(), dot_general_op.getRhs(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
And<[TFL_OperandHasRankAtMostPred<0, 5>, TFL_OperandHasRankAtMostPred<1, 5>]>>, DynamicRangeQuantizedOpInterface]> { let summary = "Batch Matrix Multiply Operator"; let description = [{ Performs a batched matrix multiplication on the inputs. Follows the conventions of TensorFlow BatchMatMulV2, with support for unknown dimensions
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
src/runtime/proc.go
case _Gwaiting: if !gp.waitreason.isMutexWait() { // Not blocking on a lock. break } // Blocking on a lock, measure it. Note that because we're // sampling, we have to multiply by our sampling period to get // a more representative estimate of the absolute value. // gTrackingPeriod also represents an accurate sampling period // because we can only enter this state from _Grunning.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0) -
doc/go1.17_spec.html
the precision of the target type, preventing fusion that would discard that rounding. </p> <p> For instance, some architectures provide a "fused multiply and add" (FMA) instruction that computes <code>x*y + z</code> without rounding the intermediate result <code>x*y</code>. These examples show when a Go implementation can use that instruction: </p> <pre>
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 11 20:22:45 UTC 2024 - 211.6K bytes - Viewed (0)