- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 32 for vsubfp (0.12 sec)
-
src/cmd/internal/obj/x86/anames.go
"VSHUFF32X4", "VSHUFF64X2", "VSHUFI32X4", "VSHUFI64X2", "VSHUFPD", "VSHUFPS", "VSQRTPD", "VSQRTPS", "VSQRTSD", "VSQRTSS", "VSTMXCSR", "VSUBPD", "VSUBPS", "VSUBSD", "VSUBSS", "VTESTPD", "VTESTPS", "VUCOMISD", "VUCOMISS", "VUNPCKHPD", "VUNPCKHPS", "VUNPCKLPD", "VUNPCKLPS", "VXORPD", "VXORPS", "VZEROALL",
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 11 18:32:50 UTC 2023 - 19.1K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go
inst.Op = FDIV case FSUB: inst.Op = FSUBR case FSUBR: inst.Op = FSUB case FDIVP: inst.Op = FDIVRP case FDIVRP: inst.Op = FDIVP case FSUBP: inst.Op = FSUBRP case FSUBRP: inst.Op = FSUBP } } case MOVNTSD: // MOVNTSD is F2 0F 2B /r. // MOVNTSS is F3 0F 2B /r (supposedly; not in manuals). // Usually inner prefixes win for display,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 21.4K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/plan9.go
return true } return false } func reverseOperandOrder(op Op) bool { switch op { // Special case for SUBF, SUBFC: not reversed case ADD, ADDC, ADDE, ADDCC, ADDCCC: return true case MULLW, MULLWCC, MULHW, MULHWCC, MULLD, MULLDCC, MULHD, MULHDCC, MULLWO, MULLWOCC, MULHWU, MULHWUCC, MULLDO, MULLDOCC: return true
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 22 17:16:14 UTC 2022 - 10.9K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go
} var fpInst []goFPInfo = []goFPInfo{ {VADD_EQ_F32, []int{2, 1, 0}, "VADD", "ADDF"}, {VADD_EQ_F64, []int{2, 1, 0}, "VADD", "ADDD"}, {VSUB_EQ_F32, []int{2, 1, 0}, "VSUB", "SUBF"}, {VSUB_EQ_F64, []int{2, 1, 0}, "VSUB", "SUBD"}, {VMUL_EQ_F32, []int{2, 1, 0}, "VMUL", "MULF"}, {VMUL_EQ_F64, []int{2, 1, 0}, "VMUL", "MULD"}, {VNMUL_EQ_F32, []int{2, 1, 0}, "VNMUL", "NMULF"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 11.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/functional-control-flow-to-cfg.mlir
^bb1(%2: tensor<*xf32>, %3: tensor<*xf32>): %4 = arith.subf %arg0, %arg1 : tensor<*xf32> func.return %4 : tensor<*xf32> // CHECK: ^bb4([[FINALARG0:%.+]]: tensor<*xf32>, [[FINALARG1:%.+]]: tensor<*xf32>): // CHECK: [[SUBF:%.+]] = arith.subf %arg0, %arg1 : tensor<*xf32> // CHECK: return [[SUBF]] : tensor<*xf32> } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
Operation *op_input = args[0].getDefiningOp(); Operation *op_weight = args[1].getDefiningOp(); if (isa<TF::SubOp>(op_input)) { op_input = op_input->getOperand(0).getDefiningOp(); } if (isa<TF::SubOp>(op_weight)) { op_weight = op_weight->getOperand(0).getDefiningOp(); } if (isa<TF::CastOp>(op_input)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
src/syscall/ztypes_linux_amd64.go
} const ( VINTR = 0x0 VQUIT = 0x1 VERASE = 0x2 VKILL = 0x3 VEOF = 0x4 VTIME = 0x5 VMIN = 0x6 VSWTC = 0x7 VSTART = 0x8 VSTOP = 0x9 VSUSP = 0xa VEOL = 0xb VREPRINT = 0xc VDISCARD = 0xd VWERASE = 0xe VLNEXT = 0xf VEOL2 = 0x10 IGNBRK = 0x1 BRKINT = 0x2 IGNPAR = 0x4 PARMRK = 0x8 INPCK = 0x10
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 08 17:55:49 UTC 2023 - 12K bytes - Viewed (0) -
src/syscall/ztypes_linux_386.go
} const ( VINTR = 0x0 VQUIT = 0x1 VERASE = 0x2 VKILL = 0x3 VEOF = 0x4 VTIME = 0x5 VMIN = 0x6 VSWTC = 0x7 VSTART = 0x8 VSTOP = 0x9 VSUSP = 0xa VEOL = 0xb VREPRINT = 0xc VDISCARD = 0xd VWERASE = 0xe VLNEXT = 0xf VEOL2 = 0x10 IGNBRK = 0x1 BRKINT = 0x2 IGNPAR = 0x4 PARMRK = 0x8 INPCK = 0x10
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 08 17:55:49 UTC 2023 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
// broadcastable and fully static. Consider moving this into attribute later. bool IsElementwise(Operation *op) { if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp, TFL::MaximumOp, TFL::MinimumOp>(op))) { return false; } auto opr1_type = llvm::dyn_cast_or_null<RankedTensorType>(op->getOperand(0).getType()); auto opr2_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
TF::Relu6Op::getOperationName().str(), TF::ReluOp::getOperationName().str(), TF::ReshapeOp::getOperationName().str(), TF::SoftmaxOp::getOperationName().str(), TF::SubOp::getOperationName().str(), TF::TransposeOp::getOperationName().str(), // go/keep-sorted end // clang-format on }); return *legacy_op_list; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.2K bytes - Viewed (0)