-rw-r--r--  main/xen/APKBUILD | 183
-rw-r--r--  main/xen/xsa182-4.6.patch | 102
-rw-r--r--  main/xen/xsa183-4.6.patch | 75
-rw-r--r--  main/xen/xsa185.patch | 38
-rw-r--r--  main/xen/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch | 73
-rw-r--r--  main/xen/xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch | 41
-rw-r--r--  main/xen/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch | 142
-rw-r--r--  main/xen/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch | 42
-rw-r--r--  main/xen/xsa190-4.6-CVE-2016-7777.patch | 163
-rw-r--r--  main/xen/xsa191-4.6-CVE-2016-9386.patch | 138
-rw-r--r--  main/xen/xsa192-CVE-2016-9382.patch | 64
-rw-r--r--  main/xen/xsa193-4.7-CVE-2016-9385.patch | 68
-rw-r--r--  main/xen/xsa195-CVE-2016-9383.patch | 45
-rw-r--r--  main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch | 61
-rw-r--r--  main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch | 76
-rw-r--r--  main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch | 63
-rw-r--r--  main/xen/xsa197-qemut-CVE-2016-9381.patch | 65
-rw-r--r--  main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch | 62
-rw-r--r--  main/xen/xsa200-4.6.patch | 55
-rw-r--r--  main/xen/xsa201-1.patch | 87
-rw-r--r--  main/xen/xsa201-2.patch | 199
-rw-r--r--  main/xen/xsa201-3-4.7.patch | 47
-rw-r--r--  main/xen/xsa201-4.patch | 130
-rw-r--r--  main/xen/xsa202-4.6.patch | 73
-rw-r--r--  main/xen/xsa203-4.7.patch | 19
-rw-r--r--  main/xen/xsa204-4.7.patch | 69
-rw-r--r--  main/xen/xsa207.patch | 31
-rw-r--r--  main/xen/xsa208-qemut.patch | 56
-rw-r--r--  main/xen/xsa208-qemuu-4.7.patch | 53
-rw-r--r--  main/xen/xsa209-qemut.patch | 54
-rw-r--r--  main/xen/xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch | 72
-rw-r--r--  main/xen/xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch | 60
-rw-r--r--  main/xen/xsa211-qemut.patch | 225
-rw-r--r--  main/xen/xsa211-qemuu-4.6.patch | 260
-rw-r--r--  main/xen/xsa212.patch | 87
-rw-r--r--  main/xen/xsa213-4.6.patch | 173
-rw-r--r--  main/xen/xsa214.patch | 41
-rw-r--r--  main/xen/xsa215.patch | 37
-rw-r--r--  main/xen/xsa226-4.6.patch | 133
-rw-r--r--  main/xen/xsa227-4.6.patch | 66
-rw-r--r--  main/xen/xsa228-4.8.patch | 198
41 files changed, 423 insertions, 3303 deletions
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index a2c343ece57..d0d80f8b46d 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,8 +3,8 @@
# Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
-pkgver=4.6.3
-pkgrel=10
+pkgver=4.6.6
+pkgrel=0
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64"
@@ -42,6 +42,11 @@ subpackages="$pkgname-doc $pkgname-dev $pkgname-libs $pkgname-hypervisor"
# - CVE-2017-14318 XSA-232
# - CVE-2017-14317 XSA-233
# - CVE-2017-14319 XSA-234
+# 4.6.6-r0:
+# - CVE-2017-12135 XSA-226
+# - CVE-2017-12137 XSA-227
+# - CVE-2017-12136 XSA-228
+# - CVE-2017-12855 XSA-230
# grep _VERSION= stubdom/configure
_ZLIB_VERSION="1.2.3"
@@ -68,45 +73,11 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/$pkgname-$pkgver.ta
http://xenbits.xen.org/xen-extfiles/zlib-$_ZLIB_VERSION.tar.gz
http://xenbits.xen.org/xen-extfiles/ipxe-git-$_IPXE_GIT_TAG.tar.gz
- xsa182-4.6.patch
- xsa183-4.6.patch
- xsa184-qemut-master.patch
xsa184-qemuu-master.patch
- xsa185.patch
- xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
- xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
- xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
- xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
- xsa190-4.6-CVE-2016-7777.patch
- xsa191-4.6-CVE-2016-9386.patch
- xsa192-CVE-2016-9382.patch
- xsa193-4.7-CVE-2016-9385.patch
- xsa195-CVE-2016-9383.patch
- xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
- xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
- xsa197-4.6-qemuu-CVE-2016-9381.patch
- xsa197-qemut-CVE-2016-9381.patch
- xsa198-CVE-2016-9379-CVE-2016-9380.patch
- xsa200-4.6.patch
- xsa201-1.patch
- xsa201-2.patch
- xsa201-3-4.7.patch
- xsa201-4.patch
- xsa202-4.6.patch
- xsa203-4.7.patch
- xsa204-4.7.patch
- xsa207.patch
- xsa208-qemut.patch
- xsa208-qemuu-4.7.patch
- xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch
- xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch
- xsa209-qemut.patch
- xsa211-qemut.patch
- xsa211-qemuu-4.6.patch
- xsa212.patch
- xsa213-4.6.patch
- xsa214.patch
- xsa215.patch
+ xsa226-4.6.patch
+ xsa227-4.6.patch
+ xsa228-4.8.patch
+ xsa230.patch
xsa231-4.7.patch
xsa232.patch
xsa233.patch
@@ -312,7 +283,7 @@ hypervisor() {
mv "$pkgdir"/boot "$subpkgdir"/
}
-md5sums="26419d8477082dbdb32ec75b00f00643 xen-4.6.3.tar.gz
+md5sums="698328dcac775c8ccef0da3167020b19 xen-4.6.6.tar.gz
dd60683d7057917e34630b4a787932e8 gmp-4.3.2.tar.bz2
cd3f3eb54446be6003156158d51f4884 grub-0.97.tar.gz
36cc57650cffda9a0269493be2a169bb lwip-1.3.0.tar.gz
@@ -322,45 +293,11 @@ cec05e7785497c5e19da2f114b934ffd pciutils-2.2.9.tar.bz2
e26becb8a6a2b6695f6b3e8097593db8 tpm_emulator-0.7.4.tar.gz
debc62758716a169df9f62e6ab2bc634 zlib-1.2.3.tar.gz
7496268cebf47d5c9ccb0696e3b26065 ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz
-0c0322407ec522b1b82d833dd6d38e88 xsa182-4.6.patch
-f137255f6928d439a5ddf18ebab402d7 xsa183-4.6.patch
-95bc220677fc2bb9a3df4dc14a0b31f6 xsa184-qemut-master.patch
cc0904605d03a9e4f6f21d16824e41c9 xsa184-qemuu-master.patch
-8ae22c70681f3daf97ee7ef8ad947e76 xsa185.patch
-9a2b74f2079ba0b7a6e2420e6887cc3a xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
-3d812cf9ccc8443874b36e061392d388 xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
-c426383254acdcbb9466bbec2d6f8d9b xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
-a98c0fa2579965d72272f381f193195d xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
-2c6f0d0ec618a832cc4f5316624fac5e xsa190-4.6-CVE-2016-7777.patch
-5399accd478266047e9fada57bba1bf8 xsa191-4.6-CVE-2016-9386.patch
-002cef87f605db2cd9a6ec5230685554 xsa192-CVE-2016-9382.patch
-0bde9ad287f8a586fb47abc2f393287e xsa193-4.7-CVE-2016-9385.patch
-03ee88fdd719a6e2cdd53b698b14bfa0 xsa195-CVE-2016-9383.patch
-362e7460fa4e5db3a5e1c2a4209718cf xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
-3f66b6bb7129867f857fe25916c32d84 xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
-2800ef5a88bb0b1a394ae3a938d507f0 xsa197-4.6-qemuu-CVE-2016-9381.patch
-23e70410938604dda2ade27a8b94264f xsa197-qemut-CVE-2016-9381.patch
-e8d3ee1e904071920a6afbbf6a27aad2 xsa198-CVE-2016-9379-CVE-2016-9380.patch
-add3ad7828d582fc272073e906ce17a1 xsa200-4.6.patch
-6580371b4b8db7cb6876f2b42ab3fc61 xsa201-1.patch
-76394482eaf0caeb3e0611ba70e8923c xsa201-2.patch
-136b9ad8b2bcc57d5a7ed3bf13bebe3c xsa201-3-4.7.patch
-9cb1516d783fc9c765e9a37574bb3cbd xsa201-4.patch
-a5a39c6354c952095e1d78a582385933 xsa202-4.6.patch
-da401ec1a25668a2dabc666f6687409b xsa203-4.7.patch
-dc4ad05682ce371e1755817b22229601 xsa204-4.7.patch
-31058e5dfdf50c171d450e27776d5d07 xsa207.patch
-91f0e92cde4c3d88a792699d9ea43f00 xsa208-qemut.patch
-ef703d045bf84ef27c90ce3190e25e33 xsa208-qemuu-4.7.patch
-fa347ce5494be0a9199b052eede3ca19 xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch
-1dc8ad4b8a7ad8412c64a71a79c836c1 xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch
-d3307c3a5e6473717f34b8aff693f678 xsa209-qemut.patch
-08722b90a7d33850ea67dfc90d04f6d8 xsa211-qemut.patch
-a300eae67ae77cf4d2e0741dad01ee29 xsa211-qemuu-4.6.patch
-8d3c76a3954dfa359d2f9fe9b59c1828 xsa212.patch
-c25a4a45f64fc77a3dc6d80f19570e3a xsa213-4.6.patch
-c4466088c7c521f6c84cdc63e8e91e60 xsa214.patch
-e5847b6c87c60de11ba7a128d7babe10 xsa215.patch
+e06e0d88c3ab98d3aed3a488021ecbdf xsa226-4.6.patch
+b16af567bd9a37fab1a6887c7726a14a xsa227-4.6.patch
+f66e7149f5a579ead59cc416d23bdefb xsa228-4.8.patch
+9230643cea68c0dea122b3631078059a xsa230.patch
0144a1d4b2c989231f36a7828c52261f xsa231-4.7.patch
d582d6a402935ea1aa2f6d9435ffef52 xsa232.patch
2f027cddb9401ca25add6ae229cb52c6 xsa233.patch
@@ -386,7 +323,7 @@ dcdd1de2c29e469e834a02ede4f47806 xendomains.confd
9df68ac65dc3f372f5d61183abdc83ff xen-consoles.logrotate
6a2f777c16678d84039acf670d86fff6 xenqemu.confd
e1c9e1c83a5cc49224608a48060bd677 xenqemu.initd"
-sha256sums="02badfce9a037bd1bd4a94210c1f6b85467746216c71795805102b514bcf1fc4 xen-4.6.3.tar.gz
+sha256sums="fa0748f128b189ec3497470a95f53ea42bbe4d7b5622509bcd862877895842f8 xen-4.6.6.tar.gz
936162c0312886c21581002b79932829aa048cfaf9937c6265aeaa14f1cd1775 gmp-4.3.2.tar.bz2
4e1d15d12dbd3e9208111d6b806ad5a9857ca8850c47877d36575b904559260b grub-0.97.tar.gz
772e4d550e07826665ed0528c071dd5404ef7dbe1825a38c8adbc2a00bca948f lwip-1.3.0.tar.gz
@@ -396,45 +333,11 @@ f60ae61cfbd5da1d849d0beaa21f593c38dac9359f0b3ddc612f447408265b24 pciutils-2.2.9
4e48ea0d83dd9441cc1af04ab18cd6c961b9fa54d5cbf2c2feee038988dea459 tpm_emulator-0.7.4.tar.gz
1795c7d067a43174113fdf03447532f373e1c6c57c08d61d9e4e9be5e244b05e zlib-1.2.3.tar.gz
632ce8c193ccacc3012bd354bdb733a4be126f7c098e111930aa41dad537405c ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz
-f10665acaf17dedd15c40bfeb832b188db1ab3e789d95cc3787575529a280813 xsa182-4.6.patch
-0fee41f21a3eb4af1487590098047f4625688bcef7419572a8f418f9fb728468 xsa183-4.6.patch
-88c939c64b8f9fc9f86d0a30517d5455462d1ff837aa4285a9cb189b54c0cf20 xsa184-qemut-master.patch
3877e19992c4532b8b2a37e151fe6a6187a1bbee2b54c1718b995260bb0fcf65 xsa184-qemuu-master.patch
-3328a1953ecdf4de35462ea8396b0927171d718e95f73a87a7f651427bd8f8b4 xsa185.patch
-f2082a36d968a47e477bb5082d0e0aaa58e6cb3dc20b26389f043a9b7b595fa6 xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
-7482a823c3443e26dee1111c4904162845eaa9f826aa7bf8348007406d91bddd xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
-be9fe85d36c2c1fbca246c1f4d834c3ef11b6ab3d5467da0ac8c079aa5a68de9 xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
-b96731379ea77d49ffff31d969f4742dde985ef7a86af9422dcac8327c2a1916 xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
-dbfc4b36132c841959847dfbb85a188ee6489ad3b8d7ecec43c55a303a43df21 xsa190-4.6-CVE-2016-7777.patch
-d95a1f0dd5c45497ca56e2e1390fc688bf0a4a7a7fd10c65ae25b4bbb3353b69 xsa191-4.6-CVE-2016-9386.patch
-687b0216eefd5ecef8a3135cc6f542cb3d9ff35e8e9696a157703e84656c35e8 xsa192-CVE-2016-9382.patch
-f1b0092c585ebffe83d6ed7df94885ec5dfcb4227bdb33f421bad9febb8135a1 xsa193-4.7-CVE-2016-9385.patch
-6ab5f13b81e3bbf6096020f4c3beeffaff67a075cab67e033ba27d199b41cec1 xsa195-CVE-2016-9383.patch
-c4122280f3786416231ae5f0660123446d29e9ac5cd3ffb92784ed36edeec8b7 xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
-25671c44c746d4d0e8f7e2b109926c013b440e0bf225156282052ec38536e347 xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
-6f43a62d0bf9d9eaa9a13262281878d60154b6e2793047e84791418b5621f67a xsa197-4.6-qemuu-CVE-2016-9381.patch
-d3af265879196c05b3fdd2cdeb5e95446f454dd3c1151452fe4f3389eccc39e4 xsa197-qemut-CVE-2016-9381.patch
-0e4533ad2157c03ab309bd12a54f5ff325f03edbe97f23c60a16a3f378c75eae xsa198-CVE-2016-9379-CVE-2016-9380.patch
-820e95e87b838de5eb4158a55c81cf205428f0ed17009dc8d45b2392cf9a0885 xsa200-4.6.patch
-163aeb9ae3ffce28e0bc95bdfff490d2df6f6f0b85ac1d4f447bea921f0a0dda xsa201-1.patch
-0ba570ed7df172475bc745e02b89670608251634895e5279edcf534619d6d81b xsa201-2.patch
-a9cf56564d020675c0f2f1ea15009a712f172be3d53ea8ddf2f48adaac392e76 xsa201-3-4.7.patch
-388d548cd4e30883ae100863d33e792869e7dbd86054299a91b64db6d6599919 xsa201-4.patch
-e007187639f5392a9256979504d50eff0ae38309a61524ea42c4150fab38b6f4 xsa202-4.6.patch
-7cc04278778fe885e4c3ae3f846d099075a38bccfafe6dff018ba525499b4e46 xsa203-4.7.patch
-d0359f26e9be783672896200e14d85a3111c29d7da580313b593fca04688fef2 xsa204-4.7.patch
-e9bcf807b3785ac4d78b621fba4a9395cd713d6e57cdaa66559bccf95ded1cd9 xsa207.patch
-7587967c37af44064a48a244f86e828502f56f6f7cbc76439b7566defcd1c6ee xsa208-qemut.patch
-de706f2b87dcfa5ff9cab37f9640fbd59a90d7f93345eb0c4b23966fd9ed1c10 xsa208-qemuu-4.7.patch
-501566e24ee8b4df6b97bc050bcdc11ea7b12801cba7446d5179788dbb3e5190 xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch
-78f45281545ed9e5e7c41010dbcb1c3e28aaf3609608568b1d45bbe30e4b5336 xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch
-af15d6e6a52f01dbdfc2a4b8a7931d4305fc89b003558f10a548a644dbdb8245 xsa209-qemut.patch
-6d583b255db09a8ed34545a8b290d40c2f51f293de6ebb5abef57945c274ecb0 xsa211-qemut.patch
-be0049f39b306a3dfb703b73eb60ecf35b9cc7a3d4e9481fd8314fd7e3704573 xsa211-qemuu-4.6.patch
-be1255bcda06158cdb86eb5297e8a271e05318e88cd21035c58a67f9ada6ccba xsa212.patch
-dce026ed1a02db1cf22de89120e7129839f656d041379c450e7403ae909e7b99 xsa213-4.6.patch
-1c038c3927d08e6abdf3ce320bb8b0b68a106e6ac86b4e8194035dc5e4726d64 xsa214.patch
-5be4ff661dd22890b0120f86beee3ec809e2a29f833db8c48bd70ce98e9691ee xsa215.patch
+28c7df7edabb91fb2f1fa3fc7d6906bfae75a6e701f1cd335baafaae3e087696 xsa226-4.6.patch
+162b991b27b86f210089526a01cae715563d3a069c92f42538b423bba7709fcc xsa227-4.6.patch
+5a7416f15ac9cd7cace354b6102ff58199fe0581f65a36a36869650c71784e48 xsa228-4.8.patch
+77a73f1c32d083e315ef0b1bbb119cb8840ceb5ada790cad76cbfb9116f725cc xsa230.patch
ce29b56a0480f4835b37835b351e704d204bb0ccd22325f487127aa2776cc2cf xsa231-4.7.patch
5068a78293daa58557c30c95141b775becfb650de6a5eda0d82a4a321ced551c xsa232.patch
f721cc49ba692b2f36299b631451f51d7340b8b4732f74c98f01cb7a80d8662b xsa233.patch
@@ -460,7 +363,7 @@ d13719093a2c3824525f36ac91ac3c9bd1154e5ba0974e5441e4a2ab5e883521 xenconsoled.in
0da87a4b9094f934e3de937e8ef8d3afc752e76793aa3d730182d0241e118b19 xen-consoles.logrotate
4cfcddcade5d055422ab4543e8caa6e5c5eee7625c41880a9000b7a87c7c424e xenqemu.confd
c92bbb1166edd61141fdf678116974209c4422daf373cdd5bc438aa4adb25b8d xenqemu.initd"
-sha512sums="187a860b40c05139f22b8498a5fae1db173c3110d957147af29a56cb83b7111c9dc4946d65f9dffc847001fc01c5e9bf51886eaa1194bb9cfd0b6dbcd43a2c5c xen-4.6.3.tar.gz
+sha512sums="4683fe6c44dce3a6f9ff410d026f39094ccd6937ea0052f08ef5e066172ee840548322654cc15d7ded9f5bce10d43b5e46f6a04f16ef3c03ea3ba2cc2f7724ec xen-4.6.6.tar.gz
2e0b0fd23e6f10742a5517981e5171c6e88b0a93c83da701b296f5c0861d72c19782daab589a7eac3f9032152a0fc7eff7f5362db8fccc4859564a9aa82329cf gmp-4.3.2.tar.bz2
c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a3628bd00ba4d14a54742bc04848110eb3ae8ca25dbfbaabadb grub-0.97.tar.gz
1465b58279af1647f909450e394fe002ca165f0ff4a0254bfa9fe0e64316f50facdde2729d79a4e632565b4500cf4d6c74192ac0dd3bc9fe09129bbd67ba089d lwip-1.3.0.tar.gz
@@ -470,45 +373,11 @@ c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a36
4928b5b82f57645be9408362706ff2c4d9baa635b21b0d41b1c82930e8c60a759b1ea4fa74d7e6c7cae1b7692d006aa5cb72df0c3b88bf049779aa2b566f9d35 tpm_emulator-0.7.4.tar.gz
021b958fcd0d346c4ba761bcf0cc40f3522de6186cf5a0a6ea34a70504ce9622b1c2626fce40675bc8282cf5f5ade18473656abc38050f72f5d6480507a2106e zlib-1.2.3.tar.gz
c5cb1cdff40d2d71fd3e692a9d0efadf2aa17290daf5195391a1c81ddd9dfc913a8e44d5be2b12be85b2a5565ea31631c99c7053564f2fb2225c80ea0bb0e4a4 ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz
-ea94b7ad08dd19c205af584786d92e463a36b522b4a0fb62bd86ea828d867f58821b7ed4e42f35544cf18a6e9aac311ea6d8d085e802ee819a563fe6f6598e47 xsa182-4.6.patch
-f3495976ab219cfd376bae3ad409b452169df11ebcd36b106212db1b1fc8db8c50e721a5d1e23efbc25146946f922556014eda652517ee95efbfb3b482327e99 xsa183-4.6.patch
-14c07d077a9d60a03859ca1b92347517c93faf88db06f8cb0515e486a3919afa8401203161ff671dda8fbdb64e6ca5e86120f1b8f65e6bfaa63a8c6a33211bad xsa184-qemut-master.patch
862e00d9cd126f8323f9c9706bf6ce7896d97e68e647416c699d9f2e01b88083a5fea346b13403577311384946912123f64bf5a568f1a6f92077d28923df54c6 xsa184-qemuu-master.patch
-6b774cfef049d457d89149a973b5a5af674b995726c88ce09278f4a64cb94f5b3c2c2380a6273475a13eb9cdd972f5429f393247ecca6463f6068d606ea74886 xsa185.patch
-bf899dde20cee730598b90e0a07941155b20e0ea17b9a3017a53bd0e1495fb6e5dc251934e01d02937b56ad65faf3accecf695b4fd7f6dcc0bae91290bd87b19 xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
-6583c843855d300b3d40321d909b64ab0df6b03da62b3400cb7e58a9249077112e5951e14449880cfc8d593dabd9afcffc15ff77555f745b478f7af939b3219e xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
-d85bc3c56805ff5b3df6b85b2b34ff97d15fe254fc5a873b5c43c2c15564eea42753723a6296292a543e7b7dc83ad71f0fafe01fa6a6ebf82fa0a7268fc67486 xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
-63f30d4a6842fc516d33334b25806e10a89228fec32315df27c9c271303d02619be4a88e638e41920ad808215280c3fce697574d05c5fb3f184844069383a201 xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
-ba155f6ee81718ecaa2289998c8204e2f6ba9a6d70b042a3eaa9373d8dcd030091feca829b51914f0071d6672fad5a3f9c253da579780aa429b51c24c0bf228c xsa190-4.6-CVE-2016-7777.patch
-502f50bece05d52b127c497eda0236a9011e56885fb0b5fac74ab449c2eac94d0f2cf64da16808c25f8a3091aef0a9586ad5c19f6b98a8c459908149d629b321 xsa191-4.6-CVE-2016-9386.patch
-13670f640f36d216b276dc4fcf73745cb81e54381afbee7452d8e058166a468dc4467dbdeb3e22154f66d5ef70b796f0a0f0f0080dcb4c3587d7f15fe7b9abc6 xsa192-CVE-2016-9382.patch
-6a20d6b192849af32e7db59f61d7686cbd4e0542741f3b6ddef2133f102212ba3ebc93901e5d74cdd54747e188a4eb8060b8843c10878e3bc9c567af678a6bd1 xsa193-4.7-CVE-2016-9385.patch
-2b32a360c13590f24de8ebb1cd18eb17eada444034a394739c21306be708ba5924ea1448e0d120c0f61f9472bce45e80439e3fd5779f4be72d367ce5c55b6ec0 xsa195-CVE-2016-9383.patch
-d76d457343a1a2cd08d6a3fcaf063569638862d5491c5eb3100bc3902d3f4845c5a9a6ceed16e2be405ecfc924d786e7a0e2407c002c59da344a10e8e183e758 xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
-3f47f78f83f01af57c51eee5c6a51466c59d23ddcbbf0c107539166840faed756af113b139c73aea74534ebceb304c0b6b69a394e47c3a9a5499342cce6d5cf8 xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
-207d73265b27dd1a3ba31b8cf3b940955f01bd4c3c61b9272a08d280d6289fda85190cce861d9c95dad085275e82899bf5e790b3856c20215cec06cfa16bcf89 xsa197-4.6-qemuu-CVE-2016-9381.patch
-3edd70e047df0c452be5c0b0f3d03d041810728c9950a4f3e87f12ed27f152d56dee259794423776f42980a396039609c5563ff7e90109c7c237efab049e8e39 xsa197-qemut-CVE-2016-9381.patch
-b61429fbf4d1677a8dab2710ab21335f18b3f998f2e5e19e45a4727f71b9671b3d1bd709bef3594cbaa5a47f339c3b8a5cccf11dd361b993aa76d242b825549c xsa198-CVE-2016-9379-CVE-2016-9380.patch
-183f3d389b5cc6bac5ec80072d08302817f24a324f5674011b7456ef9f8be0d87a109aff6bedf1867540e7b8610779e4b6b17cc35d27485bfb1c2cf0667dada3 xsa200-4.6.patch
-67006c1ac5d0b01eb65b5a9b6583ef31c0df0cdb6331af983d972d9b0c4bc21416484d88445edb8ee8470becdc11bc88fad4a617aac40ae26610eb2bee40bd01 xsa201-1.patch
-afed1ed3c5b4dd3a1d2c1c0fe824cdeb58efdc40fdaf5ce439deb2feef63141168114ea362fc5c683eb0494bb6bd3c76773b099495af21550ae3a1e5cb4e924d xsa201-2.patch
-ad0f4217ef8218dac6997385690981e7a88d05b735e04779f582ad4a0307d8e7804c015971403133fe1d3334c628da784c696161768b275ed3ab64d6140293dc xsa201-3-4.7.patch
-1761ca422fe9e3caee3442b43b84da49721a01ed8417f653c568695b08718c40be1493cc7a0a6145c7ce195c7fb0c753b190fe2f1782d5242e1e304c18005610 xsa201-4.patch
-dee7a595324ea5de3754c9aad2422fc2021bcb53999e344dbe6e4edfd4772a5ed20e8ebfb40750b81287a2a022037d49cbe4f0f7ba481ae0ac79a4249ef630bf xsa202-4.6.patch
-b86ef48db23dacb51fbbdd55041bf08fac8aa0db76a272bb2f9d9be7195cd9a359a30fbbb61e040c66f23358f12ae102a92a30296fb18e4feb1023b58ffad4ff xsa203-4.7.patch
-a2a091cd51ed54f5b5ba4131efc1c9cc0a69a647cea46415f73c29e5764efb00025e2e65bd5d24cf26f903263fce150b2b1c52ca5d61fd81dea7efe16abf57be xsa204-4.7.patch
-89848dcdfaebf462765b2a32c9c57d5404930721ff92f7cb05c221a99be2b82fb23d31f91f52fbf32874a69065a2e8ad921460a3655f4b03cf827a8203137fac xsa207.patch
-ef8422f79c1e791f19f6346ecf0de1d7e9735f9d623b6535a10a44b045ca4379b1df5701193624e729be4ca26746407dee42e6edb9498f004a2819385b82bde1 xsa208-qemut.patch
-8b1a507abc7b0e51d870e845e9f27d7ad19b514a93f57942fee1ee0aabd8118311051ae00a556d4399583f8d628452e4b385ef142306ecadf0518568f0cd8d7f xsa208-qemuu-4.7.patch
-5da7ccb38726634251905fed692ee8c9bbe480c33b0e172651fc7316ef84fdfac5d660ba309944800e3344f0260efae32444f3cf9ec4f8dfc3f848cdb8626d20 xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch
-156fec680ab0b0652cea8409e0f86110c796d5b166466bb00743d35cd2289a91bab1192a73f77f1fa33be615743cf3dff7c3c848cc0c93ae35843e0e52fa3405 xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch
-cfc0178fd1d22b99d7debd94d5271967d8daeb7f132e8853b90e0e5f1793635939beba5b0ed6984b635a4c44ef2c02df7bedf7a98abf969e307427d06d2e4412 xsa209-qemut.patch
-42d3350eda9dafeb52748d5ba5f3edf8dea106a1fd2bce8d00bdee4ca1f49be1fd5d7b0bec427e147b856efd91517ce5f069796a1172504317986c34d652180d xsa211-qemut.patch
-a21ae520900f31b77a50cb9956499d884d93802962e0f10503c61b8962ad76a38655a17bc9ef03057b5c23d4f4c5b6a951fd3ad6aa5bbd5ad7e939b29706b7c6 xsa211-qemuu-4.6.patch
-d012556c6b439629c5e4284a0de2f5ae70cda3db4f6f42373b8719509fec3bb0bb667a50484fd1e6c1129dcd2bff550a3eb9ead0f676fb626e6263ac98023e06 xsa212.patch
-b3788dd469157582e4efd2c47fd162e14bb32608817e9a7118310e950f9783eb7fa2aac976770ca01c902c30642e9eeeee1f3ebb60237be5fb221f2a6dfedcfd xsa213-4.6.patch
-ea12702e97b9417ea6c4120dbc7cf9c5e2b89f82b41cfd389069d3238891749474a5d3925d2dc571a7cc2aaf5e88af03ccc9af60046eaa39425b5af05f62fba0 xsa214.patch
-3e6a2589cc6ff4b8f15ae1aaac5d71b601bfb88e88bbc0b28047a3afd62a10a1bf5cd13bcb919fec687f155c4cd3fe50e50868601896fa34dde65e6d7a3b6e2b xsa215.patch
+fd1f962f60b9fc971915588830cdad99b97fc07e7fcf9c4beb2df22f464468c7df811f4864046b41cffa0c22e0b20d9eb8fce085eb55c50a340ef4ce53e0586e xsa226-4.6.patch
+7fda23056913bad2a4be5ff79ede719eebd9d8cd4c0d7a1ef4f2c04f050d0d3e6d4074f035a75263dcecd7ae5b49a756245acdd06954b2154e7321af15082f9f xsa227-4.6.patch
+e7c6f248979e23a681aad07357baace71bee56d64c0897e7b49d4c2aaff6e4784a0e649d407a388662ed57c157bd4024cca7155e9fba2a494539d51345bdba90 xsa228-4.8.patch
+df174a1675f74b73e78bc3cb1c9f16536199dfd1922c0cc545a807e92bc24941a816891838258e118f477109548487251a7eaccb2d1dd9b6994c8c76fc5b058f xsa230.patch
c1c05c2ec68486a3721ae9c305a4f7a01a1c38a62f468ba97be22ee583b5690b92fa1cb3c8a4ea657429483d844ee8dd66f96d6f602cabeaeb50db4a459317b4 xsa231-4.7.patch
fb742225a4f3dbf2a574c4a6e3ef61a5da0c91aaeed77a2247023bdefcd4e0b6c08f1c9ffb42eaac3d38739c401443c3cf7aebb507b1d779c415b6cbffabbc10 xsa232.patch
a322ac6c5ac2f858a59096108032fd42974eaaeeebd8f4966119149665f32bed281e333e743136e79add2e6f3844d88b6a3e4d5a685c2808702fd3a9e6396cd4 xsa233.patch
diff --git a/main/xen/xsa182-4.6.patch b/main/xen/xsa182-4.6.patch
deleted file mode 100644
index be2047d6881..00000000000
--- a/main/xen/xsa182-4.6.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From f48a75b0c10ac79b287ca2b580ecb9ea2f696607 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Mon, 11 Jul 2016 14:32:03 +0100
-Subject: [PATCH] x86/pv: Remove unsafe bits from the mod_l?_entry() fastpath
-
-All changes in writeability and cacheability must go through full
-re-validation.
-
-Rework the logic as a whitelist, to make it clearer to follow.
-
-This is XSA-182
-
-Reported-by: Jérémie Boutoille <jboutoille@ext.quarkslab.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
----
- xen/arch/x86/mm.c | 28 ++++++++++++++++------------
- xen/include/asm-x86/page.h | 1 +
- 2 files changed, 17 insertions(+), 12 deletions(-)
-
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index daf02ab..8dd22b8 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -1780,6 +1780,14 @@ static inline int update_intpte(intpte_t *p,
- _t ## e_get_intpte(_o), _t ## e_get_intpte(_n), \
- (_m), (_v), (_ad))
-
-+/*
-+ * PTE flags that a guest may change without re-validating the PTE.
-+ * All other bits affect translation, caching, or Xen's safety.
-+ */
-+#define FASTPATH_FLAG_WHITELIST \
-+ (_PAGE_NX_BIT | _PAGE_AVAIL_HIGH | _PAGE_AVAIL | _PAGE_GLOBAL | \
-+ _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER)
-+
- /* Update the L1 entry at pl1e to new value nl1e. */
- static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
- unsigned long gl1mfn, int preserve_ad,
-@@ -1820,9 +1828,8 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
- return -EINVAL;
- }
-
-- /* Fast path for identical mapping, r/w, presence, and cachability. */
-- if ( !l1e_has_changed(ol1e, nl1e,
-- PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
-+ /* Fast path for sufficiently-similar mappings. */
-+ if ( !l1e_has_changed(ol1e, nl1e, ~FASTPATH_FLAG_WHITELIST) )
- {
- adjust_guest_l1e(nl1e, pt_dom);
- if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
-@@ -1904,11 +1911,8 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
- return -EINVAL;
- }
-
-- /* Fast path for identical mapping and presence. */
-- if ( !l2e_has_changed(ol2e, nl2e,
-- unlikely(opt_allow_superpage)
-- ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
-- : _PAGE_PRESENT) )
-+ /* Fast path for sufficiently-similar mappings. */
-+ if ( !l2e_has_changed(ol2e, nl2e, ~FASTPATH_FLAG_WHITELIST) )
- {
- adjust_guest_l2e(nl2e, d);
- if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
-@@ -1973,8 +1977,8 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
- return -EINVAL;
- }
-
-- /* Fast path for identical mapping and presence. */
-- if ( !l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT) )
-+ /* Fast path for sufficiently-similar mappings. */
-+ if ( !l3e_has_changed(ol3e, nl3e, ~FASTPATH_FLAG_WHITELIST) )
- {
- adjust_guest_l3e(nl3e, d);
- rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad);
-@@ -2037,8 +2041,8 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
- return -EINVAL;
- }
-
-- /* Fast path for identical mapping and presence. */
-- if ( !l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT) )
-+ /* Fast path for sufficiently-similar mappings. */
-+ if ( !l4e_has_changed(ol4e, nl4e, ~FASTPATH_FLAG_WHITELIST) )
- {
- adjust_guest_l4e(nl4e, d);
- rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
-diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
-index 66b611c..1a59ed8 100644
---- a/xen/include/asm-x86/page.h
-+++ b/xen/include/asm-x86/page.h
-@@ -311,6 +311,7 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
- #define _PAGE_AVAIL2 _AC(0x800,U)
- #define _PAGE_AVAIL _AC(0xE00,U)
- #define _PAGE_PSE_PAT _AC(0x1000,U)
-+#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12)
- #define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)
- /* non-architectural flags */
- #define _PAGE_PAGED 0x2000U
---
-2.1.4
-
diff --git a/main/xen/xsa183-4.6.patch b/main/xen/xsa183-4.6.patch
deleted file mode 100644
index 84d70077c89..00000000000
--- a/main/xen/xsa183-4.6.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 777ebe30e81ab284f9b78392875fe884a593df35 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Wed, 15 Jun 2016 18:32:14 +0100
-Subject: [PATCH] x86/entry: Avoid SMAP violation in
- compat_create_bounce_frame()
-
-A 32bit guest kernel might be running on user mappings.
-compat_create_bounce_frame() must whitelist its guest accesses to avoid
-risking a SMAP violation.
-
-For both variants of create_bounce_frame(), re-blacklist user accesses if
-execution exits via an exception table redirection.
-
-This is XSA-183 / CVE-2016-6259
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
-v2:
- * Include CLAC on the exit paths from compat_create_bounce_frame which occur
- from faults attempting to load %fs
- * Reposition ASM_STAC to avoid breaking the macro-op fusion of test/jz
----
- xen/arch/x86/x86_64/compat/entry.S | 3 +++
- xen/arch/x86/x86_64/entry.S | 2 ++
- 2 files changed, 5 insertions(+)
-
-diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
-index 0e3db7c..1eaf4bb 100644
---- a/xen/arch/x86/x86_64/compat/entry.S
-+++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -350,6 +350,7 @@ ENTRY(compat_int80_direct_trap)
- compat_create_bounce_frame:
- ASSERT_INTERRUPTS_ENABLED
- mov %fs,%edi
-+ ASM_STAC
- testb $2,UREGS_cs+8(%rsp)
- jz 1f
- /* Push new frame at registered guest-OS stack base. */
-@@ -403,6 +404,7 @@ UNLIKELY_START(nz, compat_bounce_failsafe)
- movl %ds,%eax
- .Lft12: movl %eax,%fs:0*4(%rsi) # DS
- UNLIKELY_END(compat_bounce_failsafe)
-+ ASM_CLAC
- /* Rewrite our stack frame and return to guest-OS mode. */
- /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
- andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
-@@ -448,6 +450,7 @@ compat_crash_page_fault_4:
- addl $4,%esi
- compat_crash_page_fault:
- .Lft14: mov %edi,%fs
-+ ASM_CLAC
- movl %esi,%edi
- call show_page_walk
- jmp dom_crash_sync_extable
-diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index 6e27508..0c2e63a 100644
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -462,9 +462,11 @@ domain_crash_page_fault_16:
- domain_crash_page_fault_8:
- addq $8,%rsi
- domain_crash_page_fault:
-+ ASM_CLAC
- movq %rsi,%rdi
- call show_page_walk
- ENTRY(dom_crash_sync_extable)
-+ ASM_CLAC
- # Get out of the guest-save area of the stack.
- GET_STACK_BASE(%rax)
- leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
---
-2.1.4
-
diff --git a/main/xen/xsa185.patch b/main/xen/xsa185.patch
deleted file mode 100644
index a4c133ee194..00000000000
--- a/main/xen/xsa185.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 30aba4992b18245c436f16df7326a16c01a51570 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Mon, 8 Aug 2016 10:58:12 +0100
-Subject: x86/32on64: don't allow recursive page tables from L3
-
-L3 entries are special in PAE mode, and hence can't reasonably be used
-for setting up recursive (and hence linear) page table mappings. Since
-abuse is possible when the guest in fact gets run on 4-level page
-tables, this needs to be excluded explicitly.
-
-This is XSA-185.
-
-Reported-by: Jérémie Boutoille <jboutoille@ext.quarkslab.com>
-Reported-by: 栾尚聪(好风) <shangcong.lsc@alibaba-inc.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
----
- xen/arch/x86/mm.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index 109b8be..69b8b8d 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -1122,7 +1122,9 @@ get_page_from_l3e(
-
- rc = get_page_and_type_from_pagenr(
- l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
-- if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
-+ if ( unlikely(rc == -EINVAL) &&
-+ !is_pv_32bit_domain(d) &&
-+ get_l3_linear_pagetable(l3e, pfn, d) )
- rc = 0;
-
- return rc;
---
-2.1.4
-
diff --git a/main/xen/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch b/main/xen/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
deleted file mode 100644
index b2574970856..00000000000
--- a/main/xen/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From e938be013ba73ff08fa4f1d8670501aacefde7fb Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Fri, 22 Jul 2016 16:02:54 +0000
-Subject: [PATCH 1/2] x86/emulate: Correct boundary interactions of emulated
- instructions
-
-This reverts most of c/s 0640ffb6 "x86emul: fix rIP handling".
-
-Experimentally, in long mode processors will execute an instruction stream
-which crosses the 64bit -1 -> 0 virtual boundary, whether the instruction
-boundary is aligned on the virtual boundary, or is misaligned.
-
-In compatibility mode, Intel processors will execute an instruction stream
-which crosses the 32bit -1 -> 0 virtual boundary, while AMD processors raise a
-segmentation fault. Xen's segmentation behaviour matches AMD.
-
-For 16bit code, hardware does not ever truncated %ip. %eip is always used and
-behaves normally as a 32bit register, including in 16bit protected mode
-segments, as well as in Real and Unreal mode.
-
-This is XSA-186
-
-Reported-by: Brian Marcotte <marcotte@panix.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/x86_emulate/x86_emulate.c | 22 ++++------------------
- 1 file changed, 4 insertions(+), 18 deletions(-)
-
-diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
-index d5a56cf..bf3529a 100644
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -1570,10 +1570,6 @@ x86_emulate(
- #endif
- }
-
-- /* Truncate rIP to def_ad_bytes (2 or 4) if necessary. */
-- if ( def_ad_bytes < sizeof(_regs.eip) )
-- _regs.eip &= (1UL << (def_ad_bytes * 8)) - 1;
--
- /* Prefix bytes. */
- for ( ; ; )
- {
-@@ -3906,21 +3902,11 @@ x86_emulate(
-
- /* Commit shadow register state. */
- _regs.eflags &= ~EFLG_RF;
-- switch ( __builtin_expect(def_ad_bytes, sizeof(_regs.eip)) )
-- {
-- uint16_t ip;
-
-- case 2:
-- ip = _regs.eip;
-- _regs.eip = ctxt->regs->eip;
-- *(uint16_t *)&_regs.eip = ip;
-- break;
--#ifdef __x86_64__
-- case 4:
-- _regs.rip = _regs._eip;
-- break;
--#endif
-- }
-+ /* Zero the upper 32 bits of %rip if not in long mode. */
-+ if ( def_ad_bytes < sizeof(_regs.eip) )
-+ _regs.eip = (uint32_t)_regs.eip;
-+
- *ctxt->regs = _regs;
-
- done:
---
-2.1.4
-
diff --git a/main/xen/xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch b/main/xen/xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
deleted file mode 100644
index 07c30a2e532..00000000000
--- a/main/xen/xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: hvm/fep: Allow testing of instructions crossing the -1 -> 0 virtual boundary
-
-The Force Emulation Prefix is named to follow its PV counterpart for cpuid or
-rdtsc, but isn't really an instruction prefix. It behaves as a break-out into
-Xen, with the purpose of emulating the next instruction in the current state.
-
-It is important to be able to test legal situations which occur in real
-hardware, including instruction which cross certain boundaries, and
-instructions starting at 0.
-
-Reported-by: Brian Marcotte <marcotte@panix.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/hvm/svm/svm.c
-+++ b/xen/arch/x86/hvm/svm/svm.c
-@@ -2139,6 +2139,10 @@ static void svm_vmexit_ud_intercept(stru
- {
- regs->eip += sizeof(sig);
- regs->eflags &= ~X86_EFLAGS_RF;
-+
-+ /* Zero the upper 32 bits of %rip if not in long mode. */
-+ if ( svm_guest_x86_mode(current) != 8 )
-+ regs->eip = regs->_eip;
- }
- }
-
---- a/xen/arch/x86/hvm/vmx/vmx.c
-+++ b/xen/arch/x86/hvm/vmx/vmx.c
-@@ -2757,6 +2757,10 @@ static void vmx_vmexit_ud_intercept(stru
- {
- regs->eip += sizeof(sig);
- regs->eflags &= ~X86_EFLAGS_RF;
-+
-+ /* Zero the upper 32 bits of %rip if not in long mode. */
-+ if ( vmx_guest_x86_mode(current) != 8 )
-+ regs->eip = regs->_eip;
- }
- }
-
diff --git a/main/xen/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch b/main/xen/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
deleted file mode 100644
index e8cd1e778f7..00000000000
--- a/main/xen/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
+++ /dev/null
@@ -1,142 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
-
-HVM HAP codepaths have space for all segment registers in the seg_reg[]
-cache (with x86_seg_none still risking an array overrun), while the shadow
-codepaths only have space for the user segments.
-
-Range check the input segment of *_get_seg_reg() against the size of the array
-used to cache the results, to avoid overruns in the case that the callers
-don't filter their input suitably.
-
-Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
-an incomplete attempt at range checking, and are now superceeded. Make
-hvm_get_seg_reg() static, as it is not used outside of shadow/common.c
-
-No functional change, but far easier to reason that no overflow is possible.
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Acked-by: Tim Deegan <tim@xen.org>
-Acked-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/hvm/emulate.c
-+++ b/xen/arch/x86/hvm/emulate.c
-@@ -526,6 +526,8 @@ static int hvmemul_virtual_to_linear(
- ? 1 : 4096);
-
- reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
-+ if ( IS_ERR(reg) )
-+ return -PTR_ERR(reg);
-
- if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
- {
-@@ -1360,6 +1362,10 @@ static int hvmemul_read_segment(
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
-+
-+ if ( IS_ERR(sreg) )
-+ return -PTR_ERR(sreg);
-+
- memcpy(reg, sreg, sizeof(struct segment_register));
- return X86EMUL_OKAY;
- }
-@@ -1373,6 +1379,9 @@ static int hvmemul_write_segment(
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
-
-+ if ( IS_ERR(sreg) )
-+ return -PTR_ERR(sreg);
-+
- memcpy(sreg, reg, sizeof(struct segment_register));
- __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
-
-@@ -1911,10 +1920,17 @@ void hvm_emulate_writeback(
- }
- }
-
-+/*
-+ * Callers which pass a known in-range x86_segment can rely on the return
-+ * pointer being valid. Other callers must explicitly check for errors.
-+ */
- struct segment_register *hvmemul_get_seg_reg(
- enum x86_segment seg,
- struct hvm_emulate_ctxt *hvmemul_ctxt)
- {
-+ if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
-+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
-+
- if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
- hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
- return &hvmemul_ctxt->seg_reg[seg];
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -125,10 +125,19 @@ __initcall(shadow_audit_key_init);
- /* x86 emulator support for the shadow code
- */
-
-+/*
-+ * Callers which pass a known in-range x86_segment can rely on the return
-+ * pointer being valid. Other callers must explicitly check for errors.
-+ */
- struct segment_register *hvm_get_seg_reg(
- enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
- {
-- struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
-+ struct segment_register *seg_reg;
-+
-+ if ( seg < 0 || seg >= ARRAY_SIZE(sh_ctxt->seg_reg) )
-+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
-+
-+ seg_reg = &sh_ctxt->seg_reg[seg];
- if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
- hvm_get_segment_register(current, seg, seg_reg);
- return seg_reg;
-@@ -145,14 +154,9 @@ static int hvm_translate_linear_addr(
- struct segment_register *reg;
- int okay;
-
-- /*
-- * Can arrive here with non-user segments. However, no such cirucmstance
-- * is part of a legitimate pagetable update, so fail the emulation.
-- */
-- if ( !is_x86_user_segment(seg) )
-- return X86EMUL_UNHANDLEABLE;
--
- reg = hvm_get_seg_reg(seg, sh_ctxt);
-+ if ( IS_ERR(reg) )
-+ return -PTR_ERR(reg);
-
- okay = hvm_virtual_to_linear_addr(
- seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
-@@ -254,9 +258,6 @@ hvm_emulate_write(enum x86_segment seg,
- unsigned long addr;
- int rc;
-
-- if ( !is_x86_user_segment(seg) )
-- return X86EMUL_UNHANDLEABLE;
--
- /* How many emulations could we save if we unshadowed on stack writes? */
- if ( seg == x86_seg_ss )
- perfc_incr(shadow_fault_emulate_stack);
-@@ -284,9 +285,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg
- unsigned long addr, old[2], new[2];
- int rc;
-
-- if ( !is_x86_user_segment(seg) )
-- return X86EMUL_UNHANDLEABLE;
--
- rc = hvm_translate_linear_addr(
- seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
- if ( rc )
---- a/xen/include/asm-x86/hvm/emulate.h
-+++ b/xen/include/asm-x86/hvm/emulate.h
-@@ -13,6 +13,7 @@
- #define __ASM_X86_HVM_EMULATE_H__
-
- #include <xen/config.h>
-+#include <xen/err.h>
- #include <asm/hvm/hvm.h>
- #include <asm/x86_emulate.h>
-
diff --git a/main/xen/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch b/main/xen/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
deleted file mode 100644
index bc995960839..00000000000
--- a/main/xen/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/shadow: Avoid overflowing sh_ctxt->seg_reg[]
-
-hvm_get_seg_reg() does not perform a range check on its input segment, calls
-hvm_get_segment_register() and writes straight into sh_ctxt->seg_reg[].
-
-x86_seg_none is outside the bounds of sh_ctxt->seg_reg[], and will hit a BUG()
-in {vmx,svm}_get_segment_register().
-
-HVM guests running with shadow paging can end up performing a virtual to
-linear translation with x86_seg_none. This is used for addresses which are
-already linear. However, none of this is a legitimate pagetable update, so
-fail the emulation in such a case.
-
-This is XSA-187
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -140,9 +140,18 @@ static int hvm_translate_linear_addr(
- struct sh_emulate_ctxt *sh_ctxt,
- unsigned long *paddr)
- {
-- struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
-+ struct segment_register *reg;
- int okay;
-
-+ /*
-+ * Can arrive here with non-user segments. However, no such cirucmstance
-+ * is part of a legitimate pagetable update, so fail the emulation.
-+ */
-+ if ( !is_x86_user_segment(seg) )
-+ return X86EMUL_UNHANDLEABLE;
-+
-+ reg = hvm_get_seg_reg(seg, sh_ctxt);
-+
- okay = hvm_virtual_to_linear_addr(
- seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
-
diff --git a/main/xen/xsa190-4.6-CVE-2016-7777.patch b/main/xen/xsa190-4.6-CVE-2016-7777.patch
deleted file mode 100644
index b950ae95066..00000000000
--- a/main/xen/xsa190-4.6-CVE-2016-7777.patch
+++ /dev/null
@@ -1,163 +0,0 @@
-x86emul: honor guest CR0.TS and CR0.EM
-
-We must not emulate any instructions accessing respective registers
-when either of these flags is set in the guest view of the register, or
-else we may do so on data not belonging to the guest's current task.
-
-Being architecturally required behavior, the logic gets placed in the
-instruction emulator instead of hvmemul_get_fpu(). It should be noted,
-though, that hvmemul_get_fpu() being the only current handler for the
-get_fpu() callback, we don't have an active problem with CR4: Both
-CR4.OSFXSR and CR4.OSXSAVE get handled as necessary by that function.
-
-This is XSA-190.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/tools/tests/x86_emulator/test_x86_emulator.c
-+++ b/tools/tests/x86_emulator/test_x86_emulator.c
-@@ -129,6 +129,22 @@ static inline uint64_t xgetbv(uint32_t x
- (ebx & (1U << 5)) != 0; \
- })
-
-+static int read_cr(
-+ unsigned int reg,
-+ unsigned long *val,
-+ struct x86_emulate_ctxt *ctxt)
-+{
-+ /* Fake just enough state for the emulator's _get_fpu() to be happy. */
-+ switch ( reg )
-+ {
-+ case 0:
-+ *val = 0x00000001; /* PE */
-+ return X86EMUL_OKAY;
-+ }
-+
-+ return X86EMUL_UNHANDLEABLE;
-+}
-+
- int get_fpu(
- void (*exception_callback)(void *, struct cpu_user_regs *),
- void *exception_callback_arg,
-@@ -160,6 +176,7 @@ static struct x86_emulate_ops emulops =
- .write = write,
- .cmpxchg = cmpxchg,
- .cpuid = cpuid,
-+ .read_cr = read_cr,
- .get_fpu = get_fpu,
- };
-
---- a/xen/arch/x86/hvm/emulate.c
-+++ b/xen/arch/x86/hvm/emulate.c
-@@ -1557,6 +1557,7 @@ static int hvmemul_get_fpu(
- switch ( type )
- {
- case X86EMUL_FPU_fpu:
-+ case X86EMUL_FPU_wait:
- break;
- case X86EMUL_FPU_mmx:
- if ( !cpu_has_mmx )
-@@ -1564,7 +1565,6 @@ static int hvmemul_get_fpu(
- break;
- case X86EMUL_FPU_xmm:
- if ( !cpu_has_xmm ||
-- (curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_EM) ||
- !(curr->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSFXSR) )
- return X86EMUL_UNHANDLEABLE;
- break;
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -366,6 +366,9 @@ typedef union {
-
- /* Control register flags. */
- #define CR0_PE (1<<0)
-+#define CR0_MP (1<<1)
-+#define CR0_EM (1<<2)
-+#define CR0_TS (1<<3)
- #define CR4_TSD (1<<2)
-
- /* EFLAGS bit definitions. */
-@@ -393,6 +396,7 @@ typedef union {
- #define EXC_OF 4
- #define EXC_BR 5
- #define EXC_UD 6
-+#define EXC_NM 7
- #define EXC_TS 10
- #define EXC_NP 11
- #define EXC_SS 12
-@@ -674,10 +678,45 @@ static void fpu_handle_exception(void *_
- regs->eip += fic->insn_bytes;
- }
-
-+static int _get_fpu(
-+ enum x86_emulate_fpu_type type,
-+ struct fpu_insn_ctxt *fic,
-+ struct x86_emulate_ctxt *ctxt,
-+ const struct x86_emulate_ops *ops)
-+{
-+ int rc;
-+
-+ fic->exn_raised = 0;
-+
-+ fail_if(!ops->get_fpu);
-+ rc = ops->get_fpu(fpu_handle_exception, fic, type, ctxt);
-+
-+ if ( rc == X86EMUL_OKAY )
-+ {
-+ unsigned long cr0;
-+
-+ fail_if(!ops->read_cr);
-+ rc = ops->read_cr(0, &cr0, ctxt);
-+ if ( rc != X86EMUL_OKAY )
-+ return rc;
-+ if ( cr0 & CR0_EM )
-+ {
-+ generate_exception_if(type == X86EMUL_FPU_fpu, EXC_NM, -1);
-+ generate_exception_if(type == X86EMUL_FPU_mmx, EXC_UD, -1);
-+ generate_exception_if(type == X86EMUL_FPU_xmm, EXC_UD, -1);
-+ }
-+ generate_exception_if((cr0 & CR0_TS) &&
-+ (type != X86EMUL_FPU_wait || (cr0 & CR0_MP)),
-+ EXC_NM, -1);
-+ }
-+
-+ done:
-+ return rc;
-+}
-+
- #define get_fpu(_type, _fic) \
--do{ (_fic)->exn_raised = 0; \
-- fail_if(ops->get_fpu == NULL); \
-- rc = ops->get_fpu(fpu_handle_exception, _fic, _type, ctxt); \
-+do { \
-+ rc = _get_fpu(_type, _fic, ctxt, ops); \
- if ( rc ) goto done; \
- } while (0)
- #define _put_fpu() \
-@@ -2508,8 +2547,14 @@ x86_emulate(
- }
-
- case 0x9b: /* wait/fwait */
-- emulate_fpu_insn("fwait");
-+ {
-+ struct fpu_insn_ctxt fic = { .insn_bytes = 1 };
-+
-+ get_fpu(X86EMUL_FPU_wait, &fic);
-+ asm volatile ( "fwait" ::: "memory" );
-+ put_fpu(&fic);
- break;
-+ }
-
- case 0x9c: /* pushf */
- src.val = _regs.eflags;
---- a/xen/arch/x86/x86_emulate/x86_emulate.h
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
-@@ -115,6 +115,7 @@ struct __packed segment_register {
- /* FPU sub-types which may be requested via ->get_fpu(). */
- enum x86_emulate_fpu_type {
- X86EMUL_FPU_fpu, /* Standard FPU coprocessor instruction set */
-+ X86EMUL_FPU_wait, /* WAIT/FWAIT instruction */
- X86EMUL_FPU_mmx, /* MMX instruction set (%mm0-%mm7) */
- X86EMUL_FPU_xmm, /* SSE instruction set (%xmm0-%xmm7/15) */
- X86EMUL_FPU_ymm /* AVX/XOP instruction set (%ymm0-%ymm7/15) */
diff --git a/main/xen/xsa191-4.6-CVE-2016-9386.patch b/main/xen/xsa191-4.6-CVE-2016-9386.patch
deleted file mode 100644
index d661d0c7fb9..00000000000
--- a/main/xen/xsa191-4.6-CVE-2016-9386.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/hvm: Fix the handling of non-present segments
-
-In 32bit, the data segments may be NULL to indicate that the segment is
-ineligible for use. In both 32bit and 64bit, the LDT selector may be NULL to
-indicate that the entire LDT is ineligible for use. However, nothing in Xen
-actually checks for this condition when performing other segmentation
-checks. (Note however that limit and writeability checks are correctly
-performed).
-
-Neither Intel nor AMD specify the exact behaviour of loading a NULL segment.
-Experimentally, AMD zeroes all attributes but leaves the base and limit
-unmodified. Intel zeroes the base, sets the limit to 0xfffffff and resets the
-attributes to just .G and .D/B.
-
-The use of the segment information in the VMCB/VMCS is equivalent to a native
-pipeline interacting with the segment cache. The present bit can therefore
-have a subtly different meaning, and it is now cooked to uniformly indicate
-whether the segment is usable or not.
-
-GDTR and IDTR don't have access rights like the other segments, but for
-consistency, they are treated as being present so no special casing is needed
-elsewhere in the segmentation logic.
-
-AMD hardware does not consider the present bit for %cs and %tr, and will
-function as if they were present. They are therefore unconditionally set to
-present when reading information from the VMCB, to maintain the new meaning of
-usability.
-
-Intel hardware has a separate unusable bit in the VMCS segment attributes.
-This bit is inverted and stored in the present field, so the hvm code can work
-with architecturally-common state.
-
-This is XSA-191.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -3666,6 +3666,10 @@ int hvm_virtual_to_linear_addr(
- * COMPATIBILITY MODE: Apply segment checks and add base.
- */
-
-+ /* Segment not valid for use (cooked meaning of .p)? */
-+ if ( !reg->attr.fields.p )
-+ return 0;
-+
- switch ( access_type )
- {
- case hvm_access_read:
-@@ -3871,6 +3875,10 @@ static int hvm_load_segment_selector(
- hvm_get_segment_register(
- v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
-
-+ /* Segment not valid for use (cooked meaning of .p)? */
-+ if ( !desctab.attr.fields.p )
-+ goto fail;
-+
- /* Check against descriptor table limit. */
- if ( ((sel & 0xfff8) + 7) > desctab.limit )
- goto fail;
---- a/xen/arch/x86/hvm/svm/svm.c
-+++ b/xen/arch/x86/hvm/svm/svm.c
-@@ -620,6 +620,7 @@ static void svm_get_segment_register(str
- {
- case x86_seg_cs:
- memcpy(reg, &vmcb->cs, sizeof(*reg));
-+ reg->attr.fields.p = 1;
- reg->attr.fields.g = reg->limit > 0xFFFFF;
- break;
- case x86_seg_ds:
-@@ -653,13 +654,16 @@ static void svm_get_segment_register(str
- case x86_seg_tr:
- svm_sync_vmcb(v);
- memcpy(reg, &vmcb->tr, sizeof(*reg));
-+ reg->attr.fields.p = 1;
- reg->attr.fields.type |= 0x2;
- break;
- case x86_seg_gdtr:
- memcpy(reg, &vmcb->gdtr, sizeof(*reg));
-+ reg->attr.bytes = 0x80;
- break;
- case x86_seg_idtr:
- memcpy(reg, &vmcb->idtr, sizeof(*reg));
-+ reg->attr.bytes = 0x80;
- break;
- case x86_seg_ldtr:
- svm_sync_vmcb(v);
---- a/xen/arch/x86/hvm/vmx/vmx.c
-+++ b/xen/arch/x86/hvm/vmx/vmx.c
-@@ -867,10 +867,12 @@ void vmx_get_segment_register(struct vcp
- reg->sel = sel;
- reg->limit = limit;
-
-- reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
-- /* Unusable flag is folded into Present flag. */
-- if ( attr & (1u<<16) )
-- reg->attr.fields.p = 0;
-+ /*
-+ * Fold VT-x representation into Xen's representation. The Present bit is
-+ * unconditionally set to the inverse of unusable.
-+ */
-+ reg->attr.bytes =
-+ (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
-
- /* Adjust for virtual 8086 mode */
- if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr
-@@ -950,11 +952,11 @@ static void vmx_set_segment_register(str
- }
- }
-
-- attr = ((attr & 0xf00) << 4) | (attr & 0xff);
--
-- /* Not-present must mean unusable. */
-- if ( !reg->attr.fields.p )
-- attr |= (1u << 16);
-+ /*
-+ * Unfold Xen representation into VT-x representation. The unusable bit
-+ * is unconditionally set to the inverse of present.
-+ */
-+ attr = (!(attr & (1u << 7)) << 16) | ((attr & 0xf00) << 4) | (attr & 0xff);
-
- /* VMX has strict consistency requirement for flag G. */
- attr |= !!(limit >> 20) << 15;
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -1209,6 +1209,10 @@ protmode_load_seg(
- &desctab, ctxt)) )
- return rc;
-
-+ /* Segment not valid for use (cooked meaning of .p)? */
-+ if ( !desctab.attr.fields.p )
-+ goto raise_exn;
-+
- /* Check against descriptor table limit. */
- if ( ((sel & 0xfff8) + 7) > desctab.limit )
- goto raise_exn;
diff --git a/main/xen/xsa192-CVE-2016-9382.patch b/main/xen/xsa192-CVE-2016-9382.patch
deleted file mode 100644
index b573a132c9f..00000000000
--- a/main/xen/xsa192-CVE-2016-9382.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch
-
-Just like TR, LDTR is purely a protected mode facility and hence needs
-to be loaded accordingly. Also move its loading to where it
-architecurally belongs.
-
-This is XSA-192.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -2728,17 +2728,16 @@ static void hvm_unmap_entry(void *p)
- }
-
- static int hvm_load_segment_selector(
-- enum x86_segment seg, uint16_t sel)
-+ enum x86_segment seg, uint16_t sel, unsigned int eflags)
- {
- struct segment_register desctab, cs, segr;
- struct desc_struct *pdesc, desc;
- u8 dpl, rpl, cpl;
- bool_t writable;
- int fault_type = TRAP_invalid_tss;
-- struct cpu_user_regs *regs = guest_cpu_user_regs();
- struct vcpu *v = current;
-
-- if ( regs->eflags & X86_EFLAGS_VM )
-+ if ( eflags & X86_EFLAGS_VM )
- {
- segr.sel = sel;
- segr.base = (uint32_t)sel << 4;
-@@ -2986,6 +2985,8 @@ void hvm_task_switch(
- if ( rc != HVMCOPY_okay )
- goto out;
-
-+ if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
-+ goto out;
-
- if ( hvm_set_cr3(tss.cr3, 1) )
- goto out;
-@@ -3008,13 +3009,12 @@ void hvm_task_switch(
- }
-
- exn_raised = 0;
-- if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
-- hvm_load_segment_selector(x86_seg_es, tss.es) ||
-- hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
-- hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
-- hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
-- hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
-- hvm_load_segment_selector(x86_seg_gs, tss.gs) )
-+ if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
-+ hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
-+ hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
-+ hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
-+ hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
-+ hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
- exn_raised = 1;
-
- rc = hvm_copy_to_guest_virt(
diff --git a/main/xen/xsa193-4.7-CVE-2016-9385.patch b/main/xen/xsa193-4.7-CVE-2016-9385.patch
deleted file mode 100644
index c5486efa544..00000000000
--- a/main/xen/xsa193-4.7-CVE-2016-9385.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/PV: writes of %fs and %gs base MSRs require canonical addresses
-
-Commit c42494acb2 ("x86: fix FS/GS base handling when using the
-fsgsbase feature") replaced the use of wrmsr_safe() on these paths
-without recognizing that wr{f,g}sbase() use just wrmsrl() and that the
-WR{F,G}SBASE instructions also raise #GP for non-canonical input.
-
-Similarly arch_set_info_guest() needs to prevent non-canonical
-addresses from getting stored into state later to be loaded by context
-switch code. For consistency also check stack pointers and LDT base.
-DR0..3, otoh, already get properly checked in set_debugreg() (albeit
-we discard the error there).
-
-The SHADOW_GS_BASE check isn't strictly necessary, but I think we
-better avoid trying the WRMSR if we know it's going to fail.
-
-This is XSA-193.
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/domain.c
-+++ b/xen/arch/x86/domain.c
-@@ -890,7 +890,13 @@ int arch_set_info_guest(
- {
- if ( !compat )
- {
-- if ( !is_canonical_address(c.nat->user_regs.eip) ||
-+ if ( !is_canonical_address(c.nat->user_regs.rip) ||
-+ !is_canonical_address(c.nat->user_regs.rsp) ||
-+ !is_canonical_address(c.nat->kernel_sp) ||
-+ (c.nat->ldt_ents && !is_canonical_address(c.nat->ldt_base)) ||
-+ !is_canonical_address(c.nat->fs_base) ||
-+ !is_canonical_address(c.nat->gs_base_kernel) ||
-+ !is_canonical_address(c.nat->gs_base_user) ||
- !is_canonical_address(c.nat->event_callback_eip) ||
- !is_canonical_address(c.nat->syscall_callback_eip) ||
- !is_canonical_address(c.nat->failsafe_callback_eip) )
---- a/xen/arch/x86/traps.c
-+++ b/xen/arch/x86/traps.c
-@@ -2723,19 +2723,22 @@ static int emulate_privileged_op(struct
- switch ( regs->_ecx )
- {
- case MSR_FS_BASE:
-- if ( is_pv_32bit_domain(currd) )
-+ if ( is_pv_32bit_domain(currd) ||
-+ !is_canonical_address(msr_content) )
- goto fail;
- wrfsbase(msr_content);
- v->arch.pv_vcpu.fs_base = msr_content;
- break;
- case MSR_GS_BASE:
-- if ( is_pv_32bit_domain(currd) )
-+ if ( is_pv_32bit_domain(currd) ||
-+ !is_canonical_address(msr_content) )
- goto fail;
- wrgsbase(msr_content);
- v->arch.pv_vcpu.gs_base_kernel = msr_content;
- break;
- case MSR_SHADOW_GS_BASE:
-- if ( is_pv_32bit_domain(currd) )
-+ if ( is_pv_32bit_domain(currd) ||
-+ !is_canonical_address(msr_content) )
- goto fail;
- if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
- goto fail;
diff --git a/main/xen/xsa195-CVE-2016-9383.patch b/main/xen/xsa195-CVE-2016-9383.patch
deleted file mode 100644
index a193a5cca03..00000000000
--- a/main/xen/xsa195-CVE-2016-9383.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86emul: fix huge bit offset handling
-
-We must never chop off the high 32 bits.
-
-This is XSA-195.
-
-Reported-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -2549,6 +2549,12 @@ x86_emulate(
- else
- {
- /*
-+ * Instructions such as bt can reference an arbitrary offset from
-+ * their memory operand, but the instruction doing the actual
-+ * emulation needs the appropriate op_bytes read from memory.
-+ * Adjust both the source register and memory operand to make an
-+ * equivalent instruction.
-+ *
- * EA += BitOffset DIV op_bytes*8
- * BitOffset = BitOffset MOD op_bytes*8
- * DIV truncates towards negative infinity.
-@@ -2560,14 +2566,15 @@ x86_emulate(
- src.val = (int32_t)src.val;
- if ( (long)src.val < 0 )
- {
-- unsigned long byte_offset;
-- byte_offset = op_bytes + (((-src.val-1) >> 3) & ~(op_bytes-1));
-+ unsigned long byte_offset =
-+ op_bytes + (((-src.val - 1) >> 3) & ~(op_bytes - 1L));
-+
- ea.mem.off -= byte_offset;
- src.val = (byte_offset << 3) + src.val;
- }
- else
- {
-- ea.mem.off += (src.val >> 3) & ~(op_bytes - 1);
-+ ea.mem.off += (src.val >> 3) & ~(op_bytes - 1L);
- src.val &= (op_bytes << 3) - 1;
- }
- }
diff --git a/main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch b/main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
deleted file mode 100644
index 7193e9ad5ad..00000000000
--- a/main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/emul: Correct the IDT entry calculation in inject_swint()
-
-The logic, as introduced in c/s 36ebf14ebe "x86/emulate: support for emulating
-software event injection" is buggy. The size of an IDT entry depends on long
-mode being active, not the width of the code segment currently in use.
-
-In particular, this means that a compatibility code segment which hits
-emulation for software event injection will end up using an incorrect offset
-in the IDT for DPL/Presence checking. In practice, this only occurs on old
-AMD hardware lacking NRip support; all newer AMD hardware, and all Intel
-hardware bypass this path in the emulator.
-
-While here, fix a minor issue with reading the IDT entry. The return value
-from ops->read() wasn't checked, but in reality the only failure case is if a
-pagefault occurs. This is not a realistic problem as the kernel will almost
-certainly crash with a double fault if this setup actually occurred.
-
-This is part of XSA-196.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/x86_emulate/x86_emulate.c | 15 +++++++++++----
- 1 file changed, 11 insertions(+), 4 deletions(-)
-
-diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
-index 7a707dc..f74aa8f 100644
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -1630,10 +1630,16 @@ static int inject_swint(enum x86_swint_type type,
- {
- if ( !in_realmode(ctxt, ops) )
- {
-- unsigned int idte_size = (ctxt->addr_size == 64) ? 16 : 8;
-- unsigned int idte_offset = vector * idte_size;
-+ unsigned int idte_size, idte_offset;
- struct segment_register idtr;
- uint32_t idte_ctl;
-+ int lm = in_longmode(ctxt, ops);
-+
-+ if ( lm < 0 )
-+ return X86EMUL_UNHANDLEABLE;
-+
-+ idte_size = lm ? 16 : 8;
-+ idte_offset = vector * idte_size;
-
- /* icebp sets the External Event bit despite being an instruction. */
- error_code = (vector << 3) | ECODE_IDT |
-@@ -1661,8 +1667,9 @@ static int inject_swint(enum x86_swint_type type,
- * Should strictly speaking read all 8/16 bytes of an entry,
- * but we currently only care about the dpl and present bits.
- */
-- ops->read(x86_seg_none, idtr.base + idte_offset + 4,
-- &idte_ctl, sizeof(idte_ctl), ctxt);
-+ if ( (rc = ops->read(x86_seg_none, idtr.base + idte_offset + 4,
-+ &idte_ctl, sizeof(idte_ctl), ctxt)) )
-+ goto done;
-
- /* Is this entry present? */
- if ( !(idte_ctl & (1u << 15)) )
diff --git a/main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch b/main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
deleted file mode 100644
index 26580ff8099..00000000000
--- a/main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/svm: Fix injection of software interrupts
-
-The non-NextRip logic in c/s 36ebf14eb "x86/emulate: support for emulating
-software event injection" was based on an older version of the AMD software
-manual. The manual was later corrected, following findings from that series.
-
-I took the original wording of "not supported without NextRIP" to mean that
-X86_EVENTTYPE_SW_INTERRUPT was not eligible for use. It turns out that this
-is not the case, and the new wording is clearer on the matter.
-
-Despite testing the original patch series on non-NRip hardware, the
-swint-emulation XTF test case focuses on the debug vectors; it never ended up
-executing an `int $n` instruction for a vector which wasn't also an exception.
-
-During a vmentry, the use of X86_EVENTTYPE_HW_EXCEPTION comes with a vector
-check to ensure that it is only used with exception vectors. Xen's use of
-X86_EVENTTYPE_HW_EXCEPTION for `int $n` injection has always been buggy on AMD
-hardware.
-
-Fix this by always using X86_EVENTTYPE_SW_INTERRUPT.
-
-Print and decode the eventinj information in svm_vmcb_dump(), as it has
-several invalid combinations which cause vmentry failures.
-
-This is part of XSA-196.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/hvm/svm/svm.c | 13 +++++--------
- xen/arch/x86/hvm/svm/svmdebug.c | 4 ++++
- 2 files changed, 9 insertions(+), 8 deletions(-)
-
-diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
-index 4391744..76efc3e 100644
---- a/xen/arch/x86/hvm/svm/svm.c
-+++ b/xen/arch/x86/hvm/svm/svm.c
-@@ -1231,17 +1231,14 @@ static void svm_inject_trap(const struct hvm_trap *trap)
- {
- case X86_EVENTTYPE_SW_INTERRUPT: /* int $n */
- /*
-- * Injection type 4 (software interrupt) is only supported with
-- * NextRIP support. Without NextRIP, the emulator will have performed
-- * DPL and presence checks for us.
-+ * Software interrupts (type 4) cannot be properly injected if the
-+ * processor doesn't support NextRIP. Without NextRIP, the emulator
-+ * will have performed DPL and presence checks for us, and will have
-+ * moved eip forward if appropriate.
- */
- if ( cpu_has_svm_nrips )
-- {
- vmcb->nextrip = regs->eip + _trap.insn_len;
-- event.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
-- }
-- else
-- event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
-+ event.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
- break;
-
- case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
-diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
-index ded5d19..f93dfed 100644
---- a/xen/arch/x86/hvm/svm/svmdebug.c
-+++ b/xen/arch/x86/hvm/svm/svmdebug.c
-@@ -48,6 +48,10 @@ void svm_vmcb_dump(const char *from, struct vmcb_struct *vmcb)
- vmcb->tlb_control,
- (unsigned long long)vmcb->_vintr.bytes,
- (unsigned long long)vmcb->interrupt_shadow);
-+ printk("eventinj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
-+ vmcb->eventinj.bytes, vmcb->eventinj.fields.v,
-+ vmcb->eventinj.fields.ev, vmcb->eventinj.fields.type,
-+ vmcb->eventinj.fields.vector);
- printk("exitcode = %#Lx exitintinfo = %#Lx\n",
- (unsigned long long)vmcb->exitcode,
- (unsigned long long)vmcb->exitintinfo.bytes);
diff --git a/main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch b/main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch
deleted file mode 100644
index e59a965d43f..00000000000
--- a/main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: xen: fix ioreq handling
-
-Avoid double fetches and bounds check size to avoid overflowing
-internal variables.
-
-This is XSA-197.
-
-Reported-by: yanghongke <yanghongke@huawei.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
-
---- a/tools/qemu-xen/xen-hvm.c
-+++ b/tools/qemu-xen/xen-hvm.c
-@@ -817,6 +817,10 @@ static void cpu_ioreq_pio(ioreq_t *req)
- {
- uint32_t i;
-
-+ if (req->size > sizeof(uint32_t)) {
-+ hw_error("PIO: bad size (%u)", req->size);
-+ }
-+
- if (req->dir == IOREQ_READ) {
- if (!req->data_is_ptr) {
- req->data = do_inp(req->addr, req->size);
-@@ -846,6 +850,10 @@ static void cpu_ioreq_move(ioreq_t *req)
- {
- uint32_t i;
-
-+ if (req->size > sizeof(req->data)) {
-+ hw_error("MMIO: bad size (%u)", req->size);
-+ }
-+
- if (!req->data_is_ptr) {
- if (req->dir == IOREQ_READ) {
- for (i = 0; i < req->count; i++) {
-@@ -999,11 +1007,13 @@ static int handle_buffered_iopage(XenIOS
- req.df = 1;
- req.type = buf_req->type;
- req.data_is_ptr = 0;
-+ xen_rmb();
- qw = (req.size == 8);
- if (qw) {
- buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
- IOREQ_BUFFER_SLOT_NUM];
- req.data |= ((uint64_t)buf_req->data) << 32;
-+ xen_rmb();
- }
-
- handle_ioreq(state, &req);
-@@ -1034,7 +1044,11 @@ static void cpu_handle_ioreq(void *opaqu
-
- handle_buffered_iopage(state);
- if (req) {
-- handle_ioreq(state, req);
-+ ioreq_t copy = *req;
-+
-+ xen_rmb();
-+ handle_ioreq(state, &copy);
-+ req->data = copy.data;
-
- if (req->state != STATE_IOREQ_INPROCESS) {
- fprintf(stderr, "Badness in I/O request ... not in service?!: "
diff --git a/main/xen/xsa197-qemut-CVE-2016-9381.patch b/main/xen/xsa197-qemut-CVE-2016-9381.patch
deleted file mode 100644
index 3f55beccb03..00000000000
--- a/main/xen/xsa197-qemut-CVE-2016-9381.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: xen: fix ioreq handling
-
-Avoid double fetches and bounds check size to avoid overflowing
-internal variables.
-
-This is XSA-197.
-
-Reported-by: yanghongke <yanghongke@huawei.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Ian Jackson <ian.jackson@eu.citrix.com>
-
---- a/tools/qemu-xen-traditional/i386-dm/helper2.c
-+++ b/tools/qemu-xen-traditional/i386-dm/helper2.c
-@@ -375,6 +375,11 @@ static void cpu_ioreq_pio(CPUState *env,
- {
- uint32_t i;
-
-+ if (req->size > sizeof(unsigned long)) {
-+ fprintf(stderr, "PIO: bad size (%u)\n", req->size);
-+ exit(-1);
-+ }
-+
- if (req->dir == IOREQ_READ) {
- if (!req->data_is_ptr) {
- req->data = do_inp(env, req->addr, req->size);
-@@ -404,6 +409,11 @@ static void cpu_ioreq_move(CPUState *env
- {
- uint32_t i;
-
-+ if (req->size > sizeof(req->data)) {
-+ fprintf(stderr, "MMIO: bad size (%u)\n", req->size);
-+ exit(-1);
-+ }
-+
- if (!req->data_is_ptr) {
- if (req->dir == IOREQ_READ) {
- for (i = 0; i < req->count; i++) {
-@@ -516,11 +526,13 @@ static int __handle_buffered_iopage(CPUS
- req.df = 1;
- req.type = buf_req->type;
- req.data_is_ptr = 0;
-+ xen_rmb();
- qw = (req.size == 8);
- if (qw) {
- buf_req = &buffered_io_page->buf_ioreq[(rdptr + 1) %
- IOREQ_BUFFER_SLOT_NUM];
- req.data |= ((uint64_t)buf_req->data) << 32;
-+ xen_rmb();
- }
-
- __handle_ioreq(env, &req);
-@@ -552,7 +564,11 @@ static void cpu_handle_ioreq(void *opaqu
-
- __handle_buffered_iopage(env);
- if (req) {
-- __handle_ioreq(env, req);
-+ ioreq_t copy = *req;
-+
-+ xen_rmb();
-+ __handle_ioreq(env, &copy);
-+ req->data = copy.data;
-
- if (req->state != STATE_IOREQ_INPROCESS) {
- fprintf(logfile, "Badness in I/O request ... not in service?!: "
diff --git a/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch b/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
deleted file mode 100644
index dbf708491ed..00000000000
--- a/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From 71a389ae940bc52bf897a6e5becd73fd8ede94c5 Mon Sep 17 00:00:00 2001
-From: Ian Jackson <ian.jackson@eu.citrix.com>
-Date: Thu, 3 Nov 2016 16:37:40 +0000
-Subject: [PATCH] pygrub: Properly quote results, when returning them to the
- caller:
-
-* When the caller wants sexpr output, use `repr()'
- This is what Xend expects.
-
- The returned S-expressions are now escaped and quoted by Python,
- generally using '...'. Previously kernel and ramdisk were unquoted
- and args was quoted with "..." but without proper escaping. This
- change may break toolstacks which do not properly dequote the
- returned S-expressions.
-
-* When the caller wants "simple" output, crash if the delimiter is
- contained in the returned value.
-
- With --output-format=simple it does not seem like this could ever
- happen, because the bootloader config parsers all take line-based
- input from the various bootloader config files.
-
- With --output-format=simple0, this can happen if the bootloader
- config file contains nul bytes.
-
-This is XSA-198.
-
-Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
-Tested-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
----
- tools/pygrub/src/pygrub | 9 ++++++---
- 1 file changed, 6 insertions(+), 3 deletions(-)
-
-diff --git a/tools/pygrub/src/pygrub b/tools/pygrub/src/pygrub
-index 40f9584..dd0c8f7 100755
---- a/tools/pygrub/src/pygrub
-+++ b/tools/pygrub/src/pygrub
-@@ -721,14 +721,17 @@ def sniff_netware(fs, cfg):
- return cfg
-
- def format_sxp(kernel, ramdisk, args):
-- s = "linux (kernel %s)" % kernel
-+ s = "linux (kernel %s)" % repr(kernel)
- if ramdisk:
-- s += "(ramdisk %s)" % ramdisk
-+ s += "(ramdisk %s)" % repr(ramdisk)
- if args:
-- s += "(args \"%s\")" % args
-+ s += "(args %s)" % repr(args)
- return s
-
- def format_simple(kernel, ramdisk, args, sep):
-+ for check in (kernel, ramdisk, args):
-+ if check is not None and sep in check:
-+ raise RuntimeError, "simple format cannot represent delimiter-containing value"
- s = ("kernel %s" % kernel) + sep
- if ramdisk:
- s += ("ramdisk %s" % ramdisk) + sep
---
-2.1.4
-
diff --git a/main/xen/xsa200-4.6.patch b/main/xen/xsa200-4.6.patch
deleted file mode 100644
index 5aa979b5383..00000000000
--- a/main/xen/xsa200-4.6.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86emul: CMPXCHG8B ignores operand size prefix
-
-Otherwise besides mis-handling the instruction, the comparison failure
-case would result in uninitialized stack data being handed back to the
-guest in rDX:rAX (32 bits leaked for 32-bit guests, 96 bits for 64-bit
-ones).
-
-This is XSA-200.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/tools/tests/x86_emulator/test_x86_emulator.c
-+++ b/tools/tests/x86_emulator/test_x86_emulator.c
-@@ -429,6 +429,24 @@ int main(int argc, char **argv)
- goto fail;
- printf("okay\n");
-
-+ printf("%-40s", "Testing cmpxchg8b (%edi) [opsize]...");
-+ instr[0] = 0x66; instr[1] = 0x0f; instr[2] = 0xc7; instr[3] = 0x0f;
-+ res[0] = 0x12345678;
-+ res[1] = 0x87654321;
-+ regs.eflags = 0x200;
-+ regs.eip = (unsigned long)&instr[0];
-+ regs.edi = (unsigned long)res;
-+ rc = x86_emulate(&ctxt, &emulops);
-+ if ( (rc != X86EMUL_OKAY) ||
-+ (res[0] != 0x12345678) ||
-+ (res[1] != 0x87654321) ||
-+ (regs.eax != 0x12345678) ||
-+ (regs.edx != 0x87654321) ||
-+ ((regs.eflags&0x240) != 0x200) ||
-+ (regs.eip != (unsigned long)&instr[4]) )
-+ goto fail;
-+ printf("okay\n");
-+
- printf("%-40s", "Testing movsxbd (%%eax),%%ecx...");
- instr[0] = 0x0f; instr[1] = 0xbe; instr[2] = 0x08;
- regs.eflags = 0x200;
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -4739,8 +4739,12 @@ x86_emulate(
- generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
- generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
- if ( op_bytes == 8 )
-+ {
- vcpu_must_have_cx16();
-- op_bytes *= 2;
-+ op_bytes = 16;
-+ }
-+ else
-+ op_bytes = 8;
-
- /* Get actual old value. */
- if ( (rc = ops->read(ea.mem.seg, ea.mem.off, old, op_bytes,
diff --git a/main/xen/xsa201-1.patch b/main/xen/xsa201-1.patch
deleted file mode 100644
index 50983b852fa..00000000000
--- a/main/xen/xsa201-1.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From: Wei Chen <Wei.Chen@arm.com>
-Subject: arm64: handle guest-generated EL1 asynchronous abort
-
-In the current code, when the hypervisor receives an asynchronous abort
-from a guest, the hypervisor panics and the host goes down.
-We have to prevent such a security issue, so in this patch we crash
-the guest when the hypervisor receives an asynchronous abort from
-the guest.
-
-This is CVE-2016-9815, part of XSA-201.
-
-Signed-off-by: Wei Chen <Wei.Chen@arm.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
-Reviewed-by: Steve Capper <steve.capper@arm.com>
-Reviewed-by: Julien Grall <Julien.Grall@arm.com>
-
---- a/xen/arch/arm/arm64/entry.S
-+++ b/xen/arch/arm/arm64/entry.S
-@@ -204,9 +204,12 @@ guest_fiq_invalid:
- entry hyp=0, compat=0
- invalid BAD_FIQ
-
--guest_error_invalid:
-+guest_error:
- entry hyp=0, compat=0
-- invalid BAD_ERROR
-+ msr daifclr, #2
-+ mov x0, sp
-+ bl do_trap_guest_error
-+ exit hyp=0, compat=0
-
- guest_sync_compat:
- entry hyp=0, compat=1
-@@ -225,9 +228,12 @@ guest_fiq_invalid_compat:
- entry hyp=0, compat=1
- invalid BAD_FIQ
-
--guest_error_invalid_compat:
-+guest_error_compat:
- entry hyp=0, compat=1
-- invalid BAD_ERROR
-+ msr daifclr, #2
-+ mov x0, sp
-+ bl do_trap_guest_error
-+ exit hyp=0, compat=1
-
- ENTRY(return_to_new_vcpu32)
- exit hyp=0, compat=1
-@@ -286,12 +292,12 @@ ENTRY(hyp_traps_vector)
- ventry guest_sync // Synchronous 64-bit EL0/EL1
- ventry guest_irq // IRQ 64-bit EL0/EL1
- ventry guest_fiq_invalid // FIQ 64-bit EL0/EL1
-- ventry guest_error_invalid // Error 64-bit EL0/EL1
-+ ventry guest_error // Error 64-bit EL0/EL1
-
- ventry guest_sync_compat // Synchronous 32-bit EL0/EL1
- ventry guest_irq_compat // IRQ 32-bit EL0/EL1
- ventry guest_fiq_invalid_compat // FIQ 32-bit EL0/EL1
-- ventry guest_error_invalid_compat // Error 32-bit EL0/EL1
-+ ventry guest_error_compat // Error 32-bit EL0/EL1
-
- /*
- * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
---- a/xen/arch/arm/traps.c
-+++ b/xen/arch/arm/traps.c
-@@ -2723,6 +2723,21 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
- }
- }
-
-+asmlinkage void do_trap_guest_error(struct cpu_user_regs *regs)
-+{
-+ enter_hypervisor_head(regs);
-+
-+ /*
-+ * Currently, to ensure hypervisor safety, when we received a
-+ * guest-generated vSerror/vAbort, we just crash the guest to protect
-+ * the hypervisor. In future we can better handle this by injecting
-+ * a vSerror/vAbort to the guest.
-+ */
-+ gdprintk(XENLOG_WARNING, "Guest(Dom-%u) will be crashed by vSError\n",
-+ current->domain->domain_id);
-+ domain_crash_synchronous();
-+}
-+
- asmlinkage void do_trap_irq(struct cpu_user_regs *regs)
- {
- enter_hypervisor_head(regs);
diff --git a/main/xen/xsa201-2.patch b/main/xen/xsa201-2.patch
deleted file mode 100644
index 9bd1f8f89d7..00000000000
--- a/main/xen/xsa201-2.patch
+++ /dev/null
@@ -1,199 +0,0 @@
-From: Wei Chen <Wei.Chen@arm.com>
-Subject: arm64: handle async aborts delivered while at EL2
-
-If EL1 generates an asynchronous abort and then traps into EL2
-(by HVC or IRQ) before the abort has been delivered, the hypervisor
-could not catch it, because the PSTATE.A bit is masked all the time
-in hypervisor. So this asynchronous abort may be slipped to next
-running guest with PSTATE.A bit unmasked.
-
-In order to avoid this, it is necessary to take the abort at EL2, by
-clearing the PSTATE.A bit. In this patch, we unmask the PSTATE.A bit
-to open a window to catch guest-generated asynchronous abort in all
-EL1 -> EL2 switch paths. If we caught such asynchronous abort in
-checking window, the hyp_error exception will be triggered and the
-abort source guest will be crashed.
-
-This is CVE-2016-9816, part of XSA-201.
-
-Signed-off-by: Wei Chen <Wei.Chen@arm.com>
-Reviewed-by: Julien Grall <julien.grall@arm.com>
-
---- a/xen/arch/arm/arm64/entry.S
-+++ b/xen/arch/arm/arm64/entry.S
-@@ -173,6 +173,43 @@ hyp_error_invalid:
- entry hyp=1
- invalid BAD_ERROR
-
-+hyp_error:
-+ /*
-+ * Only two possibilities:
-+ * 1) Either we come from the exit path, having just unmasked
-+ * PSTATE.A: change the return code to an EL2 fault, and
-+ * carry on, as we're already in a sane state to handle it.
-+ * 2) Or we come from anywhere else, and that's a bug: we panic.
-+ */
-+ entry hyp=1
-+ msr daifclr, #2
-+
-+ /*
-+ * The ELR_EL2 may be modified by an interrupt, so we have to use the
-+ * saved value in cpu_user_regs to check whether we come from 1) or
-+ * not.
-+ */
-+ ldr x0, [sp, #UREGS_PC]
-+ adr x1, abort_guest_exit_start
-+ cmp x0, x1
-+ adr x1, abort_guest_exit_end
-+ ccmp x0, x1, #4, ne
-+ mov x0, sp
-+ mov x1, #BAD_ERROR
-+
-+ /*
-+ * Not equal, the exception come from 2). It's a bug, we have to
-+ * panic the hypervisor.
-+ */
-+ b.ne do_bad_mode
-+
-+ /*
-+ * Otherwise, the exception come from 1). It happened because of
-+ * the guest. Crash this guest.
-+ */
-+ bl do_trap_guest_error
-+ exit hyp=1
-+
- /* Traps taken in Current EL with SP_ELx */
- hyp_sync:
- entry hyp=1
-@@ -189,15 +226,29 @@ hyp_irq:
-
- guest_sync:
- entry hyp=0, compat=0
-+ bl check_pending_vserror
-+ /*
-+ * If x0 is Non-zero, a vSError took place, the initial exception
-+ * doesn't have any significance to be handled. Exit ASAP
-+ */
-+ cbnz x0, 1f
- msr daifclr, #2
- mov x0, sp
- bl do_trap_hypervisor
-+1:
- exit hyp=0, compat=0
-
- guest_irq:
- entry hyp=0, compat=0
-+ bl check_pending_vserror
-+ /*
-+ * If x0 is Non-zero, a vSError took place, the initial exception
-+ * doesn't have any significance to be handled. Exit ASAP
-+ */
-+ cbnz x0, 1f
- mov x0, sp
- bl do_trap_irq
-+1:
- exit hyp=0, compat=0
-
- guest_fiq_invalid:
-@@ -213,15 +264,29 @@ guest_error:
-
- guest_sync_compat:
- entry hyp=0, compat=1
-+ bl check_pending_vserror
-+ /*
-+ * If x0 is Non-zero, a vSError took place, the initial exception
-+ * doesn't have any significance to be handled. Exit ASAP
-+ */
-+ cbnz x0, 1f
- msr daifclr, #2
- mov x0, sp
- bl do_trap_hypervisor
-+1:
- exit hyp=0, compat=1
-
- guest_irq_compat:
- entry hyp=0, compat=1
-+ bl check_pending_vserror
-+ /*
-+ * If x0 is Non-zero, a vSError took place, the initial exception
-+ * doesn't have any significance to be handled. Exit ASAP
-+ */
-+ cbnz x0, 1f
- mov x0, sp
- bl do_trap_irq
-+1:
- exit hyp=0, compat=1
-
- guest_fiq_invalid_compat:
-@@ -270,6 +335,62 @@ return_from_trap:
- eret
-
- /*
-+ * This function is used to check pending virtual SError in the gap of
-+ * EL1 -> EL2 world switch.
-+ * The x0 register will be used to indicate the results of detection.
-+ * x0 -- Non-zero indicates a pending virtual SError took place.
-+ * x0 -- Zero indicates no pending virtual SError took place.
-+ */
-+check_pending_vserror:
-+ /*
-+ * Save elr_el2 to check whether the pending SError exception takes
-+ * place while we are doing this sync exception.
-+ */
-+ mrs x0, elr_el2
-+
-+ /* Synchronize against in-flight ld/st */
-+ dsb sy
-+
-+ /*
-+ * Unmask PSTATE asynchronous abort bit. If there is a pending
-+ * SError, the EL2 error exception will happen after PSTATE.A
-+ * is cleared.
-+ */
-+ msr daifclr, #4
-+
-+ /*
-+ * This is our single instruction exception window. A pending
-+ * SError is guaranteed to occur at the earliest when we unmask
-+ * it, and at the latest just after the ISB.
-+ *
-+ * If a pending SError occurs, the program will jump to EL2 error
-+ * exception handler, and the elr_el2 will be set to
-+ * abort_guest_exit_start or abort_guest_exit_end.
-+ */
-+abort_guest_exit_start:
-+
-+ isb
-+
-+abort_guest_exit_end:
-+ /* Mask PSTATE asynchronous abort bit, close the checking window. */
-+ msr daifset, #4
-+
-+ /*
-+ * Compare elr_el2 and the saved value to check whether we are
-+ * returning from a valid exception caused by pending SError.
-+ */
-+ mrs x1, elr_el2
-+ cmp x0, x1
-+
-+ /*
-+ * Not equal, the pending SError exception took place, set
-+ * x0 to non-zero.
-+ */
-+ cset x0, ne
-+
-+ ret
-+
-+/*
- * Exception vectors.
- */
- .macro ventry label
-@@ -287,7 +408,7 @@ ENTRY(hyp_traps_vector)
- ventry hyp_sync // Synchronous EL2h
- ventry hyp_irq // IRQ EL2h
- ventry hyp_fiq_invalid // FIQ EL2h
-- ventry hyp_error_invalid // Error EL2h
-+ ventry hyp_error // Error EL2h
-
- ventry guest_sync // Synchronous 64-bit EL0/EL1
- ventry guest_irq // IRQ 64-bit EL0/EL1
diff --git a/main/xen/xsa201-3-4.7.patch b/main/xen/xsa201-3-4.7.patch
deleted file mode 100644
index af7fc3703e0..00000000000
--- a/main/xen/xsa201-3-4.7.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From: Wei Chen <Wei.Chen@arm.com>
-Subject: arm: crash the guest when it traps on external abort
-
-If we spot a data or prefetch abort bearing the ESR_EL2.EA bit set, we
-know that this is an external abort, and that should crash the guest.
-
-This is CVE-2016-9817, part of XSA-201.
-
-Signed-off-by: Wei Chen <Wei.Chen@arm.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
-Reviewed-by: Steve Capper <steve.capper@arm.com>
-Reviewed-by: Julien Grall <Julien.Grall@arm.com>
-
---- a/xen/arch/arm/traps.c
-+++ b/xen/arch/arm/traps.c
-@@ -2383,6 +2383,15 @@ static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
- int rc;
- register_t gva = READ_SYSREG(FAR_EL2);
-
-+ /*
-+ * If this bit has been set, it means that this instruction abort is caused
-+ * by a guest external abort. Currently we crash the guest to protect the
-+ * hypervisor. In future one can better handle this by injecting a virtual
-+ * abort to the guest.
-+ */
-+ if ( hsr.iabt.eat )
-+ domain_crash_synchronous();
-+
- switch ( hsr.iabt.ifsc & 0x3f )
- {
- case FSC_FLT_PERM ... FSC_FLT_PERM + 3:
-@@ -2437,6 +2446,15 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
- return;
- }
-
-+ /*
-+ * If this bit has been set, it means that this data abort is caused
-+ * by a guest external abort. Currently we crash the guest to protect the
-+ * hypervisor. In future one can better handle this by injecting a virtual
-+ * abort to the guest.
-+ */
-+ if ( dabt.eat )
-+ domain_crash_synchronous();
-+
- info.dabt = dabt;
- #ifdef CONFIG_ARM_32
- info.gva = READ_CP32(HDFAR);
diff --git a/main/xen/xsa201-4.patch b/main/xen/xsa201-4.patch
deleted file mode 100644
index 8060a5be136..00000000000
--- a/main/xen/xsa201-4.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-From: Wei Chen <Wei.Chen@arm.com>
-Subject: arm32: handle async aborts delivered while at HYP
-
-If guest generates an asynchronous abort and then traps into HYP
-(by HVC or IRQ) before the abort has been delivered, the hypervisor
-could not catch it, because the PSTATE.A bit is masked all the time
-in hypervisor. So this asynchronous abort may be slipped to next
-running guest with PSTATE.A bit unmasked.
-
-In order to avoid this, it is necessary to take the abort at HYP, by
-clearing the PSTATE.A bit. In this patch, we unmask the PSTATE.A bit
-to open a window to catch guest-generated asynchronous abort in all
-Guest -> HYP switch paths. If we caught such asynchronous abort in
-checking window, the HYP data abort exception will be triggered and
-the abort source guest will be crashed.
-
-This is CVE-2016-9818, part of XSA-201.
-
-Signed-off-by: Wei Chen <Wei.Chen@arm.com>
-Reviewed-by: Julien Grall <julien.grall@arm.com>
-
---- a/xen/arch/arm/arm32/entry.S
-+++ b/xen/arch/arm/arm32/entry.S
-@@ -42,6 +42,61 @@ save_guest_regs:
- SAVE_BANKED(fiq)
- SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq)
- SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq);
-+ /*
-+ * Start to check pending virtual abort in the gap of Guest -> HYP
-+ * world switch.
-+ *
-+ * Save ELR_hyp to check whether the pending virtual abort exception
-+ * takes place while we are doing this trap exception.
-+ */
-+ mrs r1, ELR_hyp
-+
-+ /*
-+ * Force loads and stores to complete before unmasking asynchronous
-+ * aborts and forcing the delivery of the exception.
-+ */
-+ dsb sy
-+
-+ /*
-+ * Unmask asynchronous abort bit. If there is a pending asynchronous
-+ * abort, the data_abort exception will happen after A bit is cleared.
-+ */
-+ cpsie a
-+
-+ /*
-+ * This is our single instruction exception window. A pending
-+ * asynchronous abort is guaranteed to occur at the earliest when we
-+ * unmask it, and at the latest just after the ISB.
-+ *
-+ * If a pending abort occurs, the program will jump to data_abort
-+ * exception handler, and the ELR_hyp will be set to
-+ * abort_guest_exit_start or abort_guest_exit_end.
-+ */
-+ .global abort_guest_exit_start
-+abort_guest_exit_start:
-+
-+ isb
-+
-+ .global abort_guest_exit_end
-+abort_guest_exit_end:
-+ /* Mask CPSR asynchronous abort bit, close the checking window. */
-+ cpsid a
-+
-+ /*
-+ * Compare ELR_hyp and the saved value to check whether we are
-+ * returning from a valid exception caused by pending virtual
-+ * abort.
-+ */
-+ mrs r2, ELR_hyp
-+ cmp r1, r2
-+
-+ /*
-+ * Not equal, the pending virtual abort exception took place, the
-+ * initial exception does not have any significance to be handled.
-+ * Exit ASAP.
-+ */
-+ bne return_from_trap
-+
- mov pc, lr
-
- #define DEFINE_TRAP_ENTRY(trap) \
---- a/xen/arch/arm/arm32/traps.c
-+++ b/xen/arch/arm/arm32/traps.c
-@@ -63,7 +63,10 @@ asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs)
-
- asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs)
- {
-- do_unexpected_trap("Data Abort", regs);
-+ if ( VABORT_GEN_BY_GUEST(regs) )
-+ do_trap_guest_error(regs);
-+ else
-+ do_unexpected_trap("Data Abort", regs);
- }
-
- /*
---- a/xen/include/asm-arm/arm32/processor.h
-+++ b/xen/include/asm-arm/arm32/processor.h
-@@ -55,6 +55,17 @@ struct cpu_user_regs
-
- uint32_t pad1; /* Doubleword-align the user half of the frame */
- };
-+
-+/* Functions for pending virtual abort checking window. */
-+void abort_guest_exit_start(void);
-+void abort_guest_exit_end(void);
-+
-+#define VABORT_GEN_BY_GUEST(r) \
-+( \
-+ ( (unsigned long)abort_guest_exit_start == (r)->pc ) || \
-+ ( (unsigned long)abort_guest_exit_end == (r)->pc ) \
-+)
-+
- #endif
-
- /* Layout as used in assembly, with src/dest registers mixed in */
---- a/xen/include/asm-arm/processor.h
-+++ b/xen/include/asm-arm/processor.h
-@@ -690,6 +690,8 @@ void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
- int call_smc(register_t function_id, register_t arg0, register_t arg1,
- register_t arg2);
-
-+void do_trap_guest_error(struct cpu_user_regs *regs);
-+
- #endif /* __ASSEMBLY__ */
- #endif /* __ASM_ARM_PROCESSOR_H */
- /*
diff --git a/main/xen/xsa202-4.6.patch b/main/xen/xsa202-4.6.patch
deleted file mode 100644
index 0c7fff0e34c..00000000000
--- a/main/xen/xsa202-4.6.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: force EFLAGS.IF on when exiting to PV guests
-
-Guest kernels modifying instructions in the process of being emulated
-for another of their vCPU-s may effect EFLAGS.IF to be cleared upon
-next exiting to guest context, by converting the being emulated
-instruction to CLI (at the right point in time). Prevent any such bad
-effects by always forcing EFLAGS.IF on. And to cover hypothetical other
-similar issues, also force EFLAGS.{IOPL,NT,VM} to zero.
-
-This is XSA-202.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/x86_64/compat/entry.S
-+++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -174,6 +174,8 @@ compat_bad_hypercall:
- /* %rbx: struct vcpu, interrupts disabled */
- ENTRY(compat_restore_all_guest)
- ASSERT_INTERRUPTS_DISABLED
-+ mov $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
-+ and UREGS_eflags(%rsp),%r11d
- .Lcr4_orig:
- .skip .Lcr4_alt_end - .Lcr4_alt, 0x90
- .Lcr4_orig_end:
-@@ -209,6 +211,8 @@ ENTRY(compat_restore_all_guest)
- (.Lcr4_orig_end - .Lcr4_orig), \
- (.Lcr4_alt_end - .Lcr4_alt)
- .popsection
-+ or $X86_EFLAGS_IF,%r11
-+ mov %r11d,UREGS_eflags(%rsp)
- RESTORE_ALL adj=8 compat=1
- .Lft0: iretq
-
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -40,28 +40,29 @@ restore_all_guest:
- testw $TRAP_syscall,4(%rsp)
- jz iret_exit_to_guest
-
-+ movq 24(%rsp),%r11 # RFLAGS
-+ andq $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11
-+ orq $X86_EFLAGS_IF,%r11
-+
- /* Don't use SYSRET path if the return address is not canonical. */
- movq 8(%rsp),%rcx
- sarq $47,%rcx
- incl %ecx
- cmpl $1,%ecx
-- ja .Lforce_iret
-+ movq 8(%rsp),%rcx # RIP
-+ ja iret_exit_to_guest
-
- cmpw $FLAT_USER_CS32,16(%rsp)# CS
-- movq 8(%rsp),%rcx # RIP
-- movq 24(%rsp),%r11 # RFLAGS
- movq 32(%rsp),%rsp # RSP
- je 1f
- sysretq
- 1: sysretl
-
--.Lforce_iret:
-- /* Mimic SYSRET behavior. */
-- movq 8(%rsp),%rcx # RIP
-- movq 24(%rsp),%r11 # RFLAGS
- ALIGN
- /* No special register assumptions. */
- iret_exit_to_guest:
-+ andl $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),24(%rsp)
-+ orl $X86_EFLAGS_IF,24(%rsp)
- addq $8,%rsp
- .Lft0: iretq
-
diff --git a/main/xen/xsa203-4.7.patch b/main/xen/xsa203-4.7.patch
deleted file mode 100644
index d623d8468b5..00000000000
--- a/main/xen/xsa203-4.7.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/HVM: add missing NULL check before using VMFUNC hook
-
-This is XSA-203.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/hvm/emulate.c
-+++ b/xen/arch/x86/hvm/emulate.c
-@@ -1643,6 +1643,8 @@ static int hvmemul_vmfunc(
- {
- int rc;
-
-+ if ( !hvm_funcs.altp2m_vcpu_emulate_vmfunc )
-+ return X86EMUL_UNHANDLEABLE;
- rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
- if ( rc != X86EMUL_OKAY )
- hvmemul_inject_hw_exception(TRAP_invalid_op, 0, ctxt);
diff --git a/main/xen/xsa204-4.7.patch b/main/xen/xsa204-4.7.patch
deleted file mode 100644
index ea41789a4b8..00000000000
--- a/main/xen/xsa204-4.7.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Sun, 18 Dec 2016 15:42:59 +0000
-Subject: [PATCH] x86/emul: Correct the handling of eflags with SYSCALL
-
-A singlestep #DB is determined by the resulting eflags value from the
-execution of SYSCALL, not the original eflags value.
-
-By using the original eflags value, we negate the guest kernels attempt to
-protect itself from a privilege escalation by masking TF.
-
-Introduce a tf boolean and have the SYSCALL emulation recalculate it
-after the instruction is complete.
-
-This is XSA-204
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/x86_emulate/x86_emulate.c | 23 ++++++++++++++++++++---
- 1 file changed, 20 insertions(+), 3 deletions(-)
-
-diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
-index bca7045..abe442e 100644
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -1582,6 +1582,7 @@ x86_emulate(
- union vex vex = {};
- unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
- bool_t lock_prefix = 0;
-+ bool_t tf = !!(ctxt->regs->eflags & EFLG_TF);
- int override_seg = -1, rc = X86EMUL_OKAY;
- struct operand src = { .reg = REG_POISON };
- struct operand dst = { .reg = REG_POISON };
-@@ -3910,9 +3911,8 @@ x86_emulate(
- }
-
- no_writeback:
-- /* Inject #DB if single-step tracing was enabled at instruction start. */
-- if ( (ctxt->regs->eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
-- (ops->inject_hw_exception != NULL) )
-+ /* Should a singlestep #DB be raised? */
-+ if ( tf && (rc == X86EMUL_OKAY) && (ops->inject_hw_exception != NULL) )
- rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
-
- /* Commit shadow register state. */
-@@ -4143,6 +4143,23 @@ x86_emulate(
- (rc = ops->write_segment(x86_seg_ss, &ss, ctxt)) )
- goto done;
-
-+ /*
-+ * SYSCALL (unlike most instructions) evaluates its singlestep action
-+ * based on the resulting EFLG_TF, not the starting EFLG_TF.
-+ *
-+ * As the #DB is raised after the CPL change and before the OS can
-+ * switch stack, it is a large risk for privilege escalation.
-+ *
-+ * 64bit kernels should mask EFLG_TF in MSR_FMASK to avoid any
-+ * vulnerability. Running the #DB handler on an IST stack is also a
-+ * mitigation.
-+ *
-+ * 32bit kernels have no ability to mask EFLG_TF at all. Their only
-+ * mitigation is to use a task gate for handling #DB (or to not use
-+ * enable EFER.SCE to start with).
-+ */
-+ tf = !!(_regs.eflags & EFLG_TF);
-+
- break;
- }
-
diff --git a/main/xen/xsa207.patch b/main/xen/xsa207.patch
deleted file mode 100644
index 6fb86fc9d58..00000000000
--- a/main/xen/xsa207.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From: Oleksandr Tyshchenko <olekstysh@gmail.com>
-Subject: IOMMU: always call teardown callback
-
-There is a possible scenario when (d)->need_iommu remains unset
-during guest domain execution. For example, when no devices
-were assigned to it. Taking into account that teardown callback
-is not called when (d)->need_iommu is unset we might have unreleased
-resources after destroying domain.
-
-So, always call teardown callback to roll back actions
-that were performed in init callback.
-
-This is XSA-207.
-
-Signed-off-by: Oleksandr Tyshchenko <olekstysh@gmail.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-Tested-by: Jan Beulich <jbeulich@suse.com>
-Tested-by: Julien Grall <julien.grall@arm.com>
-
---- a/xen/drivers/passthrough/iommu.c
-+++ b/xen/drivers/passthrough/iommu.c
-@@ -244,8 +244,7 @@ void iommu_domain_destroy(struct domain
- if ( !iommu_enabled || !dom_iommu(d)->platform_ops )
- return;
-
-- if ( need_iommu(d) )
-- iommu_teardown(d);
-+ iommu_teardown(d);
-
- arch_iommu_domain_destroy(d);
- }
diff --git a/main/xen/xsa208-qemut.patch b/main/xen/xsa208-qemut.patch
deleted file mode 100644
index 2e5827275ba..00000000000
--- a/main/xen/xsa208-qemut.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From 8f63265efeb6f92e63f7e749cb26131b68b20df7 Mon Sep 17 00:00:00 2001
-From: Li Qiang <liqiang6-s@360.cn>
-Date: Mon, 13 Feb 2017 15:22:15 +0000
-Subject: [PATCH] cirrus: fix oob access issue (CVE-2017-2615)
-
-When doing bitblt copy in backward mode, we should minus the
-blt width first just like the adding in the forward mode. This
-can avoid the oob access of the front of vga's vram.
-
-This is XSA-208.
-
-upstream-commit-id: 62d4c6bd5263bb8413a06c80144fc678df6dfb64
-
-Signed-off-by: Li Qiang <liqiang6-s@360.cn>
-
-[ kraxel: with backward blits (negative pitch) addr is the topmost
- address, so check it as-is against vram size ]
-
-[ This is CVE-2017-2615 / XSA-208 - Ian Jackson ]
-
-Cc: qemu-stable@nongnu.org
-Cc: P J P <ppandit@redhat.com>
-Cc: Laszlo Ersek <lersek@redhat.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: Wolfgang Bumiller <w.bumiller@proxmox.com>
-Fixes: d3532a0db02296e687711b8cdc7791924efccea0 (CVE-2014-8106)
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Message-id: 1485938101-26602-1-git-send-email-kraxel@redhat.com
-Reviewed-by: Laszlo Ersek <lersek@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
-Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
----
- tools/qemu-xen-traditional/hw/cirrus_vga.c | 7 +++----
- 1 file changed, 3 insertions(+), 4 deletions(-)
-
-diff --git a/tools/qemu-xen-traditional/hw/cirrus_vga.c b/tools/qemu-xen-traditional/hw/cirrus_vga.c
-index e6c3893..364e22d 100644
---- a/tools/qemu-xen-traditional/hw/cirrus_vga.c
-+++ b/tools/qemu-xen-traditional/hw/cirrus_vga.c
-@@ -308,10 +308,9 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
- {
- if (pitch < 0) {
- int64_t min = addr
-- + ((int64_t)s->cirrus_blt_height-1) * pitch;
-- int32_t max = addr
-- + s->cirrus_blt_width;
-- if (min < 0 || max >= s->vram_size) {
-+ + ((int64_t)s->cirrus_blt_height - 1) * pitch
-+ - s->cirrus_blt_width;
-+ if (min < -1 || addr >= s->vram_size) {
- return true;
- }
- } else {
---
-2.1.4
-
diff --git a/main/xen/xsa208-qemuu-4.7.patch b/main/xen/xsa208-qemuu-4.7.patch
deleted file mode 100644
index abd85c77e60..00000000000
--- a/main/xen/xsa208-qemuu-4.7.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From 8f63265efeb6f92e63f7e749cb26131b68b20df7 Mon Sep 17 00:00:00 2001
-From: Li Qiang <liqiang6-s@360.cn>
-Date: Mon, 13 Feb 2017 15:22:15 +0000
-Subject: [PATCH] cirrus: fix oob access issue (CVE-2017-2615)
-
-When doing bitblt copy in backward mode, we should minus the
-blt width first just like the adding in the forward mode. This
-can avoid the oob access of the front of vga's vram.
-
-This is XSA-208.
-
-upstream-commit-id: 62d4c6bd5263bb8413a06c80144fc678df6dfb64
-
-Signed-off-by: Li Qiang <liqiang6-s@360.cn>
-
-[ kraxel: with backward blits (negative pitch) addr is the topmost
- address, so check it as-is against vram size ]
-
-Cc: qemu-stable@nongnu.org
-Cc: P J P <ppandit@redhat.com>
-Cc: Laszlo Ersek <lersek@redhat.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: Wolfgang Bumiller <w.bumiller@proxmox.com>
-Fixes: d3532a0db02296e687711b8cdc7791924efccea0 (CVE-2014-8106)
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Message-id: 1485938101-26602-1-git-send-email-kraxel@redhat.com
-Reviewed-by: Laszlo Ersek <lersek@redhat.com>
-Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
----
- tools/qemu-xen/hw/display/cirrus_vga.c | 7 +++----
- 1 file changed, 3 insertions(+), 4 deletions(-)
-
-diff --git a/tools/qemu-xen/hw/display/cirrus_vga.c b/tools/qemu-xen/hw/display/cirrus_vga.c
-index 5198037..7bf3707 100644
---- a/tools/qemu-xen/hw/display/cirrus_vga.c
-+++ b/tools/qemu-xen/hw/display/cirrus_vga.c
-@@ -272,10 +272,9 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
- {
- if (pitch < 0) {
- int64_t min = addr
-- + ((int64_t)s->cirrus_blt_height-1) * pitch;
-- int32_t max = addr
-- + s->cirrus_blt_width;
-- if (min < 0 || max >= s->vga.vram_size) {
-+ + ((int64_t)s->cirrus_blt_height - 1) * pitch
-+ - s->cirrus_blt_width;
-+ if (min < -1 || addr >= s->vga.vram_size) {
- return true;
- }
- } else {
---
-2.1.4
-
diff --git a/main/xen/xsa209-qemut.patch b/main/xen/xsa209-qemut.patch
deleted file mode 100644
index 23225d0d294..00000000000
--- a/main/xen/xsa209-qemut.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From: Gerd Hoffmann <kraxel@redhat.com>
-Subject: [PATCH 3/3] cirrus: add blit_is_unsafe call to cirrus_bitblt_cputovideo
-
-CIRRUS_BLTMODE_MEMSYSSRC blits do NOT check blit destination
-and blit width, at all. Oops. Fix it.
-
-Security impact: high.
-
-The missing blit destination check allows to write to host memory.
-Basically same as CVE-2014-8106 for the other blit variants.
-
-The missing blit width check allows to overflow cirrus_bltbuf,
-with the attractive target cirrus_srcptr (current cirrus_bltbuf write
-position) being located right after cirrus_bltbuf in CirrusVGAState.
-
-Due to cirrus emulation writing cirrus_bltbuf bytewise the attacker
-hasn't full control over cirrus_srcptr though, only one byte can be
-changed. Once the first byte has been modified further writes land
-elsewhere.
-
-[ This is CVE-2017-2620 / XSA-209 - Ian Jackson ]
-
-Fixed compilation by removing extra parameter to blit_is_unsafe. -iwj
-
-Reported-by: Gerd Hoffmann <ghoffman@redhat.com>
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
----
-diff --git a/tools/qemu-xen-traditional/hw/cirrus_vga.c b/tools/qemu-xen-traditional/hw/cirrus_vga.c
-index e6c3893..45facb6 100644
---- a/tools/qemu-xen-traditional/hw/cirrus_vga.c
-+++ b/tools/qemu-xen-traditional/hw/cirrus_vga.c
-@@ -900,6 +900,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
- {
- int w;
-
-+ if (blit_is_unsafe(s)) {
-+ return 0;
-+ }
-+
- s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_MEMSYSSRC;
- s->cirrus_srcptr = &s->cirrus_bltbuf[0];
- s->cirrus_srcptr_end = &s->cirrus_bltbuf[0];
-@@ -925,6 +929,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
- }
- s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height;
- }
-+
-+ /* the blit_is_unsafe call above should catch this */
-+ assert(s->cirrus_blt_srcpitch <= CIRRUS_BLTBUFSIZE);
-+
- s->cirrus_srcptr = s->cirrus_bltbuf;
- s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
- cirrus_update_memory_access(s);
diff --git a/main/xen/xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch b/main/xen/xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch
deleted file mode 100644
index 95f522c3d5c..00000000000
--- a/main/xen/xsa209-qemuu-0001-display-cirrus-ignore-source-pitch-value-as-needed-i.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From 52b7f43c8fa185ab856bcaacda7abc9a6fc07f84 Mon Sep 17 00:00:00 2001
-From: Bruce Rogers <brogers@suse.com>
-Date: Tue, 21 Feb 2017 10:54:38 -0800
-Subject: [PATCH 1/2] display: cirrus: ignore source pitch value as needed in
- blit_is_unsafe
-
-Commit 4299b90 added a check which is too broad, given that the source
-pitch value is not required to be initialized for solid fill operations.
-This patch refines the blit_is_unsafe() check to ignore source pitch in
-that case. After applying the above commit as a security patch, we
-noticed the SLES 11 SP4 guest gui failed to initialize properly.
-
-Signed-off-by: Bruce Rogers <brogers@suse.com>
-Message-id: 20170109203520.5619-1-brogers@suse.com
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
----
- tools/qemu-xen/hw/display/cirrus_vga.c | 11 +++++++----
- 1 file changed, 7 insertions(+), 4 deletions(-)
-
-diff --git a/tools/qemu-xen/hw/display/cirrus_vga.c b/tools/qemu-xen/hw/display/cirrus_vga.c
-index 7bf3707..34a6900 100644
---- a/tools/qemu-xen/hw/display/cirrus_vga.c
-+++ b/tools/qemu-xen/hw/display/cirrus_vga.c
-@@ -288,7 +288,7 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
- return false;
- }
-
--static bool blit_is_unsafe(struct CirrusVGAState *s)
-+static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only)
- {
- /* should be the case, see cirrus_bitblt_start */
- assert(s->cirrus_blt_width > 0);
-@@ -302,6 +302,9 @@ static bool blit_is_unsafe(struct CirrusVGAState *s)
- s->cirrus_blt_dstaddr & s->cirrus_addr_mask)) {
- return true;
- }
-+ if (dst_only) {
-+ return false;
-+ }
- if (blit_region_is_unsafe(s, s->cirrus_blt_srcpitch,
- s->cirrus_blt_srcaddr & s->cirrus_addr_mask)) {
- return true;
-@@ -667,7 +670,7 @@ static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s,
-
- dst = s->vga.vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask);
-
-- if (blit_is_unsafe(s))
-+ if (blit_is_unsafe(s, false))
- return 0;
-
- (*s->cirrus_rop) (s, dst, src,
-@@ -685,7 +688,7 @@ static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop)
- {
- cirrus_fill_t rop_func;
-
-- if (blit_is_unsafe(s)) {
-+ if (blit_is_unsafe(s, true)) {
- return 0;
- }
- rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1];
-@@ -784,7 +787,7 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
-
- static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
- {
-- if (blit_is_unsafe(s))
-+ if (blit_is_unsafe(s, false))
- return 0;
-
- cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr,
---
-2.1.4
-
diff --git a/main/xen/xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch b/main/xen/xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch
deleted file mode 100644
index f6a5880516e..00000000000
--- a/main/xen/xsa209-qemuu-0002-cirrus-add-blit_is_unsafe-call-to-cirrus_bitblt_cput.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From 15268f91fbe75b38a851c458aef74e693d646ea5 Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 21 Feb 2017 10:54:59 -0800
-Subject: [PATCH 2/2] cirrus: add blit_is_unsafe call to
- cirrus_bitblt_cputovideo
-
-CIRRUS_BLTMODE_MEMSYSSRC blits do NOT check blit destination
-and blit width, at all. Oops. Fix it.
-
-Security impact: high.
-
-The missing blit destination check allows to write to host memory.
-Basically same as CVE-2014-8106 for the other blit variants.
-
-The missing blit width check allows to overflow cirrus_bltbuf,
-with the attractive target cirrus_srcptr (current cirrus_bltbuf write
-position) being located right after cirrus_bltbuf in CirrusVGAState.
-
-Due to cirrus emulation writing cirrus_bltbuf bytewise the attacker
-hasn't full control over cirrus_srcptr though, only one byte can be
-changed. Once the first byte has been modified further writes land
-elsewhere.
-
-[ This is CVE-2017-2620 / XSA-209 - Ian Jackson ]
-
-Reported-by: Gerd Hoffmann <ghoffman@redhat.com>
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
----
- tools/qemu-xen/hw/display/cirrus_vga.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/tools/qemu-xen/hw/display/cirrus_vga.c b/tools/qemu-xen/hw/display/cirrus_vga.c
-index 34a6900..5901250 100644
---- a/tools/qemu-xen/hw/display/cirrus_vga.c
-+++ b/tools/qemu-xen/hw/display/cirrus_vga.c
-@@ -865,6 +865,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
- {
- int w;
-
-+ if (blit_is_unsafe(s, true)) {
-+ return 0;
-+ }
-+
- s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_MEMSYSSRC;
- s->cirrus_srcptr = &s->cirrus_bltbuf[0];
- s->cirrus_srcptr_end = &s->cirrus_bltbuf[0];
-@@ -890,6 +894,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
- }
- s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height;
- }
-+
-+ /* the blit_is_unsafe call above should catch this */
-+ assert(s->cirrus_blt_srcpitch <= CIRRUS_BLTBUFSIZE);
-+
- s->cirrus_srcptr = s->cirrus_bltbuf;
- s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
- cirrus_update_memory_access(s);
---
-2.1.4
-
diff --git a/main/xen/xsa211-qemut.patch b/main/xen/xsa211-qemut.patch
deleted file mode 100644
index ecfc58a17a1..00000000000
--- a/main/xen/xsa211-qemut.patch
+++ /dev/null
@@ -1,225 +0,0 @@
-From 29e67cfd46b4d06ca1bb75558e227ec34a6af35f Mon Sep 17 00:00:00 2001
-From: Ian Jackson <ian.jackson@eu.citrix.com>
-Date: Thu, 9 Mar 2017 11:14:55 +0000
-Subject: [PATCH] cirrus/vnc: zap drop bitblit support from console code.
-
-From: Gerd Hoffmann <kraxel@redhat.com>
-
-There is a special code path (dpy_gfx_copy) to allow graphic emulation
-notify user interface code about bitblit operations carried out by
-guests. It is supported by cirrus and vnc server. The intended purpose
-is to optimize display scrolls and just send over the scroll op instead
-of a full display update.
-
-This is rarely used these days though because modern guests simply don't
-use the cirrus blitter any more. Any linux guest using the cirrus drm
-driver doesn't. Any windows guest newer than winxp doesn't ship with a
-cirrus driver any more and thus uses the cirrus as simple framebuffer.
-
-So this code tends to bitrot and bugs can go unnoticed for a long time.
-See for example commit "3e10c3e vnc: fix qemu crash because of SIGSEGV"
-which fixes a bug lingering in the code for almost a year, added by
-commit "c7628bf vnc: only alloc server surface with clients connected".
-
-Also the vnc server will throttle the frame rate in case it figures the
-network can't keep up (send buffers are full). This doesn't work with
-dpy_gfx_copy, for any copy operation sent to the vnc client we have to
-send all outstanding updates beforehand, otherwise the vnc client might
-run the client side blit on outdated data and thereby corrupt the
-display. So this dpy_gfx_copy "optimization" might even make things
-worse on slow network links.
-
-Let's kill it once and for all.
-
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-
-These changes (dropping dpy_copy and all its references and
-implementations) reimplemented for qemu-xen-traditional.
-
-This is XSA-211.
-
-Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
----
- console.c | 8 --------
- tools/qemu-xen-traditional/console.h | 16 ----------------
- hw/cirrus_vga.c | 15 +++++----------
- hw/vmware_vga.c | 1 +
- vnc.c | 35 -----------------------------------
- 5 files changed, 6 insertions(+), 69 deletions(-)
-
-diff --git a/tools/qemu-xen-traditional/console.c b/tools/qemu-xen-traditional/console.c
-index d4f1ad0..e61b53b 100644
---- a/tools/qemu-xen-traditional/console.c
-+++ b/tools/qemu-xen-traditional/console.c
-@@ -1399,14 +1399,6 @@ void qemu_console_resize(DisplayState *ds, int width, int height)
- }
- }
-
--void qemu_console_copy(DisplayState *ds, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h)
--{
-- if (is_graphic_console()) {
-- dpy_copy(ds, src_x, src_y, dst_x, dst_y, w, h);
-- }
--}
--
- PixelFormat qemu_different_endianness_pixelformat(int bpp)
- {
- PixelFormat pf;
-diff --git a/tools/qemu-xen-traditional/console.h b/tools/qemu-xen-traditional/console.h
-index 14b42f3..8306cc4 100644
---- a/tools/qemu-xen-traditional/console.h
-+++ b/tools/qemu-xen-traditional/console.h
-@@ -98,8 +98,6 @@ struct DisplayChangeListener {
- void (*dpy_resize)(struct DisplayState *s);
- void (*dpy_setdata)(struct DisplayState *s);
- void (*dpy_refresh)(struct DisplayState *s);
-- void (*dpy_copy)(struct DisplayState *s, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h);
- void (*dpy_fill)(struct DisplayState *s, int x, int y,
- int w, int h, uint32_t c);
- void (*dpy_text_cursor)(struct DisplayState *s, int x, int y);
-@@ -211,18 +209,6 @@ static inline void dpy_refresh(DisplayState *s)
- }
- }
-
--static inline void dpy_copy(struct DisplayState *s, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h) {
-- struct DisplayChangeListener *dcl = s->listeners;
-- while (dcl != NULL) {
-- if (dcl->dpy_copy)
-- dcl->dpy_copy(s, src_x, src_y, dst_x, dst_y, w, h);
-- else /* TODO */
-- dcl->dpy_update(s, dst_x, dst_y, w, h);
-- dcl = dcl->next;
-- }
--}
--
- static inline void dpy_fill(struct DisplayState *s, int x, int y,
- int w, int h, uint32_t c) {
- struct DisplayChangeListener *dcl = s->listeners;
-@@ -297,8 +283,6 @@ void text_consoles_set_display(DisplayState *ds);
- void console_select(unsigned int index);
- void console_color_init(DisplayState *ds);
- void qemu_console_resize(DisplayState *ds, int width, int height);
--void qemu_console_copy(DisplayState *ds, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h);
-
- /* sdl.c */
- void sdl_display_init(DisplayState *ds, int full_screen, int no_frame, int opengl_enabled);
-diff --git a/tools/qemu-xen-traditional/hw/cirrus_vga.c b/tools/qemu-xen-traditional/hw/cirrus_vga.c
-index 06b4a3b..4e85b90 100644
---- a/tools/qemu-xen-traditional/hw/cirrus_vga.c
-+++ b/tools/qemu-xen-traditional/hw/cirrus_vga.c
-@@ -793,11 +793,6 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
- }
- }
-
-- /* we have to flush all pending changes so that the copy
-- is generated at the appropriate moment in time */
-- if (notify)
-- vga_hw_update();
--
- (*s->cirrus_rop) (s, s->vram_ptr +
- (s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
- s->vram_ptr +
-@@ -806,13 +801,13 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
- s->cirrus_blt_width, s->cirrus_blt_height);
-
- if (notify)
-- qemu_console_copy(s->ds,
-- sx, sy, dx, dy,
-- s->cirrus_blt_width / depth,
-- s->cirrus_blt_height);
-+ dpy_update(s->ds,
-+ dx, dy,
-+ s->cirrus_blt_width / depth,
-+ s->cirrus_blt_height);
-
- /* we don't have to notify the display that this portion has
-- changed since qemu_console_copy implies this */
-+ changed since dpy_update implies this */
-
- cirrus_invalidate_region(s, s->cirrus_blt_dstaddr,
- s->cirrus_blt_dstpitch, s->cirrus_blt_width,
-diff --git a/tools/qemu-xen-traditional/hw/vmware_vga.c b/tools/qemu-xen-traditional/hw/vmware_vga.c
-index d1cba28..c38e43c 100644
---- a/tools/qemu-xen-traditional/hw/vmware_vga.c
-+++ b/tools/qemu-xen-traditional/hw/vmware_vga.c
-@@ -383,6 +383,7 @@ static inline void vmsvga_copy_rect(struct vmsvga_state_s *s,
-
- # ifdef DIRECT_VRAM
- if (s->ds->dpy_copy)
-+# error This configuration is not supported. See XSA-211.
- qemu_console_copy(s->ds, x0, y0, x1, y1, w, h);
- else
- # endif
-diff --git a/tools/qemu-xen-traditional/vnc.c b/tools/qemu-xen-traditional/vnc.c
-index 61d1555..0e61197 100644
---- a/tools/qemu-xen-traditional/vnc.c
-+++ b/tools/qemu-xen-traditional/vnc.c
-@@ -572,36 +572,6 @@ static void send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
- send_framebuffer_update_raw(vs, x, y, w, h);
- }
-
--static void vnc_copy(DisplayState *ds, int src_x, int src_y, int dst_x, int dst_y, int w, int h)
--{
-- VncState *vs = ds->opaque;
-- int updating_client = 1;
--
-- if (!vs->update_requested ||
-- src_x < vs->visible_x || src_y < vs->visible_y ||
-- dst_x < vs->visible_x || dst_y < vs->visible_y ||
-- (src_x + w) > (vs->visible_x + vs->visible_w) ||
-- (src_y + h) > (vs->visible_y + vs->visible_h) ||
-- (dst_x + w) > (vs->visible_x + vs->visible_w) ||
-- (dst_y + h) > (vs->visible_y + vs->visible_h))
-- updating_client = 0;
--
-- if (updating_client)
-- _vnc_update_client(vs);
--
-- if (updating_client && vs->csock != -1 && !vs->has_update) {
-- vnc_write_u8(vs, 0); /* msg id */
-- vnc_write_u8(vs, 0);
-- vnc_write_u16(vs, 1); /* number of rects */
-- vnc_framebuffer_update(vs, dst_x, dst_y, w, h, 1);
-- vnc_write_u16(vs, src_x);
-- vnc_write_u16(vs, src_y);
-- vnc_flush(vs);
-- vs->update_requested--;
-- } else
-- framebuffer_set_updated(vs, dst_x, dst_y, w, h);
--}
--
- static int find_update_height(VncState *vs, int y, int maxy, int last_x, int x)
- {
- int h;
-@@ -1543,16 +1513,12 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings)
- vs->has_pointer_type_change = 0;
- vs->has_WMVi = 0;
- vs->absolute = -1;
-- dcl->dpy_copy = NULL;
-
- for (i = n_encodings - 1; i >= 0; i--) {
- switch (encodings[i]) {
- case 0: /* Raw */
- vs->has_hextile = 0;
- break;
-- case 1: /* CopyRect */
-- dcl->dpy_copy = vnc_copy;
-- break;
- case 5: /* Hextile */
- vs->has_hextile = 1;
- break;
-@@ -2459,7 +2425,6 @@ static void vnc_listen_read(void *opaque)
- vs->has_resize = 0;
- vs->has_hextile = 0;
- vs->update_requested = 0;
-- dcl->dpy_copy = NULL;
- vnc_timer_init(vs);
- }
- }
---
-2.1.4
-
diff --git a/main/xen/xsa211-qemuu-4.6.patch b/main/xen/xsa211-qemuu-4.6.patch
deleted file mode 100644
index b61cca93e23..00000000000
--- a/main/xen/xsa211-qemuu-4.6.patch
+++ /dev/null
@@ -1,260 +0,0 @@
-From c85f4df08b17f5808eda2b8afea1e4db7016cdc8 Mon Sep 17 00:00:00 2001
-From: Gerd Hoffmann <kraxel@redhat.com>
-Date: Tue, 14 Feb 2017 19:09:59 +0100
-Subject: [PATCH] cirrus/vnc: zap bitblit support from console code.
-
-There is a special code path (dpy_gfx_copy) that allows graphic emulation
-to notify user interface code about bitblit operations carried out by
-guests. It is supported by cirrus and the vnc server. The intended purpose
-is to optimize display scrolls and just send over the scroll op instead
-of a full display update.
-
-This is rarely used these days though because modern guests simply don't
-use the cirrus blitter any more. Any linux guest using the cirrus drm
-driver doesn't. Any windows guest newer than winxp doesn't ship with a
-cirrus driver any more and thus uses the cirrus as a simple framebuffer.
-
-So this code tends to bitrot and bugs can go unnoticed for a long time.
-See for example commit "3e10c3e vnc: fix qemu crash because of SIGSEGV"
-which fixes a bug lingering in the code for almost a year, added by
-commit "c7628bf vnc: only alloc server surface with clients connected".
-
-Also the vnc server will throttle the frame rate in case it figures the
-network can't keep up (send buffers are full). This doesn't work with
-dpy_gfx_copy: for any copy operation sent to the vnc client we have to
-send all outstanding updates beforehand; otherwise the vnc client might
-run the client-side blit on outdated data and thereby corrupt the
-display. So this dpy_gfx_copy "optimization" might even make things
-worse on slow network links.
-
-Let's kill it once and for all.
-
-Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
----
- hw/display/cirrus_vga.c | 12 ++-----
- include/ui/console.h | 8 -----
- ui/console.c | 28 ---------------
- ui/vnc.c | 91 -------------------------------------------------
- 4 files changed, 3 insertions(+), 136 deletions(-)
-
-diff --git a/tools/qemu-xen/hw/display/cirrus_vga.c b/tools/qemu-xen/hw/display/cirrus_vga.c
-index d643a0d..2e38c86 100644
---- a/tools/qemu-xen/hw/display/cirrus_vga.c
-+++ b/tools/qemu-xen/hw/display/cirrus_vga.c
-@@ -756,11 +756,6 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
- }
- }
-
-- /* we have to flush all pending changes so that the copy
-- is generated at the appropriate moment in time */
-- if (notify)
-- graphic_hw_update(s->vga.con);
--
- (*s->cirrus_rop) (s, s->vga.vram_ptr +
- (s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
- s->vga.vram_ptr +
-@@ -769,10 +764,9 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
- s->cirrus_blt_width, s->cirrus_blt_height);
-
- if (notify) {
-- qemu_console_copy(s->vga.con,
-- sx, sy, dx, dy,
-- s->cirrus_blt_width / depth,
-- s->cirrus_blt_height);
-+ dpy_gfx_update(s->vga.con, dx, dy,
-+ s->cirrus_blt_width / depth,
-+ s->cirrus_blt_height);
- }
-
- /* we don't have to notify the display that this portion has
-diff --git a/tools/qemu-xen/include/ui/console.h b/tools/qemu-xen/include/ui/console.h
-index 22ef8ca..331c07a 100644
---- a/tools/qemu-xen/include/ui/console.h
-+++ b/tools/qemu-xen/include/ui/console.h
-@@ -158,10 +158,6 @@ typedef struct DisplayChangeListenerOps {
- int x, int y, int w, int h);
- void (*dpy_gfx_switch)(DisplayChangeListener *dcl,
- struct DisplaySurface *new_surface);
-- void (*dpy_gfx_copy)(DisplayChangeListener *dcl,
-- int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h);
--
- void (*dpy_text_cursor)(DisplayChangeListener *dcl,
- int x, int y);
- void (*dpy_text_resize)(DisplayChangeListener *dcl,
-@@ -223,8 +219,6 @@ int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info);
- void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h);
- void dpy_gfx_replace_surface(QemuConsole *con,
- DisplaySurface *surface);
--void dpy_gfx_copy(QemuConsole *con, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h);
- void dpy_text_cursor(QemuConsole *con, int x, int y);
- void dpy_text_update(QemuConsole *con, int x, int y, int w, int h);
- void dpy_text_resize(QemuConsole *con, int w, int h);
-@@ -315,8 +309,6 @@ void text_consoles_set_display(DisplayState *ds);
- void console_select(unsigned int index);
- void console_color_init(DisplayState *ds);
- void qemu_console_resize(QemuConsole *con, int width, int height);
--void qemu_console_copy(QemuConsole *con, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h);
- DisplaySurface *qemu_console_surface(QemuConsole *con);
- DisplayState *qemu_console_displaystate(QemuConsole *console);
-
-diff --git a/tools/qemu-xen/ui/console.c b/tools/qemu-xen/ui/console.c
-index 258af5d..cc1aa20 100644
---- a/tools/qemu-xen/ui/console.c
-+++ b/tools/qemu-xen/ui/console.c
-@@ -1450,27 +1450,6 @@ static void dpy_refresh(DisplayState *s)
- }
- }
-
--void dpy_gfx_copy(QemuConsole *con, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h)
--{
-- DisplayState *s = con->ds;
-- DisplayChangeListener *dcl;
--
-- if (!qemu_console_is_visible(con)) {
-- return;
-- }
-- QLIST_FOREACH(dcl, &s->listeners, next) {
-- if (con != (dcl->con ? dcl->con : active_console)) {
-- continue;
-- }
-- if (dcl->ops->dpy_gfx_copy) {
-- dcl->ops->dpy_gfx_copy(dcl, src_x, src_y, dst_x, dst_y, w, h);
-- } else { /* TODO */
-- dcl->ops->dpy_gfx_update(dcl, dst_x, dst_y, w, h);
-- }
-- }
--}
--
- void dpy_text_cursor(QemuConsole *con, int x, int y)
- {
- DisplayState *s = con->ds;
-@@ -1968,13 +1947,6 @@ void qemu_console_resize(QemuConsole *s, int width, int height)
- dpy_gfx_replace_surface(s, surface);
- }
-
--void qemu_console_copy(QemuConsole *con, int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h)
--{
-- assert(con->console_type == GRAPHIC_CONSOLE);
-- dpy_gfx_copy(con, src_x, src_y, dst_x, dst_y, w, h);
--}
--
- DisplaySurface *qemu_console_surface(QemuConsole *console)
- {
- return console->surface;
-diff --git a/tools/qemu-xen/ui/vnc.c b/tools/qemu-xen/ui/vnc.c
-index 76caa897..c3c2625 100644
---- a/tools/qemu-xen/ui/vnc.c
-+++ b/tools/qemu-xen/ui/vnc.c
-@@ -733,96 +733,6 @@ int vnc_send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
- return n;
- }
-
--static void vnc_copy(VncState *vs, int src_x, int src_y, int dst_x, int dst_y, int w, int h)
--{
-- /* send bitblit op to the vnc client */
-- vnc_lock_output(vs);
-- vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
-- vnc_write_u8(vs, 0);
-- vnc_write_u16(vs, 1); /* number of rects */
-- vnc_framebuffer_update(vs, dst_x, dst_y, w, h, VNC_ENCODING_COPYRECT);
-- vnc_write_u16(vs, src_x);
-- vnc_write_u16(vs, src_y);
-- vnc_unlock_output(vs);
-- vnc_flush(vs);
--}
--
--static void vnc_dpy_copy(DisplayChangeListener *dcl,
-- int src_x, int src_y,
-- int dst_x, int dst_y, int w, int h)
--{
-- VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
-- VncState *vs, *vn;
-- uint8_t *src_row;
-- uint8_t *dst_row;
-- int i, x, y, pitch, inc, w_lim, s;
-- int cmp_bytes;
--
-- vnc_refresh_server_surface(vd);
-- QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) {
-- if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
-- vs->force_update = 1;
-- vnc_update_client(vs, 1, true);
-- /* vs might be free()ed here */
-- }
-- }
--
-- /* do bitblit op on the local surface too */
-- pitch = vnc_server_fb_stride(vd);
-- src_row = vnc_server_fb_ptr(vd, src_x, src_y);
-- dst_row = vnc_server_fb_ptr(vd, dst_x, dst_y);
-- y = dst_y;
-- inc = 1;
-- if (dst_y > src_y) {
-- /* copy backwards */
-- src_row += pitch * (h-1);
-- dst_row += pitch * (h-1);
-- pitch = -pitch;
-- y = dst_y + h - 1;
-- inc = -1;
-- }
-- w_lim = w - (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT));
-- if (w_lim < 0) {
-- w_lim = w;
-- } else {
-- w_lim = w - (w_lim % VNC_DIRTY_PIXELS_PER_BIT);
-- }
-- for (i = 0; i < h; i++) {
-- for (x = 0; x <= w_lim;
-- x += s, src_row += cmp_bytes, dst_row += cmp_bytes) {
-- if (x == w_lim) {
-- if ((s = w - w_lim) == 0)
-- break;
-- } else if (!x) {
-- s = (VNC_DIRTY_PIXELS_PER_BIT -
-- (dst_x % VNC_DIRTY_PIXELS_PER_BIT));
-- s = MIN(s, w_lim);
-- } else {
-- s = VNC_DIRTY_PIXELS_PER_BIT;
-- }
-- cmp_bytes = s * VNC_SERVER_FB_BYTES;
-- if (memcmp(src_row, dst_row, cmp_bytes) == 0)
-- continue;
-- memmove(dst_row, src_row, cmp_bytes);
-- QTAILQ_FOREACH(vs, &vd->clients, next) {
-- if (!vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
-- set_bit(((x + dst_x) / VNC_DIRTY_PIXELS_PER_BIT),
-- vs->dirty[y]);
-- }
-- }
-- }
-- src_row += pitch - w * VNC_SERVER_FB_BYTES;
-- dst_row += pitch - w * VNC_SERVER_FB_BYTES;
-- y += inc;
-- }
--
-- QTAILQ_FOREACH(vs, &vd->clients, next) {
-- if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
-- vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h);
-- }
-- }
--}
--
- static void vnc_mouse_set(DisplayChangeListener *dcl,
- int x, int y, int visible)
- {
-@@ -2949,7 +2859,6 @@ static void vnc_listen_websocket_read(void *opaque)
- static const DisplayChangeListenerOps dcl_ops = {
- .dpy_name = "vnc",
- .dpy_refresh = vnc_refresh,
-- .dpy_gfx_copy = vnc_dpy_copy,
- .dpy_gfx_update = vnc_dpy_update,
- .dpy_gfx_switch = vnc_dpy_switch,
- .dpy_mouse_set = vnc_mouse_set,
---
-2.1.4
-
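The dpy_gfx_copy removal above replaces the forwarded blit with a plain update of the destination rectangle. A minimal sketch of that fallback, assuming a toy framebuffer type (names and types are illustrative, not QEMU's actual API):

    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    struct fb { unsigned char *pixels; int stride, bpp; };

    static void copy_row(struct fb *fb, int sx, int sy, int dx, int dy,
                         int row, int w)
    {
        memmove(fb->pixels + (size_t)(dy + row) * fb->stride + (size_t)dx * fb->bpp,
                fb->pixels + (size_t)(sy + row) * fb->stride + (size_t)sx * fb->bpp,
                (size_t)w * fb->bpp);
    }

    /* The emulated blitter copies inside the guest-visible surface itself and
     * then just marks the destination rectangle dirty; the UI resends that
     * rectangle from the already-updated surface, so no client-side copy of
     * possibly stale data is needed. */
    static void blit_and_update(struct fb *fb, int sx, int sy,
                                int dx, int dy, int w, int h)
    {
        int row;

        if (dy > sy)                       /* overlapping copy: go bottom-up */
            for (row = h - 1; row >= 0; row--)
                copy_row(fb, sx, sy, dx, dy, row, w);
        else
            for (row = 0; row < h; row++)
                copy_row(fb, sx, sy, dx, dy, row, w);

        printf("dpy_gfx_update(%d, %d, %d, %d)\n", dx, dy, w, h);
    }

    int main(void)
    {
        static unsigned char buf[64 * 64 * 4];
        struct fb fb = { buf, 64 * 4, 4 };

        blit_and_update(&fb, 0, 0, 8, 8, 16, 16);
        return 0;
    }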
diff --git a/main/xen/xsa212.patch b/main/xen/xsa212.patch
deleted file mode 100644
index 2c435c41364..00000000000
--- a/main/xen/xsa212.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-memory: properly check guest memory ranges in XENMEM_exchange handling
-
-The use of guest_handle_okay() here (as introduced by the XSA-29 fix)
-is insufficient; guest_handle_subrange_okay() needs to be used
-instead.
-
-Note that the uses are okay in
-- XENMEM_add_to_physmap_batch handling due to the size field being only
- 16 bits wide,
-- livepatch_list() due to the limit of 1024 enforced on the
- number-of-entries input (leaving aside the fact that this can be
- called by a privileged domain only anyway),
-- compat mode handling due to counts there being limited to 32 bits,
-- everywhere else due to guest arrays being accessed sequentially from
- index zero.
-
-This is XSA-212.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/common/memory.c
-+++ b/xen/common/memory.c
-@@ -436,8 +436,8 @@ static long memory_exchange(XEN_GUEST_HA
- goto fail_early;
- }
-
-- if ( !guest_handle_okay(exch.in.extent_start, exch.in.nr_extents) ||
-- !guest_handle_okay(exch.out.extent_start, exch.out.nr_extents) )
-+ if ( !guest_handle_subrange_okay(exch.in.extent_start, exch.nr_exchanged,
-+ exch.in.nr_extents - 1) )
- {
- rc = -EFAULT;
- goto fail_early;
-@@ -447,11 +447,27 @@ static long memory_exchange(XEN_GUEST_HA
- {
- in_chunk_order = exch.out.extent_order - exch.in.extent_order;
- out_chunk_order = 0;
-+
-+ if ( !guest_handle_subrange_okay(exch.out.extent_start,
-+ exch.nr_exchanged >> in_chunk_order,
-+ exch.out.nr_extents - 1) )
-+ {
-+ rc = -EFAULT;
-+ goto fail_early;
-+ }
- }
- else
- {
- in_chunk_order = 0;
- out_chunk_order = exch.in.extent_order - exch.out.extent_order;
-+
-+ if ( !guest_handle_subrange_okay(exch.out.extent_start,
-+ exch.nr_exchanged << out_chunk_order,
-+ exch.out.nr_extents - 1) )
-+ {
-+ rc = -EFAULT;
-+ goto fail_early;
-+ }
- }
-
- d = rcu_lock_domain_by_any_id(exch.in.domid);
---- a/xen/include/asm-x86/x86_64/uaccess.h
-+++ b/xen/include/asm-x86/x86_64/uaccess.h
-@@ -29,8 +29,9 @@ extern void *xlat_malloc(unsigned long *
- /*
- * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
- * This is also valid for range checks (addr, addr+size). As long as the
-- * start address is outside the Xen-reserved area then we will access a
-- * non-canonical address (and thus fault) before ever reaching VIRT_START.
-+ * start address is outside the Xen-reserved area, sequential accesses
-+ * (starting at addr) will hit a non-canonical address (and thus fault)
-+ * before ever reaching VIRT_START.
- */
- #define __addr_ok(addr) \
- (((unsigned long)(addr) < (1UL<<47)) || \
-@@ -40,7 +41,8 @@ extern void *xlat_malloc(unsigned long *
- (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
-
- #define array_access_ok(addr, count, size) \
-- (access_ok(addr, (count)*(size)))
-+ (likely(((count) ?: 0UL) < (~0UL / (size))) && \
-+ access_ok(addr, (count) * (size)))
-
- #define __compat_addr_ok(d, addr) \
- ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d))
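Two things in the XSA-212 patch above are easy to miss: the exchange handler checks guest ranges starting from nr_exchanged rather than index zero (hence guest_handle_subrange_okay()), and array_access_ok() now refuses a count whose product with the element size would wrap. A small self-contained sketch of the overflow case, with made-up values and assuming a 64-bit unsigned long:

    #include <stdio.h>

    int main(void)
    {
        unsigned long count = 0x2000000000000000UL;  /* guest-chosen */
        unsigned long size  = 8;                     /* element size */

        /* Old check: the multiplication wraps to 0, so a range test on
         * (addr, addr + count * size) passes trivially. */
        printf("count * size wraps to %#lx\n", count * size);

        /* New check, mirroring the patched array_access_ok(): reject
         * before multiplying. */
        if (!(count < ~0UL / size))
            puts("rejected: count * size would overflow");
        return 0;
    }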
diff --git a/main/xen/xsa213-4.6.patch b/main/xen/xsa213-4.6.patch
deleted file mode 100644
index 115bb6310b4..00000000000
--- a/main/xen/xsa213-4.6.patch
+++ /dev/null
@@ -1,173 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: multicall: deal with early exit conditions
-
-In particular, changes to guest privilege level require the multicall
-sequence to be aborted, as hypercalls are permitted from kernel mode
-only. While likely not very useful in a multicall, also properly handle
-the return value in the HYPERVISOR_iret case (which should be the guest
-specified value).
-
-This is XSA-213.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Acked-by: Julien Grall <julien.grall@arm.com>
-
---- a/xen/arch/arm/traps.c
-+++ b/xen/arch/arm/traps.c
-@@ -1485,30 +1485,33 @@ static bool_t check_multicall_32bit_clea
- return true;
- }
-
--void do_multicall_call(struct multicall_entry *multi)
-+enum mc_disposition do_multicall_call(struct multicall_entry *multi)
- {
- arm_hypercall_fn_t call = NULL;
-
- if ( multi->op >= ARRAY_SIZE(arm_hypercall_table) )
- {
- multi->result = -ENOSYS;
-- return;
-+ return mc_continue;
- }
-
- call = arm_hypercall_table[multi->op].fn;
- if ( call == NULL )
- {
- multi->result = -ENOSYS;
-- return;
-+ return mc_continue;
- }
-
- if ( is_32bit_domain(current->domain) &&
- !check_multicall_32bit_clean(multi) )
-- return;
-+ return mc_continue;
-
- multi->result = call(multi->args[0], multi->args[1],
- multi->args[2], multi->args[3],
- multi->args[4]);
-+
-+ return likely(!psr_mode_is_user(guest_cpu_user_regs()))
-+ ? mc_continue : mc_preempt;
- }
-
- /*
---- a/xen/common/multicall.c
-+++ b/xen/common/multicall.c
-@@ -40,6 +40,7 @@ do_multicall(
- struct mc_state *mcs = &current->mc_state;
- uint32_t i;
- int rc = 0;
-+ enum mc_disposition disp = mc_continue;
-
- if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) )
- {
-@@ -50,7 +51,7 @@ do_multicall(
- if ( unlikely(!guest_handle_okay(call_list, nr_calls)) )
- rc = -EFAULT;
-
-- for ( i = 0; !rc && i < nr_calls; i++ )
-+ for ( i = 0; !rc && disp == mc_continue && i < nr_calls; i++ )
- {
- if ( i && hypercall_preempt_check() )
- goto preempted;
-@@ -63,7 +64,7 @@ do_multicall(
-
- trace_multicall_call(&mcs->call);
-
-- do_multicall_call(&mcs->call);
-+ disp = do_multicall_call(&mcs->call);
-
- #ifndef NDEBUG
- {
-@@ -77,7 +78,14 @@ do_multicall(
- }
- #endif
-
-- if ( unlikely(__copy_field_to_guest(call_list, &mcs->call, result)) )
-+ if ( unlikely(disp == mc_exit) )
-+ {
-+ if ( __copy_field_to_guest(call_list, &mcs->call, result) )
-+ /* nothing, best effort only */;
-+ rc = mcs->call.result;
-+ }
-+ else if ( unlikely(__copy_field_to_guest(call_list, &mcs->call,
-+ result)) )
- rc = -EFAULT;
- else if ( test_bit(_MCSF_call_preempted, &mcs->flags) )
- {
-@@ -93,6 +101,9 @@ do_multicall(
- guest_handle_add_offset(call_list, 1);
- }
-
-+ if ( unlikely(disp == mc_preempt) && i < nr_calls )
-+ goto preempted;
-+
- perfc_incr(calls_to_multicall);
- perfc_add(calls_from_multicall, i);
- mcs->flags = 0;
---- a/xen/include/asm-arm/multicall.h
-+++ b/xen/include/asm-arm/multicall.h
-@@ -1,7 +1,11 @@
- #ifndef __ASM_ARM_MULTICALL_H__
- #define __ASM_ARM_MULTICALL_H__
-
--extern void do_multicall_call(struct multicall_entry *call);
-+extern enum mc_disposition {
-+ mc_continue,
-+ mc_exit,
-+ mc_preempt,
-+} do_multicall_call(struct multicall_entry *call);
-
- #endif /* __ASM_ARM_MULTICALL_H__ */
- /*
---- a/xen/include/asm-x86/multicall.h
-+++ b/xen/include/asm-x86/multicall.h
-@@ -7,8 +7,21 @@
-
- #include <xen/errno.h>
-
-+enum mc_disposition {
-+ mc_continue,
-+ mc_exit,
-+ mc_preempt,
-+};
-+
-+#define multicall_ret(call) \
-+ (unlikely((call)->op == __HYPERVISOR_iret) \
-+ ? mc_exit \
-+ : likely(guest_kernel_mode(current, \
-+ guest_cpu_user_regs())) \
-+ ? mc_continue : mc_preempt)
-+
- #define do_multicall_call(_call) \
-- do { \
-+ ({ \
- __asm__ __volatile__ ( \
- " movq %c1(%0),%%rax; " \
- " leaq hypercall_table(%%rip),%%rdi; " \
-@@ -37,9 +50,11 @@
- /* all the caller-saves registers */ \
- : "rax", "rcx", "rdx", "rsi", "rdi", \
- "r8", "r9", "r10", "r11" ); \
-- } while ( 0 )
-+ multicall_ret(_call); \
-+ })
-
- #define compat_multicall_call(_call) \
-+ ({ \
- __asm__ __volatile__ ( \
- " movl %c1(%0),%%eax; " \
- " leaq compat_hypercall_table(%%rip),%%rdi; "\
-@@ -67,6 +82,8 @@
- "i" (-ENOSYS) \
- /* all the caller-saves registers */ \
- : "rax", "rcx", "rdx", "rsi", "rdi", \
-- "r8", "r9", "r10", "r11" ) \
-+ "r8", "r9", "r10", "r11" ); \
-+ multicall_ret(_call); \
-+ })
-
- #endif /* __ASM_X86_MULTICALL_H__ */
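The multicall change above is easier to follow as a toy model of the dispatch loop: once a constituent call returns to guest user mode or is HYPERVISOR_iret, no further entries may be processed. The sketch below uses a hypothetical still_in_kernel_mode() helper and only mirrors the control flow, not Xen's real dispatcher:

    enum mc_disposition { mc_continue, mc_exit, mc_preempt };

    struct call { long op, result; };

    #define HYPERVISOR_IRET_OP 23   /* __HYPERVISOR_iret in the public ABI */

    static int still_in_kernel_mode(void) { return 1; }   /* stand-in */

    static enum mc_disposition do_one_call(struct call *c)
    {
        /* ... dispatch c->op and set c->result ... */
        if (c->op == HYPERVISOR_IRET_OP)
            return mc_exit;        /* result must be the guest-specified value */
        return still_in_kernel_mode() ? mc_continue : mc_preempt;
    }

    static long toy_do_multicall(struct call *calls, unsigned int n)
    {
        enum mc_disposition disp = mc_continue;
        unsigned int i;
        long rc = 0;

        for (i = 0; !rc && disp == mc_continue && i < n; i++) {
            disp = do_one_call(&calls[i]);
            if (disp == mc_exit)
                rc = calls[i].result;   /* propagate the iret return value */
        }
        /* disp == mc_preempt would arrange for the remaining entries to be
         * retried from guest kernel mode, like a hypercall continuation. */
        return rc;
    }

    int main(void)
    {
        struct call calls[2] = { { 0, 0 }, { HYPERVISOR_IRET_OP, 123 } };

        return (int)toy_do_multicall(calls, 2);
    }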
diff --git a/main/xen/xsa214.patch b/main/xen/xsa214.patch
deleted file mode 100644
index 46a3d3a4c60..00000000000
--- a/main/xen/xsa214.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: discard type information when stealing pages
-
-While a page having just a single general reference left necessarily
-has a zero type reference count too, its type may still be valid (and
-in validated state; at present this is only possible and relevant for
-PGT_seg_desc_page, as page tables have their type forcibly zapped when
-their type reference count drops to zero, and
-PGT_{writable,shared}_page pages don't require any validation). In
-such a case, when the page is being re-used with the same type again,
-validation is skipped. As validation criteria differ between
-32- and 64-bit guests, pages to be transferred between guests need to
-have their validation indicator zapped (and with it we zap all other
-type information at once).
-
-This is XSA-214.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -4466,6 +4466,17 @@ int steal_page(
- y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
- } while ( y != x );
-
-+ /*
-+ * With the sole reference dropped temporarily, no-one can update type
-+ * information. Type count also needs to be zero in this case, but e.g.
-+ * PGT_seg_desc_page may still have PGT_validated set, which we need to
-+ * clear before transferring ownership (as validation criteria vary
-+ * depending on domain type).
-+ */
-+ BUG_ON(page->u.inuse.type_info & (PGT_count_mask | PGT_locked |
-+ PGT_pinned));
-+ page->u.inuse.type_info = 0;
-+
- /* Swizzle the owner then reinstate the PGC_allocated reference. */
- page_set_owner(page, NULL);
- y = page->count_info;
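A compact illustration of the state the XSA-214 fix above is worried about: the type reference count is zero, yet PGT_validated is still set, so a new owner re-using the same type would skip validation. The flag values below are invented for the example; only the shape of type_info mirrors the description:

    #include <stdint.h>
    #include <stdio.h>

    #define PGT_seg_desc_page (1u << 30)   /* example bits, not Xen's layout */
    #define PGT_validated     (1u << 29)
    #define PGT_count_mask    0x0000ffffu

    int main(void)
    {
        /* Last type reference just dropped: the count is 0, but the page is
         * still marked as a validated descriptor page. */
        uint32_t type_info = PGT_seg_desc_page | PGT_validated;

        printf("type count %u, validated %d\n",
               type_info & PGT_count_mask, !!(type_info & PGT_validated));

        /* The fix: discard all type information before the page changes
         * owner, forcing re-validation under the new owner's rules. */
        type_info = 0;
        printf("after steal_page(): %#x\n", type_info);
        return 0;
    }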
diff --git a/main/xen/xsa215.patch b/main/xen/xsa215.patch
deleted file mode 100644
index f18a1cd01ff..00000000000
--- a/main/xen/xsa215.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: correct create_bounce_frame
-
-We may push up to 96 bytes on the guest (kernel) stack, so we should
-also cover as much in the early range check. Note that this is the
-simplest possible patch, which has the theoretical potential of
-breaking a guest: We only really push 96 bytes when invoking the
-failsafe callback, ordinary exceptions only have 56 or 64 bytes pushed
-(without / with error code respectively). There is, however, no PV OS
-known to place a kernel stack there.
-
-This is XSA-215.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -347,7 +347,7 @@ int80_slow_path:
- jmp handle_exception_saved
-
- /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
--/* { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
-+/* { RCX, R11, [DS-GS,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
- /* %rdx: trap_bounce, %rbx: struct vcpu */
- /* On return only %rbx and %rdx are guaranteed non-clobbered. */
- create_bounce_frame:
-@@ -367,7 +367,7 @@ create_bounce_frame:
- 2: andq $~0xf,%rsi # Stack frames are 16-byte aligned.
- movq $HYPERVISOR_VIRT_START,%rax
- cmpq %rax,%rsi
-- movq $HYPERVISOR_VIRT_END+60,%rax
-+ movq $HYPERVISOR_VIRT_END+12*8,%rax
- sbb %ecx,%ecx # In +ve address space? Then okay.
- cmpq %rax,%rsi
- adc %ecx,%ecx # Above Xen private area? Then okay.
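The 12*8 constant in the XSA-215 patch above comes straight from the frame layout comment: the failsafe callback can push RCX, R11, the four data segment selectors, an error code, and the RIP/CS/RFLAGS/RSP/SS tail. A quick arithmetic check of the sizes quoted in the description:

    #include <stdio.h>

    int main(void)
    {
        int failsafe  = (2 /* RCX, R11 */ + 4 /* DS-GS */ + 1 /* ERRCODE */
                         + 5 /* RIP, CS, RFLAGS, RSP, SS */) * 8;
        int exception = (2 + 5) * 8;   /* plus 8 more with an error code */

        printf("failsafe frame: %d bytes\n", failsafe);           /* 96 */
        printf("exception frame: %d or %d bytes\n",
               exception, exception + 8);                         /* 56 / 64 */
        return 0;
    }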
diff --git a/main/xen/xsa226-4.6.patch b/main/xen/xsa226-4.6.patch
new file mode 100644
index 00000000000..c1892f2076a
--- /dev/null
+++ b/main/xen/xsa226-4.6.patch
@@ -0,0 +1,133 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: grant_table: Default to v1, and disallow transitive grants
+
+The reference counting and locking discipline for transitive grants is broken.
+Their use is therefore declared out of security support.
+
+This is XSA-226.
+
+Transitive grants are expected to be unconditionally available with grant
+table v2. Hiding transitive grants alone is an ABI breakage for the guest.
+Modern versions of Linux and the Windows PV drivers use grant table v1, but
+older versions did use v2.
+
+In principle, disabling gnttab v2 entirely is the safer way to cause guests to
+avoid using transitive grants. However, some older guests which defaulted to
+using gnttab v2 don't tolerate falling back from v2 to v1 over migrate.
+
+This patch introduces a new command line option to control grant table
+behaviour. One suboption allows a choice of the maximum grant table version
+Xen will allow the guest to use, and defaults to v2. A different suboption
+independently controls whether transitive grants can be used.
+
+The default case is:
+
+ gnttab=max_ver:2
+
+To disable gnttab v2 entirely, use:
+
+ gnttab=max_ver:1
+
+To allow gnttab v2 and transitive grants, use:
+
+ gnttab=max_ver:2,transitive
+
+Reported-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
+index d99a20a..113bb29 100644
+--- a/docs/misc/xen-command-line.markdown
++++ b/docs/misc/xen-command-line.markdown
+@@ -733,6 +733,22 @@ Controls EPT related features.
+
+ Specify the serial parameters for the GDB stub.
+
++### gnttab
++> `= List of [ max_ver:<integer>, transitive ]`
++
++> Default: `gnttab=max_ver:2,no-transitive`
++
++Control various aspects of the grant table behaviour available to guests.
++
++* `max_ver` Select the maximum grant table version to offer to guests. Valid
++versions are 1 and 2.
++* `transitive` Permit or disallow the use of transitive grants. Note that the
++use of grant table v2 without transitive grants is an ABI breakage from the
++guest's point of view.
++
++*Warning:*
++Due to XSA-226, the use of transitive grants is outside of security support.
++
+ ### gnttab\_max\_frames
+ > `= <integer>`
+
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index 20230fb..98845c4 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -50,6 +50,42 @@ integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
+ unsigned int __read_mostly max_grant_frames;
+ integer_param("gnttab_max_frames", max_grant_frames);
+
++static unsigned int __read_mostly opt_gnttab_max_version = 2;
++static bool_t __read_mostly opt_transitive_grants;
++
++static void __init parse_gnttab(char *s)
++{
++ char *ss;
++
++ do {
++ ss = strchr(s, ',');
++ if ( ss )
++ *ss = '\0';
++
++ if ( !strncmp(s, "max_ver:", 8) )
++ {
++ long ver = simple_strtol(s + 8, NULL, 10);
++
++ if ( ver >= 1 && ver <= 2 )
++ opt_gnttab_max_version = ver;
++ }
++ else
++ {
++ bool_t val = !!strncmp(s, "no-", 3);
++
++ if ( !val )
++ s += 3;
++
++ if ( !strcmp(s, "transitive") )
++ opt_transitive_grants = val;
++ }
++
++ s = ss + 1;
++ } while ( ss );
++}
++
++custom_param("gnttab", parse_gnttab);
++
+ /* The maximum number of grant mappings is defined as a multiplier of the
+ * maximum number of grant table entries. This defines the multiplier used.
+ * Pretty arbitrary. [POLICY]
+@@ -2175,6 +2211,10 @@ __acquire_grant_for_copy(
+ }
+ else if ( (shah->flags & GTF_type_mask) == GTF_transitive )
+ {
++ if ( !opt_transitive_grants )
++ PIN_FAIL(unlock_out_clear, GNTST_general_error,
++ "transitive grant disallowed by policy\n");
++
+ if ( !allow_transitive )
+ PIN_FAIL(unlock_out_clear, GNTST_general_error,
+ "transitive grant when transitivity not allowed\n");
+@@ -3143,7 +3183,10 @@ do_grant_table_op(
+ }
+ case GNTTABOP_set_version:
+ {
+- rc = gnttab_set_version(guest_handle_cast(uop, gnttab_set_version_t));
++ if ( opt_gnttab_max_version == 1 )
++ rc = -ENOSYS; /* Behave as before set_version was introduced. */
++ else
++ rc = gnttab_set_version(guest_handle_cast(uop, gnttab_set_version_t));
+ break;
+ }
+ case GNTTABOP_get_status_frames:
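For reference, the suboption parser added above can be exercised on its own. The stand-alone harness below mirrors the logic of parse_gnttab() so the example strings from the description can be traced; it is a sketch for illustration, not Xen code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned int opt_gnttab_max_version = 2;
    static int opt_transitive_grants;

    static void parse_gnttab(char *s)
    {
        char *ss;

        do {
            ss = strchr(s, ',');
            if (ss)
                *ss = '\0';

            if (!strncmp(s, "max_ver:", 8)) {
                long ver = strtol(s + 8, NULL, 10);

                if (ver >= 1 && ver <= 2)
                    opt_gnttab_max_version = ver;
            } else {
                int val = !!strncmp(s, "no-", 3);

                if (!val)
                    s += 3;
                if (!strcmp(s, "transitive"))
                    opt_transitive_grants = val;
            }

            if (ss)
                s = ss + 1;
        } while (ss);
    }

    int main(void)
    {
        char opt[] = "max_ver:2,transitive";   /* third example above */

        parse_gnttab(opt);
        printf("max_ver=%u transitive=%d\n",
               opt_gnttab_max_version, opt_transitive_grants);
        return 0;
    }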
diff --git a/main/xen/xsa227-4.6.patch b/main/xen/xsa227-4.6.patch
new file mode 100644
index 00000000000..3238cd90098
--- /dev/null
+++ b/main/xen/xsa227-4.6.patch
@@ -0,0 +1,66 @@
+From 697edc414352e89f29ca3de744a76c1625c0466c Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Tue, 20 Jun 2017 19:18:54 +0100
+Subject: [PATCH] x86/grant: Disallow misaligned PTEs
+
+Pagetable entries must be aligned to function correctly. Disallow attempts
+from the guest to have a grant PTE created at a misaligned address, which
+would result in corruption of the L1 table with largely-guest-controlled
+values.
+
+This is XSA-227.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/mm.c | 13 +++++++++++++
+ xen/include/xen/config.h | 2 ++
+ 2 files changed, 15 insertions(+)
+
+diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
+index 213b52a..3bf728b 100644
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -3878,6 +3878,9 @@ static int create_grant_pte_mapping(
+ l1_pgentry_t ol1e;
+ struct domain *d = v->domain;
+
++ if ( !IS_ALIGNED(pte_addr, sizeof(nl1e)) )
++ return GNTST_general_error;
++
+ adjust_guest_l1e(nl1e, d);
+
+ gmfn = pte_addr >> PAGE_SHIFT;
+@@ -3935,6 +3938,16 @@ static int destroy_grant_pte_mapping(
+ struct page_info *page;
+ l1_pgentry_t ol1e;
+
++ /*
++ * addr comes from Xen's active_entry tracking so isn't guest controlled,
++ * but it had still better be PTE-aligned.
++ */
++ if ( !IS_ALIGNED(addr, sizeof(ol1e)) )
++ {
++ ASSERT_UNREACHABLE();
++ return GNTST_general_error;
++ }
++
+ gmfn = addr >> PAGE_SHIFT;
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+diff --git a/xen/include/xen/config.h b/xen/include/xen/config.h
+index f7258c7..ded8156 100644
+--- a/xen/include/xen/config.h
++++ b/xen/include/xen/config.h
+@@ -72,6 +72,8 @@
+ #define MB(_mb) (_AC(_mb, ULL) << 20)
+ #define GB(_gb) (_AC(_gb, ULL) << 30)
+
++#define IS_ALIGNED(val, align) (((val) & ((align) - 1)) == 0)
++
+ #define __STR(...) #__VA_ARGS__
+ #define STR(...) __STR(__VA_ARGS__)
+
+--
+2.1.4
+
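The whole fix above rests on the IS_ALIGNED() test; a short check with arbitrary example addresses shows what it accepts and rejects for an 8-byte PTE:

    #include <stdio.h>
    #include <stdint.h>

    #define IS_ALIGNED(val, align) (((val) & ((align) - 1)) == 0)

    int main(void)
    {
        uint64_t ok  = 0x1000;   /* 8-byte aligned grant PTE address */
        uint64_t bad = 0x1004;   /* misaligned: would be refused with
                                    GNTST_general_error */

        printf("0x%llx aligned: %d\n", (unsigned long long)ok,
               (int)IS_ALIGNED(ok, sizeof(uint64_t)));
        printf("0x%llx aligned: %d\n", (unsigned long long)bad,
               (int)IS_ALIGNED(bad, sizeof(uint64_t)));
        return 0;
    }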
diff --git a/main/xen/xsa228-4.8.patch b/main/xen/xsa228-4.8.patch
new file mode 100644
index 00000000000..57e6661cdb3
--- /dev/null
+++ b/main/xen/xsa228-4.8.patch
@@ -0,0 +1,198 @@
+From cb91f4c43bd4158daa6561c73921a6455176f278 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Mon, 31 Jul 2017 15:17:56 +0100
+Subject: [PATCH] gnttab: split maptrack lock to make it fulfill its purpose
+ again
+
+The way the lock is currently being used in get_maptrack_handle(), it
+protects only the maptrack limit: The function acts on current's list
+only, so races on list accesses are impossible even without the lock.
+
+On the other hand, list access races are possible between
+__get_maptrack_handle() and put_maptrack_handle(), due to the
+invocation of the former for other
+than current from steal_maptrack_handle(). Introduce a per-vCPU lock
+for list accesses to become race free again. This lock will be
+uncontended except when it becomes necessary to take the steal path,
+i.e. in the common case there should be no meaningful performance
+impact.
+
+When get_maptrack_handle() adds a stolen entry to a fresh, empty
+freelist, we think that there is probably no concurrency. However,
+this is not a fast path, and adding the locking there makes the code
+clearly correct.
+
+Also, while we are here: the stolen maptrack_entry's tail pointer was
+not properly set. Set it.
+
+This is XSA-228.
+
+Reported-by: Ian Jackson <ian.jackson@eu.citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
+---
+ docs/misc/grant-tables.txt | 7 ++++++-
+ xen/common/grant_table.c | 30 ++++++++++++++++++++++++------
+ xen/include/xen/grant_table.h | 2 +-
+ xen/include/xen/sched.h | 1 +
+ 4 files changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/docs/misc/grant-tables.txt b/docs/misc/grant-tables.txt
+index 417ce2d..64da5cf 100644
+--- a/docs/misc/grant-tables.txt
++++ b/docs/misc/grant-tables.txt
+@@ -87,7 +87,8 @@ is complete.
+ inconsistent grant table state such as current
+ version, partially initialized active table pages,
+ etc.
+- grant_table->maptrack_lock : spinlock used to protect the maptrack free list
++ grant_table->maptrack_lock : spinlock used to protect the maptrack limit
++ v->maptrack_freelist_lock : spinlock used to protect the maptrack free list
+ active_grant_entry->lock : spinlock used to serialize modifications to
+ active entries
+
+@@ -102,6 +103,10 @@ is complete.
+ The maptrack free list is protected by its own spinlock. The maptrack
+ lock may be locked while holding the grant table lock.
+
++ The maptrack_freelist_lock is an innermost lock. It may be locked
++ while holding other locks, but no other locks may be acquired within
++ it.
++
+ Active entries are obtained by calling active_entry_acquire(gt, ref).
+ This function returns a pointer to the active entry after locking its
+ spinlock. The caller must hold the grant table read lock before
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index f9654f1..593121c 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -304,11 +304,16 @@ __get_maptrack_handle(
+ {
+ unsigned int head, next, prev_head;
+
++ spin_lock(&v->maptrack_freelist_lock);
++
+ do {
+ /* No maptrack pages allocated for this VCPU yet? */
+ head = read_atomic(&v->maptrack_head);
+ if ( unlikely(head == MAPTRACK_TAIL) )
++ {
++ spin_unlock(&v->maptrack_freelist_lock);
+ return -1;
++ }
+
+ /*
+ * Always keep one entry in the free list to make it easier to
+@@ -316,12 +321,17 @@ __get_maptrack_handle(
+ */
+ next = read_atomic(&maptrack_entry(t, head).ref);
+ if ( unlikely(next == MAPTRACK_TAIL) )
++ {
++ spin_unlock(&v->maptrack_freelist_lock);
+ return -1;
++ }
+
+ prev_head = head;
+ head = cmpxchg(&v->maptrack_head, prev_head, next);
+ } while ( head != prev_head );
+
++ spin_unlock(&v->maptrack_freelist_lock);
++
+ return head;
+ }
+
+@@ -380,6 +390,8 @@ put_maptrack_handle(
+ /* 2. Add entry to the tail of the list on the original VCPU. */
+ v = currd->vcpu[maptrack_entry(t, handle).vcpu];
+
++ spin_lock(&v->maptrack_freelist_lock);
++
+ cur_tail = read_atomic(&v->maptrack_tail);
+ do {
+ prev_tail = cur_tail;
+@@ -388,6 +400,8 @@ put_maptrack_handle(
+
+ /* 3. Update the old tail entry to point to the new entry. */
+ write_atomic(&maptrack_entry(t, prev_tail).ref, handle);
++
++ spin_unlock(&v->maptrack_freelist_lock);
+ }
+
+ static inline int
+@@ -411,10 +425,6 @@ get_maptrack_handle(
+ */
+ if ( nr_maptrack_frames(lgt) >= max_maptrack_frames )
+ {
+- /*
+- * Can drop the lock since no other VCPU can be adding a new
+- * frame once they've run out.
+- */
+ spin_unlock(&lgt->maptrack_lock);
+
+ /*
+@@ -426,8 +436,12 @@ get_maptrack_handle(
+ handle = steal_maptrack_handle(lgt, curr);
+ if ( handle == -1 )
+ return -1;
++ spin_lock(&curr->maptrack_freelist_lock);
++ maptrack_entry(lgt, handle).ref = MAPTRACK_TAIL;
+ curr->maptrack_tail = handle;
+- write_atomic(&curr->maptrack_head, handle);
++ if ( curr->maptrack_head == MAPTRACK_TAIL )
++ write_atomic(&curr->maptrack_head, handle);
++ spin_unlock(&curr->maptrack_freelist_lock);
+ }
+ return steal_maptrack_handle(lgt, curr);
+ }
+@@ -460,12 +474,15 @@ get_maptrack_handle(
+ smp_wmb();
+ lgt->maptrack_limit += MAPTRACK_PER_PAGE;
+
++ spin_unlock(&lgt->maptrack_lock);
++ spin_lock(&curr->maptrack_freelist_lock);
++
+ do {
+ new_mt[i - 1].ref = read_atomic(&curr->maptrack_head);
+ head = cmpxchg(&curr->maptrack_head, new_mt[i - 1].ref, handle + 1);
+ } while ( head != new_mt[i - 1].ref );
+
+- spin_unlock(&lgt->maptrack_lock);
++ spin_unlock(&curr->maptrack_freelist_lock);
+
+ return handle;
+ }
+@@ -3474,6 +3491,7 @@ grant_table_destroy(
+
+ void grant_table_init_vcpu(struct vcpu *v)
+ {
++ spin_lock_init(&v->maptrack_freelist_lock);
+ v->maptrack_head = MAPTRACK_TAIL;
+ v->maptrack_tail = MAPTRACK_TAIL;
+ }
+diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
+index 4e77899..100f2b3 100644
+--- a/xen/include/xen/grant_table.h
++++ b/xen/include/xen/grant_table.h
+@@ -78,7 +78,7 @@ struct grant_table {
+ /* Mapping tracking table per vcpu. */
+ struct grant_mapping **maptrack;
+ unsigned int maptrack_limit;
+- /* Lock protecting the maptrack page list, head, and limit */
++ /* Lock protecting the maptrack limit */
+ spinlock_t maptrack_lock;
+ /* The defined versions are 1 and 2. Set to 0 if we don't know
+ what version to use yet. */
+diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
+index 1fbda87..ff0f38f 100644
+--- a/xen/include/xen/sched.h
++++ b/xen/include/xen/sched.h
+@@ -223,6 +223,7 @@ struct vcpu
+ int controller_pause_count;
+
+ /* Maptrack */
++ spinlock_t maptrack_freelist_lock;
+ unsigned int maptrack_head;
+ unsigned int maptrack_tail;
+
+--
+2.1.4
+
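The locking rule the XSA-228 patch above documents (maptrack_freelist_lock is an innermost, per-vCPU lock under which nothing else is acquired) can be summarised in a small sketch using pthread mutexes as stand-ins for Xen's spinlocks; the structure and function names are illustrative only:

    #include <pthread.h>

    struct grant_table { pthread_mutex_t maptrack_lock;          /* limit only */ };
    struct vcpu        { pthread_mutex_t maptrack_freelist_lock; /* free list  */ };

    /* Mirrors the order used by get_maptrack_handle() after the fix: the
     * grant-table lock guards growing the table and is dropped before the
     * per-vCPU freelist lock is taken; nothing else is ever acquired while
     * the freelist lock is held. */
    static void grow_and_publish(struct grant_table *gt, struct vcpu *v)
    {
        pthread_mutex_lock(&gt->maptrack_lock);
        /* ... allocate a new maptrack frame, bump maptrack_limit ... */
        pthread_mutex_unlock(&gt->maptrack_lock);

        pthread_mutex_lock(&v->maptrack_freelist_lock);
        /* ... splice the new entries onto v's free list ... */
        pthread_mutex_unlock(&v->maptrack_freelist_lock);
    }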