author     Henrik Riomar <henrik.riomar@gmail.com>  2019-05-15 21:08:48 +0200
committer  Leonardo Arena <rnalrd@alpinelinux.org>  2019-05-17 11:53:14 +0000
commit     1d0fe0196f9102c4c9edf2965deb91b142688924 (patch)
tree       099db8846e2be183935ce05e3c0ed5e3b4e0b901
parent     58b26b850f7aa6ed54f78bbb7af1f0b651744f50 (diff)
main/xen: XSA-297
CVE-2018-12126 CVE-2018-12127 CVE-2018-12130 CVE-2019-11091

Also includes needed patches from the xen stable-4.11 branch

Signed-off-by: Leonardo Arena <rnalrd@alpinelinux.org>
-rw-r--r--  main/xen/APKBUILD                                                    |  34
-rw-r--r--  main/xen/x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch          |  71
-rw-r--r--  main/xen/x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch  |  68
-rw-r--r--  main/xen/x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch   | 194
-rw-r--r--  main/xen/xen-Fix-backport-of-x86-tsx-Implement-controls-for-R.patch  |  34
-rw-r--r--  main/xen/xen-Fix-backport-of-xen-cmdline-Fix-buggy-strncmp-s-.patch  |  52
-rw-r--r--  main/xen/xen-cmdline-Fix-buggy-strncmp-s-LITERAL-ss-s-constru.patch  | 464
-rw-r--r--  main/xen/xsa297-4.11-1.patch                                         | 163
-rw-r--r--  main/xen/xsa297-4.11-2.patch                                         |  54
-rw-r--r--  main/xen/xsa297-4.11-3.patch                                         | 109
-rw-r--r--  main/xen/xsa297-4.11-4.patch                                         |  55
-rw-r--r--  main/xen/xsa297-4.11-5.patch                                         | 141
-rw-r--r--  main/xen/xsa297-4.11-6.patch                                         | 134
-rw-r--r--  main/xen/xsa297-4.11-7.patch                                         | 316
14 files changed, 1888 insertions(+), 1 deletion(-)
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 249416f685c..5e014fa2e61 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.11.1
-pkgrel=1
+pkgrel=2
pkgdesc="Xen hypervisor"
url="https://www.xenproject.org/"
arch="x86_64 armhf aarch64" # enable armv7 when builds with gcc8
@@ -139,6 +139,11 @@ options="!strip"
# - CVE-2018-19965 XSA-279
# - CVE-2018-19966 XSA-280
# - CVE-2018-19967 XSA-282
+# 4.11.1-r2:
+# - CVE-2018-12126 XSA-297
+# - CVE-2018-12127 XSA-297
+# - CVE-2018-12130 XSA-297
+# - CVE-2019-11091 XSA-297
case "$CARCH" in
x86*)
@@ -206,6 +211,20 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
hotplug-Linux-iscsi-block-handle-lun-1.patch
+ x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch
+ x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch
+ xen-cmdline-Fix-buggy-strncmp-s-LITERAL-ss-s-constru.patch
+ xen-Fix-backport-of-xen-cmdline-Fix-buggy-strncmp-s-.patch
+ x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch
+ xen-Fix-backport-of-x86-tsx-Implement-controls-for-R.patch
+ xsa297-4.11-1.patch
+ xsa297-4.11-2.patch
+ xsa297-4.11-3.patch
+ xsa297-4.11-4.patch
+ xsa297-4.11-5.patch
+ xsa297-4.11-6.patch
+ xsa297-4.11-7.patch
+
xenstored.initd
xenstored.confd
xenconsoled.initd
@@ -462,6 +481,19 @@ e76816c6ad0e91dc5f81947f266da3429b20e6d976c3e8c41202c6179532eec878a3f0913921ef3a
69dfa60628ca838678862383528654ecbdf4269cbb5c9cfb6b84d976202a8dea85d711aa65a52fa1b477fb0b30604ca70cf1337192d6fb9388a08bbe7fe56077 xenstore_client_transaction_fix.patch
2094ea964fa610b2bf72fd2c7ede7e954899a75c0f5b08030cf1d74460fb759ade84866176e32f8fe29c921dfdc6dafd2b31e23ab9b0a3874d3dceeabdd1913b xenqemu-xattr-size-max.patch
8c9cfc6afca325df1d8026e21ed03fa8cd2c7e1a21a56cc1968301c5ab634bfe849951899e75d328951d7a41273d1e49a2448edbadec0029ed410c43c0549812 hotplug-Linux-iscsi-block-handle-lun-1.patch
+a74683f3f69b27ad19e6d50f829e5c4bc0cfdc572a5c02c655a2dff7f11aa835b348ed293a35a48c6d93310d40cb0ba9dcfd63289df89684a448fed100abdc1d x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch
+79f4fa555c3c575d2fc93d8421bb4dcb26a0eecf644193498b351dc145027293eb7bab34064ae5b2903f95b0651997d3c1ce63e22e91ae341b4ab9c87340a29a x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch
+404b67fee48aea9d93152a911c8dd59b1aef9085780b9d848acac09ea70a009a57a888aa3fe2c26ce5f176731336c5eb0faf4e42255b1d64ef53ce9af82cde9a xen-cmdline-Fix-buggy-strncmp-s-LITERAL-ss-s-constru.patch
+a99890aebf1dacf0687bec794c1e4230be73ff7b9a44a83f0d761b2f5cbfb370b954ad6c32f54c029f9671e03c81660dbd0ca0a10b176240ad650a3ba68cae81 xen-Fix-backport-of-xen-cmdline-Fix-buggy-strncmp-s-.patch
+7809eb2b777684a8adc6866def8c3c6f6c835ff8a3161ae321a52e803525c1122f1253bf1522ec057f3f7f817335e7e2da242dc6884a6811dac40c4fed00abb1 x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch
+3b34c063565b70f4aa84b2bd3ef450d549cd54e623d20b793a080e439e86622d50287583612a78f0f4dd7b2ba578011fd414364ea23e9ccf7af2eb22a62933e6 xen-Fix-backport-of-x86-tsx-Implement-controls-for-R.patch
+3d6d10039ffa87b47755b4fed8ba055078a5d0cd2681edf96dfd48683d62fb4bfd6634d8b204396ee6dfe4399558b8c1824a341c53b5f810bc1db916794edda2 xsa297-4.11-1.patch
+4d09e53dd839a46ee9a50cedc4c7ed6e308e7a6b579d686f9ce44b774cdbfcc0f914b4eda62c2ba5c795d1e31e8566ee250efad141d5a02b9c2e87bbf26b4365 xsa297-4.11-2.patch
+1b6b02823a4c2328390a290a0af5bf0642b8d89d8491ca8ce1df968b36aad76e11a0af5243ce13d8ae4ee40e97d93e7ea515c655c9bdca85c77dcaeb90a27b43 xsa297-4.11-3.patch
+de773e10d1c9fa223f89c030a2095cf476852c5339329a972780908d916a25c3687e0ac6eaf491f14d93679589c9c7d4ba287855d27fef2a82cca5313e0a9597 xsa297-4.11-4.patch
+0106761bbeaa60d556552677bd8e2a08ca1b06cc672d74ebf25eec063f97ee200c6edfc84bab154b6f156b8a06688525aaa6900cc9a8ab8b71bc65a50c052654 xsa297-4.11-5.patch
+d0a68c1f7be1a258fd147e7644a2cdfa8be576f5b81e1dbf4cd9f453152336e778e30969905300bd32d6cde65ad939069f2b4ee3e0ad1da3d3d29e7fd90682c5 xsa297-4.11-6.patch
+52ae2654bfa8864e5631b7a47abb552af388d88a46f307bf2c257f3189e25edb9699454ba1b3af22cf950fbe0252531dff470751ccec04a4b2c492cc3d2c344b xsa297-4.11-7.patch
52c43beb2596d645934d0f909f2d21f7587b6898ed5e5e7046799a8ed6d58f7a09c5809e1634fa26152f3fd4f3e7cfa07da7076f01b4a20cc8f5df8b9cb77e50 xenstored.initd
093f7fbd43faf0a16a226486a0776bade5dc1681d281c5946a3191c32d74f9699c6bf5d0ab8de9d1195a2461165d1660788e92a3156c9b3c7054d7b2d52d7ff0 xenstored.confd
3c86ed48fbee0af4051c65c4a3893f131fa66e47bf083caf20c9b6aa4b63fdead8832f84a58d0e27964bc49ec8397251b34e5be5c212c139f556916dc8da9523 xenconsoled.initd
diff --git a/main/xen/x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch b/main/xen/x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch
new file mode 100644
index 00000000000..14a92178bcc
--- /dev/null
+++ b/main/xen/x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch
@@ -0,0 +1,71 @@
+From 0825fbdd62724577febeff11ae50d440992a8f11 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:55:10 +0200
+Subject: [PATCH 1/2] x86/msr: Shorten ARCH_CAPABILITIES_* constants
+
+They are unnecessarily verbose, and ARCH_CAPS_* is already the more common
+version.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+master commit: ba27aaa88548c824a47dcf5609288ee1c05d2946
+master date: 2019-03-18 16:26:40 +0000
+---
+ xen/arch/x86/spec_ctrl.c | 10 +++++-----
+ xen/include/asm-x86/msr-index.h | 4 ++--
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index e641894f17..27b1158d84 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -286,8 +286,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_SSBD)) ? " SSBD" : "",
+ (e8b & cpufeat_mask(X86_FEATURE_IBPB)) ? " IBPB" : "",
+- (caps & ARCH_CAPABILITIES_IBRS_ALL) ? " IBRS_ALL" : "",
+- (caps & ARCH_CAPABILITIES_RDCL_NO) ? " RDCL_NO" : "",
++ (caps & ARCH_CAPS_IBRS_ALL) ? " IBRS_ALL" : "",
++ (caps & ARCH_CAPS_RDCL_NO) ? " RDCL_NO" : "",
+ (caps & ARCH_CAPS_RSBA) ? " RSBA" : "",
+ (caps & ARCH_CAPS_SKIP_L1DFL) ? " SKIP_L1DFL": "",
+ (caps & ARCH_CAPS_SSB_NO) ? " SSB_NO" : "");
+@@ -598,7 +598,7 @@ static __init void l1tf_calculations(uint64_t caps)
+ }
+
+ /* Any processor advertising RDCL_NO should be not vulnerable to L1TF. */
+- if ( caps & ARCH_CAPABILITIES_RDCL_NO )
++ if ( caps & ARCH_CAPS_RDCL_NO )
+ cpu_has_bug_l1tf = false;
+
+ if ( cpu_has_bug_l1tf && hit_default )
+@@ -662,9 +662,9 @@ int8_t __read_mostly opt_xpti_domu = -1;
+ static __init void xpti_init_default(uint64_t caps)
+ {
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+- caps = ARCH_CAPABILITIES_RDCL_NO;
++ caps = ARCH_CAPS_RDCL_NO;
+
+- if ( caps & ARCH_CAPABILITIES_RDCL_NO )
++ if ( caps & ARCH_CAPS_RDCL_NO )
+ {
+ if ( opt_xpti_hwdom < 0 )
+ opt_xpti_hwdom = 0;
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index d13308ffe0..7588fc1567 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -44,8 +44,8 @@
+ #define PRED_CMD_IBPB (_AC(1, ULL) << 0)
+
+ #define MSR_ARCH_CAPABILITIES 0x0000010a
+-#define ARCH_CAPABILITIES_RDCL_NO (_AC(1, ULL) << 0)
+-#define ARCH_CAPABILITIES_IBRS_ALL (_AC(1, ULL) << 1)
++#define ARCH_CAPS_RDCL_NO (_AC(1, ULL) << 0)
++#define ARCH_CAPS_IBRS_ALL (_AC(1, ULL) << 1)
+ #define ARCH_CAPS_RSBA (_AC(1, ULL) << 2)
+ #define ARCH_CAPS_SKIP_L1DFL (_AC(1, ULL) << 3)
+ #define ARCH_CAPS_SSB_NO (_AC(1, ULL) << 4)
+--
+2.20.1
+
diff --git a/main/xen/x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch b/main/xen/x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch
new file mode 100644
index 00000000000..e311f8cec6a
--- /dev/null
+++ b/main/xen/x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch
@@ -0,0 +1,68 @@
+From 3b062f5040a103d86b44c5e8412ff9555b00d06c Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:55:55 +0200
+Subject: [PATCH 2/2] x86/spec-ctrl: Extend repoline safey calcuations for
+ eIBRS and Atom parts
+
+All currently-released Atom processors are in practice retpoline-safe, because
+they don't fall back to a BTB prediction on RSB underflow.
+
+However, an additional meaning of Enhanced IBRS is that the processor may not
+be retpoline-safe. The Gemini Lake platform, based on the Goldmont Plus
+microarchitecture is the first Atom processor to support eIBRS.
+
+Until Xen gets full eIBRS support, Gemini Lake will still be safe using
+regular IBRS.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+master commit: 17f74242ccf0ce6e51c03a5860947865c0ef0dc2
+master date: 2019-03-18 16:26:40 +0000
+---
+ xen/arch/x86/spec_ctrl.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 27b1158d84..8fa6c10528 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -365,8 +365,11 @@ static bool __init retpoline_safe(uint64_t caps)
+ /*
+ * RSBA may be set by a hypervisor to indicate that we may move to a
+ * processor which isn't retpoline-safe.
++ *
++ * Processors offering Enhanced IBRS are not guaranteed to be
++ * retpoline-safe.
+ */
+- if ( caps & ARCH_CAPS_RSBA )
++ if ( caps & (ARCH_CAPS_RSBA | ARCH_CAPS_IBRS_ALL) )
+ return false;
+
+ switch ( boot_cpu_data.x86_model )
+@@ -426,6 +429,23 @@ static bool __init retpoline_safe(uint64_t caps)
+ case 0x9e:
+ return false;
+
++ /*
++ * Atom processors before Goldmont Plus/Gemini Lake are retpoline-safe.
++ */
++ case 0x1c: /* Pineview */
++ case 0x26: /* Lincroft */
++ case 0x27: /* Penwell */
++ case 0x35: /* Cloverview */
++ case 0x36: /* Cedarview */
++ case 0x37: /* Baytrail / Valleyview (Silvermont) */
++ case 0x4d: /* Avaton / Rangely (Silvermont) */
++ case 0x4c: /* Cherrytrail / Brasswell */
++ case 0x4a: /* Merrifield */
++ case 0x5a: /* Moorefield */
++ case 0x5c: /* Goldmont */
++ case 0x5f: /* Denverton */
++ return true;
++
+ default:
+ printk("Unrecognised CPU model %#x - assuming not retpoline safe\n",
+ boot_cpu_data.x86_model);
+--
+2.20.1
+
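
For readers skimming the series: the hunk above boils down to a single predicate. Below is a minimal standalone sketch in C using the bit positions from msr-index.h earlier in this commit; everything outside the two constants is illustrative, not Xen code.

    #include <stdbool.h>
    #include <stdint.h>

    #define ARCH_CAPS_IBRS_ALL (UINT64_C(1) << 1) /* enhanced IBRS available */
    #define ARCH_CAPS_RSBA     (UINT64_C(1) << 2) /* may migrate to unsafe CPU */

    /* Retpoline cannot be trusted if either bit is advertised in
     * MSR_ARCH_CAPABILITIES: RSBA because a hypervisor may move us to a
     * non-retpoline-safe processor, IBRS_ALL because eIBRS parts are not
     * guaranteed retpoline-safe. */
    static bool retpoline_safe_wrt_caps(uint64_t caps)
    {
        return !(caps & (ARCH_CAPS_RSBA | ARCH_CAPS_IBRS_ALL));
    }
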
diff --git a/main/xen/x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch b/main/xen/x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch
new file mode 100644
index 00000000000..880fc54f3c4
--- /dev/null
+++ b/main/xen/x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch
@@ -0,0 +1,194 @@
+From fc46e159a6b1c379a0dac918120267b606d4defd Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Mon, 18 Mar 2019 17:08:25 +0100
+Subject: [PATCH] x86/tsx: Implement controls for RTM force-abort mode
+
+The CPUID bit and MSR are deliberately not exposed to guests, because they
+won't exist on newer processors. As vPMU isn't security supported, the
+misbehaviour of PCR3 isn't expected to impact production deployments.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+master commit: 6be613f29b4205349275d24367bd4c82fb2960dd
+master date: 2019-03-12 17:05:21 +0000
+---
+ docs/misc/xen-command-line.markdown | 17 ++++++++++++++++-
+ tools/misc/xen-cpuid.c | 2 ++
+ xen/arch/x86/cpu/intel.c | 3 +++
+ xen/arch/x86/cpu/vpmu.c | 5 +++++
+ xen/arch/x86/msr.c | 4 ++++
+ xen/include/asm-x86/cpufeature.h | 3 +++
+ xen/include/asm-x86/msr-index.h | 3 +++
+ xen/include/asm-x86/vpmu.h | 1 +
+ xen/include/public/arch-x86/cpufeatureset.h | 1 +
+ 9 files changed, 38 insertions(+), 1 deletion(-)
+
+diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
+index 8046cc8333..8e243808a1 100644
+--- a/docs/misc/xen-command-line.markdown
++++ b/docs/misc/xen-command-line.markdown
+@@ -2008,7 +2008,7 @@ Use Virtual Processor ID support if available. This prevents the need for TLB
+ flushes on VM entry and exit, increasing performance.
+
+ ### vpmu (x86)
+-> `= ( <boolean> | { bts | ipc | arch [, ...] } )`
++> `= ( <boolean> | { bts | ipc | arch | rtm-abort=<bool> [, ...] } )`
+
+ > Default: `off`
+
+@@ -2034,6 +2034,21 @@ in the Pre-Defined Architectural Performance Events table from the Intel 64
+ and IA-32 Architectures Software Developer's Manual, Volume 3B, System
+ Programming Guide, Part 2.
+
++vpmu=rtm-abort controls a trade-off between working Restricted Transactional
++Memory, and working performance counters.
++
++All processors released to date (Q1 2019) supporting Transactional Memory
++Extensions suffer an erratum which has been addressed in microcode.
++
++Processors based on the Skylake microarchitecture with up-to-date
++microcode internally use performance counter 3 to work around the erratum.
++A consequence is that the counter gets reprogrammed whenever an `XBEGIN`
++instruction is executed.
++
++An alternative mode exists where PCR3 behaves as before, at the cost of
++`XBEGIN` unconditionally aborting. Enabling `rtm-abort` mode will
++activate this alternative mode.
++
+ If a boolean is not used, combinations of flags are allowed, comma separated.
+ For example, vpmu=arch,bts.
+
+diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
+index 3888b4e158..0ac903a931 100644
+--- a/tools/misc/xen-cpuid.c
++++ b/tools/misc/xen-cpuid.c
+@@ -142,6 +142,8 @@ static const char *str_7d0[32] =
+ {
+ [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
+
++ /* 12 */ [13] = "tsx-force-abort",
++
+ [26] = "ibrsb", [27] = "stibp",
+ [28] = "l1d_flush", [29] = "arch_caps",
+ /* 30 */ [31] = "ssbd",
+diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
+index 9477965321..8e23ed6379 100644
+--- a/xen/arch/x86/cpu/intel.c
++++ b/xen/arch/x86/cpu/intel.c
+@@ -287,6 +287,9 @@ static void Intel_errata_workarounds(struct cpuinfo_x86 *c)
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ __set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
++
++ if (cpu_has_tsx_force_abort && opt_rtm_abort)
++ wrmsrl(MSR_TSX_FORCE_ABORT, TSX_FORCE_ABORT_RTM);
+ }
+
+
+diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
+index 2be61606b4..639ae0ca63 100644
+--- a/xen/arch/x86/cpu/vpmu.c
++++ b/xen/arch/x86/cpu/vpmu.c
+@@ -53,6 +53,7 @@ CHECK_pmu_params;
+ static unsigned int __read_mostly opt_vpmu_enabled;
+ unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
+ unsigned int __read_mostly vpmu_features = 0;
++bool __read_mostly opt_rtm_abort;
+ static int parse_vpmu_params(const char *s);
+ custom_param("vpmu", parse_vpmu_params);
+
+@@ -71,6 +72,8 @@ static int __init parse_vpmu_params(const char *s)
+ break;
+ default:
+ do {
++ int val;
++
+ ss = strchr(s, ',');
+ if ( !ss )
+ ss = strchr(s, '\0');
+@@ -81,6 +84,8 @@ static int __init parse_vpmu_params(const char *s)
+ vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
+ else if ( !cmdline_strcmp(s, "arch") )
+ vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
++ else if ( (val = parse_boolean("rtm-abort", s, ss)) >= 0 )
++ opt_rtm_abort = val;
+ else
+ return -EINVAL;
+
+diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
+index 1a591dd2b5..b49fbd8077 100644
+--- a/xen/arch/x86/msr.c
++++ b/xen/arch/x86/msr.c
+@@ -152,6 +152,8 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+ case MSR_PRED_CMD:
+ case MSR_FLUSH_CMD:
+ /* Write-only */
++ case MSR_TSX_FORCE_ABORT:
++ /* Not offered to guests. */
+ goto gp_fault;
+
+ case MSR_SPEC_CTRL:
+@@ -203,6 +205,8 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+ case MSR_INTEL_PLATFORM_INFO:
+ case MSR_ARCH_CAPABILITIES:
+ /* Read-only */
++ case MSR_TSX_FORCE_ABORT:
++ /* Not offered to guests. */
+ goto gp_fault;
+
+ case MSR_AMD_PATCHLOADER:
+diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
+index 861cb0af93..1c699a8def 100644
+--- a/xen/include/asm-x86/cpufeature.h
++++ b/xen/include/asm-x86/cpufeature.h
+@@ -106,6 +106,9 @@
+ /* CPUID level 0x80000007.edx */
+ #define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC)
+
++/* CPUID level 0x00000007:0.edx */
++#define cpu_has_tsx_force_abort boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)
++
+ /* Synthesized. */
+ #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+ #define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 7235623c86..d13308ffe0 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -53,6 +53,9 @@
+ #define MSR_FLUSH_CMD 0x0000010b
+ #define FLUSH_CMD_L1D (_AC(1, ULL) << 0)
+
++#define MSR_TSX_FORCE_ABORT 0x0000010f
++#define TSX_FORCE_ABORT_RTM (_AC(1, ULL) << 0)
++
+ /* Intel MSRs. Some also available on other CPUs */
+ #define MSR_IA32_PERFCTR0 0x000000c1
+ #define MSR_IA32_A_PERFCTR0 0x000004c1
+diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
+index 5e778ab7ba..1287b9fb6e 100644
+--- a/xen/include/asm-x86/vpmu.h
++++ b/xen/include/asm-x86/vpmu.h
+@@ -125,6 +125,7 @@ static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+
+ extern unsigned int vpmu_mode;
+ extern unsigned int vpmu_features;
++extern bool opt_rtm_abort;
+
+ /* Context switch */
+ static inline void vpmu_switch_from(struct vcpu *prev)
+diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
+index 6c82816fd3..aa2656d792 100644
+--- a/xen/include/public/arch-x86/cpufeatureset.h
++++ b/xen/include/public/arch-x86/cpufeatureset.h
+@@ -242,6 +242,7 @@ XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by
+ /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
+ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A AVX512 Neural Network Instructions */
+ XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
++XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
+ XEN_CPUFEATURE(IBRSB, 9*32+26) /*A IBRS and IBPB support (used by Intel) */
+ XEN_CPUFEATURE(STIBP, 9*32+27) /*A STIBP */
+ XEN_CPUFEATURE(L1D_FLUSH, 9*32+28) /*S MSR_FLUSH_CMD and L1D flush. */
+--
+2.20.1
+
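
As a usage illustration only (not part of this commit, and boot-loader specifics vary), the trade-off documented above would be selected from the hypervisor command line roughly as follows, with rtm-abort combined with another vpmu flag so that vPMU is actually enabled:

    # Hypothetical GRUB2 fragment; adjust to the local boot setup.
    GRUB_CMDLINE_XEN_DEFAULT="vpmu=arch,rtm-abort"
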
diff --git a/main/xen/xen-Fix-backport-of-x86-tsx-Implement-controls-for-R.patch b/main/xen/xen-Fix-backport-of-x86-tsx-Implement-controls-for-R.patch
new file mode 100644
index 00000000000..4b50d3c4692
--- /dev/null
+++ b/main/xen/xen-Fix-backport-of-x86-tsx-Implement-controls-for-R.patch
@@ -0,0 +1,34 @@
+From 0ebfc81c091be150343d6ce948841a1e6cf89361 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:51:31 +0200
+Subject: [PATCH] xen: Fix backport of "x86/tsx: Implement controls for RTM
+ force-abort mode"
+
+The posted version of this patch depends on c/s 3c555295 "x86/vpmu: Improve
+documentation and parsing for vpmu=" (Xen 4.12 and later) to prevent
+`vpmu=rtm-abort` implying `vpmu=1`, which is outside of security support.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/cpu/vpmu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
+index 639ae0ca63..9a42e09a11 100644
+--- a/xen/arch/x86/cpu/vpmu.c
++++ b/xen/arch/x86/cpu/vpmu.c
+@@ -91,6 +91,10 @@ static int __init parse_vpmu_params(const char *s)
+
+ s = ss + 1;
+ } while ( *ss );
++
++ if ( !vpmu_features ) /* rtm-abort doesn't imply vpmu=1 */
++ break;
++
+ /* fall through */
+ case 1:
+ /* Default VPMU mode */
+--
+2.20.1
+
diff --git a/main/xen/xen-Fix-backport-of-xen-cmdline-Fix-buggy-strncmp-s-.patch b/main/xen/xen-Fix-backport-of-xen-cmdline-Fix-buggy-strncmp-s-.patch
new file mode 100644
index 00000000000..4a822e62272
--- /dev/null
+++ b/main/xen/xen-Fix-backport-of-xen-cmdline-Fix-buggy-strncmp-s-.patch
@@ -0,0 +1,52 @@
+From eb8acba82ad00cf3499de8dbd52cdc813b2d64e2 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:52:32 +0200
+Subject: [PATCH] xen: Fix backport of "xen/cmdline: Fix buggy strncmp(s,
+ LITERAL, ss - s) construct"
+
+These were missed as a consequence of being rebased over other cmdline
+cleanup.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/dom0_build.c | 4 ++--
+ xen/arch/x86/hvm/vmx/vmcs.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
+index b744791c38..dbd7d0d393 100644
+--- a/xen/arch/x86/dom0_build.c
++++ b/xen/arch/x86/dom0_build.c
+@@ -237,10 +237,10 @@ static int __init parse_dom0_param(const char *s)
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+- if ( !strncmp(s, "pvh", ss - s) )
++ if ( !cmdline_strcmp(s, "pvh") )
+ dom0_pvh = true;
+ #ifdef CONFIG_SHADOW_PAGING
+- else if ( !strncmp(s, "shadow", ss - s) )
++ else if ( !cmdline_strcmp(s, "shadow") )
+ opt_dom0_shadow = true;
+ #endif
+ else
+diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
+index 49c916b82d..19755f2716 100644
+--- a/xen/arch/x86/hvm/vmx/vmcs.c
++++ b/xen/arch/x86/hvm/vmx/vmcs.c
+@@ -90,9 +90,9 @@ static int __init parse_ept_param(const char *s)
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+- if ( !strncmp(s, "pml", ss - s) )
++ if ( !cmdline_strcmp(s, "pml") )
+ opt_pml_enabled = val;
+- else if ( !strncmp(s, "ad", ss - s) )
++ else if ( !cmdline_strcmp(s, "ad") )
+ opt_ept_ad = val;
+ else
+ rc = -EINVAL;
+--
+2.20.1
+
diff --git a/main/xen/xen-cmdline-Fix-buggy-strncmp-s-LITERAL-ss-s-constru.patch b/main/xen/xen-cmdline-Fix-buggy-strncmp-s-LITERAL-ss-s-constru.patch
new file mode 100644
index 00000000000..bbcc3a299f3
--- /dev/null
+++ b/main/xen/xen-cmdline-Fix-buggy-strncmp-s-LITERAL-ss-s-constru.patch
@@ -0,0 +1,464 @@
+From e202feb7131e66ed9186ad8766c9582502c98998 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 1 Feb 2019 11:34:35 +0100
+Subject: [PATCH] xen/cmdline: Fix buggy strncmp(s, LITERAL, ss - s) construct
+
+When the command line parsing was updated to use const strings and no longer
+tokenise with NUL characters, string matches could no longer be made with
+strcmp().
+
+Unfortunately, the replacement was buggy. strncmp(s, "opt", ss - s) matches
+"o", "op" and "opt" on the command line, as ss - s may be shorter than the
+passed literal. Furthermore, parse_bool() is affected by this, so substrings
+such as "d", "e" and "o" are considered valid, with the latter being ambiguous
+between "on" and "off".
+
+Introduce a new strcmp-like function for the task, which looks for exact
+string matches, but declares success when the NUL of the literal matches a
+comma, colon or semicolon in the command line fragment.
+
+No change to the intended parsing functionality, but fixes cases where a
+partial string on the command line will inadvertently trigger options.
+
+A few areas were more than just a trivial change:
+
+ * parse_irq_vector_map_param() gained some style corrections.
+ * parse_vpmu_params() was rewritten to use the normal list-of-options form,
+ rather than just fixing up parse_vpmu_param() and leaving the parsing being
+ hard to follow.
+ * Instead of making the trivial fix of adding an explicit length check in
+ parse_bool(), use the length to select which token to we search for, which
+ is more efficient than the previous linear search over all possible tokens.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Julien Grall <julien.grall@arm.com>
+master commit: 2ddf7e3e341df3ccf21613ff7ffd4b7693abe9e9
+master date: 2019-01-15 12:58:34 +0000
+---
+ xen/arch/x86/cpu/vpmu.c | 49 ++++++-----------
+ xen/arch/x86/irq.c | 12 ++--
+ xen/arch/x86/psr.c | 4 +-
+ xen/arch/x86/spec_ctrl.c | 12 ++--
+ xen/arch/x86/x86_64/mmconfig-shared.c | 4 +-
+ xen/common/efi/boot.c | 4 +-
+ xen/common/kernel.c | 79 +++++++++++++++++++++------
+ xen/drivers/cpufreq/cpufreq.c | 6 +-
+ xen/drivers/passthrough/iommu.c | 28 +++++-----
+ xen/drivers/passthrough/pci.c | 4 +-
+ xen/include/xen/lib.h | 7 +++
+ 11 files changed, 124 insertions(+), 85 deletions(-)
+
+diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
+index b978e05613..2be61606b4 100644
+--- a/xen/arch/x86/cpu/vpmu.c
++++ b/xen/arch/x86/cpu/vpmu.c
+@@ -61,42 +61,31 @@ static unsigned vpmu_count;
+
+ static DEFINE_PER_CPU(struct vcpu *, last_vcpu);
+
+-static int parse_vpmu_param(const char *s, unsigned int len)
+-{
+- if ( !*s || !len )
+- return 0;
+- if ( !strncmp(s, "bts", len) )
+- vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
+- else if ( !strncmp(s, "ipc", len) )
+- vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
+- else if ( !strncmp(s, "arch", len) )
+- vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
+- else
+- return 1;
+- return 0;
+-}
+-
+ static int __init parse_vpmu_params(const char *s)
+ {
+- const char *sep, *p = s;
++ const char *ss;
+
+ switch ( parse_bool(s, NULL) )
+ {
+ case 0:
+ break;
+ default:
+- for ( ; ; )
+- {
+- sep = strchr(p, ',');
+- if ( sep == NULL )
+- sep = strchr(p, 0);
+- if ( parse_vpmu_param(p, sep - p) )
+- goto error;
+- if ( !*sep )
+- /* reached end of flags */
+- break;
+- p = sep + 1;
+- }
++ do {
++ ss = strchr(s, ',');
++ if ( !ss )
++ ss = strchr(s, '\0');
++
++ if ( !cmdline_strcmp(s, "bts") )
++ vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
++ else if ( !cmdline_strcmp(s, "ipc") )
++ vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
++ else if ( !cmdline_strcmp(s, "arch") )
++ vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
++ else
++ return -EINVAL;
++
++ s = ss + 1;
++ } while ( *ss );
+ /* fall through */
+ case 1:
+ /* Default VPMU mode */
+@@ -105,10 +94,6 @@ static int __init parse_vpmu_params(const char *s)
+ break;
+ }
+ return 0;
+-
+- error:
+- printk("VPMU: unknown flags: %s - vpmu disabled!\n", s);
+- return -EINVAL;
+ }
+
+ void vpmu_lvtpc_update(uint32_t val)
+diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
+index 87ef2e801f..0ceb9b9a1c 100644
+--- a/xen/arch/x86/irq.c
++++ b/xen/arch/x86/irq.c
+@@ -70,12 +70,12 @@ static int __init parse_irq_vector_map_param(const char *s)
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+- if ( !strncmp(s, "none", ss - s))
+- opt_irq_vector_map=OPT_IRQ_VECTOR_MAP_NONE;
+- else if ( !strncmp(s, "global", ss - s))
+- opt_irq_vector_map=OPT_IRQ_VECTOR_MAP_GLOBAL;
+- else if ( !strncmp(s, "per-device", ss - s))
+- opt_irq_vector_map=OPT_IRQ_VECTOR_MAP_PERDEV;
++ if ( !cmdline_strcmp(s, "none") )
++ opt_irq_vector_map = OPT_IRQ_VECTOR_MAP_NONE;
++ else if ( !cmdline_strcmp(s, "global") )
++ opt_irq_vector_map = OPT_IRQ_VECTOR_MAP_GLOBAL;
++ else if ( !cmdline_strcmp(s, "per-device") )
++ opt_irq_vector_map = OPT_IRQ_VECTOR_MAP_PERDEV;
+ else
+ rc = -EINVAL;
+
+diff --git a/xen/arch/x86/psr.c b/xen/arch/x86/psr.c
+index 0ba8ef88d4..5866a261e3 100644
+--- a/xen/arch/x86/psr.c
++++ b/xen/arch/x86/psr.c
+@@ -591,13 +591,13 @@ static int __init parse_psr_param(const char *s)
+ if ( val_delim > ss )
+ val_delim = ss;
+
+- if ( *val_delim && !strncmp(s, "rmid_max", val_delim - s) )
++ if ( *val_delim && !cmdline_strcmp(s, "rmid_max") )
+ {
+ opt_rmid_max = simple_strtoul(val_delim + 1, &q, 0);
+ if ( *q && *q != ',' )
+ rc = -EINVAL;
+ }
+- else if ( *val_delim && !strncmp(s, "cos_max", val_delim - s) )
++ else if ( *val_delim && !cmdline_strcmp(s, "cos_max") )
+ {
+ opt_cos_max = simple_strtoul(val_delim + 1, &q, 0);
+ if ( *q && *q != ',' )
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index eb480c1f08..e641894f17 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -83,11 +83,11 @@ static int __init parse_bti(const char *s)
+ {
+ s += 6;
+
+- if ( !strncmp(s, "retpoline", ss - s) )
++ if ( !cmdline_strcmp(s, "retpoline") )
+ opt_thunk = THUNK_RETPOLINE;
+- else if ( !strncmp(s, "lfence", ss - s) )
++ else if ( !cmdline_strcmp(s, "lfence") )
+ opt_thunk = THUNK_LFENCE;
+- else if ( !strncmp(s, "jmp", ss - s) )
++ else if ( !cmdline_strcmp(s, "jmp") )
+ opt_thunk = THUNK_JMP;
+ else
+ rc = -EINVAL;
+@@ -194,11 +194,11 @@ static int __init parse_spec_ctrl(const char *s)
+ {
+ s += 10;
+
+- if ( !strncmp(s, "retpoline", ss - s) )
++ if ( !cmdline_strcmp(s, "retpoline") )
+ opt_thunk = THUNK_RETPOLINE;
+- else if ( !strncmp(s, "lfence", ss - s) )
++ else if ( !cmdline_strcmp(s, "lfence") )
+ opt_thunk = THUNK_LFENCE;
+- else if ( !strncmp(s, "jmp", ss - s) )
++ else if ( !cmdline_strcmp(s, "jmp") )
+ opt_thunk = THUNK_JMP;
+ else
+ rc = -EINVAL;
+diff --git a/xen/arch/x86/x86_64/mmconfig-shared.c b/xen/arch/x86/x86_64/mmconfig-shared.c
+index 7c3b7fd30b..01b5720445 100644
+--- a/xen/arch/x86/x86_64/mmconfig-shared.c
++++ b/xen/arch/x86/x86_64/mmconfig-shared.c
+@@ -46,8 +46,8 @@ static int __init parse_mmcfg(const char *s)
+ case 1:
+ break;
+ default:
+- if ( !strncmp(s, "amd_fam10", ss - s) ||
+- !strncmp(s, "amd-fam10", ss - s) )
++ if ( !cmdline_strcmp(s, "amd_fam10") ||
++ !cmdline_strcmp(s, "amd-fam10") )
+ pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
+ else
+ rc = -EINVAL;
+diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c
+index 6be0b3986f..a9917f31f1 100644
+--- a/xen/common/efi/boot.c
++++ b/xen/common/efi/boot.c
+@@ -1323,14 +1323,14 @@ static int __init parse_efi_param(const char *s)
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+- if ( !strncmp(s, "rs", ss - s) )
++ if ( !cmdline_strcmp(s, "rs") )
+ {
+ if ( val )
+ __set_bit(EFI_RS, &efi_flags);
+ else
+ __clear_bit(EFI_RS, &efi_flags);
+ }
+- else if ( !strncmp(s, "attr=uc", ss - s) )
++ else if ( !cmdline_strcmp(s, "attr=uc") )
+ efi_map_uc = val;
+ else
+ rc = -EINVAL;
+diff --git a/xen/common/kernel.c b/xen/common/kernel.c
+index 5766a0f784..053c31d391 100644
+--- a/xen/common/kernel.c
++++ b/xen/common/kernel.c
+@@ -221,25 +221,51 @@ void __init cmdline_parse(const char *cmdline)
+
+ int parse_bool(const char *s, const char *e)
+ {
+- unsigned int len;
++ size_t len = e ? ({ ASSERT(e >= s); e - s; }) : strlen(s);
+
+- len = e ? ({ ASSERT(e >= s); e - s; }) : strlen(s);
+- if ( !len )
+- return -1;
++ switch ( len )
++ {
++ case 1:
++ if ( *s == '1' )
++ return 1;
++ if ( *s == '0' )
++ return 0;
++ break;
+
+- if ( !strncmp("no", s, len) ||
+- !strncmp("off", s, len) ||
+- !strncmp("false", s, len) ||
+- !strncmp("disable", s, len) ||
+- !strncmp("0", s, len) )
+- return 0;
++ case 2:
++ if ( !strncmp("on", s, 2) )
++ return 1;
++ if ( !strncmp("no", s, 2) )
++ return 0;
++ break;
++
++ case 3:
++ if ( !strncmp("yes", s, 3) )
++ return 1;
++ if ( !strncmp("off", s, 3) )
++ return 0;
++ break;
++
++ case 4:
++ if ( !strncmp("true", s, 4) )
++ return 1;
++ break;
++
++ case 5:
++ if ( !strncmp("false", s, 5) )
++ return 0;
++ break;
+
+- if ( !strncmp("yes", s, len) ||
+- !strncmp("on", s, len) ||
+- !strncmp("true", s, len) ||
+- !strncmp("enable", s, len) ||
+- !strncmp("1", s, len) )
+- return 1;
++ case 6:
++ if ( !strncmp("enable", s, 6) )
++ return 1;
++ break;
++
++ case 7:
++ if ( !strncmp("disable", s, 7) )
++ return 0;
++ break;
++ }
+
+ return -1;
+ }
+@@ -271,6 +297,27 @@ int parse_boolean(const char *name, const char *s, const char *e)
+ return -1;
+ }
+
++int cmdline_strcmp(const char *frag, const char *name)
++{
++ for ( ; ; frag++, name++ )
++ {
++ unsigned char f = *frag, n = *name;
++ int res = f - n;
++
++ if ( res || n == '\0' )
++ {
++ /*
++ * NUL in 'name' matching a comma, colon or semicolon in 'frag'
++ * implies success.
++ */
++ if ( n == '\0' && (f == ',' || f == ':' || f == ';') )
++ res = 0;
++
++ return res;
++ }
++ }
++}
++
+ unsigned int tainted;
+
+ /**
+diff --git a/xen/drivers/cpufreq/cpufreq.c b/xen/drivers/cpufreq/cpufreq.c
+index 212f48f9f4..6152a045d9 100644
+--- a/xen/drivers/cpufreq/cpufreq.c
++++ b/xen/drivers/cpufreq/cpufreq.c
+@@ -73,7 +73,7 @@ static int __init setup_cpufreq_option(const char *str)
+ arg = strchr(str, '\0');
+ choice = parse_bool(str, arg);
+
+- if ( choice < 0 && !strncmp(str, "dom0-kernel", arg - str) )
++ if ( choice < 0 && !cmdline_strcmp(str, "dom0-kernel") )
+ {
+ xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
+ cpufreq_controller = FREQCTL_dom0_kernel;
+@@ -81,14 +81,14 @@ static int __init setup_cpufreq_option(const char *str)
+ return 0;
+ }
+
+- if ( choice == 0 || !strncmp(str, "none", arg - str) )
++ if ( choice == 0 || !cmdline_strcmp(str, "none") )
+ {
+ xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
+ cpufreq_controller = FREQCTL_none;
+ return 0;
+ }
+
+- if ( choice > 0 || !strncmp(str, "xen", arg - str) )
++ if ( choice > 0 || !cmdline_strcmp(str, "xen") )
+ {
+ xen_processor_pmbits |= XEN_PROCESSOR_PM_PX;
+ cpufreq_controller = FREQCTL_xen;
+diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
+index 2c44fabf99..f9b13b018c 100644
+--- a/xen/drivers/passthrough/iommu.c
++++ b/xen/drivers/passthrough/iommu.c
+@@ -95,36 +95,36 @@ static int __init parse_iommu_param(const char *s)
+ b = parse_bool(s, ss);
+ if ( b >= 0 )
+ iommu_enable = b;
+- else if ( !strncmp(s, "force", ss - s) ||
+- !strncmp(s, "required", ss - s) )
++ else if ( !cmdline_strcmp(s, "force") ||
++ !cmdline_strcmp(s, "required") )
+ force_iommu = val;
+- else if ( !strncmp(s, "workaround_bios_bug", ss - s) )
++ else if ( !cmdline_strcmp(s, "workaround_bios_bug") )
+ iommu_workaround_bios_bug = val;
+- else if ( !strncmp(s, "igfx", ss - s) )
++ else if ( !cmdline_strcmp(s, "igfx") )
+ iommu_igfx = val;
+- else if ( !strncmp(s, "verbose", ss - s) )
++ else if ( !cmdline_strcmp(s, "verbose") )
+ iommu_verbose = val;
+- else if ( !strncmp(s, "snoop", ss - s) )
++ else if ( !cmdline_strcmp(s, "snoop") )
+ iommu_snoop = val;
+- else if ( !strncmp(s, "qinval", ss - s) )
++ else if ( !cmdline_strcmp(s, "qinval") )
+ iommu_qinval = val;
+- else if ( !strncmp(s, "intremap", ss - s) )
++ else if ( !cmdline_strcmp(s, "intremap") )
+ iommu_intremap = val;
+- else if ( !strncmp(s, "intpost", ss - s) )
++ else if ( !cmdline_strcmp(s, "intpost") )
+ iommu_intpost = val;
+- else if ( !strncmp(s, "debug", ss - s) )
++ else if ( !cmdline_strcmp(s, "debug") )
+ {
+ iommu_debug = val;
+ if ( val )
+ iommu_verbose = 1;
+ }
+- else if ( !strncmp(s, "amd-iommu-perdev-intremap", ss - s) )
++ else if ( !cmdline_strcmp(s, "amd-iommu-perdev-intremap") )
+ amd_iommu_perdev_intremap = val;
+- else if ( !strncmp(s, "dom0-passthrough", ss - s) )
++ else if ( !cmdline_strcmp(s, "dom0-passthrough") )
+ iommu_passthrough = val;
+- else if ( !strncmp(s, "dom0-strict", ss - s) )
++ else if ( !cmdline_strcmp(s, "dom0-strict") )
+ iommu_dom0_strict = val;
+- else if ( !strncmp(s, "sharept", ss - s) )
++ else if ( !cmdline_strcmp(s, "sharept") )
+ iommu_hap_pt_share = val;
+ else
+ rc = -EINVAL;
+diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
+index 1db69d5b99..f51cae7f4e 100644
+--- a/xen/drivers/passthrough/pci.c
++++ b/xen/drivers/passthrough/pci.c
+@@ -212,12 +212,12 @@ static int __init parse_pci_param(const char *s)
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+- if ( !strncmp(s, "serr", ss - s) )
++ if ( !cmdline_strcmp(s, "serr") )
+ {
+ cmd_mask = PCI_COMMAND_SERR;
+ brctl_mask = PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_DTMR_SERR;
+ }
+- else if ( !strncmp(s, "perr", ss - s) )
++ else if ( !cmdline_strcmp(s, "perr") )
+ {
+ cmd_mask = PCI_COMMAND_PARITY;
+ brctl_mask = PCI_BRIDGE_CTL_PARITY;
+diff --git a/xen/include/xen/lib.h b/xen/include/xen/lib.h
+index 1d9771340c..750f809968 100644
+--- a/xen/include/xen/lib.h
++++ b/xen/include/xen/lib.h
+@@ -81,6 +81,13 @@ int parse_bool(const char *s, const char *e);
+ */
+ int parse_boolean(const char *name, const char *s, const char *e);
+
++/**
++ * Very similar to strcmp(), but will declare a match if the NUL in 'name'
++ * lines up with comma, colon or semicolon in 'frag'. Designed for picking
++ * exact string matches out of a delimited command line list.
++ */
++int cmdline_strcmp(const char *frag, const char *name);
++
+ /*#define DEBUG_TRACE_DUMP*/
+ #ifdef DEBUG_TRACE_DUMP
+ extern void debugtrace_dump(void);
+--
+2.20.1
+
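
The bug and the fix are easy to demonstrate outside of Xen. In the sketch below, cmdline_strcmp() is copied from the hunk above; the surrounding program is illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* From the patch: exact match, treating ',', ':' or ';' in frag as end. */
    static int cmdline_strcmp(const char *frag, const char *name)
    {
        for ( ; ; frag++, name++ )
        {
            unsigned char f = *frag, n = *name;
            int res = f - n;

            if ( res || n == '\0' )
            {
                if ( n == '\0' && (f == ',' || f == ':' || f == ';') )
                    res = 0;
                return res;
            }
        }
    }

    int main(void)
    {
        const char *s = "re";              /* truncated token from a command line */
        const char *ss = strchr(s, '\0');  /* end of the token */

        /* Buggy construct: compares only ss - s == 2 bytes, so the prefix
         * "re" spuriously matches "retpoline". */
        printf("strncmp:        %s\n",
               !strncmp(s, "retpoline", ss - s) ? "match" : "no match");

        /* Fixed construct: only the exact token matches. */
        printf("cmdline_strcmp: %s\n",
               !cmdline_strcmp(s, "retpoline") ? "match" : "no match");
        return 0;
    }
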
diff --git a/main/xen/xsa297-4.11-1.patch b/main/xen/xsa297-4.11-1.patch
new file mode 100644
index 00000000000..5dd503587f8
--- /dev/null
+++ b/main/xen/xsa297-4.11-1.patch
@@ -0,0 +1,163 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Reposition the XPTI command line parsing logic
+
+It has ended up in the middle of the mitigation calculation logic. Move it to
+be beside the other command line parsing.
+
+No functional change.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 8fa6c10..949bbda 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -223,6 +223,73 @@ static int __init parse_spec_ctrl(const char *s)
+ }
+ custom_param("spec-ctrl", parse_spec_ctrl);
+
++int8_t __read_mostly opt_xpti_hwdom = -1;
++int8_t __read_mostly opt_xpti_domu = -1;
++
++static __init void xpti_init_default(uint64_t caps)
++{
++ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
++ caps = ARCH_CAPS_RDCL_NO;
++
++ if ( caps & ARCH_CAPS_RDCL_NO )
++ {
++ if ( opt_xpti_hwdom < 0 )
++ opt_xpti_hwdom = 0;
++ if ( opt_xpti_domu < 0 )
++ opt_xpti_domu = 0;
++ }
++ else
++ {
++ if ( opt_xpti_hwdom < 0 )
++ opt_xpti_hwdom = 1;
++ if ( opt_xpti_domu < 0 )
++ opt_xpti_domu = 1;
++ }
++}
++
++static __init int parse_xpti(const char *s)
++{
++ const char *ss;
++ int val, rc = 0;
++
++ /* Interpret 'xpti' alone in its positive boolean form. */
++ if ( *s == '\0' )
++ opt_xpti_hwdom = opt_xpti_domu = 1;
++
++ do {
++ ss = strchr(s, ',');
++ if ( !ss )
++ ss = strchr(s, '\0');
++
++ switch ( parse_bool(s, ss) )
++ {
++ case 0:
++ opt_xpti_hwdom = opt_xpti_domu = 0;
++ break;
++
++ case 1:
++ opt_xpti_hwdom = opt_xpti_domu = 1;
++ break;
++
++ default:
++ if ( !strcmp(s, "default") )
++ opt_xpti_hwdom = opt_xpti_domu = -1;
++ else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
++ opt_xpti_hwdom = val;
++ else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
++ opt_xpti_domu = val;
++ else if ( *s )
++ rc = -EINVAL;
++ break;
++ }
++
++ s = ss + 1;
++ } while ( *ss );
++
++ return rc;
++}
++custom_param("xpti", parse_xpti);
++
+ int8_t __read_mostly opt_pv_l1tf_hwdom = -1;
+ int8_t __read_mostly opt_pv_l1tf_domu = -1;
+
+@@ -676,73 +743,6 @@ static __init void l1tf_calculations(uint64_t caps)
+ : (3ul << (paddr_bits - 2))));
+ }
+
+-int8_t __read_mostly opt_xpti_hwdom = -1;
+-int8_t __read_mostly opt_xpti_domu = -1;
+-
+-static __init void xpti_init_default(uint64_t caps)
+-{
+- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+- caps = ARCH_CAPS_RDCL_NO;
+-
+- if ( caps & ARCH_CAPS_RDCL_NO )
+- {
+- if ( opt_xpti_hwdom < 0 )
+- opt_xpti_hwdom = 0;
+- if ( opt_xpti_domu < 0 )
+- opt_xpti_domu = 0;
+- }
+- else
+- {
+- if ( opt_xpti_hwdom < 0 )
+- opt_xpti_hwdom = 1;
+- if ( opt_xpti_domu < 0 )
+- opt_xpti_domu = 1;
+- }
+-}
+-
+-static __init int parse_xpti(const char *s)
+-{
+- const char *ss;
+- int val, rc = 0;
+-
+- /* Interpret 'xpti' alone in its positive boolean form. */
+- if ( *s == '\0' )
+- opt_xpti_hwdom = opt_xpti_domu = 1;
+-
+- do {
+- ss = strchr(s, ',');
+- if ( !ss )
+- ss = strchr(s, '\0');
+-
+- switch ( parse_bool(s, ss) )
+- {
+- case 0:
+- opt_xpti_hwdom = opt_xpti_domu = 0;
+- break;
+-
+- case 1:
+- opt_xpti_hwdom = opt_xpti_domu = 1;
+- break;
+-
+- default:
+- if ( !strcmp(s, "default") )
+- opt_xpti_hwdom = opt_xpti_domu = -1;
+- else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
+- opt_xpti_hwdom = val;
+- else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
+- opt_xpti_domu = val;
+- else if ( *s )
+- rc = -EINVAL;
+- break;
+- }
+-
+- s = ss + 1;
+- } while ( *ss );
+-
+- return rc;
+-}
+-custom_param("xpti", parse_xpti);
+-
+ void __init init_speculation_mitigations(void)
+ {
+ enum ind_thunk thunk = THUNK_DEFAULT;
diff --git a/main/xen/xsa297-4.11-2.patch b/main/xen/xsa297-4.11-2.patch
new file mode 100644
index 00000000000..7c6c0062231
--- /dev/null
+++ b/main/xen/xsa297-4.11-2.patch
@@ -0,0 +1,54 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/msr: Definitions for MSR_INTEL_CORE_THREAD_COUNT
+
+This is a model specific register which details the current configuration of
+cores and threads in the package. Because of how Hyperthread and Core
+configuration works in firmware, the MSR is de facto constant and will
+remain unchanged until the next system reset.
+
+It is a read only MSR (so unilaterally reject writes), but for now retain its
+leaky-on-read properties. Further CPUID/MSR work is required before we can
+start virtualising a consistent topology to the guest, and retaining the old
+behaviour is the safest course of action.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
+index b49fbd8..153f36b 100644
+--- a/xen/arch/x86/msr.c
++++ b/xen/arch/x86/msr.c
+@@ -180,6 +180,10 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+ _MSR_MISC_FEATURES_CPUID_FAULTING;
+ break;
+
++ /*
++ * TODO: Implement when we have better topology representation.
++ case MSR_INTEL_CORE_THREAD_COUNT:
++ */
+ default:
+ return X86EMUL_UNHANDLEABLE;
+ }
+@@ -202,6 +206,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+ {
+ uint64_t rsvd;
+
++ case MSR_INTEL_CORE_THREAD_COUNT:
+ case MSR_INTEL_PLATFORM_INFO:
+ case MSR_ARCH_CAPABILITIES:
+ /* Read-only */
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 7588fc1..7cddfca 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -34,6 +34,10 @@
+ #define EFER_KNOWN_MASK (EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
+ EFER_SVME | EFER_LMSLE | EFER_FFXSE)
+
++#define MSR_INTEL_CORE_THREAD_COUNT 0x00000035
++#define MSR_CTC_THREAD_MASK 0x0000ffff
++#define MSR_CTC_CORE_MASK 0xffff0000
++
+ /* Speculation Controls. */
+ #define MSR_SPEC_CTRL 0x00000048
+ #define SPEC_CTRL_IBRS (_AC(1, ULL) << 0)
diff --git a/main/xen/xsa297-4.11-3.patch b/main/xen/xsa297-4.11-3.patch
new file mode 100644
index 00000000000..a6e59efaca7
--- /dev/null
+++ b/main/xen/xsa297-4.11-3.patch
@@ -0,0 +1,109 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/boot: Detect the firmware SMT setting correctly on Intel hardware
+
+While boot_cpu_data.x86_num_siblings is an accurate value to use on AMD
+hardware, it isn't on Intel when the user has disabled Hyperthreading in the
+firmware. As a result, a user which has chosen to disable HT still gets
+nagged on L1TF-vulnerable hardware when they haven't chosen an explicit
+smt=<bool> setting.
+
+Make use of the largely-undocumented MSR_INTEL_CORE_THREAD_COUNT which in
+practice exists since Nehalem, when booting on real hardware. Fall back to
+using the ACPI table APIC IDs.
+
+While adjusting this logic, fix a latent bug in amd_get_topology(). The
+thread count field in CPUID.0x8000001e.ebx is documented as 8 bits wide,
+rather than 2 bits wide.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
+index 76078b5..894b892 100644
+--- a/xen/arch/x86/cpu/amd.c
++++ b/xen/arch/x86/cpu/amd.c
+@@ -505,7 +505,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+- c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
++ c->x86_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+ if (c->x86 < 0x17)
+ c->compute_unit_id = ebx & 0xFF;
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 949bbda..ac1be4a 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -417,6 +417,45 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ opt_pv_l1tf_domu ? "enabled" : "disabled");
+ }
+
++static bool __init check_smt_enabled(void)
++{
++ uint64_t val;
++ unsigned int cpu;
++
++ /*
++ * x86_num_siblings defaults to 1 in the absence of other information, and
++ * is adjusted based on other topology information found in CPUID leaves.
++ *
++ * On AMD hardware, it will be the current SMT configuration. On Intel
++ * hardware, it will represent the maximum capability, rather than the
++ * current configuration.
++ */
++ if ( boot_cpu_data.x86_num_siblings < 2 )
++ return false;
++
++ /*
++ * Intel Nehalem and later hardware does have an MSR which reports the
++ * current count of cores/threads in the package.
++ *
++ * At the time of writing, it is almost completely undocumented, so isn't
++ * virtualised reliably.
++ */
++ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && !cpu_has_hypervisor &&
++ !rdmsr_safe(MSR_INTEL_CORE_THREAD_COUNT, val) )
++ return (MASK_EXTR(val, MSR_CTC_CORE_MASK) !=
++ MASK_EXTR(val, MSR_CTC_THREAD_MASK));
++
++ /*
++ * Search over the CPUs reported in the ACPI tables. Any whose APIC ID
++ * has a non-zero thread id component indicates that SMT is active.
++ */
++ for_each_present_cpu ( cpu )
++ if ( x86_cpu_to_apicid[cpu] & (boot_cpu_data.x86_num_siblings - 1) )
++ return true;
++
++ return false;
++}
++
+ /* Calculate whether Retpoline is known-safe on this CPU. */
+ static bool __init retpoline_safe(uint64_t caps)
+ {
+@@ -746,12 +785,14 @@ static __init void l1tf_calculations(uint64_t caps)
+ void __init init_speculation_mitigations(void)
+ {
+ enum ind_thunk thunk = THUNK_DEFAULT;
+- bool use_spec_ctrl = false, ibrs = false;
++ bool use_spec_ctrl = false, ibrs = false, hw_smt_enabled;
+ uint64_t caps = 0;
+
+ if ( boot_cpu_has(X86_FEATURE_ARCH_CAPS) )
+ rdmsrl(MSR_ARCH_CAPABILITIES, caps);
+
++ hw_smt_enabled = check_smt_enabled();
++
+ /*
+ * Has the user specified any custom BTI mitigations? If so, follow their
+ * instructions exactly and disable all heuristics.
+@@ -927,8 +968,7 @@ void __init init_speculation_mitigations(void)
+ * However, if we are on affected hardware, with HT enabled, and the user
+ * hasn't explicitly chosen whether to use HT or not, nag them to do so.
+ */
+- if ( opt_smt == -1 && cpu_has_bug_l1tf && !pv_shim &&
+- boot_cpu_data.x86_num_siblings > 1 )
++ if ( opt_smt == -1 && cpu_has_bug_l1tf && !pv_shim && hw_smt_enabled )
+ warning_add(
+ "Booted on L1TF-vulnerable hardware with SMT/Hyperthreading\n"
+ "enabled. Please assess your configuration and choose an\n"
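
To make the MSR_INTEL_CORE_THREAD_COUNT logic concrete: the MSR packs the active thread and core counts into one value, and SMT is in use exactly when the two differ. A hedged standalone sketch (the mask values come from xsa297-4.11-2.patch above; the function itself is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_CTC_THREAD_MASK UINT64_C(0x0000ffff) /* threads in package */
    #define MSR_CTC_CORE_MASK   UINT64_C(0xffff0000) /* cores in package */

    /* With Hyperthreading disabled in firmware, thread count == core count;
     * any surplus threads mean SMT is active. */
    static bool smt_enabled(uint64_t ctc /* value read from MSR 0x35 */)
    {
        uint64_t threads = ctc & MSR_CTC_THREAD_MASK;
        uint64_t cores   = (ctc & MSR_CTC_CORE_MASK) >> 16;

        return threads != cores;
    }
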
diff --git a/main/xen/xsa297-4.11-4.patch b/main/xen/xsa297-4.11-4.patch
new file mode 100644
index 00000000000..e6acc9ed522
--- /dev/null
+++ b/main/xen/xsa297-4.11-4.patch
@@ -0,0 +1,55 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Misc non-functional cleanup
+
+ * Identify BTI in the spec_ctrl_{enter,exit}_idle() comments, as other
+ mitigations will shortly appear.
+ * Use alternative_input() and cover the lack of memory clobber with a further
+ barrier.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
+index c846354..4983071 100644
+--- a/xen/include/asm-x86/spec_ctrl.h
++++ b/xen/include/asm-x86/spec_ctrl.h
+@@ -61,6 +61,8 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+ uint32_t val = 0;
+
+ /*
++ * Branch Target Injection:
++ *
+ * Latch the new shadow value, then enable shadowing, then update the MSR.
+ * There are no SMP issues here; only local processor ordering concerns.
+ */
+@@ -68,8 +70,9 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+ barrier();
+ info->spec_ctrl_flags |= SCF_use_shadow;
+ barrier();
+- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE)
+- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
++ alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
++ "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
++ barrier();
+ }
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
+@@ -78,13 +81,16 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
+ uint32_t val = info->xen_spec_ctrl;
+
+ /*
++ * Branch Target Injection:
++ *
+ * Disable shadowing before updating the MSR. There are no SMP issues
+ * here; only local processor ordering concerns.
+ */
+ info->spec_ctrl_flags &= ~SCF_use_shadow;
+ barrier();
+- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE)
+- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
++ alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
++ "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
++ barrier();
+ }
+
+ #endif /* !__X86_SPEC_CTRL_H__ */
diff --git a/main/xen/xsa297-4.11-5.patch b/main/xen/xsa297-4.11-5.patch
new file mode 100644
index 00000000000..f03c3d838aa
--- /dev/null
+++ b/main/xen/xsa297-4.11-5.patch
@@ -0,0 +1,141 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: CPUID/MSR definitions for Microarchitectural Data
+ Sampling
+
+The MD_CLEAR feature can be automatically offered to guests. No
+infrastructure is needed in Xen to support the guest making use of it.
+
+This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
+index 8e24380..8260dfb 100644
+--- a/docs/misc/xen-command-line.markdown
++++ b/docs/misc/xen-command-line.markdown
+@@ -489,7 +489,7 @@ accounting for hardware capabilities as enumerated via CPUID.
+
+ Currently accepted:
+
+-The Speculation Control hardware features `ibrsb`, `stibp`, `ibpb`,
++The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, `ibpb`,
+ `l1d-flush` and `ssbd` are used by default if available and applicable. They can
+ be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
+ won't offer them to guests.
+diff --git a/tools/libxl/libxl_cpuid.c b/tools/libxl/libxl_cpuid.c
+index 52e16c2..5a1702d 100644
+--- a/tools/libxl/libxl_cpuid.c
++++ b/tools/libxl/libxl_cpuid.c
+@@ -202,6 +202,7 @@ int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str)
+
+ {"avx512-4vnniw",0x00000007, 0, CPUID_REG_EDX, 2, 1},
+ {"avx512-4fmaps",0x00000007, 0, CPUID_REG_EDX, 3, 1},
++ {"md-clear", 0x00000007, 0, CPUID_REG_EDX, 10, 1},
+ {"ibrsb", 0x00000007, 0, CPUID_REG_EDX, 26, 1},
+ {"stibp", 0x00000007, 0, CPUID_REG_EDX, 27, 1},
+ {"l1d-flush", 0x00000007, 0, CPUID_REG_EDX, 28, 1},
+diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
+index 0ac903a..16697c4 100644
+--- a/tools/misc/xen-cpuid.c
++++ b/tools/misc/xen-cpuid.c
+@@ -142,6 +142,7 @@ static const char *str_7d0[32] =
+ {
+ [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
+
++ [10] = "md-clear",
+ /* 12 */ [13] = "tsx-force-abort",
+
+ [26] = "ibrsb", [27] = "stibp",
+diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
+index 5cc89e2..497bd2a 100644
+--- a/xen/arch/x86/cpuid.c
++++ b/xen/arch/x86/cpuid.c
+@@ -28,7 +28,12 @@ static int __init parse_xen_cpuid(const char *s)
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+- if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
++ if ( (val = parse_boolean("md-clear", s, ss)) >= 0 )
++ {
++ if ( !val )
++ setup_clear_cpu_cap(X86_FEATURE_MD_CLEAR);
++ }
++ else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
+ {
+ if ( !val )
+ setup_clear_cpu_cap(X86_FEATURE_IBPB);
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index ac1be4a..fdd90a8 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -347,17 +347,19 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ printk("Speculative mitigation facilities:\n");
+
+ /* Hardware features which pertain to speculative mitigations. */
+- printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s\n",
++ printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
+ (_7d0 & cpufeat_mask(X86_FEATURE_SSBD)) ? " SSBD" : "",
++ (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
+ (e8b & cpufeat_mask(X86_FEATURE_IBPB)) ? " IBPB" : "",
+ (caps & ARCH_CAPS_IBRS_ALL) ? " IBRS_ALL" : "",
+ (caps & ARCH_CAPS_RDCL_NO) ? " RDCL_NO" : "",
+ (caps & ARCH_CAPS_RSBA) ? " RSBA" : "",
+ (caps & ARCH_CAPS_SKIP_L1DFL) ? " SKIP_L1DFL": "",
+- (caps & ARCH_CAPS_SSB_NO) ? " SSB_NO" : "");
++ (caps & ARCH_CAPS_SSB_NO) ? " SSB_NO" : "",
++ (caps & ARCH_CAPS_MDS_NO) ? " MDS_NO" : "");
+
+ /* Compiled-in support which pertains to mitigations. */
+ if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) || IS_ENABLED(CONFIG_SHADOW_PAGING) )
+@@ -394,19 +396,21 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ * Alternatives blocks for protecting against and/or virtualising
+ * mitigation support for guests.
+ */
+- printk(" Support for VMs: PV:%s%s%s%s, HVM:%s%s%s%s\n",
++ printk(" Support for VMs: PV:%s%s%s%s%s, HVM:%s%s%s%s%s\n",
+ (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
+ boot_cpu_has(X86_FEATURE_SC_RSB_PV) ||
+ opt_eager_fpu) ? "" : " None",
+ boot_cpu_has(X86_FEATURE_SC_MSR_PV) ? " MSR_SPEC_CTRL" : "",
+ boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB" : "",
+ opt_eager_fpu ? " EAGER_FPU" : "",
++ boot_cpu_has(X86_FEATURE_MD_CLEAR) ? " MD_CLEAR" : "",
+ (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
+ boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
+ opt_eager_fpu) ? "" : " None",
+ boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "",
+ boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "",
+- opt_eager_fpu ? " EAGER_FPU" : "");
++ opt_eager_fpu ? " EAGER_FPU" : "",
++ boot_cpu_has(X86_FEATURE_MD_CLEAR) ? " MD_CLEAR" : "");
+
+ printk(" XPTI (64-bit PV only): Dom0 %s, DomU %s\n",
+ opt_xpti_hwdom ? "enabled" : "disabled",
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 7cddfca..b8151d2 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -53,6 +53,7 @@
+ #define ARCH_CAPS_RSBA (_AC(1, ULL) << 2)
+ #define ARCH_CAPS_SKIP_L1DFL (_AC(1, ULL) << 3)
+ #define ARCH_CAPS_SSB_NO (_AC(1, ULL) << 4)
++#define ARCH_CAPS_MDS_NO (_AC(1, ULL) << 5)
+
+ #define MSR_FLUSH_CMD 0x0000010b
+ #define FLUSH_CMD_L1D (_AC(1, ULL) << 0)
+diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
+index aa2656d..a14d8a7 100644
+--- a/xen/include/public/arch-x86/cpufeatureset.h
++++ b/xen/include/public/arch-x86/cpufeatureset.h
+@@ -242,6 +242,7 @@ XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by
+ /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
+ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A AVX512 Neural Network Instructions */
+ XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
++XEN_CPUFEATURE(MD_CLEAR, 9*32+10) /*A VERW clears microarchitectural buffers */
+ XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
+ XEN_CPUFEATURE(IBRSB, 9*32+26) /*A IBRS and IBPB support (used by Intel) */
+ XEN_CPUFEATURE(STIBP, 9*32+27) /*A STIBP */
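
MD_CLEAR enumerates that VERW, as a side effect, flushes the microarchitectural buffers involved in MDS. A minimal sketch of such a flush (illustrative only; the real Xen implementation, including the verw_sel/__HYPERVISOR_DS32 selector choice, follows in xsa297-4.11-6.patch below):

    #include <stdint.h>

    /* Issue VERW against a writeable data-segment selector. On MD_CLEAR
     * hardware this overwrites the affected CPU buffers. The selector value
     * here is hypothetical; any cached, writeable data descriptor works. */
    static inline void cpu_buffer_flush(void)
    {
        static const uint16_t sel = 0x28;

        asm volatile ( "verw %[sel]" :: [sel] "m" (sel) : "cc" );
    }
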
diff --git a/main/xen/xsa297-4.11-6.patch b/main/xen/xsa297-4.11-6.patch
new file mode 100644
index 00000000000..5d153a90e93
--- /dev/null
+++ b/main/xen/xsa297-4.11-6.patch
@@ -0,0 +1,134 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Infrastructure to use VERW to flush pipeline buffers
+
+Three synthetic features are introduced, as we need individual control of
+each, depending on circumstances. A later change will enable them at
+appropriate points.
+
+The verw_sel field doesn't strictly need to live in struct cpu_info. It lives
+there because there is a convenient hole it can fill, and it reduces the
+complexity of the SPEC_CTRL_EXIT_TO_{PV,HVM} assembly by avoiding the need for
+any temporary stack maintenance.
+
+This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
+index 5957c76..97cff49 100644
+--- a/xen/arch/x86/x86_64/asm-offsets.c
++++ b/xen/arch/x86/x86_64/asm-offsets.c
+@@ -129,6 +129,7 @@ void __dummy__(void)
+
+ OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
+ OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
++ OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
+ OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
+ OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
+ OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
+diff --git a/xen/include/asm-x86/cpufeatures.h b/xen/include/asm-x86/cpufeatures.h
+index 8e5cc53..96a5a01 100644
+--- a/xen/include/asm-x86/cpufeatures.h
++++ b/xen/include/asm-x86/cpufeatures.h
+@@ -33,3 +33,6 @@ XEN_CPUFEATURE(SC_RSB_HVM, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for
+ XEN_CPUFEATURE(NO_XPTI, (FSCAPINTS+0)*32+20) /* XPTI mitigation not in use */
+ XEN_CPUFEATURE(SC_MSR_IDLE, (FSCAPINTS+0)*32+21) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
+ XEN_CPUFEATURE(XEN_LBR, (FSCAPINTS+0)*32+22) /* Xen uses MSR_DEBUGCTL.LBR */
++XEN_CPUFEATURE(SC_VERW_PV, (FSCAPINTS+0)*32+23) /* VERW used by Xen for PV */
++XEN_CPUFEATURE(SC_VERW_HVM, (FSCAPINTS+0)*32+24) /* VERW used by Xen for HVM */
++XEN_CPUFEATURE(SC_VERW_IDLE, (FSCAPINTS+0)*32+25) /* VERW used by Xen for idle */
+diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
+index 5bd64b2..f3508c3 100644
+--- a/xen/include/asm-x86/current.h
++++ b/xen/include/asm-x86/current.h
+@@ -38,6 +38,7 @@ struct vcpu;
+ struct cpu_info {
+ struct cpu_user_regs guest_cpu_user_regs;
+ unsigned int processor_id;
++ unsigned int verw_sel;
+ struct vcpu *current_vcpu;
+ unsigned long per_cpu_offset;
+ unsigned long cr4;
+diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
+index 4983071..333d180 100644
+--- a/xen/include/asm-x86/spec_ctrl.h
++++ b/xen/include/asm-x86/spec_ctrl.h
+@@ -53,6 +53,13 @@ static inline void init_shadow_spec_ctrl_state(void)
+ info->shadow_spec_ctrl = 0;
+ info->xen_spec_ctrl = default_xen_spec_ctrl;
+ info->spec_ctrl_flags = default_spec_ctrl_flags;
++
++ /*
++ * For least latency, the VERW selector should be a writeable data
++ * descriptor resident in the cache. __HYPERVISOR_DS32 shares a cache
++ * line with __HYPERVISOR_CS, so is expected to be very cache-hot.
++ */
++ info->verw_sel = __HYPERVISOR_DS32;
+ }
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
+@@ -73,6 +80,22 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+ alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
+ "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
+ barrier();
++
++ /*
++ * Microarchitectural Store Buffer Data Sampling:
++ *
++ * On vulnerable systems, store buffer entries are statically partitioned
++ * between active threads. When entering idle, our store buffer entries
++ * are re-partitioned to allow the other threads to use them.
++ *
++ * Flush the buffers to ensure that no sensitive data of ours can be
++ * leaked by a sibling after it gets our store buffer entries.
++ *
++ * Note: VERW must be encoded with a memory operand, as it is only that
++ * form which causes a flush.
++ */
++ alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
++ [sel] "m" (info->verw_sel));
+ }
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
+@@ -91,6 +114,17 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
+ alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
+ "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
+ barrier();
++
++ /*
++ * Microarchitectural Store Buffer Data Sampling:
++ *
++ * On vulnerable systems, store buffer entries are statically partitioned
++ * between active threads. When exiting idle, the other threads store
++ * buffer entries are re-partitioned to give us some.
++ *
++ * We now have store buffer entries with stale data from sibling threads.
++ * A flush, if necessary, will be performed on the return-to-guest path.
++ */
+ }
+
+ #endif /* !__X86_SPEC_CTRL_H__ */
+diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
+index edace2a..9cc15e7 100644
+--- a/xen/include/asm-x86/spec_ctrl_asm.h
++++ b/xen/include/asm-x86/spec_ctrl_asm.h
+@@ -245,12 +245,16 @@
+ /* Use when exiting to PV guest context. */
+ #define SPEC_CTRL_EXIT_TO_PV \
+ ALTERNATIVE "", \
+- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV
++ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV; \
++ ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \
++ X86_FEATURE_SC_VERW_PV
+
+ /* Use when exiting to HVM guest context. */
+ #define SPEC_CTRL_EXIT_TO_HVM \
+ ALTERNATIVE "", \
+- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM
++ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM; \
++ ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \
++ X86_FEATURE_SC_VERW_HVM
+
+ /*
+ * Use in IST interrupt/exception context. May interrupt Xen or PV context.
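The verw CPUINFO_verw_sel(%rsp) alternatives above are the flush itself: with
MD_CLEAR microcode loaded, the memory-operand form of VERW overwrites the
affected microarchitectural buffers as a side effect of its segment check, and
Intel documents that the overwrite happens regardless of the verification
result. A minimal stand-alone C sketch of the same operation, assuming updated
microcode and shown only as an illustration (not part of the patch):

    #include <stdint.h>

    /*
     * Flush microarchitectural buffers via VERW, as done on the
     * SPEC_CTRL_EXIT_TO_{PV,HVM} paths above. Only the memory-operand
     * encoding flushes; VERW also writes ZF, hence the "cc" clobber.
     */
    static inline void md_clear_verw(void)
    {
        uint16_t sel;

        /*
         * Reuse the current %ds in this sketch. Xen instead caches
         * __HYPERVISOR_DS32 in cpu_info->verw_sel, purely for cache
         * locality, as the comment in spec_ctrl.h explains.
         */
        asm volatile ( "mov %%ds, %0" : "=r" (sel) );
        asm volatile ( "verw %0" :: "m" (sel) : "cc" );
    }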
diff --git a/main/xen/xsa297-4.11-7.patch b/main/xen/xsa297-4.11-7.patch
new file mode 100644
index 00000000000..940191d8e20
--- /dev/null
+++ b/main/xen/xsa297-4.11-7.patch
@@ -0,0 +1,316 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Introduce options to control VERW flushing
+
+The Microarchitectural Data Sampling vulnerability is split into categories
+with subtly different properties:
+
+ MLPDS - Microarchitectural Load Port Data Sampling
+ MSBDS - Microarchitectural Store Buffer Data Sampling
+ MFBDS - Microarchitectural Fill Buffer Data Sampling
+ MDSUM - Microarchitectural Data Sampling Uncacheable Memory
+
+MDSUM is a special case of the other three, and isn't distinguished further.
+
+These issues pertain to three microarchitectural structures: the Load Ports,
+the Store Buffers and the Fill Buffers. Each of these structures is flushed by
+the new enhanced VERW functionality, but the conditions under which flushing
+is necessary vary.
+
+For this concise overview of the issues and default logic, the abbreviations
+SB (Store Buffer), FB (Fill Buffer), LP (Load Port) and HT (Hyperthreading) are
+used for brevity:
+
+ * Vulnerable hardware is divided into two categories - parts which suffer
+ from SB only, and parts with any other combination of vulnerabilities.
+
+ * SB only has an HT interaction when the thread goes idle, due to the static
+ partitioning of resources. LP and FB have HT interactions at all points,
+ due to the competitive sharing of resources. All issues potentially leak
+ data across the return-to-guest transition.
+
+ * The microcode which implements VERW flushing also extends MSR_FLUSH_CMD, so
+ we don't need to do both on the HVM return-to-guest path. However, some
+ parts are not vulnerable to L1TF (therefore have no MSR_FLUSH_CMD), but are
+ vulnerable to MDS, so do require VERW on the HVM path.
+
+Note that we deliberately support mds=1 even without MD_CLEAR, in case the
+microcode has been updated but the feature bit is not exposed.
+
+This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
+index 8260dfb..8108bbf 100644
+--- a/docs/misc/xen-command-line.markdown
++++ b/docs/misc/xen-command-line.markdown
+@@ -1800,7 +1800,7 @@ is being interpreted as a custom timeout in milliseconds. Zero or boolean
+ false disable the quirk workaround, which is also the default.
+
+ ### spec-ctrl (x86)
+-> `= List of [ <bool>, xen=<bool>, {pv,hvm,msr-sc,rsb}=<bool>,
++> `= List of [ <bool>, xen=<bool>, {pv,hvm,msr-sc,rsb,md-clear}=<bool>,
+ > bti-thunk=retpoline|lfence|jmp, {ibrs,ibpb,ssbd,eager-fpu,
+ > l1d-flush}=<bool> ]`
+
+@@ -1824,9 +1824,10 @@ in place for guests to use.
+
+ Use of a positive boolean value for either of these options is invalid.
+
+-The booleans `pv=`, `hvm=`, `msr-sc=` and `rsb=` offer fine grained control
+-over the alternative blocks used by Xen. These impact Xen's ability to
+-protect itself, and Xen's ability to virtualise support for guests to use.
++The booleans `pv=`, `hvm=`, `msr-sc=`, `rsb=` and `md-clear=` offer fine
++grained control over the alternative blocks used by Xen. These impact Xen's
++ability to protect itself, and Xen's ability to virtualise support for guests
++to use.
+
+ * `pv=` and `hvm=` offer control over all suboptions for PV and HVM guests
+ respectively.
+@@ -1835,6 +1836,11 @@ protect itself, and Xen's ability to virtualise support for guests to use.
+ guests and if disabled, guests will be unable to use IBRS/STIBP/SSBD/etc.
+ * `rsb=` offers control over whether to overwrite the Return Stack Buffer /
+ Return Address Stack on entry to Xen.
++* `md-clear=` offers control over whether to use VERW to flush
++ microarchitectural buffers on idle and exit from Xen. *Note: For
++ compatibility with development versions of this fix, `mds=` is also accepted
++ on Xen 4.12 and earlier as an alias. Consult vendor documentation in
++ preference to this note.*
+
+ If Xen was compiled with INDIRECT\_THUNK support, `bti-thunk=` can be used to
+ select which of the thunks gets patched into the `__x86_indirect_thunk_%reg`
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index fdd90a8..10fcd77 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -34,6 +34,8 @@ static bool __initdata opt_msr_sc_pv = true;
+ static bool __initdata opt_msr_sc_hvm = true;
+ static bool __initdata opt_rsb_pv = true;
+ static bool __initdata opt_rsb_hvm = true;
++static int8_t __initdata opt_md_clear_pv = -1;
++static int8_t __initdata opt_md_clear_hvm = -1;
+
+ /* Cmdline controls for Xen's speculative settings. */
+ static enum ind_thunk {
+@@ -58,6 +60,9 @@ paddr_t __read_mostly l1tf_addr_mask, __read_mostly l1tf_safe_maddr;
+ static bool __initdata cpu_has_bug_l1tf;
+ static unsigned int __initdata l1d_maxphysaddr;
+
++static bool __initdata cpu_has_bug_msbds_only; /* => minimal HT impact. */
++static bool __initdata cpu_has_bug_mds; /* Any other M{LP,SB,FB}DS combination. */
++
+ static int __init parse_bti(const char *s)
+ {
+ const char *ss;
+@@ -150,6 +155,8 @@ static int __init parse_spec_ctrl(const char *s)
+ disable_common:
+ opt_rsb_pv = false;
+ opt_rsb_hvm = false;
++ opt_md_clear_pv = 0;
++ opt_md_clear_hvm = 0;
+
+ opt_thunk = THUNK_JMP;
+ opt_ibrs = 0;
+@@ -172,11 +179,13 @@ static int __init parse_spec_ctrl(const char *s)
+ {
+ opt_msr_sc_pv = val;
+ opt_rsb_pv = val;
++ opt_md_clear_pv = val;
+ }
+ else if ( (val = parse_boolean("hvm", s, ss)) >= 0 )
+ {
+ opt_msr_sc_hvm = val;
+ opt_rsb_hvm = val;
++ opt_md_clear_hvm = val;
+ }
+ else if ( (val = parse_boolean("msr-sc", s, ss)) >= 0 )
+ {
+@@ -188,6 +197,12 @@ static int __init parse_spec_ctrl(const char *s)
+ opt_rsb_pv = val;
+ opt_rsb_hvm = val;
+ }
++ else if ( (val = parse_boolean("md-clear", s, ss)) >= 0 ||
++ (val = parse_boolean("mds", s, ss)) >= 0 )
++ {
++ opt_md_clear_pv = val;
++ opt_md_clear_hvm = val;
++ }
+
+ /* Xen's speculative sidechannel mitigation settings. */
+ else if ( !strncmp(s, "bti-thunk=", 10) )
+@@ -373,7 +388,7 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ "\n");
+
+ /* Settings for Xen's protection, irrespective of guests. */
+- printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s, Other:%s%s\n",
++ printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s, Other:%s%s%s\n",
+ thunk == THUNK_NONE ? "N/A" :
+ thunk == THUNK_RETPOLINE ? "RETPOLINE" :
+ thunk == THUNK_LFENCE ? "LFENCE" :
+@@ -383,7 +398,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ !boot_cpu_has(X86_FEATURE_SSBD) ? "" :
+ (default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-",
+ opt_ibpb ? " IBPB" : "",
+- opt_l1d_flush ? " L1D_FLUSH" : "");
++ opt_l1d_flush ? " L1D_FLUSH" : "",
++ opt_md_clear_pv || opt_md_clear_hvm ? " VERW" : "");
+
+ /* L1TF diagnostics, printed if vulnerable or PV shadowing is in use. */
+ if ( cpu_has_bug_l1tf || opt_pv_l1tf_hwdom || opt_pv_l1tf_domu )
+@@ -786,6 +802,107 @@ static __init void l1tf_calculations(uint64_t caps)
+ : (3ul << (paddr_bits - 2))));
+ }
+
++/* Calculate whether this CPU is vulnerable to MDS. */
++static __init void mds_calculations(uint64_t caps)
++{
++ /* MDS is only known to affect Intel Family 6 processors at this time. */
++ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
++ boot_cpu_data.x86 != 6 )
++ return;
++
++ /* Any processor advertising MDS_NO should not be vulnerable to MDS. */
++ if ( caps & ARCH_CAPS_MDS_NO )
++ return;
++
++ switch ( boot_cpu_data.x86_model )
++ {
++ /*
++ * Core processors since at least Nehalem are vulnerable.
++ */
++ case 0x1f: /* Auburndale / Havendale */
++ case 0x1e: /* Nehalem */
++ case 0x1a: /* Nehalem EP */
++ case 0x2e: /* Nehalem EX */
++ case 0x25: /* Westmere */
++ case 0x2c: /* Westmere EP */
++ case 0x2f: /* Westmere EX */
++ case 0x2a: /* SandyBridge */
++ case 0x2d: /* SandyBridge EP/EX */
++ case 0x3a: /* IvyBridge */
++ case 0x3e: /* IvyBridge EP/EX */
++ case 0x3c: /* Haswell */
++ case 0x3f: /* Haswell EX/EP */
++ case 0x45: /* Haswell D */
++ case 0x46: /* Haswell H */
++ case 0x3d: /* Broadwell */
++ case 0x47: /* Broadwell H */
++ case 0x4f: /* Broadwell EP/EX */
++ case 0x56: /* Broadwell D */
++ case 0x4e: /* Skylake M */
++ case 0x5e: /* Skylake D */
++ cpu_has_bug_mds = true;
++ break;
++
++ /*
++ * Some Core processors have per-stepping vulnerability.
++ */
++ case 0x55: /* Skylake-X / Cascade Lake */
++ if ( boot_cpu_data.x86_mask <= 5 )
++ cpu_has_bug_mds = true;
++ break;
++
++ case 0x8e: /* Kaby / Coffee / Whiskey Lake M */
++ if ( boot_cpu_data.x86_mask <= 0xb )
++ cpu_has_bug_mds = true;
++ break;
++
++ case 0x9e: /* Kaby / Coffee / Whiskey Lake D */
++ if ( boot_cpu_data.x86_mask <= 0xc )
++ cpu_has_bug_mds = true;
++ break;
++
++ /*
++ * Very old and very new Atom processors are not vulnerable.
++ */
++ case 0x1c: /* Pineview */
++ case 0x26: /* Lincroft */
++ case 0x27: /* Penwell */
++ case 0x35: /* Cloverview */
++ case 0x36: /* Cedarview */
++ case 0x7a: /* Goldmont */
++ break;
++
++ /*
++ * Middling Atom processors are vulnerable to just the Store Buffer
++ * aspect.
++ */
++ case 0x37: /* Baytrail / Valleyview (Silvermont) */
++ case 0x4a: /* Merrifield */
++ case 0x4c: /* Cherrytrail / Braswell */
++ case 0x4d: /* Avaton / Rangely (Silvermont) */
++ case 0x5a: /* Moorefield */
++ case 0x5d:
++ case 0x65:
++ case 0x6e:
++ case 0x75:
++ /*
++ * Knights processors (which are based on the Silvermont/Airmont
++ * microarchitecture) are similarly only affected by the Store Buffer
++ * aspect.
++ */
++ case 0x57: /* Knights Landing */
++ case 0x85: /* Knights Mill */
++ cpu_has_bug_msbds_only = true;
++ break;
++
++ default:
++ printk("Unrecognised CPU model %#x - assuming vulnerable to MDS\n",
++ boot_cpu_data.x86_model);
++ cpu_has_bug_mds = true;
++ break;
++ }
++}
++
+ void __init init_speculation_mitigations(void)
+ {
+ enum ind_thunk thunk = THUNK_DEFAULT;
+@@ -978,6 +1095,47 @@ void __init init_speculation_mitigations(void)
+ "enabled. Please assess your configuration and choose an\n"
+ "explicit 'smt=<bool>' setting. See XSA-273.\n");
+
++ mds_calculations(caps);
++
++ /*
++ * By default, enable PV and HVM mitigations on MDS-vulnerable hardware.
++ * This will only be a token effort for MLPDS/MFBDS when HT is enabled,
++ * but it is somewhat better than nothing.
++ */
++ if ( opt_md_clear_pv == -1 )
++ opt_md_clear_pv = ((cpu_has_bug_mds || cpu_has_bug_msbds_only) &&
++ boot_cpu_has(X86_FEATURE_MD_CLEAR));
++ if ( opt_md_clear_hvm == -1 )
++ opt_md_clear_hvm = ((cpu_has_bug_mds || cpu_has_bug_msbds_only) &&
++ boot_cpu_has(X86_FEATURE_MD_CLEAR));
++
++ /*
++ * Enable MDS defences as applicable. The PV blocks need using all the
++ * time, and the Idle blocks need using if either PV or HVM defences are
++ * used.
++ *
++ * HVM is more complicated. The MD_CLEAR microcode extends L1D_FLUSH with
++ * equivalent semantics to avoid needing to perform both flushes on the
++ * HVM path. The HVM blocks don't need activating if our hypervisor told
++ * us it was handling L1D_FLUSH, or we are using L1D_FLUSH ourselves.
++ */
++ if ( opt_md_clear_pv )
++ setup_force_cpu_cap(X86_FEATURE_SC_VERW_PV);
++ if ( opt_md_clear_pv || opt_md_clear_hvm )
++ setup_force_cpu_cap(X86_FEATURE_SC_VERW_IDLE);
++ if ( opt_md_clear_hvm && !(caps & ARCH_CAPS_SKIP_L1DFL) && !opt_l1d_flush )
++ setup_force_cpu_cap(X86_FEATURE_SC_VERW_HVM);
++
++ /*
++ * Warn the user if they are on MLPDS/MFBDS-vulnerable hardware with HT
++ * active and no explicit SMT choice.
++ */
++ if ( opt_smt == -1 && cpu_has_bug_mds && hw_smt_enabled )
++ warning_add(
++ "Booted on MLPDS/MFBDS-vulnerable hardware with SMT/Hyperthreading\n"
++ "enabled. Mitigations will not be fully effective. Please\n"
++ "choose an explicit smt=<bool> setting. See XSA-297.\n");
++
+ print_details(thunk, caps);
+
+ /*
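Taken together, the series leaves the administrator with the spec-ctrl
controls documented in xsa297-4.11-7.patch. Illustrative Xen command line
fragments follow; the option names come from the hunks above, while the
bootloader syntax surrounding them will vary by setup:

    # Default behaviour: VERW flushing is chosen automatically on
    # MDS-vulnerable hardware advertising MD_CLEAR.

    # Opt out of the VERW blocks while keeping the other mitigations:
    spec-ctrl=md-clear=0

    # The same, via the compatibility alias:
    spec-ctrl=mds=0

    # Force VERW flushing even if MD_CLEAR is not enumerated, e.g. when
    # microcode is updated but the feature bit is not exposed:
    spec-ctrl=md-clear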