author    Ariadne Conill <ariadne@dereferenced.org>  2021-11-23 17:18:59 -0600
committer Ariadne Conill <ariadne@dereferenced.org>  2021-11-23 17:18:59 -0600
commit    eec5c3cc5459ef0d71433da9eb718b581916e099 (patch)
tree      5fe08b640002ccf4a80eb41563de8cc0ca3caa56 /main
parent    18c837d2db29d004fccc3e563143e1c73bee52c2 (diff)
main/xen: add missing XSA patches
Diffstat (limited to 'main')
-rw-r--r--  main/xen/xsa388-4.14-1.patch  174
-rw-r--r--  main/xen/xsa388-4.14-2.patch   36
-rw-r--r--  main/xen/xsa389-4.13.patch    180
3 files changed, 390 insertions, 0 deletions
diff --git a/main/xen/xsa388-4.14-1.patch b/main/xen/xsa388-4.14-1.patch
new file mode 100644
index 0000000000..f76f2d56b6
--- /dev/null
+++ b/main/xen/xsa388-4.14-1.patch
@@ -0,0 +1,174 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/PoD: deal with misaligned GFNs
+
+Users of XENMEM_decrease_reservation and XENMEM_populate_physmap aren't
+required to pass in order-aligned GFN values. (While I consider this
+bogus, I don't think we can fix this there, as that might break existing
+code, e.g. Linux'es swiotlb, which - while affecting PV only - until
+recently had been enforcing only page alignment on the original
+allocation.) Only non-PoD code paths (guest_physmap_{add,remove}_page(),
+p2m_set_entry()) look to be dealing with this properly (in part by being
+implemented inefficiently, handling every 4k page separately).
+
+Introduce wrappers taking care of splitting the incoming request into
+aligned chunks, without putting much effort in trying to determine the
+largest possible chunk at every iteration.
+
+Also "handle" p2m_set_entry() failure for non-order-0 requests by
+crashing the domain in one more place. Alongside putting a log message
+there, also add one to the other similar path.
+
+Note regarding locking: This is left in the actual worker functions on
+the assumption that callers aren't guaranteed atomicity wrt acting on
+multiple pages at a time. For mis-aligned GFNs gfn_lock() wouldn't have
+locked the correct GFN range anyway, if it didn't simply resolve to
+p2m_lock(), and for well-behaved callers there continues to be only a
+single iteration, i.e. behavior is unchanged for them. (FTAOD pulling
+out just pod_lock() into p2m_pod_decrease_reservation() would result in
+a lock order violation.)
+
+This is CVE-2021-28704 and CVE-2021-28707 / part of XSA-388.
+
+Fixes: 3c352011c0d3 ("x86/PoD: shorten certain operations on higher order ranges")
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -495,7 +495,7 @@ p2m_pod_zero_check_superpage(struct p2m_
+
+
+ /*
+- * This function is needed for two reasons:
++ * This pair of functions is needed for two reasons:
+ * + To properly handle clearing of PoD entries
+ * + To "steal back" memory being freed for the PoD cache, rather than
+ * releasing it.
+@@ -503,8 +503,8 @@ p2m_pod_zero_check_superpage(struct p2m_
+ * Once both of these functions have been completed, we can return and
+ * allow decrease_reservation() to handle everything else.
+ */
+-unsigned long
+-p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
++static unsigned long
++decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
+ {
+ unsigned long ret = 0, i, n;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+@@ -551,8 +551,10 @@ p2m_pod_decrease_reservation(struct doma
+ * All PoD: Mark the whole region invalid and tell caller
+ * we're done.
+ */
+- if ( p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
+- p2m->default_access) )
++ int rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
++ p2m->default_access);
++
++ if ( rc )
+ {
+ /*
+ * If this fails, we can't tell how much of the range was changed.
+@@ -560,7 +562,12 @@ p2m_pod_decrease_reservation(struct doma
+ * impossible.
+ */
+ if ( order != 0 )
++ {
++ printk(XENLOG_G_ERR
++ "%pd: marking GFN %#lx (order %u) as non-PoD failed: %d\n",
++ d, gfn_x(gfn), order, rc);
+ domain_crash(d);
++ }
+ goto out_unlock;
+ }
+ ret = 1UL << order;
+@@ -667,6 +674,22 @@ out_unlock:
+ return ret;
+ }
+
++unsigned long
++p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
++{
++ unsigned long left = 1UL << order, ret = 0;
++ unsigned int chunk_order = find_first_set_bit(gfn_x(gfn) | left);
++
++ do {
++ ret += decrease_reservation(d, gfn, chunk_order);
++
++ left -= 1UL << chunk_order;
++ gfn = gfn_add(gfn, 1UL << chunk_order);
++ } while ( left );
++
++ return ret;
++}
++
+ void p2m_pod_dump_data(struct domain *d)
+ {
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+@@ -1266,19 +1289,15 @@ remap_and_retry:
+ return true;
+ }
+
+-
+-int
+-guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
+- unsigned int order)
++static int
++mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
++ unsigned int order)
+ {
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ gfn_t gfn = _gfn(gfn_l);
+ unsigned long i, n, pod_count = 0;
+ int rc = 0;
+
+- if ( !paging_mode_translate(d) )
+- return -EINVAL;
+-
+ gfn_lock(p2m, gfn, order);
+
+ P2M_DEBUG("mark pod gfn=%#lx\n", gfn_l);
+@@ -1316,12 +1335,44 @@ guest_physmap_mark_populate_on_demand(st
+ BUG_ON(p2m->pod.entry_count < 0);
+ pod_unlock(p2m);
+ }
++ else if ( order )
++ {
++ /*
++ * If this failed, we can't tell how much of the range was changed.
++ * Best to crash the domain.
++ */
++ printk(XENLOG_G_ERR
++ "%pd: marking GFN %#lx (order %u) as PoD failed: %d\n",
++ d, gfn_l, order, rc);
++ domain_crash(d);
++ }
+
+ out:
+ gfn_unlock(p2m, gfn, order);
+
+ return rc;
+ }
++
++int
++guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
++ unsigned int order)
++{
++ unsigned long left = 1UL << order;
++ unsigned int chunk_order = find_first_set_bit(gfn | left);
++ int rc;
++
++ if ( !paging_mode_translate(d) )
++ return -EINVAL;
++
++ do {
++ rc = mark_populate_on_demand(d, gfn, chunk_order);
++
++ left -= 1UL << chunk_order;
++ gfn += 1UL << chunk_order;
++ } while ( !rc && left );
++
++ return rc;
++}
+
+ void p2m_pod_init(struct p2m_domain *p2m)
+ {
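
A minimal standalone sketch of the wrapper idea introduced by the patch above, assuming nothing beyond plain C: walk_chunks() and lowest_set_bit() are names invented for this illustration (lowest_set_bit() stands in for Xen's find_first_set_bit()), and the GFN is just an unsigned long. It shows how a possibly misaligned (gfn, order) request is split into naturally aligned chunks of one fixed size, the largest power of two dividing both the starting GFN and the total page count, which is the loop shape of the new p2m_pod_decrease_reservation() and guest_physmap_mark_populate_on_demand() wrappers.

#include <stdio.h>

/* Stand-in for Xen's find_first_set_bit(): index of the lowest set bit. */
static unsigned int lowest_set_bit(unsigned long x)
{
    return __builtin_ctzl(x);
}

static void walk_chunks(unsigned long gfn, unsigned int order)
{
    unsigned long left = 1UL << order;
    /*
     * One fixed chunk size for the whole walk: the largest power of two
     * dividing both the starting GFN and the total page count.
     */
    unsigned int chunk_order = lowest_set_bit(gfn | left);

    do {
        printf("chunk: gfn=%#lx order=%u (%lu pages)\n",
               gfn, chunk_order, 1UL << chunk_order);

        gfn  += 1UL << chunk_order;
        left -= 1UL << chunk_order;
    } while ( left );
}

int main(void)
{
    walk_chunks(0x1003, 4);  /* misaligned: sixteen order-0 chunks */
    walk_chunks(0x2000, 9);  /* 2M-aligned: a single order-9 chunk */
    return 0;
}

Built with gcc, the first call prints sixteen single-page chunks while the second finishes in one iteration, matching the commit message's note that no effort is spent finding the largest possible chunk at every iteration.
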
diff --git a/main/xen/xsa388-4.14-2.patch b/main/xen/xsa388-4.14-2.patch
new file mode 100644
index 0000000000..2f8cc881f0
--- /dev/null
+++ b/main/xen/xsa388-4.14-2.patch
@@ -0,0 +1,36 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/PoD: handle intermediate page orders in p2m_pod_cache_add()
+
+p2m_pod_decrease_reservation() may pass pages to the function which
+aren't 4k, 2M, or 1G. Handle all intermediate orders as well, to avoid
+hitting the BUG() at the switch() statement's "default" case.
+
+This is CVE-2021-28708 / part of XSA-388.
+
+Fixes: 3c352011c0d3 ("x86/PoD: shorten certain operations on higher order ranges")
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -111,15 +111,13 @@ p2m_pod_cache_add(struct p2m_domain *p2m
+ /* Then add to the appropriate populate-on-demand list. */
+ switch ( order )
+ {
+- case PAGE_ORDER_1G:
+- for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
++ case PAGE_ORDER_2M ... PAGE_ORDER_1G:
++ for ( i = 0; i < (1UL << order); i += 1UL << PAGE_ORDER_2M )
+ page_list_add_tail(page + i, &p2m->pod.super);
+ break;
+- case PAGE_ORDER_2M:
+- page_list_add_tail(page, &p2m->pod.super);
+- break;
+- case PAGE_ORDER_4K:
+- page_list_add_tail(page, &p2m->pod.single);
++ case PAGE_ORDER_4K ... PAGE_ORDER_2M - 1:
++ for ( i = 0; i < (1UL << order); i += 1UL << PAGE_ORDER_4K )
++ page_list_add_tail(page + i, &p2m->pod.single);
+ break;
+ default:
+ BUG();
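
A similarly hedged, standalone sketch of the classification the adjusted switch() now performs, assuming x86's PAGE_ORDER_* values (0, 9 and 18 for 4k, 2M and 1G) and using the same GCC case-range extension as the patch; classify() is an invented name. Orders of 2M and above are split into 2M-sized list entries, everything below 2M into 4k entries, so intermediate orders no longer fall through to BUG():

#include <stdio.h>

#define PAGE_ORDER_4K  0
#define PAGE_ORDER_2M  9
#define PAGE_ORDER_1G 18

static void classify(unsigned int order)
{
    switch ( order )
    {
    case PAGE_ORDER_2M ... PAGE_ORDER_1G:       /* GCC case-range extension */
        printf("order %2u: %lu entries on the 2M (super) list\n",
               order, (1UL << order) >> PAGE_ORDER_2M);
        break;
    case PAGE_ORDER_4K ... PAGE_ORDER_2M - 1:
        printf("order %2u: %lu entries on the 4k (single) list\n",
               order, 1UL << order);
        break;
    default:
        printf("order %2u: rejected\n", order);
        break;
    }
}

int main(void)
{
    for ( unsigned int order = 0; order <= PAGE_ORDER_1G + 1; ++order )
        classify(order);
    return 0;
}
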
diff --git a/main/xen/xsa389-4.13.patch b/main/xen/xsa389-4.13.patch
new file mode 100644
index 0000000000..10a8a9b9ed
--- /dev/null
+++ b/main/xen/xsa389-4.13.patch
@@ -0,0 +1,180 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/P2M: deal with partial success of p2m_set_entry()
+
+M2P and PoD stats need to remain in sync with P2M; if an update succeeds
+only partially, respective adjustments need to be made. If updates get
+made before the call, they may also need undoing upon complete failure
+(i.e. including the single-page case).
+
+Log-dirty state would better also be kept in sync.
+
+Note that the change to set_typed_p2m_entry() may not be strictly
+necessary (due to the order restriction enforced near the top of the
+function), but is being kept here to be on the safe side.
+
+This is CVE-2021-28705 and CVE-2021-28709 / XSA-389.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m.c
++++ b/xen/arch/x86/mm/p2m.c
+@@ -781,6 +781,7 @@ p2m_remove_page(struct p2m_domain *p2m,
+ gfn_t gfn = _gfn(gfn_l);
+ p2m_type_t t;
+ p2m_access_t a;
++ int rc;
+
+ /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
+ if ( !paging_mode_translate(p2m->domain) )
+@@ -812,8 +813,27 @@ p2m_remove_page(struct p2m_domain *p2m,
+ set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+ }
+ }
+- return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
+- p2m->default_access);
++ rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
++ p2m->default_access);
++ if ( likely(!rc) || !mfn_valid(_mfn(mfn)) )
++ return rc;
++
++ /*
++ * The operation may have partially succeeded. For the failed part we need
++ * to undo the M2P update and, out of precaution, mark the pages dirty
++ * again.
++ */
++ for ( i = 0; i < (1UL << page_order); ++i )
++ {
++ p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL);
++ if ( !p2m_is_hole(t) && !p2m_is_special(t) && !p2m_is_shared(t) )
++ {
++ set_gpfn_from_mfn(mfn + i, gfn_l + i);
++ paging_mark_pfn_dirty(p2m->domain, _pfn(gfn_l + i));
++ }
++ }
++
++ return rc;
+ }
+
+ int
+@@ -1002,13 +1022,8 @@ guest_physmap_add_entry(struct domain *d
+
+ /* Now, actually do the two-way mapping */
+ rc = p2m_set_entry(p2m, gfn, mfn, page_order, t, p2m->default_access);
+- if ( rc == 0 )
++ if ( likely(!rc) )
+ {
+- pod_lock(p2m);
+- p2m->pod.entry_count -= pod_count;
+- BUG_ON(p2m->pod.entry_count < 0);
+- pod_unlock(p2m);
+-
+ if ( !p2m_is_grant(t) )
+ {
+ for ( i = 0; i < (1UL << page_order); i++ )
+@@ -1016,6 +1031,42 @@ guest_physmap_add_entry(struct domain *d
+ gfn_x(gfn_add(gfn, i)));
+ }
+ }
++ else
++ {
++ /*
++ * The operation may have partially succeeded. For the successful part
++ * we need to update M2P and dirty state, while for the failed part we
++ * may need to adjust PoD stats as well as undo the earlier M2P update.
++ */
++ for ( i = 0; i < (1UL << page_order); ++i )
++ {
++ omfn = p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, NULL, NULL);
++ if ( p2m_is_pod(ot) )
++ {
++ BUG_ON(!pod_count);
++ --pod_count;
++ }
++ else if ( mfn_eq(omfn, mfn_add(mfn, i)) && ot == t &&
++ a == p2m->default_access && !p2m_is_grant(t) )
++ {
++ set_gpfn_from_mfn(mfn_x(omfn), gfn_x(gfn) + i);
++ paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn) + i));
++ }
++ else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
++ {
++ ASSERT(mfn_valid(omfn));
++ set_gpfn_from_mfn(mfn_x(omfn), gfn_x(gfn) + i);
++ }
++ }
++ }
++
++ if ( pod_count )
++ {
++ pod_lock(p2m);
++ p2m->pod.entry_count -= pod_count;
++ BUG_ON(p2m->pod.entry_count < 0);
++ pod_unlock(p2m);
++ }
+
+ out:
+ p2m_unlock(p2m);
+@@ -1307,6 +1358,49 @@ static int set_typed_p2m_entry(struct do
+ return 0;
+ }
+ }
++
++ P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
++ rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
++ if ( unlikely(rc) )
++ {
++ gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
++ gfn_l, order, rc, mfn_x(mfn));
++
++ /*
++ * The operation may have partially succeeded. For the successful part
++ * we need to update PoD stats, M2P, and dirty state.
++ */
++ if ( order != PAGE_ORDER_4K )
++ {
++ unsigned long i;
++
++ for ( i = 0; i < (1UL << order); ++i )
++ {
++ p2m_type_t t;
++ mfn_t cmfn = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
++ NULL, NULL);
++
++ if ( !mfn_eq(cmfn, mfn_add(mfn, i)) || t != gfn_p2mt ||
++ a != access )
++ continue;
++
++ if ( p2m_is_ram(ot) )
++ {
++ ASSERT(mfn_valid(mfn_add(omfn, i)));
++ set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
++ }
++#ifdef CONFIG_HVM
++ else if ( p2m_is_pod(ot) )
++ {
++ pod_lock(p2m);
++ BUG_ON(!p2m->pod.entry_count);
++ --p2m->pod.entry_count;
++ pod_unlock(p2m);
++ }
++#endif
++ }
++ }
++ }
+ else if ( p2m_is_ram(ot) )
+ {
+ unsigned long i;
+@@ -1317,12 +1411,6 @@ static int set_typed_p2m_entry(struct do
+ set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
+ }
+ }
+-
+- P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
+- rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
+- if ( rc )
+- gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
+- gfn_l, order, rc, mfn_x(mfn));
+ #ifdef CONFIG_HVM
+ else if ( p2m_is_pod(ot) )
+ {
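
To close, a toy model of the reconciliation pattern this last patch applies, assuming nothing about Xen: fwd plays the role of the P2M, aux the role of the M2P/PoD bookkeeping, and all names are invented for the sketch. After a bulk update that may have succeeded only partially, every slot is re-read and the bookkeeping is adjusted only where the update really landed, which is the same shape as the new error paths in p2m_remove_page(), guest_physmap_add_entry() and set_typed_p2m_entry().

#include <stdio.h>

#define N       8
#define INVALID (~0u)

static unsigned int fwd[N];   /* primary table, standing in for the P2M */
static unsigned int aux[N];   /* bookkeeping kept in sync, like the M2P */

/* Bulk-update fwd[0..N) to 'val', but give up after 'fail_at' slots. */
static int bulk_set(unsigned int val, unsigned int fail_at)
{
    for ( unsigned int i = 0; i < N; ++i )
    {
        if ( i == fail_at )
            return -1;                          /* partial success */
        fwd[i] = val;
    }
    return 0;
}

int main(void)
{
    for ( unsigned int i = 0; i < N; ++i )
        fwd[i] = aux[i] = INVALID;

    if ( bulk_set(42, 5) )
    {
        /* Reconcile: aux follows fwd only where the update actually landed. */
        for ( unsigned int i = 0; i < N; ++i )
            aux[i] = (fwd[i] == 42) ? 42 : INVALID;
    }

    for ( unsigned int i = 0; i < N; ++i )
        printf("slot %u: fwd=%s aux=%s\n", i,
               fwd[i] == 42 ? "mapped" : "invalid",
               aux[i] == 42 ? "mapped" : "invalid");
    return 0;
}
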