Diffstat (limited to 'main/xen/xsa321-4.11-7.patch')
-rw-r--r-- main/xen/xsa321-4.11-7.patch | 164
1 file changed, 164 insertions, 0 deletions
diff --git a/main/xen/xsa321-4.11-7.patch b/main/xen/xsa321-4.11-7.patch
new file mode 100644
index 00000000000..65c4a4c84db
--- /dev/null
+++ b/main/xen/xsa321-4.11-7.patch
@@ -0,0 +1,164 @@
+From: <security@xenproject.org>
+Subject: x86/ept: flush cache when modifying PTEs and sharing page tables
+
+Modifications made to the page tables by EPT code need to be written
+to memory when the page tables are shared with the IOMMU, as Intel
+IOMMUs can be non-coherent and thus require changes to be written to
+memory in order to be visible to the IOMMU.
+
+In order to achieve this, make sure data is written back to memory
+after writing an EPT entry when the recalc bit is not set in
+atomic_write_ept_entry. If that bit is set, the entry will be
+adjusted and atomic_write_ept_entry will be called a second time
+without the recalc bit set. Note that when splitting a super page the
+new tables resulting from the split should also be written back.
+
+Failure to do so can allow devices behind the IOMMU access to the
+stale super page, or cause coherency issues as changes made by the
+processor to the page tables are not visible to the IOMMU.
+
+This allows removing the VT-d specific iommu_pte_flush helper, since
+the cache write-back is now performed by atomic_write_ept_entry, and
+hence iommu_iotlb_flush can be used to flush the IOMMU TLB. The newly
+used method (iommu_iotlb_flush) can result in fewer flushes, since it
+might sometimes legitimately be called with 0 flags, in which case it
+becomes a no-op.
+
+This is part of XSA-321.
+
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -90,6 +90,19 @@ static int atomic_write_ept_entry(ept_en
+
+ write_atomic(&entryptr->epte, new.epte);
+
++ /*
++ * The recalc field on the EPT is used to signal either that a
++ * recalculation of the EMT field is required (which doesn't affect the
++ * IOMMU), or a type change. Type changes can only be between ram_rw,
++ * logdirty and ioreq_server: changes to/from logdirty won't work well with
++ * an IOMMU anyway, as IOMMU #PFs are not synchronous and will lead to
++ * aborts, and changes to/from ioreq_server are already fully flushed
++ * before returning to guest context (see
++ * XEN_DMOP_map_mem_type_to_ioreq_server).
++ */
++ if ( !new.recalc && iommu_hap_pt_share )
++ iommu_sync_cache(entryptr, sizeof(*entryptr));
++
+ if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
+ put_page(mfn_to_page(_mfn(oldmfn)));
+
+@@ -319,6 +332,9 @@ static bool_t ept_split_super_page(struc
+ break;
+ }
+
++ if ( iommu_hap_pt_share )
++ iommu_sync_cache(table, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
++
+ unmap_domain_page(table);
+
+ /* Even failed we should install the newly allocated ept page. */
+@@ -378,6 +394,9 @@ static int ept_next_level(struct p2m_dom
+ if ( !next )
+ return GUEST_TABLE_MAP_FAILED;
+
++ if ( iommu_hap_pt_share )
++ iommu_sync_cache(next, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
++
+ rc = atomic_write_ept_entry(ept_entry, e, next_level);
+ ASSERT(rc == 0);
+ }
+@@ -875,7 +894,7 @@ out:
+ need_modify_vtd_table )
+ {
+ if ( iommu_hap_pt_share )
+- rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
++ rc = iommu_flush_iotlb(d, gfn, vtd_pte_present, 1u << order);
+ else
+ {
+ if ( iommu_flags )
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -612,10 +612,8 @@ static int __must_check iommu_flush_all(
+ return rc;
+ }
+
+-static int __must_check iommu_flush_iotlb(struct domain *d,
+- unsigned long gfn,
+- bool_t dma_old_pte_present,
+- unsigned int page_count)
++int iommu_flush_iotlb(struct domain *d, unsigned long gfn,
++ bool dma_old_pte_present, unsigned int page_count)
+ {
+ struct domain_iommu *hd = dom_iommu(d);
+ struct acpi_drhd_unit *drhd;
+@@ -1880,53 +1878,6 @@ static int __must_check intel_iommu_unma
+ return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
+ }
+
+-int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+- int order, int present)
+-{
+- struct acpi_drhd_unit *drhd;
+- struct iommu *iommu = NULL;
+- struct domain_iommu *hd = dom_iommu(d);
+- bool_t flush_dev_iotlb;
+- int iommu_domid;
+- int rc = 0;
+-
+- iommu_sync_cache(pte, sizeof(struct dma_pte));
+-
+- for_each_drhd_unit ( drhd )
+- {
+- iommu = drhd->iommu;
+- if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
+- continue;
+-
+- flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
+- iommu_domid= domain_iommu_domid(d, iommu);
+- if ( iommu_domid == -1 )
+- continue;
+-
+- rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+- (paddr_t)gfn << PAGE_SHIFT_4K,
+- order, !present, flush_dev_iotlb);
+- if ( rc > 0 )
+- {
+- iommu_flush_write_buffer(iommu);
+- rc = 0;
+- }
+- }
+-
+- if ( unlikely(rc) )
+- {
+- if ( !d->is_shutting_down && printk_ratelimit() )
+- printk(XENLOG_ERR VTDPREFIX
+- " d%d: IOMMU pages flush failed: %d\n",
+- d->domain_id, rc);
+-
+- if ( !is_hardware_domain(d) )
+- domain_crash(d);
+- }
+-
+- return rc;
+-}
+-
+ static int __init vtd_ept_page_compatible(struct iommu *iommu)
+ {
+ u64 ept_cap, vtd_cap = iommu->cap;
+--- a/xen/include/asm-x86/iommu.h
++++ b/xen/include/asm-x86/iommu.h
+@@ -87,8 +87,9 @@ int iommu_setup_hpet_msi(struct msi_desc
+
+ /* While VT-d specific, this must get declared in a generic header. */
+ int adjust_vtd_irq_affinities(void);
+-int __must_check iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+- int order, int present);
++int __must_check iommu_flush_iotlb(struct domain *d, unsigned long gfn,
++ bool dma_old_pte_present,
++ unsigned int page_count);
+ bool_t iommu_supports_eim(void);
+ int iommu_enable_x2apic_IR(void);
+ void iommu_disable_x2apic_IR(void);
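
A minimal sketch, assuming the Xen 4.11 definitions of ept_entry_t, write_atomic(), iommu_sync_cache() and iommu_hap_pt_share that the hunks above rely on, of the EPT write path this patch establishes; the helper name ept_write_and_sync is hypothetical, the code is distilled from the p2m-ept.c hunk for illustration only, and it is not part of the patch nor buildable standalone.

/* Illustrative sketch of the post-patch write path in atomic_write_ept_entry(). */
static void ept_write_and_sync(ept_entry_t *entryptr, ept_entry_t new)
{
    /* Publish the new entry atomically so the CPU's EPT walker sees it. */
    write_atomic(&entryptr->epte, new.epte);

    /*
     * When the EPT tables are shared with a (possibly non-coherent) VT-d
     * IOMMU and no recalc pass will rewrite this entry, write the cache
     * line back so the IOMMU reads the updated PTE from memory rather
     * than a stale copy.
     */
    if ( !new.recalc && iommu_hap_pt_share )
        iommu_sync_cache(entryptr, sizeof(*entryptr));
}

The super-page split and intermediate-table hunks above follow the same pattern: a freshly written table is pushed to memory with iommu_sync_cache() before the IOMMU can reach it.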