pkgsrc-Changes-HG archive


[pkgsrc/trunk]: pkgsrc/sysutils/xenkernel413 Apply relevant Xen Security Advi...



details:   https://anonhg.NetBSD.org/pkgsrc/rev/141f0472c5e2
branches:  trunk
changeset: 381034:141f0472c5e2
user:      bouyer <bouyer%pkgsrc.org@localhost>
date:      Fri Jun 24 13:47:37 2022 +0000

description:
Apply the relevant Xen Security Advisories 385 through 402, and 404 (403 has
not yet been released). Bump PKGREVISION.

diffstat:

 sysutils/xenkernel413/Makefile             |     3 +-
 sysutils/xenkernel413/distinfo             |    12 +-
 sysutils/xenkernel413/patches/patch-XSA385 |    80 +
 sysutils/xenkernel413/patches/patch-XSA388 |   212 +
 sysutils/xenkernel413/patches/patch-XSA389 |   182 +
 sysutils/xenkernel413/patches/patch-XSA397 |   100 +
 sysutils/xenkernel413/patches/patch-XSA398 |    58 +
 sysutils/xenkernel413/patches/patch-XSA399 |    47 +
 sysutils/xenkernel413/patches/patch-XSA400 |  3149 ++++++++++++++++++++++++++++
 sysutils/xenkernel413/patches/patch-XSA401 |   343 +++
 sysutils/xenkernel413/patches/patch-XSA402 |   743 ++++++
 sysutils/xenkernel413/patches/patch-XSA404 |   485 ++++
 12 files changed, 5412 insertions(+), 2 deletions(-)

diffs (truncated from 5477 to 300 lines):

diff -r fee538ceee17 -r 141f0472c5e2 sysutils/xenkernel413/Makefile
--- a/sysutils/xenkernel413/Makefile    Fri Jun 24 13:46:46 2022 +0000
+++ b/sysutils/xenkernel413/Makefile    Fri Jun 24 13:47:37 2022 +0000
@@ -1,8 +1,9 @@
-# $NetBSD: Makefile,v 1.15 2021/09/21 13:20:47 bouyer Exp $
+# $NetBSD: Makefile,v 1.16 2022/06/24 13:47:37 bouyer Exp $
 
 VERSION=       4.13.4
 DISTNAME=      xen-${VERSION}
 PKGNAME=       xenkernel413-${VERSION}
+PKGREVISION=   1
 CATEGORIES=    sysutils
 MASTER_SITES=  https://downloads.xenproject.org/release/xen/${VERSION}/
 DIST_SUBDIR=   xen413
diff -r fee538ceee17 -r 141f0472c5e2 sysutils/xenkernel413/distinfo
--- a/sysutils/xenkernel413/distinfo    Fri Jun 24 13:46:46 2022 +0000
+++ b/sysutils/xenkernel413/distinfo    Fri Jun 24 13:47:37 2022 +0000
@@ -1,9 +1,19 @@
-$NetBSD: distinfo,v 1.13 2021/10/26 11:20:25 nia Exp $
+$NetBSD: distinfo,v 1.14 2022/06/24 13:47:37 bouyer Exp $
 
 BLAKE2s (xen413/xen-4.13.4.tar.gz) = b88ad78f8716c98253a8d3aae7622c1e3214efbc80c008518ae0104ef0eed661
 SHA512 (xen413/xen-4.13.4.tar.gz) = 1f6d67e0270b10be45b6444322ced791b44df09a3a51e0fe690f5ad76cd80d35115efc93056e99f73b4e550178e0e780c9ee827ced04b09caf12fdf34d9a9b71
 Size (xen413/xen-4.13.4.tar.gz) = 39055744 bytes
 SHA1 (patch-Config.mk) = 9372a09efd05c9fbdbc06f8121e411fcb7c7ba65
+SHA1 (patch-XSA385) = 5245aeb10dcfb9c97792f024942718b03c451cf5
+SHA1 (patch-XSA388) = c9d33d0770ee634aefa33805c17ccebea2879643
+SHA1 (patch-XSA389) = 04f6ec483f5fe1d8a47ce689a0a883871bda5214
+SHA1 (patch-XSA397) = 4aebc96ec37dc74e67d86d90abdf86b2516d0120
+SHA1 (patch-XSA398) = 9185899eef317ebbff8a0f1aa611c49a5e1c87e1
+SHA1 (patch-XSA399) = c9ab4473654810ca2701dfc38c26e91a0d7f2eb5
+SHA1 (patch-XSA400) = 90c8fcc1dd06e1a5c7667bc1a69145602ac692e9
+SHA1 (patch-XSA401) = 404d6899a161407618e2ab37e18d8f9e7ec61b1d
+SHA1 (patch-XSA402) = 92e585f077e15a3c67ba68044086ce8e0fc5379a
+SHA1 (patch-XSA404) = d562d5379673d0d23d0496438e00e0f131e7ac73
 SHA1 (patch-fixpvh) = eec14f19d0adc6d96035d6c711270bb375304660
 SHA1 (patch-xen_Makefile) = 465388d80de414ca3bb84faefa0f52d817e423a6
 SHA1 (patch-xen_Rules.mk) = c743dc63f51fc280d529a7d9e08650292c171dac
diff -r fee538ceee17 -r 141f0472c5e2 sysutils/xenkernel413/patches/patch-XSA385
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel413/patches/patch-XSA385        Fri Jun 24 13:47:37 2022 +0000
@@ -0,0 +1,80 @@
+$NetBSD: patch-XSA385,v 1.1 2022/06/24 13:47:37 bouyer Exp $
+
+From: Julien Grall <jgrall%amazon.com@localhost>
+Subject: xen/page_alloc: Harden assign_pages()
+
+domain_tot_pages() and d->max_pages are 32-bit values. While the order
+should always be quite small, it would still be possible to overflow
+if domain_tot_pages() is near to (2^32 - 1).
+
+As this code may be called by a guest via XENMEM_increase_reservation
+and XENMEM_populate_physmap, we want to make sure the guest is not going
+to be able to allocate more than it is allowed.
+
+Rework the allocation check to avoid any possible overflow. While the
+check domain_tot_pages() < d->max_pages should technically not be
+necessary, it is probably best to have it to catch any possible
+inconsistencies in the future.
+
+This is CVE-2021-28706 / XSA-385.
+
+Signed-off-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Roger Pau Monné <roger.pau%citrix.com@localhost>
+
+--- xen/common/grant_table.c.orig
++++ xen/common/grant_table.c
+@@ -2286,7 +2286,8 @@ gnttab_transfer(
+          * pages when it is dying.
+          */
+         if ( unlikely(e->is_dying) ||
+-             unlikely(e->tot_pages >= e->max_pages) )
++             unlikely(e->tot_pages >= e->max_pages) ||
++             unlikely(!(e->tot_pages + 1)) )
+         {
+             spin_unlock(&e->page_alloc_lock);
+ 
+@@ -2295,8 +2296,8 @@ gnttab_transfer(
+                          e->domain_id);
+             else
+                 gdprintk(XENLOG_INFO,
+-                         "Transferee d%d has no headroom (tot %u, max %u)\n",
+-                         e->domain_id, e->tot_pages, e->max_pages);
++                         "Transferee %pd has no headroom (tot %u, max %u)\n",
++                         e, e->tot_pages, e->max_pages);
+ 
+             gop.status = GNTST_general_error;
+             goto unlock_and_copyback;
+--- xen/common/page_alloc.c.orig
++++ xen/common/page_alloc.c
+@@ -2276,16 +2276,25 @@ int assign_pages(
+ 
+     if ( !(memflags & MEMF_no_refcount) )
+     {
+-        if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
++        unsigned int nr = 1u << order;
++
++        if ( unlikely(d->tot_pages > d->max_pages) )
++        {
++            gprintk(XENLOG_INFO, "Inconsistent allocation for %pd: %u > %u\n",
++                    d, d->tot_pages, d->max_pages);
++            rc = -EPERM;
++            goto out;
++        }
++
++        if ( unlikely(nr > d->max_pages - d->tot_pages) )
+         {
+-            gprintk(XENLOG_INFO, "Over-allocation for domain %u: "
+-                    "%u > %u\n", d->domain_id,
+-                    d->tot_pages + (1 << order), d->max_pages);
++            gprintk(XENLOG_INFO, "Over-allocation for %pd: %Lu > %u\n",
++                    d, d->tot_pages + 0ull + nr, d->max_pages);
+             rc = -E2BIG;
+             goto out;
+         }
+ 
+-        if ( unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) )
++        if ( unlikely(domain_adjust_tot_pages(d, nr) == nr) )
+             get_knownalive_domain(d);
+     }
+ 
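
For illustration only: a minimal standalone C sketch (an assumption of mine,
not part of the Xen sources or of this patch) of the overflow-safe headroom
check that the XSA-385 hunk above introduces in assign_pages(). The uint32_t
values stand in for d->tot_pages and d->max_pages, which are 32-bit counters
in Xen.

/* sketch.c -- illustrates the XSA-385 check rework; not Xen code */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Old-style check: tot + nr is computed first and can wrap around 2^32,
 * so a huge request may slip past the limit. */
static bool naive_over_allocation(uint32_t tot, uint32_t max, uint32_t nr)
{
    return (uint32_t)(tot + nr) > max;
}

/* Reworked check: reject inconsistent accounting first, then compare the
 * request against the remaining headroom; nothing can overflow. */
static bool hardened_over_allocation(uint32_t tot, uint32_t max, uint32_t nr)
{
    if ( tot > max )            /* inconsistent state: refuse */
        return true;
    return nr > max - tot;      /* headroom comparison, cannot wrap */
}

int main(void)
{
    uint32_t tot = UINT32_MAX - 2, max = UINT32_MAX - 1, nr = 1u << 4;

    /* The naive form wraps and wrongly allows the request ... */
    printf("naive:    %s\n",
           naive_over_allocation(tot, max, nr) ? "rejected" : "allowed");
    /* ... while the reworked form rejects it, as assign_pages() now does. */
    printf("hardened: %s\n",
           hardened_over_allocation(tot, max, nr) ? "rejected" : "allowed");
    return 0;
}

With these example values the naive form prints "allowed" because tot + nr
wraps to a small number, while the hardened form prints "rejected".
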
diff -r fee538ceee17 -r 141f0472c5e2 sysutils/xenkernel413/patches/patch-XSA388
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel413/patches/patch-XSA388        Fri Jun 24 13:47:37 2022 +0000
@@ -0,0 +1,212 @@
+$NetBSD: patch-XSA388,v 1.1 2022/06/24 13:47:37 bouyer Exp $
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: x86/PoD: deal with misaligned GFNs
+
+Users of XENMEM_decrease_reservation and XENMEM_populate_physmap aren't
+required to pass in order-aligned GFN values. (While I consider this
+bogus, I don't think we can fix this there, as that might break existing
+code, e.g Linux'es swiotlb, which - while affecting PV only - until
+recently had been enforcing only page alignment on the original
+allocation.) Only non-PoD code paths (guest_physmap_{add,remove}_page(),
+p2m_set_entry()) look to be dealing with this properly (in part by being
+implemented inefficiently, handling every 4k page separately).
+
+Introduce wrappers taking care of splitting the incoming request into
+aligned chunks, without putting much effort in trying to determine the
+largest possible chunk at every iteration.
+
+Also "handle" p2m_set_entry() failure for non-order-0 requests by
+crashing the domain in one more place. Alongside putting a log message
+there, also add one to the other similar path.
+
+Note regarding locking: This is left in the actual worker functions on
+the assumption that callers aren't guaranteed atomicity wrt acting on
+multiple pages at a time. For mis-aligned GFNs gfn_lock() wouldn't have
+locked the correct GFN range anyway, if it didn't simply resolve to
+p2m_lock(), and for well-behaved callers there continues to be only a
+single iteration, i.e. behavior is unchanged for them. (FTAOD pulling
+out just pod_lock() into p2m_pod_decrease_reservation() would result in
+a lock order violation.)
+
+This is CVE-2021-28704 and CVE-2021-28707 / part of XSA-388.
+
+Fixes: 3c352011c0d3 ("x86/PoD: shorten certain operations on higher order ranges")
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Roger Pau Monné <roger.pau%citrix.com@localhost>
+
+--- xen/arch/x86/mm/p2m-pod.c.orig
++++ xen/arch/x86/mm/p2m-pod.c
+@@ -495,7 +495,7 @@ p2m_pod_zero_check_superpage(struct p2m_
+ 
+ 
+ /*
+- * This function is needed for two reasons:
++ * This pair of functions is needed for two reasons:
+  * + To properly handle clearing of PoD entries
+  * + To "steal back" memory being freed for the PoD cache, rather than
+  *   releasing it.
+@@ -503,8 +503,8 @@ p2m_pod_zero_check_superpage(struct p2m_
+  * Once both of these functions have been completed, we can return and
+  * allow decrease_reservation() to handle everything else.
+  */
+-unsigned long
+-p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
++static unsigned long
++decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
+ {
+     unsigned long ret = 0, i, n;
+     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+@@ -551,8 +551,10 @@ p2m_pod_decrease_reservation(struct doma
+          * All PoD: Mark the whole region invalid and tell caller
+          * we're done.
+          */
+-        if ( p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
+-                           p2m->default_access) )
++        int rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
++                               p2m->default_access);
++
++        if ( rc )
+         {
+             /*
+              * If this fails, we can't tell how much of the range was changed.
+@@ -560,7 +562,12 @@ p2m_pod_decrease_reservation(struct doma
+              * impossible.
+              */
+             if ( order != 0 )
++            {
++                printk(XENLOG_G_ERR
++                       "%pd: marking GFN %#lx (order %u) as non-PoD failed: %d\n",
++                       d, gfn_x(gfn), order, rc);
+                 domain_crash(d);
++            }
+             goto out_unlock;
+         }
+         ret = 1UL << order;
+@@ -667,6 +674,22 @@ out_unlock:
+     return ret;
+ }
+ 
++unsigned long
++p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
++{
++    unsigned long left = 1UL << order, ret = 0;
++    unsigned int chunk_order = find_first_set_bit(gfn_x(gfn) | left);
++
++    do {
++        ret += decrease_reservation(d, gfn, chunk_order);
++
++        left -= 1UL << chunk_order;
++        gfn = gfn_add(gfn, 1UL << chunk_order);
++    } while ( left );
++
++    return ret;
++}
++
+ void p2m_pod_dump_data(struct domain *d)
+ {
+     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+@@ -1266,19 +1289,15 @@ remap_and_retry:
+     return true;
+ }
+ 
+-
+-int
+-guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
+-                                      unsigned int order)
++static int
++mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
++                        unsigned int order)
+ {
+     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+     gfn_t gfn = _gfn(gfn_l);
+     unsigned long i, n, pod_count = 0;
+     int rc = 0;
+ 
+-    if ( !paging_mode_translate(d) )
+-        return -EINVAL;
+-
+     gfn_lock(p2m, gfn, order);
+ 
+     P2M_DEBUG("mark pod gfn=%#lx\n", gfn_l);
+@@ -1316,12 +1335,44 @@ guest_physmap_mark_populate_on_demand(st
+         BUG_ON(p2m->pod.entry_count < 0);
+         pod_unlock(p2m);
+     }
++    else if ( order )
++    {
++        /*
++         * If this failed, we can't tell how much of the range was changed.
++         * Best to crash the domain.
++         */
++        printk(XENLOG_G_ERR
++               "%pd: marking GFN %#lx (order %u) as PoD failed: %d\n",
++               d, gfn_l, order, rc);
++        domain_crash(d);
++    }
+ 
+ out:
+     gfn_unlock(p2m, gfn, order);
+ 
+     return rc;
+ }
++
++int
++guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
++                                      unsigned int order)
++{
++    unsigned long left = 1UL << order;
++    unsigned int chunk_order = find_first_set_bit(gfn | left);
++    int rc;
++
++    if ( !paging_mode_translate(d) )
++        return -EINVAL;
++
++    do {
++        rc = mark_populate_on_demand(d, gfn, chunk_order);
++
++        left -= 1UL << chunk_order;
++        gfn += 1UL << chunk_order;
++    } while ( !rc && left );
++
++    return rc;
++}
+ 
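
For illustration only: a standalone C sketch (an assumption of mine, not part
of the Xen sources or of this patch) of the chunk-splitting loop that the
XSA-388 wrappers above add around decrease_reservation() and
mark_populate_on_demand(). find_first_set_bit() is a local stand-in for the
Xen helper of the same name (index of the lowest set bit).

/* split.c -- illustrates the XSA-388 chunking of misaligned GFN ranges */
#include <stdio.h>

static unsigned int find_first_set_bit(unsigned long x)
{
    return (unsigned int)__builtin_ctzl(x);    /* x is never 0 here */
}

/* Walk a (gfn, order) request in naturally aligned chunks, as the wrappers
 * do; the chunk order is chosen once, without trying to maximise the chunk
 * size at every iteration. */
static void split_request(unsigned long gfn, unsigned int order)
{
    unsigned long left = 1UL << order;
    /* The lowest set bit of (gfn | left) gives the largest power-of-two
     * chunk that divides both the starting GFN and the requested count. */
    unsigned int chunk_order = find_first_set_bit(gfn | left);

    do {
        printf("chunk: gfn %#lx, order %u (%lu pages)\n",
               gfn, chunk_order, 1UL << chunk_order);

        left -= 1UL << chunk_order;
        gfn += 1UL << chunk_order;
    } while ( left );
}

int main(void)
{
    /* A misaligned order-4 request at GFN 0x1004 is handled as four
     * naturally aligned order-2 chunks (0x1004, 0x1008, 0x100c, 0x1010). */
    split_request(0x1004, 4);
    return 0;
}
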


