pkgsrc-Changes-HG archive


[pkgsrc/trunk]: pkgsrc/sysutils Add upstream security patches for XSA286, XSA345, XSA346, XSA347



details:   https://anonhg.NetBSD.org/pkgsrc/rev/9f4b6f8c4952
branches:  trunk
changeset: 440898:9f4b6f8c4952
user:      bouyer <bouyer@pkgsrc.org>
date:      Wed Oct 21 09:03:05 2020 +0000

description:
Add upstream security patches for XSA286, XSA345, XSA346, XSA347.
Bump PKGREVISION.
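
For context, the usual pkgsrc steps behind a change like this: copy each
upstream advisory patch into the package's patches/ directory (where it
gets a $NetBSD$ RCS tag at the top, as seen in the diff below),
regenerate the patch checksums recorded in distinfo, and bump
PKGREVISION so binary packages get rebuilt.  A rough sketch, assuming a
pkgsrc checkout under /usr/pkgsrc; the source location of the downloaded
advisory patch is a placeholder:

    $ cd /usr/pkgsrc/sysutils/xenkernel411
    # add the upstream patch under the pkgsrc naming convention
    $ cp ~/xsa286-4.11.patch patches/patch-XSA286
    # regenerate the SHA1 patch checksums recorded in distinfo
    $ make makepatchsum
    # finally, edit Makefile and bump PKGREVISION (here: 2 -> 3)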

diffstat:

 sysutils/xenkernel411/Makefile             |    4 +-
 sysutils/xenkernel411/distinfo             |    6 +-
 sysutils/xenkernel411/patches/patch-XSA286 |  778 +++++++++++++++++++++++++++++
 sysutils/xenkernel411/patches/patch-XSA345 |  413 +++++++++++++++
 sysutils/xenkernel411/patches/patch-XSA346 |  261 +++++++++
 sysutils/xenkernel411/patches/patch-XSA347 |  134 ++++
 sysutils/xenkernel413/Makefile             |    4 +-
 sysutils/xenkernel413/distinfo             |    6 +-
 sysutils/xenkernel413/patches/patch-XSA286 |  716 ++++++++++++++++++++++++++
 sysutils/xenkernel413/patches/patch-XSA345 |  413 +++++++++++++++
 sysutils/xenkernel413/patches/patch-XSA346 |  256 +++++++++
 sysutils/xenkernel413/patches/patch-XSA347 |  282 ++++++++++
 12 files changed, 3267 insertions(+), 6 deletions(-)

diffs (truncated from 3364 to 300 lines):

diff -r a479194365a5 -r 9f4b6f8c4952 sysutils/xenkernel411/Makefile
--- a/sysutils/xenkernel411/Makefile    Wed Oct 21 09:01:52 2020 +0000
+++ b/sysutils/xenkernel411/Makefile    Wed Oct 21 09:03:05 2020 +0000
@@ -1,8 +1,8 @@
-# $NetBSD: Makefile,v 1.16 2020/10/02 13:00:48 bouyer Exp $
+# $NetBSD: Makefile,v 1.17 2020/10/21 09:03:05 bouyer Exp $
 
 VERSION=       4.11.4
 #keep >= 1 if we have security patches
-PKGREVISION=   2
+PKGREVISION=   3
 DISTNAME=      xen-${VERSION}
 PKGNAME=       xenkernel411-${VERSION}
 CATEGORIES=    sysutils
diff -r a479194365a5 -r 9f4b6f8c4952 sysutils/xenkernel411/distinfo
--- a/sysutils/xenkernel411/distinfo    Wed Oct 21 09:01:52 2020 +0000
+++ b/sysutils/xenkernel411/distinfo    Wed Oct 21 09:03:05 2020 +0000
@@ -1,10 +1,11 @@
-$NetBSD: distinfo,v 1.14 2020/10/02 13:00:48 bouyer Exp $
+$NetBSD: distinfo,v 1.15 2020/10/21 09:03:05 bouyer Exp $
 
 SHA1 (xen411/xen-4.11.4.tar.gz) = 6c8cdf441621c14dc5345196b48df6982c060c4f
 RMD160 (xen411/xen-4.11.4.tar.gz) = 49819fcd1de3985d4dea370be962548c862f2933
 SHA512 (xen411/xen-4.11.4.tar.gz) = 8383f0b369fa08c8ecfdd68f902a2aaad140146a183131c50c020fe04c2f1e829c219b9bd9923fa8f1c180e1e7c6e73d0d68b7015fc39fd3b7f59e55c680cedb
 Size (xen411/xen-4.11.4.tar.gz) = 25184564 bytes
 SHA1 (patch-Config.mk) = 9372a09efd05c9fbdbc06f8121e411fcb7c7ba65
+SHA1 (patch-XSA286) = c7c5cc192be821721919cc035515ddf55d2c0658
 SHA1 (patch-XSA317) = 3a3e7bf8f115bebaf56001afcf68c2bd501c00a5
 SHA1 (patch-XSA319) = 4954bdc849666e1c735c3281256e4850c0594ee8
 SHA1 (patch-XSA320) = 38d84a2ded4ccacee455ba64eb3b369e5661fbfd
@@ -19,6 +20,9 @@
 SHA1 (patch-XSA342) = a61c4e28a8c8219b88e3bab534a109b2b29e2cc3
 SHA1 (patch-XSA343) = 239822636b474ebb62aa455cfdbd9853c4fb342f
 SHA1 (patch-XSA344) = cf7184ac9263b418305c6a7fbae7b163b233b4bc
+SHA1 (patch-XSA345) = 14ab754703af1045b2d049de1c6ba1c5baca5d81
+SHA1 (patch-XSA346) = c1962c037c5ab62c2f7e9a558c4565331c981be0
+SHA1 (patch-XSA347) = f3f98a794584d5d4321b95c2b1b9c88821fa567e
 SHA1 (patch-xen_Makefile) = 465388d80de414ca3bb84faefa0f52d817e423a6
 SHA1 (patch-xen_Rules.mk) = c743dc63f51fc280d529a7d9e08650292c171dac
 SHA1 (patch-xen_arch_x86_Rules.mk) = 0bedfc53a128a87b6a249ae04fbdf6a053bfb70b
diff -r a479194365a5 -r 9f4b6f8c4952 sysutils/xenkernel411/patches/patch-XSA286
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel411/patches/patch-XSA286        Wed Oct 21 09:03:05 2020 +0000
@@ -0,0 +1,778 @@
+$NetBSD: patch-XSA286,v 1.1 2020/10/21 09:03:05 bouyer Exp $
+
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86: don't allow clearing of TF_kernel_mode for other than 64-bit PV
+
+The flag is really only meant for 64-bit PV; both HVM and 32-bit PV
+tell kernel from user mode based on CPL/RPL. Remove the
+all-question-marks comment and, to be on the safe side, also suppress
+clearing for 32-bit PV (this isn't a fast path after all).
+
+Remove the no longer necessary is_pv_32bit_*() checks from
+sh_update_cr3() and sh_walk_guest_tables(). Note that
+shadow_one_bit_disable() already assumes the new behavior.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Wei Liu <wei.liu2@citrix.com>
+Acked-by: George Dunlap <george.dunlap@citrix.com>
+Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
+index 35857dbe86..1d0ac81c5b 100644
+--- xen/arch/x86/domain.c.orig
++++ xen/arch/x86/domain.c
+@@ -804,9 +804,15 @@ int arch_set_info_guest(
+ 
+     v->fpu_initialised = !!(flags & VGCF_I387_VALID);
+ 
+-    v->arch.flags &= ~TF_kernel_mode;
+-    if ( (flags & VGCF_in_kernel) || is_hvm_domain(d)/*???*/ )
+-        v->arch.flags |= TF_kernel_mode;
++    v->arch.flags |= TF_kernel_mode;
++    if ( unlikely(!(flags & VGCF_in_kernel)) &&
++         /*
++          * TF_kernel_mode is only allowed to be clear for 64-bit PV. See
++          * update_cr3(), sh_update_cr3(), sh_walk_guest_tables(), and
++          * shadow_one_bit_disable() for why that is.
++          */
++         !is_hvm_domain(d) && !is_pv_32bit_domain(d) )
++        v->arch.flags &= ~TF_kernel_mode;
+ 
+     v->arch.vgc_flags = flags;
+ 
+diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
+index 8ab343d16e..a2ebb4943f 100644
+--- xen/arch/x86/mm/shadow/multi.c.orig
++++ xen/arch/x86/mm/shadow/multi.c
+@@ -180,7 +180,7 @@ sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
+                              INVALID_MFN, v->arch.paging.shadow.gl3e);
+ #else /* 32 or 64 */
+     const struct domain *d = v->domain;
+-    mfn_t root_mfn = ((v->arch.flags & TF_kernel_mode) || is_pv_32bit_domain(d)
++    mfn_t root_mfn = (v->arch.flags & TF_kernel_mode
+                       ? pagetable_get_mfn(v->arch.guest_table)
+                       : pagetable_get_mfn(v->arch.guest_table_user));
+     void *root_map = map_domain_page(root_mfn);
+@@ -4018,7 +4018,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
+                   v, (unsigned long)pagetable_get_pfn(v->arch.guest_table));
+ 
+ #if GUEST_PAGING_LEVELS == 4
+-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) )
++    if ( !(v->arch.flags & TF_kernel_mode) )
+         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
+     else
+ #endif
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/mm: split L4 and L3 parts of the walk out of do_page_walk()
+
+At least the L3 one is going to be re-used by a subsequent patch, and
+splitting out the L4 one as well then seems only natural.
+
+This is part of XSA-286.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
+index 3bd157967a..e73daa55e4 100644
+--- xen/arch/x86/x86_64/mm.c.orig
++++ xen/arch/x86/x86_64/mm.c
+@@ -44,26 +44,47 @@ unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
+ 
+ l2_pgentry_t *compat_idle_pg_table_l2;
+ 
+-void *do_page_walk(struct vcpu *v, unsigned long addr)
++static l4_pgentry_t page_walk_get_l4e(pagetable_t root, unsigned long addr)
+ {
+-    unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+-    l4_pgentry_t l4e, *l4t;
+-    l3_pgentry_t l3e, *l3t;
+-    l2_pgentry_t l2e, *l2t;
+-    l1_pgentry_t l1e, *l1t;
++    unsigned long mfn = pagetable_get_pfn(root);
++    l4_pgentry_t *l4t, l4e;
+ 
+-    if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
+-        return NULL;
++    if ( !is_canonical_address(addr) )
++        return l4e_empty();
+ 
+     l4t = map_domain_page(_mfn(mfn));
+     l4e = l4t[l4_table_offset(addr)];
+     unmap_domain_page(l4t);
++
++    return l4e;
++}
++
++static l3_pgentry_t page_walk_get_l3e(pagetable_t root, unsigned long addr)
++{
++    l4_pgentry_t l4e = page_walk_get_l4e(root, addr);
++    l3_pgentry_t *l3t, l3e;
++
+     if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+-        return NULL;
++        return l3e_empty();
+ 
+     l3t = map_l3t_from_l4e(l4e);
+     l3e = l3t[l3_table_offset(addr)];
+     unmap_domain_page(l3t);
++
++    return l3e;
++}
++
++void *do_page_walk(struct vcpu *v, unsigned long addr)
++{
++    l3_pgentry_t l3e;
++    l2_pgentry_t l2e, *l2t;
++    l1_pgentry_t l1e, *l1t;
++    unsigned long mfn;
++
++    if ( !is_pv_vcpu(v) )
++        return NULL;
++
++    l3e = page_walk_get_l3e(v->arch.guest_table, addr);
+     mfn = l3e_get_pfn(l3e);
+     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
+         return NULL;
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/mm: check page types in do_page_walk()
+
+To guarantee validity of the page table entries read, the pages must be
+transiently locked and their types validated. Note that guest use of
+linear page tables is intentionally not taken into account here, as
+ordinary data (guest stacks) can't possibly live inside page tables.
+
+This is part of XSA-286.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
+index e73daa55e4..1ca9547d68 100644
+--- xen/arch/x86/x86_64/mm.c.orig
++++ xen/arch/x86/x86_64/mm.c
+@@ -46,15 +46,29 @@ l2_pgentry_t *compat_idle_pg_table_l2;
+ 
+ static l4_pgentry_t page_walk_get_l4e(pagetable_t root, unsigned long addr)
+ {
+-    unsigned long mfn = pagetable_get_pfn(root);
+-    l4_pgentry_t *l4t, l4e;
++    mfn_t mfn = pagetable_get_mfn(root);
++    /* current's root page table can't disappear under our feet. */
++    bool need_lock = !mfn_eq(mfn, pagetable_get_mfn(current->arch.guest_table));
++    struct page_info *pg;
++    l4_pgentry_t l4e = l4e_empty();
+ 
+     if ( !is_canonical_address(addr) )
+         return l4e_empty();
+ 
+-    l4t = map_domain_page(_mfn(mfn));
+-    l4e = l4t[l4_table_offset(addr)];
+-    unmap_domain_page(l4t);
++    pg = mfn_to_page(mfn);
++    if ( need_lock && !page_lock(pg) )
++        return l4e_empty();
++
++    if ( (pg->u.inuse.type_info & PGT_type_mask) == PGT_l4_page_table )
++    {
++        l4_pgentry_t *l4t = map_domain_page(mfn);
++
++        l4e = l4t[l4_table_offset(addr)];
++        unmap_domain_page(l4t);
++    }
++
++    if ( need_lock )
++        page_unlock(pg);
+ 
+     return l4e;
+ }
+@@ -62,14 +76,26 @@ static l4_pgentry_t page_walk_get_l4e(pagetable_t root, unsigned long addr)
+ static l3_pgentry_t page_walk_get_l3e(pagetable_t root, unsigned long addr)
+ {
+     l4_pgentry_t l4e = page_walk_get_l4e(root, addr);
+-    l3_pgentry_t *l3t, l3e;
++    mfn_t mfn = l4e_get_mfn(l4e);
++    struct page_info *pg;
++    l3_pgentry_t l3e = l3e_empty();
+ 
+     if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+         return l3e_empty();
+ 
+-    l3t = map_l3t_from_l4e(l4e);
+-    l3e = l3t[l3_table_offset(addr)];
+-    unmap_domain_page(l3t);
++    pg = mfn_to_page(mfn);
++    if ( !page_lock(pg) )
++        return l3e_empty();
++
++    if ( (pg->u.inuse.type_info & PGT_type_mask) == PGT_l3_page_table )
++    {
++        l3_pgentry_t *l3t = map_domain_page(mfn);
++
++        l3e = l3t[l3_table_offset(addr)];
++        unmap_domain_page(l3t);
++    }
++
++    page_unlock(pg);
+ 
+     return l3e;
+ }
+@@ -77,44 +103,67 @@ static l3_pgentry_t page_walk_get_l3e(pagetable_t root, unsigned long addr)
+ void *do_page_walk(struct vcpu *v, unsigned long addr)
+ {
+     l3_pgentry_t l3e;
+-    l2_pgentry_t l2e, *l2t;
+-    l1_pgentry_t l1e, *l1t;
+-    unsigned long mfn;
++    l2_pgentry_t l2e = l2e_empty();
++    l1_pgentry_t l1e = l1e_empty();
++    mfn_t mfn;
++    struct page_info *pg;
+ 
+     if ( !is_pv_vcpu(v) )
+         return NULL;
+ 
+     l3e = page_walk_get_l3e(v->arch.guest_table, addr);
+-    mfn = l3e_get_pfn(l3e);
+-    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
++    mfn = l3e_get_mfn(l3e);
++    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+         return NULL;
+     if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
+     {
+-        mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
++        mfn = mfn_add(mfn, PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1)));
+         goto ret;
+     }
+ 
+-    l2t = map_domain_page(_mfn(mfn));
+-    l2e = l2t[l2_table_offset(addr)];
+-    unmap_domain_page(l2t);
+-    mfn = l2e_get_pfn(l2e);
+-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
++    pg = mfn_to_page(mfn);
++    if ( !page_lock(pg) )


