pkgsrc-Changes-HG archive


[pkgsrc/pkgsrc-2020Q3]: pkgsrc/sysutils Pullup ticket #6332 - requested by bo...



details:   https://anonhg.NetBSD.org/pkgsrc/rev/e34b3eafdac4
branches:  pkgsrc-2020Q3
changeset: 440245:e34b3eafdac4
user:      bsiegert <bsiegert%pkgsrc.org@localhost>
date:      Sun Oct 04 20:44:32 2020 +0000

description:
Pullup ticket #6332 - requested by bouyer
sysutils/xenkernel411: security fix
sysutils/xenkernel413: security fix

Revisions pulled up:
- sysutils/xenkernel411/Makefile                                1.16
- sysutils/xenkernel411/distinfo                                1.14
- sysutils/xenkernel411/patches/patch-XSA333                    1.1
- sysutils/xenkernel411/patches/patch-XSA336                    1.1
- sysutils/xenkernel411/patches/patch-XSA337                    1.1
- sysutils/xenkernel411/patches/patch-XSA338                    1.1
- sysutils/xenkernel411/patches/patch-XSA339                    1.1
- sysutils/xenkernel411/patches/patch-XSA340                    1.1
- sysutils/xenkernel411/patches/patch-XSA342                    1.1
- sysutils/xenkernel411/patches/patch-XSA343                    1.1
- sysutils/xenkernel411/patches/patch-XSA344                    1.1
- sysutils/xenkernel413/Makefile                                1.5
- sysutils/xenkernel413/distinfo                                1.3
- sysutils/xenkernel413/patches/patch-XSA333                    1.1
- sysutils/xenkernel413/patches/patch-XSA334                    1.1
- sysutils/xenkernel413/patches/patch-XSA336                    1.1
- sysutils/xenkernel413/patches/patch-XSA337                    1.1
- sysutils/xenkernel413/patches/patch-XSA338                    1.1
- sysutils/xenkernel413/patches/patch-XSA339                    1.1
- sysutils/xenkernel413/patches/patch-XSA340                    1.1
- sysutils/xenkernel413/patches/patch-XSA342                    1.1
- sysutils/xenkernel413/patches/patch-XSA343                    1.1
- sysutils/xenkernel413/patches/patch-XSA344                    1.1

---
   Module Name: pkgsrc
   Committed By:        bouyer
   Date:                Thu Oct  1 12:41:19 UTC 2020

   Modified Files:
        pkgsrc/sysutils/xenkernel413: Makefile distinfo
   Added Files:
        pkgsrc/sysutils/xenkernel413/patches: patch-XSA333 patch-XSA334
            patch-XSA336 patch-XSA337 patch-XSA338 patch-XSA339 patch-XSA340
            patch-XSA342 patch-XSA343 patch-XSA344

   Log Message:
   Add upstream fixes for
   XSA333, XSA334, XSA336, XSA337, XSA338, XSA339, XSA340, XSA342, XSA343, XSA344
   bump PKGREVISION

---
   Module Name: pkgsrc
   Committed By:        bouyer
   Date:                Fri Oct  2 13:00:48 UTC 2020

   Modified Files:
        pkgsrc/sysutils/xenkernel411: Makefile distinfo
   Added Files:
        pkgsrc/sysutils/xenkernel411/patches: patch-XSA333 patch-XSA336
            patch-XSA337 patch-XSA338 patch-XSA339 patch-XSA340 patch-XSA342
            patch-XSA343 patch-XSA344

   Log Message:
   Add upstream fixes for
   XSA333, XSA336, XSA337, XSA338, XSA339, XSA340, XSA342, XSA343, XSA344
   bump PKGREVISION

diffstat:

 sysutils/xenkernel411/Makefile             |    4 +-
 sysutils/xenkernel411/distinfo             |   11 +-
 sysutils/xenkernel411/patches/patch-XSA333 |   41 +
 sysutils/xenkernel411/patches/patch-XSA336 |  258 ++++++++
 sysutils/xenkernel411/patches/patch-XSA337 |  276 +++++++++
 sysutils/xenkernel411/patches/patch-XSA338 |   44 +
 sysutils/xenkernel411/patches/patch-XSA339 |   78 ++
 sysutils/xenkernel411/patches/patch-XSA340 |   67 ++
 sysutils/xenkernel411/patches/patch-XSA342 |  147 ++++
 sysutils/xenkernel411/patches/patch-XSA343 |  863 ++++++++++++++++++++++++++++
 sysutils/xenkernel411/patches/patch-XSA344 |  337 +++++++++++
 sysutils/xenkernel413/Makefile             |    4 +-
 sysutils/xenkernel413/distinfo             |   12 +-
 sysutils/xenkernel413/patches/patch-XSA333 |   41 +
 sysutils/xenkernel413/patches/patch-XSA334 |   53 +
 sysutils/xenkernel413/patches/patch-XSA336 |  285 +++++++++
 sysutils/xenkernel413/patches/patch-XSA337 |  270 ++++++++
 sysutils/xenkernel413/patches/patch-XSA338 |   44 +
 sysutils/xenkernel413/patches/patch-XSA339 |   78 ++
 sysutils/xenkernel413/patches/patch-XSA340 |   67 ++
 sysutils/xenkernel413/patches/patch-XSA342 |  147 ++++
 sysutils/xenkernel413/patches/patch-XSA343 |  888 +++++++++++++++++++++++++++++
 sysutils/xenkernel413/patches/patch-XSA344 |  335 ++++++++++
 23 files changed, 4344 insertions(+), 6 deletions(-)

diffs (truncated from 4473 to 300 lines):

diff -r d519df20e241 -r e34b3eafdac4 sysutils/xenkernel411/Makefile
--- a/sysutils/xenkernel411/Makefile    Sun Oct 04 18:31:37 2020 +0000
+++ b/sysutils/xenkernel411/Makefile    Sun Oct 04 20:44:32 2020 +0000
@@ -1,8 +1,8 @@
-# $NetBSD: Makefile,v 1.15 2020/08/24 10:35:35 bouyer Exp $
+# $NetBSD: Makefile,v 1.15.2.1 2020/10/04 20:44:32 bsiegert Exp $
 
 VERSION=       4.11.4
 #keep >= 1 if we have security patches
-PKGREVISION=   1
+PKGREVISION=   2
 DISTNAME=      xen-${VERSION}
 PKGNAME=       xenkernel411-${VERSION}
 CATEGORIES=    sysutils
diff -r d519df20e241 -r e34b3eafdac4 sysutils/xenkernel411/distinfo
--- a/sysutils/xenkernel411/distinfo    Sun Oct 04 18:31:37 2020 +0000
+++ b/sysutils/xenkernel411/distinfo    Sun Oct 04 20:44:32 2020 +0000
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.13 2020/08/24 10:35:35 bouyer Exp $
+$NetBSD: distinfo,v 1.13.2.1 2020/10/04 20:44:32 bsiegert Exp $
 
 SHA1 (xen411/xen-4.11.4.tar.gz) = 6c8cdf441621c14dc5345196b48df6982c060c4f
 RMD160 (xen411/xen-4.11.4.tar.gz) = 49819fcd1de3985d4dea370be962548c862f2933
@@ -10,6 +10,15 @@
 SHA1 (patch-XSA320) = 38d84a2ded4ccacee455ba64eb3b369e5661fbfd
 SHA1 (patch-XSA321) = 1f15b2e3c0f7f2d7335879d3a83c1557ac9de806
 SHA1 (patch-XSA328) = a9b02c183a5dbfb6c0fe50824f18896fcab4a9e9
+SHA1 (patch-XSA333) = 47660b70b2c998436587600bb9a25c2f494afa49
+SHA1 (patch-XSA336) = da0a8bb05877917c75a28155cf2dd2f66d11ef9c
+SHA1 (patch-XSA337) = f323b4c596f8a7b2b3d57dd799f70cf62743369f
+SHA1 (patch-XSA338) = 0adcebec2c25a389155a10de84bf999ff2e5425d
+SHA1 (patch-XSA339) = 4f97076bda8150d1b1c68f6000d563f3c3314c02
+SHA1 (patch-XSA340) = 23888acfe25fc82ff085fa9acfbb36c156a15bc3
+SHA1 (patch-XSA342) = a61c4e28a8c8219b88e3bab534a109b2b29e2cc3
+SHA1 (patch-XSA343) = 239822636b474ebb62aa455cfdbd9853c4fb342f
+SHA1 (patch-XSA344) = cf7184ac9263b418305c6a7fbae7b163b233b4bc
 SHA1 (patch-xen_Makefile) = 465388d80de414ca3bb84faefa0f52d817e423a6
 SHA1 (patch-xen_Rules.mk) = c743dc63f51fc280d529a7d9e08650292c171dac
 SHA1 (patch-xen_arch_x86_Rules.mk) = 0bedfc53a128a87b6a249ae04fbdf6a053bfb70b
diff -r d519df20e241 -r e34b3eafdac4 sysutils/xenkernel411/patches/patch-XSA333
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel411/patches/patch-XSA333        Sun Oct 04 20:44:32 2020 +0000
@@ -0,0 +1,41 @@
+$NetBSD: patch-XSA333,v 1.1.2.2 2020/10/04 20:44:32 bsiegert Exp $
+
+From: Andrew Cooper <andrew.cooper3%citrix.com@localhost>
+Subject: x86/pv: Handle the Intel-specific MSR_MISC_ENABLE correctly
+
+This MSR doesn't exist on AMD hardware, and switching away from the safe
+functions in the common MSR path was an erroneous change.
+
+Partially revert the change.
+
+This is XSA-333.
+
+Fixes: 4fdc932b3cc ("x86/Intel: drop another 32-bit leftover")
+Signed-off-by: Andrew Cooper <andrew.cooper3%citrix.com@localhost>
+Reviewed-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Wei Liu <wl%xen.org@localhost>
+
+diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
+index efeb2a727e..6332c74b80 100644
+--- xen/arch/x86/pv/emul-priv-op.c.orig
++++ xen/arch/x86/pv/emul-priv-op.c
+@@ -924,7 +924,8 @@ static int read_msr(unsigned int reg, uint64_t *val,
+         return X86EMUL_OKAY;
+ 
+     case MSR_IA32_MISC_ENABLE:
+-        rdmsrl(reg, *val);
++        if ( rdmsr_safe(reg, *val) )
++            break;
+         *val = guest_misc_enable(*val);
+         return X86EMUL_OKAY;
+ 
+@@ -1059,7 +1060,8 @@ static int write_msr(unsigned int reg, uint64_t val,
+         break;
+ 
+     case MSR_IA32_MISC_ENABLE:
+-        rdmsrl(reg, temp);
++        if ( rdmsr_safe(reg, temp) )
++            break;
+         if ( val != guest_misc_enable(temp) )
+             goto invalid;
+         return X86EMUL_OKAY;
diff -r d519df20e241 -r e34b3eafdac4 sysutils/xenkernel411/patches/patch-XSA336
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel411/patches/patch-XSA336        Sun Oct 04 20:44:32 2020 +0000
@@ -0,0 +1,258 @@
+$NetBSD: patch-XSA336,v 1.1.2.2 2020/10/04 20:44:32 bsiegert Exp $
+
+From: Roger Pau Monné <roger.pau%citrix.com@localhost>
+Subject: x86/vpt: fix race when migrating timers between vCPUs
+
+The current vPT code will migrate the emulated timers between vCPUs
+(change the pt->vcpu field) while just holding the destination lock,
+either from create_periodic_time or pt_adjust_global_vcpu_target if
+the global target is adjusted. Changing the periodic_timer vCPU field
+in this way creates a race where a third party could grab the lock in
+the unlocked region of pt_adjust_global_vcpu_target (or before
+create_periodic_time performs the vcpu change) and then release the
+lock from a different vCPU, creating a locking imbalance.
+
+Introduce a per-domain rwlock in order to protect periodic_time
+migration between vCPU lists. Taking the lock in read mode prevents
+any timer from being migrated to a different vCPU, while taking it in
+write mode allows performing migration of timers across vCPUs. The
+per-vcpu locks are still used to protect all the other fields from the
+periodic_timer struct.
+
+Note that such migration shouldn't happen frequently, and hence
+there's no performance drop as a result of such locking.
+
+This is XSA-336.
+
+Reported-by: Igor Druzhinin <igor.druzhinin%citrix.com@localhost>
+Tested-by: Igor Druzhinin <igor.druzhinin%citrix.com@localhost>
+Signed-off-by: Roger Pau Monné <roger.pau%citrix.com@localhost>
+Reviewed-by: Jan Beulich <jbeulich%suse.com@localhost>
+
+--- xen/arch/x86/hvm/hvm.c.orig
++++ xen/arch/x86/hvm/hvm.c
+@@ -627,6 +627,8 @@ int hvm_domain_initialise(struct domain
+     /* need link to containing domain */
+     d->arch.hvm_domain.pl_time->domain = d;
+ 
++    rwlock_init(&d->arch.hvm_domain.pl_time->pt_migrate);
++
+     /* Set the default IO Bitmap. */
+     if ( is_hardware_domain(d) )
+     {
+--- xen/arch/x86/hvm/vpt.c.orig
++++ xen/arch/x86/hvm/vpt.c
+@@ -152,23 +152,32 @@ static int pt_irq_masked(struct periodic
+     return 1;
+ }
+ 
+-static void pt_lock(struct periodic_time *pt)
++static void pt_vcpu_lock(struct vcpu *v)
+ {
+-    struct vcpu *v;
++    read_lock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
++    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++}
+ 
+-    for ( ; ; )
+-    {
+-        v = pt->vcpu;
+-        spin_lock(&v->arch.hvm_vcpu.tm_lock);
+-        if ( likely(pt->vcpu == v) )
+-            break;
+-        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+-    }
++static void pt_vcpu_unlock(struct vcpu *v)
++{
++    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    read_unlock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
++}
++
++static void pt_lock(struct periodic_time *pt)
++{
++    /*
++     * We cannot use pt_vcpu_lock here, because we need to acquire the
++     * per-domain lock first and then (re-)fetch the value of pt->vcpu, or
++     * else we might be using a stale value of pt->vcpu.
++     */
++    read_lock(&pt->vcpu->domain->arch.hvm_domain.pl_time->pt_migrate);
++    spin_lock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
+ }
+ 
+ static void pt_unlock(struct periodic_time *pt)
+ {
+-    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_unlock(pt->vcpu);
+ }
+ 
+ static void pt_process_missed_ticks(struct periodic_time *pt)
+@@ -218,7 +227,7 @@ void pt_save_timer(struct vcpu *v)
+     if ( v->pause_flags & VPF_blocked )
+         return;
+ 
+-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_lock(v);
+ 
+     list_for_each_entry ( pt, head, list )
+         if ( !pt->do_not_freeze )
+@@ -226,7 +235,7 @@ void pt_save_timer(struct vcpu *v)
+ 
+     pt_freeze_time(v);
+ 
+-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_unlock(v);
+ }
+ 
+ void pt_restore_timer(struct vcpu *v)
+@@ -234,7 +243,7 @@ void pt_restore_timer(struct vcpu *v)
+     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+     struct periodic_time *pt;
+ 
+-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_lock(v);
+ 
+     list_for_each_entry ( pt, head, list )
+     {
+@@ -247,7 +256,7 @@ void pt_restore_timer(struct vcpu *v)
+ 
+     pt_thaw_time(v);
+ 
+-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_unlock(v);
+ }
+ 
+ static void pt_timer_fn(void *data)
+@@ -272,7 +281,7 @@ int pt_update_irq(struct vcpu *v)
+     uint64_t max_lag;
+     int irq, pt_vector = -1;
+ 
+-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_lock(v);
+ 
+     earliest_pt = NULL;
+     max_lag = -1ULL;
+@@ -300,14 +309,14 @@ int pt_update_irq(struct vcpu *v)
+ 
+     if ( earliest_pt == NULL )
+     {
+-        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++        pt_vcpu_unlock(v);
+         return -1;
+     }
+ 
+     earliest_pt->irq_issued = 1;
+     irq = earliest_pt->irq;
+ 
+-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_unlock(v);
+ 
+     switch ( earliest_pt->source )
+     {
+@@ -377,12 +386,12 @@ void pt_intr_post(struct vcpu *v, struct
+     if ( intack.source == hvm_intsrc_vector )
+         return;
+ 
+-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_lock(v);
+ 
+     pt = is_pt_irq(v, intack);
+     if ( pt == NULL )
+     {
+-        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++        pt_vcpu_unlock(v);
+         return;
+     }
+ 
+@@ -421,7 +430,7 @@ void pt_intr_post(struct vcpu *v, struct
+     cb = pt->cb;
+     cb_priv = pt->priv;
+ 
+-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_unlock(v);
+ 
+     if ( cb != NULL )
+         cb(v, cb_priv);
+@@ -432,12 +441,12 @@ void pt_migrate(struct vcpu *v)
+     struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+     struct periodic_time *pt;
+ 
+-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_lock(v);
+ 
+     list_for_each_entry ( pt, head, list )
+         migrate_timer(&pt->timer, v->processor);
+ 
+-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    pt_vcpu_unlock(v);
+ }
+ 
+ void create_periodic_time(
+@@ -455,7 +464,7 @@ void create_periodic_time(
+ 
+     destroy_periodic_time(pt);
+ 
+-    spin_lock(&v->arch.hvm_vcpu.tm_lock);
++    write_lock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
+ 
+     pt->pending_intr_nr = 0;
+     pt->do_not_freeze = 0;
+@@ -504,7 +513,7 @@ void create_periodic_time(
+     init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
+     set_timer(&pt->timer, pt->scheduled);
+ 
+-    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
++    write_unlock(&v->domain->arch.hvm_domain.pl_time->pt_migrate);
+ }
+ 
+ void destroy_periodic_time(struct periodic_time *pt)
+@@ -529,30 +538,20 @@ void destroy_periodic_time(struct period
+ 
+ static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v)
+ {
+-    int on_list;



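The locking scheme described in the XSA-336 patch above may be easier to follow written out on its own: the per-domain lock is taken in read mode by any path that works on a timer through its current vCPU (so pt->vcpu cannot change underneath it), and in write mode by the paths that move a timer to a different vCPU. Below is a minimal standalone sketch using POSIX rwlocks (a toy model of the pattern, not Xen code; field and function names only loosely follow the patch):

/* Toy model of the XSA-336 locking scheme; NOT Xen code. */
#include <pthread.h>
#include <stdio.h>

struct vcpu {
    pthread_mutex_t tm_lock;        /* per-vCPU timer-list lock */
    int id;
};

struct domain {
    pthread_rwlock_t pt_migrate;    /* per-domain migration lock */
    struct vcpu vcpus[2];
};

struct periodic_time {
    struct vcpu *vcpu;              /* may only change under pt_migrate (write) */
    struct domain *domain;
};

/* Normal access: read lock first, then fetch pt->vcpu and take its lock. */
static void pt_lock(struct periodic_time *pt)
{
    pthread_rwlock_rdlock(&pt->domain->pt_migrate);
    pthread_mutex_lock(&pt->vcpu->tm_lock);
}

static void pt_unlock(struct periodic_time *pt)
{
    pthread_mutex_unlock(&pt->vcpu->tm_lock);
    pthread_rwlock_unlock(&pt->domain->pt_migrate);
}

/* Migration: the write lock excludes all readers, so pt->vcpu can change. */
static void pt_set_vcpu(struct periodic_time *pt, struct vcpu *v)
{
    pthread_rwlock_wrlock(&pt->domain->pt_migrate);
    pt->vcpu = v;
    pthread_rwlock_unlock(&pt->domain->pt_migrate);
}

int main(void)
{
    struct domain d = { .pt_migrate = PTHREAD_RWLOCK_INITIALIZER,
                        .vcpus = { { PTHREAD_MUTEX_INITIALIZER, 0 },
                                   { PTHREAD_MUTEX_INITIALIZER, 1 } } };
    struct periodic_time pt = { .vcpu = &d.vcpus[0], .domain = &d };

    pt_lock(&pt);
    printf("timer currently bound to vCPU %d\n", pt.vcpu->id);
    pt_unlock(&pt);

    pt_set_vcpu(&pt, &d.vcpus[1]);  /* safe: no reader holds the lock */
    printf("timer migrated to vCPU %d\n", pt.vcpu->id);
    return 0;
}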