pkgsrc-Changes archive


CVS commit: pkgsrc/sysutils/xenkernel413



Module Name:    pkgsrc
Committed By:   bouyer
Date:           Wed Dec 16 17:15:22 UTC 2020

Modified Files:
        pkgsrc/sysutils/xenkernel413: Makefile distinfo
Added Files:
        pkgsrc/sysutils/xenkernel413/patches: patch-XSA348 patch-XSA358
            patch-XSA359

Log Message:
Add upstream patches for Xen security advisories 348, 358 and 359.
Bump PKGREVISION


To generate a diff of this commit:
cvs rdiff -u -r1.10 -r1.11 pkgsrc/sysutils/xenkernel413/Makefile
cvs rdiff -u -r1.7 -r1.8 pkgsrc/sysutils/xenkernel413/distinfo
cvs rdiff -u -r0 -r1.1 pkgsrc/sysutils/xenkernel413/patches/patch-XSA348 \
    pkgsrc/sysutils/xenkernel413/patches/patch-XSA358 \
    pkgsrc/sysutils/xenkernel413/patches/patch-XSA359

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: pkgsrc/sysutils/xenkernel413/Makefile
diff -u pkgsrc/sysutils/xenkernel413/Makefile:1.10 pkgsrc/sysutils/xenkernel413/Makefile:1.11
--- pkgsrc/sysutils/xenkernel413/Makefile:1.10  Fri Dec  4 20:45:43 2020
+++ pkgsrc/sysutils/xenkernel413/Makefile       Wed Dec 16 17:15:22 2020
@@ -1,7 +1,7 @@
-# $NetBSD: Makefile,v 1.10 2020/12/04 20:45:43 nia Exp $
+# $NetBSD: Makefile,v 1.11 2020/12/16 17:15:22 bouyer Exp $
 
 VERSION=       4.13.2
-PKGREVISION=   3
+PKGREVISION=   4
 DISTNAME=      xen-${VERSION}
 PKGNAME=       xenkernel413-${VERSION}
 CATEGORIES=    sysutils

Index: pkgsrc/sysutils/xenkernel413/distinfo
diff -u pkgsrc/sysutils/xenkernel413/distinfo:1.7 pkgsrc/sysutils/xenkernel413/distinfo:1.8
--- pkgsrc/sysutils/xenkernel413/distinfo:1.7   Mon Nov 30 14:20:22 2020
+++ pkgsrc/sysutils/xenkernel413/distinfo       Wed Dec 16 17:15:22 2020
@@ -1,11 +1,14 @@
-$NetBSD: distinfo,v 1.7 2020/11/30 14:20:22 bouyer Exp $
+$NetBSD: distinfo,v 1.8 2020/12/16 17:15:22 bouyer Exp $
 
 SHA1 (xen413/xen-4.13.2.tar.gz) = d514f1de9582c58676420bb2c9fb1c765b44fbff
 RMD160 (xen413/xen-4.13.2.tar.gz) = 96727c20bd84338f8c67c7c584c01ef877bbcb18
 SHA512 (xen413/xen-4.13.2.tar.gz) = cd3092281c97e9421e303aa288aac04dcccd5536ba7c0ff4d51fbf3d07b5ffacfe3456ba06f5cf63577dafbf8cf3a5d9825ceb5e9ef8ca1427900cc3e57b50a3
 Size (xen413/xen-4.13.2.tar.gz) = 39037826 bytes
 SHA1 (patch-Config.mk) = 9372a09efd05c9fbdbc06f8121e411fcb7c7ba65
+SHA1 (patch-XSA348) = 70de325f88e004228d2b69b7ae3b4106175be1e0
 SHA1 (patch-XSA351) = edb0975ab0aa53d7a0ae7816fe170a081eea695e
+SHA1 (patch-XSA358) = 71d5b2e3d19223b986b8572adfbe7355a3a03db6
+SHA1 (patch-XSA359) = 4b778a86fffbe0e2a364e1589d573bbc7c27ff99
 SHA1 (patch-fixpvh) = fd71e150e0b3a461875c02c4419dbfb30548d8f6
 SHA1 (patch-xen_Makefile) = 465388d80de414ca3bb84faefa0f52d817e423a6
 SHA1 (patch-xen_Rules.mk) = c743dc63f51fc280d529a7d9e08650292c171dac

Added files:

Index: pkgsrc/sysutils/xenkernel413/patches/patch-XSA348
diff -u /dev/null pkgsrc/sysutils/xenkernel413/patches/patch-XSA348:1.1
--- /dev/null   Wed Dec 16 17:15:22 2020
+++ pkgsrc/sysutils/xenkernel413/patches/patch-XSA348   Wed Dec 16 17:15:22 2020
@@ -0,0 +1,356 @@
+$NetBSD: patch-XSA348,v 1.1 2020/12/16 17:15:22 bouyer Exp $ 
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: x86: replace reset_stack_and_jump_nolp()
+
+Move the necessary check into check_for_livepatch_work(), rather than
+mostly duplicating reset_stack_and_jump() for this purpose. This is to
+prevent an inflation of reset_stack_and_jump() flavors.
+
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Juergen Gross <jgross%suse.com@localhost>
+
+--- xen/arch/x86/domain.c.orig 2020-10-30 17:22:39.000000000 +0100
++++ xen/arch/x86/domain.c      2020-11-10 17:51:10.894525721 +0100
+@@ -192,7 +192,7 @@ static void noreturn continue_idle_domai
+ {
+     /* Idle vcpus might be attached to non-idle units! */
+     if ( !is_idle_domain(v->sched_unit->domain) )
+-        reset_stack_and_jump_nolp(guest_idle_loop);
++        reset_stack_and_jump(guest_idle_loop);
+ 
+     reset_stack_and_jump(idle_loop);
+ }
+--- xen/arch/x86/hvm/svm/svm.c.orig    2020-10-30 17:22:39.000000000 +0100
++++ xen/arch/x86/hvm/svm/svm.c 2020-11-10 17:51:10.898525723 +0100
+@@ -1032,7 +1032,7 @@ static void noreturn svm_do_resume(struc
+ 
+     hvm_do_resume(v);
+ 
+-    reset_stack_and_jump_nolp(svm_asm_do_resume);
++    reset_stack_and_jump(svm_asm_do_resume);
+ }
+ 
+ void svm_vmenter_helper(const struct cpu_user_regs *regs)
+--- xen/arch/x86/hvm/vmx/vmcs.c.orig   2020-05-18 18:53:09.000000000 +0200
++++ xen/arch/x86/hvm/vmx/vmcs.c        2020-11-10 17:51:10.898525723 +0100
+@@ -1889,7 +1889,7 @@ void vmx_do_resume(struct vcpu *v)
+     if ( host_cr4 != read_cr4() )
+         __vmwrite(HOST_CR4, read_cr4());
+ 
+-    reset_stack_and_jump_nolp(vmx_asm_do_vmentry);
++    reset_stack_and_jump(vmx_asm_do_vmentry);
+ }
+ 
+ static inline unsigned long vmr(unsigned long field)
+--- xen/arch/x86/pv/domain.c.orig      2020-10-30 17:22:39.000000000 +0100
++++ xen/arch/x86/pv/domain.c   2020-11-10 17:51:10.898525723 +0100
+@@ -61,7 +61,7 @@ custom_runtime_param("pcid", parse_pcid)
+ static void noreturn continue_nonidle_domain(struct vcpu *v)
+ {
+     check_wakeup_from_wait();
+-    reset_stack_and_jump_nolp(ret_from_intr);
++    reset_stack_and_jump(ret_from_intr);
+ }
+ 
+ static int setup_compat_l4(struct vcpu *v)
+--- xen/arch/x86/setup.c.orig  2020-05-18 18:53:09.000000000 +0200
++++ xen/arch/x86/setup.c       2020-11-10 17:51:10.898525723 +0100
+@@ -631,7 +631,7 @@ static void __init noreturn reinit_bsp_s
+     stack_base[0] = stack;
+     memguard_guard_stack(stack);
+ 
+-    reset_stack_and_jump_nolp(init_done);
++    reset_stack_and_jump(init_done);
+ }
+ 
+ /*
+--- xen/common/livepatch.c.orig        2020-05-18 18:53:09.000000000 +0200
++++ xen/common/livepatch.c     2020-11-10 17:51:10.898525723 +0100
+@@ -1300,6 +1300,11 @@ void check_for_livepatch_work(void)
+     s_time_t timeout;
+     unsigned long flags;
+ 
++    /* Only do any work when invoked in truly idle state. */
++    if ( system_state != SYS_STATE_active ||
++         !is_idle_domain(current->sched_unit->domain) )
++        return;
++
+     /* Fast path: no work to do. */
+     if ( !per_cpu(work_to_do, cpu ) )
+         return;
+--- xen/include/asm-x86/current.h.orig 2019-12-18 16:18:59.000000000 +0100
++++ xen/include/asm-x86/current.h      2020-11-10 17:51:10.902525725 +0100
+@@ -129,22 +129,16 @@ unsigned long get_stack_dump_bottom (uns
+ # define CHECK_FOR_LIVEPATCH_WORK ""
+ #endif
+ 
+-#define switch_stack_and_jump(fn, instr)                                \
++#define reset_stack_and_jump(fn)                                        \
+     ({                                                                  \
+         __asm__ __volatile__ (                                          \
+             "mov %0,%%"__OP"sp;"                                        \
+-            instr                                                       \
++            CHECK_FOR_LIVEPATCH_WORK                                    \
+              "jmp %c1"                                                  \
+             : : "r" (guest_cpu_user_regs()), "i" (fn) : "memory" );     \
+         unreachable();                                                  \
+     })
+ 
+-#define reset_stack_and_jump(fn)                                        \
+-    switch_stack_and_jump(fn, CHECK_FOR_LIVEPATCH_WORK)
+-
+-#define reset_stack_and_jump_nolp(fn)                                   \
+-    switch_stack_and_jump(fn, "")
+-
+ /*
+  * Which VCPU's state is currently running on each CPU?
+  * This is not necesasrily the same as 'current' as a CPU may be
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: x86: fold guest_idle_loop() into idle_loop()
+
+The latter can easily be made cover both cases. This is in preparation
+of using idle_loop directly for populating idle_csw.tail.
+
+Take the liberty and also adjust indentation / spacing in involved code.
+
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Juergen Gross <jgross%suse.com@localhost>
+
+--- xen/arch/x86/domain.c.orig 2020-11-10 17:51:10.894525721 +0100
++++ xen/arch/x86/domain.c      2020-11-10 17:51:46.354546349 +0100
+@@ -133,14 +133,22 @@ void play_dead(void)
+ static void idle_loop(void)
+ {
+     unsigned int cpu = smp_processor_id();
++    /*
++     * Idle vcpus might be attached to non-idle units! We don't do any
++     * standard idle work like tasklets or livepatching in this case.
++     */
++    bool guest = !is_idle_domain(current->sched_unit->domain);
+ 
+     for ( ; ; )
+     {
+         if ( cpu_is_offline(cpu) )
++        {
++            ASSERT(!guest);
+             play_dead();
++        }
+ 
+         /* Are we here for running vcpu context tasklets, or for idling? */
+-        if ( unlikely(tasklet_work_to_do(cpu)) )
++        if ( !guest && unlikely(tasklet_work_to_do(cpu)) )
+         {
+             do_tasklet();
+             /* Livepatch work is always kicked off via a tasklet. */
+@@ -151,28 +159,14 @@ static void idle_loop(void)
+          * and then, after it is done, whether softirqs became pending
+          * while we were scrubbing.
+          */
+-        else if ( !softirq_pending(cpu) && !scrub_free_pages()  &&
+-                    !softirq_pending(cpu) )
+-            pm_idle();
+-        do_softirq();
+-    }
+-}
+-
+-/*
+- * Idle loop for siblings in active schedule units.
+- * We don't do any standard idle work like tasklets or livepatching.
+- */
+-static void guest_idle_loop(void)
+-{
+-    unsigned int cpu = smp_processor_id();
+-
+-    for ( ; ; )
+-    {
+-        ASSERT(!cpu_is_offline(cpu));
+-
+-        if ( !softirq_pending(cpu) && !scrub_free_pages() &&
+-             !softirq_pending(cpu))
+-            sched_guest_idle(pm_idle, cpu);
++        else if ( !softirq_pending(cpu) && !scrub_free_pages() &&
++                  !softirq_pending(cpu) )
++        {
++            if ( guest )
++                sched_guest_idle(pm_idle, cpu);
++            else
++                pm_idle();
++        }
+         do_softirq();
+     }
+ }
+@@ -190,10 +184,6 @@ void startup_cpu_idle_loop(void)
+ 
+ static void noreturn continue_idle_domain(struct vcpu *v)
+ {
+-    /* Idle vcpus might be attached to non-idle units! */
+-    if ( !is_idle_domain(v->sched_unit->domain) )
+-        reset_stack_and_jump(guest_idle_loop);
+-
+     reset_stack_and_jump(idle_loop);
+ }
+ 
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: x86: avoid calling {svm,vmx}_do_resume()
+
+These functions follow the following path: hvm_do_resume() ->
+handle_hvm_io_completion() -> hvm_wait_for_io() ->
+wait_on_xen_event_channel() -> do_softirq() -> schedule() ->
+sched_context_switch() -> continue_running() and hence may
+recursively invoke themselves. If this ends up happening a couple of
+times, a stack overflow would result.
+
+Prevent this by also resetting the stack at the
+->arch.ctxt_switch->tail() invocations (in both places for consistency)
+and thus jumping to the functions instead of calling them.
+
+This is XSA-348 / CVE-2020-29566.
+
+Reported-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Juergen Gross <jgross%suse.com@localhost>
+
+--- xen/arch/x86/domain.c.orig 2020-11-10 17:51:46.354546349 +0100
++++ xen/arch/x86/domain.c      2020-11-10 17:56:58.758730088 +0100
+@@ -130,7 +130,7 @@ void play_dead(void)
+         dead_idle();
+ }
+ 
+-static void idle_loop(void)
++static void noreturn idle_loop(void)
+ {
+     unsigned int cpu = smp_processor_id();
+     /*
+@@ -182,11 +182,6 @@ void startup_cpu_idle_loop(void)
+     reset_stack_and_jump(idle_loop);
+ }
+ 
+-static void noreturn continue_idle_domain(struct vcpu *v)
+-{
+-    reset_stack_and_jump(idle_loop);
+-}
+-
+ void init_hypercall_page(struct domain *d, void *ptr)
+ {
+     memset(ptr, 0xcc, PAGE_SIZE);
+@@ -535,7 +530,7 @@ int arch_domain_create(struct domain *d,
+         static const struct arch_csw idle_csw = {
+             .from = paravirt_ctxt_switch_from,
+             .to   = paravirt_ctxt_switch_to,
+-            .tail = continue_idle_domain,
++            .tail = idle_loop,
+         };
+ 
+         d->arch.ctxt_switch = &idle_csw;
+@@ -1833,20 +1828,12 @@ void context_switch(struct vcpu *prev, s
+     /* Ensure that the vcpu has an up-to-date time base. */
+     update_vcpu_system_time(next);
+ 
+-    /*
+-     * Schedule tail *should* be a terminal function pointer, but leave a
+-     * bug frame around just in case it returns, to save going back into the
+-     * context switching code and leaving a far more subtle crash to diagnose.
+-     */
+-    nextd->arch.ctxt_switch->tail(next);
+-    BUG();
++    reset_stack_and_jump_ind(nextd->arch.ctxt_switch->tail);
+ }
+ 
+ void continue_running(struct vcpu *same)
+ {
+-    /* See the comment above. */
+-    same->domain->arch.ctxt_switch->tail(same);
+-    BUG();
++    reset_stack_and_jump_ind(same->domain->arch.ctxt_switch->tail);
+ }
+ 
+ int __sync_local_execstate(void)
+--- xen/arch/x86/hvm/svm/svm.c.orig    2020-11-10 17:51:10.898525723 +0100
++++ xen/arch/x86/hvm/svm/svm.c 2020-11-10 17:56:58.762730090 +0100
+@@ -987,8 +987,9 @@ static void svm_ctxt_switch_to(struct vc
+         wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
+ }
+ 
+-static void noreturn svm_do_resume(struct vcpu *v)
++static void noreturn svm_do_resume(void)
+ {
++    struct vcpu *v = current;
+     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+     bool debug_state = (v->domain->debugger_attached ||
+                         v->domain->arch.monitor.software_breakpoint_enabled ||
+--- xen/arch/x86/hvm/vmx/vmcs.c.orig   2020-11-10 17:51:10.898525723 +0100
++++ xen/arch/x86/hvm/vmx/vmcs.c        2020-11-10 17:56:58.762730090 +0100
+@@ -1830,8 +1830,9 @@ void vmx_vmentry_failure(void)
+     domain_crash(curr->domain);
+ }
+ 
+-void vmx_do_resume(struct vcpu *v)
++void vmx_do_resume(void)
+ {
++    struct vcpu *v = current;
+     bool_t debug_state;
+     unsigned long host_cr4;
+ 
+--- xen/arch/x86/pv/domain.c.orig      2020-11-10 17:51:10.898525723 +0100
++++ xen/arch/x86/pv/domain.c   2020-11-10 17:56:58.762730090 +0100
+@@ -58,7 +58,7 @@ static int parse_pcid(const char *s)
+ }
+ custom_runtime_param("pcid", parse_pcid);
+ 
+-static void noreturn continue_nonidle_domain(struct vcpu *v)
++static void noreturn continue_nonidle_domain(void)
+ {
+     check_wakeup_from_wait();
+     reset_stack_and_jump(ret_from_intr);
+--- xen/include/asm-x86/current.h.orig 2020-11-10 17:51:10.902525725 +0100
++++ xen/include/asm-x86/current.h      2020-11-10 17:56:58.762730090 +0100
+@@ -129,16 +129,23 @@ unsigned long get_stack_dump_bottom (uns
+ # define CHECK_FOR_LIVEPATCH_WORK ""
+ #endif
+ 
+-#define reset_stack_and_jump(fn)                                        \
++#define switch_stack_and_jump(fn, instr, constr)                        \
+     ({                                                                  \
+         __asm__ __volatile__ (                                          \
+             "mov %0,%%"__OP"sp;"                                        \
+             CHECK_FOR_LIVEPATCH_WORK                                    \
+-             "jmp %c1"                                                  \
+-            : : "r" (guest_cpu_user_regs()), "i" (fn) : "memory" );     \
++            instr "1"                                                   \
++            : : "r" (guest_cpu_user_regs()), constr (fn) : "memory" );  \
+         unreachable();                                                  \
+     })
+ 
++#define reset_stack_and_jump(fn)                                        \
++    switch_stack_and_jump(fn, "jmp %c", "i")
++
++/* The constraint may only specify non-call-clobbered registers. */
++#define reset_stack_and_jump_ind(fn)                                    \
++    switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
++
+ /*
+  * Which VCPU's state is currently running on each CPU?
+  * This is not necesasrily the same as 'current' as a CPU may be
+--- xen/include/asm-x86/domain.h.orig  2020-10-30 17:22:39.000000000 +0100
++++ xen/include/asm-x86/domain.h       2020-11-10 17:56:58.762730090 +0100
+@@ -313,7 +313,7 @@ struct arch_domain
+     const struct arch_csw {
+         void (*from)(struct vcpu *);
+         void (*to)(struct vcpu *);
+-        void (*tail)(struct vcpu *);
++        void noreturn (*tail)(void);
+     } *ctxt_switch;
+ 
+ #ifdef CONFIG_HVM
+--- xen/include/asm-x86/hvm/vmx/vmx.h.orig     2019-12-18 16:18:59.000000000 +0100
++++ xen/include/asm-x86/hvm/vmx/vmx.h  2020-11-10 17:56:58.762730090 +0100
+@@ -95,7 +95,7 @@ typedef enum {
+ void vmx_asm_vmexit_handler(struct cpu_user_regs);
+ void vmx_asm_do_vmentry(void);
+ void vmx_intr_assist(void);
+-void noreturn vmx_do_resume(struct vcpu *);
++void noreturn vmx_do_resume(void);
+ void vmx_vlapic_msr_changed(struct vcpu *v);
+ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
+ void vmx_realmode(struct cpu_user_regs *regs);
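
The XSA-348 fix above works by jumping to the context-switch tail functions
after resetting the stack, instead of calling them, so nested resumes can no
longer pile up stack frames. The following is a minimal user-space sketch of
that idea, assuming nothing beyond standard C; the struct step trampoline and
do_resume() below are invented for illustration and are not Xen code.

#include <stdio.h>

/* A step names the next tail function to run, or NULL when done. */
struct step {
    struct step (*next)(int remaining);
    int remaining;
};

static struct step do_resume(int remaining)
{
    /* Instead of calling itself (which would add a stack frame per
     * iteration, the XSA-348 hazard), it hands back the next step. */
    if (remaining > 0)
        return (struct step){ .next = do_resume, .remaining = remaining - 1 };
    return (struct step){ .next = NULL, .remaining = 0 };
}

int main(void)
{
    /* The driver "jumps" by looping in a single frame, so stack use
     * stays bounded no matter how many resumes happen. */
    struct step s = { .next = do_resume, .remaining = 1000000 };
    while (s.next)
        s = s.next(s.remaining);
    puts("done with bounded stack use");
    return 0;
}

In the real patch the same effect is obtained with reset_stack_and_jump_ind(),
which switches back to the top of the per-CPU stack before transferring
control, rather than with a return-value trampoline.
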
Index: pkgsrc/sysutils/xenkernel413/patches/patch-XSA358
diff -u /dev/null pkgsrc/sysutils/xenkernel413/patches/patch-XSA358:1.1
--- /dev/null   Wed Dec 16 17:15:22 2020
+++ pkgsrc/sysutils/xenkernel413/patches/patch-XSA358   Wed Dec 16 17:15:22 2020
@@ -0,0 +1,48 @@
+$NetBSD$
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: evtchn/FIFO: re-order and synchronize (with) map_control_block()
+
+For evtchn_fifo_set_pending()'s check of the control block having been
+set to be effective, ordering of respective reads and writes needs to be
+ensured: The control block pointer needs to be recorded strictly after
+the setting of all the queue heads, and it needs checking strictly
+before any uses of them (this latter aspect was already guaranteed).
+
+This is XSA-358 / CVE-2020-29570.
+
+Reported-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Acked-by: Julien Grall <jgrall%amazon.com@localhost>
+---
+v3: Drop read-side barrier again, leveraging guest_test_and_set_bit().
+v2: Re-base over queue locking re-work.
+
+--- xen/common/event_fifo.c.orig
++++ xen/common/event_fifo.c
+@@ -474,6 +478,7 @@ static int setup_control_block(struct vc
+ static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
+ {
+     void *virt;
++    struct evtchn_fifo_control_block *control_block;
+     unsigned int i;
+     int rc;
+ 
+@@ -484,10 +489,15 @@ static int map_control_block(struct vcpu
+     if ( rc < 0 )
+         return rc;
+ 
+-    v->evtchn_fifo->control_block = virt + offset;
++    control_block = virt + offset;
+ 
+     for ( i = 0; i <= EVTCHN_FIFO_PRIORITY_MIN; i++ )
+-        v->evtchn_fifo->queue[i].head = &v->evtchn_fifo->control_block->head[i];
++        v->evtchn_fifo->queue[i].head = &control_block->head[i];
++
++    /* All queue heads must have been set before setting the control block. */
++    smp_wmb();
++
++    v->evtchn_fifo->control_block = control_block;
+ 
+     return 0;
+ }
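
The XSA-358 change makes map_control_block() write all queue heads before
publishing the control-block pointer, with smp_wmb() enforcing that order so
a reader that sees the pointer also sees initialised heads. Below is a rough
user-space analogue, assuming C11 atomics in place of Xen's barriers; the
names control_block, cb_storage and set_pending are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

#define NR_QUEUES 4

struct control_block { int head[NR_QUEUES]; };

static struct control_block cb_storage;
static _Atomic(struct control_block *) control_block;   /* NULL until published */

static void map_control_block(void)
{
    for (int i = 0; i < NR_QUEUES; i++)
        cb_storage.head[i] = i;                          /* set up queue heads */

    /* Publish the pointer only after the heads are written; the release
     * store stands in for the patch's smp_wmb(). */
    atomic_store_explicit(&control_block, &cb_storage, memory_order_release);
}

static void set_pending(void)
{
    struct control_block *cb =
        atomic_load_explicit(&control_block, memory_order_acquire);

    if (!cb)
        return;                       /* control block not yet mapped */

    /* Seeing the pointer guarantees the heads are initialised. */
    printf("queue 0 head = %d\n", cb->head[0]);
}

int main(void)
{
    map_control_block();
    set_pending();
    return 0;
}
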
Index: pkgsrc/sysutils/xenkernel413/patches/patch-XSA359
diff -u /dev/null pkgsrc/sysutils/xenkernel413/patches/patch-XSA359:1.1
--- /dev/null   Wed Dec 16 17:15:22 2020
+++ pkgsrc/sysutils/xenkernel413/patches/patch-XSA359   Wed Dec 16 17:15:22 2020
@@ -0,0 +1,42 @@
+$NetBSD: patch-XSA359,v 1.1 2020/12/16 17:15:22 bouyer Exp $
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: evtchn/FIFO: add 2nd smp_rmb() to evtchn_fifo_word_from_port()
+
+Besides with add_page_to_event_array() the function also needs to
+synchronize with evtchn_fifo_init_control() setting both d->evtchn_fifo
+and (subsequently) d->evtchn_port_ops.
+
+This is XSA-359 / CVE-2020-29571.
+
+Reported-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Julien Grall <jgrall%amazon.com@localhost>
+
+--- xen/common/event_fifo.c.orig
++++ xen/common/event_fifo.c
+@@ -55,6 +55,13 @@ static inline event_word_t *evtchn_fifo_
+ {
+     unsigned int p, w;
+ 
++    /*
++     * Callers aren't required to hold d->event_lock, so we need to synchronize
++     * with evtchn_fifo_init_control() setting d->evtchn_port_ops /after/
++     * d->evtchn_fifo.
++     */
++    smp_rmb();
++
+     if ( unlikely(port >= d->evtchn_fifo->num_evtchns) )
+         return NULL;
+ 
+@@ -606,6 +613,10 @@ int evtchn_fifo_init_control(struct evtc
+         if ( rc < 0 )
+             goto error;
+ 
++        /*
++         * This call, as a side effect, synchronizes with
++         * evtchn_fifo_word_from_port().
++         */
+         rc = map_control_block(v, gfn, offset);
+         if ( rc < 0 )
+             goto error;
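
XSA-359 adds the read-side barrier that pairs with the write side in
evtchn_fifo_init_control(), which sets d->evtchn_fifo before
d->evtchn_port_ops. A hedged sketch of that pairing follows, modelled with
C11 fences rather than Xen's smp_rmb()/smp_wmb() and with invented names
(port_ops_installed stands in for the port-ops pointer); it is not Xen code.

#include <stdatomic.h>
#include <stdio.h>

struct evtchn_fifo_state { unsigned int num_evtchns; };

static struct evtchn_fifo_state fifo_storage;
static struct evtchn_fifo_state *evtchn_fifo;   /* written first  */
static _Atomic int port_ops_installed;          /* written second */

static void init_control(void)
{
    fifo_storage.num_evtchns = 128;
    evtchn_fifo = &fifo_storage;                 /* d->evtchn_fifo analogue */
    atomic_thread_fence(memory_order_release);   /* write-side smp_wmb() analogue */
    atomic_store_explicit(&port_ops_installed, 1, memory_order_relaxed);
}

static void word_from_port(unsigned int port)
{
    if (!atomic_load_explicit(&port_ops_installed, memory_order_relaxed))
        return;                                  /* caller dispatch analogue */

    /* The fence added by the patch: only after it may evtchn_fifo
     * (and num_evtchns) be dereferenced safely. */
    atomic_thread_fence(memory_order_acquire);

    if (port < evtchn_fifo->num_evtchns)
        printf("port %u is in range\n", port);
}

int main(void)
{
    init_control();
    word_from_port(5);
    return 0;
}
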


