NetBSD-Bugs archive


Re: port-xen/45975 (panic: HYPERVISOR_mmu_update failed, ret: -22 during heavy activity)



The following reply was made to PR port-xen/45975; it has been noted by GNATS.

From: Manuel Bouyer <bouyer%antioche.eu.org@localhost>
To: gnats-bugs%NetBSD.org@localhost
Cc: gnats-admin%NetBSD.org@localhost, netbsd-bugs%NetBSD.org@localhost, 
riz%NetBSD.org@localhost
Subject: Re: port-xen/45975 (panic: HYPERVISOR_mmu_update failed, ret: -22
 during heavy activity)
Date: Fri, 17 Feb 2012 16:30:26 +0100

 
 On Fri, Feb 17, 2012 at 01:12:46PM +0100, Manuel Bouyer wrote:
 > I think I found where the problem comes from:
 > in uvm, uvm_km_pgremove_intrsafe() will return a page to the free list.
 > But because it uses pmap_extract() to find the pages, callers remove
 > the mapping of the pages from pmap_kernel() only *after* calling
 > uvm_km_pgremove_intrsafe().
 > So there is a window where pages are returned to the free list but
 > are still mapped in pmap_kernel(); another CPU can allocate and map such
 > a page in the meantime (remember a CPU can be preempted by the hypervisor,
 > so the window can be quite long).
 > I confirmed this by adding an (expensive) check in uvm_pagefree() looking
 > for existing mappings of a page.
 > Multiple mappings of the same physical page with different
 > attributes are not a problem on real hardware, so this is a problem only for
 > Xen.
 > One trivial fix is to call pmap_kremove() from uvm_km_pgremove_intrsafe()
 > just after pmap_extract() has been done (and remove it from callers).
 > I'm testing this now
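 
 As an illustrative sketch only (the real change is the attached diff; the
 local variable handling and comments below are my own simplification), the
 reordering looks roughly like this, with __PMAP_NEED_UNMAP_BEFORE_FREE
 defined only by pmaps that, like the Xen one, must unmap before freeing:
 
     void
     uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
     {
             struct vm_page *pg;
             paddr_t pa;
             vaddr_t va;
 
             for (va = start; va < end; va += PAGE_SIZE) {
                     if (!pmap_extract(pmap_kernel(), va, &pa))
                             continue;
     #ifdef __PMAP_NEED_UNMAP_BEFORE_FREE
                     /*
                      * Xen: drop the pmap_kernel() mapping before the page
                      * goes back to the free list, so another CPU can never
                      * grab a free page that is still mapped R/W.
                      */
                     pmap_kremove(va, PAGE_SIZE);
     #endif
                     pg = PHYS_TO_VM_PAGE(pa);
                     uvm_pagefree(pg);
             }
     #ifndef __PMAP_NEED_UNMAP_BEFORE_FREE
             /* other pmaps: keep the cheaper single bulk unmap after the loop */
             pmap_kremove(start, end - start);
     #endif
     }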
 
 See attached patch. I couldn't make my test domUs (an i386 PAE one with 4
 virtual CPUs on a dual-core box, and an amd64 one with 8 virtual CPUs on
 an 8-core box) crash with it; several build.sh -j<as_appropriate> release
 runs have completed.
 
 -- 
 Manuel Bouyer <bouyer%antioche.eu.org@localhost>
      NetBSD: 26 years of experience will always make the difference
 --
 
 
 Index: uvm/uvm_km.c
 ===================================================================
 RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
 retrieving revision 1.119
 diff -u -p -u -r1.119 uvm_km.c
 --- uvm/uvm_km.c       4 Feb 2012 17:56:17 -0000       1.119
 +++ uvm/uvm_km.c       17 Feb 2012 15:26:30 -0000
 @@ -472,12 +472,18 @@ uvm_km_pgremove_intrsafe(struct vm_map *
                if (!pmap_extract(pmap_kernel(), start, &pa)) {
                        continue;
                }
 +#ifdef __PMAP_NEED_UNMAP_BEFORE_FREE
 +              pmap_kremove(start, PAGE_SIZE);
 +#endif
                pg = PHYS_TO_VM_PAGE(pa);
                KASSERT(pg);
                KASSERT(pg->uobject == NULL && pg->uanon == NULL);
                KASSERT((pg->flags & PG_BUSY) == 0);
                uvm_pagefree(pg);
        }
 +#ifndef __PMAP_NEED_UNMAP_BEFORE_FREE
 +      pmap_kremove(start, end - start);
 +#endif
  }
  
  #if defined(DEBUG)
 @@ -670,7 +676,6 @@ uvm_km_free(struct vm_map *map, vaddr_t 
                 * remove it after.  See comment below about KVA visibility.
                 */
                uvm_km_pgremove_intrsafe(map, addr, addr + size);
 -              pmap_kremove(addr, size);
        }
  
        /*
 @@ -747,7 +752,6 @@ again:
                        } else {
                                uvm_km_pgremove_intrsafe(kernel_map, va,
                                    va + size);
 -                              pmap_kremove(va, size);
                                vmem_free(kmem_va_arena, va, size);
                                return ENOMEM;
                        }
 @@ -783,7 +787,6 @@ uvm_km_kmem_free(vmem_t *vm, vmem_addr_t
        }
  #endif /* PMAP_UNMAP_POOLPAGE */
        uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
 -      pmap_kremove(addr, size);
        pmap_update(pmap_kernel());
  
        vmem_free(vm, addr, size);
 Index: uvm/uvm_kmguard.c
 ===================================================================
 RCS file: /cvsroot/src/sys/uvm/uvm_kmguard.c,v
 retrieving revision 1.9
 diff -u -p -u -r1.9 uvm_kmguard.c
 --- uvm/uvm_kmguard.c  5 Feb 2012 11:08:06 -0000       1.9
 +++ uvm/uvm_kmguard.c  17 Feb 2012 15:26:30 -0000
 @@ -180,7 +180,6 @@ uvm_kmguard_free(struct uvm_kmguard *kg,
         */
  
        uvm_km_pgremove_intrsafe(kernel_map, va, va + PAGE_SIZE * 2);
 -      pmap_kremove(va, PAGE_SIZE * 2);
        pmap_update(pmap_kernel());
  
        /*
 Index: uvm/uvm_map.c
 ===================================================================
 RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
 retrieving revision 1.312
 diff -u -p -u -r1.312 uvm_map.c
 --- uvm/uvm_map.c      28 Jan 2012 00:00:06 -0000      1.312
 +++ uvm/uvm_map.c      17 Feb 2012 15:26:30 -0000
 @@ -2246,7 +2246,6 @@ uvm_unmap_remove(struct vm_map *map, vad
                        if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
                                uvm_km_pgremove_intrsafe(map, entry->start,
                                    entry->end);
 -                              pmap_kremove(entry->start, len);
                        }
                } else if (UVM_ET_ISOBJ(entry) &&
                           UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
 Index: arch/x86/include/pmap.h
 ===================================================================
 RCS file: /cvsroot/src/sys/arch/x86/include/pmap.h,v
 retrieving revision 1.49
 diff -u -p -u -r1.49 pmap.h
 --- arch/x86/include/pmap.h    4 Dec 2011 16:24:13 -0000       1.49
 +++ arch/x86/include/pmap.h    17 Feb 2012 15:26:30 -0000
 @@ -296,6 +298,14 @@ void              pmap_tlb_intr(void);
  #define PMAP_GROWKERNEL               /* turn on pmap_growkernel interface */
  #define PMAP_FORK             /* turn on pmap_fork interface */
  
 +#ifdef XEN
 +/*
 + * If a free vm_page is allocated for a PDP, it will be rejected
 + * by Xen if it still has some R/W mapping.
 + */
 +#define __PMAP_NEED_UNMAP_BEFORE_FREE
 +#endif
 +
  /*
   * Do idle page zero'ing uncached to avoid polluting the cache.
   */
 
 

