NetBSD-Bugs archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

Re: port-arm/49061: armv4 crashes inside network interrupt



The following reply was made to PR port-arm/49061; it has been noted by GNATS.

From: Martin Husemann <martin%duskware.de@localhost>
To: gnats-bugs%NetBSD.org@localhost
Cc: 
Subject: Re: port-arm/49061: armv4 crashes inside network interrupt
Date: Mon, 4 Aug 2014 09:22:39 +0200

 Here is the actual patch I'm using - mostly from Matt, but #ifdef mess
 added by me to allow all evbarm kernels to compile.
 
 Martin
 
 Index: pmap.c
 ===================================================================
 RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
 retrieving revision 1.295
 diff -u -p -r1.295 pmap.c
 --- pmap.c     25 Jul 2014 15:09:43 -0000      1.295
 +++ pmap.c     4 Aug 2014 07:20:17 -0000
 @@ -559,7 +559,9 @@ pmap_release_page_lock(struct vm_page_md
        mutex_exit(&pmap_lock);
  }
  
 -#ifdef DIAGNOSTIC
 +
 +#if defined(DIAGNOSTIC) || \
 +      (defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED))
  static inline int
  pmap_page_locked_p(struct vm_page_md *md)
  {
 @@ -3644,6 +3646,11 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
  #endif
  #endif
        struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
 +#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
 +      const bool page_locked_p = md ? pmap_page_locked_p(md) : false;
 +#elif defined(DIAGNOSTIC)
 +      const bool page_locked_p = false;
 +#endif
  
        UVMHIST_FUNC(__func__);
  
 @@ -3685,9 +3692,13 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
                        KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
                        KASSERT((flags & PMAP_KMPAGE) == 0);
  #ifndef ARM_MMU_EXTENDED
 -                      pmap_acquire_page_lock(omd);
 -                      pv = pmap_kremove_pg(opg, va);
 -                      pmap_release_page_lock(omd);
 +                      if (pmap_page_locked_p(omd)) {
 +                              pv = pmap_kremove_pg(opg, va);
 +                      } else {
 +                              pmap_acquire_page_lock(omd);
 +                              pv = pmap_kremove_pg(opg, va);
 +                              pmap_release_page_lock(omd);
 +                      }
  #endif
                }
  #endif
 @@ -3752,7 +3763,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
                                pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
                                KASSERT(pv != NULL);
                        }
 -                      pmap_acquire_page_lock(md);
 +                      if (!page_locked_p)
 +                              pmap_acquire_page_lock(md);
                        pmap_enter_pv(md, pa, pv, pmap_kernel(), va,
                            PVF_WIRED | PVF_KENTRY
                            | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
 @@ -3761,7 +3773,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
                                md->pvh_attrs |= PVF_DIRTY;
                      KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                        pmap_vac_me_harder(md, pa, pmap_kernel(), va);
 -                      pmap_release_page_lock(md);
 +                      if (!page_locked_p)
 +                              pmap_release_page_lock(md);
  #endif
                }
  #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
 @@ -3770,7 +3783,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
                        pool_put(&pmap_pv_pool, pv);
  #endif
        }
 -      KASSERT(md == NULL || !pmap_page_locked_p(md));
 +      KASSERT(md == NULL || page_locked_p || !pmap_page_locked_p(md));
        if (pmap_initialized) {
                UVMHIST_LOG(maphist, "  <-- done (ptep %p: %#x -> %#x)",
                    ptep, opte, npte, 0);
 


Home | Main Index | Thread Index | Old Index