Source-Changes-HG archive


[src/trunk]: src/sys/arch/mips/mips Remove PMAP_POOLPAGE_DEBUG latent code since it never worked.



details:   https://anonhg.NetBSD.org/src/rev/b35e38103f18
branches:  trunk
changeset: 338853:b35e38103f18
user:      matt <matt%NetBSD.org@localhost>
date:      Thu Jun 11 15:58:49 2015 +0000

description:
Remove PMAP_POOLPAGE_DEBUG latent code since it never worked.
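
For context, the deleted debug path mapped pool pages through a dedicated window of Sysmap PTEs instead of the direct-mapped segments, picking a virtual offset whose cache colour matched the physical page so that a virtually indexed cache would not alias, scanning forward from a rotating hint with a single wrap-around. The fragment below is a stand-alone sketch of that search idea only; the constants, names, and free-slot bookkeeping are illustrative stand-ins, and the actual code that was removed appears in the diff that follows.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch of the colour-matched slot search the removed
 * PMAP_POOLPAGE_DEBUG code attempted.  Not NetBSD code: PAGE_SIZE,
 * CACHE_ALIAS_MASK, WINDOW_SIZE and the slot_used[] bookkeeping are
 * stand-ins chosen for the example.
 */
#define PAGE_SIZE        4096u
#define CACHE_ALIAS_MASK 0x7000u              /* alias bits: 32KB cache, 4KB pages */
#define WINDOW_SIZE      (256u * PAGE_SIZE)   /* hypothetical pool-page VA window  */

static bool     slot_used[WINDOW_SIZE / PAGE_SIZE]; /* stands in for "PTE valid" */
static uint32_t hint;                               /* colour-set-aligned start  */

/* Return a window offset whose colour matches pa, or (uint32_t)-1 if full. */
uint32_t
poolpage_pick_offset(uint32_t pa)
{
	const uint32_t step = CACHE_ALIAS_MASK + PAGE_SIZE;  /* one colour set   */
	uint32_t off = hint + (pa & CACHE_ALIAS_MASK);       /* match the colour */

	/* First pass: scan from the hint to the end of the window. */
	for (; off < WINDOW_SIZE; off += step)
		if (!slot_used[off / PAGE_SIZE])
			goto found;
	/* Second pass: wrap around and scan up to the hint. */
	for (off -= WINDOW_SIZE; off < hint; off += step)
		if (!slot_used[off / PAGE_SIZE])
			goto found;
	return (uint32_t)-1;
found:
	slot_used[off / PAGE_SIZE] = true;
	hint = (off + step) & ~(step - 1);   /* advance to the next colour set */
	return off;
}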

diffstat:

 sys/arch/mips/mips/pmap.c |  62 ++++------------------------------------------
 1 files changed, 6 insertions(+), 56 deletions(-)

diffs (139 lines):

diff -r c3eacfe4c419 -r b35e38103f18 sys/arch/mips/mips/pmap.c
--- a/sys/arch/mips/mips/pmap.c Thu Jun 11 15:56:53 2015 +0000
+++ b/sys/arch/mips/mips/pmap.c Thu Jun 11 15:58:49 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.218 2015/06/11 15:50:17 matt Exp $  */
+/*     $NetBSD: pmap.c,v 1.219 2015/06/11 15:58:49 matt Exp $  */
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.218 2015/06/11 15:50:17 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.219 2015/06/11 15:58:49 matt Exp $");
 
 /*
  *     Manages physical address maps.
@@ -280,15 +280,6 @@
 pt_entry_t     *Sysmap;                /* kernel pte table */
 unsigned int   Sysmapsize;             /* number of pte's in Sysmap */
 
-#ifdef PMAP_POOLPAGE_DEBUG
-struct poolpage_info {
-       vaddr_t base;
-       vaddr_t size;
-       vaddr_t hint;
-       pt_entry_t *sysmap;
-} poolpage;
-#endif
-
 static void pmap_pvlist_lock_init(void);
 
 /*
@@ -528,10 +519,6 @@
 #ifdef KSEG2IOBUFSIZE
        Sysmapsize += (KSEG2IOBUFSIZE >> PGSHIFT);
 #endif
-#ifdef PMAP_POOLPAGE_DEBUG
-       poolpage.size = nkmempages + MCLBYTES * nmbclusters;
-       Sysmapsize += poolpage.size;
-#endif
 #ifdef _LP64
        /*
         * If we are using tmpfs, then we might want to use a great deal of
@@ -573,11 +560,6 @@
        Sysmap = (pt_entry_t *)
            uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize);
 
-#ifdef PMAP_POOLPAGE_DEBUG
-       pmap_limits.virtual_end -= poolpage.size;
-       poolpage.base = pmap_limits.virtual_end;
-       poolpage.sysmap = Sysmap + atop(poolpage.size);
-#endif
        /*
         * Initialize the pools.
         */
@@ -2257,7 +2239,7 @@
                            pmap, va);
 #endif
                if (__predict_true(apv == NULL)) {
-#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(PMAP_POOLPAGE_DEBUG) || defined(LOCKDEBUG)
+#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(LOCKDEBUG)
                        /*
                         * To allocate a PV, we have to release the PVLIST lock
                         * so get the page generation.  We allocate the PV, and
@@ -2268,7 +2250,7 @@
                        apv = (pv_entry_t)pmap_pv_alloc();
                        if (apv == NULL)
                                panic("pmap_enter_pv: pmap_pv_alloc() failed");
-#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(PMAP_POOLPAGE_DEBUG) || defined(LOCKDEBUG)
+#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(LOCKDEBUG)
 #ifdef MULTIPROCESSOR
                        /*
                         * If the generation has changed, then someone else
@@ -2614,31 +2596,6 @@
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        pmap_set_mdpage_attributes(md, PG_MD_POOLPAGE);
 
-#ifdef PMAP_POOLPAGE_DEBUG
-       KASSERT((poolpage.hint & MIPS_CACHE_ALIAS_MASK) == 0);
-       vaddr_t va_offset = poolpage.hint + mips_cache_indexof(pa);
-       pt_entry_t *pte = poolpage.sysmap + atop(va_offset);
-       const size_t va_inc = MIPS_CACHE_ALIAS_MASK + PAGE_SIZE;
-       const size_t pte_inc = atop(va_inc);
-
-       for (; va_offset < poolpage.size;
-            va_offset += va_inc, pte += pte_inc) {
-               if (!mips_pg_v(pte->pt_entry))
-                       break;
-       }
-       if (va_offset >= poolpage.size) {
-               for (va_offset -= poolpage.size, pte -= atop(poolpage.size);
-                    va_offset < poolpage.hint;
-                    va_offset += va_inc, pte += pte_inc) {
-                       if (!mips_pg_v(pte->pt_entry))
-                               break;
-               }
-       }
-       KASSERT(!mips_pg_v(pte->pt_entry));
-       va = poolpage.base + va_offset;
-       poolpage.hint = roundup2(va_offset + 1, va_inc);
-       pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
-#else
 #ifdef _LP64
        KASSERT(mips_options.mips3_xkphys_cached);
        va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
@@ -2649,8 +2606,7 @@
 
        va = MIPS_PHYS_TO_KSEG0(pa);
 #endif
-#endif
-#if !defined(_LP64) || defined(PMAP_POOLPAGE_DEBUG)
+#if !defined(_LP64)
        if (MIPS_CACHE_VIRTUAL_ALIAS) {
                /*
                 * If this page was last mapped with an address that might
@@ -2673,10 +2629,7 @@
 mips_pmap_unmap_poolpage(vaddr_t va)
 {
        paddr_t pa;
-#ifdef PMAP_POOLPAGE_DEBUG
-       KASSERT(poolpage.base <= va && va < poolpage.base + poolpage.size);
-       pa = mips_tlbpfn_to_paddr(kvtopte(va)->pt_entry);
-#elif defined(_LP64)
+#if defined(_LP64)
        KASSERT(MIPS_XKPHYS_P(va));
        pa = MIPS_XKPHYS_TO_PHYS(va);
 #else
@@ -2696,9 +2649,6 @@
                mips_dcache_inv_range(va, PAGE_SIZE);
        }
 #endif
-#ifdef PMAP_POOLPAGE_DEBUG
-       pmap_kremove(va, PAGE_SIZE);
-#endif
        return pa;
 }
 

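With the debug path gone, mips_pmap_map_poolpage() and mips_pmap_unmap_poolpage() rely solely on the architecture's direct-mapped segments: KSEG0 on 32-bit kernels and cached XKPHYS on _LP64 kernels, plus the existing virtual-alias flushing for caches that need it. The stand-alone sketch below is not the NetBSD code; the helper names are made up, but the segment bases and masks are the standard MIPS values, and it shows the address arithmetic those direct maps amount to.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone sketch (not NetBSD code) of the direct-mapped address
 * arithmetic that mips_pmap_map_poolpage()/mips_pmap_unmap_poolpage()
 * rely on once the PMAP_POOLPAGE_DEBUG window is gone.  The segment
 * layouts are the architectural MIPS values; the helper names are
 * illustrative only.
 */
#define KSEG0_BASE	0x80000000u		/* 32-bit cached direct map         */
#define KSEG0_PHYSMASK	0x1fffffffu		/* KSEG0 covers the low 512MB       */
#define XKPHYS_CACHED	0x9800000000000000ull	/* 64-bit cached direct map (CCA=3) */
#define XKPHYS_PHYSMASK	0x07ffffffffffffffull	/* PA lives in bits 58:0            */

/* 32-bit kernel: a pool page must sit in the low 512MB to be KSEG0-mapped. */
static uint32_t phys_to_kseg0(uint32_t pa)  { return KSEG0_BASE | pa; }
static uint32_t kseg0_to_phys(uint32_t va)  { return va & KSEG0_PHYSMASK; }

/* _LP64 kernel: cached XKPHYS reaches all of physical memory. */
static uint64_t phys_to_xkphys(uint64_t pa) { return XKPHYS_CACHED | pa; }
static uint64_t xkphys_to_phys(uint64_t va) { return va & XKPHYS_PHYSMASK; }

int
main(void)
{
	uint32_t pa32 = 0x00123000u;
	uint64_t pa64 = 0x123456000ull;

	printf("KSEG0:  pa 0x%08" PRIx32 " -> va 0x%08" PRIx32 " -> pa 0x%08" PRIx32 "\n",
	    pa32, phys_to_kseg0(pa32), kseg0_to_phys(phys_to_kseg0(pa32)));
	printf("XKPHYS: pa 0x%09" PRIx64 " -> va 0x%016" PRIx64 " -> pa 0x%09" PRIx64 "\n",
	    pa64, phys_to_xkphys(pa64), xkphys_to_phys(phys_to_xkphys(pa64)));
	return 0;
}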

