Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm Move the new pmap from arm32/pmap_new.c to arm3...



details:   https://anonhg.NetBSD.org/src/rev/7c696b7823a1
branches:  trunk
changeset: 547518:7c696b7823a1
user:      thorpej <thorpej@NetBSD.org>
date:      Wed May 21 18:07:07 2003 +0000

description:
Move the new pmap from arm32/pmap_new.c to arm32/pmap.c, fully replacing
the old.

diffstat:

 sys/arch/arm/arm32/pmap.c     |  6923 ++++++++++++++++++++++------------------
 sys/arch/arm/arm32/pmap_new.c |  4937 -----------------------------
 sys/arch/arm/conf/files.arm   |     6 +-
 3 files changed, 3843 insertions(+), 8023 deletions(-)

diffs (truncated from 12437 to 300 lines):

diff -r 048511b4ec38 -r 7c696b7823a1 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed May 21 18:04:42 2003 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed May 21 18:07:07 2003 +0000
@@ -1,7 +1,42 @@
-/*     $NetBSD: pmap.c,v 1.133 2003/05/10 21:10:27 thorpej Exp $       */
+/*     $NetBSD: pmap.c,v 1.134 2003/05/21 18:07:07 thorpej Exp $       */
 
 /*
- * Copyright (c) 2002 Wasabi Systems, Inc.
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed for the NetBSD Project by
+ *      Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ *    or promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2002-2003 Wasabi Systems, Inc.
  * Copyright (c) 2001 Richard Earnshaw
  * Copyright (c) 2001-2002 Christopher Gilbert
  * All rights reserved.
@@ -110,20 +145,52 @@
  */
 
 /*
- * The dram block info is currently referenced from the bootconfig.
- * This should be placed in a separate structure.
+ * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables
+ * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
+ * Systems, Inc.
+ *
+ * There are still a few things outstanding at this time:
+ *
+ *   - There are some unresolved issues for MP systems:
+ *
+ *     o The L1 metadata needs a lock, or more specifically, some places
+ *       need to acquire an exclusive lock when modifying L1 translation
+ *       table entries.
+ *
+ *     o When one cpu modifies an L1 entry, and that L1 table is also
+ *       being used by another cpu, then the latter will need to be told
+ *       that a tlb invalidation may be necessary. (But only if the old
+ *       domain number in the L1 entry being over-written is currently
+ *       the active domain on that cpu). I guess there are lots more tlb
+ *       shootdown issues too...
+ *
+ *     o If the vector_page is at 0x00000000 instead of 0xffff0000, then
+ *       MP systems will lose big-time because of the MMU domain hack.
+ *       The only way this can be solved (apart from moving the vector
+ *       page to 0xffff0000) is to reserve the first 1MB of user address
+ *       space for kernel use only. This would require re-linking all
+ *       applications so that the text section starts above this 1MB
+ *       boundary.
+ *
+ *     o Tracking which VM space is resident in the cache/tlb has not yet
+ *       been implemented for MP systems.
+ *
+ *     o Finally, there is a pathological condition where two cpus running
+ *       two separate processes (not lwps) which happen to share an L1
+ *       can get into a fight over one or more L1 entries. This will result
+ *       in a significant slow-down if both processes are in tight loops.
  */
 
 /*
  * Special compilation symbols
  * PMAP_DEBUG          - Build in pmap_debug_level code
  */
-    
+
 /* Include header files */
 
+#include "opt_cpuoptions.h"
 #include "opt_pmap_debug.h"
 #include "opt_ddb.h"
-#include "opt_cpuoptions.h"
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -137,21 +204,19 @@
  
 #include <uvm/uvm.h>
 
-#include <machine/bootconfig.h>
 #include <machine/bus.h>
 #include <machine/pmap.h>
 #include <machine/pcb.h>
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.133 2003/05/10 21:10:27 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.134 2003/05/21 18:07:07 thorpej Exp $");
 
 #ifdef PMAP_DEBUG
 #define        PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
                ((_stat_))
-int pmap_debug_level = -2;
-void pmap_dump_pvlist(vaddr_t phys, char *m);
+int pmap_debug_level = 0;
 
 /*
  * for switching to potentially finer grained debugging
@@ -171,9 +236,12 @@
 #define        PDB_PARANOIA    0x2000
 #define        PDB_WIRING      0x4000
 #define        PDB_PVDUMP      0x8000
-
-int debugmap = 0;
-int pmapdebug = PDB_PARANOIA | PDB_FOLLOW | PDB_GROWKERN | PDB_ENTER | PDB_REMOVE;
+#define        PDB_VAC         0x10000
+#define        PDB_KENTER      0x20000
+#define        PDB_KREMOVE     0x40000
+
+int debugmap = 1;
+int pmapdebug = 0; 
 #define        NPDEBUG(_lev_,_stat_) \
        if (pmapdebug & (_lev_)) \
                ((_stat_))
@@ -183,135 +251,282 @@
 #define NPDEBUG(_lev_,_stat_) /* Nothing */
 #endif /* PMAP_DEBUG */
 
+/*
+ * pmap_kernel() points here
+ */
 struct pmap     kernel_pmap_store;
 
 /*
- * linked list of all non-kernel pmaps
+ * Which pmap is currently 'live' in the cache
+ *
+ * XXXSCW: Fix for SMP ...
  */
-
-static LIST_HEAD(, pmap) pmaps;
+union pmap_cache_state *pmap_cache_state;
 
 /*
- * pool that pmap structures are allocated from
+ * Pool and cache that pmap structures are allocated from.
+ * We use a cache to avoid clearing the pm_l2[] array (1KB)
+ * in pmap_create().
  */
-
-struct pool pmap_pmap_pool;
+static struct pool pmap_pmap_pool;
+static struct pool_cache pmap_pmap_cache;
+static LIST_HEAD(, pmap) pmap_pmaps;
+
+/*
+ * Pool of PV structures
+ */
+static struct pool pmap_pv_pool;
+static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
+static void pmap_bootstrap_pv_page_free(struct pool *, void *);
+static struct pool_allocator pmap_bootstrap_pv_allocator = {
+       pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
+};
 
 /*
- * pool/cache that PT-PT's are allocated from
+ * Pool and cache of l2_dtable structures.
+ * We use a cache to avoid clearing the structures when they're
+ * allocated. (196 bytes)
  */
-
-struct pool pmap_ptpt_pool;
-struct pool_cache pmap_ptpt_cache;
-u_int pmap_ptpt_cache_generation;
-
-static void *pmap_ptpt_page_alloc(struct pool *, int);
-static void pmap_ptpt_page_free(struct pool *, void *);
-
-struct pool_allocator pmap_ptpt_allocator = {
-       pmap_ptpt_page_alloc, pmap_ptpt_page_free,
-};
-
-static int pmap_ptpt_ctor(void *, void *, int);
-
+static struct pool pmap_l2dtable_pool;
+static struct pool_cache pmap_l2dtable_cache;
+static vaddr_t pmap_kernel_l2dtable_kva;
+
+/*
+ * Pool and cache of L2 page descriptors.
+ * We use a cache to avoid clearing the descriptor table
+ * when they're allocated. (1KB)
+ */
+static struct pool pmap_l2ptp_pool;
+static struct pool_cache pmap_l2ptp_cache;
+static vaddr_t pmap_kernel_l2ptp_kva;
+static paddr_t pmap_kernel_l2ptp_phys;
+
+/*
+ * pmap copy/zero page, and mem(5) hook point
+ */
 static pt_entry_t *csrc_pte, *cdst_pte;
 static vaddr_t csrcp, cdstp;
-
 char *memhook;
 extern caddr_t msgbufaddr;
 
-boolean_t pmap_initialized = FALSE;    /* Has pmap_init completed? */
 /*
- * locking data structures
+ * Flag to indicate if pmap_init() has done its thing
  */
-
+boolean_t pmap_initialized;
+
+/*
+ * Misc. locking data structures
+ */
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
 static struct lock pmap_main_lock;
-static struct simplelock pvalloc_lock;
-static struct simplelock pmaps_lock;
-#ifdef LOCKDEBUG
+
 #define PMAP_MAP_TO_HEAD_LOCK() \
      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
 #define PMAP_MAP_TO_HEAD_UNLOCK() \
      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-
 #define PMAP_HEAD_TO_MAP_LOCK() \
      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
 #define PMAP_HEAD_TO_MAP_UNLOCK() \
-     (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
+     spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
 #else
-#define        PMAP_MAP_TO_HEAD_LOCK()         /* nothing */
-#define        PMAP_MAP_TO_HEAD_UNLOCK()       /* nothing */
-#define        PMAP_HEAD_TO_MAP_LOCK()         /* nothing */
-#define        PMAP_HEAD_TO_MAP_UNLOCK()       /* nothing */
-#endif /* LOCKDEBUG */
-   
+#define PMAP_MAP_TO_HEAD_LOCK()                /* null */
+#define PMAP_MAP_TO_HEAD_UNLOCK()      /* null */
+#define PMAP_HEAD_TO_MAP_LOCK()                /* null */
+#define PMAP_HEAD_TO_MAP_UNLOCK()      /* null */
+#endif
+
+#define        pmap_acquire_pmap_lock(pm)                      \
+       do {                                            \
+               if ((pm) != pmap_kernel())              \
+                       simple_lock(&(pm)->pm_lock);    \
+       } while (/*CONSTCOND*/0)
+
+#define        pmap_release_pmap_lock(pm)                      \
+       do {                                            \
+               if ((pm) != pmap_kernel())              \
+                       simple_unlock(&(pm)->pm_lock);  \
+       } while (/*CONSTCOND*/0)
+
+
 /*
- * pv_page management structures: locked by pvalloc_lock
+ * Metadata for L1 translation tables.
  */
-
-TAILQ_HEAD(pv_pagelist, pv_page);
-static struct pv_pagelist pv_freepages;        /* list of pv_pages with free entrys */
-static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
-static unsigned int pv_nfpvents;       /* # of free pv entries */
-static struct pv_page *pv_initpage;    /* bootstrap page from kernel_map */
-static vaddr_t pv_cachedva;            /* cached VA for later use */
-
-#define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
-#define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
-                                       /* high water mark */
+struct l1_ttable {
+       /* Entry on the L1 Table list */


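The hunk above replaces the old PT-PT pool with several pool(9)/pool_cache(9) pairs (pmap_pmap_cache, pmap_l2dtable_cache, pmap_l2ptp_cache); as the new comments note, the point of the cache is that an object comes back still constructed, so the cost of zeroing the embedded table (1KB for pm_l2[], ~196 bytes for an l2_dtable) is paid only when a page is freshly allocated. Below is a minimal sketch of that pattern using the pool_cache interface as it existed around this time. It is not part of the commit; the example_* names, the 1KB table, and the constructor are hypothetical.

/*
 * Illustrative sketch only -- not part of the diff above.  It mirrors
 * the pmap_pmap_pool/pmap_pmap_cache pattern using the pool_cache(9)
 * API of the NetBSD 1.6/2.0 era.  All example_* names are hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>

struct example_obj {
	char	eo_table[1024];	/* large array we only want to zero once */
	int	eo_refs;
};

static struct pool example_pool;
static struct pool_cache example_cache;

/* Constructor runs only when a brand-new object enters the cache. */
static int
example_ctor(void *arg, void *obj, int flags)
{
	struct example_obj *eo = obj;

	memset(eo->eo_table, 0, sizeof(eo->eo_table));
	return (0);
}

void
example_init(void)
{

	pool_init(&example_pool, sizeof(struct example_obj), 0, 0, 0,
	    "examplpl", &pool_allocator_nointr);
	pool_cache_init(&example_cache, &example_pool,
	    example_ctor, NULL, NULL);
}

struct example_obj *
example_alloc(void)
{

	/* A cached object is still constructed: no fresh memset here. */
	return (pool_cache_get(&example_cache, PR_WAITOK));
}

void
example_free(struct example_obj *eo)
{

	/*
	 * The caller is expected to have restored the constructed state
	 * (as the pmap does for pm_l2[]) before handing the object back,
	 * so the next pool_cache_get() can skip the constructor.
	 */
	pool_cache_put(&example_cache, eo);
}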

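Similarly, the PMAP_MAP_TO_HEAD_LOCK/PMAP_HEAD_TO_MAP_LOCK macros together with the new pmap_acquire_pmap_lock/pmap_release_pmap_lock helpers encode a two-level locking order: the "main" lock is taken first (shared for map-to-head traversals, exclusive for head-to-map), then the per-pmap simplelock, which is skipped for the kernel pmap. The following sketch shows how a map-to-head operation inside this pmap.c would bracket its work; the function and its body are hypothetical and not part of the commit.

/*
 * Illustrative sketch only -- not part of the diff above.  Assumes it
 * lives in the same pmap.c, so the macros defined there are visible.
 */
void
example_pmap_op(pmap_t pm, vaddr_t va)
{

	/* Map-to-head direction: shared "main" lock first ... */
	PMAP_MAP_TO_HEAD_LOCK();
	/* ... then the per-pmap lock (a no-op for pmap_kernel()). */
	pmap_acquire_pmap_lock(pm);

	/* ... look up or modify L2 descriptors, PV entries, etc. ... */

	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();
}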