Source-Changes-HG archive


[src/trunk]: src/sys/arch/ia64 Attempted port over from FreeBSD with suggesti...



details:   https://anonhg.NetBSD.org/src/rev/0dcc5df91555
branches:  trunk
changeset: 822820:0dcc5df91555
user:      scole <scole%NetBSD.org@localhost>
date:      Sat Apr 08 18:08:33 2017 +0000

description:
Attempted port over from FreeBSD, with suggestions from <chs>.  Still
more work is needed, but at least the ski simulator and hardware now
die at the same place.

diffstat:

 sys/arch/ia64/ia64/pmap.c    |  4007 +++++++++++++++++++++++++----------------
 sys/arch/ia64/include/pmap.h |   134 +-
 2 files changed, 2517 insertions(+), 1624 deletions(-)

diffs (truncated from 4411 to 300 lines):

diff -r 8dbb95787aca -r 0dcc5df91555 sys/arch/ia64/ia64/pmap.c
--- a/sys/arch/ia64/ia64/pmap.c Sat Apr 08 18:05:36 2017 +0000
+++ b/sys/arch/ia64/ia64/pmap.c Sat Apr 08 18:08:33 2017 +0000
@@ -1,5 +1,4 @@
-/* $NetBSD: pmap.c,v 1.34 2016/12/23 17:26:43 scole Exp $ */
-
+/* $NetBSD: pmap.c,v 1.35 2017/04/08 18:08:33 scole Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -80,18 +79,18 @@
 
 /* __FBSDID("$FreeBSD: src/sys/ia64/ia64/pmap.c,v 1.172 2005/11/20 06:09:48 alc Exp $"); */
 
-
-/* XXX: This module is a mess. Need to clean up Locking, list traversal. etc....... */
-
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.34 2016/12/23 17:26:43 scole Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.35 2017/04/08 18:08:33 scole Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/buf.h>
 #include <sys/reboot.h>
 #include <sys/lock.h>
+#include <sys/pool.h>
+#include <sys/sched.h>
+#include <sys/bitops.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_physseg.h>
@@ -99,47 +98,158 @@
 #include <machine/pal.h>
 #include <machine/atomic.h>
 #include <machine/pte.h>
-#include <sys/sched.h>
 #include <machine/cpufunc.h>
 #include <machine/md_var.h>
-
+#include <machine/vmparam.h>
+
+/*
+ *     Manages physical address maps.
+ *
+ *     Since the information managed by this module is
+ *     also stored by the logical address mapping module,
+ *     this module may throw away valid virtual-to-physical
+ *     mappings at almost any time.  However, invalidations
+ *     of virtual-to-physical mappings must be done as
+ *     requested.
+ *
+ *     In order to cope with hardware architectures which
+ *     make virtual-to-physical map invalidates expensive,
+ *     this module may delay invalidate or reduced protection
+ *     operations until such time as they are actually
+ *     necessary.  This module is given full information as
+ *     to which processors are currently using which maps,
+ *     and to when physical maps must be made correct.
+ */
+
+/*
+ * Following the Linux model, region IDs are allocated in groups of
+ * eight so that a single region ID can be used for as many RRs as we
+ * want by encoding the RR number into the low bits of the ID.
+ *
+ * We reserve region ID 0 for the kernel and allocate the remaining
+ * IDs for user pmaps.
+ *
+ * Region 0-3: User virtually mapped
+ * Region 4:   PBVM and special mappings
+ * Region 5:   Kernel virtual memory
+ * Region 6:   Direct-mapped uncacheable
+ * Region 7:   Direct-mapped cacheable
+ */
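As an illustration of the grouping described above (a hypothetical
helper, not part of this diff): one allocated base ID covers all eight
region registers because the RR number occupies the low three bits of
the final ID.

static uint64_t
pmap_rid_for_rr(uint64_t base_rid, u_int rr)
{
        /* low three bits of the resulting ID select the RR */
        return (base_rid << 3) | rr;
}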
+
+#if !defined(DIAGNOSTIC)
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PV_STATS
+#define PV_STAT(x)     do { x ; } while (0)
+#else
+#define PV_STAT(x)     do { } while (0)
+#endif
+
+#define        pmap_accessed(lpte)             ((lpte)->pte & PTE_ACCESSED)
+#define        pmap_dirty(lpte)                ((lpte)->pte & PTE_DIRTY)
+#define        pmap_exec(lpte)                 ((lpte)->pte & PTE_AR_RX)
+#define        pmap_managed(lpte)              ((lpte)->pte & PTE_MANAGED)
+#define        pmap_ppn(lpte)                  ((lpte)->pte & PTE_PPN_MASK)
+#define        pmap_present(lpte)              ((lpte)->pte & PTE_PRESENT)
+#define        pmap_prot(lpte)                 (((lpte)->pte & PTE_PROT_MASK) >> 56)
+#define        pmap_wired(lpte)                ((lpte)->pte & PTE_WIRED)
+
+#define        pmap_clear_accessed(lpte)       (lpte)->pte &= ~PTE_ACCESSED
+#define        pmap_clear_dirty(lpte)          (lpte)->pte &= ~PTE_DIRTY
+#define        pmap_clear_present(lpte)        (lpte)->pte &= ~PTE_PRESENT
+#define        pmap_clear_wired(lpte)          (lpte)->pte &= ~PTE_WIRED
+
+#define        pmap_set_wired(lpte)            (lpte)->pte |= PTE_WIRED
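A hedged usage sketch for the accessors above (pmap_sync_dirty() is
hypothetical; vm_page_dirty() is the compatibility macro defined later
in this diff): fold a hardware-set dirty bit back into the uvm page
before clearing it in the PTE.

static bool
pmap_sync_dirty(struct ia64_lpte *lpte, struct vm_page *pg)
{
        if (!pmap_managed(lpte) || !pmap_dirty(lpte))
                return false;
        vm_page_dirty(pg);              /* mark the whole page dirty */
        pmap_clear_dirty(lpte);         /* then reset the PTE's D bit */
        return true;
}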
+
+/*
+ * Individual PV entries are stored in per-pmap chunks.  This saves
+ * space by eliminating the need to record the pmap within every PV
+ * entry.
+ */
+#if PAGE_SIZE == 8192
+#define        _NPCM   6
+#define        _NPCPV  337
+#define        _NPCS   2
+#elif PAGE_SIZE == 16384
+#define        _NPCM   11
+#define        _NPCPV  677
+#define        _NPCS   1
+#else
+#error "invalid page size"
+#endif
+
+struct pv_chunk {
+       pmap_t                  pc_pmap;
+       TAILQ_ENTRY(pv_chunk)   pc_list;
+       u_long                  pc_map[_NPCM];  /* bitmap; 1 = free */
+       TAILQ_ENTRY(pv_chunk)   pc_lru;
+       u_long                  pc_spare[_NPCS];
+       struct pv_entry         pc_pventry[_NPCPV];
+};
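The constants above make each chunk fill one page exactly: assuming
the 24-byte struct pv_entry inherited from FreeBSD, 337 entries plus
the 104-byte chunk header come to 8192 bytes, and _NPCM = 6 is the
smallest number of 64-bit words that gives every entry a bitmap bit
(6 * 64 = 384 >= 337).  A minimal allocation sketch (hypothetical
helper; ffs64() comes from the <sys/bitops.h> this diff adds):

static pv_entry_t
pv_chunk_alloc(struct pv_chunk *pc)
{
        int field, bit;

        for (field = 0; field < _NPCM; field++) {
                if (pc->pc_map[field] == 0)
                        continue;
                bit = ffs64(pc->pc_map[field]) - 1;   /* lowest free slot */
                pc->pc_map[field] &= ~(1UL << bit);
                return &pc->pc_pventry[field * 64 + bit];
        }
        return NULL;                                  /* chunk is full */
}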
+
+/*
+ * The VHPT bucket head structure.
+ */
+struct ia64_bucket {
+       uint64_t        chain;
+       kmutex_t        mutex;
+       u_int           length;
+};
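The bucket head anchors a collision chain of inserted PTEs.  A lookup
sketch modelled on the FreeBSD pmap_find_vhpt() this file is ported
from (ia64_thash() and ia64_ttag() wrap the hardware hash and tag
instructions; the VHPT slot's chain field points back at its bucket,
as set up in pmap_initialize_vhpt() below; bucket locking is omitted
for brevity):

static struct ia64_lpte *
vhpt_lookup(vaddr_t va)
{
        struct ia64_bucket *bckt;
        struct ia64_lpte *pte;
        uint64_t tag;

        pte = (struct ia64_lpte *)ia64_thash(va);
        bckt = (struct ia64_bucket *)pte->chain;

        tag = ia64_ttag(va);
        pte = (struct ia64_lpte *)bckt->chain;
        while (pte != NULL && pte->tag != tag)
                pte = (struct ia64_lpte *)pte->chain;
        return pte;
}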
+
+/*
+ * Statically allocated kernel pmap
+ */
+static struct pmap kernel_pmap_store;/* the kernel's pmap (proc0) */
+struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
+
+vaddr_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vaddr_t virtual_end;   /* VA of last avail page (end of kernel AS) */
+
+/* XXX freebsd, needs to be sorted out */
+#define kernel_pmap                    pmap_kernel()
+#define critical_enter()               kpreempt_disable()
+#define critical_exit()                        kpreempt_enable()
+/* flags the entire page as dirty */
+#define vm_page_dirty(page)            (page->flags &= ~PG_CLEAN)
+#define vm_page_is_managed(page) (pmap_initialized && uvm_pageismanaged(VM_PAGE_TO_PHYS(page)))
 
 /*
  * Kernel virtual memory management.
  */
 static int nkpt;
-struct ia64_lpte **ia64_kptdir;
-#define KPTE_DIR_INDEX(va) \
-       ((va >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
+
+extern struct ia64_lpte ***ia64_kptdir;
+
+#define KPTE_DIR0_INDEX(va) \
+       (((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1))
+#define KPTE_DIR1_INDEX(va) \
+       (((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
 #define KPTE_PTE_INDEX(va) \
-       ((va >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
+       (((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
+
 #define NKPTEPG                (PAGE_SIZE / sizeof(struct ia64_lpte))
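A sketch of how the three indices combine (hypothetical helper in the
shape the macros imply): ia64_kptdir is a page of pointers to
second-level directory pages, each entry of which points to a page of
struct ia64_lpte.

static struct ia64_lpte *
kpte_lookup(vaddr_t va)
{
        struct ia64_lpte **dir1;

        dir1 = ia64_kptdir[KPTE_DIR0_INDEX(va)];
        if (dir1 == NULL || dir1[KPTE_DIR1_INDEX(va)] == NULL)
                return NULL;            /* not yet populated */
        return &dir1[KPTE_DIR1_INDEX(va)][KPTE_PTE_INDEX(va)];
}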
 
-
-/* Values for ptc.e. XXX values for SKI. */
+vaddr_t kernel_vm_end;
+
+/* Defaults for ptc.e. */
+/*
+static uint64_t pmap_ptc_e_base = 0;
+static uint32_t pmap_ptc_e_count1 = 1;
+static uint32_t pmap_ptc_e_count2 = 1;
+static uint32_t pmap_ptc_e_stride1 = 0;
+static uint32_t pmap_ptc_e_stride2 = 0;
+*/
+/* Values for ptc.e. XXX values for SKI, add SKI kernel option methinks */
 static uint64_t pmap_ptc_e_base = 0x100000000;
 static uint64_t pmap_ptc_e_count1 = 3;
 static uint64_t pmap_ptc_e_count2 = 2;
 static uint64_t pmap_ptc_e_stride1 = 0x2000;
 static uint64_t pmap_ptc_e_stride2 = 0x100000000;
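These five values parameterize the global TLB purge loop that PAL
describes via PAL_PTCE_INFO: ptc.e must be issued count1 * count2
times while stepping the address by the two strides.  A sketch of the
consuming loop in the shape FreeBSD's pmap_invalidate_all() uses
(ia64_ptc_e() wraps the instruction; preemption must be disabled
around the loop):

static void
pmap_tlb_purge_all(void)
{
        uint64_t addr;
        u_int i, j;

        addr = pmap_ptc_e_base;
        for (i = 0; i < pmap_ptc_e_count1; i++) {
                for (j = 0; j < pmap_ptc_e_count2; j++) {
                        ia64_ptc_e(addr);
                        addr += pmap_ptc_e_stride2;
                }
                addr += pmap_ptc_e_stride1;
        }
}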
-kmutex_t pmap_ptc_lock;                        /* Global PTC lock */
-
-/* VHPT Base */
-
-vaddr_t vhpt_base;
-vaddr_t pmap_vhpt_log2size;
-
-struct ia64_bucket *pmap_vhpt_bucket;
-int pmap_vhpt_nbuckets;
-kmutex_t pmap_vhptlock;                        /* VHPT collision chain lock */
-
-int pmap_vhpt_inserts;
-int pmap_vhpt_resident;
-int pmap_vhpt_collisions;
-
-#ifdef DEBUG
-static void dump_vhpt(void);
-#endif
+
+kmutex_t pmap_ptc_mutex;
 
 /*
  * Data for the RID allocator
@@ -149,147 +259,1033 @@
 static int pmap_ridmapsz;
 static int pmap_ridmax;
 static uint64_t *pmap_ridmap;
-kmutex_t pmap_rid_lock;                        /* RID allocator lock */
-
-
-bool           pmap_initialized;       /* Has pmap_init completed? */
-u_long         pmap_pages_stolen;      /* instrumentation */
-
-static struct pmap kernel_pmap_store;  /* the kernel's pmap (proc0) */
-struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
-
-static vaddr_t kernel_vm_end;  /* VA of last avail page ( end of kernel Address Space ) */
+kmutex_t pmap_ridmutex;
+
+static krwlock_t pvh_global_lock __attribute__ ((aligned (128)));
+
+static pool_cache_t pmap_pool_cache;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static int pv_entry_count;
+
+/*
+ * Data for allocating PTEs for user processes.
+ */
+static pool_cache_t pte_pool_cache;
+
+struct ia64_bucket *pmap_vhpt_bucket;
+
+/* XXX For freebsd, these are sysctl variables */
+static uint64_t pmap_vhpt_nbuckets = 0;
+uint64_t pmap_vhpt_log2size = 0;
+static uint64_t pmap_vhpt_inserts = 0;
+
+static bool pmap_initialized = false;   /* Has pmap_init completed? */
+static uint64_t pmap_pages_stolen = 0;  /* instrumentation */
+
+static struct ia64_lpte *pmap_find_vhpt(vaddr_t va);
+
+static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t pmap, bool try);
+static struct vm_page *pmap_pv_reclaim(pmap_t locked_pmap);
+
+static void    pmap_free_pte(struct ia64_lpte *pte, vaddr_t va);
+static int     pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
+                   vaddr_t va, pv_entry_t pv, int freepte);
+static int     pmap_remove_vhpt(vaddr_t va);
+
+static vaddr_t pmap_steal_vhpt_memory(vsize_t);
+
+static struct vm_page *vm_page_alloc1(void);
+static void vm_page_free1(struct vm_page *pg);
+
+static vm_memattr_t pmap_flags_to_memattr(u_int flags);
+
+#if DEBUG
+static void pmap_testout(void);
+#endif
+
+static void
+pmap_initialize_vhpt(vaddr_t vhpt)
+{
+       struct ia64_lpte *pte;
+       u_int i;
+
+       pte = (struct ia64_lpte *)vhpt;
+       for (i = 0; i < pmap_vhpt_nbuckets; i++) {
+               pte[i].pte = 0;
+               pte[i].itir = 0;
+               pte[i].tag = 1UL << 63; /* Invalid tag */
+               pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
+       }
+}
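Note that bit 63 of a VHPT tag is the architected tag-invalid (ti)
bit, so writing 1UL << 63 guarantees the hardware walker can never
match an empty slot, while the chain pointer back to the bucket is
what lets lookup code (as sketched after struct ia64_bucket above)
recover the collision list from any VHPT slot.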
+
+#ifdef MULTIPROCESSOR
+vaddr_t
+pmap_alloc_vhpt(void)


