Source-Changes-HG archive

[src/trunk]: src/sys/arch map all of physical memory using large pages.



details:   https://anonhg.NetBSD.org/src/rev/df93ff8faac4
branches:  trunk
changeset: 771860:df93ff8faac4
user:      chs <chs%NetBSD.org@localhost>
date:      Sun Dec 04 16:24:13 2011 +0000

description:
Map all of physical memory using large pages.
Ported from OpenBSD years ago by Murray Armfield;
updated for changes since then by me.

diffstat:

 sys/arch/amd64/amd64/locore.S  |   29 ++++++++-
 sys/arch/amd64/amd64/machdep.c |   64 ++++++++++++------
 sys/arch/amd64/include/types.h |   10 ++-
 sys/arch/x86/include/pmap.h    |   23 ++++++-
 sys/arch/x86/x86/pmap.c        |  138 ++++++++++++++++++++++++++++++++++++----
 5 files changed, 223 insertions(+), 41 deletions(-)

diffs (truncated from 555 to 300 lines):

diff -r 6437bce7320a -r df93ff8faac4 sys/arch/amd64/amd64/locore.S
--- a/sys/arch/amd64/amd64/locore.S     Sun Dec 04 15:15:41 2011 +0000
+++ b/sys/arch/amd64/amd64/locore.S     Sun Dec 04 16:24:13 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.65 2011/05/20 13:32:35 joerg Exp $        */
+/*     $NetBSD: locore.S,v 1.66 2011/12/04 16:24:13 chs Exp $  */
 
 /*
  * Copyright-o-rama!
@@ -1290,10 +1290,11 @@
        jmp     .Losyscall_checkast     /* re-check ASTs */
 
 /*
- * void sse2_idlezero_page(void *pg)
+ * bool sse2_idlezero_page(void *pg)
  *
  * Zero a page without polluting the cache.  Preemption must be
  * disabled by the caller. Abort if a preemption is pending.
+ * Returns true if the page is zeroed, false if not.
  */
 ENTRY(sse2_idlezero_page)
        pushq   %rbp
@@ -1323,3 +1324,27 @@
        sfence
        popq    %rbp
        ret
+
+/*
+ * void pagezero(vaddr_t va)
+ *
+ * Zero a page without polluting the cache.
+ */
+
+ENTRY(pagezero)
+       movq    $-PAGE_SIZE,%rdx
+       subq    %rdx,%rdi
+       xorq    %rax,%rax
+1:
+       movnti  %rax,(%rdi,%rdx)
+       movnti  %rax,8(%rdi,%rdx)
+       movnti  %rax,16(%rdi,%rdx)
+       movnti  %rax,24(%rdi,%rdx)
+       movnti  %rax,32(%rdi,%rdx)
+       movnti  %rax,40(%rdi,%rdx)
+       movnti  %rax,48(%rdi,%rdx)
+       movnti  %rax,56(%rdi,%rdx)
+       addq    $64,%rdx
+       jne     1b
+       sfence
+       ret
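
For readers following the new assembly: pagezero() walks the page in 64-byte strides, issuing eight 8-byte movnti (non-temporal) stores per iteration so the zeroes bypass the cache, and finishes with sfence to make the weakly-ordered stores globally visible. A rough userland equivalent using the SSE2 intrinsics (an illustrative sketch only, not code from this changeset; the function name is made up):

#include <emmintrin.h>		/* _mm_stream_si64(), _mm_sfence() */
#include <stddef.h>

#define PAGE_SIZE	4096

/* Zero one page with non-temporal stores, as pagezero() does above. */
static void
pagezero_sketch(void *pg)
{
	long long *p = pg;
	size_t i;

	for (i = 0; i < PAGE_SIZE / sizeof(*p); i++)
		_mm_stream_si64(&p[i], 0);	/* movnti: bypass the cache */
	_mm_sfence();				/* order the streaming stores */
}

int
main(void)
{
	static long long page[PAGE_SIZE / sizeof(long long)];

	pagezero_sketch(page);
	return page[0] == 0 ? 0 : 1;
}

The kernel routine does the same work but unrolls eight stores per pass and counts %rdx up from -PAGE_SIZE towards zero, so the loop overhead is a single add and branch.
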
diff -r 6437bce7320a -r df93ff8faac4 sys/arch/amd64/amd64/machdep.c
--- a/sys/arch/amd64/amd64/machdep.c    Sun Dec 04 15:15:41 2011 +0000
+++ b/sys/arch/amd64/amd64/machdep.c    Sun Dec 04 16:24:13 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: machdep.c,v 1.171 2011/11/20 18:42:56 yamt Exp $       */
+/*     $NetBSD: machdep.c,v 1.172 2011/12/04 16:24:13 chs Exp $        */
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -111,7 +111,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.171 2011/11/20 18:42:56 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.172 2011/12/04 16:24:13 chs Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -1678,6 +1678,26 @@
         * Page 7:      Temporary page map level 4
         */
        avail_start = 8 * PAGE_SIZE;
+
+#if !defined(REALBASEMEM) && !defined(REALEXTMEM)
+
+       /*
+        * Check to see if we have a memory map from the BIOS (passed
+        * to us by the boot program).
+        */
+       bim = lookup_bootinfo(BTINFO_MEMMAP);
+       if (bim != NULL && bim->num > 0)
+               initx86_parse_memmap(bim, iomem_ex);
+
+#endif /* ! REALBASEMEM && ! REALEXTMEM */
+
+       /*
+        * If the loop above didn't find any valid segment, fall back to
+        * former code.
+        */
+       if (mem_cluster_cnt == 0)
+               initx86_fake_memmap(iomem_ex);
+
 #else  /* XEN */
        /* Parse Xen command line (replace bootinfo) */
        xen_parse_cmdline(XEN_PARSE_BOOTFLAGS, NULL);
@@ -1701,25 +1721,6 @@
                pmap_prealloc_lowmem_ptps();
 
 #ifndef XEN
-#if !defined(REALBASEMEM) && !defined(REALEXTMEM)
-
-       /*
-        * Check to see if we have a memory map from the BIOS (passed
-        * to us by the boot program).
-        */
-       bim = lookup_bootinfo(BTINFO_MEMMAP);
-       if (bim != NULL && bim->num > 0)
-               initx86_parse_memmap(bim, iomem_ex);
-
-#endif /* ! REALBASEMEM && ! REALEXTMEM */
-
-       /*
-        * If the loop above didn't find any valid segment, fall back to
-        * former code.
-        */
-       if (mem_cluster_cnt == 0)
-               initx86_fake_memmap(iomem_ex);
-
        initx86_load_memmap(first_avail);
 
 #else  /* XEN */
@@ -2327,3 +2328,24 @@
        }
 }
 #endif
+
+#ifdef __HAVE_DIRECT_MAP
+bool
+mm_md_direct_mapped_io(void *addr, paddr_t *paddr)
+{
+       vaddr_t va = (vaddr_t)addr;
+
+       if (va >= PMAP_DIRECT_BASE && va < PMAP_DIRECT_END) {
+               *paddr = PMAP_DIRECT_UNMAP(va);
+               return true;
+       }
+       return false;
+}
+
+bool
+mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
+{
+       *vaddr = PMAP_DIRECT_MAP(paddr);
+       return true;
+}
+#endif
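
These are the hooks that the __HAVE_MM_MD_DIRECT_MAPPED_IO / __HAVE_MM_MD_DIRECT_MAPPED_PHYS options (added in types.h below) advertise to the machine-independent /dev/mem code: a kernel VA inside the direct-map window converts back to a physical address by plain subtraction, and any physical address gets a fixed VA by addition, with no temporary mapping needed. A userland sketch of the same range check and translation; the window bounds here are assumed values for L4 slot 509 (see the arithmetic sketch after the pmap.h hunk below), not constants copied from this changeset:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DIRECT_BASE	0xfffffe8000000000ULL	/* PMAP_DIRECT_BASE (assumed) */
#define DIRECT_END	0xffffff0000000000ULL	/* PMAP_DIRECT_END (assumed) */

static bool
direct_mapped_io_sketch(uint64_t va, uint64_t *pa)
{
	if (va >= DIRECT_BASE && va < DIRECT_END) {
		*pa = va - DIRECT_BASE;		/* PMAP_DIRECT_UNMAP() */
		return true;
	}
	return false;
}

int
main(void)
{
	uint64_t pa;

	assert(direct_mapped_io_sketch(DIRECT_BASE + 0x1000, &pa) && pa == 0x1000);
	assert(!direct_mapped_io_sketch(0xffffffff80000000ULL, &pa));	/* kernel text lies outside */
	return 0;
}

mm_md_direct_mapped_phys() can simply return true because every physical address up to the size of the window has a fixed virtual alias in the direct map.
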
diff -r 6437bce7320a -r df93ff8faac4 sys/arch/amd64/include/types.h
--- a/sys/arch/amd64/include/types.h    Sun Dec 04 15:15:41 2011 +0000
+++ b/sys/arch/amd64/include/types.h    Sun Dec 04 16:24:13 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: types.h,v 1.39 2011/07/06 18:46:04 dyoung Exp $        */
+/*     $NetBSD: types.h,v 1.40 2011/12/04 16:24:13 chs Exp $   */
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -92,8 +92,14 @@
 #define        __HAVE_INTR_CONTROL
 
 #ifdef _KERNEL_OPT
+#define        __HAVE_RAS
+
 #include "opt_xen.h"
-#define        __HAVE_RAS
+#if defined(__x86_64__) && !defined(XEN)
+#define        __HAVE_DIRECT_MAP 1
+#define        __HAVE_MM_MD_DIRECT_MAPPED_IO
+#define        __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+#endif
 #endif
 
 #else  /*      !__x86_64__     */
diff -r 6437bce7320a -r df93ff8faac4 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h       Sun Dec 04 15:15:41 2011 +0000
+++ b/sys/arch/x86/include/pmap.h       Sun Dec 04 16:24:13 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.48 2011/11/23 01:16:55 jym Exp $    */
+/*     $NetBSD: pmap.h,v 1.49 2011/12/04 16:24:13 chs Exp $    */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -461,6 +461,27 @@
  */
 #define        POOL_VTOPHYS(va)        vtophys((vaddr_t) (va))
 
+#ifdef __HAVE_DIRECT_MAP
+
+#define L4_SLOT_DIRECT         509
+#define PDIR_SLOT_DIRECT       L4_SLOT_DIRECT
+
+#define PMAP_DIRECT_BASE       (VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
+#define PMAP_DIRECT_END                (VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))
+
+#define PMAP_DIRECT_MAP(pa)    ((vaddr_t)PMAP_DIRECT_BASE + (pa))
+#define PMAP_DIRECT_UNMAP(va)  ((paddr_t)(va) - PMAP_DIRECT_BASE)
+
+/*
+ * Alternate mapping hooks for pool pages.
+ */
+#define PMAP_MAP_POOLPAGE(pa)  PMAP_DIRECT_MAP((pa))
+#define PMAP_UNMAP_POOLPAGE(va)        PMAP_DIRECT_UNMAP((va))
+
+void   pagezero(vaddr_t);
+
+#endif /* __HAVE_DIRECT_MAP */
+
 #endif /* _KERNEL */
 
 #endif /* _X86_PMAP_H_ */
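
For orientation: NBPD_L4 is the span of virtual address space covered by one L4 (PML4) slot, 512 GiB on amd64, and VA_SIGN_NEG() sign-extends the result into a canonical address, so slot 509 yields a single 512 GiB direct-map window. A small standalone check of that arithmetic (the VA_SIGN_NEG definition below is a simplified assumption for illustration, not copied from the headers):

#include <stdint.h>
#include <stdio.h>

#define NBPD_L4		(1ULL << 39)	/* 512 GiB of VA per L4 slot */
#define VA_SIGN_NEG(va)	((va) | 0xffff000000000000ULL)	/* assumed sign extension */
#define L4_SLOT_DIRECT	509

int
main(void)
{
	uint64_t base = VA_SIGN_NEG(L4_SLOT_DIRECT * NBPD_L4);
	uint64_t end  = VA_SIGN_NEG((L4_SLOT_DIRECT + 1) * NBPD_L4);

	/* Expect 0xfffffe8000000000 .. 0xffffff0000000000: one 512 GiB window. */
	printf("direct map: %#llx .. %#llx, %llu GiB\n",
	    (unsigned long long)base, (unsigned long long)end,
	    (unsigned long long)((end - base) >> 30));
	return 0;
}

With that window in place, PMAP_MAP_POOLPAGE/PMAP_UNMAP_POOLPAGE reduce pool-page mapping to the same add/subtract, which is the main performance point of the direct map.
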
diff -r 6437bce7320a -r df93ff8faac4 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Sun Dec 04 15:15:41 2011 +0000
+++ b/sys/arch/x86/x86/pmap.c   Sun Dec 04 16:24:13 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.142 2011/11/20 19:41:27 jym Exp $   */
+/*     $NetBSD: pmap.c,v 1.143 2011/12/04 16:24:13 chs Exp $   */
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.142 2011/11/20 19:41:27 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.143 2011/12/04 16:24:13 chs Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -190,6 +190,7 @@
 #include <sys/cpu.h>
 #include <sys/intr.h>
 #include <sys/xcall.h>
+#include <sys/kcore.h>
 
 #include <uvm/uvm.h>
 
@@ -199,6 +200,7 @@
 #include <machine/gdt.h>
 #include <machine/isa_machdep.h>
 #include <machine/cpuvar.h>
+#include <machine/cputypes.h>
 
 #include <x86/pmap.h>
 #include <x86/pmap_pv.h>
@@ -486,6 +488,13 @@
 
 static struct pool_cache pmap_pv_cache;
 
+#ifdef __HAVE_DIRECT_MAP
+
+extern phys_ram_seg_t mem_clusters[];
+extern int mem_cluster_cnt;
+
+#else
+
 /*
  * MULTIPROCESSOR: special VA's/ PTE's are actually allocated inside a
  * maxcpus*NPTECL array of PTE's, to avoid cache line thrashing
@@ -506,6 +515,8 @@
 static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte, *early_zero_pte;
 static char *csrcp, *cdstp, *zerop, *ptpp, *early_zerop;
 
+#endif
+
 int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
 
 /* PDP pool_cache(9) and its callbacks */
@@ -1171,9 +1182,16 @@
        int i;
        vaddr_t kva;
 #ifndef XEN
+       pd_entry_t *pde;
        unsigned long p1i;
        vaddr_t kva_end;
 #endif
+#ifdef __HAVE_DIRECT_MAP
+       phys_ram_seg_t *mc;
+       long ndmpdp;
+       paddr_t dmpd, dmpdp, pdp;
+       vaddr_t tmpva;
+#endif
 
        pt_entry_t pg_nx = (cpu_feature[2] & CPUID_NOX ? PG_NX : 0);
 
@@ -1269,7 +1287,6 @@
 
        if (cpu_feature[0] & CPUID_PSE) {
                paddr_t pa;
-               pd_entry_t *pde;
                extern char __data_start;
 
                lcr4(rcr4() | CR4_PSE); /* enable hardware (via %cr4) */
@@ -1305,6 +1322,58 @@
        }
 #endif /* !XEN */
 
+#ifdef __HAVE_DIRECT_MAP
+
+       tmpva = (KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);
+       pte = PTE_BASE + pl1_i(tmpva);
+
+       /*
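
The posted diff is truncated at this point (555 lines cut to 300), in the middle of the pmap_bootstrap() hunk that actually builds the direct map; the locals declared earlier (ndmpdp, dmpd, dmpdp, tmpva) hint at the sizing problem the elided code has to solve: how many 2 MiB large-page PDEs, and therefore how many page-directory pages, are needed to cover the highest physical address reported by mem_clusters[]. A standalone sketch of that sizing arithmetic only, with an assumed 16 GiB of RAM; this is illustrative and is not the elided code:

#include <stdint.h>
#include <stdio.h>

#define NBPD_L2	(1ULL << 21)	/* 2 MiB mapped per large-page PDE */
#define NBPD_L3	(1ULL << 30)	/* 1 GiB mapped per page-directory page */

int
main(void)
{
	uint64_t maxpa = 16ULL << 30;	/* assume 16 GiB of physical memory */
	uint64_t npde = (maxpa + NBPD_L2 - 1) / NBPD_L2;	/* 2 MiB entries */
	uint64_t ndmpdp = (maxpa + NBPD_L3 - 1) / NBPD_L3;	/* PD pages needed */

	printf("%llu large-page PDEs across %llu page-directory pages\n",
	    (unsigned long long)npde, (unsigned long long)ndmpdp);
	return 0;
}

For 16 GiB of RAM that works out to 8192 PDEs in 16 directory pages, i.e. about 64 KiB of page-table overhead to map all of physical memory.
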


