Source-Changes-HG archive


[src/trunk]: src/sys/arch/x86/x86 Put the code in charge of remapping the kernel segments with large pages into another function



details:   https://anonhg.NetBSD.org/src/rev/deec4ce72a94
branches:  trunk
changeset: 346204:deec4ce72a94
user:      maxv <maxv@NetBSD.org>
date:      Fri Jul 01 11:20:01 2016 +0000

description:
Put the code in charge of remapping the kernel segments with large pages
into another function. No functional change.

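For readers less familiar with the mechanism being refactored: on x86, a
large-page mapping is installed by writing a level-2 page directory entry
(PDE) with the PS bit set, so a single entry maps a whole 2 MB region
(4 MB without PAE) directly instead of pointing at a page table of 4 KB
entries. Below is a minimal standalone sketch of such an entry's bit
layout; the flag values are the architectural x86 bits, and the names are
illustrative stand-ins for the PG_*/pmap_pg_* macros that appear in the
diff, not the NetBSD definitions themselves.

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural x86 PDE bits; the names mimic, but are not,
     * NetBSD's PG_* macros. */
    #define PDE_V   0x001ULL        /* valid/present */
    #define PDE_W   0x002ULL        /* writable (clear = read-only) */
    #define PDE_PS  0x080ULL        /* page size: entry maps a large page */
    #define PDE_G   0x100ULL        /* global: kept across %cr3 reloads */
    #define PDE_NX  (1ULL << 63)    /* no-execute (needs EFER.NXE) */

    int
    main(void)
    {
            uint64_t pa = 0x200000ULL;  /* 2 MB-aligned physical address */

            /* A read-only, global, non-executable large page: analogous
             * to the rodata case in the diff, i.e.
             * pa | pmap_pg_g | PG_PS | pmap_pg_nx | PG_KR | PG_V. */
            uint64_t pde = pa | PDE_G | PDE_PS | PDE_NX | PDE_V;

            printf("PDE = %#llx\n", (unsigned long long)pde);
            return 0;
    }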
diffstat:

 sys/arch/x86/x86/pmap.c |  118 ++++++++++++++++++++++++++---------------------
 1 files changed, 64 insertions(+), 54 deletions(-)

diffs (160 lines):

diff -r e07ce64fba3c -r deec4ce72a94 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Fri Jul 01 11:10:48 2016 +0000
+++ b/sys/arch/x86/x86/pmap.c   Fri Jul 01 11:20:01 2016 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.198 2016/07/01 11:10:48 maxv Exp $  */
+/*     $NetBSD: pmap.c,v 1.199 2016/07/01 11:20:01 maxv Exp $  */
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.198 2016/07/01 11:10:48 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.199 2016/07/01 11:20:01 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -561,6 +561,10 @@
  * local prototypes
  */
 
+#ifndef XEN
+static void pmap_remap_largepages(void);
+#endif
+
 static struct vm_page  *pmap_get_ptp(struct pmap *, vaddr_t,
                                      pd_entry_t * const *);
 static struct vm_page  *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int);
@@ -1297,65 +1301,18 @@
         * Enable large pages if they are supported.
         */
        if (cpu_feature[0] & CPUID_PSE) {
-               paddr_t pa;
-               extern char __rodata_start;
-               extern char __data_start;
-               extern char __kernel_end;
-
                lcr4(rcr4() | CR4_PSE); /* enable hardware (via %cr4) */
                pmap_largepages = 1;    /* enable software */
 
                /*
-                * The TLB must be flushed after enabling large pages
-                * on Pentium CPUs, according to section 3.6.2.2 of
-                * "Intel Architecture Software Developer's Manual,
-                * Volume 3: System Programming".
+                * The TLB must be flushed after enabling large pages on Pentium
+                * CPUs, according to section 3.6.2.2 of "Intel Architecture
+                * Software Developer's Manual, Volume 3: System Programming".
                 */
                tlbflushg();
 
-               /*
-                * Now, we remap several kernel segments with large pages. We
-                * cover as many pages as we can.
-                */
-
-               /* Remap the kernel text using large pages. */
-               kva = KERNBASE;
-               kva_end = rounddown((vaddr_t)&__rodata_start, NBPD_L1);
-               pa = kva - KERNBASE;
-               for (/* */; kva + NBPD_L2 <= kva_end; kva += NBPD_L2,
-                   pa += NBPD_L2) {
-                       pde = &L2_BASE[pl2_i(kva)];
-                       *pde = pa | pmap_pg_g | PG_PS | PG_KR | PG_V;
-                       tlbflushg();
-               }
-#if defined(DEBUG)
-               aprint_normal("kernel text is mapped with %" PRIuPSIZE " large "
-                   "pages and %" PRIuPSIZE " normal pages\n",
-                   howmany(kva - KERNBASE, NBPD_L2),
-                   howmany((vaddr_t)&__rodata_start - kva, NBPD_L1));
-#endif /* defined(DEBUG) */
-
-               /* Remap the kernel rodata using large pages. */
-               kva = roundup((vaddr_t)&__rodata_start, NBPD_L2);
-               kva_end = rounddown((vaddr_t)&__data_start, NBPD_L1);
-               pa = kva - KERNBASE;
-               for (/* */; kva + NBPD_L2 <= kva_end; kva += NBPD_L2,
-                   pa += NBPD_L2) {
-                       pde = &L2_BASE[pl2_i(kva)];
-                       *pde = pa | pmap_pg_g | PG_PS | pmap_pg_nx | PG_KR | PG_V;
-                       tlbflushg();
-               }
-
-               /* Remap the kernel data+bss using large pages. */
-               kva = roundup((vaddr_t)&__data_start, NBPD_L2);
-               kva_end = rounddown((vaddr_t)&__kernel_end, NBPD_L1);
-               pa = kva - KERNBASE;
-               for (/* */; kva + NBPD_L2 <= kva_end; kva += NBPD_L2,
-                   pa += NBPD_L2) {
-                       pde = &L2_BASE[pl2_i(kva)];
-                       *pde = pa | pmap_pg_g | PG_PS | pmap_pg_nx | PG_KW | PG_V;
-                       tlbflushg();
-               }
+               /* Remap the kernel. */
+               pmap_remap_largepages();
        }
 #endif /* !XEN */
 
@@ -1576,6 +1533,59 @@
        pmap_maxkvaddr = kva;
 }
 
+#ifndef XEN
+/*
+ * Remap several kernel segments with large pages. We cover as many pages as we
+ * can. Called only once at boot time, if the CPU supports large pages.
+ */
+static void
+pmap_remap_largepages(void)
+{
+       extern char __rodata_start;
+       extern char __data_start;
+       extern char __kernel_end;
+       pd_entry_t *pde;
+       vaddr_t kva, kva_end;
+       paddr_t pa;
+
+       /* Remap the kernel text using large pages. */
+       kva = KERNBASE;
+       kva_end = rounddown((vaddr_t)&__rodata_start, NBPD_L1);
+       pa = kva - KERNBASE;
+       for (/* */; kva + NBPD_L2 <= kva_end; kva += NBPD_L2, pa += NBPD_L2) {
+               pde = &L2_BASE[pl2_i(kva)];
+               *pde = pa | pmap_pg_g | PG_PS | PG_KR | PG_V;
+               tlbflushg();
+       }
+#if defined(DEBUG)
+       aprint_normal("kernel text is mapped with %" PRIuPSIZE " large "
+           "pages and %" PRIuPSIZE " normal pages\n",
+           howmany(kva - KERNBASE, NBPD_L2),
+           howmany((vaddr_t)&__rodata_start - kva, NBPD_L1));
+#endif /* defined(DEBUG) */
+
+       /* Remap the kernel rodata using large pages. */
+       kva = roundup((vaddr_t)&__rodata_start, NBPD_L2);
+       kva_end = rounddown((vaddr_t)&__data_start, NBPD_L1);
+       pa = kva - KERNBASE;
+       for (/* */; kva + NBPD_L2 <= kva_end; kva += NBPD_L2, pa += NBPD_L2) {
+               pde = &L2_BASE[pl2_i(kva)];
+               *pde = pa | pmap_pg_g | PG_PS | pmap_pg_nx | PG_KR | PG_V;
+               tlbflushg();
+       }
+
+       /* Remap the kernel data+bss using large pages. */
+       kva = roundup((vaddr_t)&__data_start, NBPD_L2);
+       kva_end = rounddown((vaddr_t)&__kernel_end, NBPD_L1);
+       pa = kva - KERNBASE;
+       for (/* */; kva + NBPD_L2 <= kva_end; kva += NBPD_L2, pa += NBPD_L2) {
+               pde = &L2_BASE[pl2_i(kva)];
+               *pde = pa | pmap_pg_g | PG_PS | pmap_pg_nx | PG_KW | PG_V;
+               tlbflushg();
+       }
+}
+#endif /* !XEN */
+
 #if defined(__x86_64__)
 /*
  * Pre-allocate PTPs for low memory, so that 1:1 mappings for various

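A note on the loop bounds in pmap_remap_largepages(): the start of each
segment is aligned up to a large-page boundary (the text segment starts
at KERNBASE, which is assumed to be aligned already) and the end is
aligned down, so only the fully covered middle of each segment is
remapped; any unaligned head or tail stays on normal 4 KB pages, and
that split is what the DEBUG printout counts. Here is a standalone
sketch of that coverage arithmetic, with roundup/rounddown written out
and NBPD_L2 assumed to be 2 MB as on amd64 (pre-PAE i386 would use
4 MB); the segment addresses are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's constants and macros. */
    #define NBPD_L1 0x1000UL        /* 4 KB: a normal page */
    #define NBPD_L2 0x200000UL      /* 2 MB: a large page (amd64/PAE) */
    #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))
    #define rounddown(x, y) (((x) / (y)) * (y))

    int
    main(void)
    {
            /* Hypothetical segment bounds, not real kernel addresses. */
            uintptr_t start = 0x301000, end = 0xa05000;

            uintptr_t kva = roundup(start, NBPD_L2);
            uintptr_t kva_end = rounddown(end, NBPD_L1);
            unsigned large = 0;

            /* Same loop shape as the committed code: advance one large
             * page at a time while a whole large page still fits. */
            for (; kva + NBPD_L2 <= kva_end; kva += NBPD_L2)
                    large++;

            printf("%u large pages; head and tail stay on 4 KB pages\n",
                large);
            return 0;
    }

Note also that the committed loops call tlbflushg() after every PDE
update, on top of the flush required when PSE is first enabled,
presumably to evict any stale small-page translations for the range
just remapped.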

