Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch allocate uareas contiguously and access them via the direct map.



details:   https://anonhg.NetBSD.org/src/rev/cf1b9b67edfc
branches:  trunk
changeset: 772913:cf1b9b67edfc
user:      chs <chs%NetBSD.org@localhost>
date:      Sat Jan 21 16:48:56 2012 +0000

description:
allocate uareas contiguously and access them via the direct map.

diffstat:

 sys/arch/amd64/include/cpu.h   |   5 ++-
 sys/arch/amd64/include/types.h |   3 +-
 sys/arch/x86/x86/vm_machdep.c  |  61 +++++++++++++++++++++++++++++++++++++++--
 3 files changed, 64 insertions(+), 5 deletions(-)

diffs (122 lines):

diff -r 99276205987e -r cf1b9b67edfc sys/arch/amd64/include/cpu.h
--- a/sys/arch/amd64/include/cpu.h      Sat Jan 21 16:48:08 2012 +0000
+++ b/sys/arch/amd64/include/cpu.h      Sat Jan 21 16:48:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu.h,v 1.59 2008/12/30 12:35:23 pooka Exp $   */
+/*     $NetBSD: cpu.h,v 1.60 2012/01/21 16:48:56 chs Exp $     */
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -89,6 +89,9 @@
 #define CLKF_INTR(frame)       (curcpu()->ci_idepth > 0)
 #define LWP_PC(l)              ((l)->l_md.md_regs->tf_rip)
 
+void   *cpu_uarea_alloc(bool);
+bool   cpu_uarea_free(void *);
+
 #endif /* _KERNEL */
 
 #else  /*      __x86_64__      */
diff -r 99276205987e -r cf1b9b67edfc sys/arch/amd64/include/types.h
--- a/sys/arch/amd64/include/types.h    Sat Jan 21 16:48:08 2012 +0000
+++ b/sys/arch/amd64/include/types.h    Sat Jan 21 16:48:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: types.h,v 1.40 2011/12/04 16:24:13 chs Exp $   */
+/*     $NetBSD: types.h,v 1.41 2012/01/21 16:48:56 chs Exp $   */
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -99,6 +99,7 @@
 #define        __HAVE_DIRECT_MAP 1
 #define        __HAVE_MM_MD_DIRECT_MAPPED_IO
 #define        __HAVE_MM_MD_DIRECT_MAPPED_PHYS
+#define        __HAVE_CPU_UAREA_ROUTINES
 #endif
 #endif
 
diff -r 99276205987e -r cf1b9b67edfc sys/arch/x86/x86/vm_machdep.c
--- a/sys/arch/x86/x86/vm_machdep.c     Sat Jan 21 16:48:08 2012 +0000
+++ b/sys/arch/x86/x86/vm_machdep.c     Sat Jan 21 16:48:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $    */
+/*     $NetBSD: vm_machdep.c,v 1.14 2012/01/21 16:48:57 chs Exp $      */
 
 /*-
  * Copyright (c) 1982, 1986 The Regents of the University of California.
@@ -80,7 +80,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.13 2011/02/10 14:46:48 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.14 2012/01/21 16:48:57 chs Exp $");
 
 #include "opt_mtrr.h"
 
@@ -93,7 +93,7 @@
 #include <sys/exec.h>
 #include <sys/ptrace.h>
 
-#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
 
 #include <machine/cpu.h>
 #include <machine/gdt.h>
@@ -356,3 +356,58 @@
        bp->b_data = bp->b_saveaddr;
        bp->b_saveaddr = 0;
 }
+
+#ifdef __HAVE_CPU_UAREA_ROUTINES
+void *
+cpu_uarea_alloc(bool system)
+{
+       struct pglist pglist;
+       int error;
+
+       /*
+        * Allocate a new physically contiguous uarea which can be
+        * direct-mapped.
+        */
+       error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
+       if (error) {
+               return NULL;
+       }
+
+       /*
+        * Get the physical address from the first page.
+        */
+       const struct vm_page * const pg = TAILQ_FIRST(&pglist);
+       KASSERT(pg != NULL);
+       const paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+       /*
+        * We need to return a direct-mapped VA for the pa.
+        */
+
+       return (void *)PMAP_MAP_POOLPAGE(pa);
+}
+
+/*
+ * Return true if we freed it, false if we didn't.
+ */
+bool
+cpu_uarea_free(void *vva)
+{
+       vaddr_t va = (vaddr_t) vva;
+
+       if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
+               return false;
+       }
+
+       /*
+        * Since the pages are physically contiguous, the vm_page structures
+        * will be as well.
+        */
+       struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
+       KASSERT(pg != NULL);
+       for (size_t i = 0; i < UPAGES; i++, pg++) {
+               uvm_pagefree(pg);
+       }
+       return true;
+}
+#endif /* __HAVE_CPU_UAREA_ROUTINES */



Home | Main Index | Thread Index | Old Index