Source-Changes-HG archive


[src/yamt-km]: src/sys/arch/xen - use new APIs.



details:   https://anonhg.NetBSD.org/src/rev/984c54b22beb
branches:  yamt-km
changeset: 573342:984c54b22beb
user:      yamt <yamt@NetBSD.org>
date:      Sun Feb 13 10:20:49 2005 +0000

description:
- use new APIs (a hedged sketch of the new interface follows below).
- simplify bootstrap and pvpage allocation.
- remove no-longer-needed .globl declarations.
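
What "new APIs" means here: the old uvm_km_alloc()/uvm_km_valloc()/
uvm_km_kmemalloc() entry points collapse into a single uvm_km_alloc()
that takes an alignment and a flags argument, with uvm_km_free()
taking a matching flag. A minimal sketch of the two conversions this
changeset performs (the helper names are illustrative, not from the
diff; the calls themselves match the hunks below):

#include <sys/param.h>
#include <uvm/uvm_extern.h>

/* Old: uvm_km_alloc(kernel_map, size) -- wired memory was implicit. */
vaddr_t
sketch_alloc_wired(vsize_t size)
{

	/* alignment 0 = no constraint; returns 0 on failure */
	return uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED);
}

/* Old: uvm_km_valloc(kernel_map, size) -- reserve VA, no backing pages. */
vaddr_t
sketch_alloc_vaonly(vsize_t size)
{

	return uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
}

/* Freeing now states how the range was allocated. */
void
sketch_free_wired(vaddr_t va, vsize_t size)
{

	uvm_km_free(kernel_map, va, size, UVM_KMF_WIRED);
}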

diffstat:

 sys/arch/xen/i386/cpu.c         |   12 ++-
 sys/arch/xen/i386/gdt.c         |   10 ++-
 sys/arch/xen/i386/locore.S      |    4 +-
 sys/arch/xen/i386/machdep.c     |    7 +-
 sys/arch/xen/i386/pmap.c        |  114 +++++++++------------------------------
 sys/arch/xen/i386/sys_machdep.c |   12 ++-
 sys/arch/xen/x86/bus_space.c    |   10 +-
 sys/arch/xen/xen/if_xennet.c    |   15 ++--
 sys/arch/xen/xen/xbd.c          |   16 ++--
 9 files changed, 72 insertions(+), 128 deletions(-)

diffs (truncated from 536 to 300 lines):

diff -r 4968509fa46e -r 984c54b22beb sys/arch/xen/i386/cpu.c
--- a/sys/arch/xen/i386/cpu.c   Sun Feb 13 10:06:04 2005 +0000
+++ b/sys/arch/xen/i386/cpu.c   Sun Feb 13 10:20:49 2005 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpu.c,v 1.1 2004/03/11 21:44:08 cl Exp $       */
+/*     $NetBSD: cpu.c,v 1.1.14.1 2005/02/13 10:20:49 yamt Exp $        */
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -72,7 +72,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.1 2004/03/11 21:44:08 cl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.1.14.1 2005/02/13 10:20:49 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -300,7 +300,7 @@
        /*
         * Allocate UPAGES contiguous pages for the idle PCB and stack.
         */
-       kstack = uvm_km_alloc (kernel_map, USPACE);
+       kstack = uvm_km_alloc(kernel_map, USPACE, 0, UVM_KMF_WIRED);
        if (kstack == 0) {
                if (caa->cpu_role != CPU_ROLE_AP) {
                        panic("cpu_attach: unable to allocate idle stack for"
@@ -707,7 +707,8 @@
 #ifndef XEN
        struct segment_descriptor sd;
 
-       ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
+       ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
+           UVM_KMF_WIRED);
        cpu_init_tss(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
            IDTVEC(tss_trap08));
        setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
@@ -725,7 +726,8 @@
         * XXX overwriting the gate set in db_machine_init.
         * Should rearrange the code so that it's set only once.
         */
-       ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
+       ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE, 0,
+           UVM_KMF_WIRED);
        cpu_init_tss(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack,
            Xintrddbipi);
 
diff -r 4968509fa46e -r 984c54b22beb sys/arch/xen/i386/gdt.c
--- a/sys/arch/xen/i386/gdt.c   Sun Feb 13 10:06:04 2005 +0000
+++ b/sys/arch/xen/i386/gdt.c   Sun Feb 13 10:20:49 2005 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: gdt.c,v 1.1 2004/03/11 21:44:08 cl Exp $       */
+/*     $NetBSD: gdt.c,v 1.1.14.1 2005/02/13 10:20:49 yamt Exp $        */
 /*     NetBSD: gdt.c,v 1.32 2004/02/13 11:36:13 wiz Exp        */
 
 /*-
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.1 2004/03/11 21:44:08 cl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.1.14.1 2005/02/13 10:20:49 yamt Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
@@ -146,7 +146,8 @@
        gdt_free[1] = GNULL_SEL;
 
        old_gdt = gdt;
-       gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len + max_len);
+       gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len + max_len, 0,
+           UVM_KMF_VAONLY);
        for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                if (pg == NULL) {
@@ -174,7 +175,8 @@
        struct vm_page *pg;
        vaddr_t va;
 
-       ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
+       ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len, 0,
+           UVM_KMF_VAONLY);
        for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + min_len;
            va += PAGE_SIZE) {
                while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
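
The gdt.c hunks above pair a UVM_KMF_VAONLY reservation with
uvm_pagealloc()/pmap_kenter_pa() so that only the pages currently
needed get physical backing. A hedged sketch of that pattern (the
helper name and error handling are illustrative; gdt.c's own
unwinding is not visible in the truncated diff):

#include <sys/param.h>
#include <uvm/uvm.h>

vaddr_t
sketch_valloc_and_back(vsize_t reserve_len, vsize_t backed_len)
{
	vaddr_t base, va;
	struct vm_page *pg;

	/* Reserve virtual space only; no physical pages yet. */
	base = uvm_km_alloc(kernel_map, reserve_len, 0, UVM_KMF_VAONLY);
	if (base == 0)
		return 0;

	/* Back the initial portion with zeroed pages. */
	for (va = base; va < base + backed_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			return 0;	/* real code would unwind or wait */
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return base;
}
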
diff -r 4968509fa46e -r 984c54b22beb sys/arch/xen/i386/locore.S
--- a/sys/arch/xen/i386/locore.S        Sun Feb 13 10:06:04 2005 +0000
+++ b/sys/arch/xen/i386/locore.S        Sun Feb 13 10:20:49 2005 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.8 2004/12/14 17:13:56 tls Exp $   */
+/*     $NetBSD: locore.S,v 1.8.4.1 2005/02/13 10:20:49 yamt Exp $      */
 /*     NetBSD: locore.S,v 1.26 2004/04/12 13:17:46 yamt Exp    */
 
 /*-
@@ -1833,8 +1833,6 @@
 #ifndef MULTIPROCESSOR
        .globl  _C_LABEL(lwp0)
 #endif
-       .globl  _C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
-       .globl  _C_LABEL(uvm_km_free),_C_LABEL(tss_free)
 /* LINTSTUB: Func: void cpu_exit(struct lwp *l) */
 ENTRY(cpu_exit)
        movl    4(%esp),%edi            # old process
diff -r 4968509fa46e -r 984c54b22beb sys/arch/xen/i386/machdep.c
--- a/sys/arch/xen/i386/machdep.c       Sun Feb 13 10:06:04 2005 +0000
+++ b/sys/arch/xen/i386/machdep.c       Sun Feb 13 10:20:49 2005 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: machdep.c,v 1.11 2004/12/14 18:07:42 tls Exp $ */
+/*     $NetBSD: machdep.c,v 1.11.4.1 2005/02/13 10:20:49 yamt Exp $    */
 /*     NetBSD: machdep.c,v 1.552 2004/03/24 15:34:49 atatat Exp        */
 
 /*-
@@ -73,7 +73,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.11 2004/12/14 18:07:42 tls Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.11.4.1 2005/02/13 10:20:49 yamt Exp $");
 
 #include "opt_beep.h"
 #include "opt_compat_ibcs2.h"
@@ -327,7 +327,8 @@
        /*
         * Initialize error message buffer (et end of core).
         */
-       msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_round_page(MSGBUFSIZE));
+       msgbuf_vaddr = uvm_km_alloc(kernel_map, x86_round_page(MSGBUFSIZE), 0,
+           UVM_KMF_VAONLY);
        if (msgbuf_vaddr == 0)
                panic("failed to valloc msgbuf_vaddr");
 
diff -r 4968509fa46e -r 984c54b22beb sys/arch/xen/i386/pmap.c
--- a/sys/arch/xen/i386/pmap.c  Sun Feb 13 10:06:04 2005 +0000
+++ b/sys/arch/xen/i386/pmap.c  Sun Feb 13 10:20:49 2005 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.7 2005/01/01 21:00:06 yamt Exp $    */
+/*     $NetBSD: pmap.c,v 1.7.4.1 2005/02/13 10:20:49 yamt Exp $        */
 /*     NetBSD: pmap.c,v 1.172 2004/04/12 13:17:46 yamt Exp     */
 
 /*
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.7 2005/01/01 21:00:06 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.7.4.1 2005/02/13 10:20:49 yamt Exp $");
 
 #include "opt_cputype.h"
 #include "opt_user_ldt.h"
@@ -408,8 +408,6 @@
 static struct pv_pagelist pv_freepages;        /* list of pv_pages with free entrys */
 static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
 static int pv_nfpvents;                        /* # of free pv entries */
-static struct pv_page *pv_initpage;    /* bootstrap page from kernel_map */
-static vaddr_t pv_cachedva;            /* cached VA for later use */
 
 #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
 #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
@@ -1347,22 +1345,9 @@
 {
        int i;
 
-       /*
-        * now we need to free enough pv_entry structures to allow us to get
-        * the kmem_map/kmem_object allocated and inited (done after this
-        * function is finished).  to do this we allocate one bootstrap page out
-        * of kernel_map and use it to provide an initial pool of pv_entry
-        * structures.   we never free this page.
-        */
-
-       pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
-       if (pv_initpage == NULL)
-               panic("pmap_init: pv_initpage");
-       pv_cachedva = 0;   /* a VA we have allocated but not used yet */
        pv_nfpvents = 0;
-       (void) pmap_add_pvpage(pv_initpage, FALSE);
-
-       pj_page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE);
+
+       pj_page = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
        if (pj_page == NULL)
                panic("pmap_init: pj_page");
 
@@ -1465,7 +1450,6 @@
        struct pmap *pmap;
        int mode;
 {
-       struct vm_page *pg;
        struct pv_page *pvpage;
        struct pv_entry *pv;
        int s;
@@ -1491,38 +1475,17 @@
        }
 
        /*
-        *  see if we've got a cached unmapped VA that we can map a page in.
-        * if not, try to allocate one.
-        */
-
-       if (pv_cachedva == 0) {
-               s = splvm();   /* must protect kmem_map with splvm! */
-               pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE,
-                   UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
-               splx(s);
-               if (pv_cachedva == 0) {
-                       return (NULL);
-               }
-       }
-
-       pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
-           UVM_PGA_USERESERVE);
-       if (pg == NULL)
-               return (NULL);
-       pg->flags &= ~PG_BUSY;  /* never busy */
-
-       /*
-        * add a mapping for our new pv_page and free its entrys (save one!)
-        *
         * NOTE: If we are allocating a PV page for the kernel pmap, the
         * pmap is already locked!  (...but entering the mapping is safe...)
         */
 
-       pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
-           VM_PROT_READ | VM_PROT_WRITE);
-       pmap_update(pmap_kernel());
-       pvpage = (struct pv_page *) pv_cachedva;
-       pv_cachedva = 0;
+       s = splvm();   /* must protect kmem_map with splvm! */
+       pvpage = (struct pv_page *)uvm_km_alloc(kmem_map, PAGE_SIZE, 0,
+           UVM_KMF_TRYLOCK|UVM_KMF_NOWAIT|UVM_KMF_WIRED);
+       splx(s);
+       if (pvpage == NULL)
+               return NULL;
+
        return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
 }
 
@@ -1658,51 +1621,23 @@
  *
  * => assume caller is holding the pvalloc_lock and that
  *     there is a page on the pv_unusedpgs list
- * => if we can't get a lock on the kmem_map we try again later
  */
 
 static void
 pmap_free_pvpage()
 {
        int s;
-       struct vm_map *map;
-       struct vm_map_entry *dead_entries;
        struct pv_page *pvp;
 
-       s = splvm(); /* protect kmem_map */
-
        pvp = TAILQ_FIRST(&pv_unusedpgs);
-
-       /*
-        * note: watch out for pv_initpage which is allocated out of
-        * kernel_map rather than kmem_map.
-        */
-
-       if (pvp == pv_initpage)
-               map = kernel_map;
-       else
-               map = kmem_map;
-       if (vm_map_lock_try(map)) {
-
-               /* remove pvp from pv_unusedpgs */
-               TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
-
-               /* unmap the page */
-               dead_entries = NULL;
-               uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
-                   &dead_entries, NULL);
-               vm_map_unlock(map);
-
-               if (dead_entries != NULL)
-                       uvm_unmap_detach(dead_entries, 0);
-
-               pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
-       }
-       if (pvp == pv_initpage)
-               /* no more initpage, we've freed it */
-               pv_initpage = NULL;
-
-       splx(s);
+       /* remove pvp from pv_unusedpgs */
+       TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
+
+       s = splvm();
+       uvm_km_free(kmem_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
+       splx(s);
+
+       pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
 }
 
 /*
@@ -2066,7 +2001,7 @@
                 */
                ldt_free(pmap);


