Source-Changes-HG archive


[src/trunk]: src/sys/uvm Minor clean up.



details:   https://anonhg.NetBSD.org/src/rev/7383581e80a9
branches:  trunk
changeset: 758618:7383581e80a9
user:      uebayasi <uebayasi@NetBSD.org>
date:      Thu Nov 11 14:50:54 2010 +0000

description:
Minor clean up: cache a struct vm_physseg pointer per loop iteration via VM_PHYSMEM_PTR() instead of repeatedly indexing vm_physmem[], introduce vm_nphysmem as an alias for vm_nphysseg, and hide the segment-compaction structure copies behind VM_PHYSMEM_PTR_SWAP().

diffstat:

 sys/uvm/uvm_page.c |  91 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 48 insertions(+), 43 deletions(-)

diffs (252 lines):

diff -r 996544cb00ca -r 7383581e80a9 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c        Thu Nov 11 14:48:13 2010 +0000
+++ b/sys/uvm/uvm_page.c        Thu Nov 11 14:50:54 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.c,v 1.157 2010/11/06 15:42:43 uebayasi Exp $  */
+/*     $NetBSD: uvm_page.c,v 1.158 2010/11/11 14:50:54 uebayasi Exp $  */
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.157 2010/11/06 15:42:43 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.158 2010/11/11 14:50:54 uebayasi Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -127,6 +127,7 @@
 
 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];  /* XXXCDC: uvm.physmem */
 int vm_nphysseg = 0;                           /* XXXCDC: uvm.nphysseg */
+#define        vm_nphysmem     vm_nphysseg
 
 /*
  * Some supported CPUs in a given architecture don't support all
@@ -367,6 +368,7 @@
        psize_t freepages, pagecount, bucketcount, n;
        struct pgflbucket *bucketarray, *cpuarray;
        struct vm_page *pagearray;
+       struct vm_physseg *seg;
        int lcv;
        u_int i;
        paddr_t paddr;
@@ -398,7 +400,7 @@
         * now is to allocate vm_page structures for this memory.
         */
 
-       if (vm_nphysseg == 0)
+       if (vm_nphysmem == 0)
                panic("uvm_page_bootstrap: no memory pre-allocated");
 
        /*
@@ -410,8 +412,10 @@
         */
 
        freepages = 0;
-       for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
-               freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
+       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
+               seg = VM_PHYSMEM_PTR(lcv);
+               freepages += (seg->end - seg->start);
+       }
 
        /*
         * Let MD code initialize the number of colors, or default
@@ -455,27 +459,28 @@
         * init the vm_page structures and put them in the correct place.
         */
 
-       for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-               n = vm_physmem[lcv].end - vm_physmem[lcv].start;
+       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
+               seg = VM_PHYSMEM_PTR(lcv);
+               n = seg->end - seg->start;
 
                /* set up page array pointers */
-               vm_physmem[lcv].pgs = pagearray;
+               seg->pgs = pagearray;
                pagearray += n;
                pagecount -= n;
-               vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
+               seg->lastpg = seg->pgs + (n - 1);
 
                /* init and free vm_pages (we've already zeroed them) */
-               paddr = ctob(vm_physmem[lcv].start);
+               paddr = ctob(seg->start);
                for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
-                       vm_physmem[lcv].pgs[i].phys_addr = paddr;
+                       seg->pgs[i].phys_addr = paddr;
 #ifdef __HAVE_VM_PAGE_MD
-                       VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
+                       VM_MDPAGE_INIT(&seg->pgs[i]);
 #endif
-                       if (atop(paddr) >= vm_physmem[lcv].avail_start &&
-                           atop(paddr) <= vm_physmem[lcv].avail_end) {
+                       if (atop(paddr) >= seg->avail_start &&
+                           atop(paddr) <= seg->avail_end) {
                                uvmexp.npages++;
                                /* add page to free pool */
-                               uvm_pagefree(&vm_physmem[lcv].pgs[i]);
+                               uvm_pagefree(&seg->pgs[i]);
                        }
                }
        }
@@ -653,9 +658,9 @@
 
        /* pass 1: try allocating from a matching end */
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-       for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+       for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
 #else
-       for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
 #endif
        {
 
@@ -674,12 +679,12 @@
                        /* nothing left?   nuke it */
                        if (vm_physmem[lcv].avail_start ==
                            vm_physmem[lcv].end) {
-                               if (vm_nphysseg == 1)
+                               if (vm_nphysmem == 1)
                                    panic("uvm_page_physget: out of memory!");
-                               vm_nphysseg--;
-                               for (x = lcv ; x < vm_nphysseg ; x++)
+                               vm_nphysmem--;
+                               for (x = lcv ; x < vm_nphysmem ; x++)
                                        /* structure copy */
-                                       vm_physmem[x] = vm_physmem[x+1];
+                                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
                        }
                        return (true);
                }
@@ -693,12 +698,12 @@
                        /* nothing left?   nuke it */
                        if (vm_physmem[lcv].avail_end ==
                            vm_physmem[lcv].start) {
-                               if (vm_nphysseg == 1)
+                               if (vm_nphysmem == 1)
                                    panic("uvm_page_physget: out of memory!");
-                               vm_nphysseg--;
-                               for (x = lcv ; x < vm_nphysseg ; x++)
+                               vm_nphysmem--;
+                               for (x = lcv ; x < vm_nphysmem ; x++)
                                        /* structure copy */
-                                       vm_physmem[x] = vm_physmem[x+1];
+                                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
                        }
                        return (true);
                }
@@ -706,9 +711,9 @@
 
        /* pass2: forget about matching ends, just allocate something */
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-       for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+       for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
 #else
-       for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
 #endif
        {
 
@@ -723,12 +728,12 @@
 
                /* nothing left?   nuke it */
                if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
-                       if (vm_nphysseg == 1)
+                       if (vm_nphysmem == 1)
                                panic("uvm_page_physget: out of memory!");
-                       vm_nphysseg--;
-                       for (x = lcv ; x < vm_nphysseg ; x++)
+                       vm_nphysmem--;
+                       for (x = lcv ; x < vm_nphysmem ; x++)
                                /* structure copy */
-                               vm_physmem[x] = vm_physmem[x+1];
+                               VM_PHYSMEM_PTR_SWAP(x, x + 1);
                }
                return (true);
        }
@@ -778,7 +783,7 @@
         * do we have room?
         */
 
-       if (vm_nphysseg == VM_PHYSSEG_MAX) {
+       if (vm_nphysmem == VM_PHYSSEG_MAX) {
                printf("uvm_page_physload: unable to load physical memory "
                    "segment\n");
                printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
@@ -792,11 +797,11 @@
         * called yet, so malloc is not available).
         */
 
-       for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
                if (vm_physmem[lcv].pgs)
                        break;
        }
-       preload = (lcv == vm_nphysseg);
+       preload = (lcv == vm_nphysmem);
 
        /*
         * if VM is already running, attempt to malloc() vm_page structures
@@ -815,33 +820,33 @@
 
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
        /* random: put it at the end (easy!) */
-       ps = &vm_physmem[vm_nphysseg];
+       ps = &vm_physmem[vm_nphysmem];
 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        {
                int x;
                /* sort by address for binary search */
-               for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+               for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
                        if (start < vm_physmem[lcv].start)
                                break;
                ps = &vm_physmem[lcv];
                /* move back other entries, if necessary ... */
-               for (x = vm_nphysseg ; x > lcv ; x--)
+               for (x = vm_nphysmem ; x > lcv ; x--)
                        /* structure copy */
-                       vm_physmem[x] = vm_physmem[x - 1];
+                       VM_PHYSMEM_PTR_SWAP(x, x - 1);
        }
 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
        {
                int x;
                /* sort by largest segment first */
-               for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+               for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
                        if ((end - start) >
                            (vm_physmem[lcv].end - vm_physmem[lcv].start))
                                break;
                ps = &vm_physmem[lcv];
                /* move back other entries, if necessary ... */
-               for (x = vm_nphysseg ; x > lcv ; x--)
+               for (x = vm_nphysmem ; x > lcv ; x--)
                        /* structure copy */
-                       vm_physmem[x] = vm_physmem[x - 1];
+                       VM_PHYSMEM_PTR_SWAP(x, x - 1);
        }
 #else
        panic("uvm_page_physload: unknown physseg strategy selected!");
@@ -858,7 +863,7 @@
                ps->lastpg = pgs + npages - 1;
        }
        ps->free_list = free_list;
-       vm_nphysseg++;
+       vm_nphysmem++;
 
        if (!preload) {
                uvmpdpol_reinit();
@@ -1980,7 +1985,7 @@
            " OWNER"
 #endif
            "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
-       for (i = 0; i < vm_nphysseg; i++) {
+       for (i = 0; i < vm_nphysmem; i++) {
                for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) {
                        (*pr)("%18p %04x %04x %18p %18p",
                            pg, pg->flags, pg->pqflags, pg->uobject,
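
For reference, the pattern behind the clean-up: each loop now fetches a struct vm_physseg pointer once via VM_PHYSMEM_PTR() instead of indexing vm_physmem[lcv] on every access, and the structure copies that compact the segment array are wrapped in VM_PHYSMEM_PTR_SWAP(). The standalone sketch below (not part of the commit) shows the shape of the new freepages loop; it assumes VM_PHYSMEM_PTR(i) simply expands to &vm_physmem[i] and uses made-up segment values, since neither macro definition appears in this diff.

/*
 * Illustrative sketch only: mimics the post-change loop in
 * uvm_page_init() that counts the pages covered by each physical segment.
 */
#include <stdio.h>

typedef unsigned long paddr_t;

struct vm_physseg {
        paddr_t start, end;             /* page frame numbers spanned */
};

#define VM_PHYSSEG_MAX  4
static struct vm_physseg vm_physmem[VM_PHYSSEG_MAX] = {
        { 0x100, 0x200 },               /* made-up example segments */
        { 0x300, 0x380 },
};
static int vm_nphysmem = 2;             /* alias of vm_nphysseg in the diff */

/* Assumed expansion; the real macro lives outside this diff. */
#define VM_PHYSMEM_PTR(i)       (&vm_physmem[(i)])

int
main(void)
{
        struct vm_physseg *seg;
        paddr_t freepages = 0;
        int lcv;

        /* Fetch the segment pointer once per iteration, then use it. */
        for (lcv = 0; lcv < vm_nphysmem; lcv++) {
                seg = VM_PHYSMEM_PTR(lcv);
                freepages += (seg->end - seg->start);
        }
        printf("freepages = %lu\n", (unsigned long)freepages);
        return 0;
}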


