Current-Users archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

Some fixes in MI code regarding PAE



Hi,

Jeremy kindly posted [1] different patches to enable PAE, as well as the NX bit, when possible.

I just finished porting Jeremy's patches to current, and I'd like to commit a tiny fraction of it: mainly fixes in UVM code where there is a risk of unwanted (but IMHO harmless) overflows when shifting bits from frame numbers to addresses.

Patch is attached, or available here [2].

It was compile tested for amd64, and compile+boot tested for i386.

Explanations:
- printfs using PRIxPADDR instead of hardcoding long types in the format string
- ptoa() => ctob() + cast to uint64_t (when necessary) for integer values that could be higher than 1M, risking truncation when cast to vaddr_t via ptoa().

Opinions? (I am wondering if the part concerning the overflows should not be pulled up to NetBSD-4 and NetBSD-5.)

The rest (mostly modifications in x86 MD code) will follow in the next few days, after some polishing (still too many PAE-related #ifdefs in pmap for my taste). FWIW, current state is here [3].


[1] http://mail-index.netbsd.org/port-i386/2009/12/20/msg001674.html

[2] http://www.netbsd.org/~jym/pae-mi.diff

[3] http://www.netbsd.org/~jym/pae.diff

--
Jean-Yves Migeon
jeanyves.migeon%free.fr@localhost
Index: dev/isa/i82365_isasubr.c
===================================================================
RCS file: /cvsroot/src/sys/dev/isa/i82365_isasubr.c,v
retrieving revision 1.45
diff -u -u -r1.45 i82365_isasubr.c
--- dev/isa/i82365_isasubr.c    17 Sep 2009 18:14:41 -0000      1.45
+++ dev/isa/i82365_isasubr.c    7 Jan 2010 23:45:15 -0000
@@ -431,17 +431,19 @@
                sc->iosize = 0xbff;
        }
 
-       DPRINTF(("%s: bus_space_alloc range 0x%04lx-0x%04lx (probed)\n",
-           device_xname(&sc->dev), (long) sc->iobase,
-           (long) sc->iobase + sc->iosize));
+       DPRINTF(("%s: bus_space_alloc range "
+           "0x%04" PRIxPADDR "-0x%04" PRIxPADDR " (probed)\n",
+           device_xname(&sc->dev), sc->iobase,
+           sc->iobase + sc->iosize));
 
        if (pcic_isa_alloc_iobase && pcic_isa_alloc_iosize) {
                sc->iobase = pcic_isa_alloc_iobase;
                sc->iosize = pcic_isa_alloc_iosize;
 
-               DPRINTF(("%s: bus_space_alloc range 0x%04lx-0x%04lx "
-                   "(config override)\n", device_xname(&sc->dev),
-                   (long) sc->iobase, (long) sc->iobase + sc->iosize));
+               DPRINTF(("%s: bus_space_alloc range "
+                   "0x%04" PRIxPADDR "-0x%04" PRIxPADDR " (config override)\n",
+                   device_xname(&sc->dev),
+                   sc->iobase, sc->iobase + sc->iosize));
        }
 }
 
Index: dev/isapnp/i82365_isapnp.c
===================================================================
RCS file: /cvsroot/src/sys/dev/isapnp/i82365_isapnp.c,v
retrieving revision 1.30
diff -u -u -r1.30 i82365_isapnp.c
--- dev/isapnp/i82365_isapnp.c  17 Sep 2009 18:14:41 -0000      1.30
+++ dev/isapnp/i82365_isapnp.c  7 Jan 2010 23:45:15 -0000
@@ -1,4 +1,3 @@
-/*     $NetBSD: i82365_isapnp.c,v 1.30 2009/09/17 18:14:41 tsutsui Exp $       */
 
 /*
  * Copyright (c) 1998 Bill Sommerfeld.  All rights reserved.
@@ -153,7 +152,7 @@
                printf(": can't alloc mem space\n");
                return;
        }
-       printf(": using iomem 0x%lx iosiz 0x%x", maddr, msize);
+       printf(": using iomem %#" PRIxPADDR " iosiz %#x", maddr, msize);
        sc->membase = maddr;
        sc->subregionmask = (1 << (msize / PCIC_MEM_PAGESIZE)) - 1;
 
Index: dev/pci/agp.c
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/agp.c,v
retrieving revision 1.65
diff -u -u -r1.65 agp.c
--- dev/pci/agp.c       27 Jan 2009 08:39:33 -0000      1.65
+++ dev/pci/agp.c       7 Jan 2010 23:45:16 -0000
@@ -327,7 +327,7 @@
         * Work out an upper bound for agp memory allocation. This
         * uses a heuristic table from the Linux driver.
         */
-       memsize = ptoa(physmem) >> 20;
+       memsize = physmem >> 8; /* total physmem in MB */
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
                        break;
Index: kern/kern_proc.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_proc.c,v
retrieving revision 1.159
diff -u -u -r1.159 kern_proc.c
--- kern/kern_proc.c    17 Dec 2009 01:25:10 -0000      1.159
+++ kern/kern_proc.c    7 Jan 2010 23:45:40 -0000
@@ -416,7 +416,7 @@
        limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur =
            maxproc < maxuprc ? maxproc : maxuprc;
 
-       lim = ptoa(uvmexp.free);
+       lim = ctob((uint64_t)uvmexp.free);
        limit0.pl_rlimit[RLIMIT_RSS].rlim_max = lim;
        limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
        limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
Index: uvm/uvm_glue.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_glue.c,v
retrieving revision 1.143
diff -u -u -r1.143 uvm_glue.c
--- uvm/uvm_glue.c      17 Dec 2009 01:25:11 -0000      1.143
+++ uvm/uvm_glue.c      7 Jan 2010 23:54:46 -0000
@@ -407,7 +407,7 @@
        p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
        p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
        p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
-       p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
+       p->p_rlimit[RLIMIT_RSS].rlim_cur = ctob((uint64_t)uvmexp.free);
 }
 
 /*
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.152
diff -u -u -r1.152 uvm_page.c
--- uvm/uvm_page.c      7 Nov 2009 07:27:50 -0000       1.152
+++ uvm/uvm_page.c      7 Jan 2010 23:54:46 -0000
@@ -339,7 +339,7 @@
 void
 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
 {
-       vsize_t freepages, pagecount, bucketcount, n;
+       psize_t freepages, pagecount, bucketcount, n;
        struct pgflbucket *bucketarray, *cpuarray;
        struct vm_page *pagearray;
        int lcv;
@@ -439,7 +439,7 @@
                vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
 
                /* init and free vm_pages (we've already zeroed them) */
-               paddr = ptoa(vm_physmem[lcv].start);
+               paddr = ctob(vm_physmem[lcv].start);
                for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
                        vm_physmem[lcv].pgs[i].phys_addr = paddr;
 #ifdef __HAVE_VM_PAGE_MD
@@ -642,7 +642,7 @@
                /* try from front */
                if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
                    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
-                       *paddrp = ptoa(vm_physmem[lcv].avail_start);
+                       *paddrp = ctob(vm_physmem[lcv].avail_start);
                        vm_physmem[lcv].avail_start++;
                        vm_physmem[lcv].start++;
                        /* nothing left?   nuke it */
@@ -661,7 +661,7 @@
                /* try from rear */
                if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
                    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
-                       *paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
+                       *paddrp = ctob(vm_physmem[lcv].avail_end - 1);
                        vm_physmem[lcv].avail_end--;
                        vm_physmem[lcv].end--;
                        /* nothing left?   nuke it */
@@ -690,7 +690,7 @@
                if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
                        continue;  /* nope */
 
-               *paddrp = ptoa(vm_physmem[lcv].avail_start);
+               *paddrp = ctob(vm_physmem[lcv].avail_start);
                vm_physmem[lcv].avail_start++;
                /* truncate! */
                vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
@@ -793,7 +793,7 @@
                }
                /* zero data, init phys_addr and free_list, and free pages */
                memset(pgs, 0, sizeof(struct vm_page) * npages);
-               for (lcv = 0, paddr = ptoa(start) ;
+               for (lcv = 0, paddr = ctob(start) ;
                                 lcv < npages ; lcv++, paddr += PAGE_SIZE) {
                        pgs[lcv].phys_addr = paddr;
                        pgs[lcv].free_list = free_list;


Home | Main Index | Thread Index | Old Index