Source-Changes-HG archive


[src/trunk]: src/sys/arch/powerpc/ibm4xx Style. No binary changes.



details:   https://anonhg.NetBSD.org/src/rev/2a050600552f
branches:  trunk
changeset: 1023359:2a050600552f
user:      rin <rin%NetBSD.org@localhost>
date:      Sat Sep 04 14:31:04 2021 +0000

description:
Style. No binary changes.

Also, remove an old #if-0'ed code block that was copied from oea and will
therefore never be enabled.
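
For readers not versed in NetBSD KNF, the hunks below are almost entirely
mechanical: declarations of the same type are combined on one line, braces
around single-statement bodies are dropped, continuation lines are indented
by four spaces instead of a tab, "sizeof x" becomes "sizeof(x)", and goto
labels are outdented to a single leading space.  The fragment below is a
hypothetical illustration of that target style only; the helper, its name,
and the local struct definition are not part of the commit, though the
memmove pattern mirrors the avail[] compaction visible in the diff:

#include <string.h>

/* Stand-in for the kernel's struct mem_region (a start/size RAM range). */
struct mem_region {
        unsigned long start, size;
};

/*
 * Hypothetical helper (not from the commit), written in the KNF form the
 * diff converts pmap.c to: combined declarations, no braces around a
 * single-statement body, a four-space continuation indent, and sizeof
 * written with parentheses.
 */
static void
drop_region(struct mem_region *avail, struct mem_region *mp, int cnt)
{
        int idx = mp - avail, rest = cnt - idx - 1;

        if (rest <= 0)
                return;

        memmove(mp, mp + 1,
            rest * sizeof(*mp));
}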

diffstat:

 sys/arch/powerpc/ibm4xx/pmap.c |  457 +++++++++++++++++++---------------------
 1 files changed, 220 insertions(+), 237 deletions(-)

diffs (truncated from 1234 to 300 lines):

diff -r 18c02a89a3c4 -r 2a050600552f sys/arch/powerpc/ibm4xx/pmap.c
--- a/sys/arch/powerpc/ibm4xx/pmap.c    Sat Sep 04 14:26:32 2021 +0000
+++ b/sys/arch/powerpc/ibm4xx/pmap.c    Sat Sep 04 14:31:04 2021 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.98 2021/04/15 00:00:46 rin Exp $    */
+/*     $NetBSD: pmap.c,v 1.99 2021/09/04 14:31:04 rin Exp $    */
 
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.98 2021/04/15 00:00:46 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.99 2021/09/04 14:31:04 rin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -99,7 +99,7 @@
  * kernmap is an array of PTEs large enough to map in
  * 4GB.  At 16KB/page it is 256K entries or 2MB.
  */
-#define KERNMAP_SIZE   ((0xffffffffU/PAGE_SIZE)+1)
+#define KERNMAP_SIZE   ((0xffffffffU / PAGE_SIZE) + 1)
 void *kernmap;
 
 #define MINCTX         2
@@ -127,11 +127,11 @@
 
 /* Event counters */
 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
-       NULL, "cpu", "tlbmiss");
+    NULL, "cpu", "tlbmiss");
 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
-       NULL, "cpu", "tlbflush");
+    NULL, "cpu", "tlbflush");
 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
-       NULL, "cpu", "tlbenter");
+    NULL, "cpu", "tlbenter");
 EVCNT_ATTACH_STATIC(tlbmiss_ev);
 EVCNT_ATTACH_STATIC(tlbflush_ev);
 EVCNT_ATTACH_STATIC(tlbenter_ev);
@@ -229,8 +229,7 @@
 static inline int
 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
 {
-       int seg = STIDX(va);
-       int ptn = PTIDX(va);
+       int seg = STIDX(va), ptn = PTIDX(va);
        u_int oldpte;
 
        if (!pm->pm_ptbl[seg]) {
@@ -266,8 +265,7 @@
 volatile u_int *
 pte_find(struct pmap *pm, vaddr_t va)
 {
-       int seg = STIDX(va);
-       int ptn = PTIDX(va);
+       int seg = STIDX(va), ptn = PTIDX(va);
 
        if (pm->pm_ptbl[seg])
                return &pm->pm_ptbl[seg][ptn];
@@ -296,9 +294,8 @@
        /*
         * Initialize kernel page table.
         */
-       for (i = 0; i < STSZ; i++) {
+       for (i = 0; i < STSZ; i++)
                pmap_kernel()->pm_ptbl[i] = NULL;
-       }
        ctxbusy[0] = ctxbusy[1] = pmap_kernel();
 
        /*
@@ -313,7 +310,7 @@
        mem_regions(&mem, &avail);
        for (mp = mem; mp->size; mp++) {
                physmem += btoc(mp->size);
-               printf("+%lx,",mp->size);
+               printf("+%lx,", mp->size);
        }
        printf("\n");
        ppc4xx_tlb_init();
@@ -333,7 +330,7 @@
        for (mp = avail; mp->size; mp++) {
                s = mp->start;
                e = mp->start + mp->size;
-               printf("%08x-%08x -> ",s,e);
+               printf("%08x-%08x -> ", s, e);
                /*
                 * Check whether this region holds all of the kernel.
                 */
@@ -366,14 +363,14 @@
                if (e < s)
                        e = s;
                sz = e - s;
-               printf("%08x-%08x = %x\n",s,e,sz);
+               printf("%08x-%08x = %x\n", s, e, sz);
                /*
                 * Check whether some memory is left here.
                 */
                if (sz == 0) {
-               empty:
+ empty:
                        memmove(mp, mp + 1,
-                               (cnt - (mp - avail)) * sizeof *mp);
+                           (cnt - (mp - avail)) * sizeof(*mp));
                        cnt--;
                        mp--;
                        continue;
@@ -415,13 +412,13 @@
        msgbuf_paddr = mp->start + mp->size - sz;
        mp->size -= sz;
        if (mp->size <= 0)
-               memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
+               memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof(*mp));
 #endif
 
        for (mp = avail; mp->size; mp++)
                uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
-                       atop(mp->start), atop(mp->start + mp->size),
-                       VM_FREELIST_DEFAULT);
+                   atop(mp->start), atop(mp->start + mp->size),
+                   VM_FREELIST_DEFAULT);
 
        /*
         * Initialize kernel pmap and hardware.
@@ -468,14 +465,15 @@
        struct pv_entry *pv;
        vsize_t sz;
        vaddr_t addr;
-       int i, s;
-       int bank;
+       int bank, i, s;
        char *attr;
 
        sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
        sz = round_page(sz);
        addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
+
        s = splvm();
+
        pv = pv_table = (struct pv_entry *)addr;
        for (i = npgs; --i >= 0;)
                pv++->pv_pm = NULL;
@@ -484,8 +482,7 @@
 
        pv = pv_table;
        attr = pmap_attrib;
-       for (bank = uvm_physseg_get_first();
-            uvm_physseg_valid_p(bank);
+       for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
             bank = uvm_physseg_get_next(bank)) {
                sz = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
                uvm_physseg_get_pmseg(bank)->pvent = pv;
@@ -495,11 +492,12 @@
        }
 
        pmap_initialized = 1;
+
        splx(s);
 
        /* Setup a pool for additional pvlist structures */
-       pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
-           IPL_VM);
+       pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry",
+           NULL, IPL_VM);
 }
 
 /*
@@ -509,16 +507,8 @@
 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 {
 
-#if 0
-       /*
-        * Reserve one segment for kernel virtual memory
-        */
-       *start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
-       *end = *start + SEGMENT_LENGTH;
-#else
        *start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
        *end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
-#endif
 }
 
 #ifdef PMAP_GROWKERNEL
@@ -540,27 +530,23 @@
 vaddr_t
 pmap_growkernel(vaddr_t maxkvaddr)
 {
-       int s;
-       int seg;
+       struct pmap *pm = pmap_kernel();
        paddr_t pg;
-       struct pmap *pm = pmap_kernel();
+       int seg, s;
 
        s = splvm();
 
        /* Align with the start of a page table */
-       for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
-            kbreak += PTMAP) {
+       for (kbreak &= ~(PTMAP - 1); kbreak < maxkvaddr; kbreak += PTMAP) {
                seg = STIDX(kbreak);
 
                if (pte_find(pm, kbreak))
                        continue;
 
-               if (uvm.page_init_done) {
+               if (uvm.page_init_done)
                        pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
-               } else {
-                       if (!uvm_page_physget(&pg))
-                               panic("pmap_growkernel: no memory");
-               }
+               else if (!uvm_page_physget(&pg))
+                       panic("pmap_growkernel: no memory");
                if (!pg)
                        panic("pmap_growkernel: no pages");
                pmap_zero_page((paddr_t)pg);
@@ -568,7 +554,9 @@
                /* XXX This is based on all phymem being addressable */
                pm->pm_ptbl[seg] = (u_int *)pg;
        }
+
        splx(s);
+
        return kbreak;
 }
 
@@ -601,10 +589,12 @@
 void
 vm_page_free1(struct vm_page *pg)
 {
+
 #ifdef DIAGNOSTIC
        if (pg->flags != (PG_CLEAN|PG_FAKE)) {
                printf("Freeing invalid page %p\n", pg);
-               printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
+               printf("pa = %llx\n",
+                   (unsigned long long)VM_PAGE_TO_PHYS(pg));
 #ifdef DDB
                Debugger();
 #endif
@@ -626,7 +616,7 @@
        struct pmap *pm;
 
        pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
-       memset(pm, 0, sizeof *pm);
+       memset(pm, 0, sizeof(*pm));
        pm->pm_refs = 1;
        return pm;
 }
@@ -650,9 +640,8 @@
 {
        int i;
 
-       if (--pm->pm_refs > 0) {
+       if (--pm->pm_refs > 0)
                return;
-       }
        KASSERT(pm->pm_stats.resident_count == 0);
        KASSERT(pm->pm_stats.wired_count == 0);
        for (i = 0; i < STSZ; i++)
@@ -694,14 +683,14 @@
 void
 pmap_zero_page(paddr_t pa)
 {
+       int i;
 
 #ifdef PPC_4XX_NOCACHE
        memset((void *)pa, 0, PAGE_SIZE);
 #else
-       int i;
 
        for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
-               __asm volatile ("dcbz 0,%0" :: "r"(pa));
+               __asm volatile ("dcbz 0,%0" : : "r"(pa));
                pa += CACHELINESIZE;
        }
 #endif
@@ -731,6 +720,7 @@
                return 0;
 
        s = splvm();
+
        pv = pa_to_pv(pa);
        if (!pv->pv_pm) {
                /*
@@ -761,7 +751,9 @@
                PV_WIRE(pv);
                pm->pm_stats.wired_count++;
        }
+
        splx(s);
+
        return 1;
 }
 
@@ -784,9 +776,8 @@



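When the data cache is enabled (the !PPC_4XX_NOCACHE branch), the
pmap_zero_page() hunk above clears a page one cache line at a time with
dcbz instead of calling memset().  A minimal standalone sketch of that
technique follows; PAGE_SIZE and CACHELINESIZE here are assumed values
standing in for the kernel's definitions (16 KB pages per the kernmap
comment, 32-byte data cache lines on 4xx-class cores), and the function
name is illustrative, not the committed one:

#define PAGE_SIZE       16384   /* assumed: 16 KB pages, per the kernmap comment */
#define CACHELINESIZE   32      /* assumed: 32-byte data cache lines on 4xx */

/*
 * Zero one directly addressable page.  Each "dcbz 0,rA" establishes a
 * whole cache line as zeroes without reading memory first, so the page
 * is cleared without any explicit stores.
 */
static void
zero_page_dcbz(unsigned long pa)
{
        int i;

        for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
                __asm volatile ("dcbz 0,%0" : : "r"(pa));
                pa += CACHELINESIZE;
        }
}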