Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/powerpc/ibm4xx Real fix for pmap_procwr(), attempte...



details:   https://anonhg.NetBSD.org/src/rev/856318c7b6b0
branches:  trunk
changeset: 943762:856318c7b6b0
user:      rin <rin%NetBSD.org@localhost>
date:      Thu Sep 10 04:31:55 2020 +0000

description:
Real fix for pmap_procwr(), attempted in revs 1.85 and 1.87:
http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/arch/powerpc/ibm4xx/pmap.c#rev1.85
http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/arch/powerpc/ibm4xx/pmap.c#rev1.87

ibm4xx has VIPT icache and operations in pmap_procwr() should be done with
DMMU enabled (write back dcache into memory and invalidate icache).

When p == curproc, this is trivial. However, p != curproc needs a special
care; we cannot rely upon TLB miss handler in user context. Therefore,
extract pa and operate against it.

Note that va below VM_MIN_KERNEL_ADDRESS (== 2GB at the moment) is reserved
for direct mapping.

Tested by gdb with WIP software single stepping for ibm4xx.

diffstat:

 sys/arch/powerpc/ibm4xx/pmap.c |  100 ++++++++++++++++++++++++++--------------
 1 files changed, 65 insertions(+), 35 deletions(-)

diffs (124 lines):

diff -r 21b2679693b9 -r 856318c7b6b0 sys/arch/powerpc/ibm4xx/pmap.c
--- a/sys/arch/powerpc/ibm4xx/pmap.c    Thu Sep 10 03:32:46 2020 +0000
+++ b/sys/arch/powerpc/ibm4xx/pmap.c    Thu Sep 10 04:31:55 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.93 2020/09/10 03:32:46 rin Exp $    */
+/*     $NetBSD: pmap.c,v 1.94 2020/09/10 04:31:55 rin Exp $    */
 
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.93 2020/09/10 03:32:46 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.94 2020/09/10 04:31:55 rin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -1160,42 +1160,72 @@
 void
 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
 {
-       struct pmap *pm = p->p_vmspace->vm_map.pmap;
-       int msr, ctx, opid, step;
+
+       if (__predict_true(p == curproc)) {
+               struct pmap *pm = p->p_vmspace->vm_map.pmap;
+               int msr, ctx, opid;
+
+               /*
+                * Take it easy! TLB miss handler takes care of us.
+                */
 
-       step = CACHELINESIZE;
+               /*
+                * Need to turn off IMMU and switch to user context.
+                * (icbi uses DMMU).
+                */
+
+               if (!(ctx = pm->pm_ctx)) {
+                       /* No context -- assign it one */
+                       ctx_alloc(pm);
+                       ctx = pm->pm_ctx;
+               }
 
-       /*
-        * Need to turn off IMMU and switch to user context.
-        * (icbi uses DMMU).
-        */
-       if (!(ctx = pm->pm_ctx)) {
-               /* No context -- assign it one */
-               ctx_alloc(pm);
-               ctx = pm->pm_ctx;
+               __asm volatile(
+                       "mfmsr %0;"
+                       "li %1,0x20;"           /* Turn off IMMU */
+                       "andc %1,%0,%1;"
+                       "ori %1,%1,0x10;"       /* Turn on DMMU for sure */
+                       "mtmsr %1;"
+                       "isync;"
+                       "mfpid %1;"
+                       "mtpid %2;"
+                       "isync;"
+               "1:"
+                       "dcbst 0,%3;"
+                       "icbi 0,%3;"
+                       "add %3,%3,%5;"
+                       "sub. %4,%4,%5;"
+                       "bge 1b;"
+                       "sync;"
+                       "mtpid %1;"
+                       "mtmsr %0;"
+                       "isync;"
+                       : "=&r" (msr), "=&r" (opid)
+                       : "r" (ctx), "r" (va), "r" (len), "r" (CACHELINESIZE));
+       } else {
+               struct pmap *pm = p->p_vmspace->vm_map.pmap;
+               paddr_t pa;
+               vaddr_t tva, eva;
+               int tlen;
+
+               /*
+                * For p != curproc, we cannot rely upon TLB miss handler in
+                * user context. Therefore, extract pa and operate against it.
+                *
+                * Note that va below VM_MIN_KERNEL_ADDRESS is reserved for
+                * direct mapping.
+                */
+
+               for (tva = va; len > 0; tva = eva, len -= tlen) {
+                       eva = uimin(tva + len, trunc_page(tva + PAGE_SIZE));
+                       tlen = eva - tva;
+                       if (!pmap_extract(pm, tva, &pa)) {
+                               /* XXX should be already unmapped */
+                               continue;
+                       }
+                       __syncicache((void *)pa, tlen);
+               }
        }
-       __asm volatile(
-               "mfmsr %0;"
-               "li %1, %7;"
-               "andc %1,%0,%1;"
-               "mtmsr %1;"
-               "isync;"
-               "mfpid %1;"
-               "mtpid %2;"
-               "isync;"
-               "1:"
-               "dcbst 0,%3;"
-               "icbi 0,%3;"
-               "add %3,%3,%5;"
-               "addc. %4,%4,%6;"
-               "bge 1b;"
-               "sync;"
-               "mtpid %1;"
-               "mtmsr %0;"
-               "isync;"
-               : "=&r" (msr), "=&r" (opid)
-               : "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step),
-                 "K" (PSL_IR | PSL_DR));
 }
 
 static inline void



Home | Main Index | Thread Index | Old Index