Source-Changes-HG archive


[src/sommerfeld_i386mp_1]: src/sys/arch/i386 Clean up the TLB invalidation lo...



details:   https://anonhg.NetBSD.org/src/rev/9e5f0728455d
branches:  sommerfeld_i386mp_1
changeset: 482340:9e5f0728455d
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Wed Jan 03 16:55:46 2001 +0000

description:
Clean up the TLB invalidation logic.  Remove the last vestiges of
the old "remove record" stuff, and just use the shootdown path, even
for uniprocessor kernels.  Change the shootdown path to defer actually
performing work (or sending the IPI to signal other processors to begin
work) until explicitly triggered by a call to pmap_tlb_shootnow().

Also remove all of the PTP and PV entry stealing code -- it's complicated,
not often called, and not really useful anymore now that UVM itself can
handle pmap_enter() failures.

diffstat:

 sys/arch/i386/i386/bus_machdep.c |    22 +-
 sys/arch/i386/i386/pmap.c        |  1204 +++++--------------------------------
 sys/arch/i386/i386/vm_machdep.c  |    34 +-
 sys/arch/i386/include/pmap.h     |    35 +-
 4 files changed, 183 insertions(+), 1112 deletions(-)

diffs (truncated from 1912 to 300 lines):

diff -r 2b260665e73e -r 9e5f0728455d sys/arch/i386/i386/bus_machdep.c
--- a/sys/arch/i386/i386/bus_machdep.c  Wed Jan 03 10:09:00 2001 +0000
+++ b/sys/arch/i386/i386/bus_machdep.c  Wed Jan 03 16:55:46 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: bus_machdep.c,v 1.1.2.3 2000/11/18 22:56:26 sommerfeld Exp $   */
+/*     $NetBSD: bus_machdep.c,v 1.1.2.4 2001/01/03 16:55:46 thorpej Exp $      */
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -275,10 +275,7 @@
        u_long pa, endpa;
        vaddr_t va;
        pt_entry_t *pte;
-#ifdef MULTIPROCESSOR
-       pt_entry_t opte;
-#endif
-       
+       int32_t cpumask = 0;
 
        pa = i386_trunc_page(bpa);
        endpa = i386_round_page(bpa + size);
@@ -302,6 +299,10 @@
                 * the mainboard has wired up device space non-cacheable
                 * on those machines.
                 *
+                * Note that it's not necessary to use atomic ops to
+                * fiddle with the PTE here, because we don't care
+                * about mod/ref information.
+                *
                 * XXX should hand this bit to pmap_kenter_pa to
                 * save the extra invalidate!
                 *
@@ -309,19 +310,16 @@
                 */
                if (cpu_class != CPUCLASS_386) {
                        pte = kvtopte(va);
-#ifdef MULTIPROCESSOR
-                       opte = *pte;
-#endif
                        if (cacheable)
                                *pte &= ~PG_N;
                        else
                                *pte |= PG_N;
-                       pmap_update_pg(va);
-#ifdef MULTIPROCESSOR
-                       pmap_tlb_shootdown(pmap_kernel(), va, opte);
-#endif
+                       pmap_tlb_shootdown(pmap_kernel(), va, *pte,
+                           &cpumask);
                }
        }
+
+       pmap_tlb_shootnow(cpumask);
  
        return 0;
 }
diff -r 2b260665e73e -r 9e5f0728455d sys/arch/i386/i386/pmap.c
--- a/sys/arch/i386/i386/pmap.c Wed Jan 03 10:09:00 2001 +0000
+++ b/sys/arch/i386/i386/pmap.c Wed Jan 03 16:55:46 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.83.2.19 2001/01/02 06:58:08 thorpej Exp $   */
+/*     $NetBSD: pmap.c,v 1.83.2.20 2001/01/03 16:55:46 thorpej Exp $   */
 
 /*
  *
@@ -152,16 +152,10 @@
  * is a void function.
  *
  * [B] new page tables pages (PTP)
- *     - plan 1: call uvm_pagealloc()
+ *     - call uvm_pagealloc()
  *             => success: zero page, add to pm_pdir
- *             => failure: we are out of free vm_pages
- *     - plan 2: using a linked LIST of active pmaps we attempt
- *     to "steal" a PTP from another process.   we lock
- *     the target pmap with simple_lock_try so that if it is
- *     busy we do not block.
- *             => success: remove old mappings, zero, add to pm_pdir
- *             => failure: highly unlikely
- *     - plan 3: panic
+ *             => failure: we are out of free vm_pages, let pmap_enter()
+ *                tell UVM about it.
  *
  * note: for kernel PTPs, we start with NKPTP of them.   as we map
  * kernel memory (at uvm_map time) we check to see if we've grown
@@ -184,14 +178,7 @@
  *             => success: map it in, free the pv_entry's, DONE!
  *             => failure: kmem_object locked, no free vm_pages, etc.
  *                     save VA for later call to [a], go to plan 3.
- *     - plan 3: using the pv_entry/pv_head lists find a pv_entry
- *             structure that is part of a non-kernel lockable pmap
- *             and "steal" that pv_entry by removing the mapping
- *             and reusing that pv_entry.
- *             => success: done
- *             => failure: highly unlikely: unable to lock and steal
- *                     pv_entry
- *     - plan 4: we panic.
+ *     If we fail, we simply let pmap_enter() tell UVM about it.
  */
 
 /*
@@ -305,6 +292,8 @@
        int pq_flushu;          /* pending flush user */
 } pmap_tlb_shootdown_q[I386_MAXPROCS];
 
+#define        PMAP_TLB_MAXJOBS        16
+
 struct pool pmap_tlb_shootdown_job_pool;
 int pj_nentries, pj_nbytes;
 void *pj_page;
@@ -315,8 +304,6 @@
 void   pmap_tlb_shootdown_job_put __P((struct pmap_tlb_shootdown_q *,
            struct pmap_tlb_shootdown_job *));
 
-void pmap_tlb_shootnow __P((void));
-
 /*
  * global data structures
  */
@@ -391,7 +378,6 @@
  */
 
 static struct pmap_head pmaps;
-static struct pmap *pmaps_hand = NULL; /* used by pmap_steal_ptp */
 
 /*
  * pool that pmap structures are allocated from
@@ -438,7 +424,7 @@
  */
 
 static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
-static struct vm_page  *pmap_alloc_ptp __P((struct pmap *, int, boolean_t));
+static struct vm_page  *pmap_alloc_ptp __P((struct pmap *, int));
 static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
 #define ALLOCPV_NEED   0       /* need PV now */
 #define ALLOCPV_TRY    1       /* just try to allocate, don't steal */
@@ -451,20 +437,18 @@
 static void             pmap_free_pvs __P((struct pmap *, struct pv_entry *));
 static void             pmap_free_pv_doit __P((struct pv_entry *));
 static void             pmap_free_pvpage __P((void));
-static struct vm_page  *pmap_get_ptp __P((struct pmap *, int, boolean_t));
+static struct vm_page  *pmap_get_ptp __P((struct pmap *, int));
 static boolean_t        pmap_is_curpmap __P((struct pmap *));
 static boolean_t        pmap_is_active __P((struct pmap *, int));
 static pt_entry_t      *pmap_map_ptes __P((struct pmap *));
 static struct pv_entry *pmap_remove_pv __P((struct pv_head *, struct pmap *,
                                             vaddr_t));
 static boolean_t        pmap_remove_pte __P((struct pmap *, struct vm_page *,
-                                             pt_entry_t *, vaddr_t));
+                                             pt_entry_t *, vaddr_t,
+                                             int32_t *));
 static void             pmap_remove_ptes __P((struct pmap *,
-                                              void *,
                                               struct vm_page *, vaddr_t,
-                                              vaddr_t, vaddr_t));
-static struct vm_page  *pmap_steal_ptp __P((struct uvm_object *,
-                                            vaddr_t));
+                                              vaddr_t, vaddr_t, int32_t *));
 static vaddr_t          pmap_tmpmap_pa __P((paddr_t));
 static pt_entry_t      *pmap_tmpmap_pvepte __P((struct pv_entry *));
 static void             pmap_tmpunmap_pa __P((void));
@@ -476,21 +460,11 @@
                                         struct pmap_transfer_location *,
                                         int, boolean_t));
 #endif
-static boolean_t        pmap_try_steal_pv __P((struct pv_head *,
-                                               struct pv_entry *,
-                                               struct pv_entry *));
 static void            pmap_unmap_ptes __P((struct pmap *));
 
 void                   pmap_pinit __P((pmap_t));
 void                   pmap_release __P((pmap_t));
 
-static void            pmap_defer_flush __P((void *prr,
-                                        struct pmap *pmap,
-                                        vaddr_t va, pt_entry_t));
-
-static void            pmap_flush_deferred __P((void *prr,
-                                        struct pmap *pmap));
-
 /*
  * p m a p   i n l i n e   h e l p e r   f u n c t i o n s
  */
@@ -621,6 +595,8 @@
         * Flush the APTE mapping from all other CPUs that
         * are using the pmap we are using (who's APTE space
         * is the one we've just modified).
+        *
+        * XXXthorpej -- find a way to defer the IPI.
         */
        for (CPU_INFO_FOREACH(cii, ci)) {
                if (ci == self)
@@ -729,16 +705,20 @@
 
        pte = vtopte(va);
 
-       npte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+       npte = pa | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
             PG_V | pmap_pg_g;
 
        opte = i386_atomic_testset_ul(pte, npte); /* zap! */
 
        if (pmap_valid_entry(opte)) {
+#if defined(MULTIPROCESSOR)
+               int32_t cpumask = 0;
+
+               pmap_tlb_shootdown(pmap_kernel(), va, opte, &cpumask);
+               pmap_tlb_shootnow(cpumask);
+#else
+               /* Don't bother deferring in the single CPU case. */
                pmap_update_pg(va);
-#ifdef MULTIPROCESSOR
-               /* XXX only strictly needed if opte ">" pte */
-               pmap_tlb_shootdown(pmap_kernel(), va, opte);
 #endif
        }
 }
@@ -760,6 +740,7 @@
        vsize_t len;
 {
        pt_entry_t *pte, opte;
+       int32_t cpumask = 0;
 
        len >>= PAGE_SHIFT;
        for ( /* null */ ; len ; len--, va += NBPG) {
@@ -770,22 +751,9 @@
                        panic("pmap_kremove: PG_PVLIST mapping for 0x%lx\n",
                              va);
 #endif
-
-#if defined(I386_CPU)
-               if (cpu_class != CPUCLASS_386)
-#endif
-               {
-                       pmap_update_pg(va);
-#ifdef MULTIPROCESSOR
-                       pmap_tlb_shootdown(pmap_kernel(), va, opte);
-#endif
-               }
-               
+               pmap_tlb_shootdown(pmap_kernel(), va, opte, &cpumask);
        }
-#if defined(I386_CPU)
-       if (cpu_class == CPUCLASS_386)
-               tlbflush();
-#endif
+       pmap_tlb_shootnow(cpumask);
 }
 
 /*
@@ -799,36 +767,19 @@
        int npgs;
 {
        pt_entry_t *pte, opte;
-       int lcv;
+       int32_t cpumask = 0;
        vaddr_t tva;
-#if defined(I386_CPU)
-       boolean_t need_update = FALSE;
-#endif
+       int lcv;
 
        for (lcv = 0 ; lcv < npgs ; lcv++) {
                tva = va + lcv * NBPG;
                pte = vtopte(tva);
                opte = *pte;
                *pte = VM_PAGE_TO_PHYS(pgs[lcv]) | PG_RW | PG_V | pmap_pg_g;
-#if defined(I386_CPU)
-               if (cpu_class == CPUCLASS_386) {
-                       if (pmap_valid_entry(opte))
-                               need_update = TRUE;
-                       continue;
-               }
-#endif
-               if (pmap_valid_entry(opte)) {
-                       pmap_update_pg(tva);
-#ifdef MULTIPROCESSOR
-                       /* XXX only strictly needed if opte ">" pte */
-                       pmap_tlb_shootdown(pmap_kernel(), va, opte);
-#endif
-               }
+               if (pmap_valid_entry(opte))
+                       pmap_tlb_shootdown(pmap_kernel(), va, opte, &cpumask);
        }
-#if defined(I386_CPU)
-       if (need_update && cpu_class == CPUCLASS_386)
-               tlbflush();
-#endif
+       pmap_tlb_shootnow(cpumask);
 }
 
 /*
@@ -1286,8 +1237,8 @@
 {
        struct vm_page *pg;
        struct pv_page *pvpage;
-       int lcv, idx, npg, s;
-       struct pv_entry *pv, *cpv, *prevpv;
+       struct pv_entry *pv;
+       int s;


