Source-Changes-HG archive


[src/trunk]: src/sys Cleanup comments in pmap_tlb.c. Attach tlb evcnts.



details:   https://anonhg.NetBSD.org/src/rev/ecbdbdade8cb
branches:  trunk
changeset: 766455:ecbdbdade8cb
user:      matt <matt%NetBSD.org@localhost>
date:      Thu Jun 23 05:42:27 2011 +0000

description:
Clean up comments in pmap_tlb.c.  Attach TLB evcnts.
Eliminate ti_asid_mask.
Clean up some of the MP code.  Conditionalize the shootdown code.
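
For context, the new pmap_tlb_info_evcnt_attach() hook (its prototype is added
to pmap.h below) is meant to be called once per TLB info structure from
machine-dependent startup code, as the booke_machdep.c hunk does for the boot
CPU.  A minimal sketch, assuming only identifiers that appear in the diff; the
wrapper function itself is illustrative:

#include <sys/param.h>
#include <sys/cpu.h>	/* curcpu(), struct cpu_info */
/* pmap_tlb_info_evcnt_attach() is declared in common/pmap/tlb/pmap.h */

/*
 * Illustrative MD startup snippet: once the boot CPU's TLB info has been
 * initialized, attach its event counters so they show up with the other
 * kernel evcnts (e.g. via vmstat -e).
 */
void
example_register_tlb_evcnts(void)	/* hypothetical wrapper */
{

	pmap_tlb_info_evcnt_attach(curcpu()->ci_tlb_info);
}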

diffstat:

 sys/arch/powerpc/booke/booke_machdep.c |    5 +
 sys/arch/powerpc/booke/e500_tlb.c      |    6 +-
 sys/common/pmap/tlb/pmap.h             |    5 +-
 sys/common/pmap/tlb/pmap_tlb.c         |  233 +++++++++++++++++++++-----------
 4 files changed, 160 insertions(+), 89 deletions(-)

diffs (truncated from 622 to 300 lines):

diff -r b2579d3087e9 -r ecbdbdade8cb sys/arch/powerpc/booke/booke_machdep.c
--- a/sys/arch/powerpc/booke/booke_machdep.c    Thu Jun 23 04:39:24 2011 +0000
+++ b/sys/arch/powerpc/booke/booke_machdep.c    Thu Jun 23 05:42:27 2011 +0000
@@ -177,6 +177,11 @@
        printf("avail memory = %s\n", pbuf);
 
        /*
+        * Register the tlb's evcnts
+        */
+       pmap_tlb_info_evcnt_attach(curcpu()->ci_tlb_info);
+
+       /*
         * Set up the board properties database.
         */
        board_info_init();
diff -r b2579d3087e9 -r ecbdbdade8cb sys/arch/powerpc/booke/e500_tlb.c
--- a/sys/arch/powerpc/booke/e500_tlb.c Thu Jun 23 04:39:24 2011 +0000
+++ b/sys/arch/powerpc/booke/e500_tlb.c Thu Jun 23 05:42:27 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: e500_tlb.c,v 1.4 2011/06/23 01:27:20 matt Exp $        */
+/*     $NetBSD: e500_tlb.c,v 1.5 2011/06/23 05:42:27 matt Exp $        */
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -36,7 +36,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: e500_tlb.c,v 1.4 2011/06/23 01:27:20 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: e500_tlb.c,v 1.5 2011/06/23 05:42:27 matt Exp $");
 
 #include <sys/param.h>
 
@@ -436,7 +436,7 @@
                         */
                        if ((mas1 & (MAS1_V|MAS1_TS)) == (MAS1_V|MAS1_TS)
                            && asid_lo <= (mas1 & MAS1_TID)
-                           && (mas1 & MAS1_TID) < asid_hi) {
+                           && (mas1 & MAS1_TID) <= asid_hi) {
                                mtspr(SPR_MAS1, mas1 ^ MAS1_V);
 #if 0
                                printf("%s[%zu,%zu]->[%x]\n",
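
The e500_tlb.c hunk above is an off-by-one fix in the ASID-range invalidation
check: the upper bound is now inclusive, so flushing ASIDs asid_lo through
asid_hi also hits entries whose MAS1_TID equals asid_hi.  A stand-alone sketch
of the corrected predicate (the helper name is illustrative, not part of the
source):

#include <stdbool.h>
#include <stdint.h>

/*
 * Mirrors the MAS1 test above: a valid entry in the right translation
 * space is a candidate for invalidation when its TID (ASID) falls within
 * [asid_lo, asid_hi].  The pre-fix code used "tid < asid_hi" and so
 * missed entries tagged with the upper bound itself.
 */
static inline bool
tid_in_flush_range(uint32_t tid, uint32_t asid_lo, uint32_t asid_hi)
{

	return asid_lo <= tid && tid <= asid_hi;
}
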
diff -r b2579d3087e9 -r ecbdbdade8cb sys/common/pmap/tlb/pmap.h
--- a/sys/common/pmap/tlb/pmap.h        Thu Jun 23 04:39:24 2011 +0000
+++ b/sys/common/pmap/tlb/pmap.h        Thu Jun 23 05:42:27 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.6 2011/06/23 02:33:44 matt Exp $    */
+/*     $NetBSD: pmap.h,v 1.7 2011/06/23 05:42:27 matt Exp $    */
 
 /*
  * Copyright (c) 1992, 1993
@@ -161,7 +161,6 @@
 #define        tlbinfo_noasids_p(ti)   ((ti)->ti_asids_free == 0)
        kmutex_t *ti_lock;
        u_int ti_wired;                 /* # of wired TLB entries */
-       uint32_t ti_asid_mask;
        uint32_t ti_asid_max;
        LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */
 #ifdef MULTIPROCESSOR
@@ -180,6 +179,7 @@
 #else
 #define tlbinfo_index(ti)      (0)
 #endif
+       struct evcnt ti_evcnt_asid_reinits;
        u_long ti_asid_bitmap[256 / (sizeof(u_long) * 8)];
 };
 
@@ -238,6 +238,7 @@
 void   pmap_tlb_syncicache(vaddr_t, uint32_t);
 #endif
 void   pmap_tlb_info_init(struct pmap_tlb_info *);
+void   pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *);
 void   pmap_tlb_asid_acquire(pmap_t, struct lwp *l);
 void   pmap_tlb_asid_deactivate(pmap_t);
 void   pmap_tlb_asid_release_all(pmap_t);
diff -r b2579d3087e9 -r ecbdbdade8cb sys/common/pmap/tlb/pmap_tlb.c
--- a/sys/common/pmap/tlb/pmap_tlb.c    Thu Jun 23 04:39:24 2011 +0000
+++ b/sys/common/pmap/tlb/pmap_tlb.c    Thu Jun 23 05:42:27 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_tlb.c,v 1.6 2011/06/23 02:33:44 matt Exp $        */
+/*     $NetBSD: pmap_tlb.c,v 1.7 2011/06/23 05:42:27 matt Exp $        */
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.6 2011/06/23 02:33:44 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.7 2011/06/23 05:42:27 matt Exp $");
 
 /*
  * Manages address spaces in a TLB.
@@ -79,16 +79,16 @@
  *
  * When a change to the local TLB may require a change in the TLB's of other
  * CPUs, we try to avoid sending an IPI if at all possible.  For instance, if
- * are updating a PTE and that PTE previously was invalid and therefore
- * couldn't support an active mapping, there's no need for an IPI since can be
- * no TLB entry to invalidate.  The other case is when we change a PTE to be
- * modified we just update the local TLB.  If another TLB has a stale entry,
- * a TLB MOD exception will be raised and that will cause the local TLB to be
- * updated.
+ * we are updating a PTE and that PTE previously was invalid and therefore
+ * couldn't support an active mapping, there's no need for an IPI since there
+ * can't be a TLB entry to invalidate.  The other case is when we change a PTE
+ * to be modified we just update the local TLB.  If another TLB has a stale
+ * entry, a TLB MOD exception will be raised and that will cause the local TLB
+ * to be updated.
  *
  * We never need to update a non-local TLB if the pmap doesn't have a valid
  * ASID for that TLB.  If it does have a valid ASID but isn't current "onproc"
- * we simply reset its ASID for that TLB and then time it goes "onproc" it
+ * we simply reset its ASID for that TLB and then when it goes "onproc" it
  * will allocate a new ASID and any existing TLB entries will be orphaned.
  * Only in the case that pmap has an "onproc" ASID do we actually have to send
  * an IPI.
@@ -110,12 +110,12 @@
  *     0) nothing,
  *     1) if that ASID is still "onproc", we invalidate the TLB entries for
  *        that single ASID.  If not, just reset the pmap's ASID to invalidate
- *        and let it allocated the next time it goes "onproc",
+ *        and let it allocate a new ASID the next time it goes "onproc",
  *     2) we reinitialize the ASID space (preserving any "onproc" ASIDs) and
  *        invalidate all non-wired non-global TLB entries,
  *     3) we invalidate all of the non-wired global TLB entries,
  *     4) we reinitialize the ASID space (again preserving any "onproc" ASIDs)
- *        invalidate all non-wried TLB entries.
+ *        invalidate all non-wired TLB entries.
  *
  * As you can see, shootdowns are not concerned with addresses, just address
  * spaces.  Since the number of TLB entries is usually quite small, this avoids
@@ -135,12 +135,6 @@
 #include <uvm/uvm.h>
 
 static kmutex_t pmap_tlb0_mutex __cacheline_aligned;
-#ifdef MULTIPROCESSOR
-static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
-       [0] = &pmap_tlb_info,
-};
-static u_int pmap_ntlbs = 1;
-#endif
 
 #define        IFCONSTANT(x)   (__builtin_constant_p((x)) ? (x) : 0)
 
@@ -157,7 +151,7 @@
 #endif
        .ti_lock = &pmap_tlb0_mutex,
        .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR)
        .ti_cpu_mask = 1,
        .ti_tlbinvop = TLBINV_NOBODY,
 #endif
@@ -165,6 +159,13 @@
 
 #undef IFCONSTANT
 
+#if defined(MULTIPROCESSOR)
+static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
+       [0] = &pmap_tlb0_info,
+};
+static u_int pmap_ntlbs = 1;
+#endif
+
 #define        __BITMAP_SET(bm, n) \
        ((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
 #define        __BITMAP_CLR(bm, n) \
@@ -182,21 +183,15 @@
 {
 #ifdef DIAGNOSTIC
        struct pmap_asid_info *pai;
-//     printf("%s: ", __func__);
        LIST_FOREACH(pai, &ti->ti_pais, pai_link) {
-//             printf(" %p=%u", pai, pai->pai_asid);
                KASSERT(pai != NULL);
-#if 1
                KASSERT(PAI_PMAP(pai, ti) != pmap_kernel());
                KASSERT(pai->pai_asid > KERNEL_PID);
                KASSERT(TLBINFO_ASID_INUSE_P(ti, pai->pai_asid));
-#endif
        }
-//     printf("\n");
 #endif
 }
 
-
 static inline void
 pmap_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
        struct pmap *pm)
@@ -205,7 +200,7 @@
         * We must have an ASID but it must not be onproc (on a processor).
         */
        KASSERT(pai->pai_asid > KERNEL_PID);
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR)
        KASSERT((pm->pm_onproc & ti->ti_cpu_mask) == 0);
 #endif
        LIST_REMOVE(pai, pai_link);
@@ -222,7 +217,7 @@
         */
        pai->pai_asid = 0;
 
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR)
        /*
         * The bits in pm_active belonging to this TLB can only be changed
         * while this TLB's lock is held.
@@ -232,9 +227,37 @@
 }
 
 void
+pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
+{
+#if defined(MULTIPROCESSOR)
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_desired,
+           EVCNT_TYPE_MISC, NULL,
+           ti->ti_name, "icache syncs desired");
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_asts,
+           EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
+           ti->ti_name, "icache sync asts");
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_all,
+           EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
+           ti->ti_name, "icache full syncs");
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_pages,
+           EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
+           ti->ti_name, "icache pages synced");
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_duplicate,
+           EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
+           ti->ti_name, "icache dup pages skipped");
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_deferred,
+           EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
+           ti->ti_name, "icache pages deferred");
+#endif /* MULTIPROCESSOR */
+       evcnt_attach_dynamic_nozero(&ti->ti_evcnt_asid_reinits,
+           EVCNT_TYPE_MISC, NULL,
+           ti->ti_name, "asid pool reinit");
+}
+
+void
 pmap_tlb_info_init(struct pmap_tlb_info *ti)
 {
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR)
        if (ti != &pmap_tlb0_info) {
 
                KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
@@ -243,7 +266,6 @@
                ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
                ti->ti_asid_hint = KERNEL_PID + 1;
                ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
-               ti->ti_asid_mask = pmap_tlbs[0]->ti_asid_mask;
                ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
                ti->ti_tlbinvop = TLBINV_NOBODY,
                ti->ti_victim = NULL;
@@ -251,26 +273,23 @@
                ti->ti_index = pmap_ntlbs++;
                ti->ti_wired = 0;
                pmap_tlbs[ti->ti_index] = ti;
+               snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u",
+                   ti->ti_index);
+               pmap_tlb_info_evcnt_attach(ti);
                return;
        }
-#endif
+#endif /* MULTIPROCESSOR */
        KASSERT(ti == &pmap_tlb0_info);
        mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
        if (ti->ti_asid_max == 0) {
                ti->ti_asid_max = pmap_md_tlb_asid_max();
                ti->ti_asids_free = ti->ti_asid_max - (KERNEL_PID + 1);
        }
-       /*
-        * Now figure out what mask we need to focus on asid_max.
-        */
-       ti->ti_asid_mask = ~0U >> __builtin_clz(ti->ti_asid_max);
 
        KASSERT(ti->ti_asid_max < sizeof(ti->ti_asid_bitmap)*8);
-       KASSERT(ti->ti_asid_max <= ti->ti_asid_mask);
-       KASSERT(((ti->ti_asid_mask + 1) & ti->ti_asid_mask) == 0);
 }
 
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR)
 void
 pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
 {
@@ -282,9 +301,15 @@
        const __cpuset_t cpu_mask = CPUSET_SINGLE(cpu_index(ci));
        CPUSET_ADDSET(ti->ti_cpu_mask, cpu_mask);
        ci->ci_tlb_info = ti;
-       ci->ci_ksp_tlb_slot = ti->ti_wired++;
+
        /*
-        * Mark the kernel as active and "onproc" for this cpu.
+        * Do any MD tlb info init.
+        */
+       pmap_md_tlb_info_attach(ti, ci);
+
+       /*


