Source-Changes-HG archive


[src/trunk]: src/sys/uvm Go back to freeing struct vm_anon one by one. There...



details:   https://anonhg.NetBSD.org/src/rev/2f036f127a47
branches:  trunk
changeset: 746042:2f036f127a47
user:      ad <ad%NetBSD.org@localhost>
date:      Fri Mar 20 19:08:54 2020 +0000

description:
Go back to freeing struct vm_anon one by one.  There may have been an
advantage circa ~2008 but there isn't now.
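
For readers skimming the diff below, here is a minimal user-space sketch of the
pattern being reverted; the struct anon type and the helper names are
hypothetical stand-ins, not the UVM API.  The old code chained anons whose
reference count dropped to zero onto a deferred "tofree" list and freed them in
one batch (uvm_anon_freelst() also released the amap lock); the new code frees
each anon immediately with uvm_anfree() and the caller simply does
amap_unlock().

    #include <stdlib.h>

    /* Hypothetical stand-in for struct vm_anon: a refcount and a list link. */
    struct anon {
            int ref;
            struct anon *link;
    };

    /* Old pattern: defer freeing onto a caller-supplied "tofree" list. */
    static void
    drop_ref_deferred(struct anon *an, struct anon **tofree)
    {
            if (--an->ref == 0) {
                    an->link = *tofree;
                    *tofree = an;
            }
    }

    /* Old pattern, second half: free the whole deferred list in one pass. */
    static void
    free_list(struct anon *tofree)
    {
            while (tofree != NULL) {
                    struct anon *next = tofree->link;
                    free(tofree);
                    tofree = next;
            }
    }

    /* New pattern: free each anon as soon as its last reference is dropped. */
    static void
    drop_ref_immediate(struct anon *an)
    {
            if (--an->ref == 0)
                    free(an);
    }

    int
    main(void)
    {
            struct anon *tofree = NULL;
            struct anon *a = calloc(1, sizeof(*a));
            struct anon *b = calloc(1, sizeof(*b));
            struct anon *c = calloc(1, sizeof(*c));

            if (a == NULL || b == NULL || c == NULL)
                    return 1;
            a->ref = b->ref = c->ref = 1;

            /* Old style: both land on the deferred list, freed together. */
            drop_ref_deferred(a, &tofree);
            drop_ref_deferred(b, &tofree);
            free_list(tofree);

            /* New style: freed immediately, no list bookkeeping. */
            drop_ref_immediate(c);
            return 0;
    }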

diffstat:

 sys/uvm/uvm_amap.c  |   69 ++++++++++++---------------------
 sys/uvm/uvm_amap.h  |    7 +-
 sys/uvm/uvm_anon.c  |  105 +++++++++++++++------------------------------------
 sys/uvm/uvm_anon.h  |   12 +----
 sys/uvm/uvm_fault.c |    6 +-
 sys/uvm/uvm_loan.c  |   11 ++--
 sys/uvm/uvm_map.c   |   12 ++---
 7 files changed, 76 insertions(+), 146 deletions(-)

diffs (truncated from 607 to 300 lines):

diff -r 43531c438cdc -r 2f036f127a47 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c        Fri Mar 20 19:06:14 2020 +0000
+++ b/sys/uvm/uvm_amap.c        Fri Mar 20 19:08:54 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_amap.c,v 1.118 2020/03/14 20:23:51 ad Exp $        */
+/*     $NetBSD: uvm_amap.c,v 1.119 2020/03/20 19:08:54 ad Exp $        */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.118 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.119 2020/03/20 19:08:54 ad Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -358,7 +358,7 @@
        int *newppref, *oldppref;
 #endif
        int i, *newsl, *newbck, *oldsl, *oldbck;
-       struct vm_anon **newover, **oldover, *tofree;
+       struct vm_anon **newover, **oldover;
        const km_flag_t kmflags =
            (flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
 
@@ -386,7 +386,6 @@
                slotadj = slotadd - slotoff;
                slotarea = amap->am_maxslot - slotmapped;
        }
-       tofree = NULL;
 
        /*
         * case 1: we already have enough slots in the map and thus
@@ -399,10 +398,10 @@
 #ifdef UVM_AMAP_PPREF
                        if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
                                amap_pp_adjref(amap, slotoff + slotmapped,
-                                   slotadd, 1, &tofree);
+                                   slotadd, 1);
                        }
 #endif
-                       uvm_anon_freelst(amap, tofree);
+                       amap_unlock(amap);
                        UVMHIST_LOG(maphist,
                            "<- done (case 1f), amap = %#jx, sltneed=%jd",
                            (uintptr_t)amap, slotneed, 0, 0);
@@ -414,11 +413,10 @@
                        entry->aref.ar_pageoff = slotoff;
 #ifdef UVM_AMAP_PPREF
                        if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
-                               amap_pp_adjref(amap, slotoff, slotadd, 1,
-                                   &tofree);
+                               amap_pp_adjref(amap, slotoff, slotadd, 1);
                        }
 #endif
-                       uvm_anon_freelst(amap, tofree);
+                       amap_unlock(amap);
                        UVMHIST_LOG(maphist,
                            "<- done (case 1b), amap = %#jx, sltneed=%jd",
                            (uintptr_t)amap, slotneed, 0, 0);
@@ -439,14 +437,13 @@
                                        amap_pp_adjref(amap,
                                            slotoff + slotmapped,
                                            (amap->am_nslot -
-                                           (slotoff + slotmapped)), 1,
-                                           &tofree);
+                                           (slotoff + slotmapped)), 1);
                                pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
                                    slotneed - amap->am_nslot);
                        }
 #endif
                        amap->am_nslot = slotneed;
-                       uvm_anon_freelst(amap, tofree);
+                       amap_unlock(amap);
 
                        /*
                         * no need to zero am_anon since that was done at
@@ -620,8 +617,7 @@
                if ((flags & AMAP_EXTEND_FORWARDS) &&
                    (slotoff + slotmapped) < amap->am_nslot)
                        amap_pp_adjref(amap, slotoff + slotmapped,
-                           (amap->am_nslot - (slotoff + slotmapped)), 1,
-                           &tofree);
+                           (amap->am_nslot - (slotoff + slotmapped)), 1);
                if (flags & AMAP_EXTEND_FORWARDS)
                        pp_setreflen(newppref, amap->am_nslot, 1,
                            slotneed - amap->am_nslot);
@@ -646,8 +642,7 @@
        }
        oldnslots = amap->am_maxslot;
        amap->am_maxslot = slotalloc;
-
-       uvm_anon_freelst(amap, tofree);
+       amap_unlock(amap);
 
        kmem_free(oldsl, oldnslots * sizeof(*oldsl));
        kmem_free(oldbck, oldnslots * sizeof(*oldbck));
@@ -727,7 +722,6 @@
 void
 amap_wipeout(struct vm_amap *amap)
 {
-       struct vm_anon *tofree = NULL;
        u_int lcv;
 
        UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
@@ -757,14 +751,13 @@
                    (uintptr_t)anon, anon->an_ref, 0, 0);
 
                /*
-                * Drop the reference.  Defer freeing.
+                * Drop the reference.
                 */
 
-               if (--anon->an_ref == 0) {
-                       anon->an_link = tofree;
-                       tofree = anon;
+               if (__predict_true(--anon->an_ref == 0)) {
+                       uvm_anfree(anon);
                }
-               if ((lcv & 31) == 31) {
+               if (__predict_false((lcv & 31) == 31)) {
                        preempt_point();
                }
        }
@@ -774,7 +767,7 @@
         */
 
        amap->am_nused = 0;
-       uvm_anon_freelst(amap, tofree);
+       amap_unlock(amap);
        amap_free(amap);
        UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
 }
@@ -799,7 +792,6 @@
 {
        const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
        struct vm_amap *amap, *srcamap;
-       struct vm_anon *tofree;
        u_int slots, lcv;
        krwlock_t *oldlock;
        vsize_t len;
@@ -958,15 +950,14 @@
        if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0) {
                srcamap->am_flags &= ~AMAP_SHARED;
        }
-       tofree = NULL;
 #ifdef UVM_AMAP_PPREF
        if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
                amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
-                   len >> PAGE_SHIFT, -1, &tofree);
+                   len >> PAGE_SHIFT, -1);
        }
 #endif
 
-       uvm_anon_freelst(srcamap, tofree);
+       amap_unlock(srcamap);
 
        /*
         * Install new amap.
@@ -1078,7 +1069,7 @@
                                nanon->an_lock = NULL;
                                nanon->an_ref--;
                                KASSERT(nanon->an_ref == 0);
-                               uvm_anon_free(nanon);
+                               uvm_anfree(nanon);
                        }
                        uvm_wait("cownowpage");
                        goto ReStart;
@@ -1173,8 +1164,7 @@
  * => map and amap must be locked.
  */
 void
-amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval,
-    struct vm_anon **tofree)
+amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
 {
        int stopslot, *ppref, lcv, prevlcv;
        int ref, len, prevref, prevlen;
@@ -1232,7 +1222,7 @@
                        pp_setreflen(ppref, lcv, ref, len);
                }
                if (ref == 0) {
-                       amap_wiperange(amap, lcv, len, tofree);
+                       amap_wiperange(amap, lcv, len);
                }
        }
 }
@@ -1244,8 +1234,7 @@
  * => Both map and amap must be locked by caller.
  */
 void
-amap_wiperange(struct vm_amap *amap, int slotoff, int slots,
-    struct vm_anon **tofree)
+amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
 {
        u_int lcv, stop, slotend;
        bool byanon;
@@ -1307,12 +1296,7 @@
 
                KASSERT(anon->an_lock == amap->am_lock);
                if (--anon->an_ref == 0) {
-                       /*
-                        * Eliminated the last reference to an anon - defer
-                        * freeing as uvm_anon_freelst() will unlock the amap.
-                        */
-                       anon->an_link = *tofree;
-                       *tofree = anon;
+                       uvm_anfree(anon);
                }
        }
 }
@@ -1570,7 +1554,6 @@
 amap_adjref_anons(struct vm_amap *amap, vaddr_t offset, vsize_t len,
     int refv, bool all)
 {
-       struct vm_anon *tofree = NULL;
 
 #ifdef UVM_AMAP_PPREF
        KASSERT(rw_write_held(amap->am_lock));
@@ -1590,13 +1573,13 @@
 #ifdef UVM_AMAP_PPREF
        if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
                if (all) {
-                       amap_pp_adjref(amap, 0, amap->am_nslot, refv, &tofree);
+                       amap_pp_adjref(amap, 0, amap->am_nslot, refv);
                } else {
-                       amap_pp_adjref(amap, offset, len, refv, &tofree);
+                       amap_pp_adjref(amap, offset, len, refv);
                }
        }
 #endif
-       uvm_anon_freelst(amap, tofree);
+       amap_unlock(amap);
 }
 
 /*
diff -r 43531c438cdc -r 2f036f127a47 sys/uvm/uvm_amap.h
--- a/sys/uvm/uvm_amap.h        Fri Mar 20 19:06:14 2020 +0000
+++ b/sys/uvm/uvm_amap.h        Fri Mar 20 19:08:54 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_amap.h,v 1.40 2020/02/23 15:46:43 ad Exp $ */
+/*     $NetBSD: uvm_amap.h,v 1.41 2020/03/20 19:08:54 ad Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -266,12 +266,11 @@
 #define PPREF_NONE ((int *) -1)        /* not using ppref */
 
 void           amap_pp_adjref          /* adjust references */
-                       (struct vm_amap *, int, vsize_t, int,
-                       struct vm_anon **);
+                       (struct vm_amap *, int, vsize_t, int);
 void           amap_pp_establish       /* establish ppref */
                        (struct vm_amap *, vaddr_t);
 void           amap_wiperange          /* wipe part of an amap */
-                       (struct vm_amap *, int, int, struct vm_anon **);
+                       (struct vm_amap *, int, int);
 #endif /* UVM_AMAP_PPREF */
 
 #endif /* _KERNEL */
diff -r 43531c438cdc -r 2f036f127a47 sys/uvm/uvm_anon.c
--- a/sys/uvm/uvm_anon.c        Fri Mar 20 19:06:14 2020 +0000
+++ b/sys/uvm/uvm_anon.c        Fri Mar 20 19:08:54 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_anon.c,v 1.75 2020/03/14 20:23:51 ad Exp $ */
+/*     $NetBSD: uvm_anon.c,v 1.76 2020/03/20 19:08:54 ad Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.75 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.76 2020/03/20 19:08:54 ad Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -95,26 +95,28 @@
 }
 
 /*
- * uvm_anon_dispose: free any resident page or swap resources of anon.
+ * uvm_anfree: free a single anon structure
  *
  * => anon must be removed from the amap (if anon was in an amap).
- * => amap must be locked; we may drop and re-acquire the lock here.
+ * => amap must be locked, if anon was owned by amap.
+ * => we may drop and re-acquire the lock here (to break loans).
  */
-static bool
-uvm_anon_dispose(struct vm_anon *anon)
+void
+uvm_anfree(struct vm_anon *anon)
 {
-       struct vm_page *pg = anon->an_page;
+       struct vm_page *pg = anon->an_page, *pg2 __diagused;
 


