Source-Changes-HG archive


[src/netbsd-6]: src/sys/uvm Pull up following revision(s) (requested by rmind...



details:   https://anonhg.NetBSD.org/src/rev/e3afb098428a
branches:  netbsd-6
changeset: 775499:e3afb098428a
user:      riz <riz%NetBSD.org@localhost>
date:      Thu Nov 22 20:25:21 2012 +0000

description:
Pull up following revision(s) (requested by rmind in ticket #694):
        sys/uvm/uvm_aobj.h: revision 1.22
        sys/uvm/uvm_aobj.c: revision 1.117
        sys/uvm/uvm_aobj.c: revision 1.118
        sys/uvm/uvm_aobj.c: revision 1.119
        sys/uvm/uvm_object.h: revision 1.33
- Describe uvm_aobj and the lock order.
- Remove unnecessary uao_dropswap_range1() wrapper.
- KNF.  Sprinkle some __cacheline_aligned.
- Manage anonymous UVM object reference count with atomic ops (see the first
  sketch below).
- Fix an old bug where a thread could lock against itself (uao_detach_locked()
  is called from uao_swap_off() with uao_list_lock already acquired).  Also
  remove the try-lock dance in uao_swap_off(), since the lock order changes
  (see the second sketch below).
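
A hedged illustration of the atomic reference counting noted above: a minimal
sketch, not the committed code.  The struct and function names are invented;
atomic_inc_uint() and atomic_dec_uint_nv() are the real sys/atomic.h
primitives (see atomic_ops(9)):

#include <sys/types.h>
#include <sys/atomic.h>

struct refobj {
        volatile unsigned int refs;     /* reference count */
};

static void
refobj_reference(struct refobj *obj)
{
        /* A single atomic read-modify-write; no object lock needed. */
        atomic_inc_uint(&obj->refs);
}

static bool
refobj_detach(struct refobj *obj)
{
        /*
         * atomic_dec_uint_nv() returns the post-decrement value, so
         * exactly one caller observes zero and may free the object.
         */
        return atomic_dec_uint_nv(&obj->refs) == 0;
}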

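The lock-order change is what lets the mutex_tryenter() retry loop go away:
once the order uao_list_lock -> uvm_object::vmobjlock is fixed, a walker
holding the list lock may take each object lock directly.  A hedged sketch of
the pattern with invented names; mutex_enter()/mutex_exit() and LIST_FOREACH()
are the real mutex(9) and queue(3) interfaces:

#include <sys/mutex.h>
#include <sys/queue.h>

struct lobj {
        kmutex_t        *l_lock;        /* per-object lock */
        LIST_ENTRY(lobj) l_entry;       /* global list linkage */
};

static LIST_HEAD(, lobj) lobj_list;
static kmutex_t lobj_list_lock;

static void
lobj_walk(void)
{
        struct lobj *o;

        mutex_enter(&lobj_list_lock);
        LIST_FOREACH(o, &lobj_list, l_entry) {
                /*
                 * The list lock is taken first, and the documented
                 * order permits taking the object lock under it, so
                 * no mutex_tryenter()/restart dance is needed.
                 */
                mutex_enter(o->l_lock);
                /* ... per-object work ... */
                mutex_exit(o->l_lock);
        }
        mutex_exit(&lobj_list_lock);
}
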
diffstat:

 sys/uvm/uvm_aobj.c   |  363 +++++++++++++++++----------------------------------
 sys/uvm/uvm_aobj.h   |   36 ++--
 sys/uvm/uvm_object.h |    5 +-
 3 files changed, 140 insertions(+), 264 deletions(-)

diffs (truncated from 714 to 300 lines):

diff -r 9aca7fe6570e -r e3afb098428a sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c        Thu Nov 22 18:51:48 2012 +0000
+++ b/sys/uvm/uvm_aobj.c        Thu Nov 22 20:25:21 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_aobj.c,v 1.116 2011/09/06 16:41:55 matt Exp $      */
+/*     $NetBSD: uvm_aobj.c,v 1.116.8.1 2012/11/22 20:25:21 riz Exp $   */
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,80 +38,69 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116 2011/09/06 16:41:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.8.1 2012/11/22 20:25:21 riz Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/pool.h>
+#include <sys/atomic.h>
 
 #include <uvm/uvm.h>
 
 /*
- * an aobj manages anonymous-memory backed uvm_objects.   in addition
- * to keeping the list of resident pages, it also keeps a list of
- * allocated swap blocks.  depending on the size of the aobj this list
- * of allocated swap blocks is either stored in an array (small objects)
- * or in a hash table (large objects).
- */
-
-/*
- * local structures
+ * An anonymous UVM object (aobj) manages anonymous-memory.  In addition to
+ * keeping the list of resident pages, it may also keep a list of allocated
+ * swap blocks.  Depending on the size of the object, this list is either
+ * stored in an array (small objects) or in a hash table (large objects).
+ *
+ * Lock order
+ *
+ *     uao_list_lock ->
+ *             uvm_object::vmobjlock
  */
 
 /*
- * for hash tables, we break the address space of the aobj into blocks
- * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
- * be a power of two.
+ * Note: for hash tables, we break the address space of the aobj into blocks
+ * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
  */
 
-#define UAO_SWHASH_CLUSTER_SHIFT 4
-#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
+#define        UAO_SWHASH_CLUSTER_SHIFT        4
+#define        UAO_SWHASH_CLUSTER_SIZE         (1 << UAO_SWHASH_CLUSTER_SHIFT)
 
-/* get the "tag" for this page index */
-#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
-       ((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
+/* Get the "tag" for this page index. */
+#define        UAO_SWHASH_ELT_TAG(idx)         ((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
+#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
+    ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))
 
-#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
-       ((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))
+/* Given an ELT and a page index, find the swap slot. */
+#define        UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
+    ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])
 
-/* given an ELT and a page index, find the swap slot */
-#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
-       ((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])
+/* Given an ELT, return its pageidx base. */
+#define        UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
+    ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)
 
-/* given an ELT, return its pageidx base */
-#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
-       ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
+/* The hash function. */
+#define        UAO_SWHASH_HASH(aobj, idx) \
+    (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
+    & (aobj)->u_swhashmask)])
 
 /*
- * the swhash hash function
- */
-
-#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
-       (&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
-                           & (AOBJ)->u_swhashmask)])
-
-/*
- * the swhash threshhold determines if we will use an array or a
+ * The threshold which determines whether we will use an array or a
  * hash table to store the list of allocated swap blocks.
  */
-
-#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
-#define UAO_USES_SWHASH(AOBJ) \
-       ((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)        /* use hash? */
+#define        UAO_SWHASH_THRESHOLD            (UAO_SWHASH_CLUSTER_SIZE * 4)
+#define        UAO_USES_SWHASH(aobj) \
+    ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)
 
-/*
- * the number of buckets in a swhash, with an upper bound
- */
-
-#define UAO_SWHASH_MAXBUCKETS 256
-#define UAO_SWHASH_BUCKETS(AOBJ) \
-       (MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
-            UAO_SWHASH_MAXBUCKETS))
+/* The number of buckets in a hash, with an upper bound. */
+#define        UAO_SWHASH_MAXBUCKETS           256
+#define        UAO_SWHASH_BUCKETS(aobj) \
+    (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
 
 /*
  * uao_swhash_elt: when a hash table is being used, this structure defines
@@ -135,7 +124,7 @@
  * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
  * Note: pages for this pool must not come from a pageable kernel map.
  */
-static struct pool uao_swhash_elt_pool;
+static struct pool     uao_swhash_elt_pool     __cacheline_aligned;
 
 /*
  * uvm_aobj: the actual anon-backed uvm_object
@@ -159,25 +148,17 @@
        LIST_ENTRY(uvm_aobj) u_list;    /* global list of aobjs */
 };
 
-/*
- * local functions
- */
-
 static void    uao_free(struct uvm_aobj *);
 static int     uao_get(struct uvm_object *, voff_t, struct vm_page **,
                    int *, int, vm_prot_t, int, int);
 static int     uao_put(struct uvm_object *, voff_t, voff_t, int);
 
-static void uao_detach_locked(struct uvm_object *);
-static void uao_reference_locked(struct uvm_object *);
-
 #if defined(VMSWAP)
 static struct uao_swhash_elt *uao_find_swhash_elt
     (struct uvm_aobj *, int, bool);
 
 static bool uao_pagein(struct uvm_aobj *, int, int);
 static bool uao_pagein_page(struct uvm_aobj *, int);
-static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
 #endif /* defined(VMSWAP) */
 
 /*
@@ -197,12 +178,8 @@
  * uao_list: global list of active aobjs, locked by uao_list_lock
  */
 
-static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
-static kmutex_t uao_list_lock;
-
-/*
- * functions
- */
+static LIST_HEAD(aobjlist, uvm_aobj) uao_list  __cacheline_aligned;
+static kmutex_t                uao_list_lock           __cacheline_aligned;
 
 /*
  * hash table/array related functions
@@ -272,7 +249,7 @@
         */
 
        if (aobj->u_flags & UAO_FLAG_NOSWAP)
-               return(0);
+               return 0;
 
        /*
         * if hashing, look in hash table.
@@ -280,17 +257,14 @@
 
        if (UAO_USES_SWHASH(aobj)) {
                elt = uao_find_swhash_elt(aobj, pageidx, false);
-               if (elt)
-                       return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
-               else
-                       return(0);
+               return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
        }
 
        /*
         * otherwise, look in the array
         */
 
-       return(aobj->u_swslots[pageidx]);
+       return aobj->u_swslots[pageidx];
 }
 
 /*
@@ -319,11 +293,8 @@
         */
 
        if (aobj->u_flags & UAO_FLAG_NOSWAP) {
-               if (slot == 0)
-                       return(0);
-
-               printf("uao_set_swslot: uobj = %p\n", uobj);
-               panic("uao_set_swslot: NOSWAP object");
+               KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
+               return 0;
        }
 
        /*
@@ -368,7 +339,7 @@
                oldslot = aobj->u_swslots[pageidx];
                aobj->u_swslots[pageidx] = slot;
        }
-       return (oldslot);
+       return oldslot;
 }
 
 #endif /* defined(VMSWAP) */
@@ -386,12 +357,11 @@
 static void
 uao_free(struct uvm_aobj *aobj)
 {
+       struct uvm_object *uobj = &aobj->u_obj;
 
-#if defined(VMSWAP)
-       uao_dropswap_range1(aobj, 0, 0);
-#endif /* defined(VMSWAP) */
-
-       mutex_exit(aobj->u_obj.vmobjlock);
+       KASSERT(mutex_owned(uobj->vmobjlock));
+       uao_dropswap_range(uobj, 0, 0);
+       mutex_exit(uobj->vmobjlock);
 
 #if defined(VMSWAP)
        if (UAO_USES_SWHASH(aobj)) {
@@ -415,7 +385,7 @@
         * finally free the aobj itself
         */
 
-       uvm_obj_destroy(&aobj->u_obj, true);
+       uvm_obj_destroy(uobj, true);
        kmem_free(aobj, sizeof(struct uvm_aobj));
 }
 
@@ -493,7 +463,7 @@
 
                if (flags) {
                        aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
-                       return(&aobj->u_obj);
+                       return &aobj->u_obj;
                }
        }
 
@@ -519,8 +489,6 @@
        return(&aobj->u_obj);
 }
 
-
-
 /*
  * uao_init: set up aobj pager subsystem
  *
@@ -542,124 +510,62 @@
 }
 
 /*
- * uao_reference: add a ref to an aobj
- *
- * => aobj must be unlocked
- * => just lock it and call the locked version
+ * uao_reference: hold a reference to an anonymous UVM object.
  */
-
 void
 uao_reference(struct uvm_object *uobj)
 {
-
-       /*
-        * kernel_object already has plenty of references, leave it alone.
-        */
-
-       if (UVM_OBJ_IS_KERN_OBJECT(uobj))
+       /* Kernel object is persistent. */
+       if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
                return;
-
-       mutex_enter(uobj->vmobjlock);
-       uao_reference_locked(uobj);
-       mutex_exit(uobj->vmobjlock);



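One further pattern from the uao_free() hunk above is worth noting: the
function now asserts that its caller holds the object lock instead of taking
the lock itself, which is what lets it call uao_dropswap_range() directly.  A
hedged sketch of the idiom with invented names; KASSERT() and mutex_owned()
are the real NetBSD interfaces:

#include <sys/systm.h>          /* KASSERT() */
#include <sys/mutex.h>

struct fobj {
        kmutex_t *f_lock;       /* per-object lock, held by caller */
        /* ... per-object resources ... */
};

/* Caller must hold obj->f_lock; the lock is released on return. */
static void
fobj_free(struct fobj *obj)
{
        KASSERT(mutex_owned(obj->f_lock));
        /* ... tear down state that needs the lock ... */
        mutex_exit(obj->f_lock);
        /* ... free remaining state and the object itself ... */
}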