Source-Changes-HG archive

[src/rmind-uvmplock]: src/sys/uvm Add ubc_purge() and purge/deassociate any r...



details:   https://anonhg.NetBSD.org/src/rev/f82b8da03b38
branches:  rmind-uvmplock
changeset: 753055:f82b8da03b38
user:      rmind <rmind%NetBSD.org@localhost>
date:      Mon Apr 26 02:20:59 2010 +0000

description:
Add ubc_purge() and purge/deassociate any related UBC entries during
object (usually, vnode) destruction.  Since the object lock (and thus
the object itself) is required to enter or remove mappings, the object
is no longer allowed to disappear while any UBC entries remain.

From original patch by ad@ with some modifications.
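
In outline, each uvm_object gains a list head (uo_ubc) for its UBC mapping
windows; ubc_alloc() links windows onto it, and the new ubc_purge() drains
it from uvm_obj_destroy() before the object lock is destroyed.  Below is a
minimal sketch of that life cycle only; obj_setup() and obj_teardown() are
placeholder names, not the real NetBSD functions shown in the diff:

/*
 * Sketch only: the gist of the change is a per-object list of UBC
 * windows that is set up with the object and drained at teardown.
 * obj_setup()/obj_teardown() stand in for uvm_obj_init()/uvm_obj_destroy().
 */
static void
obj_setup(struct uvm_object *uo)
{

	LIST_INIT(&uo->uo_ubc);		/* no UBC windows attached yet */
}

static void
obj_teardown(struct uvm_object *uo)
{

	KASSERT(uo->uo_npages == 0);	/* pages must already be freed */

	/* Deassociate any cached UBC windows before the lock goes away. */
	if (!LIST_EMPTY(&uo->uo_ubc)) {
		ubc_purge(uo);
	}
	mutex_destroy(uo->vmobjlock);
}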

diffstat:

 sys/uvm/uvm_bio.c    |  44 +++++++++++++++++++++++++++++++++++---------
 sys/uvm/uvm_extern.h |   3 ++-
 sys/uvm/uvm_object.c |  16 +++++++++++-----
 sys/uvm/uvm_object.h |   4 +++-
 4 files changed, 51 insertions(+), 16 deletions(-)

diffs (209 lines):

diff -r 5d4aedf8bcab -r f82b8da03b38 sys/uvm/uvm_bio.c
--- a/sys/uvm/uvm_bio.c Sun Apr 25 22:48:26 2010 +0000
+++ b/sys/uvm/uvm_bio.c Mon Apr 26 02:20:59 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_bio.c,v 1.68.4.3 2010/04/25 22:48:26 rmind Exp $   */
+/*     $NetBSD: uvm_bio.c,v 1.68.4.4 2010/04/26 02:20:59 rmind Exp $   */
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.68.4.3 2010/04/25 22:48:26 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.68.4.4 2010/04/26 02:20:59 rmind Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -79,8 +79,7 @@
 #define UMAP_PAGES_LOCKED      0x0001
 #define UMAP_MAPPING_CACHED    0x0002
 
-struct ubc_map
-{
+struct ubc_map {
        struct uvm_object *     uobj;           /* mapped object */
        voff_t                  offset;         /* offset into uobj */
        voff_t                  writeoff;       /* write offset */
@@ -91,10 +90,10 @@
 
        LIST_ENTRY(ubc_map)     hash;           /* hash table */
        TAILQ_ENTRY(ubc_map)    inactive;       /* inactive queue */
+       LIST_ENTRY(ubc_map)     list;           /* per-object list */
 };
 
-static struct ubc_object
-{
+static struct ubc_object {
        struct uvm_object uobj;         /* glue for uvm_map() */
        char *kva;                      /* where ubc_object is mapped */
        struct ubc_map *umap;           /* array of ubc_map's */
@@ -104,7 +103,6 @@
 
        TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
                                        /* inactive queues for ubc_map's */
-
 } ubc_object;
 
 const struct uvm_pagerops ubc_pager = {
@@ -449,8 +447,8 @@
 
        /*
         * The object is already referenced, so we do not need to add a ref.
+        * Lock order: UBC object -> ubc_map::uobj.
         */
-
        mutex_enter(ubc_object.uobj.vmobjlock);
 again:
        umap = ubc_find_mapping(uobj, umap_offset);
@@ -474,6 +472,7 @@
 
                if (oobj != NULL) {
                        LIST_REMOVE(umap, hash);
+                       LIST_REMOVE(umap, list);
                        if (umap->flags & UMAP_MAPPING_CACHED) {
                                umap->flags &= ~UMAP_MAPPING_CACHED;
                                mutex_enter(oobj->vmobjlock);
@@ -489,6 +488,7 @@
                umap->offset = umap_offset;
                LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
                    umap, hash);
+               LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
        } else {
                UBC_EVCNT_INCR(wincachehit);
                va = UBC_UMAP_ADDR(umap);
@@ -709,7 +709,6 @@
        return error;
 }
 
-
 /*
  * uvm_vnp_zerorange:  set a range of bytes in a file to zero.
  */
@@ -737,3 +736,30 @@
                len -= bytelen;
        }
 }
+
+/*
+ * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
+ */
+
+void
+ubc_purge(struct uvm_object *uobj)
+{
+       struct ubc_map *umap;
+       vaddr_t va;
+
+       KASSERT(uobj->uo_npages == 0);
+
+       mutex_enter(ubc_object.uobj.vmobjlock);
+       while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
+               KASSERT(umap->refcount == 0);
+               for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
+                       KASSERT(!pmap_extract(pmap_kernel(),
+                           va + UBC_UMAP_ADDR(umap), NULL));
+               }
+               LIST_REMOVE(umap, list);
+               LIST_REMOVE(umap, hash);
+               umap->flags &= ~UMAP_MAPPING_CACHED;
+               umap->uobj = NULL;
+       }
+       mutex_exit(ubc_object.uobj.vmobjlock);
+}
diff -r 5d4aedf8bcab -r f82b8da03b38 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h      Sun Apr 25 22:48:26 2010 +0000
+++ b/sys/uvm/uvm_extern.h      Mon Apr 26 02:20:59 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_extern.h,v 1.162.2.3 2010/04/23 21:18:00 rmind Exp $       */
+/*     $NetBSD: uvm_extern.h,v 1.162.2.4 2010/04/26 02:20:59 rmind Exp $       */
 
 /*
  *
@@ -579,6 +579,7 @@
 void                   ubc_release(void *, int);
 int                    ubc_uiomove(struct uvm_object *, struct uio *, vsize_t,
                            int, int);
+void                   ubc_purge(struct uvm_object *);
 
 /* uvm_emap.c */
 void                   uvm_emap_sysinit(void);
diff -r 5d4aedf8bcab -r f82b8da03b38 sys/uvm/uvm_object.c
--- a/sys/uvm/uvm_object.c      Sun Apr 25 22:48:26 2010 +0000
+++ b/sys/uvm/uvm_object.c      Mon Apr 26 02:20:59 2010 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: uvm_object.c,v 1.7.4.3 2010/04/24 21:24:03 rmind Exp $ */
+/*     $NetBSD: uvm_object.c,v 1.7.4.4 2010/04/26 02:20:59 rmind Exp $ */
 
 /*
- * Copyright (c) 2006 The NetBSD Foundation, Inc.
+ * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.3 2010/04/24 21:24:03 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.4 2010/04/26 02:20:59 rmind Exp $");
 
 #include "opt_ddb.h"
 
@@ -49,8 +49,8 @@
 #include <uvm/uvm.h>
 #include <uvm/uvm_ddb.h>
 
-/* We will fetch this page count per step */
-#define        FETCH_PAGECOUNT 16
+/* Page count to fetch per single step. */
+#define        FETCH_PAGECOUNT                 16
 
 /*
  * uvm_obj_init: initialize UVM memory object.
@@ -68,6 +68,7 @@
        }
        uo->pgops = ops;
        TAILQ_INIT(&uo->memq);
+       LIST_INIT(&uo->uo_ubc);
        uo->uo_npages = 0;
        uo->uo_refs = refs;
        rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
@@ -83,6 +84,11 @@
        void *tmp = NULL;
        KASSERT(rb_tree_find_node_geq(&uo->rb_tree, &tmp) == NULL);
 #endif
+       /* Purge any UBC entries associated with this object. */
+       if (__predict_false(!LIST_EMPTY(&uo->uo_ubc))) {
+               ubc_purge(uo);
+       }
+       /* Finally, safe to destroy the lock. */
        if (lockptr) {
                KASSERT(uo->vmobjlock == lockptr);
                mutex_destroy(uo->vmobjlock);
diff -r 5d4aedf8bcab -r f82b8da03b38 sys/uvm/uvm_object.h
--- a/sys/uvm/uvm_object.h      Sun Apr 25 22:48:26 2010 +0000
+++ b/sys/uvm/uvm_object.h      Mon Apr 26 02:20:59 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_object.h,v 1.26.20.1 2010/03/16 15:38:18 rmind Exp $       */
+/*     $NetBSD: uvm_object.h,v 1.26.20.2 2010/04/26 02:20:59 rmind Exp $       */
 
 /*
  *
@@ -41,6 +41,7 @@
  * uvm_object.h
  */
 
+#include <sys/queue.h>
 #include <sys/rb.h>
 
 /*
@@ -54,6 +55,7 @@
        int                     uo_npages;      /* # of pages in memq */
        unsigned                uo_refs;        /* reference count */
        struct rb_tree          rb_tree;        /* tree of pages */
+       LIST_HEAD(,ubc_map)     uo_ubc;         /* ubc mappings */
 };
 
 /*
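
The purge runs with ubc_object's lock held and asserts that each remaining
window is unreferenced and has no pages left in the kernel pmap.  As an
illustration only, that mapping check could be expressed as a small helper;
ubc_window_unmapped() below is hypothetical and simply mirrors the KASSERT
loop in the ubc_purge() hunk above:

/*
 * Hypothetical helper (not part of the patch): returns true when no
 * page of the given UBC window is still entered in the kernel pmap.
 * Mirrors the pmap_extract() loop behind the KASSERTs in ubc_purge().
 */
static bool
ubc_window_unmapped(struct ubc_map *umap)
{
	vaddr_t va;

	for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(),
		    UBC_UMAP_ADDR(umap) + va, NULL)) {
			return false;	/* leftover kernel mapping */
		}
	}
	return true;
}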


