Source-Changes-HG archive


[src/uebayasi-xip]: src/sys/uvm Sync with HEAD.



details:   https://anonhg.NetBSD.org/src/rev/39e19a0ab6c0
branches:  uebayasi-xip
changeset: 751598:39e19a0ab6c0
user:      uebayasi <uebayasi%NetBSD.org@localhost>
date:      Wed Feb 24 16:22:58 2010 +0000

description:
Sync with HEAD.

diffstat:

 sys/uvm/uvm_fault.c |  663 ++++++++++++++++++++++++++++-----------------------
 1 files changed, 363 insertions(+), 300 deletions(-)

diffs (truncated from 963 to 300 lines):

diff -r 7d16babcd834 -r 39e19a0ab6c0 sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c       Wed Feb 24 01:19:37 2010 +0000
+++ b/sys/uvm/uvm_fault.c       Wed Feb 24 16:22:58 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_fault.c,v 1.166.2.4 2010/02/23 07:11:46 uebayasi Exp $     */
+/*     $NetBSD: uvm_fault.c,v 1.166.2.5 2010/02/24 16:22:58 uebayasi Exp $     */
 
 /*
  *
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.166.2.4 2010/02/23 07:11:46 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.166.2.5 2010/02/24 16:22:58 uebayasi Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_device_page.h"
@@ -702,6 +702,7 @@
        bool wire_paging;
        bool maxprot;
        bool cow_now;
+       bool promote;
 };
 
 static inline int      uvm_fault_check(
@@ -730,29 +731,20 @@
                            struct uvm_faultinfo *, struct uvm_faultctx *,
                            struct uvm_object *, struct vm_anon *,
                            struct vm_page *, struct vm_anon *);
-static inline int      uvm_fault_upper_done(
+static inline void     uvm_fault_upper_done(
                            struct uvm_faultinfo *, struct uvm_faultctx *,
                            struct uvm_object *, struct vm_anon *,
-                           struct vm_page *, struct vm_anon *);
+                           struct vm_page *);
 
 static int             uvm_fault_lower(
                            struct uvm_faultinfo *, struct uvm_faultctx *,
                            struct vm_page **);
-static inline int      uvm_fault_lower_special(
-                           struct uvm_faultinfo *, struct uvm_faultctx *,
-                           struct vm_page **);
-static inline          int uvm_fault_lower_lookup(
+static inline void     uvm_fault_lower_lookup(
                            struct uvm_faultinfo *, struct uvm_faultctx *,
                            struct vm_page **);
 static inline void     uvm_fault_lower_neighbor(
                            struct uvm_faultinfo *, struct uvm_faultctx *,
                            vaddr_t, struct vm_page *, bool);
-static inline int      uvm_fault_lower_generic(
-                           struct uvm_faultinfo *, struct uvm_faultctx *,
-                           struct vm_page **);
-static inline int      uvm_fault_lower1(
-                           struct uvm_faultinfo *, struct uvm_faultctx *,
-                           struct uvm_object *, struct vm_page *);
 static inline int      uvm_fault_lower_io(
                            struct uvm_faultinfo *, struct uvm_faultctx *,
                            struct uvm_object **, struct vm_page **);
@@ -771,10 +763,10 @@
                            struct uvm_object *,
                            struct vm_anon *, struct vm_page *,
                            struct vm_page *);
-static inline int      uvm_fault_lower_done(
+static inline void     uvm_fault_lower_done(
                            struct uvm_faultinfo *, struct uvm_faultctx *,
-                           struct uvm_object *,
-                           struct vm_anon *, struct vm_page *);
+                           struct uvm_object *, struct vm_anon *,
+                           struct vm_page *);
 
 int
 uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
@@ -826,8 +818,29 @@
 
                if (pages[flt.centeridx] == PGO_DONTCARE)
                        error = uvm_fault_upper(&ufi, &flt, anons);
-               else
-                       error = uvm_fault_lower(&ufi, &flt, pages);
+               else {
+                       struct uvm_object * const uobj = ufi.entry->object.uvm_obj;
+
+                       if (uobj && uobj->pgops->pgo_fault != NULL) {
+                               /*
+                                * invoke "special" fault routine.
+                                */
+                               mutex_enter(&uobj->vmobjlock);
+                               /* locked: maps(read), amap(if there), uobj */
+                               error = uobj->pgops->pgo_fault(&ufi,
+                                   flt.startva, pages, flt.npages,
+                                   flt.centeridx, flt.access_type,
+                                   PGO_LOCKED|PGO_SYNCIO);
+
+                               /* locked: nothing, pgo_fault has unlocked everything */
+
+                               /*
+                                * object fault routine responsible for pmap_update().
+                                */
+                       } else {
+                               error = uvm_fault_lower(&ufi, &flt, pages);
+                       }
+               }
        }
 
        if (flt.anon_spare != NULL) {
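
The hunk above inlines the dispatch formerly split across uvm_fault_lower_special():
if the backing object supplies its own pgo_fault hook, call it with everything
still locked; otherwise fall through to the generic lower-layer handler.  A
minimal, self-contained C sketch of that dispatch pattern follows; the toy_*
names are illustrative stand-ins, not the real UVM types.

#include <stdio.h>
#include <stddef.h>

struct toy_ctx { const char *name; };

struct toy_pagerops {
	/* optional "special" fault routine; NULL means generic path */
	int (*pgo_fault)(struct toy_ctx *);
};

struct toy_uobj {
	const struct toy_pagerops *pgops;
};

static int
toy_fault_lower(struct toy_ctx *ctx)
{
	printf("%s: generic lower-layer fault path\n", ctx->name);
	return 0;
}

static int
toy_special_fault(struct toy_ctx *ctx)
{
	printf("%s: pager-specific fault routine\n", ctx->name);
	return 0;
}

/* prefer the object's own fault hook when it provides one */
static int
toy_fault_dispatch(struct toy_uobj *uobj, struct toy_ctx *ctx)
{
	if (uobj != NULL && uobj->pgops->pgo_fault != NULL)
		return uobj->pgops->pgo_fault(ctx);
	return toy_fault_lower(ctx);
}

int
main(void)
{
	static const struct toy_pagerops special = { toy_special_fault };
	static const struct toy_pagerops generic = { NULL };
	struct toy_uobj a = { &special }, b = { &generic };
	struct toy_ctx ctx = { "fault" };

	toy_fault_dispatch(&a, &ctx);	/* takes the pgo_fault hook */
	toy_fault_dispatch(&b, &ctx);	/* falls back to generic path */
	return 0;
}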
@@ -837,6 +850,21 @@
        return error;
 }
 
+/*
+ * uvm_fault_check: check prot, handle needs-copy, etc.
+ *
+ *     1. lookup entry.
+ *     2. check protection.
+ *     3. adjust fault condition (mainly for simulated fault).
+ *     4. handle needs-copy (lazy amap copy).
+ *     5. establish range of interest for neighbor fault (aka pre-fault).
+ *     6. look up anons (if amap exists).
+ *     7. flush pages (if MADV_SEQUENTIAL)
+ *
+ * => called with nothing locked.
+ * => if we fail (result != 0) we unlock everything.
+ */
+
 static int
 uvm_fault_check(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
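
The new comment block documents the seven steps uvm_fault_check walks through.
As a hedged illustration of step 2 (the protection check), the stand-alone
sketch below shows the usual "requested access must be a subset of the entry's
protection" test; the TOY_PROT_* flags are assumptions for illustration, not
the real VM_PROT_* values.

#include <errno.h>
#include <stdio.h>

#define TOY_PROT_READ	0x01
#define TOY_PROT_WRITE	0x02

/* the attempted access must be fully covered by the entry's protection */
static int
toy_check_prot(unsigned int entry_prot, unsigned int access_type)
{
	if ((entry_prot & access_type) != access_type)
		return EACCES;		/* protection violation */
	return 0;
}

int
main(void)
{
	/* write fault on a read-only entry fails; read fault succeeds */
	printf("%d %d\n", toy_check_prot(TOY_PROT_READ, TOY_PROT_WRITE),
	    toy_check_prot(TOY_PROT_READ, TOY_PROT_READ));
	return 0;
}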
@@ -900,6 +928,8 @@
                flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
        }
 
+       flt->promote = false;
+
        /*
         * handle "needs_copy" case.   if we need to copy the amap we will
         * have to drop our readlock and relock it with a write lock.  (we
@@ -1032,11 +1062,6 @@
                /* now forget about the backpages */
                if (amap)
                        *ranons += nback;
-#if 0
-               /* XXXUEBS */
-               if (uobj)
-                       *rpages += nback;
-#endif
                flt->startva += (nback << PAGE_SHIFT);
                flt->npages -= nback;
                flt->centeridx = 0;
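
The hunk above discards the neighbor pages behind the faulting address (the
"backpages") and re-centers the range of interest.  A small arithmetic sketch
of that adjustment, with TOY_PAGE_SHIFT standing in for the platform's
PAGE_SHIFT:

#include <assert.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT	12	/* 4 KiB pages assumed for illustration */

struct toy_range {
	unsigned long startva;	/* first virtual address in the range */
	int npages;		/* number of pages in the range */
	int centeridx;		/* index of the faulting page */
};

/* drop the nback pages before the center and re-center the range */
static void
toy_drop_backpages(struct toy_range *r, int nback)
{
	assert(nback == r->centeridx);	/* everything before the center */
	r->startva += (unsigned long)nback << TOY_PAGE_SHIFT;
	r->npages -= nback;
	r->centeridx = 0;	/* the faulting page is now first */
}

int
main(void)
{
	struct toy_range r = { 0x10000UL, 5, 2 };

	toy_drop_backpages(&r, 2);
	printf("startva=0x%lx npages=%d centeridx=%d\n",
	    r.startva, r.npages, r.centeridx);	/* 0x12000 3 0 */
	return 0;
}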
@@ -1049,6 +1074,17 @@
        return 0;
 }
 
+/*
+ * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
+ *
+ * iterate range of interest:
+ *     1. check if h/w mapping exists.  if yes, we don't care
+ *     2. check if anon exists.  if not, page is lower.
+ *     3. if anon exists, enter h/w mapping for neighbors.
+ *
+ * => called with amap locked (if exists).
+ */
+
 static int
 uvm_fault_upper_lookup(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
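
The comment block above describes the walk uvm_fault_upper_lookup performs
over the range of interest.  The sketch below models that loop shape only:
already-mapped addresses are skipped, slots without an anon are left to the
lower layer, and existing anons are mapped in opportunistically.  The toy_*
state arrays are illustrative assumptions, not the real amap or pmap lookups.

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SIZE	4096UL	/* assumed page size for illustration */
#define TOY_NPAGES	4

/* toy state: which slots are already mapped, which have an anon */
static bool mapped[TOY_NPAGES]   = { true,  false, false, false };
static bool has_anon[TOY_NPAGES] = { false, true,  false, true  };

static void
toy_upper_lookup(unsigned long startva, int npages, int centeridx)
{
	unsigned long va = startva;
	int i;

	for (i = 0; i < npages; i++, va += TOY_PAGE_SIZE) {
		if (i == centeridx)
			continue;	/* handled by the main fault path */
		if (mapped[i])
			continue;	/* h/w mapping exists: nothing to do */
		if (!has_anon[i])
			continue;	/* lower layer will provide the page */
		printf("enter neighbor mapping at 0x%lx\n", va);
	}
}

int
main(void)
{
	toy_upper_lookup(0x10000UL, TOY_NPAGES, 3);
	return 0;
}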
@@ -1101,8 +1137,13 @@
                        struct vm_anon *anon = anons[lcv];
 
                        mutex_enter(&anon->an_lock);
-                       uvm_fault_upper_neighbor(ufi, flt, currva,
-                           anon->an_page, anon->an_ref > 1);
+                       struct vm_page *pg = anon->an_page;
+
+                       /* ignore loaned and busy pages */
+                       if (pg != NULL && pg->loan_count == 0 &&
+                           (pg->flags & PG_BUSY) == 0)
+                               uvm_fault_upper_neighbor(ufi, flt, currva,
+                                   pg, anon->an_ref > 1);
                        mutex_exit(&anon->an_lock);
                }
        }
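
The hunk above hoists the loaned/busy filter into the caller, where the anon
lock is held.  A compact restatement of that predicate, with TOY_PG_BUSY as a
stand-in for the real PG_BUSY flag:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PG_BUSY	0x01	/* stand-in for the real PG_BUSY flag */

struct toy_page {
	int loan_count;		/* nonzero while the page is loaned out */
	unsigned int flags;
};

/* a neighbor is mapped opportunistically only if it is present,
 * not loaned, and not busy with I/O */
static bool
toy_neighbor_ok(const struct toy_page *pg)
{
	return pg != NULL && pg->loan_count == 0 &&
	    (pg->flags & TOY_PG_BUSY) == 0;
}

int
main(void)
{
	struct toy_page ok = { 0, 0 }, busy = { 0, TOY_PG_BUSY };

	printf("%d %d %d\n", toy_neighbor_ok(&ok),
	    toy_neighbor_ok(&busy), toy_neighbor_ok(NULL));
	return 0;
}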
@@ -1124,6 +1165,12 @@
        return 0;
 }
 
+/*
+ * uvm_fault_upper_neighbor: enter single lower neighbor page.
+ *
+ * => called with amap and anon locked.
+ */
+
 static void
 uvm_fault_upper_neighbor(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
@@ -1131,10 +1178,7 @@
 {
        UVMHIST_FUNC("uvm_fault_upper_neighbor"); UVMHIST_CALLED(maphist);
 
-       /* ignore loaned and busy pages */
-       if (pg == NULL || pg->loan_count != 0 ||
-           (pg->flags & PG_BUSY) != 0)
-               goto uvm_fault_upper_lookup_enter_done;
+       /* locked: amap, anon */
 
        mutex_enter(&uvm_pageqlock);
        uvm_pageenqueue(pg);
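
The body continues in the next hunk, entering the mapping with PMAP_CANFAIL:
for an opportunistically mapped neighbor, a mapping failure (say, the pmap is
out of resources) is simply tolerated rather than propagated.  A toy model of
that best-effort pattern; toy_pmap_enter() is an illustrative stand-in, not
the real pmap API.

#include <stdbool.h>
#include <stdio.h>

static bool
toy_pmap_enter(unsigned long va)
{
	printf("map 0x%lx\n", va);
	return true;		/* pretend the mapping succeeded */
}

static void
toy_enter_neighbor(unsigned long va)
{
	if (!toy_pmap_enter(va)) {
		/* best effort only: the real fault will retry later */
	}
}

int
main(void)
{
	toy_enter_neighbor(0x2000UL);
	return 0;
}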
@@ -1156,237 +1200,18 @@
            flt->enter_prot,
            PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
 
-uvm_fault_upper_lookup_enter_done:
        pmap_update(ufi->orig_map->pmap);
 }
 
-static int
-uvm_fault_lower(
-       struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-       struct vm_page **pages)
-{
-       struct uvm_object *uobj = ufi->entry->object.uvm_obj;
-       int error;
-
-       /*
-        * if the desired page is not shadowed by the amap and we have a
-        * backing object, then we check to see if the backing object would
-        * prefer to handle the fault itself (rather than letting us do it
-        * with the usual pgo_get hook).  the backing object signals this by
-        * providing a pgo_fault routine.
-        */
-
-       if (uobj && uobj->pgops->pgo_fault != NULL) {
-               error = uvm_fault_lower_special(ufi, flt, pages);
-       } else {
-               error = uvm_fault_lower_generic(ufi, flt, pages);
-       }
-       return error;
-}
-
-static int
-uvm_fault_lower_special(
-       struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-       struct vm_page **pages)
-{
-       struct uvm_object *uobj = ufi->entry->object.uvm_obj;
-       int error;
-
-       mutex_enter(&uobj->vmobjlock);
-       /* locked: maps(read), amap (if there), uobj */
-       error = uobj->pgops->pgo_fault(ufi, flt->startva, pages, flt->npages,
-           flt->centeridx, flt->access_type, PGO_LOCKED|PGO_SYNCIO);
-
-       /* locked: nothing, pgo_fault has unlocked everything */
-
-       if (error == ERESTART)
-               error = ERESTART;               /* try again! */
-       /*
-        * object fault routine responsible for pmap_update().
-        */
-
-       return error;
-}
-
-static int
-uvm_fault_lower_generic(
-       struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
-       struct vm_page **pages)
-{
-#ifdef DIAGNOSTIC
-       struct vm_amap *amap = ufi->entry->aref.ar_amap;
-#endif
-       struct uvm_object *uobj = ufi->entry->object.uvm_obj;
-       struct vm_page *uobjpage;
-
-       /*
-        * now, if the desired page is not shadowed by the amap and we have
-        * a backing object that does not have a special fault routine, then
-        * we ask (with pgo_get) the object for resident pages that we care
-        * about and attempt to map them in.  we do not let pgo_get block
-        * (PGO_LOCKED).
-        */
-
-       if (uobj == NULL) {
-               /* zero fill; don't care neighbor pages */
-               uobjpage = NULL;
-       } else {
-               uvm_fault_lower_lookup(ufi, flt, pages);
-               uobjpage = pages[flt->centeridx];
-       }
-
-       /* locked: maps(read), amap(if there), uobj(if !null), uobjpage(if !null) */
-       KASSERT(amap == NULL || mutex_owned(&amap->am_l));
-       KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
-       KASSERT(uobjpage == NULL || uvm_pageisdevice_p(uobjpage) ||
-           (uobjpage->flags & PG_BUSY) != 0);
-
-       /*
-        * note that at this point we are done with any front or back pages.
-        * we are now going to focus on the center page (i.e. the one we've
-        * faulted on).  if we have faulted on the upper (anon) layer


