Source-Changes-HG archive


[src/trunk]: src/sys Keep interrupt-safe maps on an additional queue. In uvm...



details:   https://anonhg.NetBSD.org/src/rev/19ef506971ea
branches:  trunk
changeset: 473457:19ef506971ea
user:      thorpej <thorpej@NetBSD.org>
date:      Fri Jun 04 23:38:41 1999 +0000

description:
Keep interrupt-safe maps on an additional queue.  In uvm_fault(), if we're
looking up a kernel address, check whether the address is managed by a map
on this "interrupt-safe" list.  If so, return failure immediately.  This
prevents a locking screw-up if a page fault is taken on an interrupt-safe
map, in or out of interrupt context.
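
In outline, the change amounts to a registry of interrupt-safe address
ranges that the fault handler consults before it takes any other locks.
The standalone C sketch below shows the shape of that pattern; the names
(isr_region, isr_register, isr_check) are hypothetical stand-ins, not the
NetBSD identifiers (the real code in the diff below uses struct
vm_map_intrsafe, vmi_list, and uvmfault_check_intrsafe, and guards the
list with vmi_list_lock()/vmi_list_unlock(), omitted here for brevity).

/*
 * Hedged sketch only: a registry of interrupt-safe address ranges
 * consulted at fault time.  All names are hypothetical; see the diff
 * below for the real NetBSD identifiers and locking.
 */
#include <stddef.h>

struct isr_region {
        unsigned long start;            /* inclusive lower bound */
        unsigned long end;              /* exclusive upper bound */
        struct isr_region *next;
};

static struct isr_region *isr_head;     /* all interrupt-safe regions */

/* Called at map-creation time, like the LIST_INSERT_HEAD in the diff. */
static void
isr_register(struct isr_region *r)
{
        r->next = isr_head;
        isr_head = r;
}

/*
 * Called first thing in the fault handler: a hit means the fault is on
 * an interrupt-safe (never-pageable) map, so fail fast instead of
 * taking any sleep locks.
 */
static int
isr_check(unsigned long va)
{
        struct isr_region *r;

        for (r = isr_head; r != NULL; r = r->next)
                if (va >= r->start && va < r->end)
                        return 1;
        return 0;
}

Failing before touching any other locks matters because an interrupt-safe
map can fault while interrupts are blocked, where sleeping on a map lock
could deadlock.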

diffstat:

 sys/kern/kern_malloc.c |   6 +++---
 sys/uvm/uvm_fault.c    |  15 ++++++++++++++-
 sys/uvm/uvm_fault.h    |   3 ++-
 sys/uvm/uvm_fault_i.h  |  35 ++++++++++++++++++++++++++++++++++-
 sys/uvm/uvm_km.c       |  13 +++++++++++--
 sys/uvm/uvm_map_i.h    |  20 ++++++++++++++++++--
 6 files changed, 82 insertions(+), 10 deletions(-)

diffs (197 lines):

diff -r 9852bb27e144 -r 19ef506971ea sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c    Fri Jun 04 21:43:12 1999 +0000
+++ b/sys/kern/kern_malloc.c    Fri Jun 04 23:38:41 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_malloc.c,v 1.43 1999/05/28 19:40:09 thorpej Exp $ */
+/*     $NetBSD: kern_malloc.c,v 1.44 1999/06/04 23:38:42 thorpej Exp $ */
 
 /*
  * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
@@ -50,7 +50,7 @@
 
 #include <uvm/uvm_extern.h>
 
-static struct vm_map kmem_map_store;
+static struct vm_map_intrsafe kmem_map_store;
 vm_map_t kmem_map = NULL;
 
 #include "opt_kmemstats.h"
@@ -627,7 +627,7 @@
                (vsize_t)(npg * sizeof(struct kmemusage)));
        kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
                (vaddr_t *)&kmemlimit, (vsize_t)(npg * NBPG), 
-                       VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
+                       VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
 #ifdef KMEMSTATS
        for (indx = 0; indx < MINBUCKET + 16; indx++) {
                if (1 << indx >= CLBYTES)
diff -r 9852bb27e144 -r 19ef506971ea sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c       Fri Jun 04 21:43:12 1999 +0000
+++ b/sys/uvm/uvm_fault.c       Fri Jun 04 23:38:41 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_fault.c,v 1.32 1999/06/02 23:26:21 thorpej Exp $   */
+/*     $NetBSD: uvm_fault.c,v 1.33 1999/06/04 23:38:41 thorpej Exp $   */
 
 /*
  *
@@ -589,6 +589,19 @@
                narrow = FALSE;         /* normal fault */
 
        /*
+        * before we do anything else, if this is a fault on a kernel
+        * address, check to see if the address is managed by an
+        * interrupt-safe map.  If it is, we fail immediately.  Intrsafe
+        * maps are never pageable, and this approach avoids an evil
+        * locking mess.
+        */
+       if (orig_map == kernel_map && uvmfault_check_intrsafe(&ufi)) {
+               UVMHIST_LOG(maphist, "<- VA 0x%lx in intrsafe map %p",
+                   ufi.orig_rvaddr, ufi.map, 0, 0);
+               return (KERN_FAILURE);
+       }
+
+       /*
         * "goto ReFault" means restart the page fault from ground zero.
         */
 ReFault:
diff -r 9852bb27e144 -r 19ef506971ea sys/uvm/uvm_fault.h
--- a/sys/uvm/uvm_fault.h       Fri Jun 04 21:43:12 1999 +0000
+++ b/sys/uvm/uvm_fault.h       Fri Jun 04 23:38:41 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_fault.h,v 1.10 1999/05/28 20:49:51 thorpej Exp $   */
+/*     $NetBSD: uvm_fault.h,v 1.11 1999/06/04 23:38:41 thorpej Exp $   */
 
 /*
  *
@@ -72,6 +72,7 @@
 
 int uvmfault_anonget __P((struct uvm_faultinfo *, struct vm_amap *,
                                                                                                        struct vm_anon *));
+static boolean_t uvmfault_check_intrsafe __P((struct uvm_faultinfo *));
 static boolean_t uvmfault_lookup __P((struct uvm_faultinfo *, boolean_t));
 static boolean_t uvmfault_relock __P((struct uvm_faultinfo *));
 static void uvmfault_unlockall __P((struct uvm_faultinfo *, struct vm_amap *,
diff -r 9852bb27e144 -r 19ef506971ea sys/uvm/uvm_fault_i.h
--- a/sys/uvm/uvm_fault_i.h     Fri Jun 04 21:43:12 1999 +0000
+++ b/sys/uvm/uvm_fault_i.h     Fri Jun 04 23:38:41 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_fault_i.h,v 1.8 1999/03/25 18:48:51 mrg Exp $      */
+/*     $NetBSD: uvm_fault_i.h,v 1.9 1999/06/04 23:38:41 thorpej Exp $  */
 
 /*
  *
@@ -82,6 +82,39 @@
 }
 
 /*
+ * uvmfault_check_intrsafe: check for a virtual address managed by
+ * an interrupt-safe map.
+ *
+ * => caller must provide a uvm_faultinfo structure with the IN
+ *     params properly filled in
+ * => if we find an intrsafe VA, we fill in ufi->map, and return TRUE
+ */
+
+static __inline boolean_t
+uvmfault_check_intrsafe(ufi)
+       struct uvm_faultinfo *ufi;
+{
+       struct vm_map_intrsafe *vmi;
+       int s;
+
+       s = vmi_list_lock();
+       for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
+            vmi = LIST_NEXT(vmi, vmi_list)) {
+               if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
+                   ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
+                       break;
+       }
+       vmi_list_unlock(s);
+
+       if (vmi != NULL) {
+               ufi->map = &vmi->vmi_map;
+               return (TRUE);
+       }
+
+       return (FALSE);
+}
+
+/*
  * uvmfault_lookup: lookup a virtual address in a map
  *
  * => caller must provide a uvm_faultinfo structure with the IN
diff -r 9852bb27e144 -r 19ef506971ea sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c  Fri Jun 04 21:43:12 1999 +0000
+++ b/sys/uvm/uvm_km.c  Fri Jun 04 23:38:41 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_km.c,v 1.26 1999/05/26 19:27:49 thorpej Exp $      */
+/*     $NetBSD: uvm_km.c,v 1.27 1999/06/04 23:38:41 thorpej Exp $      */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -159,6 +159,9 @@
 
 vm_map_t kernel_map = NULL;
 
+struct vmi_list vmi_list;
+simple_lock_data_t vmi_list_slock;
+
 /*
  * local functions
  */
@@ -418,7 +421,13 @@
        vaddr_t base = VM_MIN_KERNEL_ADDRESS;
 
        /*
-        * first, init kernel memory objects.
+        * first, initialize the interrupt-safe map list.
+        */
+       LIST_INIT(&vmi_list);
+       simple_lock_init(&vmi_list_slock);
+
+       /*
+        * next, init kernel memory objects.
         */
 
        /* kernel_object: for pageable anonymous kernel memory */
diff -r 9852bb27e144 -r 19ef506971ea sys/uvm/uvm_map_i.h
--- a/sys/uvm/uvm_map_i.h       Fri Jun 04 21:43:12 1999 +0000
+++ b/sys/uvm/uvm_map_i.h       Fri Jun 04 23:38:41 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map_i.h,v 1.13 1999/05/26 19:16:36 thorpej Exp $   */
+/*     $NetBSD: uvm_map_i.h,v 1.14 1999/06/04 23:38:42 thorpej Exp $   */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -93,7 +93,10 @@
 {
        vm_map_t result;
 
-       MALLOC(result, vm_map_t, sizeof(struct vm_map), M_VMMAP, M_WAITOK);
+       MALLOC(result, vm_map_t,
+           (flags & VM_MAP_INTRSAFE) ? sizeof(struct vm_map_intrsafe) :
+                                       sizeof(struct vm_map),
+           M_VMMAP, M_WAITOK);
        uvm_map_setup(result, min, max, flags);
        result->pmap = pmap;
        return(result);
@@ -125,6 +128,19 @@
        lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
        simple_lock_init(&map->ref_lock);
        simple_lock_init(&map->hint_lock);
+
+       /*
+        * If the map is interrupt safe, place it on the list
+        * of interrupt safe maps, for uvm_fault().
+        */
+       if (flags & VM_MAP_INTRSAFE) {
+               struct vm_map_intrsafe *vmi = (struct vm_map_intrsafe *)map;
+               int s;
+
+               s = vmi_list_lock();
+               LIST_INSERT_HEAD(&vmi_list, vmi, vmi_list);
+               vmi_list_unlock(s);
+       }
 }
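
A detail worth calling out in the last two hunks: uvm_map_create() now
allocates sizeof(struct vm_map_intrsafe) when VM_MAP_INTRSAFE is set, and
uvm_map_setup() casts the plain vm_map pointer to struct vm_map_intrsafe *;
kern_malloc.c likewise passes &kmem_map_store.vmi_map.  That only works if
the embedded vm_map is the first member of the container.  Below is a
minimal sketch of the idiom; the fields other than vmi_map are
placeholders, and the layout is an assumption inferred from how the casts
are used, not the actual struct definition:

#include <stdlib.h>

struct vm_map {                         /* stand-in for the real struct */
        int placeholder;
};

struct vm_map_intrsafe {
        struct vm_map vmi_map;          /* must be the FIRST member */
        struct vm_map_intrsafe *vmi_link; /* stand-in for the LIST_ENTRY */
};

int
main(void)
{
        struct vm_map_intrsafe *vmi = malloc(sizeof(*vmi));
        struct vm_map *map;

        if (vmi == NULL)
                return 1;
        map = &vmi->vmi_map;            /* what kern_malloc.c hands out */

        /*
         * C guarantees a pointer to a struct and a pointer to its first
         * member convert to one another, so uvm_map_setup() can recover
         * the container from the plain vm_map pointer it was handed.
         */
        return ((struct vm_map_intrsafe *)map == vmi) ? 0 : 1;
}

The payoff of the embedding is that all existing code keeps passing plain
vm_map pointers around, while the fault path can recover the extra list
linkage from the same pointer.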
 
 


