Source-Changes-HG archive


[src/trunk]: src/sys/uvm Simplify the last change even more; we downgraded to a shar...



details:   https://anonhg.NetBSD.org/src/rev/7ddac48bb4cf
branches:  trunk
changeset: 473422:7ddac48bb4cf
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Wed Jun 02 22:40:51 1999 +0000

description:
Simplify the last change even more; we downgraded to a shared (read) lock, so
setting the recursive flag (LK_CANRECURSE) has no effect!  The kernel lock
manager doesn't allow an exclusive recursion into a shared lock.  This
situation must simply be avoided.  The only place where this might be a
problem is the (ab)use of uvm_map_pageable() in the Utah-derived pmaps for
m68k (they should either toss the iffy scheme they use completely, or use
something like uvm_fault_wire()).
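
For illustration, a hypothetical sketch of the sequence the removed
vm_map_set_recursive() call was meant to enable, written with the map-lock
macros that appear in the diff below (not real code from the tree, just the
idea):

        vm_map_lock(map);               /* exclusive (write) lock           */
        vm_map_set_recursive(map);      /* set LK_CANRECURSE (now removed)  */
        vm_map_downgrade(map);          /* LK_DOWNGRADE: now a shared lock  */

        /*
         * If the pmap (via uvm_fault()) now needs to lock this map
         * exclusively in the same thread, LK_CANRECURSE does not help:
         * the lock manager has no notion of an exclusive recursion into
         * a shared (read) lock, so setting the flag buys nothing here.
         */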

In addition, once we have looped over uvm_fault_wire(), only upgrade to
an exclusive (write) lock if we need to modify the map again (i.e.
wiring a page failed).
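
Condensed, the pass-2 locking in uvm_map_pageable() now looks roughly like
this (a sketch only; the entry walk and the uvm_fault_wire() argument list
are abbreviated, see the diff for the real code):

        /*
         * Pass 2: fault the pages in while holding only a shared (read)
         * lock on the map; the entries were already prepared under the
         * exclusive lock in pass 1.
         */
        vm_map_downgrade(map);

        rv = 0;
        entry = start_entry;
        while (entry != &map->header && entry->start < end) {
                /* wire the pages backing this entry */
                rv = uvm_fault_wire(map, entry->start, entry->end);
                if (rv)
                        break;
                entry = entry->next;
        }

        if (rv) {       /* failed? */
                /*
                 * Only now do we need to modify the map again (to drop
                 * the wiring counts on the entries that were never
                 * wired), so this is the only path that upgrades back
                 * to an exclusive (write) lock.
                 */
                vm_map_upgrade(map);
                /* ... undo the wiring counts ... */
                vm_map_unlock(map);
                return (rv);
        }

        /* Success: we are still holding only the read lock. */
        vm_map_unlock_read(map);
        return (KERN_SUCCESS);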

diffstat:

 sys/uvm/uvm_map.c |  61 +++++++-----------------------------------------------
 1 files changed, 8 insertions(+), 53 deletions(-)

diffs (98 lines):

diff -r 44221ee4a5a2 -r 7ddac48bb4cf sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Wed Jun 02 22:14:07 1999 +0000
+++ b/sys/uvm/uvm_map.c Wed Jun 02 22:40:51 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.51 1999/06/02 21:23:08 thorpej Exp $     */
+/*     $NetBSD: uvm_map.c,v 1.52 1999/06/02 22:40:51 thorpej Exp $     */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -192,39 +192,6 @@
  */
 
 /* XXX Should not exist! */
-static __inline void vm_map_set_recursive __P((vm_map_t));
-static __inline void
-vm_map_set_recursive(map)
-       vm_map_t map;
-{
-
-#ifdef DIAGNOSTIC
-       if (map->flags & VM_MAP_INTRSAFE)
-               panic("vm_map_set_recursive: intrsafe map");
-#endif
-       simple_lock(&map->lock.lk_interlock);
-       map->lock.lk_flags |= LK_CANRECURSE;
-       simple_unlock(&map->lock.lk_interlock);
-}
-
-/* XXX Should not exist! */
-static __inline void vm_map_clear_recursive __P((vm_map_t));
-static __inline void
-vm_map_clear_recursive(map)
-       vm_map_t map;
-{
-
-#ifdef DIAGNOSTIC
-       if (map->flags & VM_MAP_INTRSAFE)
-               panic("vm_map_clear_recursive: intrsafe map");
-#endif
-       simple_lock(&map->lock.lk_interlock);
-       if (map->lock.lk_exclusivecount <= 1)
-               map->lock.lk_flags &= ~LK_CANRECURSE;
-       simple_unlock(&map->lock.lk_interlock);
-}
-
-/* XXX Should not exist! */
 #define        vm_map_downgrade(map)                                           \
        (void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)
 
@@ -2185,18 +2152,6 @@
         * Pass 2.
         */
 
-       /*
-        * XXX Note, even if we're a kernel map, just set recursion on
-        * XXX the lock.  If the pmap (via uvm_fault()) needs to lock
-        * XXX this map again in this thread, it will be able to due
-        * XXX to the recursion setting.  Note that we have already
-        * XXX done what we need to do to the map entries, so we
-        * XXX should be okay.
-        *
-        * JEEZ, THIS IS A MESS!
-        */
-
-       vm_map_set_recursive(map);
        vm_map_downgrade(map);
 
        rv = 0;
@@ -2217,14 +2172,13 @@
                entry = entry->next;
        }
 
-       /*
-        * Get back to an exclusive, non-recursive lock.  (XXX: see above)
-        */
-       vm_map_upgrade(map);
-       vm_map_clear_recursive(map);
-
        if (rv) {        /* failed? */
                /*
+                * Get back to an exclusive (write) lock.
+                */
+               vm_map_upgrade(map);
+
+               /*
                 * first drop the wiring count on all the entries
                 * which haven't actually been wired yet.
                 */
@@ -2242,7 +2196,8 @@
                return(rv);
        }
 
-       vm_map_unlock(map);
+       /* We are holding a read lock here. */
+       vm_map_unlock_read(map);
        
        UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
        return(KERN_SUCCESS);


