Source-Changes-HG archive


[src/trunk]: src/sys Make "intrsafe" maps locked only by exclusive spin locks...



details:   https://anonhg.NetBSD.org/src/rev/090863d4ba91
branches:  trunk
changeset: 473307:090863d4ba91
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Fri May 28 20:31:42 1999 +0000

description:
Make "intrsafe" maps locked only by exclusive spin locks, never sleep
locks (and thus, never shared locks).  Move the "set/clear recursive"
functions to uvm_map.c, which is the only placed they're used (and
they should go away anyhow).  Delete some unused cruft.
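
The rule this change enforces can be sketched outside the kernel.  The
following standalone C model is illustrative only: "struct map",
map_lock(), and map_lock_read() are invented stand-ins for vm_map,
vm_map_lock(), and vm_map_lock_read(), with plain booleans and counters
in place of the real spin and sleep lock primitives.

/*
 * Standalone model of the locking rule; all names are illustrative
 * stand-ins, not the kernel's real API.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAP_INTRSAFE	0x02		/* mirrors VM_MAP_INTRSAFE */

struct map {
	int flags;
	bool spin_held;			/* stand-in for the spin interlock */
	int shared_count;		/* stand-in for shared lock holders */
};

/* Exclusive lock: intrsafe maps only ever spin, never sleep. */
static void
map_lock(struct map *m)
{

	if (m->flags & MAP_INTRSAFE) {
		m->spin_held = true;	/* spin: safe at interrupt time */
	} else {
		/* the real code takes a sleepable exclusive lock here */
	}
}

/* Shared (read) locks are forbidden on intrsafe maps. */
static bool
map_lock_read(struct map *m)
{

	if (m->flags & MAP_INTRSAFE) {
		fprintf(stderr, "map_lock_read: intrsafe map\n");
		return false;		/* the kernel panics here (DIAGNOSTIC) */
	}
	m->shared_count++;
	return true;
}

int
main(void)
{
	struct map km = { .flags = MAP_INTRSAFE };

	map_lock(&km);			/* OK: exclusive spin lock */
	if (!map_lock_read(&km))	/* rejected: no shared locks */
		printf("shared lock refused on intrsafe map\n");
	return 0;
}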

diffstat:

 sys/uvm/uvm_map.c |   39 ++++++++++++++++-
 sys/vm/vm_map.h   |  115 ++++++++++++++++++++++++++++++++---------------------
 2 files changed, 105 insertions(+), 49 deletions(-)

diffs (210 lines):

diff -r d3512a79105a -r 090863d4ba91 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Fri May 28 20:17:29 1999 +0000
+++ b/sys/uvm/uvm_map.c Fri May 28 20:31:42 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.46 1999/05/26 23:53:48 thorpej Exp $     */
+/*     $NetBSD: uvm_map.c,v 1.47 1999/05/28 20:31:43 thorpej Exp $     */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -191,6 +191,39 @@
  * local inlines
  */
 
+/* XXX Should not exist! */
+static __inline void vm_map_set_recursive __P((vm_map_t));
+static __inline void
+vm_map_set_recursive(map)
+       vm_map_t map;
+{
+
+#ifdef DIAGNOSTIC
+       if (map->flags & VM_MAP_INTRSAFE)
+               panic("vm_map_set_recursive: intrsafe map");
+#endif
+       simple_lock(&map->lock.lk_interlock);
+       map->lock.lk_flags |= LK_CANRECURSE;
+       simple_unlock(&map->lock.lk_interlock);
+}
+
+/* XXX Should not exist! */
+static __inline void vm_map_clear_recursive __P((vm_map_t));
+static __inline void
+vm_map_clear_recursive(map)
+       vm_map_t map;
+{
+
+#ifdef DIAGNOSTIC
+       if (map->flags & VM_MAP_INTRSAFE)
+               panic("vm_map_clear_recursive: intrsafe map");
+#endif
+       simple_lock(&map->lock.lk_interlock);
+       if (map->lock.lk_exclusivecount <= 1)
+               map->lock.lk_flags &= ~LK_CANRECURSE;
+       simple_unlock(&map->lock.lk_interlock);
+}
+
 /*
  * uvm_mapent_alloc: allocate a map entry
  *
@@ -2149,7 +2182,7 @@
        if (vm_map_pmap(map) == pmap_kernel()) {
                vm_map_unlock(map);         /* trust me ... */
        } else {
-               vm_map_set_recursive(&map->lock);
+               vm_map_set_recursive(map);
                lockmgr(&map->lock, LK_DOWNGRADE, (void *)0);
        }
 
@@ -2182,7 +2215,7 @@
        if (vm_map_pmap(map) == pmap_kernel()) {
                vm_map_lock(map);     /* relock */
        } else {
-               vm_map_clear_recursive(&map->lock);
+               vm_map_clear_recursive(map);
        } 
 
        if (rv) {        /* failed? */
diff -r d3512a79105a -r 090863d4ba91 sys/vm/vm_map.h
--- a/sys/vm/vm_map.h   Fri May 28 20:17:29 1999 +0000
+++ b/sys/vm/vm_map.h   Fri May 28 20:31:42 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vm_map.h,v 1.25 1999/05/26 19:16:37 thorpej Exp $      */
+/*     $NetBSD: vm_map.h,v 1.26 1999/05/28 20:31:42 thorpej Exp $      */
 
 /* 
  * Copyright (c) 1991, 1993
@@ -151,65 +151,88 @@
 #define        VM_MAP_INTRSAFE         0x02            /* interrupt safe map */
 
 /*
- *     Macros:         vm_map_lock, etc.
- *     Function:
- *             Perform locking on the data portion of a map.
+ * VM map locking operations:
+ *
+ *     These operations perform locking on the data portion of the
+ *     map.
+ *
+ *     vm_map_lock_try: try to lock a map, failing if it is already locked.
+ *
+ *     vm_map_lock: acquire an exclusive (write) lock on a map.
+ *
+ *     vm_map_lock_read: acquire a shared (read) lock on a map.
+ *
+ *     vm_map_unlock: release an exclusive lock on a map.
+ *
+ *     vm_map_unlock_read: release a shared lock on a map.
+ *
+ * Note that "intrsafe" maps use only exclusive, spin locks.  We simply
+ * use the sleep lock's interlock for this.
  */
 
+#ifdef _KERNEL
+/* XXX: clean up later */
 #include <sys/time.h>
 #include <sys/proc.h>  /* XXX for curproc and p_pid */
 
-#define        vm_map_lock_drain_interlock(map) { \
-       lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
-               &(map)->ref_lock); \
-       (map)->timestamp++; \
-}
-#ifdef DIAGNOSTIC
-#define        vm_map_lock(map) { \
-       if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0) != 0) { \
-               panic("vm_map_lock: failed to get lock"); \
-       } \
-       (map)->timestamp++; \
-}
-#else
-#define        vm_map_lock(map) { \
-       lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0); \
-       (map)->timestamp++; \
-}
-#endif /* DIAGNOSTIC */
-#define        vm_map_unlock(map) \
-               lockmgr(&(map)->lock, LK_RELEASE, (void *)0)
-#define        vm_map_lock_read(map) \
-               lockmgr(&(map)->lock, LK_SHARED, (void *)0)
-#define        vm_map_unlock_read(map) \
-               lockmgr(&(map)->lock, LK_RELEASE, (void *)0)
-#define vm_map_set_recursive(map) { \
-       simple_lock(&(map)->lk_interlock); \
-       (map)->lk_flags |= LK_CANRECURSE; \
-       simple_unlock(&(map)->lk_interlock); \
-}
-#define vm_map_clear_recursive(map) { \
-       simple_lock(&(map)->lk_interlock); \
-       if ((map)->lk_exclusivecount <= 1) \
-               (map)->lk_flags &= ~LK_CANRECURSE; \
-       simple_unlock(&(map)->lk_interlock); \
-}
-
-#ifdef _KERNEL
-/* XXX: clean up later */
 static __inline boolean_t vm_map_lock_try __P((vm_map_t));
 
 static __inline boolean_t
 vm_map_lock_try(map)
        vm_map_t map;
 {
-       if (lockmgr(&(map)->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0) != 0)
-               return(FALSE);
-       map->timestamp++;
-       return(TRUE);
+       boolean_t rv;
+
+       if (map->flags & VM_MAP_INTRSAFE)
+               rv = simple_lock_try(&map->lock.lk_interlock);
+       else
+               rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT, NULL) == 0);
+
+       if (rv)
+               map->timestamp++;
+
+       return (rv);
 }
+
+#ifdef DIAGNOSTIC
+#define        _vm_map_lock(map)                                               \
+do {                                                                   \
+       if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL) != 0)             \
+               panic("vm_map_lock: failed to get lock");               \
+} while (0)
+#else
+#define        _vm_map_lock(map)                                               \
+       (void) lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL)
 #endif
 
+#define        vm_map_lock(map)                                                \
+do {                                                                   \
+       if ((map)->flags & VM_MAP_INTRSAFE)                             \
+               simple_lock(&(map)->lock.lk_interlock);                 \
+       else                                                            \
+               _vm_map_lock((map));                                    \
+       (map)->timestamp++;                                             \
+} while (0)
+
+#ifdef DIAGNOSTIC
+#define        vm_map_lock_read(map)                                           \
+do {                                                                   \
+       if (map->flags & VM_MAP_INTRSAFE)                               \
+               panic("vm_map_lock_read: intrsafe map");                \
+       (void) lockmgr(&(map)->lock, LK_SHARED, NULL);                  \
+} while (0)
+#else
+#define        vm_map_lock_read(map)                                           \
+       (void) lockmgr(&(map)->lock, LK_SHARED, NULL)
+#endif
+
+#define        vm_map_unlock(map)                                              \
+       (void) lockmgr(&(map)->lock, LK_RELEASE, (void *)0)
+
+#define        vm_map_unlock_read(map)                                         \
+       (void) lockmgr(&(map)->lock, LK_RELEASE, (void *)0)
+#endif /* _KERNEL */
+
 /*
  *     Functions implemented as macros
  */
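
For context on how the reworked macros are meant to be used, here is a
hypothetical caller.  This is a sketch only: example_locking() and its
map arguments are invented, it assumes the kernel headers of this era,
and it relies only on behavior visible in the diff above.

/*
 * Hypothetical caller, for illustration only: example_locking() and
 * the map arguments are invented.  This compiles only against a
 * kernel tree of this vintage.
 */
#include <sys/param.h>
#include <vm/vm.h>		/* for vm_map_t and the locking macros */

void
example_locking(vm_map_t kmap, vm_map_t umap)
{

	/*
	 * kmap has VM_MAP_INTRSAFE set: vm_map_lock() takes the sleep
	 * lock's interlock with simple_lock(), so acquiring it never
	 * sleeps and is safe at interrupt time.  In this revision
	 * vm_map_unlock() still goes through lockmgr(), so the
	 * interlock is dropped directly here.
	 */
	vm_map_lock(kmap);
	/* ... modify entries; nothing in here may sleep ... */
	simple_unlock(&kmap->lock.lk_interlock);

	/*
	 * umap is an ordinary map: vm_map_lock() would acquire an
	 * exclusive sleep lock via lockmgr() and may sleep.  Shared
	 * (read) locks are legal only on non-intrsafe maps; with
	 * DIAGNOSTIC, vm_map_lock_read(kmap) would panic.
	 */
	vm_map_lock_read(umap);
	/* ... read-only traversal ... */
	vm_map_unlock_read(umap);
}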


