Source-Changes-HG archive


[src/trunk]: src/sys/uvm * Rename uvm_fault_unwire() to uvm_fault_unwire_lock...



details:   https://anonhg.NetBSD.org/src/rev/31846b73a717
branches:  trunk
changeset: 473720:31846b73a717
user:      thorpej <thorpej@NetBSD.org>
date:      Wed Jun 16 22:11:23 1999 +0000

description:
* Rename uvm_fault_unwire() to uvm_fault_unwire_locked(), and require that
  the map be at least read-locked to call this function.  This requirement
  will be taken advantage of in a future commit.
* Write a uvm_fault_unwire() wrapper which read-locks the map and calls
  uvm_fault_unwire_locked().
* Update the comments describing the locking constraints of uvm_fault_wire()
  and uvm_fault_unwire().
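
The sketch below is not part of the commit; it only illustrates the calling
convention the change establishes.  Callers that hold no map lock keep using
uvm_fault_unwire(), which now read-locks the map itself, while callers that
already hold at least a read lock call uvm_fault_unwire_locked() directly.
The two example_* functions are hypothetical callers written for
illustration, in kernel context (they would live in a file that already
includes <uvm/uvm.h>).

/*
 * Hedged sketch only: how the two unwire entry points are meant to
 * be used after this change.  example_unwire_nolock() and
 * example_unwire_withlock() are hypothetical.
 */
void
example_unwire_nolock(map, start, end)
	vm_map_t map;
	vaddr_t start, end;
{

	/* No map lock held: the wrapper takes the read lock itself. */
	uvm_fault_unwire(map, start, end);
}

void
example_unwire_withlock(map, start, end)
	vm_map_t map;
	vaddr_t start, end;
{

	/*
	 * A caller that already holds (at least) a read lock across
	 * other map operations calls the _locked variant directly.
	 */
	vm_map_lock_read(map);
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
}

The wrapper keeps the old interface intact for code that is unaware of the
map lock, while the _locked variant lets paths such as uvm_map_pageable()
unwire entries without dropping a lock they already hold.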

diffstat:

 sys/uvm/uvm_fault.c |  27 ++++++++++++++++++++++-----
 sys/uvm/uvm_fault.h |   3 ++-
 sys/uvm/uvm_map.c   |  12 ++----------
 3 files changed, 26 insertions(+), 16 deletions(-)

diffs (105 lines):

diff -r 563407cfbc08 -r 31846b73a717 sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c       Wed Jun 16 21:18:42 1999 +0000
+++ b/sys/uvm/uvm_fault.c       Wed Jun 16 22:11:23 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_fault.c,v 1.35 1999/06/16 18:43:28 thorpej Exp $   */
+/*     $NetBSD: uvm_fault.c,v 1.36 1999/06/16 22:11:23 thorpej Exp $   */
 
 /*
  *
@@ -1708,10 +1708,10 @@
 /*
  * uvm_fault_wire: wire down a range of virtual addresses in a map.
  *
- * => map should be locked by caller?   If so how can we call
- *     uvm_fault?   WRONG.
- * => XXXCDC: locking here is all screwed up!!!  start with 
- *     uvm_map_pageable and fix it.
+ * => map may be read-locked by caller, but MUST NOT be write-locked.
+ * => if map is read-locked, any operations which may cause map to
+ *     be write-locked in uvm_fault() must be taken care of by
+ *     the caller.  See uvm_map_pageable().
  */
 
 int
@@ -1760,6 +1760,23 @@
        vm_map_t map;
        vaddr_t start, end;
 {
+
+       vm_map_lock_read(map);
+       uvm_fault_unwire_locked(map, start, end);
+       vm_map_unlock_read(map);
+}
+
+/*
+ * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
+ *
+ * => map must be at least read-locked.
+ */
+
+void
+uvm_fault_unwire_locked(map, start, end)
+       vm_map_t map;
+       vaddr_t start, end;
+{
        pmap_t pmap = vm_map_pmap(map);
        vaddr_t va;
        paddr_t pa;
diff -r 563407cfbc08 -r 31846b73a717 sys/uvm/uvm_fault.h
--- a/sys/uvm/uvm_fault.h       Wed Jun 16 21:18:42 1999 +0000
+++ b/sys/uvm/uvm_fault.h       Wed Jun 16 22:11:23 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_fault.h,v 1.11 1999/06/04 23:38:41 thorpej Exp $   */
+/*     $NetBSD: uvm_fault.h,v 1.12 1999/06/16 22:11:23 thorpej Exp $   */
 
 /*
  *
@@ -81,5 +81,6 @@
 
 int uvm_fault_wire __P((vm_map_t, vaddr_t, vaddr_t, vm_prot_t));
 void uvm_fault_unwire __P((vm_map_t, vaddr_t, vaddr_t));
+void uvm_fault_unwire_locked __P((vm_map_t, vaddr_t, vaddr_t));
 
 #endif /* _UVM_UVM_FAULT_H_ */
diff -r 563407cfbc08 -r 31846b73a717 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Wed Jun 16 21:18:42 1999 +0000
+++ b/sys/uvm/uvm_map.c Wed Jun 16 22:11:23 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.56 1999/06/16 19:34:24 thorpej Exp $     */
+/*     $NetBSD: uvm_map.c,v 1.57 1999/06/16 22:11:23 thorpej Exp $     */
 
 /* 
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -298,8 +298,8 @@
        vm_map_entry_t entry;
 {
 
-       uvm_fault_unwire(map, entry->start, entry->end);
        entry->wired_count = 0;
+       uvm_fault_unwire_locked(map, entry->start, entry->end);
 }
 
 /*
@@ -2049,10 +2049,6 @@
                 * POSIX 1003.1b - a single munlock call unlocks a region,
                 * regardless of the number of mlock calls made on that
                 * region.
-                *
-                * Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
-                * does not lock the map, so we don't have to do anything
-                * special regarding locking here.
                 */
                entry = start_entry;
                while ((entry != &map->header) && (entry->start < end)) {
@@ -2235,10 +2231,6 @@
                /*
                 * POSIX 1003.1b -- munlockall unlocks all regions,
                 * regardless of how many times mlockall has been called.
-                *
-                * Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
-                * does not lock the map, so we don't have to do anything
-                * special regarding locking here.
                 */
                for (entry = map->header.next; entry != &map->header;
                     entry = entry->next) {
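
For context, here is a condensed sketch (not part of the commit) of
uvm_map_entry_unwire() as it reads after the uvm_map.c hunk above.  The
function signature shown is an assumption; only the two body statements come
from the diff.  Because uvm_map_pageable() and the munlockall path shown in
the second uvm_map.c hunk call it with the map already locked, the old
"does not lock the map" notes removed above no longer apply.

/*
 * Hedged sketch, condensed from the uvm_map.c hunk above.  The
 * prototype form is an assumption; the body is what the patch
 * leaves in place.
 */
void
uvm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{

	/* Caller (e.g. uvm_map_pageable()) already holds the map lock. */
	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}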


