Source-Changes-HG archive


[src/trunk]: src/sys/uvm make MAP_FIXED mapping operations atomic. fixes PR 5...



details:   https://anonhg.NetBSD.org/src/rev/3ea1025da999
branches:  trunk
changeset: 824038:3ea1025da999
user:      chs <chs@NetBSD.org>
date:      Fri May 19 15:30:19 2017 +0000

description:
Make MAP_FIXED mapping operations atomic. Fixes PR 52239.
Previously, unmapping any entries being replaced was done separately
from entering the new mapping, which allowed another thread doing a
non-MAP_FIXED mapping to allocate the range out from under the
MAP_FIXED thread.
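
To illustrate the race (this sketch is not part of the commit; the names
and sizes are hypothetical, and a POSIX threads environment is assumed):
before this change, the non-fixed mmap() in one thread could be handed
the very range that a MAP_FIXED mmap() in another thread was in the
middle of replacing.

#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *target;		/* range the MAP_FIXED thread replaces */
static size_t len;

static void *
fixed_thread(void *arg)
{

	/* Replace whatever is mapped at "target", in place. */
	void *p = mmap(target, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

static void *
floating_thread(void *arg)
{

	/*
	 * A non-fixed mapping: the kernel chooses the address.  Before
	 * the fix, this could land inside "target" while the other
	 * thread's MAP_FIXED operation sat between its separate unmap
	 * and map steps.
	 */
	return mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
}

int
main(void)
{
	pthread_t t1, t2;

	len = (size_t)sysconf(_SC_PAGESIZE);
	target = mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);

	pthread_create(&t1, NULL, fixed_thread, NULL);
	pthread_create(&t2, NULL, floating_thread, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("MAP_FIXED replacement at %p is atomic after this change\n",
	    target);
	return 0;
}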

diffstat:

 sys/uvm/uvm_map.c  |  54 ++++++++++++++++++++++++++++++++++++++++++++++++------
 sys/uvm/uvm_mmap.c |   7 +++----
 2 files changed, 51 insertions(+), 10 deletions(-)

diffs (148 lines):

diff -r e08114a0fdb0 -r 3ea1025da999 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Fri May 19 15:16:12 2017 +0000
+++ b/sys/uvm/uvm_map.c Fri May 19 15:30:19 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.346 2017/05/19 14:42:00 christos Exp $   */
+/*     $NetBSD: uvm_map.c,v 1.347 2017/05/19 15:30:19 chs Exp $        */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.346 2017/05/19 14:42:00 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.347 2017/05/19 15:30:19 chs Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pax.h"
@@ -1163,8 +1163,25 @@
                }
                vm_map_lock(map); /* could sleep here */
        }
-       prev_entry = uvm_map_findspace(map, start, size, &start,
-           uobj, uoffset, align, flags);
+       if (flags & UVM_FLAG_FIXED) {
+               KASSERT((flags & UVM_FLAG_NOWAIT) == 0);
+
+               /*
+                * Set prev_entry to what it will need to be after any existing
+                * entries are removed later in uvm_map_enter().
+                */
+
+               if (uvm_map_lookup_entry(map, start, &prev_entry)) {
+                       if (start == prev_entry->start)
+                               prev_entry = prev_entry->prev;
+                       else
+                               UVM_MAP_CLIP_END(map, prev_entry, start);
+                       SAVE_HINT(map, map->hint, prev_entry);
+               }
+       } else {
+               prev_entry = uvm_map_findspace(map, start, size, &start,
+                   uobj, uoffset, align, flags);
+       }
        if (prev_entry == NULL) {
                unsigned int timestamp;
 
@@ -1255,7 +1272,7 @@
     struct vm_map_entry *new_entry)
 {
        struct vm_map_entry *prev_entry = args->uma_prev;
-       struct vm_map_entry *dead = NULL;
+       struct vm_map_entry *dead = NULL, *dead_entries = NULL;
 
        const uvm_flag_t flags = args->uma_flags;
        const vm_prot_t prot = UVM_PROTECTION(flags);
@@ -1284,6 +1301,8 @@
 
        KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
        KASSERT(vm_map_locked_p(map));
+       KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_FIXED)) !=
+               (UVM_FLAG_NOWAIT | UVM_FLAG_FIXED));
 
        if (uobj)
                newetype = UVM_ET_OBJ;
@@ -1297,6 +1316,27 @@
        }
 
        /*
+        * For fixed mappings, remove any old entries now.  Adding the new
+        * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
+        * is set, and we do not support nowait and fixed together.
+        */
+
+       if (flags & UVM_FLAG_FIXED) {
+               uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
+#ifdef DEBUG
+               struct vm_map_entry *tmp_entry;
+               bool rv;
+
+               rv = uvm_map_lookup_entry(map, start, &tmp_entry);
+               KASSERT(!rv);
+               KASSERTMSG(prev_entry == tmp_entry,
+                          "args %p prev_entry %p tmp_entry %p",
+                          args, prev_entry, tmp_entry);
+#endif
+               SAVE_HINT(map, map->hint, prev_entry);
+       }
+
+       /*
         * try and insert in map by extending previous entry, if possible.
         * XXX: we don't try and pull back the next entry.   might be useful
         * for a stack, but we are currently allocating our stack in advance.
@@ -1569,17 +1609,19 @@
        UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
 
        error = 0;
+
 done:
        vm_map_unlock(map);
 
        if (new_entry) {
                uvm_mapent_free(new_entry);
        }
-
        if (dead) {
                KDASSERT(merged);
                uvm_mapent_free(dead);
        }
+       if (dead_entries)
+               uvm_unmap_detach(dead_entries, 0);
 
        return error;
 }
diff -r e08114a0fdb0 -r 3ea1025da999 sys/uvm/uvm_mmap.c
--- a/sys/uvm/uvm_mmap.c        Fri May 19 15:16:12 2017 +0000
+++ b/sys/uvm/uvm_mmap.c        Fri May 19 15:30:19 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_mmap.c,v 1.164 2017/05/06 21:34:52 joerg Exp $     */
+/*     $NetBSD: uvm_mmap.c,v 1.165 2017/05/19 15:30:19 chs Exp $       */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -46,7 +46,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.164 2017/05/06 21:34:52 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.165 2017/05/19 15:30:19 chs Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_pax.h"
@@ -924,7 +924,7 @@
 
        /*
         * for non-fixed mappings, round off the suggested address.
-        * for fixed mappings, check alignment and zap old mappings.
+        * for fixed mappings, check alignment.
         */
 
        if ((flags & MAP_FIXED) == 0) {
@@ -933,7 +933,6 @@
                if (*addr & PAGE_MASK)
                        return EINVAL;
                uvmflag |= UVM_FLAG_FIXED;
-               (void) uvm_unmap(map, *addr, *addr + size);
        }
 
        /*
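
In outline, the commit moves the removal of any overlapping entries out
of uvm_mmap() and into uvm_map_enter(), where it now happens under the
same hold of the map lock as the insertion of the new entry; the dead
entries are detached and freed only after vm_map_unlock().  The toy
model below sketches that pattern with hypothetical types (a
mutex-protected list standing in for the vm_map); it is not the UVM API.

#include <pthread.h>
#include <stdlib.h>

struct entry {
	unsigned long start, end;
	struct entry *next;
};

struct map {
	pthread_mutex_t lock;
	struct entry *head;
};

/* Unlink entries overlapping [start, end); caller holds map->lock. */
static struct entry *
remove_range(struct map *m, unsigned long start, unsigned long end)
{
	struct entry **pp = &m->head, *dead = NULL, *e;

	while ((e = *pp) != NULL) {
		if (e->start < end && e->end > start) {
			*pp = e->next;
			e->next = dead;
			dead = e;
		} else
			pp = &e->next;
	}
	return dead;
}

/* Atomically replace [start, end) with a fresh entry, MAP_FIXED-style. */
static void
replace_fixed(struct map *m, unsigned long start, unsigned long end)
{
	struct entry *dead, *e, *new = malloc(sizeof(*new));

	new->start = start;
	new->end = end;

	pthread_mutex_lock(&m->lock);
	dead = remove_range(m, start, end);	/* like uvm_unmap_remove() */
	new->next = m->head;			/* enter the new mapping   */
	m->head = new;
	pthread_mutex_unlock(&m->lock);

	while (dead != NULL) {			/* like uvm_unmap_detach() */
		e = dead->next;
		free(dead);
		dead = e;
	}
}

int
main(void)
{
	struct map m = { PTHREAD_MUTEX_INITIALIZER, NULL };

	replace_fixed(&m, 0x1000, 0x2000);
	replace_fixed(&m, 0x1000, 0x2000);	/* replaces the first entry */
	return 0;
}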


