Source-Changes-HG archive


[src/trunk]: src/sys/kern simpler fix for the race between shmat() and shmdt():



details:   https://anonhg.NetBSD.org/src/rev/d96a1770ffa5
branches:  trunk
changeset: 455159:d96a1770ffa5
user:      chs <chs%NetBSD.org@localhost>
date:      Wed Oct 09 17:47:13 2019 +0000

description:
simpler fix for the race between shmat() and shmdt():
change shmat() to hold shm_lock until it is completely done.
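
For context, a minimal userland sketch of the locking pattern this change adopts, using pthreads rather than the kernel's mutex/condvar API; the names (struct seg, seg_attach, seg_attach_racy) are hypothetical and stand in for shm_lock, shm_nattch and the mapping step done by uvm_map() in shmat(). It is an illustration of "hold the lock until the operation is completely done", not the actual NetBSD code:

/*
 * Illustrative sketch only -- not NetBSD kernel code.  All names are
 * hypothetical; pthread_mutex_t plays the role of shm_lock.
 */
#include <pthread.h>
#include <stdio.h>

struct seg {
	pthread_mutex_t lock;	/* stands in for shm_lock */
	int nattch;		/* stands in for shm_nattch */
};

/*
 * Racy pattern (roughly what the old code did): the lock is released
 * while the mapping is set up, so a concurrent detach can observe and
 * tear down a half-built attach between the two locked regions.
 */
static void
seg_attach_racy(struct seg *s)
{
	pthread_mutex_lock(&s->lock);
	s->nattch++;
	pthread_mutex_unlock(&s->lock);

	/* ... map the segment with the lock dropped ... */

	pthread_mutex_lock(&s->lock);
	/* finish bookkeeping */
	pthread_mutex_unlock(&s->lock);
}

/*
 * Fixed pattern (what this commit switches shmat() to): hold the lock
 * for the whole operation, so a concurrent detach cannot interleave.
 */
static void
seg_attach(struct seg *s)
{
	pthread_mutex_lock(&s->lock);
	s->nattch++;
	/* ... map the segment while still holding the lock ... */
	pthread_mutex_unlock(&s->lock);
}

int
main(void)
{
	struct seg s = { PTHREAD_MUTEX_INITIALIZER, 0 };

	seg_attach_racy(&s);
	seg_attach(&s);
	printf("nattch = %d\n", s.nattch);
	return 0;
}

With the lock held end to end, the shm_realloc_disable counter and its condvar broadcast become unnecessary, which is why the diff below removes them.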

diffstat:

 sys/kern/sysv_shm.c |  21 +++------------------
 1 files changed, 3 insertions(+), 18 deletions(-)

diffs (73 lines):

diff -r 1e8e6009edaf -r d96a1770ffa5 sys/kern/sysv_shm.c
--- a/sys/kern/sysv_shm.c       Wed Oct 09 17:44:45 2019 +0000
+++ b/sys/kern/sysv_shm.c       Wed Oct 09 17:47:13 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: sysv_shm.c,v 1.140 2019/10/09 17:44:45 chs Exp $       */
+/*     $NetBSD: sysv_shm.c,v 1.141 2019/10/09 17:47:13 chs Exp $       */
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.140 2019/10/09 17:44:45 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.141 2019/10/09 17:47:13 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_sysv.h"
@@ -437,42 +437,30 @@
 
        /*
         * Create a map entry, add it to the list and increase the counters.
-        * The lock will be dropped before the mapping, disable reallocation.
         */
        shmmap_s = shmmap_getprivate(p);
        SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        shmmap_s->nitems++;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_nattch++;
-       shm_realloc_disable++;
 
        /*
-        * Add a reference to the uvm object while we hold the
-        * shm_lock.
+        * Map the segment into the address space.
         */
        uobj = shmseg->_shm_internal;
        uao_reference(uobj);
-       mutex_exit(&shm_lock);
-
-       /*
-        * Drop the shm_lock to map it into the address space, and lock
-        * the memory, if needed (XXX where does this lock memory?).
-        */
        error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
            UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
        if (error)
                goto err_detach;
 
        /* Set the new address, and update the time */
-       mutex_enter(&shm_lock);
        shmmap_se->va = attach_va;
        shmseg->shm_atime = time_second;
-       shm_realloc_disable--;
        retval[0] = attach_va;
        SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
            p->p_vmspace, shmmap_se->shmid, attach_va));
 err:
-       cv_broadcast(&shm_realloc_cv);
        mutex_exit(&shm_lock);
        if (error && shmmap_se) {
                kmem_free(shmmap_se, sizeof(struct shmmap_entry));
@@ -481,10 +469,7 @@
 
 err_detach:
        uao_detach(uobj);
-       mutex_enter(&shm_lock);
        uobj = shm_delete_mapping(shmmap_s, shmmap_se);
-       shm_realloc_disable--;
-       cv_broadcast(&shm_realloc_cv);
        mutex_exit(&shm_lock);
        if (uobj != NULL) {
                uao_detach(uobj);


