Source-Changes-HG archive


[src/trunk]: src/sys/uvm Split out the minherit code into separate functions ...



details:   https://anonhg.NetBSD.org/src/rev/cdc924ac7374
branches:  trunk
changeset: 330691:cdc924ac7374
user:      christos <christos%NetBSD.org@localhost>
date:      Fri Jul 18 12:19:09 2014 +0000

description:
Split out the minherit code into separate functions for readability (allowing
us to indent them properly), and merge the new vm_map_entry creation into
a common function to avoid code duplication. No functional change.
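
For orientation, the reorganized switch in uvmspace_fork() now just dispatches
on the inheritance type and calls the new helpers. The sketch below is
simplified from the diff that follows; the MAP_INHERIT_COPY branch calling
uvm_mapent_forkcopy() is inferred, since the posted diff is truncated before
that hunk:

    switch (old_entry->inheritance) {
    case MAP_INHERIT_NONE:
            /* drop the mapping; the child does not inherit it */
            break;
    case MAP_INHERIT_SHARE:
            /* share amaps and backing objects with the child */
            uvm_mapent_forkshared(new_map, old_map, old_entry);
            break;
    case MAP_INHERIT_COPY:
            /* copy-on-write the mapping (mmap MAP_PRIVATE semantics) */
            uvm_mapent_forkcopy(new_map, old_map, old_entry);
            break;
    }

Both helpers call the common uvm_mapent_clone() to allocate the new entry,
copy the old one, take the amap/object references, and link the entry into
new_map; that is the duplicated block this change factors out.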

diffstat:

 sys/uvm/uvm_map.c |  336 +++++++++++++++++++++++++++--------------------------
 1 files changed, 171 insertions(+), 165 deletions(-)

diffs (truncated from 382 to 300 lines):

diff -r 64c9757cf30a -r cdc924ac7374 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Fri Jul 18 11:14:18 2014 +0000
+++ b/sys/uvm/uvm_map.c Fri Jul 18 12:19:09 2014 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.328 2014/03/05 05:35:55 matt Exp $       */
+/*     $NetBSD: uvm_map.c,v 1.329 2014/07/18 12:19:09 christos Exp $   */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.328 2014/03/05 05:35:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.329 2014/07/18 12:19:09 christos Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -4183,6 +4183,168 @@
        pool_cache_put(&uvm_vmspace_cache, vm);
 }
 
+static struct vm_map_entry *
+uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
+    int flags)
+{
+       struct vm_map_entry *new_entry;
+
+       new_entry = uvm_mapent_alloc(new_map, 0);
+       /* old_entry -> new_entry */
+       uvm_mapent_copy(old_entry, new_entry);
+
+       /* new pmap has nothing wired in it */
+       new_entry->wired_count = 0;
+
+       /*
+        * gain reference to object backing the map (can't
+        * be a submap, already checked this case).
+        */
+
+       if (new_entry->aref.ar_amap)
+               uvm_map_reference_amap(new_entry, flags);
+
+       if (new_entry->object.uvm_obj &&
+           new_entry->object.uvm_obj->pgops->pgo_reference)
+               new_entry->object.uvm_obj->pgops->pgo_reference(
+                       new_entry->object.uvm_obj);
+
+       /* insert entry at end of new_map's entry list */
+       uvm_map_entry_link(new_map, new_map->header.prev,
+           new_entry);
+
+       return new_entry;
+}
+
+/*
+ * share the mapping: this means we want the old and
+ * new entries to share amaps and backing objects.
+ */
+static void
+uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+       /*
+        * if the old_entry needs a new amap (due to prev fork)
+        * then we need to allocate it now so that we have
+        * something we own to share with the new_entry.   [in
+        * other words, we need to clear needs_copy]
+        */
+
+       if (UVM_ET_ISNEEDSCOPY(old_entry)) {
+               /* get our own amap, clears needs_copy */
+               amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
+                   0, 0);
+               /* XXXCDC: WAITOK??? */
+       }
+
+       uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
+}
+
+
+static void
+uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+       struct vm_map_entry *new_entry;
+
+       /*
+        * copy-on-write the mapping (using mmap's
+        * MAP_PRIVATE semantics)
+        *
+        * allocate new_entry, adjust reference counts.
+        * (note that new references are read-only).
+        */
+
+       new_entry = uvm_mapent_clone(new_map, old_entry, 0);
+
+       new_entry->etype |=
+           (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
+
+       /*
+        * the new entry will need an amap.  it will either
+        * need to be copied from the old entry or created
+        * from scratch (if the old entry does not have an
+        * amap).  can we defer this process until later
+        * (by setting "needs_copy") or do we need to copy
+        * the amap now?
+        *
+        * we must copy the amap now if any of the following
+        * conditions hold:
+        * 1. the old entry has an amap and that amap is
+        *    being shared.  this means that the old (parent)
+        *    process is sharing the amap with another
+        *    process.  if we do not clear needs_copy here
+        *    we will end up in a situation where both the
+        *    parent and child process are referring to the
+        *    same amap with "needs_copy" set.  if the
+        *    parent write-faults, the fault routine will
+        *    clear "needs_copy" in the parent by allocating
+        *    a new amap.   this is wrong because the
+        *    parent is supposed to be sharing the old amap
+        *    and the new amap will break that.
+        *
+        * 2. if the old entry has an amap and a non-zero
+        *    wire count then we are going to have to call
+        *    amap_cow_now to avoid page faults in the
+        *    parent process.   since amap_cow_now requires
+        *    "needs_copy" to be clear we might as well
+        *    clear it here as well.
+        *
+        */
+
+       if (old_entry->aref.ar_amap != NULL) {
+               if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
+                   VM_MAPENT_ISWIRED(old_entry)) {
+
+                       amap_copy(new_map, new_entry,
+                           AMAP_COPY_NOCHUNK, 0, 0);
+                       /* XXXCDC: M_WAITOK ... ok? */
+               }
+       }
+
+       /*
+        * if the parent's entry is wired down, then the
+        * parent process does not want page faults on
+        * access to that memory.  this means that we
+        * cannot do copy-on-write because we can't write
+        * protect the old entry.   in this case we
+        * resolve all copy-on-write faults now, using
+        * amap_cow_now.   note that we have already
+        * allocated any needed amap (above).
+        */
+
+       if (VM_MAPENT_ISWIRED(old_entry)) {
+
+               /*
+                * resolve all copy-on-write faults now
+                * (note that there is nothing to do if
+                * the old mapping does not have an amap).
+                */
+               if (old_entry->aref.ar_amap)
+                       amap_cow_now(new_map, new_entry);
+
+       } else {
+               /*
+                * setup mappings to trigger copy-on-write faults
+                * we must write-protect the parent if it has
+                * an amap and it is not already "needs_copy"...
+                * if it is already "needs_copy" then the parent
+                * has already been write-protected by a previous
+                * fork operation.
+                */
+               if (old_entry->aref.ar_amap &&
+                   !UVM_ET_ISNEEDSCOPY(old_entry)) {
+                       if (old_entry->max_protection & VM_PROT_WRITE) {
+                               pmap_protect(old_map->pmap,
+                                   old_entry->start, old_entry->end,
+                                   old_entry->protection & ~VM_PROT_WRITE);
+                       }
+                       old_entry->etype |= UVM_ET_NEEDSCOPY;
+               }
+       }
+}
+
 /*
  *   F O R K   -   m a i n   e n t r y   p o i n t
  */
@@ -4200,7 +4362,6 @@
        struct vm_map *old_map = &vm1->vm_map;
        struct vm_map *new_map;
        struct vm_map_entry *old_entry;
-       struct vm_map_entry *new_entry;
        UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
 
        vm_map_lock(old_map);
@@ -4230,7 +4391,6 @@
 
                switch (old_entry->inheritance) {
                case MAP_INHERIT_NONE:
-
                        /*
                         * drop the mapping, modify size
                         */
@@ -4238,171 +4398,17 @@
                        break;
 
                case MAP_INHERIT_SHARE:
-
-                       /*
-                        * share the mapping: this means we want the old and
-                        * new entries to share amaps and backing objects.
-                        */
-                       /*
-                        * if the old_entry needs a new amap (due to prev fork)
-                        * then we need to allocate it now so that we have
-                        * something we own to share with the new_entry.   [in
-                        * other words, we need to clear needs_copy]
-                        */
-
-                       if (UVM_ET_ISNEEDSCOPY(old_entry)) {
-                               /* get our own amap, clears needs_copy */
-                               amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
-                                   0, 0);
-                               /* XXXCDC: WAITOK??? */
-                       }
-
-                       new_entry = uvm_mapent_alloc(new_map, 0);
-                       /* old_entry -> new_entry */
-                       uvm_mapent_copy(old_entry, new_entry);
-
-                       /* new pmap has nothing wired in it */
-                       new_entry->wired_count = 0;
-
-                       /*
-                        * gain reference to object backing the map (can't
-                        * be a submap, already checked this case).
-                        */
-
-                       if (new_entry->aref.ar_amap)
-                               uvm_map_reference_amap(new_entry, AMAP_SHARED);
-
-                       if (new_entry->object.uvm_obj &&
-                           new_entry->object.uvm_obj->pgops->pgo_reference)
-                               new_entry->object.uvm_obj->
-                                   pgops->pgo_reference(
-                                       new_entry->object.uvm_obj);
-
-                       /* insert entry at end of new_map's entry list */
-                       uvm_map_entry_link(new_map, new_map->header.prev,
-                           new_entry);
-
+                       uvm_mapent_forkshared(new_map, old_map, old_entry);
                        break;
 
                case MAP_INHERIT_COPY:
-
-                       /*
-                        * copy-on-write the mapping (using mmap's
-                        * MAP_PRIVATE semantics)
-                        *
-                        * allocate new_entry, adjust reference counts.
-                        * (note that new references are read-only).
-                        */
-
-                       new_entry = uvm_mapent_alloc(new_map, 0);
-                       /* old_entry -> new_entry */
-                       uvm_mapent_copy(old_entry, new_entry);
-
-                       if (new_entry->aref.ar_amap)
-                               uvm_map_reference_amap(new_entry, 0);
-
-                       if (new_entry->object.uvm_obj &&
-                           new_entry->object.uvm_obj->pgops->pgo_reference)
-                               new_entry->object.uvm_obj->pgops->pgo_reference
-                                   (new_entry->object.uvm_obj);
-
-                       /* new pmap has nothing wired in it */
-                       new_entry->wired_count = 0;
-
-                       new_entry->etype |=
-                           (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
-                       uvm_map_entry_link(new_map, new_map->header.prev,
-                           new_entry);
-
-                       /*
-                        * the new entry will need an amap.  it will either
-                        * need to be copied from the old entry or created
-                        * from scratch (if the old entry does not have an
-                        * amap).  can we defer this process until later
-                        * (by setting "needs_copy") or do we need to copy
-                        * the amap now?
-                        *
-                        * we must copy the amap now if any of the following
-                        * conditions hold:
-                        * 1. the old entry has an amap and that amap is
-                        *    being shared.  this means that the old (parent)
-                        *    process is sharing the amap with another
-                        *    process.  if we do not clear needs_copy here
-                        *    we will end up in a situation where both the
-                        *    parent and child process are refering to the


