Subject: Re: lock bug in getnewvnode, or uvm_km_kmemalloc/uvm_map ?
To: Chuck Silvers <chuq@chuq.com>
From: Manuel Bouyer <bouyer@antioche.eu.org>
List: tech-kern
Date: 11/29/2002 21:56:37
--IJpNTDwzlM2Ie8A6
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline

On Fri, Nov 29, 2002 at 08:53:36AM -0800, Chuck Silvers wrote:
> > but the code may be less efficient; I guess the compiler will have to do a
> > | operation, when it would be expanded to a constant otherwise.
> 
> in this case, I'll argue that code clarity is worth a few extra instructions,
> since the runtime difference won't be detectable.  if you really care about
> this, you might want to think about enhancing gcc to detect this pattern and
> do some smarter constant folding.  maybe it does it already, I haven't looked
> at the generated code.

Neither have I. OK, I'll go this route, using 'const' as Jason suggested.
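
Something like this, roughly (just a sketch; the attached diff still declares
mflag as a plain int):

	const int mflag = (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT :
	    (M_WAITOK | M_CANFAIL);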

> 
> you're still reversing the direction in the third case below.

Oops, thanks again!
And this is what caused my Opera to crash!

I think this time I got the directions right.
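
To keep them straight, the convention (as used in the uvm_map.c hunks
below) is roughly:

	/* growing into the gap after prev_entry: extend its amap forwards */
	amap_extend(prev_entry, size, amapwaitflag | AMAP_EXTEND_FORWARDS);

	/* growing into the gap before prev_entry->next: extend its amap backwards */
	amap_extend(prev_entry->next, size, amapwaitflag | AMAP_EXTEND_BACKWARDS);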

-- 
Manuel Bouyer <bouyer@antioche.eu.org>
     NetBSD: 23 years of experience will always make the difference
--

--IJpNTDwzlM2Ie8A6
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename=diff

Index: uvm_amap.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_amap.c,v
retrieving revision 1.47
diff -u -r1.47 uvm_amap.c
--- uvm_amap.c	2002/11/15 17:30:35	1.47
+++ uvm_amap.c	2002/11/29 20:56:03
@@ -284,10 +284,10 @@
  *    one (thus it can't be shared)
  */
 int
-amap_extend(entry, addsize, forwards)
+amap_extend(entry, addsize, flags)
 	struct vm_map_entry *entry;
 	vsize_t addsize;
-	int forwards;
+	int flags;
 {
 	struct vm_amap *amap = entry->aref.ar_amap;
 	int slotoff = entry->aref.ar_pageoff;
@@ -298,10 +298,13 @@
 #endif
 	int i, *newsl, *newbck, *oldsl, *oldbck;
 	struct vm_anon **newover, **oldover;
+	int mflag = (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT :
+		        (M_WAITOK | M_CANFAIL);
+
 	UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
 
-	UVMHIST_LOG(maphist, "  (entry=0x%x, addsize=0x%x, forwards=%d)",
-	    entry, addsize, forwards, 0);
+	UVMHIST_LOG(maphist, "  (entry=0x%x, addsize=0x%x, flags=0x%x)",
+	    entry, addsize, flags, 0);
 
 	/*
 	 * first, determine how many slots we need in the amap.  don't
@@ -312,7 +315,7 @@
 	amap_lock(amap);
 	AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
 	AMAP_B2SLOT(slotadd, addsize);			/* slots to add */
-	if (forwards) {
+	if (flags & AMAP_EXTEND_FORWARDS) {
 		slotneed = slotoff + slotmapped + slotadd;
 		slotadj = 0;
 		slotspace = 0;
@@ -329,7 +332,7 @@
 	 * adding.
 	 */
 
-	if (forwards) {
+	if (flags & AMAP_EXTEND_FORWARDS) {
 		if (amap->am_nslot >= slotneed) {
 #ifdef UVM_AMAP_PPREF
 			if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -366,7 +369,7 @@
 	 */
 
 	if (amap->am_maxslot >= slotneed) {
-		if (forwards) {
+		if (flags & AMAP_EXTEND_FORWARDS) {
 #ifdef UVM_AMAP_PPREF
 			if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
 				if ((slotoff + slotmapped) < amap->am_nslot)
@@ -461,15 +464,12 @@
 #ifdef UVM_AMAP_PPREF
 	newppref = NULL;
 	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
-		newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
-		    M_WAITOK | M_CANFAIL);
+		newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
 #endif
-	newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP,
-	    M_WAITOK | M_CANFAIL);
-	newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP,
-	    M_WAITOK | M_CANFAIL);
+	newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
+	newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
 	newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
-	    M_WAITOK | M_CANFAIL);
+		    mflag);
 	if (newsl == NULL || newbck == NULL || newover == NULL) {
 #ifdef UVM_AMAP_PPREF
 		if (newppref != NULL) {
@@ -495,12 +495,12 @@
 	 */
 
 	slotadded = slotalloc - amap->am_nslot;
-	if (!forwards)
+	if (!(flags & AMAP_EXTEND_FORWARDS))
 		slotspace = slotalloc - slotmapped;
 
 	/* do am_slots */
 	oldsl = amap->am_slots;
-	if (forwards)
+	if (flags & AMAP_EXTEND_FORWARDS)
 		memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
 	else
 		for (i = 0; i < amap->am_nused; i++)
@@ -509,7 +509,7 @@
 
 	/* do am_anon */
 	oldover = amap->am_anon;
-	if (forwards) {
+	if (flags & AMAP_EXTEND_FORWARDS) {
 		memcpy(newover, oldover,
 		    sizeof(struct vm_anon *) * amap->am_nslot);
 		memset(newover + amap->am_nslot, 0,
@@ -524,7 +524,7 @@
 
 	/* do am_bckptr */
 	oldbck = amap->am_bckptr;
-	if (forwards)
+	if (flags & AMAP_EXTEND_FORWARDS)
 		memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
 	else
 		memcpy(newbck + slotspace, oldbck + slotoff,
@@ -535,7 +535,7 @@
 	/* do ppref */
 	oldppref = amap->am_ppref;
 	if (newppref) {
-		if (forwards) {
+		if (flags & AMAP_EXTEND_FORWARDS) {
 			memcpy(newppref, oldppref,
 			    sizeof(int) * amap->am_nslot);
 			memset(newppref + amap->am_nslot, 0,
@@ -545,10 +545,11 @@
 			    sizeof(int) * slotmapped);
 		}
 		amap->am_ppref = newppref;
-		if (forwards && (slotoff + slotmapped) < amap->am_nslot)
+		if ((flags & AMAP_EXTEND_FORWARDS) &&
+		    (slotoff + slotmapped) < amap->am_nslot)
 			amap_pp_adjref(amap, slotoff + slotmapped,
 			    (amap->am_nslot - (slotoff + slotmapped)), 1);
-		if (forwards)
+		if (flags & AMAP_EXTEND_FORWARDS)
 			pp_setreflen(newppref, amap->am_nslot, 1,
 			    slotneed - amap->am_nslot);
 		else {
@@ -564,7 +565,7 @@
 #endif
 
 	/* update master values */
-	if (forwards)
+	if (flags & AMAP_EXTEND_FORWARDS)
 		amap->am_nslot = slotneed;
 	else {
 		entry->aref.ar_pageoff = slotspace - slotadd;
Index: uvm_amap.h
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_amap.h,v
retrieving revision 1.19
diff -u -r1.19 uvm_amap.h
--- uvm_amap.h	2002/11/14 17:58:48	1.19
+++ uvm_amap.h	2002/11/29 20:56:03
@@ -137,10 +137,11 @@
 #define AMAP_REFALL	0x2	/* amap_ref: reference entire amap */
 
 /*
- * amap_extend directions
+ * amap_extend flags
  */
-#define AMAP_EXTEND_BACKWARDS	0	/* add "size" to start of map */
-#define AMAP_EXTEND_FORWARDS	1	/* add "size" to end of map */
+#define AMAP_EXTEND_BACKWARDS	0x00	/* add "size" to start of map */
+#define AMAP_EXTEND_FORWARDS	0x01	/* add "size" to end of map */
+#define AMAP_EXTEND_NOWAIT	0x02	/* not allowed to sleep */
 
 #endif /* _KERNEL */
 
Index: uvm_km.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_km.c,v
retrieving revision 1.59
diff -u -r1.59 uvm_km.c
--- uvm_km.c	2002/10/05 17:26:06	1.59
+++ uvm_km.c	2002/11/29 20:56:05
@@ -394,7 +394,8 @@
 
 	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
 	      0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
-			  UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
+			  UVM_ADV_RANDOM,
+			  (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))))
 			!= 0)) {
 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
 		return(0);
@@ -745,7 +746,8 @@
 	 */
 
 	s = splvm();
-	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
+	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
+	    waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
 	splx(s);
 	return (va);
 #endif /* PMAP_MAP_POOLPAGE */
Index: uvm_map.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_map.c,v
retrieving revision 1.125
diff -u -r1.125 uvm_map.c
--- uvm_map.c	2002/11/14 17:58:48	1.125
+++ uvm_map.c	2002/11/29 20:56:11
@@ -190,7 +190,7 @@
  * local prototypes
  */
 
-static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *));
+static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *, int));
 static void uvm_mapent_copy __P((struct vm_map_entry *, struct vm_map_entry *));
 static void uvm_mapent_free __P((struct vm_map_entry *));
 static void uvm_map_entry_unwire __P((struct vm_map *, struct vm_map_entry *));
@@ -206,11 +206,13 @@
  */
 
 static __inline struct vm_map_entry *
-uvm_mapent_alloc(map)
+uvm_mapent_alloc(map, flags)
 	struct vm_map *map;
+	int flags;
 {
 	struct vm_map_entry *me;
 	int s;
+	int pflags = (flags & UVM_KMF_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
 	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
 	if (map->flags & VM_MAP_INTRSAFE || cold) {
@@ -220,17 +222,21 @@
 		if (me) uvm.kentry_free = me->next;
 		simple_unlock(&uvm.kentry_lock);
 		splx(s);
-		if (me == NULL) {
+		if (__predict_false(me == NULL)) {
 			panic("uvm_mapent_alloc: out of static map entries, "
 			      "check MAX_KMAPENT (currently %d)",
 			      MAX_KMAPENT);
 		}
 		me->flags = UVM_MAP_STATIC;
 	} else if (map == kernel_map) {
-		me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
+		me = pool_get(&uvm_map_entry_kmem_pool, pflags);
+		if (__predict_false(me == NULL))
+			return NULL;
 		me->flags = UVM_MAP_KMEM;
 	} else {
-		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
+		me = pool_get(&uvm_map_entry_pool, pflags);
+		if (__predict_false(me == NULL))
+			return NULL;
 		me->flags = 0;
 	}
 
@@ -421,7 +427,7 @@
 	 * starting address.
 	 */
 
-	new_entry = uvm_mapent_alloc(map);
+	new_entry = uvm_mapent_alloc(map, 0);
 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
 
 	new_entry->end = start;
@@ -471,7 +477,7 @@
 	 *	AFTER the specified entry
 	 */
 
-	new_entry = uvm_mapent_alloc(map);
+	new_entry = uvm_mapent_alloc(map, 0);
 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
 
 	new_entry->start = entry->end = end;
@@ -537,6 +543,8 @@
 	uvm_flag_t flags;
 {
 	struct vm_map_entry *prev_entry, *new_entry;
+	const int amapwaitflag = (flags & UVM_KMF_NOWAIT) ?
+	    AMAP_EXTEND_NOWAIT : 0;
 	vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
 	    UVM_MAXPROTECTION(flags);
 	vm_inherit_t inherit = UVM_INHERIT(flags);
@@ -572,7 +580,9 @@
 
 	new_entry = NULL;
 	if (map == pager_map) {
-		new_entry = uvm_mapent_alloc(map);
+		new_entry = uvm_mapent_alloc(map, (flags & UVM_KMF_NOWAIT));
+		 if (__predict_false(new_entry == NULL))
+			return ENOMEM;
 	}
 
 	/*
@@ -680,8 +690,8 @@
 		}
 
 		if (prev_entry->aref.ar_amap) {
-			error = amap_extend(prev_entry, size,
-			    AMAP_EXTEND_FORWARDS);
+			error = amap_extend(prev_entry, size, 
+			    amapwaitflag | AMAP_EXTEND_FORWARDS);
 			if (error) {
 				vm_map_unlock(map);
 				if (new_entry) {
@@ -779,7 +789,7 @@
 				if (amap_extend(prev_entry,
 				    prev_entry->next->end -
 				    prev_entry->next->start,
-				    AMAP_EXTEND_FORWARDS))
+				    amapwaitflag | AMAP_EXTEND_FORWARDS))
 				goto nomerge;
 			}
 
@@ -797,7 +807,7 @@
 				if (amap_extend(prev_entry->next,
 				    prev_entry->end -
 				    prev_entry->start + size,
-				    AMAP_EXTEND_BACKWARDS))
+				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
 				goto nomerge;
 			}
 		} else {
@@ -807,7 +817,7 @@
 			 */
 			if (prev_entry->next->aref.ar_amap) {
 				error = amap_extend(prev_entry->next, size,
-				    AMAP_EXTEND_BACKWARDS);
+				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
 				if (error) {
 					vm_map_unlock(map);
 					if (new_entry) {
@@ -879,7 +889,12 @@
 		 */
 
 		if (new_entry == NULL) {
-			new_entry = uvm_mapent_alloc(map);
+			new_entry = uvm_mapent_alloc(map,
+				(flags & UVM_KMF_NOWAIT));
+			if (__predict_false(new_entry == NULL)) {
+				vm_map_unlock(map);
+				return ENOMEM;
+			}
 		}
 		new_entry->start = *startp;
 		new_entry->end = new_entry->start + size;
@@ -911,7 +926,13 @@
 
 			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
 				UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
-			struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
+			struct vm_amap *amap = amap_alloc(size, to_add,
+			    (flags & UVM_KMF_NOWAIT) ? M_NOWAIT : M_WAITOK);
+			if (__predict_false(amap == NULL)) {
+				vm_map_unlock(map);
+				uvm_mapent_free(new_entry);
+				return ENOMEM;
+			}
 			new_entry->aref.ar_pageoff = 0;
 			new_entry->aref.ar_amap = amap;
 		} else {
@@ -1697,7 +1718,7 @@
 		oldoffset = (entry->start + fudge) - start;
 
 		/* allocate a new map entry */
-		newentry = uvm_mapent_alloc(dstmap);
+		newentry = uvm_mapent_alloc(dstmap, 0);
 		if (newentry == NULL) {
 			error = ENOMEM;
 			goto bad;
@@ -3178,7 +3199,7 @@
 				/* XXXCDC: WAITOK??? */
 			}
 
-			new_entry = uvm_mapent_alloc(new_map);
+			new_entry = uvm_mapent_alloc(new_map, 0);
 			/* old_entry -> new_entry */
 			uvm_mapent_copy(old_entry, new_entry);
 
@@ -3215,7 +3236,7 @@
 			 * (note that new references are read-only).
 			 */
 
-			new_entry = uvm_mapent_alloc(new_map);
+			new_entry = uvm_mapent_alloc(new_map, 0);
 			/* old_entry -> new_entry */
 			uvm_mapent_copy(old_entry, new_entry);
 

--IJpNTDwzlM2Ie8A6--