Subject: rbtree for vm_map
To: None <tech-kern@netbsd.org>
From: YAMAMOTO Takashi <yamt@mwd.biglobe.ne.jp>
List: tech-kern
Date: 08/23/2003 16:42:22
--NextPart-20030823163036-0093400
Content-Type: Text/Plain; charset=us-ascii

hi,

i'd like to bring in uvm_map rbtree stuff from OpenBSD.
(patch attached)

any objection?

YAMAMOTO Takashi


--NextPart-20030823163036-0093400
Content-Type: Text/Plain; charset=us-ascii
Content-Disposition: attachment; filename="uvm_map.rb.diff"

Index: uvm_map.c
===================================================================
--- uvm_map.c	(revision 287)
+++ uvm_map.c	(working copy)
@@ -1,4 +1,5 @@
 /*	$NetBSD: uvm_map.c,v 1.136 2003/04/09 21:39:29 thorpej Exp $	*/
+/*	$OpenBSD: uvm_map.c,v 1.60 2003/06/29 17:31:12 avsm Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -85,6 +86,8 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 
 #include <sys/pool.h>
 #include <sys/kernel.h>
 #include <sys/mount.h>
+/* XXX uvm_map.h is included by vnode.h */
+#define	RB_AUGMENT(x)	uvm_rb_augment(x)
 #include <sys/vnode.h>
 
 #ifdef SYSVSHM
@@ -142,11 +145,13 @@ vaddr_t uvm_maxkaddr;
  * => map must be locked
  */
 #define uvm_map_entry_link(map, after_where, entry) do { \
+	KASSERT(entry->start < entry->end); \
 	(map)->nentries++; \
 	(entry)->prev = (after_where); \
 	(entry)->next = (after_where)->next; \
 	(entry)->prev->next = (entry); \
 	(entry)->next->prev = (entry); \
+	uvm_rb_insert((map), (entry)); \
 } while (/*CONSTCOND*/ 0)
 
 /*
@@ -158,6 +163,7 @@ vaddr_t uvm_maxkaddr;
 	(map)->nentries--; \
 	(entry)->next->prev = (entry)->prev; \
 	(entry)->prev->next = (entry)->next; \
+	uvm_rb_remove((map), (entry)); \
 } while (/*CONSTCOND*/ 0)
 
 /*
@@ -198,6 +204,177 @@ static void uvm_map_entry_unwire __P((st
 static void uvm_map_reference_amap __P((struct vm_map_entry *, int));
 static void uvm_map_unreference_amap __P((struct vm_map_entry *, int));
 
+int uvm_map_spacefits(struct vm_map *, vaddr_t *, vsize_t,
+    struct vm_map_entry *, voff_t, vsize_t);
+
+#ifdef DEBUG
+int _uvm_tree_sanity(struct vm_map *, const char *);
+#endif /* DEBUG */
+static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);
+
+static __inline int
+uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
+{
+	if (a->start < b->start)
+		return (-1);
+	else if (a->start > b->start)
+		return (1);
+	
+	return (0);
+}
+
+
+static __inline void
+uvm_rb_augment(struct vm_map_entry *entry)
+{
+	entry->space = uvm_rb_subtree_space(entry);
+}
+
+RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
+
+RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
+
+static __inline vsize_t
+uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
+{
+	struct vm_map_entry * next;
+	vaddr_t space;
+
+	if ((next = entry->next) == &map->header)
+		space = map->max_offset - entry->end;
+	else {
+		KASSERT(next);
+		space = next->start - entry->end;
+	}
+	return (space);
+}
+		
+static vsize_t
+uvm_rb_subtree_space(const struct vm_map_entry *entry)
+{
+	vaddr_t space, tmp;
+
+	space = entry->ownspace;
+	if (RB_LEFT(entry, rb_entry)) {
+		tmp = RB_LEFT(entry, rb_entry)->space;
+		if (tmp > space)
+			space = tmp;
+	}
+
+	if (RB_RIGHT(entry, rb_entry)) {
+		tmp = RB_RIGHT(entry, rb_entry)->space;
+		if (tmp > space)
+			space = tmp;
+	}
+
+	return (space);
+}
+
+static __inline void
+uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
+{
+	/* We need to traverse to the very top */
+	do {
+		entry->ownspace = uvm_rb_space(map, entry);
+		entry->space = uvm_rb_subtree_space(entry);
+	} while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
+}
+
+static __inline void
+uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
+{
+	vaddr_t space = uvm_rb_space(map, entry);
+	struct vm_map_entry * tmp;
+
+	entry->ownspace = entry->space = space;
+	tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
+#ifdef DIAGNOSTIC
+	if (tmp != NULL)
+		panic("uvm_rb_insert: duplicate entry?");
+#endif
+	uvm_rb_fixup(map, entry);
+	if (entry->prev != &map->header)
+		uvm_rb_fixup(map, entry->prev);
+}
+
+static __inline void
+uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
+{
+	struct vm_map_entry * parent;
+
+	parent = RB_PARENT(entry, rb_entry);
+	RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
+	if (entry->prev != &map->header)
+		uvm_rb_fixup(map, entry->prev);
+	if (parent)
+		uvm_rb_fixup(map, parent);
+}
+
+#ifdef DEBUG
+#define uvm_tree_sanity(x,y) _uvm_tree_sanity(x,y)
+#else
+#define uvm_tree_sanity(x,y)
+#endif
+
+int
+_uvm_tree_sanity(struct vm_map *map, const char *name)
+{
+	struct vm_map_entry *tmp, *trtmp;
+	int n = 0, i = 1;
+
+	RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
+		if (tmp->ownspace != uvm_rb_space(map, tmp)) {
+			printf("%s: %d/%d ownspace %lx != %lx %s\n",
+			    name, n + 1, map->nentries,
+			    (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
+			    tmp->next == &map->header ? "(last)" : "");
+			goto error;
+		}
+	}
+	trtmp = NULL;
+	RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
+		if (tmp->space != uvm_rb_subtree_space(tmp)) {
+			printf("%s: space %lx != %lx\n",
+			    name, (ulong)tmp->space,
+			    (ulong)uvm_rb_subtree_space(tmp));
+			goto error;
+		}
+		if (trtmp != NULL && trtmp->start >= tmp->start) {
+			printf("%s: corrupt: 0x%lx >= 0x%lx\n",
+			    name, trtmp->start, tmp->start);
+			goto error;
+		}
+		n++;
+
+		trtmp = tmp;
+	}
+
+	if (n != map->nentries) {
+		printf("%s: nentries: %d vs %d\n",
+		    name, n, map->nentries);
+		goto error;
+	}
+
+	for (tmp = map->header.next; tmp && tmp != &map->header;
+	    tmp = tmp->next, i++) {
+		trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
+		if (trtmp != tmp) {
+			printf("%s: lookup: %d: %p - %p: %p\n",
+			    name, i, tmp, trtmp,
+			    RB_PARENT(tmp, rb_entry));
+			goto error;
+		}
+	}
+
+	return (0);
+ error:
+#ifdef	DDB
+	/* handy breakpoint location for error case */
+	__asm(".globl treesanity_label\ntreesanity_label:");
+#endif
+	return (-1);
+}
+
 /*
  * local inlines
  */
@@ -422,6 +599,8 @@ uvm_map_clip_start(map, entry, start)
 
 	/* uvm_map_simplify_entry(map, entry); */ /* XXX */
 
+	uvm_tree_sanity(map, "clip_start entry");
+
 	/*
 	 * Split off the front portion.  note that we must insert the new
 	 * entry BEFORE this one, so that this entry has the specified
@@ -435,6 +614,8 @@ uvm_map_clip_start(map, entry, start)
 	new_adj = start - new_entry->start;
 	if (entry->object.uvm_obj)
 		entry->offset += new_adj;	/* shift start over */
+
+	/* Does not change order for the RB tree */
 	entry->start = start;
 
 	if (new_entry->aref.ar_amap) {
@@ -453,6 +634,8 @@ uvm_map_clip_start(map, entry, start)
 			entry->object.uvm_obj->pgops->pgo_reference(
 			    entry->object.uvm_obj);
 	}
+
+	uvm_tree_sanity(map, "clip_start leave");
 }
 
 /*
@@ -473,6 +656,7 @@ uvm_map_clip_end(map, entry, end)
 	struct vm_map_entry *	new_entry;
 	vaddr_t new_adj; /* #bytes we move start forward */
 
+	uvm_tree_sanity(map, "clip_end entry");
 	/*
 	 *	Create a new entry and insert it
 	 *	AFTER the specified entry
@@ -489,6 +673,8 @@ uvm_map_clip_end(map, entry, end)
 	if (entry->aref.ar_amap)
 		amap_splitref(&entry->aref, &new_entry->aref, new_adj);
 
+	uvm_rb_fixup(map, entry);
+
 	uvm_map_entry_link(map, entry, new_entry);
 
 	if (UVM_ET_ISSUBMAP(entry)) {
@@ -501,6 +687,8 @@ uvm_map_clip_end(map, entry, end)
 			entry->object.uvm_obj->pgops->pgo_reference(
 			    entry->object.uvm_obj);
 	}
+
+	uvm_tree_sanity(map, "clip_end leave");
 }
 
 
@@ -566,6 +754,13 @@ uvm_map(map, startp, size, uobj, uoffset
 	    (map->flags & VM_MAP_INTRSAFE));
 
 	/*
+	 * zero-sized mapping doesn't make any sense.
+	 */
+	KASSERT(size > 0);
+
+	uvm_tree_sanity(map, "map entry");
+
+	/*
 	 * check sanity of protection code
 	 */
 
@@ -718,8 +913,11 @@ uvm_map(map, startp, size, uobj, uoffset
 			uobj->pgops->pgo_detach(uobj);
 
 		prev_entry->end += size;
+		uvm_rb_fixup(map, prev_entry);
 		map->size += size;
 
+		uvm_tree_sanity(map, "map backmerged");
+
 		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
 		if (new_entry) {
 			uvm_mapent_free(new_entry);
@@ -865,11 +1063,15 @@ forwardmerge:
 			uvm_mapent_free(dead);
 		} else {
 			prev_entry->next->start -= size;
+			if (prev_entry != &map->header)
+				uvm_rb_fixup(map, prev_entry);
 			map->size += size;
 			if (uobj)
 				prev_entry->next->offset = uoffset;
 		}
 
+		uvm_tree_sanity(map, "map forwardmerged");
+
 		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
 		if (new_entry) {
 			uvm_mapent_free(new_entry);
@@ -974,6 +1176,7 @@ uvm_map_lookup_entry(map, address, entry
 {
 	struct vm_map_entry *cur;
 	struct vm_map_entry *last;
+	boolean_t use_tree = FALSE;
 	UVMHIST_FUNC("uvm_map_lookup_entry");
 	UVMHIST_CALLED(maphist);
 
@@ -1015,14 +1218,50 @@ uvm_map_lookup_entry(map, address, entry
 			    cur, 0, 0, 0);
 			return (TRUE);
 		}
+
+		if (map->nentries > 30)
+			use_tree = TRUE;
 	} else {
 
+#if 0
 	    	/*
 		 * go from start to hint, *inclusively*
 		 */
 
 		last = cur->next;
 		cur = map->header.next;
+#endif
+		/*
+		 * invalid hint.  use tree.
+		 */
+		use_tree = TRUE;
+	}
+
+	uvm_tree_sanity(map, __func__);
+
+	if (use_tree) {
+		struct vm_map_entry *prev = &map->header;
+		cur = RB_ROOT(&map->rbhead);
+
+		/*
+		 * Simple lookup in the tree.  Happens when the hint is
+		 * invalid, or nentries reach a threshold.
+		 */
+		while (cur) {
+			if (address >= cur->start) {
+				if (address < cur->end) {
+					*entry = cur;
+					SAVE_HINT(map, map->hint, cur);
+					return (TRUE);
+				}
+				prev = cur;
+				cur = RB_RIGHT(cur, rb_entry);
+			} else
+				cur = RB_LEFT(cur, rb_entry);
+		}
+		*entry = prev;
+		UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
+		return (FALSE);
 	}
 
 	/*
@@ -1054,6 +1293,41 @@ uvm_map_lookup_entry(map, address, entry
 }
 
 /*
+ * Checks if the address pointed to by phint fits into the empty
+ * space before the vm_map_entry after.  Takes alignment and
+ * offset into consideration.
+ */
+
+int
+uvm_map_spacefits(struct vm_map *map, vaddr_t *phint, vsize_t length,
+    struct vm_map_entry *after, voff_t uoffset, vsize_t align)
+{
+	vaddr_t hint = *phint;
+	vaddr_t end;
+
+#ifdef PMAP_PREFER
+	/*
+	 * push hint forward as needed to avoid VAC alias problems.
+	 * we only do this if a valid offset is specified.
+	 */
+	if (uoffset != UVM_UNKNOWN_OFFSET)
+		PMAP_PREFER(uoffset, &hint);
+#endif
+	if (align != 0)
+		if ((hint & (align - 1)) != 0)
+			hint = roundup(hint, align);
+	*phint = hint;
+
+	end = hint + length;
+	if (end > map->max_offset || end < hint)
+		return (FALSE);
+	if (after != NULL && after != &map->header && after->start < end)
+		return (FALSE);
+	
+	return (TRUE);
+}
+
+/*
  * uvm_map_findspace: find "length" sized space in "map".
  *
  * => "hint" is a hint about where we want it, unless FINDSPACE_FIXED is
@@ -1078,6 +1352,7 @@ uvm_map_findspace(map, hint, length, res
 	int flags;
 {
 	struct vm_map_entry *entry, *next, *tmp;
+	struct vm_map_entry *child, *prev = NULL;
 	vaddr_t end, orig_hint;
 	const int topdown = map->flags & VM_MAP_TOPDOWN;
 	UVMHIST_FUNC("uvm_map_findspace");
@@ -1088,6 +1363,8 @@ uvm_map_findspace(map, hint, length, res
 	KASSERT((align & (align - 1)) == 0);
 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
 
+	uvm_tree_sanity(map, "map_findspace entry");
+
 	/*
 	 * remember the original hint.  if we are aligning, then we
 	 * may have to try again with no alignment constraint if
@@ -1145,11 +1422,108 @@ uvm_map_findspace(map, hint, length, res
 		} else if ((tmp->next == &map->header ||
 			    tmp->next->start >= hint + length)) {
 			entry = tmp;
-			goto quickfind;
+			goto found;
 		}
 		entry = tmp;
 	}
 
+	if (flags & UVM_FLAG_FIXED) {
+		end = hint + length;
+		if (end > map->max_offset || end < hint) {
+			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
+			goto error;
+		}
+		next = entry->next;
+		if (next == &map->header || next->start >= end)
+			goto found;
+		UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
+		return(NULL); /* only one shot at it ... */
+	}
+
+	/* Try to find the space in the red-black tree */
+
+	/* Check slot before any entry */
+	if (uvm_map_spacefits(map, &hint, length, entry->next, uoffset, align))
+		goto found;
+	
+	/* If there is not enough space in the whole tree, we fail */
+	tmp = RB_ROOT(&map->rbhead);
+	if (tmp == NULL || tmp->space < length)
+		goto error;
+
+	/* Find an entry close to hint that has enough space */
+	for (; tmp;) {
+		if (tmp->end >= hint &&
+		    (prev == NULL || tmp->end < prev->end)) {
+			if (tmp->ownspace >= length)
+				prev = tmp;
+			else if ((child = RB_RIGHT(tmp, rb_entry)) != NULL &&
+			    child->space >= length)
+				prev = tmp;
+		}
+		if (tmp->end < hint)
+			child = RB_RIGHT(tmp, rb_entry);
+		else if (tmp->end > hint)
+			child = RB_LEFT(tmp, rb_entry);
+		else {
+			if (tmp->ownspace >= length)
+				break;
+			child = RB_RIGHT(tmp, rb_entry);
+		}
+		if (child == NULL || child->space < length)
+			break;
+		tmp = child;
+	}
+	
+	if (tmp != NULL && hint < tmp->end + tmp->ownspace) {
+		/* 
+		 * Check if the entry that we found satisfies the
+		 * space requirement
+		 */
+		if (hint < tmp->end)
+			hint = tmp->end;
+		if (uvm_map_spacefits(map, &hint, length, tmp->next, uoffset,
+			align)) {
+			entry = tmp;
+			goto found;
+		} else if (tmp->ownspace >= length)
+			goto listsearch;
+	}
+	if (prev == NULL)
+		goto error;
+	
+	hint = prev->end;
+	if (uvm_map_spacefits(map, &hint, length, prev->next, uoffset,
+		align)) {
+		entry = prev;
+		goto found;
+	} else if (prev->ownspace >= length)
+		goto listsearch;
+	
+	tmp = RB_RIGHT(prev, rb_entry);
+	for (;;) {
+		KASSERT(tmp && tmp->space >= length);
+		child = RB_LEFT(tmp, rb_entry);
+		if (child && child->space >= length) {
+			tmp = child;
+			continue;
+		}
+		if (tmp->ownspace >= length)
+			break;
+		tmp = RB_RIGHT(tmp, rb_entry);
+	}
+	
+	hint = tmp->end;
+	if (uvm_map_spacefits(map, &hint, length, tmp->next, uoffset, align)) {
+		entry = tmp;
+		goto found;
+	}
+
+	/* 
+	 * The tree fails to find an entry because of offset or alignment
+	 * restrictions.  Search the list instead.
+	 */
+ listsearch:
 	/*
 	 * Look through the rest of the map, trying to fit a new region in
 	 * the gap between existing regions, or after the very last region.
@@ -1193,14 +1567,7 @@ uvm_map_findspace(map, hint, length, res
 		end = hint + length;
 		if (end > map->max_offset || end < hint) {
 			UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
-			if (align != 0) {
-				UVMHIST_LOG(maphist,
-				    "calling recursively, no align",
-				    0,0,0,0);
-				return (uvm_map_findspace(map, orig_hint,
-				    length, result, uobj, uoffset, 0, flags));
-			}
-			return (NULL);
+			goto error;
 		}
 		if (!topdown || next == NULL) {
 			next = entry->next;
@@ -1215,13 +1582,23 @@ uvm_map_findspace(map, hint, length, res
 			return(NULL); /* only one shot at it ... */
 		}
 	}
- quickfind:
+ found:
 	SAVE_HINT(map, map->hint, entry);
 	if (topdown && entry->start >= hint + length)
 		entry = entry->prev;
 	*result = hint;
 	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
 	return (entry);
+
+ error:
+	if (align != 0) {
+		UVMHIST_LOG(maphist,
+		    "calling recursively, no align",
+		    0,0,0,0);
+		return (uvm_map_findspace(map, orig_hint,
+		    length, result, uobj, uoffset, 0, flags));
+	}
+	return (NULL);
 }
 
 /*
@@ -1251,6 +1628,8 @@ uvm_unmap_remove(map, start, end, entry_
 	    map, start, end, 0);
 	VM_MAP_RANGE_CHECK(map, start, end);
 
+	uvm_tree_sanity(map, "unmap_remove entry");
+
 	/*
 	 * find first entry
 	 */
@@ -1401,6 +1780,8 @@ uvm_unmap_remove(map, start, end, entry_
 		pmap_update(vm_map_pmap(map));
 	}
 
+	uvm_tree_sanity(map, "unmap_remove leave");
+
 	/*
 	 * now we've cleaned up the map and are ready for the caller to drop
 	 * references to the mapped objects.
@@ -1522,6 +1903,8 @@ uvm_map_replace(map, start, end, newents
 {
 	struct vm_map_entry *oldent, *last;
 
+	uvm_tree_sanity(map, "map_replace entry");
+
 	/*
 	 * first find the blank map entry at the specified address
 	 */
@@ -1589,10 +1972,25 @@ uvm_map_replace(map, start, end, newents
 
 		last->next = oldent->next;
 		last->next->prev = last;
+
+		/* Fix RB tree */
+		uvm_rb_remove(map, oldent);
+
 		newents->prev = oldent->prev;
 		newents->prev->next = newents;
 		map->nentries = map->nentries + (nnewents - 1);
 
+		/* Fixup the RB tree */
+		{
+			int i;
+			struct vm_map_entry *tmp;
+
+			tmp = newents;
+			for (i = 0; i < nnewents && tmp; i++) {
+				uvm_rb_insert(map, tmp);
+				tmp = tmp->next;
+			}
+		}
 	} else {
 
 		/* critical: flush stale hints out of map */
@@ -1604,6 +2002,7 @@ uvm_map_replace(map, start, end, newents
 		uvm_map_entry_unlink(map, oldent);
 	}
 
+	uvm_tree_sanity(map, "map_replace leave");
 
 	/*
 	 * now we can free the old blank entry, unlock the map and return.
@@ -1650,6 +2049,9 @@ uvm_map_extract(srcmap, start, len, dstm
 	    len,0);
 	UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
 
+	uvm_tree_sanity(srcmap, "map_extract src enter");
+	uvm_tree_sanity(dstmap, "map_extract dst enter");
+
 	/*
 	 * step 0: sanity check: start must be on a page boundary, length
 	 * must be page sized.  can't ask for CONTIG/QREF if you asked for
@@ -1928,6 +2330,10 @@ uvm_map_extract(srcmap, start, len, dstm
 			goto bad2;
 		}
 	}
+
+	uvm_tree_sanity(srcmap, "map_extract src leave");
+	uvm_tree_sanity(dstmap, "map_extract dst leave");
+
 	return(0);
 
 	/*
@@ -1939,6 +2345,10 @@ bad2:			/* src already unlocked */
 	if (chain)
 		uvm_unmap_detach(chain,
 		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
+
+	uvm_tree_sanity(srcmap, "map_extract src err leave");
+	uvm_tree_sanity(dstmap, "map_extract dst err leave");
+
 	uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
 	return(error);
 }
@@ -3107,7 +3517,11 @@ uvmspace_exec(l, start, end)
 		 */
 
 		map->min_offset = start;
+		uvm_tree_sanity(map, "resize enter");
 		map->max_offset = end;
+		if (map->header.prev != &map->header)
+			uvm_rb_fixup(map, map->header.prev);
+		uvm_tree_sanity(map, "resize leave");
 	} else {
 
 		/*
Index: uvm_map.h
===================================================================
--- uvm_map.h	(revision 198)
+++ uvm_map.h	(working copy)
@@ -1,4 +1,5 @@
 /*	$NetBSD: uvm_map.h,v 1.34 2003/02/20 22:16:08 atatat Exp $	*/
+/*	$OpenBSD: uvm_map.h,v 1.29 2003/04/14 04:53:51 art Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -109,6 +110,8 @@
 
 #endif /* _KERNEL */
 
+#include <sys/tree.h>
+
 #include <uvm/uvm_anon.h>
 
 /*
@@ -118,6 +121,9 @@
  * Also included is control information for virtual copy operations.
  */
 struct vm_map_entry {
+	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
+	vaddr_t			ownspace;	/* free space after */
+	vaddr_t			space;		/* space in subtree */
 	struct vm_map_entry	*prev;		/* previous entry */
 	struct vm_map_entry	*next;		/* next entry */
 	vaddr_t			start;		/* start address */
@@ -208,6 +214,7 @@ struct vm_map_entry {
 struct vm_map {
 	struct pmap *		pmap;		/* Physical map */
 	struct lock		lock;		/* Lock for map data */
+	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
 	struct vm_map_entry	header;		/* List of entries */
 	int			nentries;	/* Number of entries */
 	vsize_t			size;		/* virtual size */
Index: uvm_map_i.h
===================================================================
--- uvm_map_i.h	(revision 1)
+++ uvm_map_i.h	(working copy)
@@ -1,4 +1,5 @@
 /*	$NetBSD: uvm_map_i.h,v 1.24 2002/12/01 22:58:43 matt Exp $	*/
+/*	$OpenBSD: uvm_map_i.h,v 1.16 2002/10/29 18:30:21 art Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -111,6 +112,7 @@ uvm_map_setup(map, min, max, flags)
 	int flags;
 {
 
+	RB_INIT(&map->rbhead);
 	map->header.next = map->header.prev = &map->header;
 	map->nentries = 0;
 	map->size = 0;

--NextPart-20030823163036-0093400--