Subject: ubc/uvm patch
To: tech-kern@netbsd.org <tech-kern@netbsd.org>
From: Lars Heidieker <lars@heidieker.de>
List: tech-kern
Date: 04/07/2001 16:26:28
This is a multi-part message in MIME format.
--------------81C4609DF4D7EFC8230AA816
Content-Type: text/plain; charset=us-ascii
Content-Transfer-Encoding: 7bit

Oh — I forgot the attachment; here is the actual patch.

--------------81C4609DF4D7EFC8230AA816
Content-Type: text/plain; charset=us-ascii;
 name="uvm.diff"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline;
 filename="uvm.diff"

Common subdirectories: uvm.rep/CVS and uvm/CVS
diff -u uvm.rep/uvm.h uvm/uvm.h
--- uvm.rep/uvm.h	Mon Nov 27 09:40:02 2000
+++ uvm/uvm.h	Wed Apr  4 15:26:43 2001
@@ -80,8 +80,7 @@
 		/* vm_page queues */
 	struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */
 	struct pglist page_active;	/* allocated pages, in use */
-	struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
-	struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */
+	struct pglist page_inactive;	/* pages inactive (reclaim or free) */
 	simple_lock_data_t pageqlock;	/* lock for active/inactive page q */
 	simple_lock_data_t fpageqlock;	/* lock for free page q */
 	boolean_t page_init_done;	/* TRUE if uvm_page_init() finished */
diff -u uvm.rep/uvm_extern.h uvm/uvm_extern.h
--- uvm.rep/uvm_extern.h	Thu Mar 15 07:10:56 2001
+++ uvm/uvm_extern.h	Sat Apr  7 13:34:34 2001
@@ -269,12 +269,6 @@
 	int freetarg;   /* target number of free pages */
 	int inactarg;   /* target number of inactive pages */
 	int wiredmax;   /* max number of wired pages */
-	int anonmin;	/* min threshold for anon pages */
-	int vtextmin;	/* min threshold for vtext pages */
-	int vnodemin;	/* min threshold for vnode pages */
-	int anonminpct;	/* min percent anon pages */
-	int vtextminpct;/* min percent vtext pages */
-	int vnodeminpct;/* min percent vnode pages */
 
 	/* swap */
 	int nswapdev;	/* number of configured swap devices in system */
diff -u uvm.rep/uvm_map.c uvm/uvm_map.c
--- uvm.rep/uvm_map.c	Thu Mar 15 07:10:57 2001
+++ uvm/uvm_map.c	Wed Apr  4 15:27:35 2001
@@ -3322,8 +3322,7 @@
 		pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
 		    PGFL_ZEROS : PGFL_UNKNOWN];
 	} else if (pg->pqflags & PQ_INACTIVE) {
-		pgl = (pg->pqflags & PQ_SWAPBACKED) ?
-		    &uvm.page_inactive_swp : &uvm.page_inactive_obj;
+		pgl = &uvm.page_inactive;
 	} else if (pg->pqflags & PQ_ACTIVE) {
 		pgl = &uvm.page_active;
  	} else {
diff -u uvm.rep/uvm_meter.c uvm/uvm_meter.c
--- uvm.rep/uvm_meter.c	Fri Mar  9 02:02:12 2001
+++ uvm/uvm_meter.c	Wed Apr  4 20:03:36 2001
@@ -130,7 +130,6 @@
 	struct proc *p;
 {
 	struct vmtotal vmtotals;
-	int rv, t;
 
 	/* all sysctl names at this level are terminal */
 	if (namelen != 1)
@@ -156,45 +155,6 @@
 
 	case VM_NKMEMPAGES:
 		return (sysctl_rdint(oldp, oldlenp, newp, nkmempages));
-
-	case VM_ANONMIN:
-		t = uvmexp.anonminpct;
-		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
-		if (rv) {
-			return rv;
-		}
-		if (t + uvmexp.vtextminpct + uvmexp.vnodeminpct > 95 || t < 0) {
-			return EINVAL;
-		}
-		uvmexp.anonminpct = t;
-		uvmexp.anonmin = t * 256 / 100;
-		return rv;
-
-	case VM_VTEXTMIN:
-		t = uvmexp.vtextminpct;
-		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
-		if (rv) {
-			return rv;
-		}
-		if (uvmexp.anonminpct + t + uvmexp.vnodeminpct > 95 || t < 0) {
-			return EINVAL;
-		}
-		uvmexp.vtextminpct = t;
-		uvmexp.vtextmin = t * 256 / 100;
-		return rv;
-
-	case VM_VNODEMIN:
-		t = uvmexp.vnodeminpct;
-		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
-		if (rv) {
-			return rv;
-		}
-		if (uvmexp.anonminpct + uvmexp.vtextminpct + t > 95 || t < 0) {
-			return EINVAL;
-		}
-		uvmexp.vnodeminpct = t;
-		uvmexp.vnodemin = t * 256 / 100;
-		return rv;
 
 	default:
 		return (EOPNOTSUPP);
diff -u uvm.rep/uvm_page.c uvm/uvm_page.c
--- uvm.rep/uvm_page.c	Fri Mar  9 02:02:12 2001
+++ uvm/uvm_page.c	Wed Apr  4 20:04:27 2001
@@ -226,8 +226,7 @@
 			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
 	}
 	TAILQ_INIT(&uvm.page_active);
-	TAILQ_INIT(&uvm.page_inactive_swp);
-	TAILQ_INIT(&uvm.page_inactive_obj);
+	TAILQ_INIT(&uvm.page_inactive);
 	simple_lock_init(&uvm.pageqlock);
 	simple_lock_init(&uvm.fpageqlock);
 
@@ -340,12 +339,6 @@
 
 	uvmexp.reserve_pagedaemon = 1;
 	uvmexp.reserve_kernel = 5;
-	uvmexp.anonminpct = 10;
-	uvmexp.vnodeminpct = 10;
-	uvmexp.vtextminpct = 5;
-	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
-	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
-	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
 
 	/*
 	 * determine if we should zero pages in the idle loop.
@@ -1157,10 +1150,7 @@
 		uvmexp.active--;
 	}
 	if (pg->pqflags & PQ_INACTIVE) {
-		if (pg->pqflags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		pg->pqflags &= ~PQ_INACTIVE;
 		uvmexp.inactive--;
 	}
diff -u uvm.rep/uvm_page_i.h uvm/uvm_page_i.h
--- uvm.rep/uvm_page_i.h	Mon Jan 29 00:30:45 2001
+++ uvm/uvm_page_i.h	Wed Apr  4 15:29:43 2001
@@ -160,10 +160,7 @@
 			uvmexp.active--;
 		}
 		if (pg->pqflags & PQ_INACTIVE) {
-			if (pg->pqflags & PQ_SWAPBACKED)
-				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-			else
-				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+			TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 			pg->pqflags &= ~PQ_INACTIVE;
 			uvmexp.inactive--;
 		}
@@ -212,10 +209,7 @@
 	}
 	if ((pg->pqflags & PQ_INACTIVE) == 0) {
 		KASSERT(pg->wire_count == 0);
-		if (pg->pqflags & PQ_SWAPBACKED)
-			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
 		pg->pqflags |= PQ_INACTIVE;
 		uvmexp.inactive++;
 
@@ -242,10 +236,7 @@
 	struct vm_page *pg;
 {
 	if (pg->pqflags & PQ_INACTIVE) {
-		if (pg->pqflags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		pg->pqflags &= ~PQ_INACTIVE;
 		uvmexp.inactive--;
 	}
diff -u uvm.rep/uvm_pdaemon.c uvm/uvm_pdaemon.c
--- uvm.rep/uvm_pdaemon.c	Sat Mar 10 23:46:50 2001
+++ uvm/uvm_pdaemon.c	Sat Apr  7 13:45:15 2001
@@ -364,22 +364,16 @@
 	struct uvm_object *uobj;
 	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
 	int npages;
-	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
-	int swnpages, swcpages;				/* XXX: see below */
+	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT];
+	int swnpages, swcpages;
 	int swslot;
 	struct vm_anon *anon;
 	boolean_t swap_backed;
 	vaddr_t start;
-	int dirtyreacts, t;
-	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
+	int dirtyreacts;
+	int balancecount;
 
-	/*
-	 * note: we currently keep swap-backed pages on a seperate inactive
-	 * list from object-backed pages.   however, merging the two lists
-	 * back together again hasn't been ruled out.   thus, we keep our
-	 * swap cluster in "swpps" rather than in pps (allows us to mix
-	 * clustering types in the event of a mixed inactive queue).
-	 */
+	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * swslot is non-zero if we are building a swap cluster.  we want
@@ -392,6 +386,12 @@
 	free = 0;
 	dirtyreacts = 0;
 
+	if (uvmexp.vnodepages > uvmexp.npages / 3) {
+		balancecount = 32;
+	} else {
+		balancecount = 1024;
+	}
+
 	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
 
 		/*
@@ -449,35 +449,6 @@
 			}
 
 			/*
-			 * enforce the minimum thresholds on different
-			 * types of memory usage.  if reusing the current
-			 * page would reduce that type of usage below its
-			 * minimum, reactivate the page instead and move
-			 * on to the next page.
-			 */
-
-			t = uvmexp.active + uvmexp.inactive + uvmexp.free;
-			if (p->uanon &&
-			    uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8) {
-				uvm_pageactivate(p);
-				uvmexp.pdreanon++;
-				continue;
-			}
-			if (p->uobject && UVM_OBJ_IS_VTEXT(p->uobject) &&
-			    uvmexp.vtextpages <= (t * uvmexp.vtextmin) >> 8) {
-				uvm_pageactivate(p);
-				uvmexp.pdrevtext++;
-				continue;
-			}
-			if (p->uobject && UVM_OBJ_IS_VNODE(p->uobject) &&
-			    !UVM_OBJ_IS_VTEXT(p->uobject) &&
-			    uvmexp.vnodepages <= (t * uvmexp.vnodemin) >> 8) {
-				uvm_pageactivate(p);
-				uvmexp.pdrevnode++;
-				continue;
-			}
-
-			/*
 			 * first we attempt to lock the object that this page
 			 * belongs to.  if our attempt fails we skip on to
 			 * the next page (no harm done).  it is important to
@@ -549,6 +520,19 @@
 				p->flags &= ~PG_CLEAN;
 			}
 
+			if (balancecount < 0) {
+				if (!(uobj && UVM_OBJ_IS_VNODE(uobj) &&
+				    (uobj->uo_refs < 2))) {
+					if (anon) {
+						simple_unlock(&anon->an_lock);
+					} else {
+						simple_unlock(&uobj->vmobjlock);
+					}
+					continue;
+				}
+			}
+			balancecount--;
+
 			if (p->flags & PG_CLEAN) {
 				if (p->pqflags & PQ_SWAPBACKED) {
 					/* this page now lives only in swap */
@@ -828,7 +812,6 @@
 	int s, free, inactive_shortage, swap_shortage, pages_freed;
 	struct vm_page *p, *nextpg;
 	struct uvm_object *uobj;
-	boolean_t got_it;
 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
 
 	uvmexp.pdrevs++;		/* counter */
@@ -871,14 +854,8 @@
 	 * low bit of uvmexp.pdrevs (which we bump by one each call).
 	 */
 
-	got_it = FALSE;
 	pages_freed = uvmexp.pdfreed;
-	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
-		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
-	if (!got_it)
-		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
-	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
-		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
+	(void) uvmpd_scan_inactive(&uvm.page_inactive);
 	pages_freed = uvmexp.pdfreed - pages_freed;
 
 	/*
@@ -971,8 +948,8 @@
 		 * shortage of inactive pages.
 		 */
 
-		if (inactive_shortage > 0 &&
-		    pmap_clear_reference(p) == FALSE) {
+		if (inactive_shortage > 0) {
+			pmap_clear_reference(p);
 			/* no need to check wire_count as pg is "active" */
 			uvm_pagedeactivate(p);
 			uvmexp.pddeact++;

--------------81C4609DF4D7EFC8230AA816--