Subject: UVM/UBC unified inactive queue patch
To: tech-kern@netbsd.org <tech-kern@netbsd.org>
From: Lars Heidieker <lars@heidieker.de>
List: tech-kern
Date: 04/04/2001 15:59:24
This is a multi-part message in MIME format.
--------------D342709E1AFE9E5965D9251E
Content-Type: text/plain; charset=us-ascii
Content-Transfer-Encoding: 7bit

Hi,

here is the patch that unifies the two inactive queues into one, making
the system a bit more balanced.

lars

--------------D342709E1AFE9E5965D9251E
Content-Type: text/plain; charset=us-ascii;
 name="uvm.diff"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline;
 filename="uvm.diff"

Common subdirectories: uvm.rep/CVS and uvm/CVS
diff -u uvm.rep/uvm.h uvm/uvm.h
--- uvm.rep/uvm.h	Wed Apr  4 15:53:53 2001
+++ uvm/uvm.h	Wed Apr  4 15:26:43 2001
@@ -80,8 +80,7 @@
 		/* vm_page queues */
 	struct pgfreelist page_free[VM_NFREELIST]; /* unallocated pages */
 	struct pglist page_active;	/* allocated pages, in use */
-	struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
-	struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */
+	struct pglist page_inactive;	/* pages inactive (reclaim or free) */
 	simple_lock_data_t pageqlock;	/* lock for active/inactive page q */
 	simple_lock_data_t fpageqlock;	/* lock for free page q */
 	boolean_t page_init_done;	/* TRUE if uvm_page_init() finished */
diff -u uvm.rep/uvm_map.c uvm/uvm_map.c
--- uvm.rep/uvm_map.c	Wed Apr  4 15:55:42 2001
+++ uvm/uvm_map.c	Wed Apr  4 15:27:35 2001
@@ -3322,8 +3322,7 @@
 		pgl = &uvm.page_free[fl].pgfl_queues[((pg)->flags & PG_ZERO) ?
 		    PGFL_ZEROS : PGFL_UNKNOWN];
 	} else if (pg->pqflags & PQ_INACTIVE) {
-		pgl = (pg->pqflags & PQ_SWAPBACKED) ?
-		    &uvm.page_inactive_swp : &uvm.page_inactive_obj;
+		pgl = &uvm.page_inactive;
 	} else if (pg->pqflags & PQ_ACTIVE) {
 		pgl = &uvm.page_active;
  	} else {
diff -u uvm.rep/uvm_page.c uvm/uvm_page.c
--- uvm.rep/uvm_page.c	Wed Apr  4 15:56:06 2001
+++ uvm/uvm_page.c	Wed Apr  4 15:28:17 2001
@@ -226,8 +226,7 @@
 			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
 	}
 	TAILQ_INIT(&uvm.page_active);
-	TAILQ_INIT(&uvm.page_inactive_swp);
-	TAILQ_INIT(&uvm.page_inactive_obj);
+	TAILQ_INIT(&uvm.page_inactive);
 	simple_lock_init(&uvm.pageqlock);
 	simple_lock_init(&uvm.fpageqlock);
 
@@ -1157,10 +1156,7 @@
 		uvmexp.active--;
 	}
 	if (pg->pqflags & PQ_INACTIVE) {
-		if (pg->pqflags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		pg->pqflags &= ~PQ_INACTIVE;
 		uvmexp.inactive--;
 	}
diff -u uvm.rep/uvm_page_i.h uvm/uvm_page_i.h
--- uvm.rep/uvm_page_i.h	Wed Apr  4 15:56:11 2001
+++ uvm/uvm_page_i.h	Wed Apr  4 15:29:43 2001
@@ -160,10 +160,7 @@
 			uvmexp.active--;
 		}
 		if (pg->pqflags & PQ_INACTIVE) {
-			if (pg->pqflags & PQ_SWAPBACKED)
-				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-			else
-				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+			TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 			pg->pqflags &= ~PQ_INACTIVE;
 			uvmexp.inactive--;
 		}
@@ -212,10 +209,7 @@
 	}
 	if ((pg->pqflags & PQ_INACTIVE) == 0) {
 		KASSERT(pg->wire_count == 0);
-		if (pg->pqflags & PQ_SWAPBACKED)
-			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
 		pg->pqflags |= PQ_INACTIVE;
 		uvmexp.inactive++;
 
@@ -242,10 +236,7 @@
 	struct vm_page *pg;
 {
 	if (pg->pqflags & PQ_INACTIVE) {
-		if (pg->pqflags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		pg->pqflags &= ~PQ_INACTIVE;
 		uvmexp.inactive--;
 	}
diff -u uvm.rep/uvm_pdaemon.c uvm/uvm_pdaemon.c
--- uvm.rep/uvm_pdaemon.c	Wed Apr  4 15:56:26 2001
+++ uvm/uvm_pdaemon.c	Wed Apr  4 15:41:17 2001
@@ -364,8 +364,8 @@
 	struct uvm_object *uobj;
 	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
 	int npages;
-	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
-	int swnpages, swcpages;				/* XXX: see below */
+	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 
+	int swnpages, swcpages;	
 	int swslot;
 	struct vm_anon *anon;
 	boolean_t swap_backed;
@@ -374,14 +374,6 @@
 	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
 
 	/*
-	 * note: we currently keep swap-backed pages on a seperate inactive
-	 * list from object-backed pages.   however, merging the two lists
-	 * back together again hasn't been ruled out.   thus, we keep our
-	 * swap cluster in "swpps" rather than in pps (allows us to mix
-	 * clustering types in the event of a mixed inactive queue).
-	 */
-
-	/*
 	 * swslot is non-zero if we are building a swap cluster.  we want
 	 * to stay in the loop while we have a page to scan or we have
 	 * a swap-cluster to build.
@@ -828,7 +820,6 @@
 	int s, free, inactive_shortage, swap_shortage, pages_freed;
 	struct vm_page *p, *nextpg;
 	struct uvm_object *uobj;
-	boolean_t got_it;
 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
 
 	uvmexp.pdrevs++;		/* counter */
@@ -871,14 +862,8 @@
 	 * low bit of uvmexp.pdrevs (which we bump by one each call).
 	 */
 
-	got_it = FALSE;
 	pages_freed = uvmexp.pdfreed;
-	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
-		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
-	if (!got_it)
-		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
-	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
-		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
+	(void) uvmpd_scan_inactive(&uvm.page_inactive);
 	pages_freed = uvmexp.pdfreed - pages_freed;
 
 	/*

--------------D342709E1AFE9E5965D9251E--