Subject: Re: UBC performance patch
To: None <eeh@netbsd.org>
From: Jason R Thorpe <thorpej@zembu.com>
List: tech-kern
Date: 01/23/2001 15:22:52
On Tue, Jan 23, 2001 at 11:03:35PM -0000, eeh@netbsd.org wrote:
> No; but it really improves interactive performance
> if you do `dd if=/dev/null of=/tmp/foo'.
Indeed. I also tried a similar change, but to the case in ubc_alloc()
where a window is recycled. That shows similar improvement, but not
as much in the dd example above (BTW -- /dev/zero :-)
...but my version also has less TLB invalidation traffic (the recycle
case of ubc_alloc() already does a pmap_remove() for the UBC window, so
the page deactivation isn't as big of an expense there).
--
-- Jason R. Thorpe <thorpej@zembu.com>
Index: uvm_bio.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_bio.c,v
retrieving revision 1.6
diff -c -r1.6 uvm_bio.c
*** uvm_bio.c 2000/12/27 09:01:45 1.6
--- uvm_bio.c 2001/01/23 23:20:19
***************
*** 117,122 ****
--- 117,134 ----
#endif
/*
+ * If true, this causes UBC pages to be placed on the inactive list
+ * rather than remain on the active list after being released.
+ */
+ boolean_t ubc_release_deactivate = FALSE;
+
+ /*
+ * If true, this causes UBC pages to be placed on the inactive list
+ * rather than remain on the active list after a UBC window is reused.
+ */
+ boolean_t ubc_recycle_deactivate = TRUE;
+
+ /*
* ubc_init
*
* init pager private data structures.
***************
*** 400,405 ****
--- 412,465 ----
if (umap->uobj != NULL) {
LIST_REMOVE(umap, hash);
+
+ if (ubc_recycle_deactivate) {
+ /*
+ * Deactivate the pages. This saves the
+ * pagedaemon from having to scan them,
+ * and also makes it more likely that
+ * these will be thrown away first, rather
+ * than some random process's working set.
+ */
+ struct uvm_object *oobj = umap->uobj;
+ vm_page_t pg;
+ voff_t objoff;
+
+ /*
+ * XXX Should-do's:
+ *
+ * - Limit this only to the actual
+ * number of pages mapped by the
+ * window. But the actual window
+ * size isn't stored?
+ *
+ * - Don't deactivate pages on vnodes
+ * which are also mmap'd. But we
+ * need to add some sort of counter
+ * to uvm_vnode which keeps track
+ * of uvn_attach/uvn_detach (i.e.
+ * "when things are mmap'd").
+ */
+ uvm_lock_pageq();
+ for (objoff = umap->offset;
+ objoff < umap->offset + ubc_winsize;
+ objoff += PAGE_SIZE) {
+ pg = uvm_pagelookup(oobj, objoff);
+ /*
+ * Note we're only looking for pages
+ * on the active queue, so we don't
+ * need to test for wire_count.
+ */
+ if (pg == NULL ||
+ (pg->pqflags & PQ_ACTIVE) == 0 ||
+ (pg->flags & PG_BUSY) != 0)
+ continue;
+ pmap_page_protect(pg, VM_PROT_NONE);
+ uvm_pagedeactivate(pg);
+ uvmexp.pddeact++;
+ }
+ uvm_unlock_pageq();
+ }
}
umap->uobj = uobj;
***************
*** 408,413 ****
--- 468,478 ----
LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
umap, hash);
+ /*
+ * XXX This pmap_remove() should be redundant now
+ * XXX if ubc_recycle_deactivate is TRUE.
+ * XXX --thorpej
+ */
va = (vaddr_t)(ubc_object.kva +
(umap - ubc_object.umap) * ubc_winsize);
pmap_remove(pmap_kernel(), va, va + ubc_winsize);
***************
*** 462,467 ****
--- 527,577 ----
umap->writeoff = 0;
umap->writelen = 0;
umap->refcount--;
+
+ /*
+ * Deactivate the pages, if possible. This saves the
+ * pagedaemon from having to scan them, and also makes
+ * it more likely that these will be thrown away first,
+ * rather than some random process's working set.
+ */
+ if (ubc_release_deactivate) {
+ vm_page_t pg;
+ voff_t objoff;
+
+ /*
+ * XXX Should-do's:
+ *
+ * - Limit this only to the actual number
+ * of pages mapped by the window. But
+ * "wlen" is not currently meaningful.
+ *
+ * - Don't deactivate pages on vnodes which
+ * are also mmap'd. But we need to add
+ * some sort of counter to uvm_vnode which
+ * keeps track of uvn_attach/uvn_detach
+ * (i.e. "when things are mmap'd").
+ */
+ uvm_lock_pageq();
+ for (objoff = umap->offset;
+ objoff < umap->offset + ubc_winsize;
+ objoff += PAGE_SIZE) {
+ pg = uvm_pagelookup(uobj, objoff);
+ /*
+ * Note, we're only looking for pages on the
+ * active queue, so we don't need to test for
+ * wire_count.
+ */
+ if (pg == NULL ||
+ (pg->pqflags & PQ_ACTIVE) == 0 ||
+ (pg->flags & PG_BUSY) != 0)
+ continue;
+ pmap_page_protect(pg, VM_PROT_NONE);
+ uvm_pagedeactivate(pg);
+ uvmexp.pddeact++;
+ }
+ uvm_unlock_pageq();
+ }
+
if (umap->refcount == 0) {
if (UBC_RELEASE_UNMAP &&
(((struct vnode *)uobj)->v_flag & VTEXT)) {
***************
*** 478,483 ****
--- 588,598 ----
* as soon as possible.
*/
+ /*
+ * XXX This pmap_remove() should be redundant
+ * XXX now if ubc_release_deactivate is TRUE.
+ * XXX --thorpej
+ */
va = (vaddr_t)(ubc_object.kva +
(umap - ubc_object.umap) * ubc_winsize);
pmap_remove(pmap_kernel(), va, va + ubc_winsize);