Source-Changes-HG archive
[src/trunk]: src/sys/uvm Add a new, more aggressive allocator for uvm_pglista...
details: https://anonhg.NetBSD.org/src/rev/2ab41ea9238f
branches: trunk
changeset: 955680:2ab41ea9238f
user: chs <chs%NetBSD.org@localhost>
date: Wed Oct 07 17:51:50 2020 +0000
description:
Add a new, more aggressive allocator for uvm_pglistalloc() to allocate
contiguous physical pages, and try this new allocator if the existing
one fails. The existing contig allocator only tries to allocate pages
that are already free, which works fine shortly after boot but rarely
works after the system has been up for a while. The new allocator uses
the pagedaemon to evict pages from memory in the hope that this will
free up a range of pages that satisfies the constraints of the request.
This should help with things like plugging in a USB device, which often
fails for some USB controllers because they can't get contiguous memory.
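For context, a minimal caller-side sketch of the path this improves (the helper
name and the size/alignment/boundary values below are hypothetical; only the
uvm_pglistalloc() interface itself is taken from the tree). With waitok != 0, a
failure of the existing free-list-only allocator now falls through to the new
aggressive one:

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

/*
 * Hypothetical driver helper: 16 physically contiguous pages below 4GB,
 * 64KB-aligned, not crossing a 16MB boundary, in one segment (nsegs == 1),
 * willing to sleep while the pagedaemon evicts pages (waitok == 1).
 */
static int
example_get_contig_pages(struct pglist *plist)
{
	TAILQ_INIT(plist);
	return uvm_pglistalloc(16 * PAGE_SIZE, 0, 0xffffffffUL,
	    64 * 1024, 16 * 1024 * 1024, plist, 1, 1);
}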
diffstat:
sys/uvm/uvm_init.c | 5 +-
sys/uvm/uvm_page.h | 6 +-
sys/uvm/uvm_pglist.c | 197 ++++++++++++++++++++++++++++++++++++++++++++++----
sys/uvm/uvm_swap.c | 8 +-
4 files changed, 191 insertions(+), 25 deletions(-)
diffs (truncated from 354 to 300 lines):
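Before the raw diff, a condensed sketch of the new fallback control flow in
uvm_pglistalloc_contig() (illustrative only; the authoritative change is the
diff that follows):

static int
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int waitok)
{
	int error = ENOMEM;

	/* ... existing scan of the free lists, unchanged ... */

	uvm_pgfl_unlock();
	if (error) {
		if (waitok) {
			/* New: let the pagedaemon evict pages until a
			   compatible contiguous range can be assembled. */
			error = uvm_pglistalloc_contig_aggressive(num, low,
			    high, alignment, boundary, rlist);
		} else {
			/* Cannot sleep: release any partial result and
			   nudge the pagedaemon for next time. */
			uvm_pglistfree(rlist);
			uvm_kick_pdaemon();
		}
	}
	return error;
}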
diff -r 4bee2e81eebf -r 2ab41ea9238f sys/uvm/uvm_init.c
--- a/sys/uvm/uvm_init.c Wed Oct 07 16:03:10 2020 +0000
+++ b/sys/uvm/uvm_init.c Wed Oct 07 17:51:50 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_init.c,v 1.53 2020/03/06 20:46:12 ad Exp $ */
+/* $NetBSD: uvm_init.c,v 1.54 2020/10/07 17:51:50 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.53 2020/03/06 20:46:12 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.54 2020/10/07 17:51:50 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -107,6 +107,7 @@
*/
uvm_page_init(&kvm_start, &kvm_end);
+ uvm_pglistalloc_init();
/*
* Init the map sub-system.
diff -r 4bee2e81eebf -r 2ab41ea9238f sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h Wed Oct 07 16:03:10 2020 +0000
+++ b/sys/uvm/uvm_page.h Wed Oct 07 17:51:50 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.h,v 1.106 2020/09/20 10:30:05 skrll Exp $ */
+/* $NetBSD: uvm_page.h,v 1.107 2020/10/07 17:51:50 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -259,6 +259,7 @@
#define PG_FREE 0x00004000 /* page is on free list */
#define PG_MARKER 0x00008000 /* dummy marker page */
#define PG_PAGER1 0x00010000 /* pager-specific flag */
+#define PG_PGLCA 0x00020000 /* allocated by uvm_pglistalloc_contig */
#define PG_STAT (PG_ANON|PG_AOBJ|PG_FILE)
#define PG_SWAPBACKED (PG_ANON|PG_AOBJ)
@@ -268,7 +269,7 @@
"\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
"\11ZERO\12TABLED\13AOBJ\14ANON" \
"\15FILE\16READAHEAD\17FREE\20MARKER" \
- "\21PAGER1"
+ "\21PAGER1\22PGLCA"
/*
* Flags stored in pg->pqflags, which is protected by pg->interlock.
@@ -330,6 +331,7 @@
*/
void uvm_page_init(vaddr_t *, vaddr_t *);
+void uvm_pglistalloc_init(void);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
diff -r 4bee2e81eebf -r 2ab41ea9238f sys/uvm/uvm_pglist.c
--- a/sys/uvm/uvm_pglist.c Wed Oct 07 16:03:10 2020 +0000
+++ b/sys/uvm/uvm_pglist.c Wed Oct 07 17:51:50 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pglist.c,v 1.85 2020/06/14 21:41:42 ad Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.86 2020/10/07 17:51:50 chs Exp $ */
/*-
* Copyright (c) 1997, 2019 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.85 2020/06/14 21:41:42 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.86 2020/10/07 17:51:50 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -59,6 +59,8 @@
#define STAT_DECR(v)
#endif
+kmutex_t uvm_pglistalloc_contig_lock;
+
/*
* uvm_pglistalloc: allocate a list of pages
*
@@ -293,13 +295,161 @@
}
static int
+uvm_pglistalloc_contig_aggressive(int num, paddr_t low, paddr_t high,
+ paddr_t alignment, paddr_t boundary, struct pglist *rlist)
+{
+ struct vm_page *pg;
+ struct pglist tmp;
+ paddr_t pa, off, spa, amask, bmask, rlo, rhi;
+ uvm_physseg_t upm;
+ int error, i, run, acnt;
+
+ /*
+ * Allocate pages the normal way and for each new page, check if
+ * the page completes a range satisfying the request.
+ * The pagedaemon will evict pages as we go and we are very likely
+ * to get compatible pages eventually.
+ */
+
+ error = ENOMEM;
+ TAILQ_INIT(&tmp);
+ acnt = atop(alignment);
+ amask = ~(alignment - 1);
+ bmask = ~(boundary - 1);
+ KASSERT(bmask <= amask);
+ mutex_enter(&uvm_pglistalloc_contig_lock);
+ while (uvm_reclaimable()) {
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ if (pg == NULL) {
+ uvm_wait("pglac2");
+ continue;
+ }
+ pg->flags |= PG_PGLCA;
+ TAILQ_INSERT_HEAD(&tmp, pg, pageq.queue);
+
+ pa = VM_PAGE_TO_PHYS(pg);
+ if (pa < low || pa >= high) {
+ continue;
+ }
+
+ upm = uvm_physseg_find(atop(pa), &off);
+ KASSERT(uvm_physseg_valid_p(upm));
+
+ spa = pa & amask;
+
+ /*
+ * Look backward for at most num - 1 pages, back to
+ * the highest of:
+ * - the first page in the physseg
+ * - the specified low address
+ * - num-1 pages before the one we just allocated
+ * - the start of the boundary range containing pa
+ * all rounded up to alignment.
+ */
+
+ rlo = roundup2(ptoa(uvm_physseg_get_avail_start(upm)), alignment);
+ rlo = MAX(rlo, roundup2(low, alignment));
+ rlo = MAX(rlo, roundup2(pa - ptoa(num - 1), alignment));
+ if (boundary) {
+ rlo = MAX(rlo, spa & bmask);
+ }
+
+ /*
+ * Look forward as far as the lowest of:
+ * - the last page of the physseg
+ * - the specified high address
+ * - the boundary after pa
+ */
+
+ rhi = ptoa(uvm_physseg_get_avail_end(upm));
+ rhi = MIN(rhi, high);
+ if (boundary) {
+ rhi = MIN(rhi, rounddown2(pa, boundary) + boundary);
+ }
+
+ /*
+ * Make sure our range to consider is big enough.
+ */
+
+ if (rhi - rlo < ptoa(num)) {
+ continue;
+ }
+
+ run = 0;
+ while (spa > rlo) {
+
+ /*
+ * Examine pages before spa in groups of acnt.
+ * If all the pages in a group are marked then add
+ * these pages to the run.
+ */
+
+ for (i = 0; i < acnt; i++) {
+ pg = PHYS_TO_VM_PAGE(spa - alignment + ptoa(i));
+ if ((pg->flags & PG_PGLCA) == 0) {
+ break;
+ }
+ }
+ if (i < acnt) {
+ break;
+ }
+ spa -= alignment;
+ run += acnt;
+ }
+
+ /*
+ * Look forward for any remaining pages.
+ */
+
+ for (; run < num; run++) {
+ pg = PHYS_TO_VM_PAGE(spa + ptoa(run));
+ if ((pg->flags & PG_PGLCA) == 0) {
+ break;
+ }
+ }
+ if (run < num) {
+ continue;
+ }
+
+ /*
+ * We found a match. Move these pages from the tmp list to
+ * the caller's list.
+ */
+
+ for (i = 0; i < num; i++) {
+ pg = PHYS_TO_VM_PAGE(spa + ptoa(i));
+ TAILQ_REMOVE(&tmp, pg, pageq.queue);
+ pg->flags &= ~PG_PGLCA;
+ TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
+ STAT_INCR(uvm_pglistalloc_npages);
+ }
+
+ error = 0;
+ break;
+ }
+
+ /*
+ * Free all the pages that we didn't need.
+ */
+
+ while (!TAILQ_EMPTY(&tmp)) {
+ pg = TAILQ_FIRST(&tmp);
+ TAILQ_REMOVE(&tmp, pg, pageq.queue);
+ pg->flags &= ~PG_PGLCA;
+ uvm_pagefree(pg);
+ }
+ mutex_exit(&uvm_pglistalloc_contig_lock);
+ return error;
+}
+
+static int
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
- paddr_t boundary, struct pglist *rlist)
+ paddr_t boundary, struct pglist *rlist, int waitok)
{
int fl;
int error;
+ uvm_physseg_t psi;
- uvm_physseg_t psi;
/* Default to "lose". */
error = ENOMEM;
@@ -338,14 +488,22 @@
}
out:
+ uvm_pgfl_unlock();
+
/*
- * check to see if we need to generate some free pages waking
- * the pagedaemon.
+ * If that didn't work, try the more aggressive approach.
*/
-
- uvm_pgfl_unlock();
- uvm_kick_pdaemon();
- return (error);
+
+ if (error) {
+ if (waitok) {
+ error = uvm_pglistalloc_contig_aggressive(num, low, high,
+ alignment, boundary, rlist);
+ } else {
+ uvm_pglistfree(rlist);
+ uvm_kick_pdaemon();
+ }
+ }
+ return error;
}
static int
@@ -356,7 +514,7 @@
struct vm_page *pg;
bool second_pass;
#ifdef PGALLOC_VERBOSE
- printf("pgalloc: simple %d pgs from psi %zd\n", num, psi);
+ printf("pgalloc: simple %d pgs from psi %d\n", num, psi);
#endif
KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
@@ -489,11 +647,6 @@
if (error) {
if (waitok) {
- /* XXX perhaps some time limitation? */
-#ifdef DEBUG
- if (count == 1)
- printf("pglistalloc waiting\n");
-#endif
uvm_wait("pglalloc");
goto again;
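To make the search-window arithmetic in uvm_pglistalloc_contig_aggressive()
concrete, here is a standalone userland sketch with made-up numbers (4KB pages,
64KB alignment, 16MB boundary; roundup2/rounddown2 are re-defined locally to
mirror the power-of-two kernel macros, and the physseg bounds are invented):

#include <stdio.h>
#include <stdint.h>

#define	PAGE_SIZE	4096ULL
#define	roundup2(x, m)	(((x) + ((m) - 1)) & ~((m) - 1))
#define	rounddown2(x, m) ((x) & ~((m) - 1))

int
main(void)
{
	uint64_t alignment = 64 * 1024;		/* 64KB */
	uint64_t boundary = 16 * 1024 * 1024;	/* 16MB */
	uint64_t num = 16;			/* pages requested */
	uint64_t low = 0, high = 0xffffffffULL;	/* caller's range */
	uint64_t seg_lo = 0x10000000, seg_hi = 0x20000000; /* invented physseg */
	uint64_t pa = 0x12345000;		/* page just allocated */

	/* Aligned start of the candidate run containing pa. */
	uint64_t spa = pa & ~(alignment - 1);	/* -> 0x12340000 */

	/* Lowest address worth scanning back to (the MAX of four limits). */
	uint64_t rlo = roundup2(seg_lo, alignment);
	if (roundup2(low, alignment) > rlo)
		rlo = roundup2(low, alignment);
	if (roundup2(pa - PAGE_SIZE * (num - 1), alignment) > rlo)
		rlo = roundup2(pa - PAGE_SIZE * (num - 1), alignment);
	if (boundary != 0 && (spa & ~(boundary - 1)) > rlo)
		rlo = spa & ~(boundary - 1);

	/* Highest address worth scanning forward to (the MIN of three). */
	uint64_t rhi = seg_hi;
	if (high < rhi)
		rhi = high;
	if (boundary != 0 && rounddown2(pa, boundary) + boundary < rhi)
		rhi = rounddown2(pa, boundary) + boundary;

	/* Only a run of at least num pages inside [rlo, rhi) can succeed. */
	printf("spa=%#llx rlo=%#llx rhi=%#llx (%llu pages in window)\n",
	    (unsigned long long)spa, (unsigned long long)rlo,
	    (unsigned long long)rhi,
	    (unsigned long long)((rhi - rlo) / PAGE_SIZE));
	return 0;
}

With these values it prints spa=0x12340000 rlo=0x12340000 rhi=0x13000000
(3264 pages in window), i.e. the scan stays within the 16MB boundary block
and the physseg containing pa.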