Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64 - free L1-L3 pages that have been emptied by...
details: https://anonhg.NetBSD.org/src/rev/d823d5a387d3
branches: trunk
changeset: 449749:d823d5a387d3
user: ryo <ryo%NetBSD.org@localhost>
date: Tue Mar 19 16:05:49 2019 +0000
description:
- free L1-L3 pages that have been emptied by pmap_remove().
- if no memory is available, pmap_enter() correctly returns ENOMEM if PMAP_CANFAIL, or waits until memory becomes available if !PMAP_CANFAIL.
These changes improve stability when using huge virtual memory spaces with mmap.
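For context, a minimal caller-side sketch of the PMAP_CANFAIL behavior described above (illustrative only; the helper name is hypothetical, while pmap_enter() and PMAP_CANFAIL are the standard NetBSD pmap(9) interface):

#include <sys/errno.h>
#include <uvm/uvm_extern.h>

/*
 * hypothetical helper: map one page, letting pmap_enter() fail with
 * ENOMEM instead of sleeping when no page-table page can be allocated
 */
static int
map_one_page_canfail(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int error;

	error = pmap_enter(pm, va, pa, prot, prot | PMAP_CANFAIL);
	if (error == ENOMEM)
		return error;	/* caller may unwind, wait, and retry */

	/* without PMAP_CANFAIL, pmap_enter() waits until memory is available */
	return 0;
}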
diffstat:
sys/arch/aarch64/aarch64/pmap.c | 314 ++++++++++++++++++++++++++++++++-------
sys/arch/aarch64/include/asan.h | 4 +-
sys/arch/aarch64/include/pmap.h | 10 +-
3 files changed, 267 insertions(+), 61 deletions(-)
diffs (truncated from 554 to 300 lines):
diff -r 1b1539b587db -r d823d5a387d3 sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c Tue Mar 19 13:38:53 2019 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c Tue Mar 19 16:05:49 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.35 2019/02/06 05:33:41 ryo Exp $ */
+/* $NetBSD: pmap.c,v 1.36 2019/03/19 16:05:49 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.35 2019/02/06 05:33:41 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.36 2019/03/19 16:05:49 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -191,11 +191,15 @@
#define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1)
+void atomic_add_16(volatile uint16_t *, int16_t);
+uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
+
static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t);
static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *);
static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
-static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool, struct pv_entry **);
+static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool,
+ struct pv_entry **);
static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
static struct pmap kernel_pmap;
@@ -434,8 +438,19 @@
kpm->pm_l0table = l0;
kpm->pm_l0table_pa = l0pa;
kpm->pm_activated = true;
- SLIST_INIT(&kpm->pm_vmlist);
+ TAILQ_INIT(&kpm->pm_vmlist);
mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM);
+
+ CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long));
+ CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long));
+#define PMSTAT_INC_WIRED_COUNT(pm) \
+ atomic_inc_ulong(&(pm)->pm_stats.wired_count)
+#define PMSTAT_DEC_WIRED_COUNT(pm) \
+ atomic_dec_ulong(&(pm)->pm_stats.wired_count)
+#define PMSTAT_INC_RESIDENT_COUNT(pm) \
+ atomic_inc_ulong(&(pm)->pm_stats.resident_count)
+#define PMSTAT_DEC_RESIDENT_COUNT(pm) \
+ atomic_dec_ulong(&(pm)->pm_stats.resident_count)
}
inline static int
@@ -544,39 +559,61 @@
atomic_inc_uint(&pm->pm_refcnt);
}
-pd_entry_t *
-pmap_alloc_pdp(struct pmap *pm, paddr_t *pap)
+paddr_t
+pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, bool waitok)
{
paddr_t pa;
+ struct vm_page *pg;
UVMHIST_FUNC(__func__);
UVMHIST_CALLED(pmaphist);
if (uvm.page_init_done) {
- struct vm_page *pg;
-
+ retry:
pg = uvm_pagealloc(NULL, 0, NULL,
UVM_PGA_USERESERVE | UVM_PGA_ZERO);
- if (pg == NULL)
- panic("%s: cannot allocate L3 table", __func__);
+ if (pg == NULL) {
+ if (waitok) {
+ uvm_wait("pmap_alloc_pdp");
+ goto retry;
+ }
+ return POOL_PADDR_INVALID;
+ }
+
+ TAILQ_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
+ pg->flags &= ~PG_BUSY; /* never busy */
+ pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */
pa = VM_PAGE_TO_PHYS(pg);
-
- SLIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
PMAP_COUNT(pdp_alloc);
+ VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = NULL;
+
} else {
/* uvm_pageboot_alloc() returns AARCH64 KSEG address */
+ pg = NULL;
pa = AARCH64_KVA_TO_PA(
uvm_pageboot_alloc(Ln_TABLE_SIZE));
PMAP_COUNT(pdp_alloc_boot);
}
- if (pap != NULL)
- *pap = pa;
-
- UVMHIST_LOG(pmaphist, "pa=%llx, va=%llx",
- pa, AARCH64_PA_TO_KVA(pa), 0, 0);
-
- return (void *)AARCH64_PA_TO_KVA(pa);
+ if (pgp != NULL)
+ *pgp = pg;
+
+ UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx",
+ pa, pg, 0, 0);
+
+ return pa;
+}
+
+static void
+pmap_free_pdp(struct pmap *pm, struct vm_page *pg)
+{
+ TAILQ_REMOVE(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
+ pg->flags |= PG_BUSY;
+ pg->wire_count = 0;
+ VM_MDPAGE_INIT(pg);
+
+ uvm_pagefree(pg);
+ PMAP_COUNT(pdp_free);
}
static void
@@ -584,9 +621,8 @@
{
struct vm_page *pg, *tmp;
- SLIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
- uvm_pagefree(pg);
- PMAP_COUNT(pdp_free);
+ TAILQ_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
+ pmap_free_pdp(pm, pg);
}
}
@@ -617,7 +653,7 @@
bool
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
- static pt_entry_t *ptep, pte;
+ pt_entry_t *ptep, pte;
paddr_t pa;
vsize_t blocksize = 0;
extern char __kernel_text[];
@@ -660,6 +696,9 @@
return pa;
}
+/*
+ * return a pointer to the pte, regardless of whether the entry is valid or not.
+ */
static pt_entry_t *
_pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs)
{
@@ -1238,9 +1277,11 @@
memset(pm, 0, sizeof(*pm));
pm->pm_refcnt = 1;
pm->pm_asid = -1;
- SLIST_INIT(&pm->pm_vmlist);
+ TAILQ_INIT(&pm->pm_vmlist);
mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);
- pm->pm_l0table = pmap_alloc_pdp(pm, &pm->pm_l0table_pa);
+ pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, true);
+ KASSERT(pm->pm_l0table_pa != POOL_PADDR_INVALID);
+ pm->pm_l0table = (pd_entry_t *)AARCH64_PA_TO_KVA(pm->pm_l0table_pa);
KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);
UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
@@ -1282,11 +1323,115 @@
PMAP_COUNT(destroy);
}
+static inline void
+_pmap_pdp_setparent(struct pmap *pm, struct vm_page *pg, pt_entry_t *ptep)
+{
+ if ((pm != pmap_kernel()) && (pg != NULL))
+ VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = ptep;
+}
+
+/*
+ * increment the reference counter of the page descriptor page.
+ * the reference counter should be equal to
+ * 1 + the number of valid entries the page has.
+ */
+static inline void
+_pmap_pdp_addref(struct pmap *pm, paddr_t pdppa, struct vm_page *pdppg_hint)
+{
+ struct vm_page *pg;
+
+ /* kernel L0-L3 pages are never freed */
+ if (pm == pmap_kernel())
+ return;
+ /* no need for L0 page */
+ if (pm->pm_l0table_pa == pdppa)
+ return;
+
+ pg = pdppg_hint;
+ if (pg == NULL)
+ pg = PHYS_TO_VM_PAGE(pdppa);
+ KASSERT(pg != NULL);
+
+ CTASSERT(sizeof(pg->wire_count) == sizeof(uint16_t));
+ atomic_add_16(&pg->wire_count, 1);
+
+ KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
+ "pg=%p, wire_count=%d", pg, pg->wire_count);
+}
+
+/*
+ * decrement the reference counter of the page descriptor page.
+ * if the reference counter becomes 1 (=empty), the page is freed and
+ * true is returned; otherwise false is returned.
+ * kernel pages and L0 page descriptor pages are never freed.
+ */
+static bool
+_pmap_pdp_delref(struct pmap *pm, paddr_t pdppa, bool do_free_pdp)
+{
+ struct vm_page *pg;
+ bool removed;
+ uint16_t wirecount;
+
+ /* kernel L0-L3 pages are never freed */
+ if (pm == pmap_kernel())
+ return false;
+ /* no need for L0 page */
+ if (pm->pm_l0table_pa == pdppa)
+ return false;
+
+ pg = PHYS_TO_VM_PAGE(pdppa);
+ KASSERT(pg != NULL);
+
+ wirecount = atomic_add_16_nv(&pg->wire_count, -1);
+
+ if (!do_free_pdp)
+ return false;
+
+ /* if no reference, free pdp */
+ removed = false;
+ while (wirecount == 1) {
+ pd_entry_t *ptep_in_parent, opte;
+
+ ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent;
+ if (ptep_in_parent == NULL) {
+ /* no parent */
+ pmap_free_pdp(pm, pg);
+ removed = true;
+ break;
+ }
+
+ /* unlink from parent */
+ opte = atomic_swap_64(ptep_in_parent, 0);
+ KASSERT(lxpde_valid(opte));
+ wirecount = atomic_add_16_nv(&pg->wire_count, -1); /* 1 -> 0 */
+ KASSERT(wirecount == 0);
+ pmap_free_pdp(pm, pg);
+ removed = true;
+
+ /* L3->L2->L1. no need for L0 */
+ pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent));
+ if (pdppa == pm->pm_l0table_pa)
+ break;
+
+ pg = PHYS_TO_VM_PAGE(pdppa);
+ KASSERT(pg != NULL);
+ KASSERTMSG(pg->wire_count >= 1,
+ "wire_count=%d", pg->wire_count);
+ /* decrement wire_count of parent */
+ wirecount = atomic_add_16_nv(&pg->wire_count, -1);
+ KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
+ "pm=%p[%d], pg=%p, wire_count=%d",
+ pm, pm->pm_asid, pg, pg->wire_count);
+ }
+
+ return removed;
+}
+
static int
_pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
u_int flags, bool kenter)
{
- struct vm_page *pg;
+ struct vm_page *pg, *pdppg, *pdppg0;
struct pv_entry *spv, *opv = NULL;
pd_entry_t pde;
pt_entry_t attr, pte, *ptep;
@@ -1294,12 +1439,12 @@
pt_entry_t opte;
#endif
pd_entry_t *l0, *l1, *l2, *l3;
- paddr_t pdppa;
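The heart of the new bookkeeping is the wire_count invariant spelled out in the comments above: a page descriptor page carries one base reference plus one reference per valid entry, and it becomes freeable once only the base reference remains. A self-contained sketch of that invariant (illustrative only; it uses plain C11 atomics rather than the kernel's atomic_add_16/atomic_add_16_nv, and all names are assumptions):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LN_ENTRIES 512	/* entries per page-table page (4KB granule) */

struct pdp {
	_Atomic uint16_t wire_count;	/* 1 (base) + number of valid entries */
};

/* a freshly allocated page-table page holds only the base reference */
static void
pdp_init(struct pdp *pg)
{
	atomic_init(&pg->wire_count, 1);
}

/* installing a valid entry takes a reference */
static void
pdp_addref(struct pdp *pg)
{
	uint16_t n = atomic_fetch_add(&pg->wire_count, 1) + 1;

	assert(n <= LN_ENTRIES + 1);	/* max = 1 + LN_ENTRIES = 513 */
}

/*
 * clearing an entry drops a reference; returns true when only the
 * base reference is left, i.e. the page is empty and may be freed
 */
static bool
pdp_delref(struct pdp *pg)
{
	return atomic_fetch_sub(&pg->wire_count, 1) - 1 == 1;
}

In the kernel the free then repeats one level up: _pmap_pdp_delref() follows mdpg_ptep_parent to drop the parent's reference for the entry just cleared, walking L3 -> L2 -> L1 but never freeing the L0 page.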