Source-Changes-HG archive


[src/trunk]: src/sys/arch/x86/x86 - pmap_enter(): under low memory conditions...



details:   https://anonhg.NetBSD.org/src/rev/999e08bc6f04
branches:  trunk
changeset: 970252:999e08bc6f04
user:      ad <ad%NetBSD.org@localhost>
date:      Tue Mar 17 22:37:05 2020 +0000

description:
- pmap_enter(): under low memory conditions, if PTP allocation succeeded and
  then PV entry allocation failed, PTP pages were being freed without their
  struct pmap_page being reset to its non-PTP state, which then caused
  havoc in pmap_page_remove().  Fix it.

- pmap_enter_pv(): don't do the PV check if memory allocation failed.

Reported-by: syzbot+d9b42238107c155ca0cd%syzkaller.appspotmail.com@localhost
Reported-by: syzbot+80cf4850dc1cf29901dc%syzkaller.appspotmail.com@localhost
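
For illustration only, a minimal stand-alone C sketch of the failure path the
first item describes: a two-stage allocation where the second stage fails and
the first-stage page must have its per-page metadata reset before it is handed
back to the allocator.  The names here (struct page, alloc_ptp, free_ptp,
enter_mapping) are hypothetical and are not the NetBSD pmap API.

/*
 * Hypothetical, simplified sketch of the fixed failure path; names are
 * illustrative and not the NetBSD pmap API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	bool	 is_ptp;	/* metadata toggled while the page is a PTP */
	void	*pv_tracking;	/* state that only makes sense for non-PTPs */
};

static struct page *
alloc_ptp(void)
{
	struct page *pg = calloc(1, sizeof(*pg));

	if (pg != NULL)
		pg->is_ptp = true;	/* switch metadata to PTP mode */
	return pg;
}

static void
free_ptp(struct page *pg)
{

	/*
	 * The bug was returning the page with its PTP metadata still set;
	 * the fix is to restore the non-PTP state before freeing, so later
	 * users of the page see consistent state.
	 */
	pg->is_ptp = false;
	pg->pv_tracking = NULL;
	free(pg);
}

static int
enter_mapping(bool simulate_pv_failure)
{
	struct page *ptp = alloc_ptp();

	if (ptp == NULL)
		return -1;

	if (simulate_pv_failure) {
		/* PV entry allocation failed: undo the PTP allocation. */
		free_ptp(ptp);
		return -1;
	}

	/* ... install the mapping ... */
	free_ptp(ptp);		/* keep the example leak-free */
	return 0;
}

int
main(void)
{

	printf("enter_mapping (PV alloc fails): %d\n", enter_mapping(true));
	printf("enter_mapping (success path):   %d\n", enter_mapping(false));
	return 0;
}

In the actual change below, the per-page initialisation formerly done by
pmap_ptp_init()/pmap_ptp_fini() is inlined into pmap_get_ptp() and
pmap_freepage(), and pmap_unget_ptp() now frees via pmap_freepage(), so the
error path restores the non-PTP state the same way the normal free path does.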

diffstat:

 sys/arch/x86/x86/pmap.c |  61 +++++++++++++-----------------------------------
 1 files changed, 17 insertions(+), 44 deletions(-)

diffs (136 lines):

diff -r b0b85360808c -r 999e08bc6f04 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Tue Mar 17 22:29:19 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c   Tue Mar 17 22:37:05 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.374 2020/03/17 22:29:19 ad Exp $    */
+/*     $NetBSD: pmap.c,v 1.375 2020/03/17 22:37:05 ad Exp $    */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.374 2020/03/17 22:29:19 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.375 2020/03/17 22:37:05 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -635,30 +635,6 @@
 }
 
 /*
- * pmap_ptp_init: initialize new page table page
- */
-static inline void
-pmap_ptp_init(struct vm_page *ptp)
-{
-
-       ptp->uanon = (struct vm_anon *)(vaddr_t)~0L;
-       rb_tree_init(&VM_PAGE_TO_PP(ptp)->pp_rb, &pmap_rbtree_ops);
-       PMAP_CHECK_PP(VM_PAGE_TO_PP(ptp));
-}
-
-/*
- * pmap_ptp_fini: finalize a page table page
- */
-static inline void
-pmap_ptp_fini(struct vm_page *ptp)
-{
-
-       KASSERT(RB_TREE_MIN(&VM_PAGE_TO_PP(ptp)->pp_rb) == NULL);
-       PMAP_CHECK_PP(VM_PAGE_TO_PP(ptp));
-       ptp->uanon = NULL;
-}
-
-/*
  * pmap_ptp_range_set: abuse ptp->uanon to record minimum VA of PTE
  */
 static inline void
@@ -2158,7 +2134,9 @@
                LIST_INSERT_HEAD(&pp->pp_pvlist, pve, pve_list);
        }
        mutex_spin_exit(&pp->pp_lock);
-       pmap_check_pv(pmap, ptp, pp, va, true);
+       if (error == 0) {
+               pmap_check_pv(pmap, ptp, pp, va, true);
+       }
 
        return error;
 }
@@ -2252,13 +2230,15 @@
        int lidx;
 
        KASSERT(ptp->wire_count == 1);
+       PMAP_CHECK_PP(VM_PAGE_TO_PP(ptp));
 
        lidx = level - 1;
        pmap_stats_update(pmap, -1, 0);
        if (pmap->pm_ptphint[lidx] == ptp)
                pmap->pm_ptphint[lidx] = NULL;
        ptp->wire_count = 0;
-       pmap_ptp_fini(ptp);
+       ptp->uanon = NULL;
+       KASSERT(RB_TREE_MIN(&VM_PAGE_TO_PP(ptp)->pp_rb) == NULL);
 
        /*
         * Enqueue the PTP to be freed by pmap_update().  We can't remove
@@ -2357,19 +2337,21 @@
 
                if (pt->pg[i] == NULL) {
                        pt->pg[i] = uvm_pagealloc(obj, off, NULL, aflags);
-                       pt->alloced[i] = true;
-                       if (pt->pg[i] != NULL) {
-                               pmap_ptp_init(pt->pg[i]);
-                       }
+                       pt->alloced[i] = (pt->pg[i] != NULL);
                } else if (pt->pg[i]->wire_count == 0) {
                        /* This page was queued to be freed; dequeue it. */
                        LIST_REMOVE(pt->pg[i], mdpage.mp_pp.pp_link);
-                       pmap_ptp_init(pt->pg[i]);
+                       pt->alloced[i] = true;
                }
                PMAP_DUMMY_UNLOCK(pmap);
                if (pt->pg[i] == NULL) {
                        pmap_unget_ptp(pmap, pt);
                        return ENOMEM;
+               } else {
+                       pt->pg[i]->uanon = (struct vm_anon *)(vaddr_t)~0L;
+                       rb_tree_init(&VM_PAGE_TO_PP(pt->pg[i])->pp_rb,
+                           &pmap_rbtree_ops);
+                       PMAP_CHECK_PP(VM_PAGE_TO_PP(pt->pg[i]));
                }
        }
        ptp = pt->pg[2];
@@ -2464,22 +2446,12 @@
        KASSERT(mutex_owned(&pmap->pm_lock));
 
        for (i = PTP_LEVELS; i > 1; i--) {
-               if (pt->pg[i] == NULL) {
-                       break;
-               }
                if (!pt->alloced[i]) {
                        continue;
                }
                KASSERT(pt->pg[i]->wire_count == 0);
                PMAP_CHECK_PP(VM_PAGE_TO_PP(pt->pg[i]));
-               /* pmap zeros all pages before freeing. */
-               pt->pg[i]->flags |= PG_ZERO; 
-               pmap_ptp_fini(pt->pg[i]);
-               PMAP_DUMMY_LOCK(pmap);
-               uvm_pagefree(pt->pg[i]);
-               PMAP_DUMMY_UNLOCK(pmap);
-               pt->pg[i] = NULL;
-               pmap->pm_ptphint[0] = NULL;
+               pmap_freepage(pmap, pt->pg[i], i - 1);
        }
 }
 
@@ -5232,6 +5204,7 @@
                mutex_enter(&pmap->pm_lock);
                while ((ptp = LIST_FIRST(&pmap->pm_gc_ptp)) != NULL) {
                        KASSERT(ptp->wire_count == 0);
+                       KASSERT(ptp->uanon == NULL);
                        LIST_REMOVE(ptp, mdpage.mp_pp.pp_link);
                        pp = VM_PAGE_TO_PP(ptp);
                        LIST_INIT(&pp->pp_pvlist);
