Source-Changes-HG archive


[src/netbsd-9]: src/sys/kern Pull up following revision(s) (requested by maxv...



details:   https://anonhg.NetBSD.org/src/rev/3ebc1362ef92
branches:  netbsd-9
changeset: 458200:3ebc1362ef92
user:      martin <martin%NetBSD.org@localhost>
date:      Sun Aug 18 09:52:12 2019 +0000

description:
Pull up following revision(s) (requested by maxv in ticket #81):

        sys/kern/subr_pool.c: revision 1.253
        sys/kern/subr_pool.c: revision 1.254
        sys/kern/subr_pool.c: revision 1.255

Kernel Heap Hardening: perform certain sanity checks on the pool caches
directly, to immediately detect certain bugs that would otherwise have
been detected only later, in the pool layer, and only if the buffer ever
reached the pool layer.
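
The PR_PHINPAGE layout is what makes these checks cheap: the item header
lives at the start of the object's backing page, so it can be recovered
from any object address with a mask and validated on the spot. Here is a
minimal userland sketch of that pattern, with illustrative names rather
than the kernel's structures:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

	/* Illustrative stand-in for struct pool_item_header. */
	struct item_header {
		void	*ph_page;	/* must point back at the page itself */
		uint32_t ph_poolid;	/* must match the owning pool's id */
	};

	static void
	check_object(uint32_t poolid, void *object)
	{
		/* Round the object address down to its page... */
		void *page = (void *)((uintptr_t)object & PAGE_MASK);
		/* ...where PR_PHINPAGE pools store the header. */
		struct item_header *ph = page;

		if (ph->ph_page != page || ph->ph_poolid != poolid)
			printf("bogus free of %p caught immediately\n", object);
	}

In the kernel the equivalent checks panic. Doing them in pool_cache_put()
itself means a corrupted or misdirected free is caught at the cache layer,
instead of lingering in the per-CPU caches until the buffer happens to
drain down to the pool.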

 -

Replace || by && in KASAN, to increase the pool coverage.
Strictly speaking, what we want to avoid is poisoning buffers that were
referenced in a global list as part of the ctor. But if a buffer indeed
got referenced as part of the ctor, it necessarily has to be unreferenced
in the dtor, which implies it has to have a dtor. So we want both a ctor
and a dtor, not just one of them.

Note that POOL_QUARANTINE already implicitly provides this increased
coverage.
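
To see why a lone ctor does not need the exemption, consider the only
legitimate way a freed-to-cache object can still be reachable: its ctor
published it somewhere global, e.g. on a list. Whatever the ctor links
in, the dtor must unlink, so such a pool necessarily has both hooks. A
hypothetical userland sketch, with ctor/dtor signatures mirroring
pool_cache's:

	#include <sys/queue.h>

	struct obj {
		LIST_ENTRY(obj) o_entry;
	};

	static LIST_HEAD(, obj) all_objs = LIST_HEAD_INITIALIZER(all_objs);

	/* The ctor publishes the object on a global list... */
	static int
	obj_ctor(void *arg, void *v, int flags)
	{
		struct obj *o = v;

		(void)arg; (void)flags;
		LIST_INSERT_HEAD(&all_objs, o, o_entry);
		return 0;
	}

	/* ...so a dtor must exist to unpublish it before the memory dies. */
	static void
	obj_dtor(void *arg, void *v)
	{
		struct obj *o = v;

		(void)arg;
		LIST_REMOVE(o, o_entry);
	}

A pool with only a ctor (or only a dtor) cannot maintain such a global
reference correctly, so its cached objects are safe to poison, which the
&& form now does.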

 -

Initialize pp->pr_redzone to false. For some reason, with KUBSAN, GCC does
not eliminate the unused branch in pr_item_linkedlist_put(), and this
leads to an uninitialized access in that unused branch, which triggers
KUBSAN messages.
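
KUBSAN's bool instrumentation flags every load of a _Bool whose value is
neither 0 nor 1, so a read of a never-written pr_redzone in the branch GCC
fails to prune is enough to trip it. A userland illustration of the same
class of report (the stale heap bytes are simulated here with memset):

	/* Build with: cc -fsanitize=bool demo.c */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct pool_like {
		bool pr_redzone;
	};

	int
	main(void)
	{
		struct pool_like *pp = malloc(sizeof(*pp));

		memset(pp, 0xff, sizeof(*pp));	/* stand-in for stale memory */
		/* UBSan: "load of value 255, which is not a valid value
		 * for type 'bool'" */
		if (pp->pr_redzone)
			printf("redzone enabled\n");
		free(pp);
		return 0;
	}

Unconditionally storing false before pool_redzone_init() makes the load
well-defined whether or not the compiler eliminates the branch.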

diffstat:

 sys/kern/subr_pool.c |  72 +++++++++++++++++++++++++++++++++++----------------
 1 files changed, 49 insertions(+), 23 deletions(-)

diffs (144 lines):

diff -r 36773c94e884 -r 3ebc1362ef92 sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c      Fri Aug 16 19:31:27 2019 +0000
+++ b/sys/kern/subr_pool.c      Sun Aug 18 09:52:12 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr_pool.c,v 1.252 2019/06/29 11:13:23 maxv Exp $     */
+/*     $NetBSD: subr_pool.c,v 1.252.2.1 2019/08/18 09:52:12 martin Exp $       */
 
 /*
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.252 2019/06/29 11:13:23 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.252.2.1 2019/08/18 09:52:12 martin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -216,6 +216,8 @@
 
 #define        POOL_NEEDS_CATCHUP(pp)                                          \
        ((pp)->pr_nitems < (pp)->pr_minitems)
+#define        POOL_OBJ_TO_PAGE(pp, v)                                         \
+       (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask)
 
 /*
  * Pool cache management.
@@ -408,6 +410,40 @@
 
 /* -------------------------------------------------------------------------- */
 
+static inline void
+pr_phinpage_check(struct pool *pp, struct pool_item_header *ph, void *page,
+    void *object)
+{
+       if (__predict_false((void *)ph->ph_page != page)) {
+               panic("%s: [%s] item %p not part of pool", __func__,
+                   pp->pr_wchan, object);
+       }
+       if (__predict_false((char *)object < (char *)page + ph->ph_off)) {
+               panic("%s: [%s] item %p below item space", __func__,
+                   pp->pr_wchan, object);
+       }
+       if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
+               panic("%s: [%s] item %p poolid %u != %u", __func__,
+                   pp->pr_wchan, object, ph->ph_poolid, pp->pr_poolid);
+       }
+}
+
+static inline void
+pc_phinpage_check(pool_cache_t pc, void *object)
+{
+       struct pool_item_header *ph;
+       struct pool *pp;
+       void *page;
+
+       pp = &pc->pc_pool;
+       page = POOL_OBJ_TO_PAGE(pp, object);
+       ph = (struct pool_item_header *)page;
+
+       pr_phinpage_check(pp, ph, page, object);
+}
+
+/* -------------------------------------------------------------------------- */
+
 static inline int
 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
 {
@@ -456,25 +492,10 @@
        if ((pp->pr_roflags & PR_NOALIGN) != 0) {
                ph = pr_find_pagehead_noalign(pp, v);
        } else {
-               void *page =
-                   (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
-
+               void *page = POOL_OBJ_TO_PAGE(pp, v);
                if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
                        ph = (struct pool_item_header *)page;
-                       if (__predict_false((void *)ph->ph_page != page)) {
-                               panic("%s: [%s] item %p not part of pool",
-                                   __func__, pp->pr_wchan, v);
-                       }
-                       if (__predict_false((char *)v < (char *)page +
-                           ph->ph_off)) {
-                               panic("%s: [%s] item %p below item space",
-                                   __func__, pp->pr_wchan, v);
-                       }
-                       if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
-                               panic("%s: [%s] item %p poolid %u != %u",
-                                   __func__, pp->pr_wchan, v, ph->ph_poolid,
-                                   pp->pr_poolid);
-                       }
+                       pr_phinpage_check(pp, ph, page, v);
                } else {
                        tmp.ph_page = page;
                        ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
@@ -746,6 +767,7 @@
        pp->pr_drain_hook = NULL;
        pp->pr_drain_hook_arg = NULL;
        pp->pr_freecheck = NULL;
+       pp->pr_redzone = false;
        pool_redzone_init(pp, size);
        pool_quarantine_init(pp);
 
@@ -1832,7 +1854,7 @@
        int n;
 
        if ((pp->pr_roflags & PR_NOALIGN) == 0) {
-               page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
+               page = POOL_OBJ_TO_PAGE(pp, ph);
                if (page != ph->ph_page &&
                    (pp->pr_roflags & PR_PHINPAGE) != 0) {
                        if (label != NULL)
@@ -1866,7 +1888,7 @@
                if ((pp->pr_roflags & PR_NOALIGN) != 0) {
                        continue;
                }
-               page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
+               page = POOL_OBJ_TO_PAGE(pp, pi);
                if (page == ph->ph_page)
                        continue;
 
@@ -2616,6 +2638,10 @@
        pool_cache_redzone_check(pc, object);
        FREECHECK_IN(&pc->pc_freecheck, object);
 
+       if (pc->pc_pool.pr_roflags & PR_PHINPAGE) {
+               pc_phinpage_check(pc, object);
+       }
+
        if (pool_cache_put_quarantine(pc, object, pa)) {
                return;
        }
@@ -3073,8 +3099,8 @@
 pool_cache_redzone_check(pool_cache_t pc, void *p)
 {
 #ifdef KASAN
-       /* If there is a ctor/dtor, leave the data as valid. */
-       if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
+       /* If there is a ctor+dtor, leave the data as valid. */
+       if (__predict_false(pc_has_ctor(pc) && pc_has_dtor(pc))) {
                return;
        }
 #endif
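
For reference, the arithmetic the new POOL_OBJ_TO_PAGE macro centralizes
is plain truncation to a page boundary: pa_pagemask is the complement of
(page size - 1). A small sketch with an assumed 4 KiB page and an
arbitrary address:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uintptr_t pagemask = ~(uintptr_t)(4096 - 1); /* assumed page size */
		uintptr_t object = 0xc0a013c0;	/* arbitrary item address */

		/* Masking clears the low bits, rounding down to the page. */
		assert((object & pagemask) == 0xc0a01000);
		printf("page = %#lx\n", (unsigned long)(object & pagemask));
		return 0;
	}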


