Source-Changes-HG archive


[src/trunk]: src/sys/kern Retire KMEM_GUARD. It has been superseded by kASan, ...



details:   https://anonhg.NetBSD.org/src/rev/dc6883284406
branches:  trunk
changeset: 453520:dc6883284406
user:      maxv <maxv%NetBSD.org@localhost>
date:      Thu Aug 15 12:06:42 2019 +0000

description:
Retire KMEM_GUARD. It has been superseded by kASan, which is much more
powerful, has far wider coverage (well beyond just kmem(9)), and also
consumes less memory.

KMEM_GUARD was a debug-only option that required special DDB tweaking, and
had no use in releases or even diagnostic kernels.

As a general rule, the policy now is to harden the pool layer by default
in GENERIC, and use kASan as a diagnostic/debug/fuzzing feature to verify
each memory allocation & access in the system.
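
For readers who don't live in sys/kern: kmem(9) is the allocator whose
debugging machinery is being simplified here. A minimal, hypothetical
consumer looks like this (foo_create/foo_destroy and struct foo are
invented names for illustration; the kmem calls are the real API):

    #include <sys/kmem.h>

    struct foo {                        /* hypothetical consumer structure */
            int f_refcnt;
    };

    static struct foo *
    foo_create(void)
    {
            /* KM_SLEEP: wait for memory; this never returns NULL. */
            struct foo *f = kmem_zalloc(sizeof(*f), KM_SLEEP);

            f->f_refcnt = 1;
            return f;
    }

    static void
    foo_destroy(struct foo *f)
    {
            /* kmem(9) requires the original size back at free time. */
            kmem_free(f, sizeof(*f));
    }

That size argument to kmem_free() is exactly what the KMEM_SIZE check
in the diff below cross-verifies.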

diffstat:

 sys/kern/subr_kmem.c |  232 +-------------------------------------------------
 1 files changed, 7 insertions(+), 225 deletions(-)

diffs (truncated from 354 to 300 lines):

diff -r 3b2a9e7953fb -r dc6883284406 sys/kern/subr_kmem.c
--- a/sys/kern/subr_kmem.c      Thu Aug 15 10:29:07 2019 +0000
+++ b/sys/kern/subr_kmem.c      Thu Aug 15 12:06:42 2019 +0000
@@ -1,6 +1,6 @@
-/*     $NetBSD: subr_kmem.c,v 1.75 2019/04/07 09:20:04 maxv Exp $      */
+/*     $NetBSD: subr_kmem.c,v 1.76 2019/08/15 12:06:42 maxv Exp $      */
 
-/*-
+/*
  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -29,7 +29,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*-
+/*
  * Copyright (c)2006 YAMAMOTO Takashi,
  * All rights reserved.
  *
@@ -66,7 +66,7 @@
  *     the exact user-requested allocation size in it. When freeing, compare
  *     it with kmem_free's "size" argument.
  *
- * This option enabled on DIAGNOSTIC.
+ * This option is enabled on DIAGNOSTIC.
  *
  *  |CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|
  *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+
@@ -77,22 +77,8 @@
  *  |Size |    Buffer usable by the caller (requested size)   |Unused\
  */
 
-/*
- * KMEM_GUARD
- *     A kernel with "option DEBUG" has "kmem_guard" debugging feature compiled
- *     in. See the comment below for what kind of bugs it tries to detect. Even
- *     if compiled in, it's disabled by default because it's very expensive.
- *     You can enable it on boot by:
- *             boot -d
- *             db> w kmem_guard_depth 0t30000
- *             db> c
- *
- *     The default value of kmem_guard_depth is 0, which means disabled.
- *     It can be changed by KMEM_GUARD_DEPTH kernel config option.
- */
-
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.75 2019/04/07 09:20:04 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.76 2019/08/15 12:06:42 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_kmem.h"
@@ -177,8 +163,6 @@
 #endif
 
 #if defined(DEBUG) && defined(_HARDKERNEL)
-#define        KMEM_SIZE
-#define        KMEM_GUARD
 static void *kmem_freecheck;
 #endif
 
@@ -195,31 +179,12 @@
 #define        kmem_size_check(p, sz)  /* nothing */
 #endif
 
-#if defined(KMEM_GUARD)
-#ifndef KMEM_GUARD_DEPTH
-#define KMEM_GUARD_DEPTH 0
-#endif
-struct kmem_guard {
-       u_int           kg_depth;
-       intptr_t *      kg_fifo;
-       u_int           kg_rotor;
-       vmem_t *        kg_vmem;
-};
-static bool kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
-static void *kmem_guard_alloc(struct kmem_guard *, size_t, bool);
-static void kmem_guard_free(struct kmem_guard *, size_t, void *);
-int kmem_guard_depth = KMEM_GUARD_DEPTH;
-static bool kmem_guard_enabled;
-static struct kmem_guard kmem_guard;
-#endif /* defined(KMEM_GUARD) */
-
 CTASSERT(KM_SLEEP == PR_WAITOK);
 CTASSERT(KM_NOSLEEP == PR_NOWAIT);
 
 /*
  * kmem_intr_alloc: allocate wired memory.
  */
-
 void *
 kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
 {
@@ -236,13 +201,6 @@
        KASSERT((kmflags & KM_SLEEP) || (kmflags & KM_NOSLEEP));
        KASSERT(!(kmflags & KM_SLEEP) || !(kmflags & KM_NOSLEEP));
 
-#ifdef KMEM_GUARD
-       if (kmem_guard_enabled) {
-               return kmem_guard_alloc(&kmem_guard, requested_size,
-                   (kmflags & KM_SLEEP) != 0);
-       }
-#endif
-
        kasan_add_redzone(&requested_size);
        size = kmem_roundup_size(requested_size);
        allocsz = size + SIZE_SIZE;
@@ -280,7 +238,6 @@
 /*
  * kmem_intr_zalloc: allocate zeroed wired memory.
  */
-
 void *
 kmem_intr_zalloc(size_t size, km_flag_t kmflags)
 {
@@ -296,7 +253,6 @@
 /*
  * kmem_intr_free: free wired memory allocated by kmem_alloc.
  */
-
 void
 kmem_intr_free(void *p, size_t requested_size)
 {
@@ -307,13 +263,6 @@
        KASSERT(p != NULL);
        KASSERT(requested_size > 0);
 
-#ifdef KMEM_GUARD
-       if (kmem_guard_enabled) {
-               kmem_guard_free(&kmem_guard, requested_size, p);
-               return;
-       }
-#endif
-
        kasan_add_redzone(&requested_size);
        size = kmem_roundup_size(requested_size);
        allocsz = size + SIZE_SIZE;
@@ -341,13 +290,12 @@
        pool_cache_put(pc, p);
 }
 
-/* ---- kmem API */
+/* -------------------------------- Kmem API -------------------------------- */
 
 /*
  * kmem_alloc: allocate wired memory.
  * => must not be called from interrupt context.
  */
-
 void *
 kmem_alloc(size_t size, km_flag_t kmflags)
 {
@@ -364,7 +312,6 @@
  * kmem_zalloc: allocate zeroed wired memory.
  * => must not be called from interrupt context.
  */
-
 void *
 kmem_zalloc(size_t size, km_flag_t kmflags)
 {
@@ -381,7 +328,6 @@
  * kmem_free: free wired memory allocated by kmem_alloc.
  * => must not be called from interrupt context.
  */
-
 void
 kmem_free(void *p, size_t size)
 {
@@ -444,10 +390,6 @@
 void
 kmem_init(void)
 {
-#ifdef KMEM_GUARD
-       kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
-           kmem_va_arena);
-#endif
        kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
            kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
        kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
@@ -525,7 +467,7 @@
        kmem_free(str, strlen(str) + 1);
 }
 
-/* ------------------ DEBUG / DIAGNOSTIC ------------------ */
+/* --------------------------- DEBUG / DIAGNOSTIC --------------------------- */
 
 #if defined(KMEM_SIZE)
 static void
@@ -553,163 +495,3 @@
        hd->size = -1;
 }
 #endif /* defined(KMEM_SIZE) */
-
-#if defined(KMEM_GUARD)
-/*
- * The ultimate memory allocator for debugging, baby.  It tries to catch:
- *
- * 1. Overflow, in realtime. A guard page sits immediately after the
- *    requested area; a read/write overflow therefore triggers a page
- *    fault.
- * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
- *    just before the requested area, and holds the allocated size. Any
- *    difference with what is given at free triggers a panic.
- * 3. Underflow, at free. If an underflow occurs, the kmem header will be
- *    modified, and 2. will trigger a panic.
- * 4. Use-after-free. When freeing, the memory is unmapped, and depending
- *    on the value of kmem_guard_depth, the kernel will more or less delay
- *    the recycling of that memory. Which means that any ulterior read/write
- *    access to the memory will trigger a page fault, given it hasn't been
- *    recycled yet.
- */
-
-#include <sys/atomic.h>
-#include <uvm/uvm.h>
-
-static bool
-kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
-{
-       vaddr_t va;
-
-       /* If not enabled, we have nothing to do. */
-       if (depth == 0) {
-               return false;
-       }
-       depth = roundup(depth, PAGE_SIZE / sizeof(void *));
-       KASSERT(depth != 0);
-
-       /*
-        * Allocate fifo.
-        */
-       va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
-           UVM_KMF_WIRED | UVM_KMF_ZERO);
-       if (va == 0) {
-               return false;
-       }
-
-       /*
-        * Init object.
-        */
-       kg->kg_vmem = vm;
-       kg->kg_fifo = (void *)va;
-       kg->kg_depth = depth;
-       kg->kg_rotor = 0;
-
-       printf("kmem_guard(%p): depth %d\n", kg, depth);
-       return true;
-}
-
-static void *
-kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
-{
-       struct vm_page *pg;
-       vm_flag_t flags;
-       vmem_addr_t va;
-       vaddr_t loopva;
-       vsize_t loopsize;
-       size_t size;
-       void **p;
-
-       /*
-        * Compute the size: take the kmem header into account, and add a guard
-        * page at the end.
-        */
-       size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
-
-       /* Allocate pages of kernel VA, but do not map anything in yet. */
-       flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
-       if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
-               return NULL;
-       }
-
-       loopva = va;
-       loopsize = size - PAGE_SIZE;
-
-       while (loopsize) {
-               pg = uvm_pagealloc(NULL, loopva, NULL, 0);
-               if (__predict_false(pg == NULL)) {
-                       if (waitok) {
-                               uvm_wait("kmem_guard");
-                               continue;
-                       } else {
-                               uvm_km_pgremove_intrsafe(kernel_map, va,
-                                   va + size);
-                               vmem_free(kg->kg_vmem, va, size);
-                               return NULL;
-                       }
-               }
-
-               pg->flags &= ~PG_BUSY;  /* new page */
-               UVM_PAGE_OWN(pg, NULL);
-               pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
-                   VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
-
-               loopva += PAGE_SIZE;
-               loopsize -= PAGE_SIZE;
-       }
-
-       pmap_update(pmap_kernel());
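
The removed allocator's central trick, the guard page sitting right
behind each allocation, can be illustrated in user space. The following
is a loose sketch using POSIX mmap(2)/mprotect(2), not the kernel code
above (which additionally kept freed regions unmapped in a FIFO, up to
kmem_guard_depth entries deep, to catch use-after-free):

    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /*
     * User-space sketch of the guard-page technique: right-align the
     * buffer against an unmapped page, so the first byte written past
     * the end faults immediately.  Alignment of the returned pointer
     * is ignored for simplicity.
     */
    static void *
    guard_alloc(size_t size)
    {
            size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
            size_t npg = (size + pgsz - 1) / pgsz;
            char *base = mmap(NULL, (npg + 1) * pgsz,
                PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

            if (base == MAP_FAILED)
                    return NULL;
            /* The page just past the buffer becomes the guard page. */
            if (mprotect(base + npg * pgsz, pgsz, PROT_NONE) == -1)
                    return NULL;
            return base + npg * pgsz - size;
    }

    int
    main(void)
    {
            char *p = guard_alloc(100);

            memset(p, 0, 100);          /* in bounds: fine */
            p[100] = 'x';               /* one byte past the end: SIGSEGV */
            return 0;
    }

kASan catches the same overflow (and much more) with compiler
instrumentation and redzones instead of whole guard pages, which is why
the commit message calls it both more powerful and cheaper on memory.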


