Source-Changes-HG archive


[src/trunk]: src/sys/kern Use a temporary pool to consolidate entropy atomically.



details:   https://anonhg.NetBSD.org/src/rev/95d8c24908f3
branches:  trunk
changeset: 1009969:95d8c24908f3
user:      riastradh <riastradh@NetBSD.org>
date:      Sun May 10 00:08:12 2020 +0000

description:
Use a temporary pool to consolidate entropy atomically.

There was a low-probability race in the entropy consolidation
logic: a call to entropy_extract running concurrently with
consolidation could witness partial contributions from the CPUs;
with needed=256, for example, it might observe the count dropping
only 64 bits at a time.
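
[Editorial sketch, not part of the commit: a minimal userland
illustration of the race, with hypothetical names and simplified
logic rather than the kernel code. Each thread plays the role of
one CPU's consolidation step and subtracts its 64-bit contribution
from needed one CPU at a time, so a concurrent reader can observe
an intermediate value such as 128 rather than 0 or 256.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint needed = 256;	/* bits still wanted globally */

static void *
cpu_contribute(void *arg)
{

	/* Each per-CPU pool contributes 64 bits, one CPU at a time. */
	(void)arg;
	atomic_fetch_sub(&needed, 64);
	return NULL;
}

int
main(void)
{
	pthread_t t[4];
	unsigned i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, cpu_contribute, NULL);

	/* A concurrent extract can witness a partial total here. */
	printf("observed needed=%u\n", atomic_load(&needed));

	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("final needed=%u\n", atomic_load(&needed));
	return 0;
}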

To avoid this, feed everything from the per-CPU pools into a
temporary pool, and then feed the temporary pool into the global pool
under the lock at the same time as we update needed.
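
[Editorial sketch of the shape of the fix, again a hedged userland
analogue with stand-in types: struct pool and mix below are
placeholders for struct entpool and entpool_enter. The point is the
two-phase structure: gather every per-CPU contribution into a
stack-local temporary first, then publish the whole batch and the
needed update inside one critical section, so readers see either
the old state or the new state, never a partial mix.]

#include <pthread.h>
#include <string.h>

#define	NCPU	4

struct pool {
	unsigned char buf[32];		/* stand-in for struct entpool */
};

static struct pool global_pool;
static unsigned needed = 256;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void
mix(struct pool *dst, const struct pool *src)
{
	size_t i;

	/* Stand-in for entpool_enter; XOR suffices for the sketch. */
	for (i = 0; i < sizeof dst->buf; i++)
		dst->buf[i] ^= src->buf[i];
}

static void
consolidate(struct pool percpu[NCPU])
{
	struct pool tmp;
	int i;

	memset(&tmp, 0, sizeof tmp);

	/* Phase 1: gather all per-CPU contributions into the temporary. */
	for (i = 0; i < NCPU; i++)
		mix(&tmp, &percpu[i]);

	/* Phase 2: publish the batch and update needed under one lock. */
	pthread_mutex_lock(&lock);
	mix(&global_pool, &tmp);
	needed = 0;		/* readers now see a consistent count */
	pthread_mutex_unlock(&lock);

	/*
	 * Zero the temporary; a plain memset of a dead local may be
	 * elided by the compiler, which is why the kernel diff below
	 * uses explicit_memset instead.
	 */
	memset(&tmp, 0, sizeof tmp);
}

[In the actual change below, entropy_consolidate_xc still enters each
per-CPU extract into the temporary pool under E->lock; the essential
property is that the global pool and needed are only ever updated
together, within a single critical section.]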

diffstat:

 sys/kern/kern_entropy.c |  25 +++++++++++++++++--------
 1 files changed, 17 insertions(+), 8 deletions(-)

diffs (77 lines):

diff -r f0245e3197e2 -r 95d8c24908f3 sys/kern/kern_entropy.c
--- a/sys/kern/kern_entropy.c   Sat May 09 22:00:48 2020 +0000
+++ b/sys/kern/kern_entropy.c   Sun May 10 00:08:12 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_entropy.c,v 1.18 2020/05/09 06:12:32 riastradh Exp $      */
+/*     $NetBSD: kern_entropy.c,v 1.19 2020/05/10 00:08:12 riastradh Exp $      */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -75,7 +75,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.18 2020/05/09 06:12:32 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.19 2020/05/10 00:08:12 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -984,11 +984,14 @@
 {
        static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
        static struct timeval lasttime; /* serialized by E->lock */
+       struct entpool pool;
+       uint8_t buf[ENTPOOL_CAPACITY];
        unsigned diff;
        uint64_t ticket;
 
-       /* Gather entropy on all CPUs.  */
-       ticket = xc_broadcast(0, &entropy_consolidate_xc, NULL, NULL);
+       /* Gather entropy on all CPUs into a temporary pool.  */
+       memset(&pool, 0, sizeof pool);
+       ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
        xc_wait(ticket);
 
        /* Acquire the lock to notify waiters.  */
@@ -1000,6 +1003,11 @@
        /* Note when we last consolidated, i.e. now.  */
        E->timestamp = time_uptime;
 
+       /* Mix what we gathered into the global pool.  */
+       entpool_extract(&pool, buf, sizeof buf);
+       entpool_enter(&E->pool, buf, sizeof buf);
+       explicit_memset(&pool, 0, sizeof pool);
+
        /* Count the entropy that was gathered.  */
        diff = MIN(E->needed, E->pending);
        atomic_store_relaxed(&E->needed, E->needed - diff);
@@ -1024,8 +1032,9 @@
  *     into the global pool.
  */
 static void
-entropy_consolidate_xc(void *arg1 __unused, void *arg2 __unused)
+entropy_consolidate_xc(void *vpool, void *arg2 __unused)
 {
+       struct entpool *pool = vpool;
        struct entropy_cpu *ec;
        uint8_t buf[ENTPOOL_CAPACITY];
        uint32_t extra[7];
@@ -1063,15 +1072,15 @@
 
        /*
         * Copy over statistics, and enter the per-CPU extract and the
-        * extra timing into the global pool, under the global lock.
+        * extra timing into the temporary pool, under the global lock.
         */
        mutex_enter(&E->lock);
        extra[i++] = entropy_timer();
-       entpool_enter(&E->pool, buf, sizeof buf);
+       entpool_enter(pool, buf, sizeof buf);
        explicit_memset(buf, 0, sizeof buf);
        extra[i++] = entropy_timer();
        KASSERT(i == __arraycount(extra));
-       entpool_enter(&E->pool, extra, sizeof extra);
+       entpool_enter(pool, extra, sizeof extra);
        explicit_memset(extra, 0, sizeof extra);
        mutex_exit(&E->lock);
 }


