Source-Changes-HG archive


[src/trunk]: src/sys Consolidate entropy on RNDADDDATA and writes to /dev/ran...



details:   https://anonhg.NetBSD.org/src/rev/ad33cacf809e
branches:  trunk
changeset: 932452:ad33cacf809e
user:      riastradh <riastradh@NetBSD.org>
date:      Thu May 07 19:05:51 2020 +0000

description:
Consolidate entropy on RNDADDDATA and writes to /dev/random.

The man page for some time has advertised:

  Writing to either /dev/random or /dev/urandom influences subsequent
  output of both devices, guaranteed to take effect at next open.

So let's make that true again.
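
For illustration, a minimal userland sketch of the kind of write this
guarantee covers (the seed file path is illustrative and error
handling is trimmed).  With this change, any successful write like
this triggers consolidation, so it takes effect before the next open
of either device:

  /*
   * Sketch only, not part of this change: feed seed material to
   * /dev/random from userland.
   */
  #include <fcntl.h>
  #include <unistd.h>

  int
  main(void)
  {
      char buf[512];
      ssize_t n;
      int seedfd = open("/var/db/entropy-file", O_RDONLY); /* illustrative path */
      int rndfd = open("/dev/random", O_WRONLY);

      if (seedfd == -1 || rndfd == -1)
          return 1;
      while ((n = read(seedfd, buf, sizeof buf)) > 0) {
          if (write(rndfd, buf, (size_t)n) != n)
              return 1;
      }
      close(rndfd);
      close(seedfd);
      return 0;
  }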

It is a conscious choice _not_ to consolidate entropy frequently.
For example, if you have a _slow_ HWRNG, which provides 32 bits of
entropy every few seconds, and you reveal a hash of it to the
adversary before any more comes in, the adversary can in principle
just keep guessing the intermediate state by a brute force search
over ~2^32 possibilities.
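
To see the shape of that attack, consider a toy model -- the mixing
function below is a made-up placeholder, not the kernel's actual
sponge-based pool construction.  If the adversary already knows the
pool state before the 32-bit sample was added and then sees output
derived from the new state, a search over all 2^32 candidates
recovers the sample:

  /*
   * Toy model only: mix32() just stands in for the pool to show the
   * shape of the ~2^32 search; it is not cryptographic.
   */
  #include <inttypes.h>
  #include <stdio.h>

  static uint64_t
  mix32(uint64_t state, uint32_t sample)
  {
      uint64_t x = state ^ ((uint64_t)sample * 0x9e3779b97f4a7c15ULL);
      x ^= x >> 32;
      return x * 0xff51afd7ed558ccdULL;
  }

  int
  main(void)
  {
      uint64_t known_state = 0x1234567890abcdefULL;   /* assumed already known */
      uint32_t secret = 0xdeadbeef;                   /* 32 bits of fresh entropy */
      uint64_t revealed = mix32(known_state, secret); /* what the adversary sees */

      /* At most 2^32 guesses -- minutes of CPU time with this toy function. */
      for (uint64_t guess = 0; guess <= UINT32_MAX; guess++) {
          if (mix32(known_state, (uint32_t)guess) == revealed) {
              printf("recovered the 32-bit sample: %#" PRIx32 "\n",
                  (uint32_t)guess);
              break;
          }
      }
      return 0;
  }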

To mitigate this, the kernel generally tries to avoid consolidating
entropy from the per-CPU pools until doing so would bring us from
zero entropy to full entropy.

However, there are various _possible_ sources of entropy for which it
is hard to give honest estimates that are valid on ~all machines --
like interrupt timings.  The time at which we read a seed
in, which usually happens via /etc/rc.d/random_seed early in
userland, is a reasonable time to gather this up.  An operator or
system engineer who knows another opportune moment can always issue
`sysctl -w kern.entropy.consolidate=1'.
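
The same trigger is also available programmatically.  A small sketch
using sysctlbyname(3), equivalent to the command above (superuser
only, minimal error handling):

  /* Sketch: programmatic equivalent of `sysctl -w kern.entropy.consolidate=1'. */
  #include <sys/param.h>
  #include <sys/sysctl.h>
  #include <err.h>

  int
  main(void)
  {
      int one = 1;

      if (sysctlbyname("kern.entropy.consolidate", NULL, NULL,
          &one, sizeof one) == -1)
          err(1, "sysctlbyname");
      return 0;
  }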

Prompted by a suggestion from nia@ to consolidate entropy at the
first transition to userland.  I chose not to do that because it
would likely cause warning fatigue on systems that are perfectly fine
with a random seed -- doing it this way instead lets rndctl -L
trigger the consolidation automatically.  A subsequent commit will
reorder the operations in rndctl again to make it work out better.

diffstat:

 sys/dev/random.c        |  12 +++++-
 sys/kern/kern_entropy.c |  81 ++++++++++++++++++++++++++++++------------------
 sys/sys/entropy.h       |   3 +-
 3 files changed, 61 insertions(+), 35 deletions(-)

diffs (239 lines):

diff -r 09bb54ba1232 -r ad33cacf809e sys/dev/random.c
--- a/sys/dev/random.c  Thu May 07 18:15:29 2020 +0000
+++ b/sys/dev/random.c  Thu May 07 19:05:51 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: random.c,v 1.2 2020/04/30 04:26:29 riastradh Exp $     */
+/*     $NetBSD: random.c,v 1.3 2020/05/07 19:05:51 riastradh Exp $     */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -47,7 +47,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.2 2020/04/30 04:26:29 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.3 2020/05/07 19:05:51 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -384,7 +384,7 @@
 {
        kauth_cred_t cred = kauth_cred_get();
        uint8_t *buf;
-       bool privileged = false;
+       bool privileged = false, any = false;
        int error = 0;
 
        /* Verify user's authorization to affect the entropy pool.  */
@@ -429,10 +429,16 @@
                if (error)
                        break;
                rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
+               any = true;
        }
 
        /* Zero the buffer and return it to the pool cache.  */
        explicit_memset(buf, 0, RANDOM_BUFSIZE);
        pool_cache_put(random_buf_pc, buf);
+
+       /* If we added anything, consolidate entropy now.  */
+       if (any)
+               entropy_consolidate();
+
        return error;
 }
diff -r 09bb54ba1232 -r ad33cacf809e sys/kern/kern_entropy.c
--- a/sys/kern/kern_entropy.c   Thu May 07 18:15:29 2020 +0000
+++ b/sys/kern/kern_entropy.c   Thu May 07 19:05:51 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_entropy.c,v 1.12 2020/05/07 00:55:13 riastradh Exp $      */
+/*     $NetBSD: kern_entropy.c,v 1.13 2020/05/07 19:05:51 riastradh Exp $      */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -60,9 +60,7 @@
  *       transition from partial entropy to full entropy, so that
  *       users can easily determine when to reseed.  This also
  *       facilitates an operator explicitly causing everything to
- *       reseed by sysctl -w kern.entropy.consolidate=1, e.g. if they
- *       just flipped a coin 256 times and wrote `echo tthhhhhthh... >
- *       /dev/random'.
+ *       reseed by sysctl -w kern.entropy.consolidate=1.
  *
  *     * No entropy estimation based on the sample values, which is a
  *       contradiction in terms and a potential source of side
@@ -77,7 +75,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.12 2020/05/07 00:55:13 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.13 2020/05/07 19:05:51 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -241,8 +239,8 @@
 static void    entropy_thread(void *);
 static uint32_t        entropy_pending(void);
 static void    entropy_pending_cpu(void *, void *, struct cpu_info *);
-static void    entropy_consolidate(void);
-static void    entropy_gather_xc(void *, void *);
+static void    entropy_do_consolidate(void);
+static void    entropy_consolidate_xc(void *, void *);
 static void    entropy_notify(void);
 static int     sysctl_entropy_consolidate(SYSCTLFN_ARGS);
 static int     sysctl_entropy_gather(SYSCTLFN_ARGS);
@@ -959,7 +957,7 @@
 
                if (consolidate) {
                        /* Do it.  */
-                       entropy_consolidate();
+                       entropy_do_consolidate();
 
                        /* Mitigate abuse.  */
                        kpause("entropy", false, hz, NULL);
@@ -993,13 +991,13 @@
 }
 
 /*
- * entropy_consolidate()
+ * entropy_do_consolidate()
  *
  *     Issue a cross-call to gather entropy on all CPUs and advance
  *     the entropy epoch.
  */
 static void
-entropy_consolidate(void)
+entropy_do_consolidate(void)
 {
        static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
        static struct timeval lasttime; /* serialized by E->lock */
@@ -1007,7 +1005,7 @@
        uint64_t ticket;
 
        /* Gather entropy on all CPUs.  */
-       ticket = xc_broadcast(0, &entropy_gather_xc, NULL, NULL);
+       ticket = xc_broadcast(0, &entropy_consolidate_xc, NULL, NULL);
        xc_wait(ticket);
 
        /* Acquire the lock to notify waiters.  */
@@ -1037,13 +1035,13 @@
 }
 
 /*
- * entropy_gather_xc(arg1, arg2)
+ * entropy_consolidate_xc(arg1, arg2)
  *
  *     Extract output from the local CPU's input pool and enter it
  *     into the global pool.
  */
 static void
-entropy_gather_xc(void *arg1 __unused, void *arg2 __unused)
+entropy_consolidate_xc(void *arg1 __unused, void *arg2 __unused)
 {
        struct entropy_cpu *ec;
        uint8_t buf[ENTPOOL_CAPACITY];
@@ -1144,19 +1142,49 @@
 }
 
 /*
+ * entropy_consolidate()
+ *
+ *     Trigger entropy consolidation and wait for it to complete.
+ *
+ *     This should be used sparingly, not periodically -- requiring
+ *     conscious intervention by the operator or a clear policy
+ *     decision.  Otherwise, the kernel will automatically consolidate
+ *     when enough entropy has been gathered into per-CPU pools to
+ *     transition to full entropy.
+ */
+void
+entropy_consolidate(void)
+{
+       uint64_t ticket;
+       int error;
+
+       KASSERT(E->stage == ENTROPY_HOT);
+
+       mutex_enter(&E->lock);
+       ticket = entropy_consolidate_evcnt.ev_count;
+       E->consolidate = true;
+       cv_broadcast(&E->cv);
+       while (ticket == entropy_consolidate_evcnt.ev_count) {
+               error = cv_wait_sig(&E->cv, &E->lock);
+               if (error)
+                       break;
+       }
+       mutex_exit(&E->lock);
+}
+
+/*
  * sysctl -w kern.entropy.consolidate=1
  *
  *     Trigger entropy consolidation and wait for it to complete.
- *     Writable only by superuser.  This is the only way for the
- *     system to consolidate entropy if the operator knows something
- *     the kernel doesn't about how unpredictable the pending entropy
- *     pools are.
+ *     Writable only by superuser.  This, writing to /dev/random, and
+ *     ioctl(RNDADDDATA) are the only ways for the system to
+ *     consolidate entropy if the operator knows something the kernel
+ *     doesn't about how unpredictable the pending entropy pools are.
  */
 static int
 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
 {
        struct sysctlnode node = *rnode;
-       uint64_t ticket;
        int arg;
        int error;
 
@@ -1166,18 +1194,8 @@
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;
-       if (arg) {
-               mutex_enter(&E->lock);
-               ticket = entropy_consolidate_evcnt.ev_count;
-               E->consolidate = true;
-               cv_broadcast(&E->cv);
-               while (ticket == entropy_consolidate_evcnt.ev_count) {
-                       error = cv_wait_sig(&E->cv, &E->lock);
-                       if (error)
-                               break;
-               }
-               mutex_exit(&E->lock);
-       }
+       if (arg)
+               entropy_consolidate();
 
        return error;
 }
@@ -2214,9 +2232,10 @@
                        mutex_exit(&E->lock);
                }
 
-               /* Enter the data.  */
+               /* Enter the data and consolidate entropy.  */
                rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
                    entropybits);
+               entropy_consolidate();
                break;
        }
        default:
diff -r 09bb54ba1232 -r ad33cacf809e sys/sys/entropy.h
--- a/sys/sys/entropy.h Thu May 07 18:15:29 2020 +0000
+++ b/sys/sys/entropy.h Thu May 07 19:05:51 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: entropy.h,v 1.1 2020/04/30 03:28:19 riastradh Exp $    */
+/*     $NetBSD: entropy.h,v 1.2 2020/05/07 19:05:51 riastradh Exp $    */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -48,6 +48,7 @@
 #define        ENTROPY_SIG     0x02
 
 void   entropy_bootrequest(void);
+void   entropy_consolidate(void);
 unsigned entropy_epoch(void);
 int    entropy_extract(void *, size_t, int);
 int    entropy_poll(int);
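
For context, a hypothetical in-kernel caller of the newly exported
entropy_consolidate(), following the same pattern random_write() now
uses above; the driver and rndsource names are made up:

  /*
   * Hypothetical example, not part of this change: a driver that
   * accepts externally supplied seed data could mirror random_write()
   * by consolidating after entering the data.
   */
  #include <sys/param.h>
  #include <sys/entropy.h>
  #include <sys/rndsource.h>

  static krndsource_t example_rndsource;      /* made-up source */

  static void
  example_feed_seed(const void *seed, uint32_t len, uint32_t entropybits)
  {
      rnd_add_data(&example_rndsource, seed, len, entropybits);
      entropy_consolidate();  /* gather per-CPU pools and advance the epoch */
  }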


