tech-crypto archive


Kernel entropy pool / cprng race fix



The attached patch should fix a race that occurs when an entropy sink
(such as a CPRNG instance that has requested a reseed) is destroyed
while that reseed is still in progress.

I've tested it minimally, but since the problem is hard to reproduce,
I'd appreciate additional testers or extra eyes.

The diff is also at http://www.panix.com/~tls/seed-destroy.diff .
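
For review purposes, here is a rough userland sketch of the locking
pattern the patch introduces: each sink gets its own mutex, the reseed
path uses a try-lock so it skips any sink whose owner is tearing it
down, and the destroy path takes the sink lock before detaching, so a
callback can never run against a half-destroyed sink.  The sketch uses
pthreads in place of kmutex and made-up names (rndsink_sketch,
sink_reseed, sink_destroy); it is only an illustration, not the kernel
code itself.

#include <pthread.h>
#include <stdio.h>

struct rndsink_sketch {
	pthread_mutex_t	 mtx;		/* per-sink lock: seed or unregister */
	void		(*cb)(void *);	/* callback run when entropy arrives */
	void		*arg;		/* callback argument */
	int		 attached;	/* still registered with the pool? */
};

/* Reseed path: deliver entropy, but skip a sink that is being destroyed. */
static void
sink_reseed(struct rndsink_sketch *s)
{
	if (pthread_mutex_trylock(&s->mtx) != 0)
		return;			/* owner holds the lock; try again later */
	if (s->attached)
		s->cb(s->arg);		/* callback runs with the sink lock held */
	pthread_mutex_unlock(&s->mtx);
}

/* Destroy path: take the sink lock first, so no callback is in flight. */
static void
sink_destroy(struct rndsink_sketch *s)
{
	pthread_mutex_lock(&s->mtx);
	s->attached = 0;		/* detach from the (notional) sink list */
	pthread_mutex_unlock(&s->mtx);
	pthread_mutex_destroy(&s->mtx);
}

static void
example_cb(void *arg)
{
	printf("sink \"%s\" rekeyed\n", (const char *)arg);
}

int
main(void)
{
	struct rndsink_sketch s = { .cb = example_cb, .arg = "demo", .attached = 1 };

	pthread_mutex_init(&s.mtx, NULL);
	sink_reseed(&s);	/* lock is free, so the callback fires */
	sink_destroy(&s);	/* safe even if a reseed were racing */
	return 0;
}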

Thanks!

-- 
Thor Lancelot Simon                                          
tls%panix.com@localhost
  "The liberties...lose much of their value whenever those who have greater
   private means are permitted to use their advantages to control the course
   of public debate."                                   -John Rawls
Index: kern/kern_rndq.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_rndq.c,v
retrieving revision 1.1
diff -u -r1.1 kern_rndq.c
--- kern/kern_rndq.c    2 Feb 2012 19:43:07 -0000       1.1
+++ kern/kern_rndq.c    6 Apr 2012 04:26:02 -0000
@@ -109,13 +109,17 @@
 SIMPLEQ_HEAD(, _rnd_sample_t)  rnd_samples;
 kmutex_t                       rnd_mtx;
 
+
 /*
  * Entropy sinks: usually other generators waiting to be rekeyed.
  *
  * A sink's callback MUST NOT re-add the sink to the list, or
- * list corruption will occur.
+ * list corruption will occur.  The list is protected by the
+ * rndsink_mtx, which must be released before calling any sink's
+ * callback.
  */
 TAILQ_HEAD(, rndsink)          rnd_sinks;
+kmutex_t                       rndsink_mtx;
 
 /*
  * Memory pool for sample buffers
@@ -215,6 +219,7 @@
        /*
         * First, take care of in-kernel consumers needing rekeying.
         */
+       mutex_spin_enter(&rndsink_mtx);
        TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) {
                if ((sink->len + RND_ENTROPY_THRESHOLD) * 8 <
                        rndpool_get_entropy_count(&rnd_pool)) {
@@ -225,11 +230,16 @@
                                panic("could not extract estimated "
                                      "entropy from pool");
                        }
+                       /* Skip if busy, else mark in-progress */
+                       if (!mutex_tryenter(&sink->mtx)) {
+                           continue;
+                       }
                        /* Move this sink to the list of pending callbacks */
                        TAILQ_REMOVE(&rnd_sinks, sink, tailq);
                        TAILQ_INSERT_HEAD(&sunk, sink, tailq);
                }
        }
+       mutex_spin_exit(&rndsink_mtx);
                
        /*
         * If we still have enough new bits to do something, feed userspace.
@@ -261,6 +271,7 @@
 #endif
                sink->cb(sink->arg);
                TAILQ_REMOVE(&sunk, sink, tailq);
+               mutex_spin_exit(&sink->mtx);
        }
 }
 
@@ -362,6 +373,7 @@
                return;
 
        mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM);
+       mutex_init(&rndsink_mtx, MUTEX_DEFAULT, IPL_VM);
 
        callout_init(&rnd_callout, CALLOUT_MPSAFE);
        callout_setfunc(&rnd_callout, rnd_timeout, NULL);
@@ -962,9 +974,12 @@
        printf("rnd: entropy sink \"%s\" wants %d bytes of data.\n",
               rs->name, (int)rs->len);
 #endif
-       mutex_spin_enter(&rndpool_mtx);
+
+       KASSERT(mutex_owned(&rs->mtx));
+
+       mutex_spin_enter(&rndsink_mtx);
        TAILQ_INSERT_TAIL(&rnd_sinks, rs, tailq);
-       mutex_spin_exit(&rndpool_mtx);
+       mutex_spin_exit(&rndsink_mtx);
        mutex_spin_enter(&rnd_mtx);
        if (rnd_timeout_pending == 0) {
                rnd_timeout_pending = 1;
@@ -980,13 +995,16 @@
 #ifdef RND_VERBOSE
        printf("rnd: entropy sink \"%s\" no longer wants data.\n", rs->name);
 #endif
-       mutex_spin_enter(&rndpool_mtx);
+
+       KASSERT(mutex_owned(&rs->mtx));
+
+       mutex_spin_enter(&rndsink_mtx);
        TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) {
                if (sink == rs) {
                        TAILQ_REMOVE(&rnd_sinks, rs, tailq);
                }
        }
-       mutex_spin_exit(&rndpool_mtx);
+       mutex_spin_exit(&rndsink_mtx);
 }
 
 void
Index: kern/subr_cprng.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_cprng.c,v
retrieving revision 1.5
diff -u -r1.5 subr_cprng.c
--- kern/subr_cprng.c   17 Dec 2011 20:05:39 -0000      1.5
+++ kern/subr_cprng.c   6 Apr 2012 04:26:02 -0000
@@ -75,10 +75,11 @@
 cprng_strong_sched_reseed(cprng_strong_t *const c)
 {
        KASSERT(mutex_owned(&c->mtx));
-       if (!(c->reseed_pending)) {
+       if (!(c->reseed_pending) && mutex_tryenter(&c->reseed.mtx)) {
                c->reseed_pending = 1;
                c->reseed.len = NIST_BLOCK_KEYLEN_BYTES;
                rndsink_attach(&c->reseed);
+               mutex_spin_exit(&c->reseed.mtx);
        }
 }
 
@@ -123,6 +124,7 @@
        c->reseed_pending = 0;
        c->reseed.cb = cprng_strong_reseed;
        c->reseed.arg = c;
+       mutex_init(&c->reseed.mtx, MUTEX_DEFAULT, IPL_VM);
        strlcpy(c->reseed.name, name, sizeof(c->reseed.name));
 
        mutex_init(&c->mtx, MUTEX_DEFAULT, ipl);
@@ -266,6 +268,7 @@
 void
 cprng_strong_destroy(cprng_strong_t *c)
 {
+       mutex_spin_enter(&c->reseed.mtx);
        mutex_enter(&c->mtx);
 
        if (c->flags & CPRNG_USE_CV) {
@@ -277,6 +280,8 @@
        if (c->reseed_pending) {
                rndsink_detach(&c->reseed);
        }
+       mutex_spin_exit(&c->reseed.mtx);
+
        nist_ctr_drbg_destroy(&c->drbg);
 
        mutex_exit(&c->mtx);
Index: lib/libkern/arc4random.c
===================================================================
RCS file: /cvsroot/src/sys/lib/libkern/arc4random.c,v
retrieving revision 1.31
diff -u -r1.31 arc4random.c
--- lib/libkern/arc4random.c    14 Feb 2012 18:57:35 -0000      1.31
+++ lib/libkern/arc4random.c    6 Apr 2012 04:26:02 -0000
@@ -167,7 +167,9 @@
                                    "forcibly rekeying.\n");
                                r = rnd_extract_data(key, ARC4_KEYBYTES,
                                    RND_EXTRACT_ANY);
+                               mutex_spin_enter(&rs.mtx);
                                rndsink_detach(&rs);
+                               mutex_spin_exit(&rs.mtx);
                                callback_pending = 0;
                                goto got_entropy;
                        } else {
@@ -195,11 +197,13 @@
                callback_pending = 0;
        } else if (!callback_pending) {
                callback_pending = 1;
+               mutex_spin_enter(&rs.mtx);
                strlcpy(rs.name, "arc4random", sizeof(rs.name));
                rs.cb = arc4_randrekey;
                rs.arg = &rs;
                rs.len = ARC4_KEYBYTES;
                rndsink_attach(&rs);
+               mutex_spin_exit(&rs.mtx);
        }
 #endif
        /*
@@ -260,6 +264,7 @@
        int n;
 
        mutex_init(&arc4_mtx, MUTEX_DEFAULT, IPL_VM);
+       mutex_init(&rs.mtx, MUTEX_DEFAULT, IPL_VM);
        arc4_i = arc4_j = 0;
        for (n = 0; n < 256; n++)
                arc4_sbox[n] = (u_int8_t) n;
Index: sys/rnd.h
===================================================================
RCS file: /cvsroot/src/sys/sys/rnd.h,v
retrieving revision 1.29
diff -u -r1.29 rnd.h
--- sys/rnd.h   2 Feb 2012 19:43:08 -0000       1.29
+++ sys/rnd.h   6 Apr 2012 04:26:04 -0000
@@ -129,6 +129,7 @@
 
 typedef struct rndsink {
         TAILQ_ENTRY(rndsink) tailq;     /* the queue */
+       kmutex_t        mtx;            /* lock to seed or unregister */
         void            (*cb)(void *);  /* callback function when ready */
         void            *arg;           /* callback function argument */
         char            name[16];       /* sink name */