Source-Changes-HG archive


[src/trunk]: src/sys/kern entropy(9): Use the early-entropy path only while cold.



details:   https://anonhg.NetBSD.org/src/rev/b18db24fe436
branches:  trunk
changeset: 363954:b18db24fe436
user:      riastradh <riastradh@NetBSD.org>
date:      Fri Mar 18 23:34:56 2022 +0000

description:
entropy(9): Use the early-entropy path only while cold.

This way, we never take the global entropy lock from interrupt
handlers (no interrupts while cold), so the global entropy lock need
not block interrupts.
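
To make the split concrete, here is a toy userland sketch of the
dispatch (not the kernel code; sample_enter(), enter_early(),
enter_percpu(), and the stage variable are invented stand-ins for
entropy_enter() and friends):

#include <stddef.h>
#include <stdio.h>

enum stage { ENTROPY_COLD, ENTROPY_WARM, ENTROPY_HOT };
static enum stage stage = ENTROPY_COLD;

static void
enter_early(const void *buf, size_t len)
{
	/* Cold: one CPU, interrupts off, so the global pool can be
	 * touched with no lock at all. */
	printf("global pool (unlocked): %zu bytes\n", len);
}

static void
enter_percpu(const void *buf, size_t len)
{
	/* Warm or hot: per-CPU pools; the global lock is taken only
	 * from thread context and never has to block interrupts. */
	printf("per-CPU pool: %zu bytes\n", len);
}

static void
sample_enter(const void *buf, size_t len)
{
	if (stage == ENTROPY_COLD)
		enter_early(buf, len);
	else
		enter_percpu(buf, len);
}

int
main(void)
{
	char sample[8] = {0};

	sample_enter(sample, sizeof sample);	/* cold path */
	stage = ENTROPY_HOT;
	sample_enter(sample, sizeof sample);	/* per-CPU path */
	return 0;
}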

There's an annoying ordering issue here: softint_establish doesn't
work until after CPUs have been detected, which happens inside
configure(), which is also what enables interrupts.  So we have no
opportunity to softint_establish the entropy softint _before_
interrupts are enabled.
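
A toy model of that ordering constraint, with configure_mock() and
softint_establish_mock() as invented stand-ins for the real
configure() and softint_establish():

#include <stdbool.h>
#include <stdio.h>

static int ncpu;			/* populated by CPU detection */
static bool intrs_on;

static void *
softint_establish_mock(void)
{
	/* The real softint_establish() needs the CPUs to exist. */
	return ncpu > 0 ? (void *)0x1 : NULL;
}

static void
configure_mock(void)
{
	ncpu = 4;		/* CPU detection happens in configure()... */
	intrs_on = true;	/* ...which is also what enables interrupts */
}

int
main(void)
{
	void *sih = softint_establish_mock();	/* NULL: too early */
	printf("before configure: sih=%p\n", sih);

	configure_mock();
	/* Interrupts are already running here, so any window between
	 * now and establishing the softint can queue samples. */
	sih = softint_establish_mock();
	printf("after configure: sih=%p intrs=%d\n", sih, intrs_on);
	return 0;
}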

To work around this, we have to put a conditional into the interrupt
path, and go out of our way to process any queued samples after
establishing the softint.  If we just made softint_establish work
early, like percpu_create does now, this problem would go away and we
could delete a bit of logic here.
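
The conditional in question boils down to a relaxed load of the
softint handle, as in the diff below.  Here is a standalone sketch
using C11 atomics in place of the kernel's atomic_load_relaxed(),
with softint_schedule() mocked:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) entropy_sih;	/* NULL until entropy_init_late() */

static void
softint_schedule(void *sih)		/* mock of the kernel primitive */
{
	printf("softint scheduled: %p\n", sih);
}

/* Interrupt path: schedule the softint only once it exists. */
static void
intr_sample_entered(void)
{
	void *sih = atomic_load_explicit(&entropy_sih,
	    memory_order_relaxed);

	if (sih != NULL)
		softint_schedule(sih);
	/* else: the sample stays queued in per-CPU state;
	 * entropy_init_late() later replays it by running the softint
	 * logic on every CPU via xc_broadcast(XC_HIGHPRI, ...) and
	 * xc_wait(). */
}

int
main(void)
{
	intr_sample_entered();		/* too early: no softint yet */
	atomic_store_explicit(&entropy_sih, (void *)0x1,
	    memory_order_relaxed);	/* entropy_init_late() publishes */
	intr_sample_entered();		/* now schedules */
	return 0;
}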

Candidate fix for PR kern/56730.

diffstat:

 sys/kern/kern_entropy.c |  60 +++++++++++++++++++++++++++++++-----------------
 1 files changed, 39 insertions(+), 21 deletions(-)

diffs (174 lines):

diff -r 46d88dc06d9d -r b18db24fe436 sys/kern/kern_entropy.c
--- a/sys/kern/kern_entropy.c   Fri Mar 18 23:34:44 2022 +0000
+++ b/sys/kern/kern_entropy.c   Fri Mar 18 23:34:56 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_entropy.c,v 1.36 2022/03/18 23:34:44 riastradh Exp $      */
+/*     $NetBSD: kern_entropy.c,v 1.37 2022/03/18 23:34:56 riastradh Exp $      */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -75,7 +75,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.36 2022/03/18 23:34:44 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.37 2022/03/18 23:34:56 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -396,6 +396,13 @@
        E->stage = ENTROPY_WARM;
 }
 
+static void
+entropy_init_late_cpu(void *a, void *b)
+{
+
+       entropy_softintr(NULL);
+}
+
 /*
  * entropy_init_late()
  *
@@ -406,6 +413,7 @@
 static void
 entropy_init_late(void)
 {
+       void *sih;
        int error;
 
        KASSERT(E->stage == ENTROPY_WARM);
@@ -414,9 +422,9 @@
         * Establish the softint at the highest softint priority level.
         * Must happen after CPU detection.
         */
-       entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
+       sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
            &entropy_softintr, NULL);
-       if (entropy_sih == NULL)
+       if (sih == NULL)
                panic("unable to establish entropy softint");
 
        /*
@@ -431,10 +439,22 @@
 
        /*
         * Wait until the per-CPU initialization has hit all CPUs
-        * before proceeding to mark the entropy system hot.
+        * before proceeding to mark the entropy system hot and
+        * enabling use of the softint.
         */
        xc_barrier(XC_HIGHPRI);
        E->stage = ENTROPY_HOT;
+       atomic_store_relaxed(&entropy_sih, sih);
+
+       /*
+        * At this point, entering new samples from interrupt handlers
+        * will trigger the softint to process them.  But there may be
+        * some samples that were entered from interrupt handlers
+        * before the softint was available.  Make sure we process
+        * those samples on all CPUs by running the softint logic on
+        * all CPUs.
+        */
+       xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL));
 }
 
 /*
@@ -651,7 +671,7 @@
 {
        unsigned diff;
 
-       KASSERT(E->stage == ENTROPY_HOT);
+       KASSERT(E->stage >= ENTROPY_WARM);
 
        /*
         * If there's no entropy needed, and entropy has been
@@ -738,8 +758,7 @@
 {
        bool notify = false;
 
-       if (E->stage >= ENTROPY_WARM)
-               mutex_enter(&E->lock);
+       KASSERT(E->stage == ENTROPY_COLD);
 
        /* Enter it into the pool.  */
        entpool_enter(&E->pool, buf, len);
@@ -758,9 +777,6 @@
                entropy_notify();
                entropy_immediate_evcnt.ev_count++;
        }
-
-       if (E->stage >= ENTROPY_WARM)
-               mutex_exit(&E->lock);
 }
 
 /*
@@ -784,7 +800,7 @@
            "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
 
        /* If it's too early after boot, just use entropy_enter_early.  */
-       if (__predict_false(E->stage < ENTROPY_HOT)) {
+       if (__predict_false(E->stage == ENTROPY_COLD)) {
                entropy_enter_early(buf, len, nbits);
                return;
        }
@@ -839,12 +855,13 @@
        struct entropy_cpu *ec;
        bool fullyused = false;
        uint32_t pending;
+       void *sih;
 
        KASSERTMSG(howmany(nbits, NBBY) <= len,
            "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
 
        /* If it's too early after boot, just use entropy_enter_early.  */
-       if (__predict_false(E->stage < ENTROPY_HOT)) {
+       if (__predict_false(E->stage == ENTROPY_COLD)) {
                entropy_enter_early(buf, len, nbits);
                return true;
        }
@@ -865,7 +882,9 @@
         * truncated, schedule a softint to stir the pool and stop.
         */
        if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
-               softint_schedule(entropy_sih);
+               sih = atomic_load_relaxed(&entropy_sih);
+               if (__predict_true(sih != NULL))
+                       softint_schedule(sih);
                goto out1;
        }
        fullyused = true;
@@ -878,8 +897,11 @@
        /* Schedule a softint if we added anything and it matters.  */
        if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
                atomic_load_relaxed(&entropy_depletion)) &&
-           nbits != 0)
-               softint_schedule(entropy_sih);
+           nbits != 0) {
+               sih = atomic_load_relaxed(&entropy_sih);
+               if (__predict_true(sih != NULL))
+                       softint_schedule(sih);
+       }
 
 out1:  /* Release the per-CPU state.  */
        KASSERT(ec->ec_locked);
@@ -1873,9 +1895,7 @@
         * contributed from this source.
         */
        if (fullyused) {
-               if (E->stage < ENTROPY_HOT) {
-                       if (E->stage >= ENTROPY_WARM)
-                               mutex_enter(&E->lock);
+               if (__predict_false(E->stage == ENTROPY_COLD)) {
                        rs->total = add_sat(rs->total, entropybits);
                        switch (flag) {
                        case RND_FLAG_COLLECT_TIME:
@@ -1887,8 +1907,6 @@
                                    add_sat(rs->value_delta.insamples, 1);
                                break;
                        }
-                       if (E->stage >= ENTROPY_WARM)
-                               mutex_exit(&E->lock);
                } else {
                        struct rndsource_cpu *rc = percpu_getref(rs->state);
 


