Source-Changes-HG archive


[src/trunk]: src/sys/net Re-factor how pktq_barrier() is issued by if_detach().



details:   https://anonhg.NetBSD.org/src/rev/fb6947f417bf
branches:  trunk
changeset: 369813:fb6947f417bf
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Fri Sep 02 05:50:36 2022 +0000

description:
Re-factor how pktq_barrier() is issued by if_detach().

Rather than explicitly referencing ip_pktq and ip6_pktq in if_detach(),
instead add all pktqueues to a global list.  This list is then used in
the new pktq_ifdetach() function to issue a barrier on all pktqueues.
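
In call-site terms (condensed from the if.c hunk below), the per-protocol
#ifdef blocks collapse into a single call that needs no knowledge of which
pktqueues exist:

Before (if_detach() had to name each protocol's queue):

#ifdef INET
	pktq_barrier(ip_pktq);
#endif
#ifdef INET6
	if (in6_present)
		pktq_barrier(ip6_pktq);
#endif

After (one call issues a barrier on every registered pktqueue):

	pktq_ifdetach();
	xc_barrier(0);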

Note that the performance of this list is not critical; it is seldom
accessed (only when pktqueues are created/destroyed and when network
interfaces are detached), so a simple synchronization strategy using
an rwlock is sufficient.
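
The resulting locking pattern is simple (condensed from the pktqueue.c
hunks below): pktq_create() and pktq_destroy() take the lock as writers
to mutate the list, while pktq_ifdetach() only needs a read hold to
walk it:

	/* Writer side: pktq_create()/pktq_destroy() mutate the list. */
	rw_enter(&pktqueue_list_lock, RW_WRITER);
	LIST_INSERT_HEAD(&pktqueue_list, pq, pq_list);	/* or LIST_REMOVE() */
	rw_exit(&pktqueue_list_lock);

	/* Reader side: pktq_ifdetach() only traverses the list. */
	rw_enter(&pktqueue_list_lock, RW_READER);
	LIST_FOREACH(pq, &pktqueue_list, pq_list)
		pktq_barrier(pq);
	rw_exit(&pktqueue_list_lock);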

diffstat:

 sys/net/if.c       |  20 ++++++---------
 sys/net/pktqueue.c |  65 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 sys/net/pktqueue.h |   3 +-
 3 files changed, 72 insertions(+), 16 deletions(-)

diffs (188 lines):

diff -r 3c6e690ff44f -r fb6947f417bf sys/net/if.c
--- a/sys/net/if.c      Fri Sep 02 05:09:49 2022 +0000
+++ b/sys/net/if.c      Fri Sep 02 05:50:36 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: if.c,v 1.522 2022/09/02 04:34:58 thorpej Exp $ */
+/*     $NetBSD: if.c,v 1.523 2022/09/02 05:50:36 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2008 The NetBSD Foundation, Inc.
@@ -90,7 +90,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.522 2022/09/02 04:34:58 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.523 2022/09/02 05:50:36 thorpej Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_inet.h"
@@ -1412,17 +1412,13 @@
        }
 
        /*
-        * IP queues have to be processed separately: net-queue barrier
-        * ensures that the packets are dequeued while a cross-call will
-        * ensure that the interrupts have completed. FIXME: not quite..
+        * Ensure that all packets on protocol input pktqueues have been
+        * processed, or, at least, removed from the queues.
+        *
+        * A cross-call will ensure that the interrupts have completed.
+        * FIXME: not quite..
         */
-#ifdef INET
-       pktq_barrier(ip_pktq);
-#endif
-#ifdef INET6
-       if (in6_present)
-               pktq_barrier(ip6_pktq);
-#endif
+       pktq_ifdetach();
        xc_barrier(0);
 
        /*
diff -r 3c6e690ff44f -r fb6947f417bf sys/net/pktqueue.c
--- a/sys/net/pktqueue.c        Fri Sep 02 05:09:49 2022 +0000
+++ b/sys/net/pktqueue.c        Fri Sep 02 05:50:36 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pktqueue.c,v 1.19 2022/09/02 03:50:00 thorpej Exp $    */
+/*     $NetBSD: pktqueue.c,v 1.20 2022/09/02 05:50:36 thorpej Exp $    */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pktqueue.c,v 1.19 2022/09/02 03:50:00 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pktqueue.c,v 1.20 2022/09/02 05:50:36 thorpej Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -53,6 +53,9 @@
 #include <sys/proc.h>
 #include <sys/percpu.h>
 #include <sys/xcall.h>
+#include <sys/once.h>
+#include <sys/queue.h>
+#include <sys/rwlock.h>
 
 #include <net/pktqueue.h>
 #include <net/rss_config.h>
@@ -80,8 +83,11 @@
        percpu_t *      pq_counters;
        void *          pq_sih;
 
-       /* Finally, per-CPU queues. */
+       /* The per-CPU queues. */
        struct percpu * pq_pcq; /* struct pcq * */
+
+       /* The linkage on the list of all pktqueues. */
+       LIST_ENTRY(pktqueue) pq_list;
 };
 
 /* The counters of the packet queue. */
@@ -97,6 +103,28 @@
 /* Special marker value used by pktq_barrier() mechanism. */
 #define        PKTQ_MARKER     ((void *)(~0ULL))
 
+/*
+ * This is a list of all pktqueues.  This list is used by
+ * pktq_ifdetach() to issue a barrier on every pktqueue.
+ *
+ * The r/w lock is acquired for writing in pktq_create() and
+ * pktq_destroy(), and for reading in pktq_ifdetach().
+ *
+ * This list is not performance critical, and will seldom be
+ * accessed.
+ */
+static LIST_HEAD(, pktqueue) pktqueue_list     __read_mostly;
+static krwlock_t pktqueue_list_lock            __read_mostly;
+static once_t pktqueue_list_init_once          __read_mostly;
+
+static int
+pktqueue_list_init(void)
+{
+       LIST_INIT(&pktqueue_list);
+       rw_init(&pktqueue_list_lock);
+       return 0;
+}
+
 static void
 pktq_init_cpu(void *vqp, void *vpq, struct cpu_info *ci)
 {
@@ -141,6 +169,8 @@
        percpu_t *pc;
        void *sih;
 
+       RUN_ONCE(&pktqueue_list_init_once, pktqueue_list_init);
+
        pc = percpu_alloc(sizeof(pktq_counters_t));
        if ((sih = softint_establish(sflags, intrh, sc)) == NULL) {
                percpu_free(pc, sizeof(pktq_counters_t));
@@ -155,6 +185,10 @@
        pq->pq_pcq = percpu_create(sizeof(struct pcq *),
            pktq_init_cpu, pktq_fini_cpu, pq);
 
+       rw_enter(&pktqueue_list_lock, RW_WRITER);
+       LIST_INSERT_HEAD(&pktqueue_list, pq, pq_list);
+       rw_exit(&pktqueue_list_lock);
+
        return pq;
 }
 
@@ -162,6 +196,12 @@
 pktq_destroy(pktqueue_t *pq)
 {
 
+       KASSERT(pktqueue_list_init_once.o_status == ONCE_DONE);
+
+       rw_enter(&pktqueue_list_lock, RW_WRITER);
+       LIST_REMOVE(pq, pq_list);
+       rw_exit(&pktqueue_list_lock);
+
        percpu_free(pq->pq_pcq, sizeof(struct pcq *));
        percpu_free(pq->pq_counters, sizeof(pktq_counters_t));
        softint_disestablish(pq->pq_sih);
@@ -472,6 +512,25 @@
 }
 
 /*
+ * pktq_ifdetach: issue a barrier on all pktqueues when a network
+ * interface is detached.
+ */
+void
+pktq_ifdetach(void)
+{
+       pktqueue_t *pq;
+
+       /* Just in case no pktqueues have been created yet... */
+       RUN_ONCE(&pktqueue_list_init_once, pktqueue_list_init);
+
+       rw_enter(&pktqueue_list_lock, RW_READER);
+       LIST_FOREACH(pq, &pktqueue_list, pq_list) {
+               pktq_barrier(pq);
+       }
+       rw_exit(&pktqueue_list_lock);
+}
+
+/*
  * pktq_flush: free mbufs in all queues.
  *
  * => The caller must ensure there are no concurrent writers or flush calls.
diff -r 3c6e690ff44f -r fb6947f417bf sys/net/pktqueue.h
--- a/sys/net/pktqueue.h        Fri Sep 02 05:09:49 2022 +0000
+++ b/sys/net/pktqueue.h        Fri Sep 02 05:50:36 2022 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pktqueue.h,v 1.7 2022/09/02 03:50:00 thorpej Exp $     */
+/*     $NetBSD: pktqueue.h,v 1.8 2022/09/02 05:50:36 thorpej Exp $     */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -53,6 +53,7 @@
 bool           pktq_enqueue(pktqueue_t *, struct mbuf *, const u_int);
 struct mbuf *  pktq_dequeue(pktqueue_t *);
 void           pktq_barrier(pktqueue_t *);
+void           pktq_ifdetach(void);
 void           pktq_flush(pktqueue_t *);
 int            pktq_set_maxlen(pktqueue_t *, size_t);
 


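For context, a protocol that creates its input queue with pktq_create()
now participates in the if_detach() barrier automatically, since
registration on the global list happens inside pktq_create() itself.
A minimal sketch follows; the "foo" names and the IFQ_MAXLEN choice are
hypothetical, and pktq_create()'s exact signature comes from
sys/net/pktqueue.h, which this diff does not show:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <net/if.h>
	#include <net/pktqueue.h>

	/* Hypothetical protocol input queue. */
	static pktqueue_t *foo_pktq;

	static void
	foointr(void *arg)
	{
		/* Softint handler: drain the queue with pktq_dequeue(). */
	}

	void
	foo_init(void)
	{
		/*
		 * pktq_create() also links the queue onto pktqueue_list,
		 * so pktq_ifdetach() will include it in the barrier.
		 */
		foo_pktq = pktq_create(IFQ_MAXLEN, foointr, NULL);
		KASSERT(foo_pktq != NULL);
	}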
