NetBSD-Bugs archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Re: kern/52160: panic: assert_sleepable
The following reply was made to PR kern/52160; it has been noted by GNATS.
From: Nick Hudson <skrll%netbsd.org@localhost>
To: gnats-bugs%NetBSD.org@localhost, kern-bug-people%netbsd.org@localhost,
gnats-admin%netbsd.org@localhost, netbsd-bugs%netbsd.org@localhost
Cc:
Subject: Re: kern/52160: panic: assert_sleepable
Date: Fri, 14 Apr 2017 10:55:16 +0100
This is a multi-part message in MIME format.
--------------020900010209020906050409
Content-Type: text/plain; charset=windows-1252; format=flowed
Content-Transfer-Encoding: 7bit
Can you test this patch please? It's a backport of this change from head:
Module Name: src
Committed By: ozaki-r
Date: Mon Jul 11 07:37:00 UTC 2016
Modified Files:
src/sys/net: route.c
src/sys/netinet: ip_flow.c
src/sys/netinet6: ip6_flow.c nd6.c
Log Message:
Run timers in workqueue
Timers (such as nd6_timer) typically free/destroy some data in callout
(softint). If we apply psz/psref for such data, we cannot do the free/destroy
processing there, because the synchronization of psz/psref cannot be used in
softint. So we instead run timer callbacks in workqueue works (normal LWP context).
Enqueuing a work twice (i.e., calling workqueue_enqueue again before the
previously enqueued work has been scheduled) isn't allowed. For nd6_timer and
rt_timer_timer, this doesn't happen because callout_reset is called only
from workqueue's work. OTOH, ip{,6}flow_slowtimo's callout can be called
before its work starts and completes because the callout is periodically
called regardless of completion of the work. To avoid such a situation,
add a flag for each protocol; the flag is set to true when a work is
enqueued and set to false after the work has finished. workqueue_enqueue is
called only if the flag is false.
Proposed on tech-net and tech-kern.
To generate a diff of this commit:
cvs rdiff -u -r1.169 -r1.170 src/sys/net/route.c
cvs rdiff -u -r1.72 -r1.73 src/sys/netinet/ip_flow.c
cvs rdiff -u -r1.27 -r1.28 src/sys/netinet6/ip6_flow.c
cvs rdiff -u -r1.202 -r1.203 src/sys/netinet6/nd6.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
--------------020900010209020906050409
Content-Type: text/x-patch;
name="nd6_timer.netbsd-7.diff"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
filename="nd6_timer.netbsd-7.diff"
? sys/cscope.out
? sys/dev/usb/usbroothub.c.new
Index: sys/net/route.c
===================================================================
RCS file: /cvsroot/src/sys/net/route.c,v
retrieving revision 1.132
diff -u -p -w -b -r1.132 route.c
--- sys/net/route.c 6 Jun 2014 01:27:32 -0000 1.132
+++ sys/net/route.c 14 Apr 2017 09:50:28 -0000
@@ -110,6 +110,7 @@ __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.
#include <sys/ioctl.h>
#include <sys/pool.h>
#include <sys/kauth.h>
+#include <sys/workqueue.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -133,6 +134,8 @@ struct pool rtentry_pool;
struct pool rttimer_pool;
struct callout rt_timer_ch; /* callout for rt_timer_timer() */
+struct workqueue *rt_timer_wq;
+struct work rt_timer_wk;
#ifdef RTFLUSH_DEBUG
static int _rtcache_debug = 0;
@@ -1031,14 +1034,22 @@ static int rt_init_done = 0;
* that this is run when the first queue is added...
*/
+static void rt_timer_work(struct work *, void *);
+
void
rt_timer_init(void)
{
+ int error;
+
assert(rt_init_done == 0);
LIST_INIT(&rttimer_queue_head);
callout_init(&rt_timer_ch, 0);
callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
+ error = workqueue_create(&rt_timer_wq, "rt_timer",
+ rt_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
+ if (error)
+ panic("%s: workqueue_create failed (%d)\n", __func__, error);
rt_init_done = 1;
}
@@ -1171,9 +1182,8 @@ rt_timer_add(struct rtentry *rt,
return 0;
}
-/* ARGSUSED */
-void
-rt_timer_timer(void *arg)
+static void
+rt_timer_work(struct work *wk, void *arg)
{
struct rttimer_queue *rtq;
struct rttimer *r;
@@ -1198,6 +1208,13 @@ rt_timer_timer(void *arg)
callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}
+void
+rt_timer_timer(void *arg)
+{
+
+ workqueue_enqueue(rt_timer_wq, &rt_timer_wk, NULL);
+}
+
static struct rtentry *
_rtcache_init(struct route *ro, int flag)
{
Index: sys/netinet/ip_flow.c
===================================================================
RCS file: /cvsroot/src/sys/netinet/ip_flow.c,v
retrieving revision 1.64
diff -u -p -w -b -r1.64 ip_flow.c
--- sys/netinet/ip_flow.c 22 May 2014 22:01:12 -0000 1.64
+++ sys/netinet/ip_flow.c 14 Apr 2017 09:50:28 -0000
@@ -45,6 +45,7 @@ __KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
+#include <sys/workqueue.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -96,6 +97,10 @@ static int ip_hashsize = IPFLOW_DEFAULT_
static void ipflow_sysctl_init(struct sysctllog **);
+static void ipflow_slowtimo_work(struct work *, void *);
+static struct workqueue *ipflow_slowtimo_wq;
+static struct work ipflow_slowtimo_wk;
+
static size_t
ipflow_hash(const struct ip *ip)
{
@@ -130,6 +135,12 @@ ipflow_lookup(const struct ip *ip)
void
ipflow_poolinit(void)
{
+ int error;
+
+ error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
+ ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
+ if (error != 0)
+ panic("%s: workqueue_create failed (%d)\n", __func__, error);
pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
NULL, IPL_NET);
@@ -390,8 +401,10 @@ ipflow_reap(bool just_one)
return NULL;
}
-void
-ipflow_slowtimo(void)
+static bool ipflow_work_enqueued = false;
+
+static void
+ipflow_slowtimo_work(struct work *wk, void *arg)
{
struct rtentry *rt;
struct ipflow *ipf, *next_ipf;
@@ -415,11 +428,28 @@ ipflow_slowtimo(void)
ipf->ipf_uses = 0;
}
}
+ ipflow_work_enqueued = false;
KERNEL_UNLOCK_ONE(NULL);
mutex_exit(softnet_lock);
}
void
+ipflow_slowtimo(void)
+{
+
+ /* Avoid enqueuing another work when one is already enqueued */
+ KERNEL_LOCK(1, NULL);
+ if (ipflow_work_enqueued) {
+ KERNEL_UNLOCK_ONE(NULL);
+ return;
+ }
+ ipflow_work_enqueued = true;
+ KERNEL_UNLOCK_ONE(NULL);
+
+ workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
+}
+
+void
ipflow_create(const struct route *ro, struct mbuf *m)
{
const struct ip *const ip = mtod(m, const struct ip *);
Index: sys/netinet6/ip6_flow.c
===================================================================
RCS file: /cvsroot/src/sys/netinet6/ip6_flow.c,v
retrieving revision 1.23
diff -u -p -w -b -r1.23 ip6_flow.c
--- sys/netinet6/ip6_flow.c 20 May 2014 20:23:56 -0000 1.23
+++ sys/netinet6/ip6_flow.c 14 Apr 2017 09:50:29 -0000
@@ -52,6 +52,7 @@ __KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
+#include <sys/workqueue.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -92,6 +93,10 @@ static struct ip6flowhead *ip6flowtable
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse;
+static void ip6flow_slowtimo_work(struct work *, void *);
+static struct workqueue *ip6flow_slowtimo_wq;
+static struct work ip6flow_slowtimo_wk;
+
/*
* Insert an ip6flow into the list.
*/
@@ -182,6 +187,12 @@ ip6flow_init(int table_size)
{
struct ip6flowhead *new_table;
size_t i;
+ int error;
+
+ error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
+ ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
+ if (error != 0)
+ panic("%s: workqueue_create failed (%d)\n", __func__, error);
new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
table_size, M_RTABLE, M_NOWAIT);
@@ -415,8 +426,10 @@ ip6flow_reap(int just_one)
return NULL;
}
+static bool ip6flow_work_enqueued = false;
+
void
-ip6flow_slowtimo(void)
+ip6flow_slowtimo_work(struct work *wk, void *arg)
{
struct ip6flow *ip6f, *next_ip6f;
@@ -436,11 +449,28 @@ ip6flow_slowtimo(void)
ip6f->ip6f_forwarded = 0;
}
}
+ ip6flow_work_enqueued = false;
KERNEL_UNLOCK_ONE(NULL);
mutex_exit(softnet_lock);
}
+void
+ip6flow_slowtimo(void)
+{
+
+ /* Avoid enqueuing another work when one is already enqueued */
+ KERNEL_LOCK(1, NULL);
+ if (ip6flow_work_enqueued) {
+ KERNEL_UNLOCK_ONE(NULL);
+ return;
+ }
+ ip6flow_work_enqueued = true;
+ KERNEL_UNLOCK_ONE(NULL);
+
+ workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
+}
+
/*
* We have successfully forwarded a packet using the normal
* IPv6 stack. Now create/update a flow.
Index: sys/netinet6/nd6.c
===================================================================
RCS file: /cvsroot/src/sys/netinet6/nd6.c,v
retrieving revision 1.152.2.3
diff -u -p -w -b -r1.152.2.3 nd6.c
--- sys/netinet6/nd6.c 6 Apr 2015 01:32:33 -0000 1.152.2.3
+++ sys/netinet6/nd6.c 14 Apr 2017 09:50:29 -0000
@@ -53,6 +53,7 @@ __KERNEL_RCSID(0, "$NetBSD: nd6.c,v 1.15
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/cprng.h>
+#include <sys/workqueue.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -122,11 +123,14 @@ static void nd6_setmtu0(struct ifnet *,
static void nd6_slowtimo(void *);
static int regen_tmpaddr(struct in6_ifaddr *);
static struct llinfo_nd6 *nd6_free(struct rtentry *, int);
+static void nd6_timer_work(struct work *, void *);
static void nd6_llinfo_timer(void *);
static void clear_llinfo_pqueue(struct llinfo_nd6 *);
callout_t nd6_slowtimo_ch;
callout_t nd6_timer_ch;
+static struct workqueue *nd6_timer_wq;
+static struct work nd6_timer_wk;
extern callout_t in6_tmpaddrtimer_ch;
static int fill_drlist(void *, size_t *, size_t);
@@ -148,6 +152,7 @@ void
nd6_init(void)
{
static int nd6_init_done = 0;
+ int error;
if (nd6_init_done) {
log(LOG_NOTICE, "nd6_init called more than once(ignored)\n");
@@ -162,6 +167,11 @@ nd6_init(void)
callout_init(&nd6_slowtimo_ch, CALLOUT_MPSAFE);
callout_init(&nd6_timer_ch, CALLOUT_MPSAFE);
+ error = workqueue_create(&nd6_timer_wq, "nd6_timer",
+ nd6_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, 0);
+ if (error)
+ panic("%s: workqueue_create failed (%d)\n", __func__, error);
+
/* start timer */
callout_reset(&nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
nd6_slowtimo, NULL);
@@ -541,7 +551,7 @@ nd6_llinfo_timer(void *arg)
* ND6 timer routine to expire default route list and prefix list
*/
void
-nd6_timer(void *ignored_arg)
+nd6_timer_work(struct work *wk, void *arg)
{
struct nd_defrouter *next_dr, *dr;
struct nd_prefix *next_pr, *pr;
@@ -661,6 +671,13 @@ nd6_timer(void *ignored_arg)
mutex_exit(softnet_lock);
}
+void
+nd6_timer(void *ignored_arg)
+{
+
+ workqueue_enqueue(nd6_timer_wq, &nd6_timer_wk, NULL);
+}
+
/* ia6: deprecated/invalidated temporary address */
static int
regen_tmpaddr(struct in6_ifaddr *ia6)
--------------020900010209020906050409--
Home |
Main Index |
Thread Index |
Old Index