Source-Changes-HG archive


[src/trunk]: src/sys/net Remove the global lock for rtcache



details:   https://anonhg.NetBSD.org/src/rev/655c2d359105
branches:  trunk
changeset: 826691:655c2d359105
user:      ozaki-r <ozaki-r%NetBSD.org@localhost>
date:      Fri Sep 22 05:05:32 2017 +0000

description:
Remove the global lock for rtcache

Thanks to the removal of LIST_ENTRY from struct route, rtcaches are accessed only
by their users, and in existing usages a rtcache is guaranteed not to be accessed
simultaneously. So the rtcache framework doesn't need any exclusion control of
its own.
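
With the framework lock gone, a caller that shares a rtcache between contexts has
to serialize access itself, typically by protecting the cache with its own lock.
A minimal sketch of that pattern, assuming a caller-defined structure; the names
(struct my_pcb, my_pcb_send, mp_lock) are illustrative only, not from the tree:

	#include <sys/mutex.h>
	#include <sys/socket.h>
	#include <net/route.h>

	/*
	 * Hypothetical caller-side exclusion: the rtcache framework no longer
	 * locks anything, so the owner of the cache serializes access itself.
	 */
	struct my_pcb {
		kmutex_t	mp_lock;	/* caller-owned lock */
		struct route	mp_route;	/* rtcache, protected by mp_lock */
	};

	static void
	my_pcb_send(struct my_pcb *mp, const struct sockaddr *dst)
	{
		struct rtentry *rt;

		mutex_enter(&mp->mp_lock);	/* serialize rtcache access */
		rt = rtcache_lookup(&mp->mp_route, dst); /* fill or revalidate */
		if (rt != NULL) {
			/* ... transmit via rt while the reference is held ... */
			rtcache_unref(rt, &mp->mp_route);
		}
		mutex_exit(&mp->mp_lock);
	}

This is roughly the shape of the inp_route case referenced in the updated locking
notes further down in the diff.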

diffstat:

 sys/net/route.c |  208 ++++++++++++++++---------------------------------------
 1 files changed, 60 insertions(+), 148 deletions(-)

diffs (truncated from 424 to 300 lines):

diff -r f7deebb72d09 -r 655c2d359105 sys/net/route.c
--- a/sys/net/route.c   Fri Sep 22 04:45:56 2017 +0000
+++ b/sys/net/route.c   Fri Sep 22 05:05:32 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: route.c,v 1.199 2017/09/21 07:15:34 ozaki-r Exp $      */
+/*     $NetBSD: route.c,v 1.200 2017/09/22 05:05:32 ozaki-r Exp $      */
 
 /*-
  * Copyright (c) 1998, 2008 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
 #endif
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.199 2017/09/21 07:15:34 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.200 2017/09/22 05:05:32 ozaki-r Exp $");
 
 #include <sys/param.h>
 #ifdef RTFLUSH_DEBUG
@@ -169,8 +169,12 @@
  * Locking notes:
  * - The routing table is protected by a global rwlock
  *   - API: RT_RLOCK and friends
- * - rtcaches are protected by a global rwlock
- *   - API: RTCACHE_RLOCK and friends
+ * - rtcaches are NOT protected by the framework
+ *   - Callers must guarantee a rtcache isn't accessed simultaneously
+ *   - How the constraint is guranteed in the wild
+ *     - Protect a rtcache by a mutex (e.g., inp_route)
+ *     - Make rtcache per-CPU and allow only accesses from softint
+ *       (e.g., ipforward_rt_percpu)
  * - References to a rtentry is managed by reference counting and psref
  *   - Reference couting is used for temporal reference when a rtentry
  *     is fetched from the routing table
@@ -213,8 +217,7 @@
  */
 
 /*
- * Global locks for the routing table and rtcaches.
- * Locking order: rtcache_lock => rt_lock
+ * Global lock for the routing table.
  */
 static krwlock_t               rt_lock __cacheline_aligned;
 #ifdef NET_MPSAFE
@@ -231,21 +234,6 @@
 #define        RT_ASSERT_WLOCK()       do {} while (0)
 #endif
 
-static krwlock_t               rtcache_lock __cacheline_aligned;
-#ifdef NET_MPSAFE
-#define RTCACHE_RLOCK()                rw_enter(&rtcache_lock, RW_READER)
-#define RTCACHE_WLOCK()                rw_enter(&rtcache_lock, RW_WRITER)
-#define RTCACHE_UNLOCK()       rw_exit(&rtcache_lock)
-#define        RTCACHE_ASSERT_WLOCK()  KASSERT(rw_write_held(&rtcache_lock))
-#define        RTCACHE_WLOCKED()       rw_write_held(&rtcache_lock)
-#else
-#define RTCACHE_RLOCK()                do {} while (0)
-#define RTCACHE_WLOCK()                do {} while (0)
-#define RTCACHE_UNLOCK()       do {} while (0)
-#define        RTCACHE_ASSERT_WLOCK()  do {} while (0)
-#define        RTCACHE_WLOCKED()       false
-#endif
-
 static uint64_t rtcache_generation;
 
 /*
@@ -290,10 +278,6 @@
 
 static struct rtentry *
     rtalloc1_locked(const struct sockaddr *, int, bool, bool);
-static struct rtentry *
-    rtcache_validate_locked(struct route *);
-static void rtcache_free_locked(struct route *);
-static int rtcache_setdst_locked(struct route *, const struct sockaddr *);
 
 static void rtcache_ref(struct rtentry *, struct route *);
 
@@ -503,9 +487,7 @@
        if (rtcache_debug())
                printf("%s: enter\n", __func__);
 
-       RTCACHE_WLOCK();
        rtcache_generation++;
-       RTCACHE_UNLOCK();
 }
 
 #ifdef RT_DEBUG
@@ -569,23 +551,14 @@
        if (ISSET(rt->rt_flags, RTF_UPDATING) &&
            /* XXX updater should be always able to acquire */
            curlwp != rt_update_global.lwp) {
-               bool need_lock = false;
                if (!wait_ok || !rt_wait_ok())
                        goto miss;
                RT_UNLOCK();
                splx(s);
 
-               /* XXX need more proper solution */
-               if (RTCACHE_WLOCKED()) {
-                       RTCACHE_UNLOCK();
-                       need_lock = true;
-               }
-
                /* We can wait until the update is complete */
                rt_update_wait();
 
-               if (need_lock)
-                       RTCACHE_WLOCK();
                if (wlock)
                        RT_WLOCK();
                else
@@ -775,17 +748,14 @@
 
        dlog(LOG_DEBUG, "%s: updating rt=%p lwp=%p\n", __func__, rt, curlwp);
 
-       RTCACHE_WLOCK();
        RT_WLOCK();
        /* If the entry is being destroyed, don't proceed the update. */
        if (!ISSET(rt->rt_flags, RTF_UP)) {
                RT_UNLOCK();
-               RTCACHE_UNLOCK();
                return -1;
        }
        rt->rt_flags |= RTF_UPDATING;
        RT_UNLOCK();
-       RTCACHE_UNLOCK();
 
        mutex_enter(&rt_update_global.lock);
        while (rt_update_global.ongoing) {
@@ -810,11 +780,9 @@
 rt_update_finish(struct rtentry *rt)
 {
 
-       RTCACHE_WLOCK();
        RT_WLOCK();
        rt->rt_flags &= ~RTF_UPDATING;
        RT_UNLOCK();
-       RTCACHE_UNLOCK();
 
        mutex_enter(&rt_update_global.lock);
        rt_update_global.ongoing = false;
@@ -1679,7 +1647,6 @@
 
        /* XXX should be in rt_init */
        rw_init(&rt_lock);
-       rw_init(&rtcache_lock);
 
        LIST_INIT(&rttimer_queue_head);
        callout_init(&rt_timer_ch, CALLOUT_MPSAFE);
@@ -1872,19 +1839,20 @@
 
        rtcache_invariants(ro);
        KASSERT(ro->_ro_rt == NULL);
-       RTCACHE_ASSERT_WLOCK();
 
        if (rtcache_getdst(ro) == NULL)
                return NULL;
        rt = rtalloc1(rtcache_getdst(ro), flag);
-       if (rt != NULL && ISSET(rt->rt_flags, RTF_UP)) {
-               ro->_ro_rt = rt;
-               ro->ro_rtcache_generation = rtcache_generation;
-               KASSERT(!ISSET(rt->rt_flags, RTF_UPDATING));
-               rtcache_ref(rt, ro);
+       if (rt != NULL) {
+               RT_RLOCK();
+               if (ISSET(rt->rt_flags, RTF_UP)) {
+                       ro->_ro_rt = rt;
+                       ro->ro_rtcache_generation = rtcache_generation;
+                       rtcache_ref(rt, ro);
+               }
+               RT_UNLOCK();
                rt_unref(rt);
-       } else if (rt != NULL)
-               rt_unref(rt);
+       }
 
        rtcache_invariants(ro);
        return ro->_ro_rt;
@@ -1893,32 +1861,23 @@
 struct rtentry *
 rtcache_init(struct route *ro)
 {
-       struct rtentry *rt;
-       RTCACHE_WLOCK();
-       rt = _rtcache_init(ro, 1);
-       RTCACHE_UNLOCK();
-       return rt;
+
+       return _rtcache_init(ro, 1);
 }
 
 struct rtentry *
 rtcache_init_noclone(struct route *ro)
 {
-       struct rtentry *rt;
-       RTCACHE_WLOCK();
-       rt = _rtcache_init(ro, 0);
-       RTCACHE_UNLOCK();
-       return rt;
+
+       return _rtcache_init(ro, 0);
 }
 
 struct rtentry *
 rtcache_update(struct route *ro, int clone)
 {
-       struct rtentry *rt;
-       RTCACHE_WLOCK();
+
        ro->_ro_rt = NULL;
-       rt = _rtcache_init(ro, clone);
-       RTCACHE_UNLOCK();
-       return rt;
+       return _rtcache_init(ro, clone);
 }
 
 void
@@ -1939,11 +1898,9 @@
        if (ret != 0)
                goto out;
 
-       RTCACHE_WLOCK();
        new_ro->_ro_rt = rt;
        new_ro->ro_rtcache_generation = rtcache_generation;
        rtcache_invariants(new_ro);
-       RTCACHE_UNLOCK();
 out:
        rtcache_unref(rt, old_ro);
        return;
@@ -1991,55 +1948,45 @@
 #endif
 }
 
-static struct rtentry *
-rtcache_validate_locked(struct route *ro)
+struct rtentry *
+rtcache_validate(struct route *ro)
 {
        struct rtentry *rt = NULL;
 
 #ifdef NET_MPSAFE
 retry:
 #endif
-       rt = ro->_ro_rt;
        rtcache_invariants(ro);
-
        if (ro->ro_rtcache_generation != rtcache_generation) {
                /* The cache is invalidated */
+               return NULL;
+       }
+
+       rt = ro->_ro_rt;
+       if (rt == NULL)
+               return NULL;
+
+       RT_RLOCK();
+       if ((rt->rt_flags & RTF_UP) == 0) {
                rt = NULL;
                goto out;
        }
+#ifdef NET_MPSAFE
+       if (ISSET(rt->rt_flags, RTF_UPDATING)) {
+               if (rt_wait_ok()) {
+                       RT_UNLOCK();
 
-       RT_RLOCK();
-       if (rt != NULL && (rt->rt_flags & RTF_UP) != 0) {
-#ifdef NET_MPSAFE
-               if (ISSET(rt->rt_flags, RTF_UPDATING)) {
-                       if (rt_wait_ok()) {
-                               RT_UNLOCK();
-                               RTCACHE_UNLOCK();
-                               /* We can wait until the update is complete */
-                               rt_update_wait();
-                               RTCACHE_RLOCK();
-                               goto retry;
-                       } else {
-                               rt = NULL;
-                       }
-               } else
+                       /* We can wait until the update is complete */
+                       rt_update_wait();
+                       goto retry;
+               } else {
+                       rt = NULL;
+               }
+       } else
 #endif
-                       rtcache_ref(rt, ro);
-       } else
-               rt = NULL;
-       RT_UNLOCK();
+               rtcache_ref(rt, ro);
 out:
-       return rt;
-}
-
-struct rtentry *
-rtcache_validate(struct route *ro)
-{
-       struct rtentry *rt;


