Source-Changes-HG archive


[src/trunk]: src/sys/external/bsd/drm2/include/linux Provide reader/writer semantics with recursive readers.



details:   https://anonhg.NetBSD.org/src/rev/ed71c59fdb37
branches:  trunk
changeset: 344732:ed71c59fdb37
user:      riastradh <riastradh@NetBSD.org>
date:      Wed Apr 13 08:43:56 2016 +0000

description:
Provide reader/writer semantics with recursive readers.
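
For reference, a minimal usage sketch (not part of the commit; everything
except the rwlock API itself is hypothetical) showing the semantics the
change provides: readers may nest, and a writer excludes all readers.

#include <linux/spinlock.h>

static rwlock_t example_lock;		/* hypothetical shared lock */

static void
example_readers(void)
{

	read_lock(&example_lock);
	read_lock(&example_lock);	/* recursive readers are allowed */
	/* ... inspect shared state ... */
	read_unlock(&example_lock);
	read_unlock(&example_lock);
}

static void
example_writer(void)
{

	write_lock_irq(&example_lock);	/* waits until all readers drain */
	/* ... mutate shared state ... */
	write_unlock_irq(&example_lock);
}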

diffstat:

 sys/external/bsd/drm2/include/linux/spinlock.h |  79 ++++++++++++++++++++++---
 1 files changed, 67 insertions(+), 12 deletions(-)

diffs (101 lines):

diff -r 30c314471fe9 -r ed71c59fdb37 sys/external/bsd/drm2/include/linux/spinlock.h
--- a/sys/external/bsd/drm2/include/linux/spinlock.h    Wed Apr 13 08:31:00 2016 +0000
+++ b/sys/external/bsd/drm2/include/linux/spinlock.h    Wed Apr 13 08:43:56 2016 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: spinlock.h,v 1.6 2015/01/01 01:15:42 mrg Exp $ */
+/*     $NetBSD: spinlock.h,v 1.7 2016/04/13 08:43:56 riastradh Exp $   */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -35,6 +35,8 @@
 #include <sys/cdefs.h>
 #include <sys/mutex.h>
 
+#include <machine/limits.h>
+
 #define        __acquires(lock)        /* XXX lockdep stuff */
 #define        __releases(lock)        /* XXX lockdep stuff */
 
@@ -108,18 +110,71 @@
        KASSERT(mutex_owned(&(spinlock)->sl_lock))
 
 /*
- * Linux rwlocks are reader/writer spin locks.  We implement them as
- * normal spin locks without reader/writer semantics for expedience.
- * If that turns out to not work, adapting to reader/writer semantics
- * shouldn't be too hard.
+ * Stupid reader/writer spin locks.  No attempt to avoid writer
+ * starvation.  Must allow recursive readers.  We use mutex and state
+ * instead of compare-and-swap for expedience and LOCKDEBUG support.
  */
 
-#define        rwlock_t                spinlock_t
-#define        rwlock_init             spin_lock_init
-#define        rwlock_destroy          spin_lock_destroy
-#define        write_lock_irq          spin_lock_irq
-#define        write_unlock_irq        spin_unlock_irq
-#define        read_lock               spin_lock
-#define        read_unlock             spin_unlock
+typedef struct linux_rwlock {
+       kmutex_t        rw_lock;
+       unsigned        rw_nreaders;
+} rwlock_t;
+
+static inline void
+rwlock_init(rwlock_t *rw)
+{
+
+       mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
+       rw->rw_nreaders = 0;
+}
+
+static inline void
+rwlock_destroy(rwlock_t *rw)
+{
+
+       KASSERTMSG(rw->rw_nreaders == 0,
+           "rwlock still held by %u readers", rw->rw_nreaders);
+       mutex_destroy(&rw->rw_lock);
+}
+
+static inline void
+write_lock_irq(rwlock_t *rw)
+{
+
+       for (;;) {
+               mutex_spin_enter(&rw->rw_lock);
+               if (rw->rw_nreaders == 0)
+                       break;
+               mutex_spin_exit(&rw->rw_lock);
+       }
+}
+
+static inline void
+write_unlock_irq(rwlock_t *rw)
+{
+
+       KASSERT(rw->rw_nreaders == 0);
+       mutex_spin_exit(&rw->rw_lock);
+}
+
+static inline void
+read_lock(rwlock_t *rw)
+{
+
+       mutex_spin_enter(&rw->rw_lock);
+       KASSERT(rw->rw_nreaders < UINT_MAX);
+       rw->rw_nreaders++;
+       mutex_spin_exit(&rw->rw_lock);
+}
+
+static inline void
+read_unlock(rwlock_t *rw)
+{
+
+       mutex_spin_enter(&rw->rw_lock);
+       KASSERT(0 < rw->rw_nreaders);
+       rw->rw_nreaders--;
+       mutex_spin_exit(&rw->rw_lock);
+}
 
 #endif  /* _LINUX_SPINLOCK_H_ */
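
The same technique, modeled as a self-contained userland sketch (not part
of the commit) with pthreads standing in for kmutex: a plain mutex guards
a reader count, and the writer reacquires the mutex until the count
drains, then keeps it held for the whole write section.  Illustrative
only; the kernel version additionally raises the IPL and gets LOCKDEBUG
checking via mutex(9).

#include <assert.h>
#include <limits.h>
#include <pthread.h>

struct toy_rwlock {
	pthread_mutex_t	trw_lock;
	unsigned	trw_nreaders;
};

static void
toy_read_lock(struct toy_rwlock *rw)
{

	/*
	 * Bump the reader count.  The mutex is not held across the
	 * read section itself, which is what makes recursive (nested)
	 * readers safe.
	 */
	pthread_mutex_lock(&rw->trw_lock);
	assert(rw->trw_nreaders < UINT_MAX);
	rw->trw_nreaders++;
	pthread_mutex_unlock(&rw->trw_lock);
}

static void
toy_read_unlock(struct toy_rwlock *rw)
{

	pthread_mutex_lock(&rw->trw_lock);
	assert(rw->trw_nreaders > 0);
	rw->trw_nreaders--;
	pthread_mutex_unlock(&rw->trw_lock);
}

static void
toy_write_lock(struct toy_rwlock *rw)
{

	/*
	 * Retake the mutex until no readers remain; it then stays
	 * held, excluding both new readers and other writers.
	 */
	for (;;) {
		pthread_mutex_lock(&rw->trw_lock);
		if (rw->trw_nreaders == 0)
			break;
		pthread_mutex_unlock(&rw->trw_lock);
	}
}

static void
toy_write_unlock(struct toy_rwlock *rw)
{

	assert(rw->trw_nreaders == 0);
	pthread_mutex_unlock(&rw->trw_lock);
}

As the comment in the diff concedes, nothing stops new readers from
slipping in between the writer's retries, so a steady stream of readers
can starve a writer indefinitely; the commit accepts that trade-off in
exchange for a trivially correct implementation.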


