Source-Changes-HG archive
[src/trunk]: src Support modifying an existing timer without having to delete...
details: https://anonhg.NetBSD.org/src/rev/b3f2ffacf0a4
branches: trunk
changeset: 989969:b3f2ffacf0a4
user: thorpej <thorpej%NetBSD.org@localhost>
date: Fri Oct 22 04:49:24 2021 +0000
description:
Support modifying an existing timer without having to delete it first.
Semantics match FreeBSD.
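
As a minimal sketch of the semantics this change adds (mirroring the usage pattern exercised by the new t_timer.c test below, not an excerpt from the commit itself): an application can re-issue EV_ADD for an existing EVFILT_TIMER ident to change its period in place, with no prior EV_DELETE; the kernel halts the old callout, clears the accumulated expiration count, and rearms the timer with the new interval.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>

int
main(void)
{
	struct kevent ev;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* Arm timer ident 1 with a 500ms period. */
	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent (add)");

	/*
	 * Later: switch the same timer to a 4000ms period by
	 * re-adding it -- no EV_DELETE needed.  Any pending
	 * expiration count is reset by the kernel.
	 */
	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD, 0, 4000, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent (modify)");

	return 0;
}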
diffstat:
sys/kern/kern_event.c | 101 ++++++++++++++++++++++++++++++++++++++---
tests/kernel/kqueue/t_timer.c | 81 ++++++++++++++++++++++++++++++++-
2 files changed, 172 insertions(+), 10 deletions(-)
diffs (273 lines):
diff -r 4158083e2adc -r b3f2ffacf0a4 sys/kern/kern_event.c
--- a/sys/kern/kern_event.c Fri Oct 22 02:57:23 2021 +0000
+++ b/sys/kern/kern_event.c Fri Oct 22 04:49:24 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_event.c,v 1.135 2021/10/21 02:34:03 thorpej Exp $ */
+/* $NetBSD: kern_event.c,v 1.136 2021/10/22 04:49:24 thorpej Exp $ */
/*-
* Copyright (c) 2008, 2009, 2021 The NetBSD Foundation, Inc.
@@ -63,7 +63,7 @@
#endif /* _KERNEL_OPT */
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.135 2021/10/21 02:34:03 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.136 2021/10/22 04:49:24 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -102,6 +102,7 @@
static void knote_enqueue(struct knote *);
static void knote_activate(struct knote *);
static void knote_activate_locked(struct knote *);
+static void knote_deactivate_locked(struct knote *);
static void filt_kqdetach(struct knote *);
static int filt_kqueue(struct knote *, long hint);
@@ -113,6 +114,7 @@
static int filt_timerattach(struct knote *);
static void filt_timerdetach(struct knote *);
static int filt_timer(struct knote *, long hint);
+static int filt_timertouch(struct knote *, struct kevent *, long type);
static int filt_userattach(struct knote *);
static void filt_userdetach(struct knote *);
static int filt_user(struct knote *, long hint);
@@ -163,6 +165,7 @@
.f_attach = filt_timerattach,
.f_detach = filt_timerdetach,
.f_event = filt_timer,
+ .f_touch = filt_timertouch,
};
static const struct filterops user_filtops = {
@@ -1261,6 +1264,22 @@
mutex_spin_exit(&kq->kq_lock);
}
+static inline void
+filt_timerstart(struct knote *kn, uintptr_t tticks)
+{
+ callout_t *calloutp = kn->kn_hook;
+
+ KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
+ KASSERT(!callout_pending(calloutp));
+
+ if (__predict_false(tticks == FILT_TIMER_NOSCHED)) {
+ kn->kn_data = 1;
+ } else {
+ KASSERT(tticks <= INT_MAX);
+ callout_reset(calloutp, (int)tticks, filt_timerexpire, kn);
+ }
+}
+
static int
filt_timerattach(struct knote *kn)
{
@@ -1295,12 +1314,7 @@
KASSERT(kn->kn_sfflags == kev.fflags);
kn->kn_hook = calloutp;
- if (__predict_false(tticks == FILT_TIMER_NOSCHED)) {
- kn->kn_data = 1;
- } else {
- KASSERT(tticks <= INT_MAX);
- callout_reset(calloutp, (int)tticks, filt_timerexpire, kn);
- }
+ filt_timerstart(kn, tticks);
mutex_spin_exit(&kq->kq_lock);
@@ -1332,6 +1346,61 @@
}
static int
+filt_timertouch(struct knote *kn, struct kevent *kev, long type)
+{
+ struct kqueue *kq = kn->kn_kq;
+ callout_t *calloutp;
+ uintptr_t tticks;
+ int error;
+
+ KASSERT(mutex_owned(&kq->kq_lock));
+
+ switch (type) {
+ case EVENT_REGISTER:
+ /* Only relevant for EV_ADD. */
+ if ((kev->flags & EV_ADD) == 0) {
+ return 0;
+ }
+
+ /*
+ * Stop the timer, under the assumption that if
+ * an application is re-configuring the timer,
+ * they no longer care about the old one. We
+ * can safely drop the kq_lock while we wait
+ * because fdp->fd_lock will be held throughout,
+ * ensuring that no one can sneak in with an
+ * EV_DELETE or close the kq.
+ */
+ KASSERT(mutex_owned(&kq->kq_fdp->fd_lock));
+
+ calloutp = kn->kn_hook;
+ callout_halt(calloutp, &kq->kq_lock);
+ KASSERT(mutex_owned(&kq->kq_lock));
+ knote_deactivate_locked(kn);
+ kn->kn_data = 0;
+
+ error = filt_timercompute(kev, &tticks);
+ if (error) {
+ return error;
+ }
+ kn->kn_sdata = kev->data;
+ kn->kn_flags = kev->flags;
+ kn->kn_sfflags = kev->fflags;
+ filt_timerstart(kn, tticks);
+ break;
+
+ case EVENT_PROCESS:
+ *kev = kn->kn_kevent;
+ break;
+
+ default:
+ panic("%s: invalid type (%ld)", __func__, type);
+ }
+
+ return 0;
+}
+
+static int
filt_timer(struct knote *kn, long hint)
{
struct kqueue *kq = kn->kn_kq;
@@ -2686,6 +2755,22 @@
mutex_spin_exit(&kq->kq_lock);
}
+static void
+knote_deactivate_locked(struct knote *kn)
+{
+ struct kqueue *kq = kn->kn_kq;
+
+ if (kn->kn_status & KN_QUEUED) {
+ kq_check(kq);
+ kn->kn_status &= ~KN_QUEUED;
+ TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
+ KASSERT(KQ_COUNT(kq) > 0);
+ kq->kq_count--;
+ kq_check(kq);
+ }
+ kn->kn_status &= ~KN_ACTIVE;
+}
+
/*
* Set EV_EOF on the specified knote. Also allows additional
* EV_* flags to be set (e.g. EV_ONESHOT).
diff -r 4158083e2adc -r b3f2ffacf0a4 tests/kernel/kqueue/t_timer.c
--- a/tests/kernel/kqueue/t_timer.c Fri Oct 22 02:57:23 2021 +0000
+++ b/tests/kernel/kqueue/t_timer.c Fri Oct 22 04:49:24 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: t_timer.c,v 1.1 2021/10/13 04:57:19 thorpej Exp $ */
+/* $NetBSD: t_timer.c,v 1.2 2021/10/22 04:49:24 thorpej Exp $ */
/*-
* Copyright (c) 2021 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: t_timer.c,v 1.1 2021/10/13 04:57:19 thorpej Exp $");
+__RCSID("$NetBSD: t_timer.c,v 1.2 2021/10/22 04:49:24 thorpej Exp $");
#include <sys/types.h>
#include <sys/event.h>
@@ -170,6 +170,82 @@
event[0].data == TIME1_COUNT + 1);
}
+ATF_TC(modify);
+ATF_TC_HEAD(modify, tc)
+{
+ atf_tc_set_md_var(tc, "descr",
+ "tests modifying a timer");
+}
+
+ATF_TC_BODY(modify, tc)
+{
+ struct kevent event[1];
+ struct timespec ts = { 0, 0 };
+ struct timespec sleepts;
+ int kq;
+
+ ATF_REQUIRE((kq = kqueue()) >= 0);
+
+ /*
+ * Start a 500ms timer, sleep for 5 seconds, and check
+ * the total count.
+ */
+ EV_SET(&event[0], 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
+ ATF_REQUIRE(kevent(kq, event, 1, NULL, 0, NULL) == 0);
+
+ sleepts.tv_sec = 5;
+ sleepts.tv_nsec = 0;
+ ATF_REQUIRE(nanosleep(&sleepts, NULL) == 0);
+
+ ATF_REQUIRE(kevent(kq, NULL, 0, event, 1, &ts) == 1);
+ ATF_REQUIRE(event[0].ident == 1);
+ ATF_REQUIRE(event[0].data >= 9 && event[0].data <= 11);
+
+ /*
+ * Modify to a 4 second timer, sleep for 5 seconds, and check
+ * the total count.
+ */
+ EV_SET(&event[0], 1, EVFILT_TIMER, EV_ADD, 0, 4000, NULL);
+ ATF_REQUIRE(kevent(kq, event, 1, NULL, 0, NULL) == 0);
+
+ sleepts.tv_sec = 5;
+ sleepts.tv_nsec = 0;
+ ATF_REQUIRE(nanosleep(&sleepts, NULL) == 0);
+
+ ATF_REQUIRE(kevent(kq, NULL, 0, event, 1, &ts) == 1);
+ ATF_REQUIRE(event[0].ident == 1);
+ ATF_REQUIRE(event[0].data == 1);
+
+ /*
+ * Start a 500ms timer, sleep for 2 seconds.
+ */
+ EV_SET(&event[0], 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
+ ATF_REQUIRE(kevent(kq, event, 1, NULL, 0, NULL) == 0);
+
+ sleepts.tv_sec = 2;
+ sleepts.tv_nsec = 0;
+ ATF_REQUIRE(nanosleep(&sleepts, NULL) == 0);
+
+ /*
+ * Set the SAME timer, sleep for 2 seconds.
+ */
+ EV_SET(&event[0], 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
+ ATF_REQUIRE(kevent(kq, event, 1, NULL, 0, NULL) == 0);
+
+ sleepts.tv_sec = 2;
+ sleepts.tv_nsec = 0;
+ ATF_REQUIRE(nanosleep(&sleepts, NULL) == 0);
+
+ /*
+ * The kernel should have reset the count when modifying the
+ * timer, so we should only expect to see the expiration count
+ * for the second sleep.
+ */
+ ATF_REQUIRE(kevent(kq, NULL, 0, event, 1, &ts) == 1);
+ ATF_REQUIRE(event[0].ident == 1);
+ ATF_REQUIRE(event[0].data >= 3 && event[0].data <= 5);
+}
+
ATF_TC(abstime);
ATF_TC_HEAD(abstime, tc)
{
@@ -269,6 +345,7 @@
ATF_TP_ADD_TC(tp, count_expirations);
ATF_TP_ADD_TC(tp, abstime);
ATF_TP_ADD_TC(tp, timer_units);
+ ATF_TP_ADD_TC(tp, modify);
return atf_no_error();
}