tech-kern archive


Re: PHP performance on Xen domU with multiple vcpus



> Date: Tue, 20 May 2025 14:34:51 +0000
> From: Emmanuel Dreyfus <manu%netbsd.org@localhost>
> 
> On Tue, May 20, 2025 at 01:53:45PM +0000, Taylor R Campbell wrote:
> > Can you please try the attached patch and share any output from the
> > same dtrace script?
> 
> I was not able to reach single-user, the console is swamped with messages
> like this:
> [  20.6228413] WARNING: hardclock skipped 2063201598ns (21653211628 -> 195900100
> 30), exceeding maximum of 4294967295ns for timecounter(9)

Sorry -- please try the attached patch instead; I forgot to update
ci->ci_xen_hardclock_systime_ns.
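
For reference, the numbers in that warning are internally consistent:
the skipped interval is exactly the difference of the two timestamps
printed in parentheses, and the 4294967295ns cap is 2^32 - 1
nanoseconds, the stated maximum for a single timecounter(9) update.
A minimal userland sketch of the arithmetic (purely illustrative, not
kernel code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* The two timestamps printed in the console warning above. */
	uint64_t a = 21653211628;
	uint64_t b = 19590010030;

	/* The "skipped" interval the kernel reported: 2063201598ns. */
	printf("skipped %" PRIu64 "ns\n", a - b);

	/* The timecounter(9) limit from the same message: 2^32 - 1. */
	printf("maximum %" PRIu64 "ns\n", (uint64_t)UINT32_MAX);
	return 0;
}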
# HG changeset patch
# User Taylor R Campbell <riastradh%NetBSD.org@localhost>
# Date 1747748859 0
#      Tue May 20 13:47:39 2025 +0000
# Branch trunk
# Node ID 9c89bfc0cd823dced3dda0e2661d5635f254f95d
# Parent  99071f28975be8f07e6ea8ae2f93b2e0e5e6c42e
# EXP-Topic riastradh-highrestimer
xen: Avoid playing catchup with the hardclock timer.

It's not clear this is worthwhile for anything except possibly the
emulated schedclock, which really shouldn't be running via hardclock
anyway -- it should be driven by separate high-resolution timer
logic, since Xen gives us a high-resolution timer.

Also:

1. Rearm the timer immediately, rather than after calling hardclock,
   so less time has elapsed since it fired -- this should reduce the
   chance that rearming the timer fails.

2. Add a dtrace probe for when we retry rearming the timer.

PR port-xen/NNNNN: ...
PR kern/MMMMM: (the hardclock catchup semantics PR)
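
(Not part of the patch: to make the "separate high-resolution timer
logic" remark above concrete, here is a rough sketch of arming the
Xen one-shot timer at an arbitrary deadline rather than at a fixed
hz tick.  xen_arm_deadline is a hypothetical helper; the two calls
it makes are the same ones used in the diff below.)

/*
 * Hypothetical sketch only: schedule the next timer interrupt at an
 * arbitrary nanosecond offset from the current Xen system time,
 * instead of stepping in fixed ns_per_tick increments.
 */
static int
xen_arm_deadline(uint64_t delay_ns)
{
	uint64_t next = xen_vcputime_systime_ns() + delay_ns;

	return HYPERVISOR_set_timer_op(next);
}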

diff -r 99071f28975b -r 9c89bfc0cd82 sys/arch/xen/xen/xen_clock.c
--- a/sys/arch/xen/xen/xen_clock.c	Tue May 20 13:43:03 2025 +0000
+++ b/sys/arch/xen/xen/xen_clock.c	Tue May 20 13:47:39 2025 +0000
@@ -127,6 +127,10 @@ SDT_PROBE_DEFINE3(sdt, xen, hardclock, j
     "uint64_t"/*last_systime_ns*/,
     "uint64_t"/*this_systime_ns*/,
     "uint64_t"/*nticks*/);
+SDT_PROBE_DEFINE3(sdt, xen, hardclock, rearm__failed,
+    "uint64_t"/*last_systime_ns*/,
+    "uint64_t"/*this_systime_ns*/,
+    "uint64_t"/*next_systime_ns*/);
 SDT_PROBE_DEFINE3(sdt, xen, hardclock, missed,
     "uint64_t"/*last_systime_ns*/,
     "uint64_t"/*this_systime_ns*/,
@@ -800,6 +804,7 @@ xen_timer_handler(void *cookie, struct c
 #if defined(XENPV)
 	frame = NULL; /* We use values cached in curcpu()  */
 #endif
+
 	/*
 	 * Find how many nanoseconds of Xen system time has elapsed
 	 * since the last hardclock tick.
@@ -822,12 +827,18 @@ xen_timer_handler(void *cookie, struct c
 		 */
 		ci->ci_xen_hardclock_systime_ns = last = now - ns_per_tick;
 	}
+
+	/*
+	 * Compute the time since the last tick.  Ideally, this should
+	 * lie somewhere in [ns_per_tick, 2*ns_per_tick), but if the
+	 * timer fired early it might lie in [0, ns_per_tick) and if
+	 * the timer fired excessively late it might lie in
+	 * [2*ns_per_tick, \infty).
+	 */
 	delta = now - last;
 
 	/*
-	 * Play hardclock catchup: run the hardclock timer as many
-	 * times as appears necessary based on how much time has
-	 * passed.
+	 * Check to see if we ran late by a tick or more.
 	 */
 	if (__predict_false(delta >= 2*ns_per_tick)) {
 		SDT_PROBE3(sdt, xen, hardclock, jump,
@@ -848,36 +859,44 @@ xen_timer_handler(void *cookie, struct c
 			    xen_timecounter.tc_counter_mask);
 			ci->ci_xen_timecounter_jump_evcnt.ev_count++;
 		}
-		/* don't try to catch up more than one second at once */
-		if (delta > 1000000000UL)
-			delta = 1000000000UL;
+
+		/*
+		 * If delta \in [k*ns_per_tick, (k + 1)*ns_per_tick),
+		 * set next := (k + 1)*ns_per_tick.  Note that
+		 * rounddown(delta, ns_per_tick) = k*ns_per_tick, so we
+		 * add one more to get (k + 1)*ns_per_tick.
+		 */
+		next = last + rounddown(delta, ns_per_tick) + ns_per_tick;
+	} else {
+		next = last + 2*ns_per_tick;
 	}
-	while (delta >= ns_per_tick) {
-		ci->ci_xen_hardclock_systime_ns += ns_per_tick;
-		delta -= ns_per_tick;
-		hardclock(frame);
-		if (__predict_false(delta >= ns_per_tick)) {
-			SDT_PROBE3(sdt, xen, hardclock, missed,
-			    last, now, delta);
-			ci->ci_xen_missed_hardclock_evcnt.ev_count++;
+
+	/*
+	 * Re-arm the timer.  If it fails, the time may have already
+	 * passed, so try one more tick later.  If that fails, panic.
+	 */
+	ci->ci_xen_hardclock_systime_ns = next - ns_per_tick;
+	error = HYPERVISOR_set_timer_op(next);
+	if (error) {
+		SDT_PROBE3(sdt, xen, hardclock, rearm__failed,
+		    last, now, next);
+		last = next;
+		next += roundup(xen_vcputime_systime_ns() - last,
+		    ns_per_tick);
+		next += ns_per_tick;
+		ci->ci_xen_hardclock_systime_ns = next - ns_per_tick;
+		error = HYPERVISOR_set_timer_op(next);
+		if (error) {
+			panic("failed to rearm Xen timer"
+			    " at %"PRIu64" ns or %"PRIu64" ns: %d",
+			    last, next, error);
 		}
 	}
 
 	/*
-	 * Re-arm the timer.  If it fails, it's probably because the
-	 * time is in the past, possibly because we're in the
-	 * process of catching up missed hardclock calls.
-	 * In this case schedule a tick in the near future.
+	 * Call hardclock once.
 	 */
-	next = ci->ci_xen_hardclock_systime_ns + ns_per_tick;
-	error = HYPERVISOR_set_timer_op(next);
-	if (error) {
-		next = xen_vcputime_systime_ns() + ns_per_tick / 2;
-		error = HYPERVISOR_set_timer_op(next);
-		if (error) {
-			panic("failed to re-arm Xen timer %d", error);
-		}
-	}
+	hardclock(frame);
 
 	/* Success!  */
 	return 0;
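
For anyone checking the arithmetic in the new code: with delta in
[k*ns_per_tick, (k + 1)*ns_per_tick), rounddown(delta, ns_per_tick)
is k*ns_per_tick, so next = last + (k + 1)*ns_per_tick always lands
strictly after now.  A standalone sketch, with rounddown/roundup
spelled out the same way as the sys/param.h macros (the values are
made up for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same definitions as the kernel's sys/param.h macros. */
#define rounddown(x, y)	(((x) / (y)) * (y))
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	const uint64_t ns_per_tick = 10000000;	/* 10ms tick, hz=100 */
	const uint64_t last = 0;

	/* Suppose the timer fired 2.5 ticks after last, i.e. late. */
	uint64_t now = last + 5*ns_per_tick/2;
	uint64_t delta = now - last;
	uint64_t next;

	if (delta >= 2*ns_per_tick) {
		/* k = 2 here, so next = last + 3*ns_per_tick. */
		next = last + rounddown(delta, ns_per_tick) + ns_per_tick;
	} else {
		next = last + 2*ns_per_tick;
	}

	/* Prints: next = 30000000ns, 3 ticks after last, past now. */
	printf("next = %" PRIu64 "ns, %" PRIu64 " ticks after last\n",
	    next, (next - last)/ns_per_tick);
	return 0;
}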

