tech-kern archive


xc_barrier()



gcc 8's -Wcast-function-type (enabled by -Wextra, which we do turn on for
x86 ports and a few others) is not very happy about the many function
casts of nullop and friends in the kernel.
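
For the record, the cast gcc complains about changes both the arity and
the return type of the function (prototypes below quoted from memory,
see sys/systm.h and sys/xcall.h):

	int nullop(void *);			/* one arg, returns int */
	typedef void (*xcfunc_t)(void *, void *); /* two args, returns void */

	/* gcc 8 -Wcast-function-type flags this cast: */
	xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);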

A small portion of these casts is in code that does an xcall barrier with:

	uint64_t where;
	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(where);

The attached patch replaces all these with

	xc_barrier(0);

with the obvious implementation.
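
For those who don't want to scan the diff, the new function is just the
old two-call sequence wrapped up, passing a private empty callback
(xc_nop in the patch) that has the proper xcfunc_t type; the full
version is in the subr_xcall.c hunk below:

	void
	xc_barrier(u_int flags)
	{
		uint64_t where;

		where = xc_broadcast(flags, xc_nop, NULL, NULL);
		xc_wait(where);
	}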

Suggestions for a better name and (especially) for the descriptive
comment and the man-page text are welcome.

-uwe
Index: sys/xcall.h
===================================================================
RCS file: /cvsroot/src/sys/sys/xcall.h,v
retrieving revision 1.7
diff -u -p -r1.7 xcall.h
--- sys/xcall.h	27 Aug 2018 07:10:15 -0000	1.7
+++ sys/xcall.h	6 Oct 2019 12:28:38 -0000
@@ -53,6 +53,8 @@ uint64_t	xc_broadcast(u_int, xcfunc_t, v
 uint64_t	xc_unicast(u_int, xcfunc_t, void *, void *, struct cpu_info *);
 void		xc_wait(uint64_t);
 
+void		xc_barrier(u_int);
+
 unsigned int	xc_encode_ipl(int);
 
 #endif	/* _KERNEL */
Index: kern/subr_xcall.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_xcall.c,v
retrieving revision 1.26
diff -u -p -r1.26 subr_xcall.c
--- kern/subr_xcall.c	7 Feb 2018 04:25:09 -0000	1.26
+++ kern/subr_xcall.c	6 Oct 2019 12:28:37 -0000
@@ -247,6 +247,30 @@ xc_init_cpu(struct cpu_info *ci)
 	KASSERT(error == 0);
 }
 
+
+static void
+xc_nop(void *arg1, void *arg2)
+{
+
+	return;
+}
+
+
+/*
+ * xc_barrier:
+ *
+ *	Broadcast a nop to all CPUs in the system.
+ */
+void
+xc_barrier(unsigned int flags)
+{
+	uint64_t where;
+
+	where = xc_broadcast(flags, xc_nop, NULL, NULL);
+	xc_wait(where);
+}
+
+
 /*
  * xc_broadcast:
  *
Index: arch/x86/acpi/acpi_cpu_md.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/acpi/acpi_cpu_md.c,v
retrieving revision 1.79
diff -u -p -r1.79 acpi_cpu_md.c
--- arch/x86/acpi/acpi_cpu_md.c	10 Nov 2018 09:42:42 -0000	1.79
+++ arch/x86/acpi/acpi_cpu_md.c	6 Oct 2019 12:28:35 -0000
@@ -378,7 +378,6 @@ acpicpu_md_cstate_stop(void)
 {
 	static char text[16];
 	void (*func)(void);
-	uint64_t xc;
 	bool ipi;
 
 	x86_cpu_idle_get(&func, text, sizeof(text));
@@ -393,8 +392,7 @@ acpicpu_md_cstate_stop(void)
 	 * Run a cross-call to ensure that all CPUs are
 	 * out from the ACPI idle-loop before detachment.
 	 */
-	xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-	xc_wait(xc);
+	xc_barrier(0);
 
 	return 0;
 }
Index: kern/kern_lwp.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_lwp.c,v
retrieving revision 1.204
diff -u -p -r1.204 kern_lwp.c
--- kern/kern_lwp.c	3 Oct 2019 22:48:44 -0000	1.204
+++ kern/kern_lwp.c	6 Oct 2019 12:28:37 -0000
@@ -367,7 +367,6 @@ static void
 lwp_dtor(void *arg, void *obj)
 {
 	lwp_t *l = obj;
-	uint64_t where;
 	(void)l;
 
 	/*
@@ -379,8 +378,7 @@ lwp_dtor(void *arg, void *obj)
 	 * the value of l->l_cpu must be still valid at this point.
 	 */
 	KASSERT(l->l_cpu != NULL);
-	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-	xc_wait(where);
+	xc_barrier(0);
 }
 
 /*
Index: kern/kern_ras.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_ras.c,v
retrieving revision 1.38
diff -u -p -r1.38 kern_ras.c
--- kern/kern_ras.c	4 Jul 2016 07:56:07 -0000	1.38
+++ kern/kern_ras.c	6 Oct 2019 12:28:37 -0000
@@ -66,9 +66,7 @@ ras_sync(void)
 	/* No need to sync if exiting or single threaded. */
 	if (curproc->p_nlwps > 1 && ncpu > 1) {
 #ifdef NO_SOFTWARE_PATENTS
-		uint64_t where;
-		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-		xc_wait(where);
+		xc_barrier(0);
 #else
 		/*
 		 * Assumptions:
Index: kern/kern_softint.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_softint.c,v
retrieving revision 1.47
diff -u -p -r1.47 kern_softint.c
--- kern/kern_softint.c	17 May 2019 03:34:26 -0000	1.47
+++ kern/kern_softint.c	6 Oct 2019 12:28:37 -0000
@@ -407,7 +407,6 @@ softint_disestablish(void *arg)
 	softcpu_t *sc;
 	softhand_t *sh;
 	uintptr_t offset;
-	uint64_t where;
 	u_int flags;
 
 	offset = (uintptr_t)arg;
@@ -432,8 +431,7 @@ softint_disestablish(void *arg)
 	 * SOFTINT_ACTIVE already set.
 	 */
 	if (__predict_true(mp_online)) {
-		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-		xc_wait(where);
+		xc_barrier(0);
 	}
 
 	for (;;) {
Index: kern/kern_syscall.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_syscall.c,v
retrieving revision 1.18
diff -u -p -r1.18 kern_syscall.c
--- kern/kern_syscall.c	6 May 2019 08:05:03 -0000	1.18
+++ kern/kern_syscall.c	6 Oct 2019 12:28:37 -0000
@@ -146,7 +146,6 @@ syscall_disestablish(const struct emul *
 {
 	struct sysent *sy;
 	const uint32_t *sb;
-	uint64_t where;
 	lwp_t *l;
 	int i;
 
@@ -175,8 +174,7 @@ syscall_disestablish(const struct emul *
 	 * of sy_call visible to all CPUs, and upon return we can be sure
 	 * that we see pertinent values of l_sysent posted by remote CPUs.
 	 */
-	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-	xc_wait(where);
+	xc_barrier(0);
 
 	/*
 	 * Now it's safe to check l_sysent.  Run through all LWPs and see
Index: kern/kern_tc.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_tc.c,v
retrieving revision 1.51
diff -u -p -r1.51 kern_tc.c
--- kern/kern_tc.c	1 Jul 2018 15:12:06 -0000	1.51
+++ kern/kern_tc.c	6 Oct 2019 12:28:37 -0000
@@ -609,7 +609,6 @@ tc_detach(struct timecounter *target)
 	struct timecounter *tc;
 	struct timecounter **tcp = NULL;
 	int removals;
-	uint64_t where;
 	lwp_t *l;
 
 	/* First, find the timecounter. */
@@ -652,8 +651,7 @@ tc_detach(struct timecounter *target)
 	 * old timecounter state.
 	 */
 	for (;;) {
-		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-		xc_wait(where);
+		xc_barrier(0);
 
 		mutex_enter(proc_lock);
 		LIST_FOREACH(l, &alllwp, l_list) {
Index: kern/subr_pserialize.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_pserialize.c,v
retrieving revision 1.12
diff -u -p -r1.12 subr_pserialize.c
--- kern/subr_pserialize.c	14 Aug 2018 01:06:01 -0000	1.12
+++ kern/subr_pserialize.c	6 Oct 2019 12:28:37 -0000
@@ -147,7 +147,6 @@ void
 pserialize_perform(pserialize_t psz)
 {
 	int n;
-	uint64_t xc;
 
 	KASSERT(!cpu_intr_p());
 	KASSERT(!cpu_softintr_p());
@@ -187,8 +186,7 @@ pserialize_perform(pserialize_t psz)
 		 */
 		if (n++ > 1)
 			kpause("psrlz", false, 1, NULL);
-		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
-		xc_wait(xc);
+		xc_barrier(XC_HIGHPRI);
 
 		mutex_spin_enter(&psz_lock);
 	} while (!kcpuset_iszero(psz->psz_target));
Index: net/if.c
===================================================================
RCS file: /cvsroot/src/sys/net/if.c,v
retrieving revision 1.462
diff -u -p -r1.462 if.c
--- net/if.c	25 Sep 2019 09:53:37 -0000	1.462
+++ net/if.c	6 Oct 2019 12:28:38 -0000
@@ -1307,7 +1307,6 @@ if_detach(struct ifnet *ifp)
 	struct domain *dp;
 	const struct protosw *pr;
 	int s, i, family, purged;
-	uint64_t xc;
 
 #ifdef IFAREF_DEBUG
 	if_build_ifa_list(ifp);
@@ -1513,8 +1512,7 @@ restart:
 	if (in6_present)
 		pktq_barrier(ip6_pktq);
 #endif
-	xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-	xc_wait(xc);
+	xc_barrier(0);
 
 	if (ifp->if_percpuq != NULL) {
 		if_percpuq_destroy(ifp->if_percpuq);
Index: net/agr/if_agr.c
===================================================================
RCS file: /cvsroot/src/sys/net/agr/if_agr.c,v
retrieving revision 1.49
diff -u -p -r1.49 if_agr.c
--- net/agr/if_agr.c	26 Apr 2019 11:51:56 -0000	1.49
+++ net/agr/if_agr.c	6 Oct 2019 12:28:38 -0000
@@ -894,13 +894,11 @@ agrreq_copyout(void *ubuf, struct agrreq
 static void
 agr_sync(void)
 {
-	uint64_t h;
 
 	if (!mp_online)
 		return;
 
-	h = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-	xc_wait(h);
+	xc_barrier(0);
 }
 
 static int
Index: opencrypto/crypto.c
===================================================================
RCS file: /cvsroot/src/sys/opencrypto/crypto.c,v
retrieving revision 1.109
diff -u -p -r1.109 crypto.c
--- opencrypto/crypto.c	1 Oct 2019 18:00:09 -0000	1.109
+++ opencrypto/crypto.c	6 Oct 2019 12:28:38 -0000
@@ -618,7 +618,6 @@ crypto_destroy(bool exit_kthread)
 
 	if (exit_kthread) {
 		struct cryptocap *cap = NULL;
-		uint64_t where;
 		bool is_busy = false;
 
 		/* if we have any in-progress requests, don't unload */
@@ -657,8 +656,7 @@ crypto_destroy(bool exit_kthread)
 			qs->crp_ret_q_exit_flag = true;
 			crypto_put_crp_ret_qs(ci);
 		}
-		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-		xc_wait(where);
+		xc_barrier(0);
 	}
 
 	if (sysctl_opencrypto_clog != NULL)
Index: rump/kern/lib/libsysproxy/sysproxy.c
===================================================================
RCS file: /cvsroot/src/sys/rump/kern/lib/libsysproxy/sysproxy.c,v
retrieving revision 1.7
diff -u -p -r1.7 sysproxy.c
--- rump/kern/lib/libsysproxy/sysproxy.c	17 May 2019 03:34:26 -0000	1.7
+++ rump/kern/lib/libsysproxy/sysproxy.c	6 Oct 2019 12:28:38 -0000
@@ -141,7 +141,6 @@ static void
 hyp_lwpexit(void)
 {
 	struct proc *p = curproc;
-	uint64_t where;
 	struct lwp *l;
 
 	mutex_enter(p->p_lock);
@@ -163,8 +162,7 @@ hyp_lwpexit(void)
 	 * we wake up the threads.
 	 */
 
-	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
-	xc_wait(where);
+	xc_barrier(0);
 
 	/*
 	 * Ok, all lwps are either:

