[src/netbsd-10]: src/tools/compat Pull up following revision(s) (requested by...



details:   https://anonhg.NetBSD.org/src/rev/cd44b60e4310
branches:  netbsd-10
changeset: 377189:cd44b60e4310
user:      martin <martin%NetBSD.org@localhost>
date:      Fri Jun 30 17:13:50 2023 +0000

description:
Pull up following revision(s) (requested by riastradh in ticket #221):

        external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c: revision 1.18
        external/cddl/osnet/sys/sys/opentypes.h: revision 1.7
        tools/compat/configure: revision 1.100
        external/cddl/osnet/dist/tools/ctf/cvt/barrier.c: revision 1.6
        external/cddl/osnet/dist/tools/ctf/cvt/barrier.h: revision 1.4
        external/cddl/osnet/dist/tools/ctf/cvt/barrier.c: revision 1.7
        external/cddl/osnet/dist/tools/ctf/cvt/barrier.c: revision 1.8
        tools/compat/configure.ac: revision 1.100
        external/cddl/osnet/dist/tools/ctf/cvt/tdata.c: revision 1.10
        tools/compat/nbtool_config.h.in: revision 1.54

ctfmerge: error check sem_*() and pthread_*() APIs

Call terminate() if sem_*() returns -1 or if pthread_*() returns non-zero.
(errno is set from the pthread_*() return value so that terminate() prints
the strerror() message.)
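
In practice the change wraps each call as follows (excerpted from the
barrier.c hunk below): pthread_*() reports failure through its return
value without touching errno, while sem_*() returns -1 and sets errno
itself:

  /* pthread_*() returns the error number; stash it in errno... */
  if ((errno = pthread_mutex_init(&bar->bar_lock, NULL)) != 0)
          terminate("%s: pthread_mutex_init(bar_lock)", __func__);

  /* ...whereas sem_*() returns -1 and sets errno on failure. */
  if (sem_init(&bar->bar_sem, 0, 0) == -1)
          terminate("%s: sem_init(bar_sem)", __func__);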

Note: Failing on errors instead of ignoring them helps identify
reasons for intermittent failures, such as those on macOS host builds:

  ERROR: nbctfmerge: barrier_init: sem_init(bar_sem): Function not implemented

ctfmerge: fix macOS semaphore implementation

Use dispatch_semaphore_create() if present instead of sem_init().
macOS doesn't actually implement sem_init() et al., even though it
provides the (deprecated) prototypes.
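
A minimal standalone probe (not part of this change) demonstrates the
failure mode: on a macOS host, sem_init() compiles against the
deprecated prototype but fails at run time with ENOSYS, matching the
"Function not implemented" error quoted above.

  #include <errno.h>
  #include <semaphore.h>
  #include <stdio.h>
  #include <string.h>

  int
  main(void)
  {
          sem_t sem;

          /* expected to fail with ENOSYS on macOS */
          if (sem_init(&sem, 0, 0) == -1)
                  printf("sem_init: %s\n", strerror(errno));
          return 0;
  }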

This was detected by the previous commit to ctfmerge
that added error handling.

Implement ctfmerge's barrier operations in terms of
dispatch(3) APIs such as dispatch_semaphore_create() (et al).
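
Condensed from the barrier.c and barrier.h hunks below, the mapping
from the POSIX semaphore calls to dispatch(3) is:

  #include <dispatch/dispatch.h>
  typedef dispatch_semaphore_t sema_t;

  /* sem_init(&sem, 0, 0)  ->  create with an initial count of 0 */
  bar->bar_sem = dispatch_semaphore_create(0);

  /* sem_wait(&sem)  ->  block until signalled */
  dispatch_semaphore_wait(bar->bar_sem, DISPATCH_TIME_FOREVER);

  /* sem_post(&sem)  ->  wake one waiter */
  dispatch_semaphore_signal(bar->bar_sem);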

Update tools/compat/configure.ac to find dispatch_semaphore_create().
Fixes ctfmerge on macOS hosts.
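
The configure.ac hunk itself falls outside the truncated diff; assuming
the standard AC_CHECK_FUNCS idiom (which defines HAVE_<FUNCTION>), the
generated template entry in nbtool_config.h.in would look roughly like:

  /* Define to 1 if you have the `dispatch_semaphore_create' function. */
  #undef HAVE_DISPATCH_SEMAPHORE_CREATE

The barrier.h hunk below then keys its dispatch(3) code path off
HAVE_DISPATCH_SEMAPHORE_CREATE from nbtool_config.h.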

Inspired by https://stackoverflow.com/a/27847103

tools/compat: regen for dispatch_semaphore_create

ctfmerge: fix macOS semaphore implementation, part 2

dispatch_semaphore_signal() doesn't return an error, just an
indicator of whether a thread was woken or not, so there is no
need to fail on a non-zero return.
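
The fixed call appears in the barrier.c hunk below; a reconstruction of
the part-1 version it replaces (the pre-fix line is not in the truncated
diff) would have followed the same pattern as the
dispatch_semaphore_wait() check:

  long error;

  /* part 1 (wrong): treated "a thread was woken" as an error */
  if ((error = dispatch_semaphore_signal(bar->bar_sem)) != 0)
          terminate("%s: dispatch_semaphore_signal(bar_sem) = %ld\n",
              __func__, error);

  /* part 2 (fixed): the return value only reports whether a waiter woke */
  dispatch_semaphore_signal(bar->bar_sem);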

osnet: on macOS, use <mach/boolean.h> for boolean_t

macOS/x86_64 defines boolean_t as 'unsigned int', not 'int',
which causes a build issue with tools/ctfmerge on that host
after my recent fixes for macOS semaphores.

So use <mach/boolean.h> instead of a local typedef under #ifdef __APPLE__.
May fix the macOS/x86_64 build issue reported by cjep@.
Builds fine on NetBSD/amd64 and on macOS/arm.
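
The opentypes.h hunk falls outside the truncated diff; a sketch of the
likely shape of the change, going by the description above:

  #ifdef __APPLE__
  /*
   * <mach/boolean.h> supplies the host's own boolean_t
   * ('unsigned int' on macOS/x86_64), avoiding a conflicting
   * local 'typedef int boolean_t;' on that host.
   */
  #include <mach/boolean.h>
  #else
  typedef int boolean_t;
  #endif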

Note: this compat stuff is clunky and, based on the commit log,
annoyingly error-prone. A newer sync of osnet from upstream /may/
improve a lot of these compat typedef workarounds for Solaris types...

diffstat:

 external/cddl/osnet/dist/tools/ctf/cvt/barrier.c  |   50 ++++-
 external/cddl/osnet/dist/tools/ctf/cvt/barrier.h  |    7 +
 external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c |  189 +++++++++++++++------
 external/cddl/osnet/dist/tools/ctf/cvt/tdata.c    |    7 +-
 external/cddl/osnet/sys/sys/opentypes.h           |    4 +
 tools/compat/configure                            |    1 +
 tools/compat/configure.ac                         |    9 +-
 tools/compat/nbtool_config.h.in                   |    7 +-
 8 files changed, 201 insertions(+), 73 deletions(-)

diffs (truncated from 593 to 300 lines):

diff -r 1f26e1277b27 -r cd44b60e4310 external/cddl/osnet/dist/tools/ctf/cvt/barrier.c
--- a/external/cddl/osnet/dist/tools/ctf/cvt/barrier.c  Tue Jun 27 18:56:25 2023 +0000
+++ b/external/cddl/osnet/dist/tools/ctf/cvt/barrier.c  Fri Jun 30 17:13:50 2023 +0000
@@ -42,6 +42,7 @@
  * get a return code of 0.
  */
 
+#include <errno.h>
 #include <pthread.h>
 #ifdef illumos
 #include <synch.h>
@@ -49,15 +50,22 @@
 #include <stdio.h>
 
 #include "barrier.h"
+#include "ctftools.h"
 
 void
 barrier_init(barrier_t *bar, int nthreads)
 {
-       pthread_mutex_init(&bar->bar_lock, NULL);
+       if ((errno = pthread_mutex_init(&bar->bar_lock, NULL)) != 0)
+               terminate("%s: pthread_mutex_init(bar_lock)", __func__);
 #ifdef illumos
-       sema_init(&bar->bar_sem, 0, USYNC_THREAD, NULL);
+       if ((errno = sema_init(&bar->bar_sem, 0, USYNC_THREAD, NULL)) != 0)
+               terminate("%s: sema_init(bar_sem)", __func__);
+#elif defined(HAVE_DISPATCH_SEMAPHORE_CREATE)
+       if ((bar->bar_sem = dispatch_semaphore_create(0)) == NULL)
+               terminate("%s: dispatch_semaphore_create()\n", __func__);
 #else
-       sem_init(&bar->bar_sem, 0, 0);
+       if (sem_init(&bar->bar_sem, 0, 0) == -1)
+               terminate("%s: sem_init(bar_sem)", __func__);
 #endif
 
        bar->bar_numin = 0;
@@ -67,14 +75,26 @@ barrier_init(barrier_t *bar, int nthread
 int
 barrier_wait(barrier_t *bar)
 {
-       pthread_mutex_lock(&bar->bar_lock);
+#if defined(HAVE_DISPATCH_SEMAPHORE_CREATE)
+       long error;
+#endif
+       if ((errno = pthread_mutex_lock(&bar->bar_lock)) != 0)
+               terminate("%s: pthread_mutex_lock(bar_lock)", __func__);
 
        if (++bar->bar_numin < bar->bar_nthr) {
-               pthread_mutex_unlock(&bar->bar_lock);
+               if ((errno = pthread_mutex_unlock(&bar->bar_lock)) != 0)
+                       terminate("%s: pthread_mutex_unlock(bar_lock)",
+                           __func__);
 #ifdef illumos
-               sema_wait(&bar->bar_sem);
+               if ((errno = sema_wait(&bar->bar_sem)) != 0)
+                       terminate("%s: sema_wait(bar_sem)", __func__);
+#elif defined(HAVE_DISPATCH_SEMAPHORE_CREATE)
+               if ((error = dispatch_semaphore_wait(bar->bar_sem, DISPATCH_TIME_FOREVER)) != 0)
+                       terminate("%s: dispatch_semaphore_wait(bar_sem) = %ld\n",
+                           __func__, error);
 #else
-               sem_wait(&bar->bar_sem);
+               if (sem_wait(&bar->bar_sem) == -1)
+                       terminate("%s: sem_wait(bar_sem)", __func__);
 #endif
 
                return (0);
@@ -84,13 +104,21 @@ barrier_wait(barrier_t *bar)
 
                /* reset for next use */
                bar->bar_numin = 0;
-               for (i = 1; i < bar->bar_nthr; i++)
+               for (i = 1; i < bar->bar_nthr; i++) {
 #ifdef illumos
-                       sema_post(&bar->bar_sem);
+                       if ((errno = sema_post(&bar->bar_sem)) != 0)
+                               terminate("%s: sema_post(bar_sem)", __func__);
+#elif defined(HAVE_DISPATCH_SEMAPHORE_CREATE)
+                       /* return value doesn't matter */
+                       dispatch_semaphore_signal(bar->bar_sem);
 #else
-                       sem_post(&bar->bar_sem);
+                       if (sem_post(&bar->bar_sem) == -1)
+                               terminate("%s: sem_post(bar_sem)", __func__);
 #endif
-               pthread_mutex_unlock(&bar->bar_lock);
+               }
+               if ((errno = pthread_mutex_unlock(&bar->bar_lock)) != 0)
+                       terminate("%s: pthread_mutex_unlock(bar_lock)",
+                           __func__);
 
                return (1);
        }
diff -r 1f26e1277b27 -r cd44b60e4310 external/cddl/osnet/dist/tools/ctf/cvt/barrier.h
--- a/external/cddl/osnet/dist/tools/ctf/cvt/barrier.h  Tue Jun 27 18:56:25 2023 +0000
+++ b/external/cddl/osnet/dist/tools/ctf/cvt/barrier.h  Fri Jun 30 17:13:50 2023 +0000
@@ -33,8 +33,15 @@
  * APIs for the barrier synchronization primitive.
  */
 
+#ifdef HAVE_NBTOOL_CONFIG_H
+#include "nbtool_config.h"
+#endif
+
 #ifdef illumos
 #include <synch.h>
+#elif defined(HAVE_DISPATCH_SEMAPHORE_CREATE)
+#include <dispatch/dispatch.h>
+typedef dispatch_semaphore_t sema_t;
 #else
 #include <semaphore.h>
 typedef sem_t  sema_t;
diff -r 1f26e1277b27 -r cd44b60e4310 external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c
--- a/external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c Tue Jun 27 18:56:25 2023 +0000
+++ b/external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c Fri Jun 30 17:13:50 2023 +0000
@@ -373,22 +373,31 @@ init_phase_two(workqueue_t *wq)
 static void
 wip_save_work(workqueue_t *wq, wip_t *slot, int slotnum)
 {
-       pthread_mutex_lock(&wq->wq_donequeue_lock);
+       if ((errno = pthread_mutex_lock(&wq->wq_donequeue_lock)) != 0)
+               terminate("%s: pthread_mutex_lock(wq_donequeue_lock)",
+                   __func__);
 
-       while (wq->wq_lastdonebatch + 1 < slot->wip_batchid)
-               pthread_cond_wait(&slot->wip_cv, &wq->wq_donequeue_lock);
+       while (wq->wq_lastdonebatch + 1 < slot->wip_batchid) {
+               if ((errno = pthread_cond_wait(&slot->wip_cv, &wq->wq_donequeue_lock)) != 0)
+                       terminate("%s: pthread_cond_wait(wip_cv,wq_donequeue_lock)",
+                           __func__);
+       }
        assert(wq->wq_lastdonebatch + 1 == slot->wip_batchid);
 
        fifo_add(wq->wq_donequeue, slot->wip_td);
        wq->wq_lastdonebatch++;
-       pthread_cond_signal(&wq->wq_wip[(slotnum + 1) %
-           wq->wq_nwipslots].wip_cv);
+       const int nextslot = (slotnum + 1) % wq->wq_nwipslots;
+       if ((errno = pthread_cond_signal(&wq->wq_wip[nextslot].wip_cv)) != 0)
+               terminate("%s: pthread_cond_signal(wq_wip[%d].wip_cv)",
+                   __func__, nextslot);
 
        /* reset the slot for next use */
        slot->wip_td = NULL;
        slot->wip_batchid = wq->wq_next_batchid++;
 
-       pthread_mutex_unlock(&wq->wq_donequeue_lock);
+       if ((errno = pthread_mutex_unlock(&wq->wq_donequeue_lock)) != 0)
+               terminate("%s: pthread_mutex_unlock(wq_donequeue_lock)",
+                   __func__);
 }
 
 static void
@@ -417,25 +426,35 @@ worker_runphase1(workqueue_t *wq)
        int wipslotnum, pownum;
 
        for (;;) {
-               pthread_mutex_lock(&wq->wq_queue_lock);
+               if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+                       terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+                           __func__);
 
                while (fifo_empty(wq->wq_queue)) {
                        if (wq->wq_nomorefiles == 1) {
-                               pthread_cond_broadcast(&wq->wq_work_avail);
-                               pthread_mutex_unlock(&wq->wq_queue_lock);
+                               if ((errno = pthread_cond_broadcast(&wq->wq_work_avail)) != 0)
+                                       terminate("%s: pthread_cond_broadcast(wq_work_avail)",
+                                           __func__);
+                               if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+                                       terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+                                           __func__);
 
                                /* on to phase 2 ... */
                                return;
                        }
 
-                       pthread_cond_wait(&wq->wq_work_avail,
-                           &wq->wq_queue_lock);
+                       if ((errno = pthread_cond_wait(&wq->wq_work_avail,
+                           &wq->wq_queue_lock)) != 0)
+                               terminate("%s: pthread_cond_wait(wq_work_avail,wq_queue_lock)",
+                                   __func__);
                }
 
                /* there's work to be done! */
                pow = fifo_remove(wq->wq_queue);
                pownum = wq->wq_nextpownum++;
-               pthread_cond_broadcast(&wq->wq_work_removed);
+               if ((errno = pthread_cond_broadcast(&wq->wq_work_removed)) != 0)
+                       terminate("%s: pthread_cond_broadcast(wq_work_removed)",
+                           __func__);
 
                assert(pow != NULL);
 
@@ -443,16 +462,21 @@ worker_runphase1(workqueue_t *wq)
                wipslotnum = pownum % wq->wq_nwipslots;
                wipslot = &wq->wq_wip[wipslotnum];
 
-               pthread_mutex_lock(&wipslot->wip_lock);
+               if ((errno = pthread_mutex_lock(&wipslot->wip_lock)) != 0)
+                       terminate("%s: pthread_mutex_lock(wip_lock)", __func__);
 
-               pthread_mutex_unlock(&wq->wq_queue_lock);
+               if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+                       terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+                           __func__);
 
                wip_add_work(wipslot, pow);
 
                if (wipslot->wip_nmerged == wq->wq_maxbatchsz)
                        wip_save_work(wq, wipslot, wipslotnum);
 
-               pthread_mutex_unlock(&wipslot->wip_lock);
+               if ((errno = pthread_mutex_unlock(&wipslot->wip_lock)) != 0)
+                       terminate("%s: pthread_mutex_unlock(wip_lock)",
+                           __func__);
        }
 }
 
@@ -463,28 +487,44 @@ worker_runphase2(workqueue_t *wq)
        int batchid;
 
        for (;;) {
-               pthread_mutex_lock(&wq->wq_queue_lock);
+               if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+                       terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+                           __func__);
 
                if (wq->wq_ninqueue == 1) {
-                       pthread_cond_broadcast(&wq->wq_work_avail);
-                       pthread_mutex_unlock(&wq->wq_queue_lock);
+                       if ((errno = pthread_cond_broadcast(&wq->wq_work_avail)) != 0)
+                           terminate("%s: pthread_cond_broadcast(wq_work_avail)",
+                               __func__);
+                       if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+                               terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+                                   __func__);
 
                        debug(2, "0x%jx: entering p2 completion barrier\n",
                            (uintmax_t)(uintptr_t)pthread_self());
                        if (barrier_wait(&wq->wq_bar1)) {
-                               pthread_mutex_lock(&wq->wq_queue_lock);
+                               if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+                                       terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+                                           __func__);
                                wq->wq_alldone = 1;
-                               pthread_cond_signal(&wq->wq_alldone_cv);
-                               pthread_mutex_unlock(&wq->wq_queue_lock);
+                               if ((errno = pthread_cond_signal(&wq->wq_alldone_cv)) != 0)
+                                       terminate("%s: pthread_cond_signal(wq_alldone_cv)",
+                                           __func__);
+                               if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+                                       terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+                                           __func__);
                        }
 
                        return;
                }
 
                if (fifo_len(wq->wq_queue) < 2) {
-                       pthread_cond_wait(&wq->wq_work_avail,
-                           &wq->wq_queue_lock);
-                       pthread_mutex_unlock(&wq->wq_queue_lock);
+                       if ((errno = pthread_cond_wait(&wq->wq_work_avail,
+                           &wq->wq_queue_lock)) != 0)
+                               terminate("%s: pthread_cond_wait(wq_work_avail,wq_queue_lock)",
+                                   __func__);
+                       if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+                               terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+                                   __func__);
                        continue;
                }
 
@@ -495,7 +535,9 @@ worker_runphase2(workqueue_t *wq)
 
                batchid = wq->wq_next_batchid++;
 
-               pthread_mutex_unlock(&wq->wq_queue_lock);
+               if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+                       terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+                           __func__);
 
                debug(2, "0x%jx: merging %p into %p\n",
                    (uintmax_t)(uintptr_t)pthread_self(),
@@ -507,10 +549,14 @@ worker_runphase2(workqueue_t *wq)
                 * merging is complete.  place at the tail of the queue in
                 * proper order.
                 */
-               pthread_mutex_lock(&wq->wq_queue_lock);
+               if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+                       terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+                           __func__);
                while (wq->wq_lastdonebatch + 1 != batchid) {
-                       pthread_cond_wait(&wq->wq_done_cv,
-                           &wq->wq_queue_lock);
+                       if ((errno = pthread_cond_wait(&wq->wq_done_cv,
+                           &wq->wq_queue_lock)) != 0)
+                               terminate("%s: pthread_cond_wait(wq_done_cv,wq_queue_lock)",


