Source-Changes-HG archive


[src/trunk]: src Add support for multiple threads in kcov(4)



details:   https://anonhg.NetBSD.org/src/rev/39ae13fa8907
branches:  trunk
changeset: 839884:39ae13fa8907
user:      kamil <kamil%NetBSD.org@localhost>
date:      Sun Mar 10 12:54:39 2019 +0000

description:
Add support for multiple threads in kcov(4)

Reuse the fd_clone() API to associate kcov descriptors (KD) with a file
descriptor. Each fd (/dev/kcov) can be used by a single LWP at a time.

Add new ATF regression tests and clean up the existing code there. All
tests pass.

Refresh the kcov(4) man page documentation.

Developed with help from <maxv>.
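
For context, here is a minimal userland sketch of how a multi-threaded
program might use the new per-LWP model: each thread opens its own
/dev/kcov file descriptor, maps it, and enables tracing for itself. It
follows the man page example updated in the diff below; the
KCOV_IOC_SETBUFSIZE ioctl and the pthread scaffolding are assumptions on
my part, since they are not visible in the truncated diff.

#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void *
trace_thread(void *arg)
{
	kcov_int_t *cover, i, n;
	uint64_t size = 1024 * 100;	/* entries, as in the man page example */
	int fd;

	(void)arg;

	/* Each thread opens and owns its own kcov descriptor. */
	fd = open("/dev/kcov", O_RDWR);
	if (fd == -1)
		err(1, "open");
	if (ioctl(fd, KCOV_IOC_SETBUFSIZE, &size) == -1)
		err(1, "ioctl: KCOV_IOC_SETBUFSIZE");
	cover = mmap(NULL, size * sizeof(kcov_int_t),
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		err(1, "mmap");

	/* Enabling binds the descriptor to the calling LWP. */
	if (ioctl(fd, KCOV_IOC_ENABLE) == -1)
		err(1, "ioctl: KCOV_IOC_ENABLE");
	KCOV_STORE(cover[0], 0);
	read(-1, NULL, 0);		/* syscall paths to be traced */
	n = KCOV_LOAD(cover[0]);

	/* Only the owning LWP may disable its descriptor. */
	if (ioctl(fd, KCOV_IOC_DISABLE) == -1)
		err(1, "ioctl: KCOV_IOC_DISABLE");
	for (i = 0; i < n; i++)
		printf("%jx\n", (uintmax_t)cover[i + 1]);

	munmap(cover, size * sizeof(kcov_int_t));
	close(fd);
	return NULL;
}

int
main(void)
{
	pthread_t t[2];

	pthread_create(&t[0], NULL, trace_thread, NULL);
	pthread_create(&t[1], NULL, trace_thread, NULL);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}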

diffstat:

 share/man/man4/kcov.4  |   37 +++++-
 sys/kern/subr_kcov.c   |  266 +++++++++++++++++++++++++++++-------------------
 tests/modules/t_kcov.c |  184 ++++++++++++++++++++++++++++++---
 3 files changed, 356 insertions(+), 131 deletions(-)

diffs (truncated from 800 to 300 lines):

diff -r f3b51f8ff698 -r 39ae13fa8907 share/man/man4/kcov.4
--- a/share/man/man4/kcov.4     Sun Mar 10 12:49:48 2019 +0000
+++ b/share/man/man4/kcov.4     Sun Mar 10 12:54:39 2019 +0000
@@ -1,4 +1,4 @@
-.\"    $NetBSD: kcov.4,v 1.2 2019/02/23 17:33:01 wiz Exp $
+.\"    $NetBSD: kcov.4,v 1.3 2019/03/10 12:54:39 kamil Exp $
 .\"
 .\" Copyright (c) 2018 Anton Lindqvist <anton%openbsd.org@localhost>
 .\"
@@ -14,7 +14,7 @@
 .\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 .\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 .\"
-.Dd November 16, 2018
+.Dd March 10, 2019
 .Dt KCOV 4
 .Os
 .Sh NAME
@@ -28,12 +28,35 @@
 The
 .Nm
 driver implements collection of code coverage inside the kernel.
-It can be enabled on a per process basis from userland,
+It can be enabled on a per thread basis from userland,
 allowing the kernel program counter to be collected during syscalls triggered by
-the same process.
+the same thread.
+.Pp
+The
+.Nm
+descriptors (KD) are allocated during
+.Xr open 2 ,
+and are associated with a file descriptor.
+A thread can enable the
+.Nm
+device.
+When this happens,
+this thread becomes the owner of the
+.Nm
+descriptors (KD),
+and no thread can disable this KD except the owner.
+.Pp
+A
+.Nm
+descriptor (KD)
+is freed when its file descriptor is closed iff the KD is not active on a thread.
+If it is,
+we ask the thread to free it when it exits.
+.Pp
 The collected coverage can be accessed by mapping the device
 using
 .Xr mmap 2 .
+The buffers are mapped without risk that the kernel frees a buffer still mapped in a process.
 .Pp
 By default,
 .Nm
@@ -94,7 +117,7 @@
 main(void)
 {
        kcov_int_t *cover, i, n;
-       kcov_int_t size = 1024 * 100;
+       uint64_t size = 1024 * 100;
        int fd;
 
        fd = open("/dev/kcov", O_RDWR);
@@ -108,9 +131,9 @@
                err(1, "mmap");
        if (ioctl(fd, KCOV_IOC_ENABLE) == -1)
                err(1, "ioctl: KCOV_IOC_ENABLE");
-       __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+       KCOV_STORE(cover[0], 0);
        read(-1, NULL, 0); /* syscall paths to be traced */
-       n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+       n = KCOV_LOAD(cover[0]);
        if (ioctl(fd, KCOV_IOC_DISABLE) == -1)
                err(1, "ioctl: KCOV_IOC_DISABLE");
        for (i = 0; i < cover[0]; i++)
diff -r f3b51f8ff698 -r 39ae13fa8907 sys/kern/subr_kcov.c
--- a/sys/kern/subr_kcov.c      Sun Mar 10 12:49:48 2019 +0000
+++ b/sys/kern/subr_kcov.c      Sun Mar 10 12:54:39 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr_kcov.c,v 1.3 2019/02/23 12:07:40 kamil Exp $      */
+/*     $NetBSD: subr_kcov.c,v 1.4 2019/03/10 12:54:39 kamil Exp $      */
 
 /*
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -38,7 +38,10 @@
 
 #include <sys/conf.h>
 #include <sys/condvar.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
 #include <sys/kmem.h>
+#include <sys/mman.h>
 #include <sys/mutex.h>
 #include <sys/queue.h>
 
@@ -47,28 +50,67 @@
 
 #define KCOV_BUF_MAX_ENTRIES   (256 << 10)
 
+static dev_type_open(kcov_open);
+
+const struct cdevsw kcov_cdevsw = {
+       .d_open = kcov_open,
+       .d_close = noclose,
+       .d_read = noread,
+       .d_write = nowrite,
+       .d_ioctl = noioctl,
+       .d_stop = nostop,
+       .d_tty = notty,
+       .d_poll = nopoll,
+       .d_mmap = nommap,
+       .d_kqfilter = nokqfilter,
+       .d_discard = nodiscard,
+       .d_flag = D_OTHER | D_MPSAFE
+};
+
+static int kcov_fops_ioctl(file_t *, u_long, void *);
+static int kcov_fops_close(file_t *);
+static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
+    struct uvm_object **, int *);
+
+const struct fileops kcov_fileops = {
+       .fo_read = fbadop_read,
+       .fo_write = fbadop_write,
+       .fo_ioctl = kcov_fops_ioctl,
+       .fo_fcntl = fnullop_fcntl,
+       .fo_poll = fnullop_poll,
+       .fo_stat = fbadop_stat,
+       .fo_close = kcov_fops_close,
+       .fo_kqfilter = fnullop_kqfilter,
+       .fo_restart = fnullop_restart,
+       .fo_mmap = kcov_fops_mmap,
+};
+
 /*
- * The KCOV descriptors are allocated during open(), and are associated with
- * the calling proc. They are freed lazily when their refcount reaches zero,
- * only when the process exits; this guarantees that kd->buf is not mmapped
- * in a currently running LWP. A KCOV descriptor is active on only one LWP
- * at the same time within the proc.
+ * The KCOV descriptors (KD) are allocated during open(), and are associated
+ * with a file descriptor.
+ *
+ * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
+ * the KD, and no LWP can 'disable' this KD except the owner.
  *
- * In the refcount, one ref is for the proc, and one ref is for the LWP where
- * the descriptor is active. In each case, the descriptor is pointed to in
- * the proc's and LWP's specificdata.
+ * A KD is freed when its file descriptor is closed _iff_ the KD is not active
+ * on an LWP. If it is, we ask the LWP to free it when it exits.
+ *
+ * The buffers mmapped are in a dedicated uobj, therefore there is no risk
+ * that the kernel frees a buffer still mmapped in a process: the uobj
+ * refcount will be non-zero, so the backing is not freed until an munmap
+ * occurs on said process.
  */
 
 typedef struct kcov_desc {
        kmutex_t lock;
-       int refcnt;
        kcov_int_t *buf;
+       struct uvm_object *uobj;
        size_t bufnent;
        size_t bufsize;
-       TAILQ_ENTRY(kcov_desc) entry;
+       bool enabled;
+       bool lwpfree;
 } kcov_t;
 
-static specificdata_key_t kcov_proc_key;
 static specificdata_key_t kcov_lwp_key;
 
 static void
@@ -76,7 +118,6 @@
 {
 
        mutex_enter(&kd->lock);
-       KASSERT(kd->refcnt > 0);
 }
 
 static void
@@ -87,60 +128,38 @@
 }
 
 static void
-kcov_lwp_take(kcov_t *kd)
+kcov_free(kcov_t *kd)
 {
 
-       kd->refcnt++;
-       KASSERT(kd->refcnt == 2);
-       lwp_setspecific(kcov_lwp_key, kd);
+       KASSERT(kd != NULL);
+       if (kd->buf != NULL) {
+               uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
+       }
+       mutex_destroy(&kd->lock);
+       kmem_free(kd, sizeof(*kd));
 }
 
 static void
-kcov_lwp_release(kcov_t *kd)
-{
-
-       KASSERT(kd->refcnt == 2);
-       kd->refcnt--;
-       lwp_setspecific(kcov_lwp_key, NULL);
-}
-
-static inline bool
-kcov_is_owned(kcov_t *kd)
-{
-
-       return (kd->refcnt > 1);
-}
-
-static void
-kcov_free(void *arg)
+kcov_lwp_free(void *arg)
 {
        kcov_t *kd = (kcov_t *)arg;
-       bool dofree;
 
        if (kd == NULL) {
                return;
        }
-
        kcov_lock(kd);
-       kd->refcnt--;
+       kd->enabled = false;
        kcov_unlock(kd);
-       dofree = (kd->refcnt == 0);
-
-       if (!dofree) {
-               return;
+       if (kd->lwpfree) {
+               kcov_free(kd);
        }
-       if (kd->buf != NULL) {
-               uvm_km_free(kernel_map, (vaddr_t)kd->buf, kd->bufsize,
-                   UVM_KMF_WIRED);
-       }
-       mutex_destroy(&kd->lock);
-       kmem_free(kd, sizeof(*kd));
 }
 
 static int
 kcov_allocbuf(kcov_t *kd, uint64_t nent)
 {
        size_t size;
+       int error;
 
        if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
                return EINVAL;
@@ -148,13 +167,25 @@
                return EEXIST;
 
        size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
-       kd->buf = (kcov_int_t *)uvm_km_alloc(kernel_map, size, 0,
-           UVM_KMF_WIRED|UVM_KMF_ZERO);
-       if (kd->buf == NULL)
-               return ENOMEM;
-
        kd->bufnent = nent - 1;
        kd->bufsize = size;
+       kd->uobj = uao_create(kd->bufsize, 0);
+
+       /* Map the uobj into the kernel address space, as wired. */
+       kd->buf = NULL;
+       error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
+           0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
+           UVM_ADV_RANDOM, 0));
+       if (error) {
+               uao_detach(kd->uobj);
+               return error;
+       }
+       error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
+           (vaddr_t)kd->buf + size, false, 0);
+       if (error) {
+               uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
+               return error;
+       }
 
        return 0;
 }
@@ -164,50 +195,63 @@
 static int
 kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
 {
-       struct proc *p = l->l_proc;
+       struct file *fp;
+       int error, fd;
        kcov_t *kd;
 
-       kd = proc_getspecific(p, kcov_proc_key);
-       if (kd != NULL)
-               return EBUSY;
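
The new fileops close handler falls past the 300-line cut-off of this
diff. Purely to illustrate the lifecycle described in the rewritten
comment block (a KD is freed on close() only if no LWP has it enabled;
otherwise the owning LWP frees it at exit), a hypothetical close path
could look like the sketch below. This is an assumption pieced together
from the visible kcov_t fields, not the committed code.

/*
 * Hypothetical sketch of kcov_fops_close(), not the committed version:
 * free the KD on close() only if no LWP has it enabled; otherwise set
 * lwpfree so that kcov_lwp_free() releases it when the owner exits.
 */
static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;	/* assuming the KD is hung off f_data */

	kcov_lock(kd);
	if (kd->enabled) {
		/* Still active on an LWP: defer the free to LWP exit. */
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}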


