Source-Changes-HG archive


[src/trunk]: src/sys/external/bsd/drm2/linux drm: Fence leak audit. No funct...



details:   https://anonhg.NetBSD.org/src/rev/0890a2efc285
branches:  trunk
changeset: 1028863:0890a2efc285
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Sun Dec 19 12:14:02 2021 +0000

description:
drm: Fence leak audit.  No functional change intended.

Sprinkle NULL assignments after fence references are released, add
KASSERTs to reflect them, and propagate their consequences to
eliminate dead code.  Should make it easier to detect similar leak
bugs.
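
For reference, the pattern the change applies throughout linux_dma_resv.c
can be summarized in a minimal sketch (illustrative only, not part of the
commit: example_test_excl is a hypothetical helper, while KASSERT,
rcu_read_lock/unlock, atomic_load_consume, and the dma_fence_* calls are
the ones visible in the diff below):

    /*
     * Sketch of the pattern: null out each fence pointer as soon as
     * its reference is dropped, and KASSERT that nothing is held at
     * every label a retry can jump to, so a leaked reference trips
     * an assertion immediately instead of going unnoticed.
     */
    static bool
    example_test_excl(const struct dma_resv *robj)
    {
            struct dma_fence *fence = NULL;
            bool signaled = true;

    top:    KASSERT(fence == NULL);         /* nothing held across retries */
            rcu_read_lock();

            /* Take a reference to the exclusive fence, if there is one.  */
            if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) {
                    if ((fence = dma_fence_get_rcu(fence)) == NULL)
                            goto restart;   /* it went away; nothing held */
                    signaled = dma_fence_is_signaled(fence);
                    dma_fence_put(fence);
                    fence = NULL;           /* paranoia: reference released */
            }

            rcu_read_unlock();
            return signaled;

    restart:
            KASSERT(fence == NULL);         /* a leak here would recur on every retry */
            rcu_read_unlock();
            goto top;
    }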

diffstat:

 sys/external/bsd/drm2/linux/linux_dma_resv.c |  152 +++++++++++++++++---------
 1 files changed, 101 insertions(+), 51 deletions(-)

diffs (truncated from 505 to 300 lines):

diff -r 609c84a5bf7c -r 0890a2efc285 sys/external/bsd/drm2/linux/linux_dma_resv.c
--- a/sys/external/bsd/drm2/linux/linux_dma_resv.c      Sun Dec 19 12:13:53 2021 +0000
+++ b/sys/external/bsd/drm2/linux/linux_dma_resv.c      Sun Dec 19 12:14:02 2021 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: linux_dma_resv.c,v 1.9 2021/12/19 12:13:53 riastradh Exp $     */
+/*     $NetBSD: linux_dma_resv.c,v 1.10 2021/12/19 12:14:02 riastradh Exp $    */
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.9 2021/12/19 12:13:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.10 2021/12/19 12:14:02 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/poll.h>
@@ -108,15 +108,22 @@
 {
        unsigned i;
 
-       if (robj->robj_prealloc)
+       if (robj->robj_prealloc) {
                objlist_free(robj->robj_prealloc);
+               robj->robj_prealloc = NULL; /* paranoia */
+       }
        if (robj->fence) {
-               for (i = 0; i < robj->fence->shared_count; i++)
+               for (i = 0; i < robj->fence->shared_count; i++) {
                        dma_fence_put(robj->fence->shared[i]);
+                       robj->fence->shared[i] = NULL; /* paranoia */
+               }
                objlist_free(robj->fence);
+               robj->fence = NULL; /* paranoia */
        }
-       if (robj->fence_excl)
+       if (robj->fence_excl) {
                dma_fence_put(robj->fence_excl);
+               robj->fence_excl = NULL; /* paranoia */
+       }
        ww_mutex_destroy(&robj->lock);
 }
 
@@ -472,13 +479,18 @@
        dma_resv_write_commit(robj, &ticket);
 
        /* Release the old exclusive fence, if any.  */
-       if (old_fence)
+       if (old_fence) {
                dma_fence_put(old_fence);
+               old_fence = NULL; /* paranoia */
+       }
 
        /* Release any old shared fences.  */
        if (old_list) {
-               while (old_shared_count--)
+               while (old_shared_count--) {
                        dma_fence_put(old_list->shared[old_shared_count]);
+                       /* paranoia */
+                       old_list->shared[old_shared_count] = NULL;
+               }
        }
 }
 
@@ -593,8 +605,10 @@
        }
 
        /* Release a fence if we replaced it.  */
-       if (replace)
+       if (replace) {
                dma_fence_put(replace);
+               replace = NULL; /* paranoia */
+       }
 }
 
 /*
@@ -621,13 +635,14 @@
 dma_resv_get_fences_rcu(const struct dma_resv *robj,
     struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
 {
-       const struct dma_resv_list *list;
-       struct dma_fence *fence;
+       const struct dma_resv_list *list = NULL;
+       struct dma_fence *fence = NULL;
        struct dma_fence **shared = NULL;
        unsigned shared_alloc, shared_count, i;
        struct dma_resv_read_ticket ticket;
 
-top:
+top:   KASSERT(fence == NULL);
+
        /* Enter an RCU read section and get a read ticket.  */
        rcu_read_lock();
        dma_resv_read_begin(robj, &ticket);
@@ -693,6 +708,7 @@
        }
 
        /* If there is an exclusive fence, grab it.  */
+       KASSERT(fence == NULL);
        fence = atomic_load_consume(&robj->fence_excl);
 
        /*
@@ -700,8 +716,10 @@
         * parking ticket.  If it's invalid, do not pass go and do not
         * collect $200.
         */
-       if (!dma_resv_read_valid(robj, &ticket))
+       if (!dma_resv_read_valid(robj, &ticket)) {
+               fence = NULL;
                goto restart;
+       }
 
        /*
         * Try to get a reference to the exclusive fence, if there is
@@ -735,10 +753,11 @@
        }
        if (fence) {
                dma_fence_put(fence);
-               fence = NULL;   /* paranoia */
+               fence = NULL;
        }
 
 restart:
+       KASSERT(fence == NULL);
        rcu_read_unlock();
        goto top;
 }
@@ -766,7 +785,8 @@
 
        KASSERT(dma_resv_held(dst_robj));
 
-top:
+top:   KASSERT(fence == NULL);
+
        /* Enter an RCU read section and get a read ticket.  */
        rcu_read_lock();
        dma_resv_read_begin(src_robj, &read_ticket);
@@ -792,6 +812,7 @@
                /* Copy over all fences that are not yet signalled.  */
                dst_list->shared_count = 0;
                for (i = 0; i < shared_count; i++) {
+                       KASSERT(fence == NULL);
                        fence = atomic_load_relaxed(&src_list->shared[i]);
                        if ((fence = dma_fence_get_rcu(fence)) == NULL)
                                goto restart;
@@ -806,6 +827,7 @@
        }
 
        /* Get the exclusive fence.  */
+       KASSERT(fence == NULL);
        if ((fence = atomic_load_consume(&src_robj->fence_excl)) != NULL) {
 
                /*
@@ -857,32 +879,35 @@
        dma_resv_write_commit(dst_robj, &write_ticket);
 
        /* Release the old exclusive fence, if any.  */
-       if (old_fence)
+       if (old_fence) {
                dma_fence_put(old_fence);
+               old_fence = NULL; /* paranoia */
+       }
 
        /* Release any old shared fences.  */
        if (old_list) {
-               for (i = old_list->shared_count; i --> 0;)
+               for (i = old_list->shared_count; i --> 0;) {
                        dma_fence_put(old_list->shared[i]);
+                       old_list->shared[i] = NULL; /* paranoia */
+               }
+               objlist_free(old_list);
+               old_list = NULL; /* paranoia */
        }
 
        /* Success!  */
        return 0;
 
 restart:
+       KASSERT(fence == NULL);
        rcu_read_unlock();
        if (dst_list) {
                for (i = dst_list->shared_count; i --> 0;) {
                        dma_fence_put(dst_list->shared[i]);
-                       dst_list->shared[i] = NULL;
+                       dst_list->shared[i] = NULL; /* paranoia */
                }
                objlist_free(dst_list);
                dst_list = NULL;
        }
-       if (fence) {
-               dma_fence_put(fence);
-               fence = NULL;
-       }
        goto top;
 }
 
@@ -903,19 +928,18 @@
 {
        struct dma_resv_read_ticket ticket;
        struct dma_resv_list *list;
-       struct dma_fence *fence;
+       struct dma_fence *fence = NULL;
        uint32_t i, shared_count;
        bool signaled = true;
 
-top:
+top:   KASSERT(fence == NULL);
+
        /* Enter an RCU read section and get a read ticket.  */
        rcu_read_lock();
        dma_resv_read_begin(robj, &ticket);
 
        /* If shared is requested and there is a shared list, test it.  */
-       if (!shared)
-               goto excl;
-       if ((list = atomic_load_consume(&robj->fence)) != NULL) {
+       if (shared && (list = atomic_load_consume(&robj->fence)) != NULL) {
 
                /* Find out how long it is.  */
                shared_count = atomic_load_relaxed(&list->shared_count);
@@ -934,19 +958,20 @@
                 * signalled.
                 */
                for (i = 0; i < shared_count; i++) {
+                       KASSERT(fence == NULL);
                        fence = atomic_load_relaxed(&list->shared[i]);
-                       fence = dma_fence_get_rcu(fence);
-                       if (fence == NULL)
+                       if ((fence = dma_fence_get_rcu(fence)) == NULL)
                                goto restart;
                        signaled &= dma_fence_is_signaled(fence);
                        dma_fence_put(fence);
+                       fence = NULL;
                        if (!signaled)
                                goto out;
                }
        }
 
-excl:
        /* If there is an exclusive fence, test it.  */
+       KASSERT(fence == NULL);
        if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) {
 
                /*
@@ -955,8 +980,10 @@
                 * XXX I'm not actually sure this is necessary since
                 * pointer writes are supposed to be atomic.
                 */
-               if (!dma_resv_read_valid(robj, &ticket))
+               if (!dma_resv_read_valid(robj, &ticket)) {
+                       fence = NULL;
                        goto restart;
+               }
 
                /*
                 * If it is going away, restart.  Otherwise, acquire a
@@ -966,14 +993,17 @@
                        goto restart;
                signaled &= dma_fence_is_signaled(fence);
                dma_fence_put(fence);
+               fence = NULL;
                if (!signaled)
                        goto out;
        }
 
-out:   rcu_read_unlock();
+out:   KASSERT(fence == NULL);
+       rcu_read_unlock();
        return signaled;
 
 restart:
+       KASSERT(fence == NULL);
        rcu_read_unlock();
        goto top;
 }
@@ -997,22 +1027,21 @@
 {
        struct dma_resv_read_ticket ticket;
        struct dma_resv_list *list;
-       struct dma_fence *fence;
+       struct dma_fence *fence = NULL;
        uint32_t i, shared_count;
        long ret;
 
        if (timeout == 0)
                return dma_resv_test_signaled_rcu(robj, shared);
 
-top:
+top:   KASSERT(fence == NULL);
+
        /* Enter an RCU read section and get a read ticket.  */
        rcu_read_lock();
        dma_resv_read_begin(robj, &ticket);
 
        /* If shared is requested and there is a shared list, wait on it.  */
-       if (!shared)
-               goto excl;
-       if ((list = atomic_load_consume(&robj->fence)) != NULL) {
+       if (shared && (list = atomic_load_consume(&robj->fence)) != NULL) {


