Source-Changes-HG archive


[src/netbsd-8]: src/sys/external/bsd/drm2/dist/drm/nouveau Pull up following ...



details:   https://anonhg.NetBSD.org/src/rev/0e43c95feacf
branches:  netbsd-8
changeset: 435225:0e43c95feacf
user:      martin <martin%NetBSD.org@localhost>
date:      Fri Aug 31 17:35:51 2018 +0000

description:
Pull up following revision(s) (requested by riastradh in ticket #996):

        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c: revision 1.3
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h: revision 1.3
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h: revision 1.4
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h: revision 1.5
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.5
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.6
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.7
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.8
        sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.9

Rewrite nouveau_fence in an attempt to make it make sense.
PR kern/53441
XXX pullup-7
XXX pullup-8


Fences may last longer than their channels.
- Use a reference count on the nouveau_fence_chan object.
- Acquire it with kpreemption disabled.
- Use xcall to wait for kpreempt-disabled sections to complete (a
  standalone sketch of this pattern follows below).
PR kern/53441
XXX pullup-7
XXX pullup-8
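
The acquire/drain pattern behind these three bullets can be sketched on
its own.  What follows is a minimal, hypothetical illustration using the
NetBSD kernel primitives involved; obj, obj_acquire and obj_drain are
made-up names, not the committed code (the real versions are
nouveau_fence_channel_acquire and nouveau_fence_context_del in the diff
below):

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/systm.h>
#include <sys/xcall.h>

/* Hypothetical refcounted object whose backing state can be torn down. */
struct obj {
        volatile unsigned refcnt;       /* released elsewhere, not shown */
        bool dying;                     /* set once by the tear-down path */
};

/*
 * Take a reference, or fail if tear-down has begun.  While kernel
 * preemption is disabled, the tear-down cross-call cannot complete,
 * so the object cannot vanish between the check and the increment.
 */
static bool
obj_acquire(struct obj *o)
{
        bool ok;

        kpreempt_disable();
        if ((ok = !o->dying))
                atomic_inc_uint(&o->refcnt);
        kpreempt_enable();

        return ok;
}

static void
obj_drain_xc(void *a, void *b)
{
        /* Nothing to do: the broadcast itself is the serialization. */
}

/*
 * Begin tear-down.  Once the broadcast cross-call has run everywhere,
 * every CPU has passed through a preemption point, so no CPU can still
 * be inside obj_acquire's kpreempt-disabled section and no new
 * reference can be taken.  The caller then waits for refcnt to drain
 * to zero.
 */
static void
obj_drain(struct obj *o)
{
        o->dying = true;
        xc_wait(xc_broadcast(0, obj_drain_xc, NULL, NULL));
}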


Defer nouveau_fence_unref until spin unlock.
- kfree while holding a spin lock is not a good idea.
- Make sure we GC every time we might signal fences (see the sketch
  below).
PR kern/53441
XXX pullup-7
XXX pullup-8
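
The defer-until-unlock idea is a general pattern: move the dead entries
to a local list while the spin lock is held, then kfree them only after
the lock is released.  A minimal sketch, assuming the Linux-style list
and spinlock compatibility APIs this driver already uses; pool, item and
pool_reap are illustrative names (the committed versions are
nouveau_fence_gc_grab and nouveau_fence_gc_free in the diff below):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
        struct list_head head;
};

struct pool {
        spinlock_t lock;
        struct list_head done;          /* entries awaiting kfree */
};

/*
 * Reap everything on the done list.  The entries are grabbed under
 * the lock, but kfree runs only after the lock has been dropped.
 */
static void
pool_reap(struct pool *p)
{
        struct item *it, *next;
        struct list_head reap;

        INIT_LIST_HEAD(&reap);

        /* Grab the done entries while holding the spin lock. */
        spin_lock(&p->lock);
        list_for_each_entry_safe(it, next, &p->done, head)
                list_move_tail(&it->head, &reap);
        spin_unlock(&p->lock);

        /* Now it is safe to free them. */
        list_for_each_entry_safe(it, next, &reap, head) {
                list_del(&it->head);
                kfree(it);
        }
}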


Attempt to make sense of return values of nouveau_fence_wait.
PR kern/53441
XXX pullup-7
XXX pullup-8


Fix edge case of reference counting, oops.
PR kern/53441
XXX pullup-7
XXX pullup-8

diffstat:

 sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c      |  486 +++++++++--
 sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h      |   20 +-
 sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c |   15 +-
 3 files changed, 389 insertions(+), 132 deletions(-)

diffs (truncated from 811 to 300 lines):

diff -r 95a07099dc9c -r 0e43c95feacf sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c
--- a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c    Tue Aug 28 16:25:19 2018 +0000
+++ b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c    Fri Aug 31 17:35:51 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $      */
+/*     $NetBSD: nouveau_fence.c,v 1.4.10.1 2018/08/31 17:35:51 martin Exp $    */
 
 /*
  * Copyright (C) 2007 Ben Skeggs.
@@ -27,7 +27,10 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4.10.1 2018/08/31 17:35:51 martin Exp $");
+
+#include <sys/types.h>
+#include <sys/xcall.h>
 
 #include <drm/drmP.h>
 
@@ -41,6 +44,12 @@
 
 #include <engine/fifo.h>
 
+/*
+ * struct fence_work
+ *
+ *     State for a work action scheduled when a fence is completed.
+ *     Will call func(data) at some point after that happens.
+ */
 struct fence_work {
        struct work_struct base;
        struct list_head head;
@@ -48,101 +57,291 @@
        void *data;
 };
 
+/*
+ * nouveau_fence_channel_acquire(fence)
+ *
+ *     Try to return the channel associated with fence.
+ */
+static struct nouveau_channel *
+nouveau_fence_channel_acquire(struct nouveau_fence *fence)
+{
+       struct nouveau_channel *chan;
+       struct nouveau_fence_chan *fctx;
+
+       /*
+        * Block cross-calls while we examine fence.  If we observe
+        * that fence->done is false, then the channel cannot be
+        * destroyed even by another CPU until after kpreempt_enable.
+        */
+       kpreempt_disable();
+       if (fence->done) {
+               chan = NULL;
+       } else {
+               chan = fence->channel;
+               fctx = chan->fence;
+               atomic_inc_uint(&fctx->refcnt);
+       }
+       kpreempt_enable();
+
+       return chan;
+}
+
+/*
+ * nouveau_fence_gc_grab(fctx, list)
+ *
+ *     Move all of channel's done fences to list.
+ *
+ *     Caller must hold channel's fence lock.
+ */
+static void
+nouveau_fence_gc_grab(struct nouveau_fence_chan *fctx, struct list_head *list)
+{
+       struct list_head *node, *next;
+
+       BUG_ON(!spin_is_locked(&fctx->lock));
+
+       list_for_each_safe(node, next, &fctx->done) {
+               list_move_tail(node, list);
+       }
+}
+
+/*
+ * nouveau_fence_gc_free(list)
+ *
+ *     Unreference all of the fences in the list.
+ *
+ *     Caller MUST NOT hold the fences' channel's fence lock.
+ */
+static void
+nouveau_fence_gc_free(struct list_head *list)
+{
+       struct nouveau_fence *fence, *next;
+
+       list_for_each_entry_safe(fence, next, list, head) {
+               list_del(&fence->head);
+               nouveau_fence_unref(&fence);
+       }
+}
+
+/*
+ * nouveau_fence_channel_release(channel)
+ *
+ *     Release the channel acquired with nouveau_fence_channel_acquire.
+ */
+static void
+nouveau_fence_channel_release(struct nouveau_channel *chan)
+{
+       struct nouveau_fence_chan *fctx = chan->fence;
+       unsigned old, new;
+
+       do {
+               old = fctx->refcnt;
+               if (old == 1) {
+                       spin_lock(&fctx->lock);
+                       if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
+                               DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue,
+                                   &fctx->lock);
+                       spin_unlock(&fctx->lock);
+                       return;
+               }
+               new = old - 1;
+       } while (atomic_cas_uint(&fctx->refcnt, old, new) != old);
+}
+
+/*
+ * nouveau_fence_signal(fence)
+ *
+ *     Schedule all the work for fence's completion, mark it done, and
+ *     move it from the pending list to the done list.
+ *
+ *     Caller must hold fence's channel's fence lock.
+ */
 static void
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+       struct nouveau_channel *chan __diagused = fence->channel;
+       struct nouveau_fence_chan *fctx __diagused = chan->fence;
        struct fence_work *work, *temp;
 
+       BUG_ON(!spin_is_locked(&fctx->lock));
+       BUG_ON(fence->done);
+
+       /* Schedule all the work for this fence.  */
        list_for_each_entry_safe(work, temp, &fence->work, head) {
                schedule_work(&work->base);
                list_del(&work->head);
        }
 
-       fence->channel = NULL;
-       list_del(&fence->head);
+       /* Note that the fence is done.  */
+       fence->done = true;
+
+       /* Move it from the pending list to the done list.  */
+       list_move_tail(&fence->head, &fctx->done);
 }
 
+static void
+nouveau_fence_context_del_xc(void *a, void *b)
+{
+}
+
+/*
+ * nouveau_fence_context_del(fctx)
+ *
+ *     Artificially complete all fences in fctx, wait for their work
+ *     to drain, and destroy the memory associated with fctx.
+ */
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence, *fnext;
+       struct list_head done_list;
+       int ret __diagused;
+
+       INIT_LIST_HEAD(&done_list);
+
+       /* Signal all the fences in fctx.  */
        spin_lock(&fctx->lock);
        list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
                nouveau_fence_signal(fence);
        }
+       nouveau_fence_gc_grab(fctx, &done_list);
        spin_unlock(&fctx->lock);
+
+       /* Release any fences that we signalled.  */
+       nouveau_fence_gc_free(&done_list);
+
+       /* Wait for the workqueue to drain.  */
+       flush_scheduled_work();
+
+       /* Wait for nouveau_fence_channel_acquire to complete on all CPUs.  */
+       xc_wait(xc_broadcast(0, nouveau_fence_context_del_xc, NULL, NULL));
+
+       /* Release our reference and wait for any others to drain.  */
+       spin_lock(&fctx->lock);
+       KASSERT(fctx->refcnt > 0);
+       atomic_dec_uint(&fctx->refcnt);
+       DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &fctx->waitqueue, &fctx->lock,
+           fctx->refcnt == 0);
+       BUG_ON(ret);
+       spin_unlock(&fctx->lock);
+
+       /* Make sure there are no more fences on the list.  */
+       BUG_ON(!list_empty(&fctx->done));
+       BUG_ON(!list_empty(&fctx->flip));
+       BUG_ON(!list_empty(&fctx->pending));
+
+       /* Destroy the fence context.  */
+       DRM_DESTROY_WAITQUEUE(&fctx->waitqueue);
        spin_lock_destroy(&fctx->lock);
 }
 
+/*
+ * nouveau_fence_context_new(fctx)
+ *
+ *     Initialize the state fctx for all fences on a channel.
+ */
 void
 nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 {
+
        INIT_LIST_HEAD(&fctx->flip);
        INIT_LIST_HEAD(&fctx->pending);
+       INIT_LIST_HEAD(&fctx->done);
        spin_lock_init(&fctx->lock);
+       DRM_INIT_WAITQUEUE(&fctx->waitqueue, "nvfnchan");
+       fctx->refcnt = 1;
 }
 
+/*
+ * nouveau_fence_work_handler(kwork)
+ *
+ *     Work handler for nouveau_fence_work.
+ */
 static void
 nouveau_fence_work_handler(struct work_struct *kwork)
 {
        struct fence_work *work = container_of(kwork, typeof(*work), base);
+
        work->func(work->data);
        kfree(work);
 }
 
+/*
+ * nouveau_fence_work(fence, func, data)
+ *
+ *     Arrange to call func(data) after fence is completed.  If fence
+ *     is already completed, call it immediately.  If memory is
+ *     scarce, synchronously wait for the fence and call it.
+ */
 void
 nouveau_fence_work(struct nouveau_fence *fence,
                   void (*func)(void *), void *data)
 {
-       struct nouveau_channel *chan = fence->channel;
+       struct nouveau_channel *chan;
        struct nouveau_fence_chan *fctx;
        struct fence_work *work = NULL;
 
-       if (nouveau_fence_done(fence)) {
-               func(data);
-               return;
-       }
-
+       if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
+               goto now0;
        fctx = chan->fence;
+
        work = kmalloc(sizeof(*work), GFP_KERNEL);
-       if (!work) {
+       if (work == NULL) {
                WARN_ON(nouveau_fence_wait(fence, false, false));
-               func(data);
-               return;
+               goto now1;
        }
 
        spin_lock(&fctx->lock);
-       if (!fence->channel) {
+       if (fence->done) {
                spin_unlock(&fctx->lock);
-               kfree(work);
-               func(data);
-               return;
+               goto now2;
        }
-
        INIT_WORK(&work->base, nouveau_fence_work_handler);
        work->func = func;
        work->data = data;
        list_add(&work->head, &fence->work);
+       if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
+               DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue, &fctx->lock);
        spin_unlock(&fctx->lock);


