Source-Changes-HG archive


[src/trunk]: src/sys/external/bsd/drm2/dist/drm/nouveau Rewrite nouveau_fence...



details:   https://anonhg.NetBSD.org/src/rev/b0c1a972355b
branches:  trunk
changeset: 992287:b0c1a972355b
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Thu Aug 23 01:06:50 2018 +0000

description:
Rewrite nouveau_fence in an attempt to make it make sense.

PR kern/53441

XXX pullup-7
XXX pullup-8

diffstat:

 sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c      |  275 +++++++----
 sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h      |   18 +-
 sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c |   15 +-
 3 files changed, 194 insertions(+), 114 deletions(-)

diffs (truncated from 587 to 300 lines):

diff -r dd65649d1a30 -r b0c1a972355b sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c
--- a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c    Wed Aug 22 20:08:54 2018 +0000
+++ b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c    Thu Aug 23 01:06:50 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $      */
+/*     $NetBSD: nouveau_fence.c,v 1.5 2018/08/23 01:06:50 riastradh Exp $      */
 
 /*
  * Copyright (C) 2007 Ben Skeggs.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.5 2018/08/23 01:06:50 riastradh Exp $");
 
 #include <drm/drmP.h>
 
@@ -41,6 +41,12 @@
 
 #include <engine/fifo.h>
 
+/*
+ * struct fence_work
+ *
+ *     State for a work action scheduled when a fence is completed.
+ *     Will call func(data) at some point after that happens.
+ */
 struct fence_work {
        struct work_struct base;
        struct list_head head;
@@ -48,77 +54,119 @@
        void *data;
 };
 
+/*
+ * nouveau_fence_signal(fence)
+ *
+ *     Schedule all the work for fence's completion, and mark it done.
+ *
+ *     Caller must hold fence's channel's fence lock.
+ */
 static void
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+       struct nouveau_channel *chan __diagused = fence->channel;
+       struct nouveau_fence_chan *fctx __diagused = chan->fence;
        struct fence_work *work, *temp;
 
+       BUG_ON(!spin_is_locked(&fctx->lock));
+       BUG_ON(fence->done);
+
+       /* Schedule all the work for this fence.  */
        list_for_each_entry_safe(work, temp, &fence->work, head) {
                schedule_work(&work->base);
                list_del(&work->head);
        }
 
-       fence->channel = NULL;
+       /* Note that the fence is done.  */
+       fence->done = true;
        list_del(&fence->head);
 }
 
+/*
+ * nouveau_fence_context_del(fctx)
+ *
+ *     Artificially complete all fences in fctx, wait for their work
+ *     to drain, and destroy the memory associated with fctx.
+ */
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence, *fnext;
+
+       /* Signal all the fences in fctx.  */
        spin_lock(&fctx->lock);
        list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
                nouveau_fence_signal(fence);
+               /* XXX Doesn't this leak fence?  */
        }
        spin_unlock(&fctx->lock);
+
+       /* Wait for the workqueue to drain.  */
+       flush_scheduled_work();
+
+       /* Destroy the fence context.  */
+       DRM_DESTROY_WAITQUEUE(&fctx->waitqueue);
        spin_lock_destroy(&fctx->lock);
 }
 
+/*
+ * nouveau_fence_context_new(fctx)
+ *
+ *     Initialize the state fctx for all fences on a channel.
+ */
 void
 nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 {
+
        INIT_LIST_HEAD(&fctx->flip);
        INIT_LIST_HEAD(&fctx->pending);
        spin_lock_init(&fctx->lock);
+       DRM_INIT_WAITQUEUE(&fctx->waitqueue, "nvfnchan");
 }
 
+/*
+ * nouveau_fence_work_handler(kwork)
+ *
+ *     Work handler for nouveau_fence_work.
+ */
 static void
 nouveau_fence_work_handler(struct work_struct *kwork)
 {
        struct fence_work *work = container_of(kwork, typeof(*work), base);
+
        work->func(work->data);
        kfree(work);
 }
 
+/*
+ * nouveau_fence_work(fence, func, data)
+ *
+ *     Arrange to call func(data) after fence is completed.  If fence
+ *     is already completed, call it immediately.  If memory is
+ *     scarce, synchronously wait for the fence and call it.
+ */
 void
 nouveau_fence_work(struct nouveau_fence *fence,
                   void (*func)(void *), void *data)
 {
        struct nouveau_channel *chan = fence->channel;
-       struct nouveau_fence_chan *fctx;
+       struct nouveau_fence_chan *fctx = chan->fence;
        struct fence_work *work = NULL;
 
-       if (nouveau_fence_done(fence)) {
-               func(data);
-               return;
-       }
-
-       fctx = chan->fence;
        work = kmalloc(sizeof(*work), GFP_KERNEL);
-       if (!work) {
+       if (work == NULL) {
                WARN_ON(nouveau_fence_wait(fence, false, false));
                func(data);
                return;
        }
 
        spin_lock(&fctx->lock);
-       if (!fence->channel) {
+       if (fence->done) {
                spin_unlock(&fctx->lock);
                kfree(work);
                func(data);
                return;
        }
-
        INIT_WORK(&work->base, nouveau_fence_work_handler);
        work->func = func;
        work->data = data;
@@ -126,23 +174,39 @@
        spin_unlock(&fctx->lock);
 }
 
+/*
+ * nouveau_fence_update(chan)
+ *
+ *     Test all fences on chan for completion.  For any that are
+ *     completed, mark them as such and schedule work for them.
+ *
+ *     Caller must hold chan's fence lock.
+ */
 static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
        struct nouveau_fence_chan *fctx = chan->fence;
        struct nouveau_fence *fence, *fnext;
 
-       spin_lock(&fctx->lock);
+       BUG_ON(!spin_is_locked(&fctx->lock));
        list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
                if (fctx->read(chan) < fence->sequence)
                        break;
-
                nouveau_fence_signal(fence);
                nouveau_fence_unref(&fence);
        }
-       spin_unlock(&fctx->lock);
+       BUG_ON(!spin_is_locked(&fctx->lock));
 }
 
+/*
+ * nouveau_fence_emit(fence, chan)
+ *
+ *     - Initialize fence.
+ *     - Set its timeout to 15 sec from now.
+ *     - Assign it the next sequence number on channel.
+ *     - Submit it to the device with the device-specific emit routine.
+ *     - If that succeeds, add it to the list of pending fences on chan.
+ */
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
@@ -151,7 +215,9 @@
 
        fence->channel  = chan;
        fence->timeout  = jiffies + (15 * HZ);
+       spin_lock(&fctx->lock);
        fence->sequence = ++fctx->sequence;
+       spin_unlock(&fctx->lock);
 
        ret = fctx->emit(fence);
        if (!ret) {
@@ -164,42 +230,90 @@
        return ret;
 }
 
+/*
+ * nouveau_fence_done_locked(fence, chan)
+ *
+ *     Test whether fence, which must be on chan, is done.  If it is
+ *     not marked as done, poll all fences on chan first.
+ *
+ *     Caller must hold chan's fence lock.
+ */
+static bool
+nouveau_fence_done_locked(struct nouveau_fence *fence,
+    struct nouveau_channel *chan)
+{
+       struct nouveau_fence_chan *fctx __diagused = chan->fence;
+
+       BUG_ON(!spin_is_locked(&fctx->lock));
+       BUG_ON(fence->channel != chan);
+
+       /* If it's not done, poll it for changes.  */
+       if (!fence->done)
+               nouveau_fence_update(chan);
+
+       /* Check, possibly again, whether it is done now.  */
+       return fence->done;
+}
+
+/*
+ * nouveau_fence_done(fence)
+ *
+ *     Test whether fence is done.  If it is not marked as done, poll
+ *     all fences on its channel first.  Caller MUST NOT hold the
+ *     fence lock.
+ */
 bool
 nouveau_fence_done(struct nouveau_fence *fence)
 {
-       if (fence->channel)
-               nouveau_fence_update(fence->channel);
-       return !fence->channel;
+       struct nouveau_channel *chan = fence->channel;
+       struct nouveau_fence_chan *fctx = chan->fence;
+       bool done;
+
+       spin_lock(&fctx->lock);
+       done = nouveau_fence_done_locked(fence, chan);
+       spin_unlock(&fctx->lock);
+
+       return done;
 }
 
+/*
+ * nouveau_fence_wait_uevent_handler(data, index)
+ *
+ *     Nouveau uevent handler for fence completion.  data is a
+ *     nouveau_fence_chan pointer.  Simply wake up all threads waiting
+ *     for completion of any fences on the channel.  Does not mark
+ *     fences as completed -- threads must poll fences for completion.
+ */
 static int
 nouveau_fence_wait_uevent_handler(void *data, int index)
 {
-       struct nouveau_fence_priv *priv = data;
-#ifdef __NetBSD__
-       spin_lock(&priv->waitlock);
-       /* XXX Set a flag...  */
-       DRM_SPIN_WAKEUP_ALL(&priv->waitqueue, &priv->waitlock);
-       spin_unlock(&priv->waitlock);
-#else
-       wake_up_all(&priv->waiting);
-#endif
+       struct nouveau_fence_chan *fctx = data;
+
+       spin_lock(&fctx->lock);
+       DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue, &fctx->lock);
+       spin_unlock(&fctx->lock);
+
        return NVKM_EVENT_KEEP;
 }
 
+/*
+ * nouveau_fence_wait_uevent(fence, intr)
+ *



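The core pattern visible in the (truncated) diff above: completion is now tracked with an explicit fence->done flag set under the per-channel fence lock, and waiters block on a per-channel waitqueue that the uevent handler wakes.  The following is a minimal userspace sketch of that pattern using pthreads rather than the kernel primitives -- the names and types are simplified stand-ins, not the driver's own code, and signaling plus wakeup are condensed into one function here (the driver splits them between nouveau_fence_signal and the uevent handler):

    #include <pthread.h>
    #include <stdbool.h>

    struct fence_chan {                     /* analogue of nouveau_fence_chan */
            pthread_mutex_t lock;           /* fctx->lock */
            pthread_cond_t  waitqueue;      /* fctx->waitqueue */
    };

    struct fence {                          /* analogue of nouveau_fence */
            struct fence_chan *fctx;
            bool done;                      /* fence->done */
    };

    /*
     * Mark the fence complete and wake all waiters.  Caller holds
     * fctx->lock, matching the locking rule the rewrite enforces.
     */
    static void
    fence_signal_locked(struct fence *fence)
    {
            fence->done = true;
            pthread_cond_broadcast(&fence->fctx->waitqueue);
    }

    /* Block until the fence is done, rechecking the flag under the lock. */
    static void
    fence_wait(struct fence *fence)
    {
            pthread_mutex_lock(&fence->fctx->lock);
            while (!fence->done)
                    pthread_cond_wait(&fence->fctx->waitqueue,
                        &fence->fctx->lock);
            pthread_mutex_unlock(&fence->fctx->lock);
    }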