NetBSD-Bugs archive
Re: kern/53441: nouveau panic in 8.0_RC2 amd64
The following reply was made to PR kern/53441; it has been noted by GNATS.
From: Taylor R Campbell <campbell%mumble.net@localhost>
To: Greg Oster <oster%NetBSD.org@localhost>
Cc: gnats-bugs%NetBSD.org@localhost
Subject: Re: kern/53441: nouveau panic in 8.0_RC2 amd64
Date: Thu, 16 Aug 2018 05:37:54 +0000
Please revert the previous patch, and try the attached patch instead.
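For anyone following along: as I read the attached patch, the point is to close the race where a channel's fence state is torn down while another thread is still looking at a fence on that channel.  Callers no longer dereference fence->channel bare; they acquire a reference on the channel's fence state first, and the teardown path waits for all such references to drain before freeing anything.  Below is a rough user-space sketch of that pattern, not the kernel code itself: the names (chan_ctx, chan_acquire, and so on) are invented for illustration, and a pthread mutex/condvar stands in for the kpreempt_disable()/xc_wait() trick and DRM waitqueue that the real patch uses.

/*
 * Rough user-space analogue of the acquire/release scheme in the
 * patch.  Names here are invented for illustration; the kernel code
 * uses kpreempt_disable()/xc_wait() and a DRM waitqueue instead of a
 * pthread mutex and condition variable.
 */
#include <pthread.h>
#include <stdbool.h>

struct chan_ctx {
	pthread_mutex_t lock;
	pthread_cond_t waitqueue;
	unsigned refcnt;	/* outstanding acquisitions */
	bool dying;		/* set once teardown has begun */
};

/* Take a reference, unless the context is already being torn down. */
static bool
chan_acquire(struct chan_ctx *ctx)
{
	bool ok = false;

	pthread_mutex_lock(&ctx->lock);
	if (!ctx->dying) {
		ctx->refcnt++;
		ok = true;
	}
	pthread_mutex_unlock(&ctx->lock);

	return ok;
}

/* Drop a reference; wake the destroyer when the last one goes away. */
static void
chan_release(struct chan_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (--ctx->refcnt == 0)
		pthread_cond_broadcast(&ctx->waitqueue);
	pthread_mutex_unlock(&ctx->lock);
}

/* Teardown: forbid new references, then wait for the rest to drain. */
static void
chan_destroy(struct chan_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->dying = true;
	while (ctx->refcnt != 0)
		pthread_cond_wait(&ctx->waitqueue, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
	/* Now it is safe to free the channel's fence state. */
}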
Content-Type: text/plain; charset="ISO-8859-1"; name="53441-v2"
Content-Disposition: attachment; filename="53441-v2.patch"
diff --git a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c
index 2a83285e07da..da0864f2f13d 100644
--- a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c
+++ b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c
@@ -29,6 +29,9 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $");

+#include <sys/types.h>
+#include <sys/xcall.h>
+
#include <drm/drmP.h>

#include <asm/param.h>
@@ -41,6 +44,12 @@ __KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh

#include <engine/fifo.h>

+/*
+ * struct fence_work
+ *
+ * State for a work action scheduled when a fence is completed.
+ * Will call func(data) at some point after that happens.
+ */
struct fence_work {
struct work_struct base;
struct list_head head;
@@ -48,101 +57,289 @@ struct fence_work {
void *data;
};

+/*
+ * nouveau_fence_channel_acquire(fence)
+ *
+ * Try to return the channel associated with fence.
+ */
+static struct nouveau_channel *
+nouveau_fence_channel_acquire(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan;
+ struct nouveau_fence_chan *fctx;
+
+ /*
+ * Block cross-calls while we examine fence. If we observe
+ * that fence->done is false, then the channel cannot be
+ * destroyed even by another CPU until after kpreempt_enable.
+ */
+ kpreempt_disable();
+ if (fence->done) {
+ chan = NULL;
+ } else {
+ chan = fence->channel;
+ fctx = chan->fence;
+ atomic_inc_uint(&fctx->refcnt);
+ }
+ kpreempt_enable();
+
+ return chan;
+}
+
+/*
+ * nouveau_fence_gc_grab(fctx, list)
+ *
+ * Move all of channel's done fences to list.
+ *
+ * Caller must hold channel's fence lock.
+ */
+static void
+nouveau_fence_gc_grab(struct nouveau_fence_chan *fctx, struct list_head *list)
+{
+ struct list_head *node, *next;
+
+ BUG_ON(!spin_is_locked(&fctx->lock));
+
+ list_for_each_safe(node, next, &fctx->done) {
+ list_move_tail(node, list);
+ }
+}
+
+/*
+ * nouveau_fence_gc_free(list)
+ *
+ * Unreference all of the fences in the list.
+ *
+ * Caller MUST NOT hold the fences' channel's fence lock.
+ */
+static void
+nouveau_fence_gc_free(struct list_head *list)
+{
+ struct nouveau_fence *fence, *next;
+
+ list_for_each_entry_safe(fence, next, list, head) {
+ list_del(&fence->head);
+ nouveau_fence_unref(&fence);
+ }
+}
+
+/*
+ * nouveau_fence_channel_release(channel)
+ *
+ * Release the channel acquired with nouveau_fence_channel_acquire.
+ */
+static void
+nouveau_fence_channel_release(struct nouveau_channel *chan)
+{
+ struct nouveau_fence_chan *fctx = chan->fence;
+ unsigned old, new;
+
+ do {
+ old = fctx->refcnt;
+ if (old == 0) {
+ spin_lock(&fctx->lock);
+ if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
+ DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue,
+ &fctx->lock);
+ spin_unlock(&fctx->lock);
+ return;
+ }
+ new = old - 1;
+ } while (atomic_cas_uint(&fctx->refcnt, old, new) != old);
+}
+
+/*
+ * nouveau_fence_signal(fence)
+ *
+ * Schedule all the work for fence's completion, mark it done, and
+ * move it from the pending list to the done list.
+ *
+ * Caller must hold fence's channel's fence lock.
+ */
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
+ struct nouveau_channel *chan __diagused = fence->channel;
+ struct nouveau_fence_chan *fctx __diagused = chan->fence;
struct fence_work *work, *temp;

+ BUG_ON(!spin_is_locked(&fctx->lock));
+ BUG_ON(fence->done);
+
+ /* Schedule all the work for this fence. */
list_for_each_entry_safe(work, temp, &fence->work, head) {
schedule_work(&work->base);
list_del(&work->head);
}

- fence->channel = NULL;
- list_del(&fence->head);
+ /* Note that the fence is done. */
+ fence->done = true;
+
+ /* Move it from the pending list to the done list. */
+ list_move_tail(&fence->head, &fctx->done);
+}
+
+static void
+nouveau_fence_context_del_xc(void *a, void *b)
+{
}

+/*
+ * nouveau_fence_context_del(fctx)
+ *
+ * Artificially complete all fences in fctx, wait for their work
+ * to drain, and destroy the memory associated with fctx.
+ */
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence, *fnext;
+ struct list_head done_list;
+ int ret __diagused;
+
+ INIT_LIST_HEAD(&done_list);
+
+ /* Signal all the fences in fctx. */
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
nouveau_fence_signal(fence);
}
+ nouveau_fence_gc_grab(fctx, &done_list);
+ spin_unlock(&fctx->lock);
+
+ /* Release any fences that we signalled. */
+ nouveau_fence_gc_free(&done_list);
+
+ /* Wait for the workqueue to drain. */
+ flush_scheduled_work();
+
+ /* Wait for nouveau_fence_channel_acquire to complete on all CPUs. */
+ xc_wait(xc_broadcast(0, nouveau_fence_context_del_xc, NULL, NULL));
+
+ /* Wait for any references to drain. */
+ spin_lock(&fctx->lock);
+ DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &fctx->waitqueue, &fctx->lock,
fctx->refcnt == 0);
+ BUG_ON(ret);
spin_unlock(&fctx->lock);
+
+ /* Make sure there are no more fences on the list. */
+ BUG_ON(!list_empty(&fctx->done));
+ BUG_ON(!list_empty(&fctx->flip));
+ BUG_ON(!list_empty(&fctx->pending));
+
+ /* Destroy the fence context. */
+ DRM_DESTROY_WAITQUEUE(&fctx->waitqueue);
spin_lock_destroy(&fctx->lock);
}

+/*
+ * nouveau_fence_context_new(fctx)
+ *
+ * Initialize the state fctx for all fences on a channel.
+ */
void
nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
+
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
+ INIT_LIST_HEAD(&fctx->done);
spin_lock_init(&fctx->lock);
+ DRM_INIT_WAITQUEUE(&fctx->waitqueue, "nvfnchan");
+ fctx->refcnt = 0;
}

+/*
+ * nouveau_fence_work_handler(kwork)
+ *
+ * Work handler for nouveau_fence_work.
+ */
static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
struct fence_work *work = container_of(kwork, typeof(*work), base);
+
work->func(work->data);
kfree(work);
}

+/*
+ * nouveau_fence_work(fence, func, data)
+ *
+ * Arrange to call func(data) after fence is completed. If fence
+ * is already completed, call it immediately. If memory is
+ * scarce, synchronously wait for the fence and call it.
+ */
void
nouveau_fence_work(struct nouveau_fence *fence,
void (*func)(void *), void *data)
{
- struct nouveau_channel *chan = fence->channel;
+ struct nouveau_channel *chan;
struct nouveau_fence_chan *fctx;
struct fence_work *work = NULL;

- if (nouveau_fence_done(fence)) {
- func(data);
- return;
- }
-
+ if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
+ goto now0;
fctx = chan->fence;
+
work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work) {
+ if (work == NULL) {
WARN_ON(nouveau_fence_wait(fence, false, false));
- func(data);
- return;
+ goto now1;
}

spin_lock(&fctx->lock);
- if (!fence->channel) {
+ if (fence->done) {
spin_unlock(&fctx->lock);
- kfree(work);
- func(data);
- return;
+ goto now2;
}
-
INIT_WORK(&work->base, nouveau_fence_work_handler);
work->func = func;
work->data = data;
list_add(&work->head, &fence->work);
+ if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
+ DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue, &fctx->lock);
spin_unlock(&fctx->lock);
+ return;
+
+now2: kfree(work);
+now1: nouveau_fence_channel_release(chan);
+now0: func(data);
}

+/*
+ * nouveau_fence_update(chan)
+ *
+ * Test all fences on chan for completion. For any that are
+ * completed, mark them as such and schedule work for them.
+ *
+ * Caller must hold chan's fence lock.
+ */
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence *fence, *fnext;

- spin_lock(&fctx->lock);
+ BUG_ON(!spin_is_locked(&fctx->lock));
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
if (fctx->read(chan) < fence->sequence)
break;
-
nouveau_fence_signal(fence);
- nouveau_fence_unref(&fence);
}
- spin_unlock(&fctx->lock);
+ BUG_ON(!spin_is_locked(&fctx->lock));
}

+/*
+ * nouveau_fence_emit(fence, chan)
+ *
+ * - Initialize fence.
+ * - Set its timeout to 15 sec from now.
+ * - Assign it the next sequence number on channel.
+ * - Submit it to the device with the device-specific emit routine.
+ * - If that succeeds, add it to the list of pending fences on chan.
+ */
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
@@ -151,7 +348,9 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)

fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
+ spin_lock(&fctx->lock);
fence->sequence = ++fctx->sequence;
+ spin_unlock(&fctx->lock);

ret = fctx->emit(fence);
if (!ret) {
@@ -164,77 +363,130 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
return ret;
}

+/*
+ * nouveau_fence_done_locked(fence, chan)
+ *
+ * Test whether fence, which must be on chan, is done. If it is
+ * not marked as done, poll all fences on chan first.
+ *
+ * Caller must hold chan's fence lock.
+ */
+static bool
+nouveau_fence_done_locked(struct nouveau_fence *fence,
+ struct nouveau_channel *chan)
+{
+ struct nouveau_fence_chan *fctx __diagused = chan->fence;
+
+ BUG_ON(!spin_is_locked(&fctx->lock));
+ BUG_ON(fence->channel != chan);
+
+ /* If it's not done, poll it for changes. */
+ if (!fence->done)
+ nouveau_fence_update(chan);
+
+ /* Check, possibly again, whether it is done now. */
+ return fence->done;
+}
+
+/*
+ * nouveau_fence_done(fence)
+ *
+ * Test whether fence is done. If it is not marked as done, poll
+ * all fences on its channel first. Caller MUST NOT hold the
+ * fence lock.
+ */
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
- if (fence->channel)
- nouveau_fence_update(fence->channel);
- return !fence->channel;
+ struct nouveau_channel *chan;
+ struct nouveau_fence_chan *fctx;
+ struct list_head done_list;
+ bool done;
+
+ if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
+ return true;
+
+ INIT_LIST_HEAD(&done_list);
+
+ fctx = chan->fence;
+ spin_lock(&fctx->lock);
+ done = nouveau_fence_done_locked(fence, chan);
+ nouveau_fence_gc_grab(fctx, &done_list);
+ spin_unlock(&fctx->lock);
+
+ nouveau_fence_channel_release(chan);
+
+ nouveau_fence_gc_free(&done_list);
+
+ return done;
}

+/*
+ * nouveau_fence_wait_uevent_handler(data, index)
+ *
+ * Nouveau uevent handler for fence completion. data is a
+ * nouveau_fence_chan pointer. Simply wake up all threads waiting
+ * for completion of any fences on the channel. Does not mark
+ * fences as completed -- threads must poll fences for completion.
+ */
static int
nouveau_fence_wait_uevent_handler(void *data, int index)
{
- struct nouveau_fence_priv *priv = data;
-#ifdef __NetBSD__
- spin_lock(&priv->waitlock);
- /* XXX Set a flag... */
- DRM_SPIN_WAKEUP_ALL(&priv->waitqueue, &priv->waitlock);
- spin_unlock(&priv->waitlock);
-#else
- wake_up_all(&priv->waiting);
-#endif
+ struct nouveau_fence_chan *fctx = data;
+
+ spin_lock(&fctx->lock);
+ DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue, &fctx->lock);
+ spin_unlock(&fctx->lock);
+
return NVKM_EVENT_KEEP;
}

+/*
+ * nouveau_fence_wait_uevent(fence, chan, intr)
+ *
+ * Wait using a nouveau event for completion of fence on chan.
+ * Wait interruptibly iff intr is true.
+ */
static int
-nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
-
+nouveau_fence_wait_uevent(struct nouveau_fence *fence,
+ struct nouveau_channel *chan, bool intr)
{
- struct nouveau_channel *chan = fence->channel;
struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
- struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_eventh *handler;
+ struct list_head done_list;
int ret = 0;

+ BUG_ON(fence->channel != chan);
+
ret = nouveau_event_new(pfifo->uevent, 0,
nouveau_fence_wait_uevent_handler,
- priv, &handler);
+ fctx, &handler);
if (ret)
return ret;

nouveau_event_get(handler);

+ INIT_LIST_HEAD(&done_list);
+
if (fence->timeout) {
unsigned long timeout = fence->timeout - jiffies;

if (time_before(jiffies, fence->timeout)) {
-#ifdef __NetBSD__
- spin_lock(&priv->waitlock);
+ spin_lock(&fctx->lock);
if (intr) {
DRM_SPIN_TIMED_WAIT_UNTIL(ret,
- &priv->waitqueue, &priv->waitlock,
+ &fctx->waitqueue, &fctx->lock,
timeout,
- nouveau_fence_done(fence));
+ nouveau_fence_done_locked(fence, chan));
} else {
DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
- &priv->waitqueue, &priv->waitlock,
+ &fctx->waitqueue, &fctx->lock,
timeout,
- nouveau_fence_done(fence));
- }
- spin_unlock(&priv->waitlock);
-#else
- if (intr) {
- ret = wait_event_interruptible_timeout(
- priv->waiting,
- nouveau_fence_done(fence),
- timeout);
} else {
- ret = wait_event_timeout(priv->waiting,
- nouveau_fence_done(fence),
- timeout);
+ nouveau_fence_done_locked(fence, chan));
}
-#endif
+ nouveau_fence_gc_grab(fctx, &done_list);
+ spin_unlock(&fctx->lock);
}

if (ret >= 0) {
@@ -243,50 +495,53 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
ret = -EBUSY;
}
} else {
-#ifdef __NetBSD__
- spin_lock(&priv->waitlock);
- if (intr) {
- DRM_SPIN_WAIT_UNTIL(ret, &priv->waitqueue,
- &priv->waitlock,
- nouveau_fence_done(fence));
- } else {
- DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &priv->waitqueue,
- &priv->waitlock,
- nouveau_fence_done(fence));
- }
- spin_unlock(&priv->waitlock);
-#else
+ spin_lock(&fctx->lock);
if (intr) {
- ret = wait_event_interruptible(priv->waiting,
- nouveau_fence_done(fence));
+ DRM_SPIN_WAIT_UNTIL(ret, &fctx->waitqueue,
+ &fctx->lock,
+ nouveau_fence_done_locked(fence, chan));
} else {
- wait_event(priv->waiting, nouveau_fence_done(fence));
+ DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &fctx->waitqueue,
+ &fctx->lock,
+ nouveau_fence_done_locked(fence, chan));
}
-#endif
+ nouveau_fence_gc_grab(fctx, &done_list);
+ spin_unlock(&fctx->lock);
}

nouveau_event_ref(NULL, &handler);
+
+ nouveau_fence_gc_free(&done_list);
+
if (unlikely(ret < 0))
return ret;

return 0;
}

+/*
+ * nouveau_fence_wait(fence, lazy, intr)
+ *
+ * Wait for fence to complete. Wait interruptibly iff intr is
+ * true. If lazy is true, may sleep, either for a single tick or
+ * for an interrupt; otherwise will busy-wait.
+ */
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
-#ifndef __NetBSD__
- unsigned long sleep_time = NSEC_PER_MSEC / 1000;
- ktime_t t;
-#endif
+ struct nouveau_channel *chan;
+ struct nouveau_fence_priv *priv;
+ unsigned long delay_usec = 1;
int ret = 0;

+ if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
+ goto out0;
+
+ priv = chan->drm->fence;
while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
- ret = nouveau_fence_wait_uevent(fence, intr);
+ ret = nouveau_fence_wait_uevent(fence, chan, intr);
if (ret < 0)
- return ret;
+ goto out1;
}

while (!nouveau_fence_done(fence)) {
@@ -295,33 +550,19 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
break;
}

-#ifdef __NetBSD__
- if (lazy)
- kpause("nvfencep", intr, 1, NULL);
- else
- DELAY(1);
-#else
- __set_current_state(intr ? TASK_INTERRUPTIBLE :
- TASK_UNINTERRUPTIBLE);
- if (lazy) {
- t = ktime_set(0, sleep_time);
- schedule_hrtimeout(&t, HRTIMER_MODE_REL);
- sleep_time *= 2;
- if (sleep_time > NSEC_PER_MSEC)
- sleep_time = NSEC_PER_MSEC;
- }
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
+ if (lazy && delay_usec >= 1000*hztoms(1)) {
+ /* XXX errno NetBSD->Linux */
+ ret = -kpause("nvfencew", intr, 1, NULL);
+ if (ret != -EWOULDBLOCK)
+ break;
+ } else {
+ DELAY(delay_usec);
+ delay_usec *= 2;
}
-#endif
}

-#ifndef __NetBSD__
- __set_current_state(TASK_RUNNING);
-#endif
- return ret;
+out1: nouveau_fence_channel_release(chan);
+out0: return ret;
}

int
@@ -331,13 +572,14 @@ nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
struct nouveau_channel *prev;
int ret = 0;

- prev = fence ? fence->channel : NULL;
- if (prev) {
+ if (fence != NULL &&
+ (prev = nouveau_fence_channel_acquire(fence)) != NULL) {
if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
ret = fctx->sync(fence, prev, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
+ nouveau_fence_channel_release(prev);
}

return ret;
@@ -347,12 +589,14 @@ static void
nouveau_fence_del(struct kref *kref)
{
struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+
kfree(fence);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
+
if (*pfence)
kref_put(&(*pfence)->kref, nouveau_fence_del);
*pfence = NULL;
@@ -361,6 +605,7 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
+
if (fence)
kref_get(&fence->kref);
return fence;
@@ -382,6 +627,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,

INIT_LIST_HEAD(&fence->work);
fence->sysmem = sysmem;
+ fence->done = false;
kref_init(&fence->kref);

ret = nouveau_fence_emit(fence, chan);
diff --git a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h
index f6f12ba1f38f..a0c32455bd55 100644
--- a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h
+++ b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h
@@ -9,6 +9,7 @@ struct nouveau_fence {
struct kref kref;

bool sysmem;
+ bool done;

struct nouveau_channel *channel;
unsigned long timeout;
@@ -27,9 +28,15 @@ void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);

+/*
+ * struct nouveau_fence_chan:
+ *
+ * State common to all fences in a single nouveau_channel.
+ */
struct nouveau_fence_chan {
struct list_head pending;
struct list_head flip;
+ struct list_head done;

int (*emit)(struct nouveau_fence *);
int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
@@ -39,9 +46,16 @@ struct nouveau_fence_chan {
int (*sync32)(struct nouveau_channel *, u64, u32);

spinlock_t lock;
+ drm_waitqueue_t waitqueue;
+ volatile unsigned refcnt;
u32 sequence;
};

+/*
+ * struct nouveau_fence_priv:
+ *
+ * Device-specific operations on fences.
+ */
struct nouveau_fence_priv {
void (*dtor)(struct nouveau_drm *);
bool (*suspend)(struct nouveau_drm *);
@@ -49,12 +63,6 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);

-#ifdef __NetBSD__
- spinlock_t waitlock;
- drm_waitqueue_t waitqueue;
-#else
- wait_queue_head_t waiting;
-#endif
bool uevent;
};

diff --git a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c
index 0bf784f0f11b..d4e6b8fa9992 100644
--- a/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c
+++ b/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c
@@ -216,11 +216,6 @@ nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;

-#ifdef __NetBSD__
- spin_lock_destroy(&priv->base.waitlock);
- DRM_DESTROY_WAITQUEUE(&priv->base.waitqueue);
-#endif
-
nouveau_bo_unmap(priv->bo_gart);
if (priv->bo_gart)
nouveau_bo_unpin(priv->bo_gart);
@@ -250,12 +245,6 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;

-#ifdef __NetBSD__
- spin_lock_init(&priv->base.waitlock);
- DRM_INIT_WAITQUEUE(&priv->base.waitqueue, "nvfenceq");
-#else
- init_waitqueue_head(&priv->base.waiting);
-#endif
priv->base.uevent = true;

ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,