Source-Changes-HG archive


[src/yamt-uio_vmspace]: src/sys/dev/dmover adapt dmover.



details:   https://anonhg.NetBSD.org/src/rev/2a52398040bb
branches:  yamt-uio_vmspace
changeset: 586707:2a52398040bb
user:      yamt <yamt@NetBSD.org>
date:      Sat Feb 04 09:01:59 2006 +0000

description:
adapt dmover.
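
On the yamt-uio_vmspace branch, struct uio no longer carries the uio_segflg/uio_lwp pair; it points at a struct vmspace instead, so dmover_io now takes a reference on the issuing process's address space with proc_vmspace_getref() when a request is built and drops it with uvmspace_free() when the request is torn down. A minimal sketch of that request-setup pattern follows; the helper name and its caller are invented for illustration, and only proc_vmspace_getref(), uvmspace_free() and the struct uio fields are taken from the change itself:

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <uvm/uvm_extern.h>

/*
 * Hypothetical helper: describe one user buffer for a request that may
 * complete after the submitting lwp has returned to userland.
 */
static int
example_uio_init(struct uio *uio, struct iovec *iov, int iovcnt,
    size_t len, struct vmspace **vmp)
{
        int error;

        /*
         * Pin the caller's address space up front; the uio no longer
         * records uio_segflg/uio_lwp, it references the vmspace directly.
         */
        error = proc_vmspace_getref(curproc, vmp);
        if (error)
                return error;

        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
        uio->uio_resid = len;
        uio->uio_rw = UIO_READ;
        uio->uio_vmspace = *vmp;        /* replaces uio_segflg/uio_lwp */
        return 0;
}

The matching uvmspace_free() can only happen once the request is finished with the buffers, which is why the teardown path in the diff below becomes more involved than a plain pool_put().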

diffstat:

 sys/dev/dmover/dmover_io.c |  78 +++++++++++++++++++++++++++++++++++++--------
 1 files changed, 63 insertions(+), 15 deletions(-)

diffs (162 lines):

diff -r 396d2ead7501 -r 2a52398040bb sys/dev/dmover/dmover_io.c
--- a/sys/dev/dmover/dmover_io.c        Thu Feb 02 14:45:12 2006 +0000
+++ b/sys/dev/dmover/dmover_io.c        Sat Feb 04 09:01:59 2006 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: dmover_io.c,v 1.19.2.1 2006/02/01 14:52:08 yamt Exp $  */
+/*     $NetBSD: dmover_io.c,v 1.19.2.2 2006/02/04 09:01:59 yamt Exp $  */
 
 /*
  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@@ -55,7 +55,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.19.2.1 2006/02/01 14:52:08 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.19.2.2 2006/02/04 09:01:59 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/queue.h>
@@ -70,16 +70,26 @@
 #include <sys/filio.h>
 #include <sys/select.h>
 #include <sys/systm.h>
+#include <sys/workqueue.h>
+#include <sys/once.h>
+
+#include <uvm/uvm_extern.h>
 
 #include <dev/dmover/dmovervar.h>
 #include <dev/dmover/dmover_io.h>
 
 struct dmio_usrreq_state {
-       TAILQ_ENTRY(dmio_usrreq_state) dus_q;
+       union {
+               struct work u_work;
+               TAILQ_ENTRY(dmio_usrreq_state) u_q;
+       } dus_u;
+#define        dus_q           dus_u.u_q
+#define        dus_work        dus_u.u_work
        struct uio dus_uio_out;
        struct uio *dus_uio_in;
        struct dmover_request *dus_req;
        uint32_t dus_id;
+       struct vmspace *dus_vmspace;
 };
 
 struct dmio_state {
@@ -92,6 +102,11 @@
        struct simplelock ds_slock;
 };
 
+static ONCE_DECL(dmio_cleaner_control);
+static struct workqueue *dmio_cleaner;
+static int dmio_cleaner_init(void);
+static void dmio_usrreq_fini1(struct work *wk, void *);
+
 #define        DMIO_STATE_SEL          0x0001
 #define        DMIO_STATE_DEAD         0x0002
 #define        DMIO_STATE_LARVAL       0x0004
@@ -128,6 +143,19 @@
 }
 
 /*
+ * dmio_cleaner_init:
+ *
+ *     Create cleaner thread.
+ */
+static int
+dmio_cleaner_init(void)
+{
+
+       return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
+           NULL, PWAIT, 0 /* IPL_SOFTCLOCK */, 0);
+}
+
+/*
  * dmio_usrreq_init:
  *
  *     Build a request structure.
@@ -147,6 +175,16 @@
 
        /* XXX How should malloc interact w/ FNONBLOCK? */
 
+       error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
+       if (error) {
+               return error;
+       }
+
+       error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
+       if (error) {
+               return error;
+       }
+
        if (req->req_outbuf.dmbuf_iovcnt != 0) {
                if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
                        return (EINVAL);
@@ -170,8 +208,8 @@
                uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
                uio_out->uio_resid = len;
                uio_out->uio_rw = UIO_READ;
-               uio_out->uio_segflg = UIO_USERSPACE;
-               uio_out->uio_lwp = curlwp;
+               uio_out->uio_vmspace = dus->dus_vmspace;
+
                dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
                dreq->dreq_outbuf.dmbuf_uio = uio_out;
        } else {
@@ -236,8 +274,7 @@
                uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
                uio_in->uio_resid = len;
                uio_in->uio_rw = UIO_WRITE;
-               uio_in->uio_segflg = UIO_USERSPACE;
-               uio_in->uio_lwp = curlwp;
+               uio_in->uio_vmspace = dus->dus_vmspace;
 
                dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
        }
@@ -254,6 +291,7 @@
        free(dus->dus_uio_in, M_TEMP);
        if (uio_out != NULL)
                free(uio_out->uio_iov, M_TEMP);
+       uvmspace_free(dus->dus_vmspace);
        return (error);
 }
 
@@ -273,19 +311,29 @@
        if (uio_out->uio_iov != NULL)
                free(uio_out->uio_iov, M_TEMP);
 
-       if (dses->dses_ninputs == 0) {
-               pool_put(&dmio_usrreq_state_pool, dus);
-               return;
+       if (dses->dses_ninputs) {
+               for (i = 0; i < dses->dses_ninputs; i++) {
+                       uio_in = &dus->dus_uio_in[i];
+                       free(uio_in->uio_iov, M_TEMP);
+               }
+               free(dus->dus_uio_in, M_TEMP);
        }
 
-       for (i = 0; i < dses->dses_ninputs; i++) {
-               uio_in = &dus->dus_uio_in[i];
-               free(uio_in->uio_iov, M_TEMP);
-       }
+       workqueue_enqueue(dmio_cleaner, &dus->dus_work);
+}
 
-       free(dus->dus_uio_in, M_TEMP);
+static void
+dmio_usrreq_fini1(struct work *wk, void *dummy)
+{
+       struct dmio_usrreq_state *dus = (void *)wk;
+       int s;
 
+       KASSERT(wk == &dus->dus_work);
+
+       uvmspace_free(dus->dus_vmspace);
+       s = splsoftclock();
        pool_put(&dmio_usrreq_state_pool, dus);
+       splx(s);
 }
 
 /*
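
The other half of the change is the completion path: dmio_usrreq_fini() used to return the state structure to its pool directly, but the dmover completion callback can run at IPL_SOFTCLOCK, where dropping the vmspace reference is presumably not safe (uvmspace_free() may have to tear down the whole address space), so the state is handed to a workqueue(9) worker instead, created on first use with RUN_ONCE(). A rough sketch of that create-once/enqueue/defer shape, reusing the interfaces visible in the diff but with invented names and a simplified state structure around them:

#include <sys/param.h>
#include <sys/once.h>
#include <sys/workqueue.h>
#include <uvm/uvm_extern.h>

static ONCE_DECL(cleaner_once);
static struct workqueue *cleaner_wq;

struct request_state {
        struct work rs_work;            /* embedded, recovered by the worker */
        struct vmspace *rs_vmspace;     /* reference taken at submit time */
};

/* Worker runs in thread context, where the sleeping cleanup is legal. */
static void
cleaner_work(struct work *wk, void *arg)
{
        struct request_state *rs = (struct request_state *)wk;

        uvmspace_free(rs->rs_vmspace);
        /* ... return rs to its pool ... */
}

static int
cleaner_init(void)
{

        return workqueue_create(&cleaner_wq, "cleaner", cleaner_work,
            NULL, PWAIT, 0 /* IPL_SOFTCLOCK */, 0);
}

/* At request-build time (thread context): make sure the cleaner exists. */
static int
request_submit_setup(void)
{

        return RUN_ONCE(&cleaner_once, cleaner_init);
}

/* From the completion callback: defer the rest of the teardown. */
static void
request_done(struct request_state *rs)
{

        workqueue_enqueue(cleaner_wq, &rs->rs_work);
}

In the change itself the struct work shares a union with the state's TAILQ entry (dus_work vs. dus_q), which works because a request sits on the pending queue or on the cleaner's queue, never both; and RUN_ONCE() is done in dmio_usrreq_init(), so the enqueue in the completion path never sees a missing workqueue.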


