Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/dev/sbus We alloc xflist and dma maps in mailbox setup. ...



details:   https://anonhg.NetBSD.org/src/rev/a8d90da31ebc
branches:  trunk
changeset: 477176:a8d90da31ebc
user:      mjacob <mjacob%NetBSD.org@localhost>
date:      Thu Oct 14 02:16:04 1999 +0000

description:
We allocate the xflist and DMA maps in mailbox setup. We swizzle the
endianness of queue entries at the end of DMA setup.

diffstat:

 sys/dev/sbus/isp_sbus.c |  112 ++++++++++++++++++++++++++---------------------
 1 files changed, 61 insertions(+), 51 deletions(-)

diffs (210 lines):

diff -r 7a91ba4e2ab1 -r a8d90da31ebc sys/dev/sbus/isp_sbus.c
--- a/sys/dev/sbus/isp_sbus.c   Thu Oct 14 02:14:35 1999 +0000
+++ b/sys/dev/sbus/isp_sbus.c   Thu Oct 14 02:16:04 1999 +0000
@@ -1,5 +1,4 @@
-/* $NetBSD: isp_sbus.c,v 1.13 1999/09/30 23:04:42 thorpej Exp $ */
-/* release_03_25_99 */
+/* $NetBSD: isp_sbus.c,v 1.14 1999/10/14 02:16:04 mjacob Exp $ */
 /*
  * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
  *
@@ -83,7 +82,7 @@
        int             sbus_node;
        int             sbus_pri;
        struct ispmdvec sbus_mdvec;
-       bus_dmamap_t    sbus_dmamap[MAXISPREQUEST];
+       bus_dmamap_t    *sbus_dmamap;
        int16_t         sbus_poff[_NREG_BLKS];
 };
 
@@ -205,8 +204,7 @@
                return;
        }
 
-       for (i = 0; i < MAXISPREQUEST; i++) {
-
+       for (i = 0; i < isp->isp_maxcmds; i++) {
                /* Allocate a DMA handle */
                if (bus_dmamap_create(sbc->sbus_dmatag, MAXPHYS, 1, MAXPHYS, 0,
                    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
@@ -242,7 +240,7 @@
 }
 
 static void
-isp_sbus_wr_reg (isp, regoff, val)
+isp_sbus_wr_reg(isp, regoff, val)
        struct ispsoftc *isp;
        int regoff;
        u_int16_t val;
@@ -260,26 +258,43 @@
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dma_segment_t seg;
        int rseg;
+       size_t n;
        bus_size_t len;
 
-       /*
-        * NOTE: Since most Sun machines aren't I/O coherent,
-        * map the mailboxes through kdvma space to force them
-        * to be uncached.
-        */
+       if (isp->isp_rquest_dma)
+               return (0);
 
+       n = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
+       isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(n, M_DEVBUF, M_WAITOK);
+       if (isp->isp_xflist == NULL) {
+               printf("%s: cannot alloc xflist array\n", isp->isp_name);
+               return (1);
+       }
+       bzero(isp->isp_xflist, n);
+       n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
+       sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
+       if (sbc->sbus_dmamap == NULL) {
+               free(isp->isp_xflist, M_DEVBUF);
+               printf("%s: cannot alloc dmamap array\n", isp->isp_name);
+               return (1);
+       }
        /*
         * Allocate and map the request queue.
         */
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
        if (bus_dmamem_alloc(sbc->sbus_dmatag, len, NBPG, 0,
-                            &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
+           &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
+               free(sbc->sbus_dmamap, M_DEVBUF);
+               free(isp->isp_xflist, M_DEVBUF);
                return (1);
+       }
 
        if (bus_dmamem_map(sbc->sbus_dmatag, &seg, rseg, len,
-                          (caddr_t *)&isp->isp_rquest,
-                          BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
+           (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
+               free(sbc->sbus_dmamap, M_DEVBUF);
+               free(isp->isp_xflist, M_DEVBUF);
                return (1);
+       }
        isp->isp_rquest_dma = seg.ds_addr;
 
        /*
@@ -287,20 +302,24 @@
         */
        len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
        if (bus_dmamem_alloc(sbc->sbus_dmatag, len, NBPG, 0,
-                            &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
+           &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
+               free(sbc->sbus_dmamap, M_DEVBUF);
+               free(isp->isp_xflist, M_DEVBUF);
                return (1);
-
+       }
        if (bus_dmamem_map(sbc->sbus_dmatag, &seg, rseg, len,
-                          (caddr_t *)&isp->isp_result,
-                          BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
+           (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
+               free(sbc->sbus_dmamap, M_DEVBUF);
+               free(isp->isp_xflist, M_DEVBUF);
                return (1);
+       }
        isp->isp_result_dma = seg.ds_addr;
-
        return (0);
 }
 
 /*
- * TODO: If kdvma_mapin fails, try using multiple smaller chunks..
+ * Map a DMA request.
+ * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
  */
 
 static int
@@ -314,36 +333,26 @@
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dmamap_t dmamap;
        int dosleep = (xs->xs_control & XS_CTL_NOSLEEP) != 0;
+       int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;
 
        if (xs->datalen == 0) {
                rq->req_seg_count = 1;
-               return (CMD_QUEUED);
+               goto mbxsync;
        }
-
-       if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
-               panic("%s: bad handle (%d) in isp_sbus_dmasetup\n",
-                       isp->isp_name, rq->req_handle);
-               /* NOTREACHED */
-       }
-
+       assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
        dmamap = sbc->sbus_dmamap[rq->req_handle - 1];
        if (dmamap->dm_nsegs != 0) {
                panic("%s: dma map already allocated\n", isp->isp_name);
                /* NOTREACHED */
        }
-       if (bus_dmamap_load(sbc->sbus_dmatag, dmamap,
-                           xs->data, xs->datalen, NULL,
-                           dosleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
+       if (bus_dmamap_load(sbc->sbus_dmatag, dmamap, xs->data, xs->datalen,
+           NULL, dosleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
                XS_SETERR(xs, HBA_BOTCH);
                return (CMD_COMPLETE);
        }
-       bus_dmamap_sync(sbc->sbus_dmatag, dmamap,
-                       dmamap->dm_segs[0].ds_addr, xs->datalen,
-                       (xs->xs_control & XS_CTL_DATA_IN)
-                               ? BUS_DMASYNC_PREREAD
-                               : BUS_DMASYNC_PREWRITE);
-
-       if (xs->xs_control & XS_CTL_DATA_IN) {
+       bus_dmamap_sync(sbc->sbus_dmatag, dmamap, dmamap->dm_segs[0].ds_addr,
+           xs->datalen, in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
+       if (in) {
                rq->req_flags |= REQFLAG_DATA_IN;
        } else {
                rq->req_flags |= REQFLAG_DATA_OUT;
@@ -351,6 +360,15 @@
        rq->req_dataseg[0].ds_count = xs->datalen;
        rq->req_dataseg[0].ds_base =  dmamap->dm_segs[0].ds_addr;
        rq->req_seg_count = 1;
+mbxsync:
+        ISP_SWIZZLE_REQUEST(isp, rq);
+#if    0
+       /*
+        * If we ever map cacheable memory, we need to do something like this.
+        */
+        bus_dmamap_sync(sbc->sbus_dmat, sbc->sbus_rquest_dmap, 0,
+            sbc->sbus_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+#endif
        return (CMD_QUEUED);
 }
 
@@ -362,22 +380,14 @@
 {
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dmamap_t dmamap;
-
-       if (handle >= RQUEST_QUEUE_LEN) {
-               panic("%s: bad handle (%d) in isp_sbus_dmateardown\n",
-                       isp->isp_name, handle);
-               /* NOTREACHED */
-       }
-
-       dmamap = sbc->sbus_dmamap[handle];
+       assert(handle != 0 && handle <= isp->isp_maxcmds);
+       dmamap = sbc->sbus_dmamap[handle-1];
        if (dmamap->dm_nsegs == 0) {
                panic("%s: dma map not already allocated\n", isp->isp_name);
                /* NOTREACHED */
        }
-       bus_dmamap_sync(sbc->sbus_dmatag, dmamap,
-                       dmamap->dm_segs[0].ds_addr, xs->datalen,
-                       (xs->xs_control & XS_CTL_DATA_IN)
-                               ? BUS_DMASYNC_POSTREAD
-                               : BUS_DMASYNC_POSTWRITE);
+       bus_dmamap_sync(sbc->sbus_dmatag, dmamap, dmamap->dm_segs[0].ds_addr,
+           xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
+           BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sbc->sbus_dmatag, dmamap);
 }



Home | Main Index | Thread Index | Old Index