Source-Changes-HG archive


[src/trunk]: src/sys/dev/ic revert change from rev. 1.12:

details:   https://anonhg.NetBSD.org/src/rev/a9986e912a75
branches:  trunk
changeset: 818639:a9986e912a75
user:      jdolecek <jdolecek%NetBSD.org@localhost>
date:      Thu Oct 20 19:20:40 2016 +0000

description:
revert change from rev. 1.12:
"""
slightly optimize memory access - change struct nvme_queue so that the
struct dmamem members are allocated as part of it, instead of separate
kmem_alloc()s
"""

That change, quite curiously, caused completion queue corruption on MP systems,
regardless of the MPSAFE setting for the pci/softintr interrupt

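For context, a minimal sketch of the two layouts the log entry refers to. This is an
illustration only, not the real sys/dev/ic/nvmevar.h declarations: struct nvme_dmamem is
trimmed to two representative fields, and the queue structs keep only the
q_sq_dmamem/q_cq_dmamem members that appear in the diff below.

/*
 * Illustration only -- simplified stand-ins for the nvmevar.h structs;
 * the real struct nvme_dmamem also carries the bus_dma(9) map and segment.
 */
#include <stddef.h>

struct nvme_dmamem_sketch {
	void	*ndm_kva;	/* kernel mapping of the DMA memory */
	size_t	 ndm_size;	/* size passed to nvme_dmamem_alloc() */
};

/*
 * rev. 1.12 layout (the one reverted here): the submission/completion
 * ring bookkeeping is embedded, so it comes from the same allocation
 * as struct nvme_queue itself.
 */
struct nvme_queue_embedded_sketch {
	struct nvme_dmamem_sketch  q_sq_dmamem;
	struct nvme_dmamem_sketch  q_cq_dmamem;
};

/*
 * Layout restored by this revert: pointer members, each filled in by a
 * separate nvme_dmamem_alloc() that kmem_zalloc()s its own nvme_dmamem.
 */
struct nvme_queue_separate_sketch {
	struct nvme_dmamem_sketch *q_sq_dmamem;
	struct nvme_dmamem_sketch *q_cq_dmamem;
};

In both versions the DMA memory itself is still obtained through bus_dma(9); only where the
struct nvme_dmamem bookkeeping lives changes, which makes the observed corruption all the
more curious.
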
diffstat:

 sys/dev/ic/nvme.c    |  115 ++++++++++++++++++++++++++------------------------
 sys/dev/ic/nvmevar.h |   16 +++---
 2 files changed, 68 insertions(+), 63 deletions(-)

diffs (truncated from 348 to 300 lines):

diff -r 409dcb25c301 -r a9986e912a75 sys/dev/ic/nvme.c
--- a/sys/dev/ic/nvme.c Thu Oct 20 18:42:28 2016 +0000
+++ b/sys/dev/ic/nvme.c Thu Oct 20 19:20:40 2016 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: nvme.c,v 1.18 2016/10/19 19:34:31 jdolecek Exp $       */
+/*     $NetBSD: nvme.c,v 1.19 2016/10/20 19:20:40 jdolecek Exp $       */
 /*     $OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
 
 /*
@@ -18,7 +18,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.18 2016/10/19 19:34:31 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.19 2016/10/20 19:20:40 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -85,9 +85,11 @@
 static int     nvme_q_complete(struct nvme_softc *, struct nvme_queue *q);
 static void    nvme_q_free(struct nvme_softc *, struct nvme_queue *);
 
-static int     nvme_dmamem_alloc(struct nvme_softc *, size_t,
-                   struct nvme_dmamem *);
+static struct nvme_dmamem *
+               nvme_dmamem_alloc(struct nvme_softc *, size_t);
 static void    nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
+static void    nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *,
+                   int);
 
 static void    nvme_ns_io_fill(struct nvme_queue *, struct nvme_ccb *,
                    void *);
@@ -151,12 +153,8 @@
 #endif
 }
 #endif /* __LP64__ */
-
 #define nvme_barrier(_s, _r, _l, _f) \
        bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
-#define nvme_dmamem_sync(sc, mem, ops) \
-       bus_dmamap_sync((sc)->sc_dmat, NVME_DMA_MAP(mem), \
-           0, NVME_DMA_LEN(mem), (ops));
 
 static void
 nvme_version(struct nvme_softc *sc, uint32_t ver)
@@ -568,19 +566,19 @@
 {
        struct nvme_sqe sqe;
        struct nvm_identify_namespace *identify;
-       struct nvme_dmamem mem;
+       struct nvme_dmamem *mem;
        struct nvme_ccb *ccb;
        struct nvme_namespace *ns;
-       int error;
+       int rv;
 
        KASSERT(nsid > 0);
 
        ccb = nvme_ccb_get(sc->sc_admin_q);
        KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
 
-       error = nvme_dmamem_alloc(sc, sizeof(*identify), &mem);
-       if (error)
-               return error;
+       mem = nvme_dmamem_alloc(sc, sizeof(*identify));
+       if (mem == NULL)
+               return ENOMEM;
 
        memset(&sqe, 0, sizeof(sqe));
        sqe.opcode = NVM_ADMIN_IDENTIFY;
@@ -592,30 +590,30 @@
        ccb->ccb_cookie = &sqe;
 
        nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
-       error = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill,
-           NVME_TIMO_IDENT);
+       rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
        nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
 
        nvme_ccb_put(sc->sc_admin_q, ccb);
 
-       if (error != 0) {
-               error = EIO;
+       if (rv != 0) {
+               rv = EIO;
                goto done;
        }
 
        /* commit */
 
        identify = kmem_zalloc(sizeof(*identify), KM_SLEEP);
-       memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));
+       *identify = *((volatile struct nvm_identify_namespace *)NVME_DMA_KVA(mem));
+       //memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));
 
        ns = nvme_ns_get(sc, nsid);
        KASSERT(ns);
        ns->ident = identify;
 
 done:
-       nvme_dmamem_free(sc, &mem);
+       nvme_dmamem_free(sc, mem);
 
-       return error;
+       return rv;
 }
 
 int
@@ -1108,29 +1106,29 @@
 {
        char sn[41], mn[81], fr[17];
        struct nvm_identify_controller *identify;
-       struct nvme_dmamem mem;
+       struct nvme_dmamem *mem;
        struct nvme_ccb *ccb;
        u_int mdts;
-       int error;
+       int rv = 1;
 
        ccb = nvme_ccb_get(sc->sc_admin_q);
        KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
 
-       error = nvme_dmamem_alloc(sc, sizeof(*identify), &mem);
-       if (error)
-               return error;
+       mem = nvme_dmamem_alloc(sc, sizeof(*identify));
+       if (mem == NULL)
+               return 1;
 
        ccb->ccb_done = nvme_empty_done;
-       ccb->ccb_cookie = &mem;
+       ccb->ccb_cookie = mem;
 
        nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
-       error = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
+       rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
            NVME_TIMO_IDENT);
        nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
 
        nvme_ccb_put(sc->sc_admin_q, ccb);
 
-       if (error != 0)
+       if (rv != 0)
                goto done;
 
        identify = NVME_DMA_KVA(mem);
@@ -1155,9 +1153,9 @@
        memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));
 
 done:
-       nvme_dmamem_free(sc, &mem);
+       nvme_dmamem_free(sc, mem);
 
-       return error;
+       return rv;
 }
 
 static int
@@ -1260,7 +1258,7 @@
        struct nvme_dmamem *mem = ccb->ccb_cookie;
 
        sqe->opcode = NVM_ADMIN_IDENTIFY;
-       htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(*mem));
+       htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
        htolem32(&sqe->cdw10, 1);
 }
 
@@ -1272,7 +1270,6 @@
        bus_addr_t off;
        uint64_t *prpl;
        u_int i;
-       int error;
 
        mutex_init(&q->q_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
        SIMPLEQ_INIT(&q->q_ccb_list);
@@ -1282,12 +1279,8 @@
                return 1;
 
        q->q_nccbs = nccbs;
-       error = nvme_dmamem_alloc(sc, sizeof(*prpl) * sc->sc_max_sgl * nccbs,
-           &q->q_ccb_prpls);
-       if (error) {
-               kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
-               return error;
-       }
+       q->q_ccb_prpls = nvme_dmamem_alloc(sc,
+           sizeof(*prpl) * sc->sc_max_sgl * nccbs);
 
        prpl = NVME_DMA_KVA(q->q_ccb_prpls);
        off = 0;
@@ -1362,7 +1355,7 @@
        }
        mutex_exit(&q->q_ccb_mtx);
 
-       nvme_dmamem_free(sc, &q->q_ccb_prpls);
+       nvme_dmamem_free(sc, q->q_ccb_prpls);
        kmem_free(q->q_ccbs, sizeof(*ccb) * q->q_nccbs);
        q->q_ccbs = NULL;
        mutex_destroy(&q->q_ccb_mtx);
@@ -1372,21 +1365,20 @@
 nvme_q_alloc(struct nvme_softc *sc, uint16_t id, u_int entries, u_int dstrd)
 {
        struct nvme_queue *q;
-       int error;
 
        q = kmem_alloc(sizeof(*q), KM_SLEEP);
        if (q == NULL)
                return NULL;
 
        q->q_sc = sc;
-       error = nvme_dmamem_alloc(sc, sizeof(struct nvme_sqe) * entries,
-           &q->q_sq_dmamem);
-       if (error)
+       q->q_sq_dmamem = nvme_dmamem_alloc(sc,
+           sizeof(struct nvme_sqe) * entries);
+       if (q->q_sq_dmamem == NULL)
                goto free;
 
-       error = nvme_dmamem_alloc(sc, sizeof(struct nvme_cqe) * entries,
-           &q->q_cq_dmamem);
-       if (error)
+       q->q_cq_dmamem = nvme_dmamem_alloc(sc,
+           sizeof(struct nvme_cqe) * entries);
+       if (q->q_cq_dmamem == NULL)
                goto free_sq;
 
        memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
@@ -1413,9 +1405,9 @@
        return q;
 
 free_cq:
-       nvme_dmamem_free(sc, &q->q_cq_dmamem);
+       nvme_dmamem_free(sc, q->q_cq_dmamem);
 free_sq:
-       nvme_dmamem_free(sc, &q->q_sq_dmamem);
+       nvme_dmamem_free(sc, q->q_sq_dmamem);
 free:
        kmem_free(q, sizeof(*q));
 
@@ -1430,8 +1422,8 @@
        mutex_destroy(&q->q_cq_mtx);
        nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
        nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
-       nvme_dmamem_free(sc, &q->q_cq_dmamem);
-       nvme_dmamem_free(sc, &q->q_sq_dmamem);
+       nvme_dmamem_free(sc, q->q_cq_dmamem);
+       nvme_dmamem_free(sc, q->q_sq_dmamem);
        kmem_free(q, sizeof(*q));
 }
 
@@ -1499,12 +1491,16 @@
        nvme_q_complete(sc, q);
 }
 
-static int
-nvme_dmamem_alloc(struct nvme_softc *sc, size_t size, struct nvme_dmamem *ndm)
+static struct nvme_dmamem *
+nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
 {
+       struct nvme_dmamem *ndm;
        int nsegs;
 
-       memset(ndm, 0, sizeof(*ndm));
+       ndm = kmem_zalloc(sizeof(*ndm), KM_SLEEP);
+       if (ndm == NULL)
+               return NULL;
+
        ndm->ndm_size = size;
 
        if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
@@ -1524,7 +1520,7 @@
            NULL, BUS_DMA_WAITOK) != 0)
                goto unmap;
 
-       return 0;
+       return ndm;
 
 unmap:
        bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
@@ -1533,7 +1529,15 @@
 destroy:
        bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
 ndmfree:
-       return ENOMEM;
+       kmem_free(ndm, sizeof(*ndm));
+       return NULL;
+}
+
+static void
+nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
+{
+       bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
+           0, NVME_DMA_LEN(mem), ops);
 }
 
 void
@@ -1543,6 +1547,7 @@
        bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
        bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
        bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);


