
Re: nvme(4) related KASSERT ?



Hi,

Could you try the attached patch?

2017-02-13 2:21 GMT+09:00 Nicolas Joly <njoly%pasteur.fr@localhost>:

> On Sun, Feb 12, 2017 at 05:24:20PM +0100, Nicolas Joly wrote:
>>
>> NB: Attached full dmesg in the working case.
>
> Here it is ...
>
> --
> Nicolas Joly
>
> Cluster & Computing Group
> Biology IT Center
> Institut Pasteur, Paris.
diff --git a/sys/dev/ic/nvme.c b/sys/dev/ic/nvme.c
index 9c2e2363f3c..df1c07e8bb1 100644
--- a/sys/dev/ic/nvme.c
+++ b/sys/dev/ic/nvme.c
@@ -107,6 +107,8 @@ static void	nvme_pt_done(struct nvme_queue *, struct nvme_ccb *,
 static int	nvme_command_passthrough(struct nvme_softc *,
 		    struct nvme_pt_command *, uint16_t, struct lwp *, bool);
 
+static int	nvme_get_number_of_queues(struct nvme_softc *, u_int *);
+
 #define NVME_TIMO_QOP		5	/* queue create and delete timeout */
 #define NVME_TIMO_IDENT		10	/* probe identify timeout */
 #define NVME_TIMO_PT		-1	/* passthrough cmd timeout */
@@ -333,6 +335,7 @@ nvme_attach(struct nvme_softc *sc)
 	uint32_t reg;
 	u_int dstrd;
 	u_int mps = PAGE_SHIFT;
+	u_int ioq_allocated;
 	uint16_t adminq_entries = nvme_adminq_size;
 	uint16_t ioq_entries = nvme_ioq_size;
 	int i;
@@ -395,6 +398,17 @@ nvme_attach(struct nvme_softc *sc)
 	nvme_ccbs_free(sc->sc_admin_q);
 	nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);
 
+	if (sc->sc_use_mq) {
+		/* Limit the number of queues to the number allocated in HW */
+		if (nvme_get_number_of_queues(sc, &ioq_allocated) != 0) {
+			aprint_error_dev(sc->sc_dev,
+			    "unable to get number of queues\n");
+			goto disable;
+		}
+		if (sc->sc_nq > ioq_allocated)
+			sc->sc_nq = ioq_allocated;
+	}
+
 	sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
 	if (sc->sc_q == NULL) {
 		aprint_error_dev(sc->sc_dev, "unable to allocate io queue\n");
@@ -1270,6 +1284,42 @@ nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
 	htolem32(&sqe->cdw10, 1);
 }
 
+static int
+nvme_get_number_of_queues(struct nvme_softc *sc, u_int *nqap)
+{
+	struct nvme_pt_command pt;
+	struct nvme_ccb *ccb;
+	uint32_t nqa;
+	uint16_t ncqa, nsqa;
+	int rv;
+
+	ccb = nvme_ccb_get(sc->sc_admin_q);
+	KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
+
+	memset(&pt, 0, sizeof(pt));
+	pt.cmd.opcode = NVM_ADMIN_GET_FEATURES;
+	pt.cmd.cdw10 = 7 /*NVME_FEAT_NUMBER_OF_QUEUES*/;
+
+	ccb->ccb_done = nvme_pt_done;
+	ccb->ccb_cookie = &pt;
+
+	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_pt_fill, NVME_TIMO_QOP);
+
+	nvme_ccb_put(sc->sc_admin_q, ccb);
+
+	if (rv != 0) {
+		*nqap = 0;
+		return EIO;
+	}
+
+	nqa = le32toh(pt.cpl.cdw0);
+	ncqa = nqa >> 16;
+	nsqa = nqa & 0xffff;
+	*nqap = MIN(ncqa, nsqa) + 1;
+
+	return 0;
+}
+
 static int
 nvme_ccbs_alloc(struct nvme_queue *q, uint16_t nccbs)
 {
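
For reference, the new nvme_get_number_of_queues() relies on the NVMe
Get Features (Number of Queues, feature id 07h) completion: dword 0
carries NCQA in bits 31:16 and NSQA in bits 15:00, both 0's based
values, which is why the patch uses MIN(ncqa, nsqa) + 1.  A minimal
standalone sketch of that decoding (the function name and sample value
below are illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Decode completion dword 0 of Get Features (Number of Queues).
 * Both fields are 0's based, so a raw value of 0 means one queue.
 */
static unsigned int
nqueues_from_cdw0(uint32_t cdw0)
{
	uint16_t ncqa = cdw0 >> 16;	/* completion queues allocated - 1 */
	uint16_t nsqa = cdw0 & 0xffff;	/* submission queues allocated - 1 */

	return (unsigned int)((ncqa < nsqa) ? ncqa : nsqa) + 1;
}

int
main(void)
{
	/* e.g. controller granted 8 CQs and 4 SQs (raw 0x0007 and 0x0003) */
	printf("%u usable IO queue pairs\n", nqueues_from_cdw0(0x00070003));
	return 0;
}

This prints "4 usable IO queue pairs": the driver cannot use more queue
pairs than the smaller of the two allocations the controller reports.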

