
Re: kern/58639: qemufwcfg(4), mount_qemufwcfg(8): writes are not supported



The attached patch implements some of the missing write support.

There's a small snag: the current ioctl(SET_INDEX) and read interface
don't play nicely with file offsets or pread(2).  That's not a problem
for the current low-volume use, but I think we should introduce a new
ioctl(SET_INDEX_SEEKABLE) or something to make this cleaner -- with
that, the device's data offset would be tracked by the file offset, so
seeks would work and we could use the DMA interface for reads too.
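
To make the idea concrete, here's a rough sketch of what a consumer
might look like under that scheme.  FWCFGIO_SET_INDEX_SEEKABLE is
hypothetical (the ioctl encoding below is made up), and 0x0019
(FW_CFG_FILE_DIR) just stands in for whichever item you want to read:

/*
 * Hypothetical sketch only: FWCFGIO_SET_INDEX_SEEKABLE does not exist
 * yet, and 0x0019 is a placeholder selector key.
 */
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#define	FWCFGIO_SET_INDEX_SEEKABLE	_IOW('F', 1, uint16_t)	/* made up */

int
main(void)
{
	uint16_t index = 0x0019;	/* FW_CFG_FILE_DIR, for example */
	uint8_t buf[64];
	int fd;

	if ((fd = open("/dev/qemufwcfg", O_RDONLY)) == -1)
		err(EXIT_FAILURE, "open");
	if (ioctl(fd, FWCFGIO_SET_INDEX_SEEKABLE, &index) == -1)
		err(EXIT_FAILURE, "select");

	/*
	 * With a seekable selection, the position within the item lives
	 * in the file offset rather than in a hidden per-device cursor,
	 * so pread(2) at an arbitrary offset is well defined and the
	 * driver is free to satisfy it with a SKIP+READ DMA transfer.
	 */
	if (pread(fd, buf, sizeof(buf), 128) == -1)
		err(EXIT_FAILURE, "pread");
	return EXIT_SUCCESS;
}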

I stopped here because I discovered it's not actually useful for
testing changes to the VM generation ID -- you can't write to that
from the guest -- which was my motivation for looking at qemufwcfg in
the first place (PR 58632).
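
In case it's useful to anyone else, here's a minimal sketch of poking
the new write path directly at the character device with the patches
below applied.  The selector key 0x002c is just a made-up placeholder
for some writable item, the include path assumes you're building
against the source tree, and whether qemu actually accepts the write
depends on the item:

/*
 * Sketch only: needs the patched driver, a qemu with the fw_cfg DMA
 * interface, and a writable item at the (placeholder) index 0x002c.
 */
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include <dev/ic/qemufwcfgio.h>		/* or -I.../sys if not installed */

int
main(void)
{
	static const char payload[] = "hello, fw_cfg";
	uint16_t index = 0x002c;	/* placeholder selector key */
	int fd;

	if ((fd = open("/dev/qemufwcfg", O_RDWR)) == -1)
		err(EXIT_FAILURE, "open");
	if (ioctl(fd, FWCFGIO_SET_INDEX, &index) == -1)
		err(EXIT_FAILURE, "select");

	/*
	 * Selecting an item resets the device's data offset to zero, and
	 * a fresh descriptor starts at file offset zero too, so a plain
	 * write(2) matches the driver's offset bookkeeping and goes out
	 * over the DMA interface.
	 */
	if (write(fd, payload, sizeof(payload)) == -1)
		err(EXIT_FAILURE, "write");
	return EXIT_SUCCESS;
}
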
# HG changeset patch
# User Taylor R Campbell <riastradh@NetBSD.org>
# Date 1724594546 0
#      Sun Aug 25 14:02:26 2024 +0000
# Branch trunk
# Node ID 553cb2960ab36427d16ffda89f4c871a4eda1959
# Parent  cf7a8f9687ea781207542c43a006460dc134ea3b
# EXP-Topic riastradh-pr58639-qemufwcfgwrite
qemufwcfg(4): KNF and sprinkle needed includes.

No functional change intended.

PR kern/58639: qemufwcfg(4), mount_qemufwcfg(8): writes are not
supported

diff -r cf7a8f9687ea -r 553cb2960ab3 sys/dev/ic/qemufwcfg.c
--- a/sys/dev/ic/qemufwcfg.c	Sat Aug 24 07:24:34 2024 +0000
+++ b/sys/dev/ic/qemufwcfg.c	Sun Aug 25 14:02:26 2024 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: qemufwcfg.c,v 1.3 2024/04/06 13:42:18 skrll Exp $ */
+/*	$NetBSD: qemufwcfg.c,v 1.3 2024/04/06 13:42:18 skrll Exp $	*/
 
 /*-
  * Copyright (c) 2017 Jared McNeill <jmcneill@invisible.ca>
@@ -71,13 +71,16 @@ static dev_type_ioctl(fwcfg_ioctl);
 static void
 fwcfg_select(struct fwcfg_softc *sc, uint16_t index)
 {
-	bus_space_write_2(sc->sc_bst, sc->sc_bsh, FWCFG_SEL_REG, FWCFG_SEL_SWAP(index));
+
+	bus_space_write_2(sc->sc_bst, sc->sc_bsh, FWCFG_SEL_REG,
+	    FWCFG_SEL_SWAP(index));
 }
 
 static int
 fwcfg_open(dev_t dev, int flag, int mode, lwp_t *l)
 {
-	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd, FWCFGUNIT(dev));
+	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd,
+	    FWCFGUNIT(dev));
 	int error;
 
 	if (sc == NULL)
@@ -97,7 +100,8 @@ fwcfg_open(dev_t dev, int flag, int mode
 static int
 fwcfg_close(dev_t dev, int flag, int mode, lwp_t *l)
 {
-	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd, FWCFGUNIT(dev));
+	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd,
+	    FWCFGUNIT(dev));
 	int error;
 
 	if (sc == NULL)
@@ -117,7 +121,8 @@ fwcfg_close(dev_t dev, int flag, int mod
 static int
 fwcfg_read(dev_t dev, struct uio *uio, int flags)
 {
-	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd, FWCFGUNIT(dev));
+	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd,
+	    FWCFGUNIT(dev));
 	uint8_t buf[64];
 	size_t count;
 	int error = 0;
@@ -127,7 +132,8 @@ fwcfg_read(dev_t dev, struct uio *uio, i
 
 	while (uio->uio_resid > 0) {
 		count = uimin(sizeof(buf), uio->uio_resid);
-		bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG, buf, count);
+		bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG,
+		    buf, count);
 		error = uiomove(buf, count, uio);
 		if (error != 0)
 			break;
@@ -139,7 +145,8 @@ fwcfg_read(dev_t dev, struct uio *uio, i
 static int
 fwcfg_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
 {
-	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd, FWCFGUNIT(dev));
+	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd,
+	    FWCFGUNIT(dev));
 	uint16_t index;
 
 	if (sc == NULL)
@@ -179,6 +186,8 @@ fwcfg_attach(struct fwcfg_softc *sc)
 
 	/* Read signature */
 	fwcfg_select(sc, FW_CFG_SIGNATURE);
-	bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG, sig, sizeof(sig));
-	aprint_verbose_dev(sc->sc_dev, "<%c%c%c%c>\n", sig[0], sig[1], sig[2], sig[3]);
+	bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG,
+	    sig, sizeof(sig));
+	aprint_verbose_dev(sc->sc_dev, "<%c%c%c%c>\n",
+	    sig[0], sig[1], sig[2], sig[3]);
 }
diff -r cf7a8f9687ea -r 553cb2960ab3 sys/dev/ic/qemufwcfgvar.h
--- a/sys/dev/ic/qemufwcfgvar.h	Sat Aug 24 07:24:34 2024 +0000
+++ b/sys/dev/ic/qemufwcfgvar.h	Sun Aug 25 14:02:26 2024 +0000
@@ -29,6 +29,11 @@
 #ifndef _QEMUFWCFGVAR_H
 #define _QEMUFWCFGVAR_H
 
+#include <sys/bus.h>
+#include <sys/device_if.h>
+#include <sys/mutex.h>
+#include <sys/stdbool.h>
+
 struct fwcfg_softc {
 	device_t		sc_dev;
 	bus_space_tag_t		sc_bst;
# HG changeset patch
# User Taylor R Campbell <riastradh@NetBSD.org>
# Date 1724617724 0
#      Sun Aug 25 20:28:44 2024 +0000
# Branch trunk
# Node ID 2d2ce7a8cd570c3782acc4f08ea5ade90a2407ed
# Parent  553cb2960ab36427d16ffda89f4c871a4eda1959
# EXP-Topic riastradh-pr58639-qemufwcfgwrite
WIP: qemufwcfg(4), mount_qemufwcfg(8): Add write support.

PR kern/58639: qemufwcfg(4), mount_qemufwcfg(8): writes are not
supported

diff -r 553cb2960ab3 -r 2d2ce7a8cd57 sbin/mount_qemufwcfg/fwcfg.c
--- a/sbin/mount_qemufwcfg/fwcfg.c	Sun Aug 25 14:02:26 2024 +0000
+++ b/sbin/mount_qemufwcfg/fwcfg.c	Sun Aug 25 20:28:44 2024 +0000
@@ -58,24 +58,17 @@ struct fwcfg_file {
 
 static int fwcfg_fd;
 static mode_t fwcfg_dir_mask = 0555;
-static mode_t fwcfg_file_mask = 0444;
+static mode_t fwcfg_file_mask = 0644;
 static uid_t fwcfg_uid;
 static gid_t fwcfg_gid;
 
 static virtdir_t fwcfg_virtdir;
 
-static void
+static int
 set_index(uint16_t index)
 {
-	if (ioctl(fwcfg_fd, FWCFGIO_SET_INDEX, &index) != 0)
-		err(EXIT_FAILURE, "failed to set index 0x%04x", index);
-}
 
-static void
-read_data(void *buf, size_t buflen)
-{
-	if (read(fwcfg_fd, buf, buflen) != (ssize_t)buflen)
-		err(EXIT_FAILURE, "failed to read data");
+	return ioctl(fwcfg_fd, FWCFGIO_SET_INDEX, &index);
 }
 
 static int
@@ -144,6 +137,7 @@ fwcfg_read(const char *path, char *buf, 
 {
 	virt_dirent_t *ep;
 	uint8_t tmp[64];
+	ssize_t nread;
 
 	if ((ep = virtdir_find(&fwcfg_virtdir, path, strlen(path))) == NULL)
 		return -ENOENT;
@@ -151,28 +145,65 @@ fwcfg_read(const char *path, char *buf, 
 	if (ep->select == 0)
 		return -ENOENT;
 
-	set_index(ep->select);
+	if (set_index(ep->select) == -1)
+		return -errno;
 
 	/* Seek to correct offset */
 	while (offset > 0) {
 		const size_t len = MIN(sizeof(tmp), (size_t)offset);
-		read_data(tmp, len);
-		offset -= (off_t)len;
+
+		nread = read(fwcfg_fd, tmp, len);
+		if (nread == -1)
+			return -errno;
+		if (nread == 0)
+			return 0;
+		offset -= (off_t)nread;
 	}
 
 	/* Read the data */
-	read_data(buf, size);
+	nread = read(fwcfg_fd, buf, MIN(size, INT_MAX));
+	if (nread == -1)
+		return -errno;
 
 	return (int)size;
 }
 
 static int
+fwcfg_write(const char *path, const char *buf, size_t size, off_t offset,
+    struct fuse_file_info *fi)
+{
+	virt_dirent_t *ep;
+	ssize_t nwrit;
+
+	if ((ep = virtdir_find(&fwcfg_virtdir, path, strlen(path))) == NULL)
+		return -ENOENT;
+
+	if (ep->select == 0)
+		return -ENOENT;
+
+	if (set_index(ep->select) == -1)
+		return -errno;
+
+	nwrit = pwrite(fwcfg_fd, buf, MIN(size, INT_MAX), offset);
+	if (nwrit == -1)
+		return -errno;
+
+	return (int)nwrit;
+}
+
+static int
 fwcfg_statfs(const char *path, struct statvfs *st)
 {
 	uint32_t count;
+	ssize_t nread;
 
-	set_index(FW_CFG_FILE_DIR);
-	read_data(&count, sizeof(count));
+	if (set_index(FW_CFG_FILE_DIR) == -1)
+		return -errno;
+	nread = read(fwcfg_fd, &count, sizeof(count));
+	if (nread == -1)
+		return -errno;
+	if (nread != sizeof(count))
+		return -EIO;
 
 	memset(st, 0, sizeof(*st));
 	st->f_files = be32toh(count);
@@ -185,6 +216,8 @@ static struct fuse_operations fwcfg_ops 
 	.readdir = fwcfg_readdir,
 	.open = fwcfg_open,
 	.read = fwcfg_read,
+	.write = fwcfg_write,
+	.truncate = fwcfg_truncate,
 	.statfs = fwcfg_statfs,
 };
 
@@ -195,16 +228,26 @@ build_tree(virtdir_t *v)
 	struct fwcfg_file f;
 	uint32_t count, n;
 	struct stat st;
+	ssize_t nread;
 
 	memset(&st, 0, sizeof(st));
 	st.st_uid = fwcfg_uid;
 	st.st_gid = fwcfg_gid;
 	virtdir_init(v, NULL, &st, &st, &st);
 
-	set_index(FW_CFG_FILE_DIR);
-	read_data(&count, sizeof(count));
+	if (set_index(FW_CFG_FILE_DIR) == -1)
+		err(EXIT_FAILURE, "failed to select file directory");
+	nread = read(fwcfg_fd, &count, sizeof(count));
+	if (nread == -1)
+		err(EXIT_FAILURE, "failed to enumerate file directory");
+	if ((size_t)nread != sizeof(count))
+		errx(EXIT_FAILURE, "truncated file directory");
 	for (n = 0; n < be32toh(count); n++) {
-		read_data(&f, sizeof(f));
+		nread = read(fwcfg_fd, &f, sizeof(f));
+		if (nread == -1)
+			err(EXIT_FAILURE, "failed to read file directory");
+		if ((size_t)nread != sizeof(f))
+			errx(EXIT_FAILURE, "truncated file directory");
 		snprintf(path, sizeof(path), "/%s", f.name);
 		virtdir_add(v, path, strlen(path), 'f', NULL,
 		    be32toh(f.size), be16toh(f.select));
diff -r 553cb2960ab3 -r 2d2ce7a8cd57 sys/dev/acpi/qemufwcfg_acpi.c
--- a/sys/dev/acpi/qemufwcfg_acpi.c	Sun Aug 25 14:02:26 2024 +0000
+++ b/sys/dev/acpi/qemufwcfg_acpi.c	Sun Aug 25 20:28:44 2024 +0000
@@ -104,6 +104,9 @@ fwcfg_acpi_attach(device_t parent, devic
 		return;
 	}
 
+	sc->sc_dmat = BUS_DMA_TAG_VALID(aa->aa_dmat64) ? aa->aa_dmat64
+	    : aa->aa_dmat;
+
 	fwcfg_attach(sc);
 
 	pmf_device_register(self, NULL, NULL);
diff -r 553cb2960ab3 -r 2d2ce7a8cd57 sys/dev/fdt/qemufwcfg_fdt.c
--- a/sys/dev/fdt/qemufwcfg_fdt.c	Sun Aug 25 14:02:26 2024 +0000
+++ b/sys/dev/fdt/qemufwcfg_fdt.c	Sun Aug 25 20:28:44 2024 +0000
@@ -78,6 +78,7 @@ fwcfg_fdt_attach(device_t parent, device
 
 	sc->sc_dev = self;
 	sc->sc_bst = faa->faa_bst;
+	sc->sc_dmat = faa->faa_dmat;
 
 	if (bus_space_map(sc->sc_bst, base, size, 0, &sc->sc_bsh) != 0) {
 		aprint_error_dev(self, "couldn't map registers\n");
diff -r 553cb2960ab3 -r 2d2ce7a8cd57 sys/dev/ic/qemufwcfg.c
--- a/sys/dev/ic/qemufwcfg.c	Sun Aug 25 14:02:26 2024 +0000
+++ b/sys/dev/ic/qemufwcfg.c	Sun Aug 25 20:28:44 2024 +0000
@@ -64,6 +64,7 @@
 static dev_type_open(fwcfg_open);
 static dev_type_close(fwcfg_close);
 static dev_type_read(fwcfg_read);
+static dev_type_write(fwcfg_write);
 static dev_type_ioctl(fwcfg_ioctl);
 
 #define	FWCFGUNIT(d)	minor(d)
@@ -72,8 +73,97 @@ static void
 fwcfg_select(struct fwcfg_softc *sc, uint16_t index)
 {
 
+	mutex_enter(&sc->sc_lock);
 	bus_space_write_2(sc->sc_bst, sc->sc_bsh, FWCFG_SEL_REG,
 	    FWCFG_SEL_SWAP(index));
+	sc->sc_index = index;
+	sc->sc_dataoff = 0;
+	mutex_exit(&sc->sc_lock);
+}
+
+/*
+ * fwcfg_dma(sc)
+ *
+ *	Caller has initialized sc->sc_dmadesc with a DMA operation.
+ *	Trigger the DMA operation and wait for it to complete, fail, or
+ *	time out.
+ *
+ *	Currently qemu does these DMA operations synchronously, so
+ *	timeout should never happen, but the spec allows the host to do
+ *	asynchronous operations in the future.
+ */
+static int
+fwcfg_dma(struct fwcfg_softc *sc)
+{
+	uint32_t control;
+	unsigned timo = 1000;	/* up to 1ms */
+
+	/*
+	 * Caller initialized a DMA descriptor, so make sure the device
+	 * will see it before we trigger DMA (BUS_DMASYNC_PREWRITE),
+	 * and make sure we are ready to read what the device put there
+	 * when DMA completes (BUS_DMASYNC_PREREAD).
+	 */
+	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmadescmap,
+	    0, sizeof(*sc->sc_dmadesc),
+	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+
+	/*
+	 * Trigger DMA by writing the DMA descriptor's bus address to
+	 * the DMA address register, in big-endian -- high 32 bits
+	 * first, low 32 bits next.
+	 */
+	bus_space_write_4(sc->sc_bst, sc->sc_bsh, FWCFG_DMA_ADDR,
+	    htobe32(BUS_ADDR_HI32(sc->sc_dmadescmap->dm_segs[0].ds_addr)));
+	bus_space_write_4(sc->sc_bst, sc->sc_bsh, FWCFG_DMA_ADDR + 4,
+	    htobe32(BUS_ADDR_LO32(sc->sc_dmadescmap->dm_segs[0].ds_addr)));
+
+	/*
+	 * We have finished writing the descriptor to the device
+	 * (BUS_DMASYNC_POSTWRITE); make sure we are ready to read what
+	 * the device put there now that DMA has completed
+	 * (BUS_DMASYNC_POSTREAD).
+	 */
+	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmadescmap,
+	    0, sizeof(*sc->sc_dmadesc),
+	    BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);
+
+	/*
+	 * Wait until one of:
+	 *
+	 * (a) The transfer has successfully completed, zeroing the
+	 *     control register.
+	 *
+	 * (b) Something went wrong, and the device set the ERROR bit.
+	 *
+	 * (c) We have timed out and given up.
+	 */
+	while ((control = atomic_load_relaxed(&sc->sc_dmadesc->qd_control))
+	    != 0) {
+		if (control & FWCFG_DMA_CONTROL_ERROR)
+			return EIO;
+		if (--timo == 0)
+			return ETIMEDOUT;
+
+		/*
+		 * Pause for 1us before trying again.
+		 */
+		DELAY(1);
+
+		/*
+		 * If anything changed, make sure we observe updates
+		 * from the device (BUS_DMASYNC_POSTREAD).
+		 */
+		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmadescmap,
+		    offsetof(struct fwcfg_dma, qd_control),
+		    sizeof(sc->sc_dmadesc->qd_control),
+		    BUS_DMASYNC_POSTREAD);
+	}
+
+	/*
+	 * Success!
+	 */
+	return 0;
 }
 
 static int
@@ -130,15 +220,150 @@ fwcfg_read(dev_t dev, struct uio *uio, i
 	if (sc == NULL)
 		return ENXIO;
 
+	/*
+	 * Take the lock to serialize access to the device's data
+	 * offset.
+	 */
+	mutex_enter(&sc->sc_lock);
+
+	/*
+	 * Reject transfers that aren't at the device's current data
+	 * offset.
+	 *
+	 * XXX Do these with the DMA interface instead, if supported.
+	 *
+	 * XXX OOPS -- This breaks userland compat.
+	 */
+	if (0 && uio->uio_offset != sc->sc_dataoff) {
+		error = ENODEV;
+		goto out;
+	}
+
+	/*
+	 * Grab chunks of up to 64 bytes at a time by reading bytes
+	 * sequentially from the data register, and uiomove them out to
+	 * the user.
+	 */
 	while (uio->uio_resid > 0) {
 		count = uimin(sizeof(buf), uio->uio_resid);
 		bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG,
 		    buf, count);
+		sc->sc_dataoff += count;
 		error = uiomove(buf, count, uio);
 		if (error != 0)
 			break;
 	}
 
+out:	mutex_exit(&sc->sc_lock);
+	return error;
+}
+
+static int
+fwcfg_write(dev_t dev, struct uio *uio, int flags)
+{
+	struct fwcfg_softc * const sc = device_lookup_private(&qemufwcfg_cd,
+	    FWCFGUNIT(dev));
+	bool loaded = false;
+	size_t xfersz = 0;
+	int error;
+
+	if (sc == NULL)
+		return ENXIO;
+
+	/*
+	 * Reject writes to hosts that don't support the DMA interface.
+	 *
+	 * Extremely old versions of qemu support writes through the
+	 * traditional interface too.  If anyone still cares about qemu
+	 * v2.4, we can consider doing that.
+	 */
+	if ((sc->sc_id & FW_CFG_ID_DMA_IF) == 0)
+		return ENODEV;
+
+	/*
+	 * Reject writes to negative space or to bytes beyond the
+	 * 32-bit address space, since we can't seek past there and
+	 * presumably qemu doesn't hold any data beyond.
+	 */
+	if (uio->uio_offset < 0 ||
+	    uio->uio_offset >= UINT32_MAX)
+		return EIO;
+
+	/*
+	 * Take the lock to serialize access to the device's data
+	 * offset and DMA.
+	 */
+	mutex_enter(&sc->sc_lock);
+
+	/*
+	 * If the transfer isn't to the device's current data offset,
+	 * reset to the start by re-selecting the index, and skip bytes
+	 * up to the transfer's offset.
+	 */
+	if (uio->uio_offset != sc->sc_dataoff) {
+		memset(sc->sc_dmadesc, 0, sizeof(*sc->sc_dmadesc));
+		sc->sc_dmadesc->qd_control = FWCFG_DMA_CONTROL_SKIP |
+		    FWCFG_DMA_CONTROL_SELECT |
+		    __SHIFTIN(sc->sc_index, FWCFG_DMA_CONTROL_INDEX);
+		sc->sc_dmadesc->qd_length = (uint32_t)uio->uio_offset;
+		sc->sc_dmadesc->qd_address = 0;
+		error = fwcfg_dma(sc);
+		if (error)
+			goto out;
+	}
+
+	/*
+	 * Load and sync the DMA map for write transfer from the user's
+	 * buffer.
+	 */
+	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmadatamap, uio,
+	    BUS_DMA_NOWAIT);
+	if (error)
+		goto out;
+	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmadatamap,
+	    0, sc->sc_dmadatamap->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE);
+	loaded = true;
+
+	/*
+	 * Determine the number of bytes we can actually transfer:
+	 *
+	 * 1. Limit it to the residual size of the transfer, uio_resid.
+	 * 2. Limit it to the size of the DMA map.
+	 * 3. Limit it to end at an offset of 2^32 - 1, since we can't seek
+	 *    past there and presumably qemu doesn't hold any data beyond.
+	 */
+	xfersz = MIN(MIN(uio->uio_resid, sc->sc_dmadatamap->dm_mapsize),
+	    UINT32_MAX - uio->uio_offset);
+
+	/*
+	 * Initialize a DMA operation for write transfer from the
+	 * user's data buffer.
+	 */
+	memset(sc->sc_dmadesc, 0, sizeof(*sc->sc_dmadesc));
+	sc->sc_dmadesc->qd_control = FWCFG_DMA_CONTROL_WRITE;
+	sc->sc_dmadesc->qd_length = xfersz;
+	sc->sc_dmadesc->qd_address = sc->sc_dmadatamap->dm_segs[0].ds_addr;
+	error = fwcfg_dma(sc);
+	if (error)
+		goto out;
+
+	/*
+	 * Success!  Advance the transfer and the device's data offset.
+	 */
+	uio->uio_resid -= xfersz;
+	uio->uio_offset += xfersz;
+	sc->sc_dataoff = uio->uio_offset;
+	error = 0;
+
+out:	if (loaded) {
+		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmadatamap,
+		    0, sc->sc_dmadatamap->dm_mapsize,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->sc_dmat, sc->sc_dmadatamap);
+	}
+
+	mutex_exit(&sc->sc_lock);
 	return error;
 }
 
@@ -166,7 +391,7 @@ const struct cdevsw qemufwcfg_cdevsw = {
 	.d_open = fwcfg_open,
 	.d_close = fwcfg_close,
 	.d_read = fwcfg_read,
-	.d_write = nowrite,
+	.d_write = fwcfg_write,
 	.d_ioctl = fwcfg_ioctl,
 	.d_stop = nostop,
 	.d_tty = notty,
@@ -181,13 +406,162 @@ void
 fwcfg_attach(struct fwcfg_softc *sc)
 {
 	char sig[4];
+	uint8_t id_le[4];
+	char idstr[128];
+	int error;
 
 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 
-	/* Read signature */
+	/*
+	 * Read signature and id.  Print the signature and the
+	 * supported features.
+	 */
 	fwcfg_select(sc, FW_CFG_SIGNATURE);
 	bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG,
 	    sig, sizeof(sig));
-	aprint_verbose_dev(sc->sc_dev, "<%c%c%c%c>\n",
-	    sig[0], sig[1], sig[2], sig[3]);
+	fwcfg_select(sc, FW_CFG_ID);
+	bus_space_read_multi_1(sc->sc_bst, sc->sc_bsh, FWCFG_DATA_REG,
+	    id_le, sizeof(id_le));
+	sc->sc_id = le32dec(id_le);
+	snprintb(idstr, sizeof(idstr), FW_CFG_ID_FMT, sc->sc_id);
+	aprint_verbose_dev(sc->sc_dev, "<%c%c%c%c> features=%s\n",
+	    sig[0], sig[1], sig[2], sig[3], idstr);
+
+	/*
+	 * If the traditional interface is not supported, report an
+	 * error.
+	 */
+	if ((sc->sc_id & FW_CFG_ID_TRAD_IF) == 0) {
+		aprint_error_dev(sc->sc_dev,
+		    "traditional interface not supported\n");
+	}
+
+	/*
+	 * If DMA is supported, create a DMA descriptor and DMA maps.
+	 */
+	if (sc->sc_id & FW_CFG_ID_DMA_IF) do {
+		uint64_t dmasig;
+		int rseg = 0;
+		void *kva = NULL;
+		bool loaded = false;
+
+		/*
+		 * Verify DMA is supported: DMA address register should
+		 * read out 0x51454d5520434647, or `QEMU CFG' in
+		 * big-endian.
+		 */
+		dmasig = be32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+			FWCFG_DMA_ADDR + 4));
+		dmasig |= (uint64_t)be32toh(bus_space_read_4(sc->sc_bst,
+			sc->sc_bsh, FWCFG_DMA_ADDR)) << 32;
+		if (dmasig != 0x51454d5520434647) {
+			aprint_error_dev(sc->sc_dev, "bad DMA signature:"
+			    " 0x%016"PRIx64"\n",
+			    dmasig);
+			goto nodma;
+		}
+
+		/*
+		 * Allocate memory for the DMA descriptor.
+		 */
+		error = bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dmadesc),
+		    /*align*/_Alignof(*sc->sc_dmadesc), /*boundary*/0,
+		    &sc->sc_dmadescseg, /*nseg*/1, &rseg,
+		    BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK);
+		if (error) {
+			aprint_error_dev(sc->sc_dev, "bus_dmamem_alloc: %d\n",
+			    error);
+			goto nodma;
+		}
+		KASSERTMSG(rseg == 1, "rseg=%d", rseg);
+
+		/*
+		 * Map the DMA descriptor memory into KVA.
+		 */
+		error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmadescseg, 1,
+		    sizeof(*sc->sc_dmadesc), &kva,
+		    BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK);
+		if (error) {
+			aprint_error_dev(sc->sc_dev, "bus_dmamem_map: %d\n",
+			    error);
+			goto nodma;
+		}
+		sc->sc_dmadesc = kva;
+
+		/*
+		 * Create a DMA map for the DMA descriptor so we can
+		 * punch its bus address into the DMA address register.
+		 */
+		error = bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dmadesc),
+		    /*nseg*/1, /*maxsegsz*/sizeof(*sc->sc_dmadesc),
+		    /*boundary*/0,
+		    BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK,
+		    &sc->sc_dmadescmap);
+		if (error) {
+			aprint_error_dev(sc->sc_dev, "bus_dmamap_create(desc):"
+			    " %d\n",
+			    error);
+			goto nodma;
+		}
+
+		/*
+		 * Load the DMA descriptor into the DMA map so we can
+		 * get the bus address to punch into the DMA address
+		 * register.
+		 */
+		error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmadescmap,
+		    sc->sc_dmadesc, sizeof(*sc->sc_dmadesc), NULL,
+		    BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK);
+		if (error) {
+			aprint_error_dev(sc->sc_dev, "bus_dmamap_load: %d\n",
+			    error);
+			goto nodma;
+		}
+		loaded = true;
+
+		/*
+		 * Create a DMA map for a transfer of up to one page in
+		 * a single segment.
+		 */
+		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
+		    /*nseg*/1, /*maxsegsz*/PAGE_SIZE,
+		    /*boundary*/PAGE_SIZE,
+		    BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK,
+		    &sc->sc_dmadatamap);
+		if (error) {
+			aprint_error_dev(sc->sc_dev, "bus_dmamap_create(data):"
+			    " %d\n",
+			    error);
+			goto nodma;
+		}
+
+		/*
+		 * Success!
+		 */
+		break;
+
+nodma:		aprint_error_dev(sc->sc_dev, "disabling DMA\n");
+		sc->sc_id &= ~FW_CFG_ID_DMA_IF;
+		if (sc->sc_dmadatamap) {
+			bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmadatamap);
+			sc->sc_dmadatamap = NULL;
+		}
+		if (loaded) {
+			bus_dmamap_unload(sc->sc_dmat, sc->sc_dmadescmap);
+			loaded = false;
+		}
+		if (sc->sc_dmadescmap) {
+			bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmadescmap);
+			sc->sc_dmadescmap = NULL;
+		}
+		if (sc->sc_dmadesc) {
+			bus_dmamem_unmap(sc->sc_dmat, sc->sc_dmadesc,
+			    sizeof(*sc->sc_dmadesc));
+			sc->sc_dmadesc = NULL;
+		}
+		if (rseg) {
+			bus_dmamem_free(sc->sc_dmat, &sc->sc_dmadescseg, 1);
+			rseg = 0;
+		}
+	} while (0);
 }
diff -r 553cb2960ab3 -r 2d2ce7a8cd57 sys/dev/ic/qemufwcfgio.h
--- a/sys/dev/ic/qemufwcfgio.h	Sun Aug 25 14:02:26 2024 +0000
+++ b/sys/dev/ic/qemufwcfgio.h	Sun Aug 25 20:28:44 2024 +0000
@@ -34,6 +34,12 @@
 /* Fixed selector keys */
 #define	FW_CFG_SIGNATURE	0x0000	/* Signature */
 #define	FW_CFG_ID		0x0001	/* Revision / feature bitmap */
+#define	 FW_CFG_ID_TRAD_IF		__BIT(0) /* Traditional interface */
+#define	 FW_CFG_ID_DMA_IF		__BIT(1) /* DMA interface */
+#define	 FW_CFG_ID_FMT		"\177\020"				      \
+	"b\000"		"TRAD_IF\0"					      \
+	"b\001"		"DMA_IF\0"					      \
+	"\0"
 #define	FW_CFG_FILE_DIR		0x0019	/* File directory */
 #define	FW_CFG_FILE_FIRST	0x0020	/* First file in directory */
 
diff -r 553cb2960ab3 -r 2d2ce7a8cd57 sys/dev/ic/qemufwcfgvar.h
--- a/sys/dev/ic/qemufwcfgvar.h	Sun Aug 25 14:02:26 2024 +0000
+++ b/sys/dev/ic/qemufwcfgvar.h	Sun Aug 25 20:28:44 2024 +0000
@@ -29,18 +29,42 @@
 #ifndef _QEMUFWCFGVAR_H
 #define _QEMUFWCFGVAR_H
 
+#include <sys/types.h>
+
 #include <sys/bus.h>
+#include <sys/cdefs.h>
 #include <sys/device_if.h>
 #include <sys/mutex.h>
-#include <sys/stdbool.h>
+
+struct fwcfg_dma {		/* FWCfgDmaAccess */
+	uint32_t	qd_control;
+#define	FWCFG_DMA_CONTROL_ERROR		__BIT(0)
+#define	FWCFG_DMA_CONTROL_READ		__BIT(1)
+#define	FWCFG_DMA_CONTROL_SKIP		__BIT(2)
+#define	FWCFG_DMA_CONTROL_SELECT	__BIT(3)
+#define	FWCFG_DMA_CONTROL_WRITE		__BIT(4)
+#define	FWCFG_DMA_CONTROL_INDEX		__BITS(16,31)
+	uint32_t	qd_length;
+	uint64_t	qd_address;
+};
 
 struct fwcfg_softc {
 	device_t		sc_dev;
 	bus_space_tag_t		sc_bst;
 	bus_space_handle_t	sc_bsh;
+	bus_dma_tag_t		sc_dmat;
+
+	uint32_t		sc_id;
 
 	kmutex_t		sc_lock;
 	bool			sc_open;
+	uint16_t		sc_index;
+	off_t			sc_dataoff;
+
+	bus_dmamap_t		sc_dmadescmap;
+	bus_dma_segment_t	sc_dmadescseg;
+	struct fwcfg_dma	*sc_dmadesc;
+	bus_dmamap_t		sc_dmadatamap;
 };
 
 void	fwcfg_attach(struct fwcfg_softc *);

