Subject: ugen change for review
To: tech-kern@netbsd.org
From: Joanne M Mikkelson <jmmikkel@bbn.com>
List: tech-kern
Date: 07/19/2006 20:58:39
--2754.Wed.Jul.19.20:47:34.EDT.2006
Content-type: text/plain
Content-Transfer-Encoding: 7bit

Hi,

I've attached a patch for review that adds optional bulk read-ahead
and write-behind buffering to ugen, improving bulk USB transfer
rates when enabled.  This is the work proposed earlier to make the
USRP work better.  (See
http://mail-index.netbsd.org/tech-kern/2006/05/03/0008.html for the
proposal.)  If the changes are okay, Greg Troxel will commit them.
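
In case it helps review, here is a minimal sketch of how the new
ioctls are meant to be used from userland once the kernel is built
with "options UGEN_BULK_RA_WB".  The device node and the sizes below
are just examples; write-behind on an OUT endpoint works the same
way via USB_SET_BULK_WB and USB_SET_BULK_WB_OPT:

    #include <sys/ioctl.h>
    #include <dev/usb/usb.h>
    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct usb_bulk_ra_wb_opt opt;
        char buf[4096];
        int fd, on = 1;

        /* e.g., bulk IN endpoint 1 of the first ugen device */
        fd = open("/dev/ugen0.01", O_RDONLY);
        if (fd < 0)
            err(1, "open");

        /*
         * Options take effect the next time read-ahead is enabled,
         * so set them before turning it on.
         */
        opt.ra_wb_buffer_size = 1 << 20;   /* 1 MB ring buffer */
        opt.ra_wb_request_size = 16384;    /* 16 KB per transfer */
        if (ioctl(fd, USB_SET_BULK_RA_OPT, &opt) < 0)
            err(1, "USB_SET_BULK_RA_OPT");
        if (ioctl(fd, USB_SET_BULK_RA, &on) < 0)
            err(1, "USB_SET_BULK_RA");

        /*
         * Reads now drain the driver's buffer while the endpoint
         * is read continuously in the background.
         */
        if (read(fd, buf, sizeof(buf)) < 0)
            err(1, "read");

        close(fd);
        return 0;
    }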

Thanks in advance!

Joanne


--2754.Wed.Jul.19.20:47:34.EDT.2006
Content-type: text/plain;
 name="ugen.diff"
Content-Disposition: inline;
 filename="ugen.diff"
Content-Transfer-Encoding: 7bit

Index: sys/dev/usb/usb.h
===================================================================
RCS file: /cvs/netbsd/netbsd/src/sys/dev/usb/usb.h,v
retrieving revision 1.1.1.1
retrieving revision 1.3
diff -d -r1.1.1.1 -r1.3
632a633,637
> struct usb_bulk_ra_wb_opt {
> 	int	ra_wb_buffer_size;
> 	int	ra_wb_request_size;
> };
> 
686a692,695
> #define USB_SET_BULK_RA		_IOW ('U', 115, int)
> #define USB_SET_BULK_WB		_IOW ('U', 116, int)
> #define USB_SET_BULK_RA_OPT	_IOW ('U', 117, struct usb_bulk_ra_wb_opt)
> #define USB_SET_BULK_WB_OPT	_IOW ('U', 118, struct usb_bulk_ra_wb_opt)
Index: sys/dev/usb/ugen.c
===================================================================
RCS file: /cvs/netbsd/netbsd/src/sys/dev/usb/ugen.c,v
retrieving revision 1.1.1.2
retrieving revision 1.11
diff -d -r1.1.1.2 -r1.11
10a11,15
>  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
>  * Effort sponsored in part by the Defense Advanced Research Projects
>  * Agency (DARPA) and the Department of the Interior National Business
>  * Center under agreement number NBCHC050166.
>  *
43a49,50
> #include "opt_ugen_bulk_ra_wb.h"
> 
87a95,98
> #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
> #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
> 
> 
94a106,108
> #define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
> #define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
> #define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
102a117,123
> #ifdef UGEN_BULK_RA_WB
> 	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
> 	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
> 	u_int32_t ra_wb_used;	 /* how much is in buffer */
> 	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
> 	usbd_xfer_handle ra_wb_xfer;
> #endif
171a193,198
> #ifdef UGEN_BULK_RA_WB
> Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
> 			     usbd_status status);
> Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
> 			     usbd_status status);
> #endif
398a426,433
> #ifdef UGEN_BULK_RA_WB
> 			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
> 			/* 
> 			 * Use request size for non-RA/WB transfers
> 			 * as the default.
> 			 */
> 			sce->ra_wb_reqsize = UGEN_BBSIZE;
> #endif
502a538,545
> 			break;
> #ifdef UGEN_BULK_RA_WB
> 		case UE_BULK:
> 			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
> 				/* ibuf freed below */
> 				usbd_free_xfer(sce->ra_wb_xfer);
> 			break;
> #endif
586a630,703
> #ifdef UGEN_BULK_RA_WB
> 		if (sce->state & UGEN_BULK_RA) {
> 			DPRINTFN(5, ("ugenread: BULK_RA req: %d used: %d\n",
> 				     uio->uio_resid, sce->ra_wb_used));
> 			xfer = sce->ra_wb_xfer;
> 
> 			s = splusb();
> 			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
> 				splx(s);
> 				return (EWOULDBLOCK);
> 			}
> 			while (uio->uio_resid > 0 && !error) {
> 				while (sce->ra_wb_used == 0) {
> 					sce->state |= UGEN_ASLP;
> 					DPRINTFN(5,
> 						 ("ugenread: sleep on %p\n",
> 						  sce));
> 					error = tsleep(sce, PZERO | PCATCH,
> 						       "ugenrb", 0);
> 					DPRINTFN(5,
> 						 ("ugenread: woke, error=%d\n",
> 						  error));
> 					if (sc->sc_dying)
> 						error = EIO;
> 					if (error) {
> 						sce->state &= ~UGEN_ASLP;
> 						break;
> 					}
> 				}
> 
> 				/* Copy data to the process. */
> 				while (uio->uio_resid > 0
> 				       && sce->ra_wb_used > 0) {
> 					n = min(uio->uio_resid,
> 						sce->ra_wb_used);
> 					n = min(n, sce->limit - sce->cur);
> 					error = uiomove(sce->cur, n, uio);
> 					if (error)
> 						break;
> 					sce->cur += n;
> 					sce->ra_wb_used -= n;
> 					if (sce->cur == sce->limit)
> 						sce->cur = sce->ibuf;
> 				}
> 
> 				/* 
> 				 * If the transfers stopped because the
> 				 * buffer was full, restart them.
> 				 */
> 				if (sce->state & UGEN_RA_WB_STOP &&
> 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
> 					n = (sce->limit - sce->ibuf)
> 					    - sce->ra_wb_used;
> 					usbd_setup_xfer(xfer,
> 					    sce->pipeh, sce, NULL,
> 					    min(n, sce->ra_wb_xferlen),
> 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
> 					    ugen_bulkra_intr);
> 					sce->state &= ~UGEN_RA_WB_STOP;
> 					err = usbd_transfer(xfer);
> 					if (err != USBD_IN_PROGRESS)
> 						/*
> 						 * The transfer has not been
> 						 * queued.  Setting STOP
> 						 * will make us try
> 						 * again at the next read.
> 						 */
> 						sce->state |= UGEN_RA_WB_STOP;
> 				}
> 			}
> 			splx(s);
> 			break;
> 		}
> #endif
680a798,802
> #ifdef UGEN_BULK_RA_WB
> 	int s;
> 	u_int32_t tn;
> 	char *dbuf;
> #endif
704a827,909
> #ifdef UGEN_BULK_RA_WB
> 		if (sce->state & UGEN_BULK_WB) {
> 			DPRINTFN(5, ("ugenwrite: BULK_WB req: %d used: %d\n",
> 				     uio->uio_resid, sce->ra_wb_used));
> 			xfer = sce->ra_wb_xfer;
> 
> 			s = splusb();
> 			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
> 			    flag & IO_NDELAY) {
> 				splx(s);
> 				return (EWOULDBLOCK);
> 			}
> 			while (uio->uio_resid > 0 && !error) {
> 				while (sce->ra_wb_used == 
> 				       sce->limit - sce->ibuf) {
> 					sce->state |= UGEN_ASLP;
> 					DPRINTFN(5,
> 						 ("ugenwrite: sleep on %p\n",
> 						  sce));
> 					error = tsleep(sce, PZERO | PCATCH,
> 						       "ugenwb", 0);
> 					DPRINTFN(5,
> 						 ("ugenwrite: woke, error=%d\n",
> 						  error));
> 					if (sc->sc_dying)
> 						error = EIO;
> 					if (error) {
> 						sce->state &= ~UGEN_ASLP;
> 						break;
> 					}
> 				}
> 
> 				/* Copy data from the process. */
> 				while (uio->uio_resid > 0 &&
> 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
> 					n = min(uio->uio_resid,
> 						(sce->limit - sce->ibuf)
> 						 - sce->ra_wb_used);
> 					n = min(n, sce->limit - sce->fill);
> 					error = uiomove(sce->fill, n, uio);
> 					if (error)
> 						break;
> 					sce->fill += n;
> 					sce->ra_wb_used += n;
> 					if (sce->fill == sce->limit)
> 						sce->fill = sce->ibuf;
> 				}
> 
> 				/*
> 				 * If the transfers stopped because the
> 				 * buffer was empty, restart them.
> 				 */
> 				if (sce->state & UGEN_RA_WB_STOP &&
> 				    sce->ra_wb_used > 0) {
> 					dbuf = (char *)usbd_get_buffer(xfer);
> 					n = min(sce->ra_wb_used,
> 						sce->ra_wb_xferlen);
> 					tn = min(n, sce->limit - sce->cur);
> 					memcpy(dbuf, sce->cur, tn);
> 					dbuf += tn;
> 					if (n - tn > 0)
> 						memcpy(dbuf, sce->ibuf,
> 						       n - tn);
> 					usbd_setup_xfer(xfer,
> 					    sce->pipeh, sce, NULL, n,
> 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
> 					    ugen_bulkwb_intr);
> 					sce->state &= ~UGEN_RA_WB_STOP;
> 					err = usbd_transfer(xfer);
> 					if (err != USBD_IN_PROGRESS)
> 						/*
> 						 * The transfer has not been
> 						 * queued.  Setting STOP
> 						 * will make us try again
> 						 * at the next read.
> 						 * at the next write.
> 						sce->state |= UGEN_RA_WB_STOP;
> 				}
> 			}
> 			splx(s);
> 			break;
> 		}
> #endif
942a1148,1281
> #ifdef UGEN_BULK_RA_WB
> Static void
> ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
> 		 usbd_status status)
> {
> 	struct ugen_endpoint *sce = addr;
> 	u_int32_t count, n;
> 	char const *tbuf;
> 	usbd_status err;
> 
> 	/* Return if we are aborting. */
> 	if (status == USBD_CANCELLED)
> 		return;
> 
> 	if (status != USBD_NORMAL_COMPLETION) {
> 		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
> 		sce->state |= UGEN_RA_WB_STOP;
> 		if (status == USBD_STALLED)
> 		    usbd_clear_endpoint_stall_async(sce->pipeh);
> 		return;
> 	}
> 
> 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
> 
> 	/* Keep track of how much is in the buffer. */
> 	sce->ra_wb_used += count;
> 
> 	/* Copy data to buffer. */
> 	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
> 	n = min(count, sce->limit - sce->fill);
> 	memcpy(sce->fill, tbuf, n);
> 	tbuf += n;
> 	count -= n;
> 	sce->fill += n;
> 	if (sce->fill == sce->limit)
> 		sce->fill = sce->ibuf;
> 	if (count > 0) {
> 		memcpy(sce->fill, tbuf, count);
> 		sce->fill += count;
> 	}
> 
> 	/* Set up the next request if necessary. */
> 	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
> 	if (n > 0) {
> 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
> 		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
> 		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
> 		err = usbd_transfer(xfer);
> 		if (err != USBD_IN_PROGRESS) {
> 			printf("ugen_bulkra_intr: error=%d\n", err);
> 			/*
> 			 * The transfer has not been queued.  Setting STOP
> 			 * will make us try again at the next read.
> 			 */
> 			sce->state |= UGEN_RA_WB_STOP;
> 		}
> 	}
> 	else
> 		sce->state |= UGEN_RA_WB_STOP;
> 
> 	if (sce->state & UGEN_ASLP) {
> 		sce->state &= ~UGEN_ASLP;
> 		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
> 		wakeup(sce);
> 	}
> 	selnotify(&sce->rsel, 0);
> }
> 
> Static void
> ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
> 		 usbd_status status)
> {
> 	struct ugen_endpoint *sce = addr;
> 	u_int32_t count, n;
> 	char *tbuf;
> 	usbd_status err;
> 
> 	/* Return if we are aborting. */
> 	if (status == USBD_CANCELLED)
> 		return;
> 
> 	if (status != USBD_NORMAL_COMPLETION) {
> 		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
> 		sce->state |= UGEN_RA_WB_STOP;
> 		if (status == USBD_STALLED)
> 		    usbd_clear_endpoint_stall_async(sce->pipeh);
> 		return;
> 	}
> 
> 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
> 
> 	/* Keep track of how much is in the buffer. */
> 	sce->ra_wb_used -= count;
> 
> 	/* Update buffer pointers. */
> 	sce->cur += count;
> 	if (sce->cur >= sce->limit)
> 		sce->cur = sce->ibuf + (sce->cur - sce->limit); 
> 
> 	/* Set up next request if necessary. */
> 	if (sce->ra_wb_used > 0) {
> 		/* copy data from buffer */
> 		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
> 		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
> 		n = min(count, sce->limit - sce->cur);
> 		memcpy(tbuf, sce->cur, n);
> 		tbuf += n;
> 		if (count - n > 0)
> 			memcpy(tbuf, sce->ibuf, count - n);
> 
> 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
> 		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
> 		err = usbd_transfer(xfer);
> 		if (err != USBD_IN_PROGRESS) {
> 			printf("ugen_bulkwb_intr: error=%d\n", err);
> 			/*
> 			 * The transfer has not been queued.  Setting STOP
> 			 * will make us try again at the next write.
> 			 */
> 			sce->state |= UGEN_RA_WB_STOP;
> 		}
> 	}
> 	else
> 		sce->state |= UGEN_RA_WB_STOP;
> 
> 	if (sce->state & UGEN_ASLP) {
> 		sce->state &= ~UGEN_ASLP;
> 		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
> 		wakeup(sce);
> 	}
> 	selnotify(&sce->rsel, 0);
> }
> #endif
> 
1091a1431,1593
> 	case USB_SET_BULK_RA:
> #ifdef UGEN_BULK_RA_WB
> 		if (endpt == USB_CONTROL_ENDPOINT)
> 			return (EINVAL);
> 		sce = &sc->sc_endpoints[endpt][IN];
> 		if (sce == NULL || sce->pipeh == NULL)
> 			return (EINVAL);
> 		edesc = sce->edesc;
> 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
> 			return (EINVAL);
> 
> 		if (*(int *)addr) {
> 			/* Only turn RA on if it's currently off. */
> 			if (sce->state & UGEN_BULK_RA)
> 				return (0);
> 
> 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
> 				/* shouldn't happen */
> 				return (EINVAL);
> 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
> 			if (sce->ra_wb_xfer == NULL)
> 				return (ENOMEM);
> 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
> 			/*
> 			 * Set up a dmabuf because we reuse the xfer with
> 			 * the same (max) request length, as isoc does.
> 			 */
> 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
> 					      sce->ra_wb_xferlen) == 0) {
> 				usbd_free_xfer(sce->ra_wb_xfer);
> 				return (ENOMEM);
> 			}
> 			sce->ibuf = malloc(sce->ra_wb_bufsize,
> 					   M_USBDEV, M_WAITOK);
> 			sce->fill = sce->cur = sce->ibuf;
> 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
> 			sce->ra_wb_used = 0;
> 			sce->state |= UGEN_BULK_RA;
> 			sce->state &= ~UGEN_RA_WB_STOP;
> 			/* Now start reading. */
> 			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
> 			    NULL,
> 			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
> 			    USBD_NO_COPY, USBD_NO_TIMEOUT,
> 			    ugen_bulkra_intr);
> 			err = usbd_transfer(sce->ra_wb_xfer);
> 			if (err != USBD_IN_PROGRESS) {
> 				sce->state &= ~UGEN_BULK_RA;
> 				free(sce->ibuf, M_USBDEV);
> 				sce->ibuf = NULL;
> 				usbd_free_xfer(sce->ra_wb_xfer);
> 				return (EIO);
> 			}
> 		} else {
> 			/* Only turn RA off if it's currently on. */
> 			if (!(sce->state & UGEN_BULK_RA))
> 				return (0);
> 
> 			sce->state &= ~UGEN_BULK_RA;
> 			usbd_abort_pipe(sce->pipeh);
> 			usbd_free_xfer(sce->ra_wb_xfer);
> 			/*
> 			 * XXX Discard whatever's in the buffer, but we
> 			 * should keep it around and drain the buffer
> 			 * instead.
> 			 */
> 			free(sce->ibuf, M_USBDEV);
> 			sce->ibuf = NULL;
> 		}
> 		return (0);
> #else
> 		return (EOPNOTSUPP);
> #endif
> 	case USB_SET_BULK_WB:
> #ifdef UGEN_BULK_RA_WB
> 		if (endpt == USB_CONTROL_ENDPOINT)
> 			return (EINVAL);
> 		sce = &sc->sc_endpoints[endpt][OUT];
> 		if (sce == NULL || sce->pipeh == NULL)
> 			return (EINVAL);
> 		edesc = sce->edesc;
> 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
> 			return (EINVAL);
> 
> 		if (*(int *)addr) {
> 			/* Only turn WB on if it's currently off. */
> 			if (sce->state & UGEN_BULK_WB)
> 				return (0);
> 
> 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
> 				/* shouldn't happen */
> 				return (EINVAL);
> 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
> 			if (sce->ra_wb_xfer == NULL)
> 				return (ENOMEM);
> 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
> 			/*
> 			 * Set up a dmabuf because we reuse the xfer with
> 			 * the same (max) request length, as isoc does.
> 			 */
> 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
> 					      sce->ra_wb_xferlen) == 0) {
> 				usbd_free_xfer(sce->ra_wb_xfer);
> 				return (ENOMEM);
> 			}
> 			sce->ibuf = malloc(sce->ra_wb_bufsize,
> 					   M_USBDEV, M_WAITOK);
> 			sce->fill = sce->cur = sce->ibuf;
> 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
> 			sce->ra_wb_used = 0;
> 			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
> 		} else {
> 			/* Only turn WB off if it's currently on. */
> 			if (!(sce->state & UGEN_BULK_WB))
> 				return (0);
> 
> 			sce->state &= ~UGEN_BULK_WB;
> 			/*
> 			 * XXX Discard whatever's in the buffer, but we
> 			 * should keep it around and keep writing to 
> 			 * drain the buffer instead.
> 			 */
> 			usbd_abort_pipe(sce->pipeh);
> 			usbd_free_xfer(sce->ra_wb_xfer);
> 			free(sce->ibuf, M_USBDEV);
> 			sce->ibuf = NULL;
> 		}
> 		return (0);
> #else
> 		return (EOPNOTSUPP);
> #endif
> 	case USB_SET_BULK_RA_OPT:
> 	case USB_SET_BULK_WB_OPT:
> #ifdef UGEN_BULK_RA_WB
> 	{
> 		struct usb_bulk_ra_wb_opt *opt;
> 
> 		if (endpt == USB_CONTROL_ENDPOINT)
> 			return (EINVAL);
> 		opt = (struct usb_bulk_ra_wb_opt *)addr;
> 		if (cmd == USB_SET_BULK_RA_OPT)
> 			sce = &sc->sc_endpoints[endpt][IN];
> 		else
> 			sce = &sc->sc_endpoints[endpt][OUT];
> 		if (sce == NULL || sce->pipeh == NULL)
> 			return (EINVAL);
> 		if (opt->ra_wb_buffer_size < 1 ||
> 		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
> 		    opt->ra_wb_request_size < 1 ||
> 		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
> 			return (EINVAL);
> 		/* 
> 		 * XXX These changes do not take effect until the
> 		 * next time RA/WB mode is enabled but they ought to
> 		 * take effect immediately.
> 		 */
> 		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
> 		sce->ra_wb_reqsize = opt->ra_wb_request_size;
> 		return (0);
> 	}
> #else
> 		return (EOPNOTSUPP);
> #endif
1333c1835
< 	struct ugen_endpoint *sce;
---
> 	struct ugen_endpoint *sce_in, *sce_out;
1342,1344c1844,1846
< 	/* XXX always IN */
< 	sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
< 	if (sce == NULL)
---
> 	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
> 	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
> 	if (sce_in == NULL && sce_out == NULL)
1347c1849
< 	if (!sce->edesc) {
---
> 	if (!sce_in->edesc && !sce_out->edesc) {
1351c1853,1854
< 	if (!sce->pipeh) {
---
> 	/* It's possible to have only one pipe open. */
> 	if (!sce_in->pipeh && !sce_out->pipeh) {
1357,1360c1860,1863
< 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
< 	case UE_INTERRUPT:
< 		if (events & (POLLIN | POLLRDNORM)) {
< 			if (sce->q.c_cc > 0)
---
> 	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
> 		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
> 		case UE_INTERRUPT:
> 			if (sce_in->q.c_cc > 0)
1363,1368c1866,1869
< 				selrecord(l, &sce->rsel);
< 		}
< 		break;
< 	case UE_ISOCHRONOUS:
< 		if (events & (POLLIN | POLLRDNORM)) {
< 			if (sce->cur != sce->fill)
---
> 				selrecord(l, &sce_in->rsel);
> 			break;
> 		case UE_ISOCHRONOUS:
> 			if (sce_in->cur != sce_in->fill)
1371c1872,1893
< 				selrecord(l, &sce->rsel);
---
> 				selrecord(l, &sce_in->rsel);
> 			break;
> 		case UE_BULK:
> #ifdef UGEN_BULK_RA_WB
> 			if (sce_in->state & UGEN_BULK_RA) {
> 				if (sce_in->ra_wb_used > 0)
> 					revents |= events &
> 					    (POLLIN | POLLRDNORM);
> 				else
> 					selrecord(l, &sce_in->rsel);
> 				break;
> 			}
> #endif
> 			/*
> 			 * We have no easy way of determining if a read
> 			 * will yield any data.
> 			 * Pretend it will.
> 			 */
> 			revents |= events & (POLLIN | POLLRDNORM);
> 			break;
> 		default:
> 			break;
1373,1385c1895,1924
< 		break;
< 	case UE_BULK:
< 		/*
< 		 * We have no easy way of determining if a read will
< 		 * yield any data or a write will happen.
< 		 * Pretend they will.
< 		 */
< 		revents |= events &
< 			   (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM);
< 		break;
< 	default:
< 		break;
< 	}
---
> 	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
> 		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
> 		case UE_INTERRUPT:
> 		case UE_ISOCHRONOUS:
> 			/* XXX unimplemented */
> 			break;
> 		case UE_BULK:
> #ifdef UGEN_BULK_RA_WB
> 			if (sce_out->state & UGEN_BULK_WB) {
> 				if (sce_out->ra_wb_used <
> 				    sce_out->limit - sce_out->ibuf)
> 					revents |= events &
> 					    (POLLOUT | POLLWRNORM);
> 				else
> 					selrecord(l, &sce_out->rsel);
> 				break;
> 			}
> #endif
> 			/*
> 			 * We have no easy way of determining if a write
> 			 * will happen.
> 			 * Pretend it will.
> 			 */
> 			revents |= events & (POLLOUT | POLLWRNORM);
> 			break;
> 		default:
> 			break;
> 		}
> 
> 
1426a1966,2009
> #ifdef UGEN_BULK_RA_WB
> static int
> filt_ugenread_bulk(struct knote *kn, long hint)
> {
> 	struct ugen_endpoint *sce = kn->kn_hook;
> 
> 	if (!(sce->state & UGEN_BULK_RA))
> 		/*
> 		 * We have no easy way of determining if a read
> 		 * will yield any data.
> 		 * So, emulate "seltrue".
> 		 */
> 		return (filt_seltrue(kn, hint));
> 
> 	if (sce->ra_wb_used == 0)
> 		return (0);
> 
> 	kn->kn_data = sce->ra_wb_used;
> 
> 	return (1);
> }
> 
> static int
> filt_ugenwrite_bulk(struct knote *kn, long hint)
> {
> 	struct ugen_endpoint *sce = kn->kn_hook;
> 
> 	if (!(sce->state & UGEN_BULK_WB))
> 		/*
> 		 * We have no easy way of determining if a write
> 		 * will happen.
> 		 * So, emulate "seltrue".
> 		 */
> 		return (filt_seltrue(kn, hint));
> 
> 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
> 		return (0);
> 
> 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
> 
> 	return (1);
> }
> #endif
> 
1432a2016,2022
> #ifdef UGEN_BULK_RA_WB
> static const struct filterops ugenread_bulk_filtops =
> 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
> 
> static const struct filterops ugenwrite_bulk_filtops =
> 	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
> #else
1434a2025
> #endif
1449,1453d2039
< 	/* XXX always IN */
< 	sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
< 	if (sce == NULL)
< 		return (1);
< 
1455a2042,2045
> 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
> 		if (sce == NULL)
> 			return (1);
> 
1464a2055,2058
> #ifdef UGEN_BULK_RA_WB
> 			kn->kn_fop = &ugenread_bulk_filtops;
> 			break;
> #else
1470a2065
> #endif
1477a2073,2076
> 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
> 		if (sce == NULL)
> 			return (1);
> 
1485a2085,2087
> #ifdef UGEN_BULK_RA_WB
> 			kn->kn_fop = &ugenwrite_bulk_filtops;
> #else
1491a2094
> #endif
Index: sys/dev/usb/files.usb
===================================================================
RCS file: /cvs/netbsd/netbsd/src/sys/dev/usb/files.usb,v
retrieving revision 1.1.1.2
retrieving revision 1.3
diff -d -r1.1.1.2 -r1.3
50a51
> defflag UGEN_BULK_RA_WB
Index: share/man/man4/ugen.4
===================================================================
RCS file: /cvs/netbsd/netbsd/src/share/man/man4/ugen.4,v
retrieving revision 1.1.1.1
retrieving revision 1.2
diff -d -r1.1.1.1 -r1.2
80c80
< To find out what endpoints that exist there are a series of
---
> To find out what endpoints exist there are a series of
82c82
< operation on the control endpoint that returns the USB descriptors
---
> operations on the control endpoint that return the USB descriptors
86,87c86,87
< which is always endpoint 0.  The control endpoint accepts request
< and may respond with an answer to such request.  Control request
---
> which is always endpoint 0.  The control endpoint accepts requests
> and may respond with an answer to such requests.  Control requests
111c111,119
< All IO operations on a bulk endpoint are unbuffered.
---
> All IO operations on a bulk endpoint are normally unbuffered.
> Buffering can be enabled using a
> .Dv USB_SET_BULK_RA
> or
> .Dv USB_SET_BULK_WB
> .Xr ioctl 2
> call, to enable read-ahead and write-behind respectively.  When
> read-ahead or write-behind is enabled, the file descriptor may
> be set to use non-blocking IO.
274a283,333
> Bulk endpoints handle the following
> .Xr ioctl 2
> calls:
> .Pp
> .Bl -tag -width indent -compact
> .It Dv USB_SET_BULK_RA (int)
> Enable or disable bulk read-ahead.  When enabled, the driver will
> begin to read data from the device into a buffer.  The 
> .Xr read 2
> call will read data from this buffer, blocking if necessary until
> enough data is available to satisfy the requested length.  The
> buffer size and the read request length can be set by the
> .Dv USB_SET_BULK_RA_OPT
> .Xr ioctl 2
> call.
> .It Dv USB_SET_BULK_WB (int)
> Enable or disable bulk write-behind.  When enabled, the driver will
> buffer data from the
> .Xr write 2
> call before writing it to the device.  
> .Xr write 2
> will block if there is not enough room in the buffer for all
> the data.  The buffer size and the write request length can be set
> by the
> .Dv USB_SET_BULK_WB_OPT
> .Xr ioctl 2
> call.
> .It Dv USB_SET_BULK_RA_OPT (struct usb_bulk_ra_wb_opt)
> Set the size of the buffer and the length of the read requests used by
> the driver when bulk read-ahead is enabled.  The changes do not take
> effect until the next time bulk read-ahead is enabled.  Read requests
> are made for the length specified, and the host controller driver
> (i.e.,
> .Xr ehci 4 ,
> .Xr ohci 4 , and
> .Xr uhci 4 ) will perform as many bus transfers as required.  If
> transfers from the device will be smaller than the maximum length,
> .Dv ra_wb_request_size
> must be set to the required length.
> .Bd -literal
> struct usb_bulk_ra_wb_opt {
> 	int	ra_wb_buffer_size;
> 	int	ra_wb_request_size;
> };
> .Ed
> .It Dv USB_SET_BULK_WB_OPT (struct usb_bulk_ra_wb_opt)
> Set the size of the buffer and the length of the write requests used
> by the driver when bulk write-behind is enabled.  The changes do not
> take effect until the next time bulk write-behind is enabled.
> .El
> .Pp

--2754.Wed.Jul.19.20:47:34.EDT.2006--