Subject: kern/22334: Back port of txp driver to 1.6 branch, pullup request
To: None <gnats-bugs@gnats.netbsd.org>
From: None <buhrow@lothlorien.nfbcal.org>
List: netbsd-bugs
Date: 08/01/2003 10:05:07
>Number:         22334
>Category:       kern
>Synopsis:       Diffs to make the txp driver work under NetBSD-1.6.X
>Confidential:   yes
>Severity:       non-critical
>Priority:       medium
>Responsible:    kern-bug-people
>State:          open
>Class:          change-request
>Submitter-Id:   net
>Arrival-Date:   Fri Aug 01 17:06:00 UTC 2003
>Closed-Date:
>Last-Modified:
>Originator:     Brian Buhrow
>Release:        NetBSD 1.6.1
>Organization:
	
>Environment:
	
	
System: NetBSD lothlorien.nfbcal.org 1.6.1 NetBSD 1.6.1 (NFBNETBSD_TXP) #0: Fri Aug 1 08:25:29 PDT 2003 buhrow@lothlorien.nfbcal.org:/usr/local/netbsd/src/sys/arch/i386/compile/NFBNETBSD_TXP i386
Architecture: i386
Machine: i386
>Description:
	Here are patches to make the txp(4) driver work under NetBSD-1.6.X
environments.  I've modified the i386/GENERIC config file, but didn't
modify any others because I'm not sure on which other platforms the txp(4)
driver would be used.  Also, I don't quite know how to get a unified diff
for files which don't reside in the cvs repository, so I've included the
if_txp* files wholesale.
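	(As an aside, a unified diff for a file that does not yet exist in the
repository can be produced by diffing it against /dev/null, e.g.
"diff -u /dev/null pci/if_txp.c"; the path here is only illustrative.)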
	These diffs are against the 1.6 branch of the tree as of 08/01/2003.
The driver compiles and works perfectly, as far as I can tell.
	If someone could review these patches and incorporate them into the
1.6 branch, that would be great.
-thanks
-Brian

<diffs of non-txp files, first.>
Index: pci/files.pci
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/files.pci,v
retrieving revision 1.173.2.2
diff -u -r1.173.2.2 files.pci
--- pci/files.pci	2003/01/28 06:21:23	1.173.2.2
+++ pci/files.pci	2003/08/01 16:43:35
@@ -47,7 +47,7 @@
 # 3ware RAID controllers
 device	twe {unit = -1}
 attach	twe at pci
-file	dev/pci/twe.c			twe
+file	dev/pci/twe.c			twe	needs-flag
 
 attach	ld at twe with ld_twe
 file	dev/pci/ld_twe.c		ld_twe
@@ -476,6 +476,11 @@
 # DECchip 21x4x Ethernet controller family, and assorted clones.
 attach	tlp at pci with tlp_pci
 file	dev/pci/if_tlp_pci.c		tlp_pci
+
+# 3Com 3XP/Sidewinder Ethernet cards
+device	txp: ether, ifnet, arp, mii
+attach	txp at pci
+file	dev/pci/if_txp.c		txp
 
 # Bit3 PCI-VME mod. 617
 device	btvmei: vmebus
Index: pci/pcidevs
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pcidevs,v
retrieving revision 1.428.2.20
diff -u -r1.428.2.20 pcidevs
--- pci/pcidevs	2003/01/28 06:22:37	1.428.2.20
+++ pci/pcidevs	2003/08/01 16:43:35
@@ -618,10 +618,14 @@
 product 3COM 3C905CTX	0x9200	3c905C-TX 10/100 Ethernet with mngmt
 product 3COM 3C980SRV	0x9800	3c980 Server Adapter 10/100 Ethernet
 product 3COM 3C980CTXM	0x9805	3c980C-TXM 10/100 Ethernet
+product 3COM 3CR990	0x9900	3c990-TX 10/100 Ethernet with 3XP
 product 3COM 3CR990TX95	0x9902	3CR990-TX-95 10/100 Ethernet with 3XP
 product 3COM 3CR990TX97	0x9903	3CR990-TX-97 10/100 Ethernet with 3XP
+product 3COM 3C990B	0x9904	3c990B 10/100 Ethernet with 3XP
+product 3COM 3CR990FX	0x9905	3CR990-FX 100 Ethernet with 3XP
 product 3COM 3CR990SVR95 0x9908	3CR990-SVR-95 10/100 Ethernet with 3XP
 product 3COM 3CR990SVR97 0x9909	3CR990-SVR-97 10/100 Ethernet with 3XP
+product 3COM 3C990BSVR	0x990a	3c990BSVR 10/100 Ethernet with 3XP
 
 /* 3Dfx Interactive products */
 product 3DFX VOODOO	0x0001	Voodoo
Index: pci/pcidevs.h
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pcidevs.h,v
retrieving revision 1.433.2.18
diff -u -r1.433.2.18 pcidevs.h
--- pci/pcidevs.h	2003/01/28 06:24:46	1.433.2.18
+++ pci/pcidevs.h	2003/08/01 16:43:36
@@ -1,10 +1,10 @@
-/*	$NetBSD: pcidevs.h,v 1.433.2.18 2003/01/28 06:24:46 jmc Exp $	*/
+/*	$NetBSD$	*/
 
 /*
  * THIS FILE AUTOMATICALLY GENERATED.  DO NOT EDIT.
  *
  * generated from:
- *	NetBSD: pcidevs,v 1.428.2.19 2003/01/27 06:21:36 jmc Exp 
+ *	NetBSD: pcidevs,v 1.428.2.20 2003/01/28 06:22:37 jmc Exp 
  */
 
 /*
@@ -625,10 +625,14 @@
 #define	PCI_PRODUCT_3COM_3C905CTX	0x9200		/* 3c905C-TX 10/100 Ethernet with mngmt */
 #define	PCI_PRODUCT_3COM_3C980SRV	0x9800		/* 3c980 Server Adapter 10/100 Ethernet */
 #define	PCI_PRODUCT_3COM_3C980CTXM	0x9805		/* 3c980C-TXM 10/100 Ethernet */
+#define	PCI_PRODUCT_3COM_3CR990	0x9900		/* 3c990-TX 10/100 Ethernet with 3XP */
 #define	PCI_PRODUCT_3COM_3CR990TX95	0x9902		/* 3CR990-TX-95 10/100 Ethernet with 3XP */
 #define	PCI_PRODUCT_3COM_3CR990TX97	0x9903		/* 3CR990-TX-97 10/100 Ethernet with 3XP */
+#define	PCI_PRODUCT_3COM_3C990B	0x9904		/* 3c990B 10/100 Ethernet with 3XP */
+#define	PCI_PRODUCT_3COM_3CR990FX	0x9905		/* 3CR990-FX 100 Ethernet with 3XP */
 #define	PCI_PRODUCT_3COM_3CR990SVR95	0x9908		/* 3CR990-SVR-95 10/100 Ethernet with 3XP */
 #define	PCI_PRODUCT_3COM_3CR990SVR97	0x9909		/* 3CR990-SVR-97 10/100 Ethernet with 3XP */
+#define	PCI_PRODUCT_3COM_3C990BSVR	0x990a		/* 3c990BSVR 10/100 Ethernet with 3XP */
 
 /* 3Dfx Interactive products */
 #define	PCI_PRODUCT_3DFX_VOODOO	0x0001		/* Voodoo */
Index: pci/pcidevs_data.h
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pcidevs_data.h,v
retrieving revision 1.431.2.18
diff -u -r1.431.2.18 pcidevs_data.h
--- pci/pcidevs_data.h	2003/01/28 06:24:48	1.431.2.18
+++ pci/pcidevs_data.h	2003/08/01 16:43:37
@@ -1,10 +1,10 @@
-/*	$NetBSD: pcidevs_data.h,v 1.431.2.18 2003/01/28 06:24:48 jmc Exp $	*/
+/*	$NetBSD$	*/
 
 /*
  * THIS FILE AUTOMATICALLY GENERATED.  DO NOT EDIT.
  *
  * generated from:
- *	NetBSD: pcidevs,v 1.428.2.19 2003/01/27 06:21:36 jmc Exp 
+ *	NetBSD: pcidevs,v 1.428.2.20 2003/01/28 06:22:37 jmc Exp 
  */
 
 /*
@@ -202,6 +202,12 @@
 	    "3c980C-TXM 10/100 Ethernet",
 	},
 	{
+	    PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990,
+	    0,
+	    "3Com",
+	    "3c990-TX 10/100 Ethernet with 3XP",
+	},
+	{
 	    PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95,
 	    0,
 	    "3Com",
@@ -214,6 +220,18 @@
 	    "3CR990-TX-97 10/100 Ethernet with 3XP",
 	},
 	{
+	    PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B,
+	    0,
+	    "3Com",
+	    "3c990B 10/100 Ethernet with 3XP",
+	},
+	{
+	    PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX,
+	    0,
+	    "3Com",
+	    "3CR990-FX 100 Ethernet with 3XP",
+	},
+	{
 	    PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95,
 	    0,
 	    "3Com",
@@ -224,6 +242,12 @@
 	    0,
 	    "3Com",
 	    "3CR990-SVR-97 10/100 Ethernet with 3XP",
+	},
+	{
+	    PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR,
+	    0,
+	    "3Com",
+	    "3c990BSVR 10/100 Ethernet with 3XP",
 	},
 	{
 	    PCI_VENDOR_3DFX, PCI_PRODUCT_3DFX_VOODOO,
Index: GENERIC
===================================================================
RCS file: /cvsroot/src/sys/arch/i386/conf/GENERIC,v
retrieving revision 1.491.4.4
diff -u -r1.491.4.4 GENERIC
--- GENERIC	2003/01/28 06:28:20	1.491.4.4
+++ GENERIC	2003/08/01 16:49:44
@@ -628,6 +628,7 @@
 ti*	at pci? dev ? function ?	# Alteon ACEnic gigabit Ethernet
 tl*	at pci? dev ? function ?	# ThunderLAN-based Ethernet
 tlp*	at pci? dev ? function ?	# DECchip 21x4x and clones
+txp*	at pci? dev ? function ?	# 3Com 3c990 (Typhoon) Ethernet
 vr*	at pci? dev ? function ?	# VIA Rhine Fast Ethernet
 wi*	at pci? dev ? function ?	# Intersil Prism Mini-PCI (802.11b)
 wm*	at pci? dev ? function ?	# Intel 82543/82544 gigabit

<if_txp.c>
/* $NetBSD: if_txp.c,v 1.3 2003/07/14 15:47:24 lukem Exp $ */

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_txp.c,v 1.3 2003/07/14 15:47:24 lukem Exp $");

#include "bpfilter.h"
/* #include "vlan.h" XXX notyet */
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/callout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NVLAN > 0
#include <net/if_vlanvar.h>
#endif

#include <uvm/uvm_extern.h>              /* for vtophys */
#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_txpreg.h>

#include <dev/microcode/typhoon/3c990img.h>

/*
 * These currently break the 3c990 firmware; hopefully this will be
 * resolved at some point.
 */
#undef	TRY_TX_UDP_CSUM
#undef	TRY_TX_TCP_CSUM

int txp_probe(struct device *, struct cfdata *, void *);
void txp_attach(struct device *, struct device *, void *);
int txp_intr(void *);
void txp_tick(void *);
void txp_shutdown(void *);
int txp_ioctl(struct ifnet *, u_long, caddr_t);
void txp_start(struct ifnet *);
void txp_stop(struct txp_softc *);
void txp_init(struct txp_softc *);
void txp_watchdog(struct ifnet *);

int txp_chip_init(struct txp_softc *);
int txp_reset_adapter(struct txp_softc *);
int txp_download_fw(struct txp_softc *);
int txp_download_fw_wait(struct txp_softc *);
int txp_download_fw_section(struct txp_softc *,
    struct txp_fw_section_header *, int);
int txp_alloc_rings(struct txp_softc *);
void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int);
void txp_set_filter(struct txp_softc *);

int txp_cmd_desc_numfree(struct txp_softc *);
int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
int txp_command2(struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int);
int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **);
void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
void txp_capabilities(struct txp_softc *);

void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int txp_ifmedia_upd(struct ifnet *);
void txp_show_descriptor(void *);
void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
    struct txp_dma_alloc *);
void txp_rxbuf_reclaim(struct txp_softc *);
void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
    struct txp_dma_alloc *);

struct cfattach txp_ca = {
	sizeof(struct txp_softc), txp_probe, txp_attach
};


const struct txp_pci_match {
	int vid, did, flags;
} txp_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B, TXP_USESUBSYSTEM },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX, TXP_USESUBSYSTEM },
};

static const struct txp_pci_match *txp_pcilookup(pcireg_t);

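/*
 * For boards flagged TXP_USESUBSYSTEM in txp_devices[] above, the PCI
 * subsystem ID identifies the exact variant; the mask/value pairs below
 * pick out the server-class and fiber flags.
 */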
static const struct {
	u_int16_t mask, value;
	int flags;
} txp_subsysinfo[] = {
	{0xf000, 0x2000, TXP_SERVERVERSION},
	{0x0100, 0x0100, TXP_FIBER},
#if 0 /* information from 3com header, unused */
	{0x0010, 0x0010, /* secured firmware */},
	{0x0003, 0x0000, /* variable DES */},
	{0x0003, 0x0001, /* single DES - "95" */},
	{0x0003, 0x0002, /* triple DES - "97" */},
#endif
};

static const struct txp_pci_match *
txp_pcilookup(id)
	pcireg_t id;
{
	int i;

	for (i = 0; i < sizeof(txp_devices) / sizeof(txp_devices[0]); i++)
		if ((PCI_VENDOR(id) == txp_devices[i].vid) &&
		    (PCI_PRODUCT(id) == txp_devices[i].did))
			return (&txp_devices[i]);
	return (0);
}

int
txp_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (txp_pcilookup(pa->pa_id))
		return (1);
	return (0);
}

void
txp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t command;
	u_int16_t p1;
	u_int32_t p2;
	u_char enaddr[6];
	const struct txp_pci_match *pcimatch;
	u_int16_t subsys;
	int i, flags;
	char devinfo[256];

	sc->sc_cold = 1;

	pcimatch = txp_pcilookup(pa->pa_id);
	flags = pcimatch->flags;
	if (pcimatch->flags & TXP_USESUBSYSTEM) {
		subsys = PCI_PRODUCT(pci_conf_read(pc, pa->pa_tag,
						   PCI_SUBSYS_ID_REG));
		for (i = 0;
		     i < sizeof(txp_subsysinfo)/sizeof(txp_subsysinfo[0]);
		     i++)
			if ((subsys & txp_subsysinfo[i].mask) ==
			    txp_subsysinfo[i].value)
				flags |= txp_subsysinfo[i].flags;
	}
	sc->sc_flags = flags;

	pci_devinfo(pa->pa_id, 0, 0, devinfo);
#define TXP_EXTRAINFO ((flags & (TXP_USESUBSYSTEM|TXP_SERVERVERSION)) == \
  (TXP_USESUBSYSTEM|TXP_SERVERVERSION) ? " (SVR)" : "")
	printf(": %s%s\n%s", devinfo, TXP_EXTRAINFO, sc->sc_dev.dv_xname);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
		printf(": failed to enable bus mastering\n");
		return;
	}

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		return;
	}
	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, NULL)) {
		printf(": can't map mem space %d\n", 0);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": interrupting at %s\n", intrstr);

	if (txp_chip_init(sc))
		return;

	if (txp_download_fw(sc))
		return;

	if (txp_alloc_rings(sc))
		return;

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1))
		return;

	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1))
		return;

	txp_set_filter(sc);

	p1 = htole16(p1);
	enaddr[0] = ((u_int8_t *)&p1)[1];
	enaddr[1] = ((u_int8_t *)&p1)[0];
	p2 = htole32(p2);
	enaddr[2] = ((u_int8_t *)&p2)[3];
	enaddr[3] = ((u_int8_t *)&p2)[2];
	enaddr[4] = ((u_int8_t *)&p2)[1];
	enaddr[5] = ((u_int8_t *)&p2)[0];

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	       ether_sprintf(enaddr));
	sc->sc_cold = 0;

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	if (flags & TXP_FIBER) {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_HDX,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_FDX,
			    0, NULL);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX,
			    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX,
			    0, NULL);
	}
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	txp_capabilities(sc);

	callout_init(&sc->sc_tick);
	callout_reset(&sc->sc_tick, hz, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	shutdownhook_establish(txp_shutdown, sc);
}

int
txp_chip_init(sc)
	struct txp_softc *sc;
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}

int
txp_reset_adapter(sc)
	struct txp_softc *sc;
{
	u_int32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
		return (-1);
	}

	return (0);
}

int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf(": not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, initial\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + le32toh(secthead->nbytes) +
			sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}

int
txp_download_fw_wait(sc)
	struct txp_softc *sc;
{
	u_int32_t i, r;

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		printf(": fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		printf(": fw not waiting for segment\n");
		return (-1);
	}
	return (0);
}

int
txp_download_fw_section(sc, sect, sectnum)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, le32toh(sect->nbytes), &dma, 0)) {
		printf(": fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr,
	    le32toh(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	WRITE_REG(sc, TXP_H2A_2, le32toh(sect->cksum));
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, section %d\n",
		    sc->sc_dev.dv_xname, sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &dma);

	return (err);
}

int
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	txp_start(&sc->sc_arpcom.ec_if);

	return (claimed);
}

void
txp_rx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_rx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	u_int32_t roff, woff;
	int sumflags = 0;
	int idx;

	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	idx = roff / sizeof(struct txp_rx_desc);
	rxd = r->r_desc + idx;

	while (roff != woff) {

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
		    BUS_DMASYNC_POSTREAD);

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
			    le32toh(rxd->rx_stat));
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer */
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
		m = sd->sd_mbuf;
		free(sd, M_DEVBUF);
		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're force to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			mnew->m_data += 2;
			bcopy(m->m_data, mnew->m_data, m->m_len);
			m_freem(m);
			m = mnew;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
			sumflags |= (M_CSUM_IPv4|M_CSUM_IPv4_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
			sumflags |= M_CSUM_IPv4;

		if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
			sumflags |= (M_CSUM_TCPv4|M_CSUM_TCP_UDP_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
			sumflags |= M_CSUM_TCPv4;

		if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
			sumflags |= (M_CSUM_UDPv4|M_CSUM_TCP_UDP_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
			sumflags |= M_CSUM_UDPv4;

		m->m_pkthdr.csum_flags = sumflags;

#if NVLAN > 0
		if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
			if (vlan_input_tag(m, htons(rxd->rx_vlan >> 16)) < 0)
				ifp->if_noproto++;
			goto next;
		}
#endif

		(*ifp->if_input)(ifp, m);

next:
		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
		    BUS_DMASYNC_PREREAD);

		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			idx = 0;
			roff = 0;
			rxd = r->r_desc;
		} else {
			idx++;
			rxd++;
		}
		woff = le32toh(*r->r_woff);
	}

	*r->r_roff = htole32(woff);
}

void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	i = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_write_idx));

	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);
		    
		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}

/*
 * Reclaim mbufs and entries from a transmit ring.
 */
void
txp_tx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_tx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    cons * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc),
		    BUS_DMASYNC_POSTWRITE);

		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				ifp->if_opackets++;
			}
		}
		ifp->if_flags &= ~IFF_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;
}

void
txp_shutdown(vsc)
	void *vsc;
{
	struct txp_softc *sc = (struct txp_softc *)vsc;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
}

int
txp_alloc_rings(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_boot_record *boot;
	struct txp_swdesc *sd;
	u_int32_t r;
	int i, j;

	/* boot record */
	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate boot record\n");
		return (-1);
	}
	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
	bzero(boot, sizeof(*boot));
	sc->sc_boot = boot;

	/* host variables */
	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate host ring\n");
		goto bail_boot;
	}
	bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
	boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff);
	boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32);
	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;

	/* high priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high tx ring\n");
		goto bail_host;
	}
	bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff);
	boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32);
	boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
	for (i = 0; i < TX_ENTRIES; i++) {
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
		    TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
		    BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txd[j].sd_map);
				sc->sc_txd[j].sd_map = NULL;
			}
			goto bail_txhiring;
		}
	}

	/* low priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low tx ring\n");
		goto bail_txhiring;
	}
	bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff);
	boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32);
	boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* high priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high rx ring\n");
		goto bail_txloring;
	}
	bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff);
	boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32);
	boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
	    0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* low priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low rx ring\n");
		goto bail_rxhiring;
	}
	bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff);
	boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32);
	boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
	    0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* command ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate command ring\n");
		goto bail_rxloring;
	}
	bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
	boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff);
	boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32);
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* response ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate response ring\n");
		goto bail_cmdring;
	}
	bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
	boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff);
	boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32);
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* receive buffer ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate rx buffer ring\n");
		goto bail_rspring;
	}
	bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
	boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff);
	boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32);
	boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL) {
			goto bail_rxbufring;
		}

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
			goto bail_rxbufring;
		}
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
			goto bail_rxbufring;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto bail_rxbufring;
		}
		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));

		sc->sc_rxbufs[i].rb_paddrlo =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
		sc->sc_rxbufs[i].rb_paddrhi =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
	    0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
	    sizeof(struct txp_rxbuf_desc));

	/* zero dma */
	if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate response ring\n");
		goto bail_rxbufring;
	}
	bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
	boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff);
	boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32);

	/* See if it's waiting for boot, and try to boot it */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		goto bail;
	}
	WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

	/* See if it booted */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_RUNNING)
			break;
		DELAY(50);
	}
	if (r != STAT_RUNNING) {
		printf(": fw not running\n");
		goto bail;
	}

	/* Clear TX and CMD ring write registers */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);

	return (0);

bail:
	txp_dma_free(sc, &sc->sc_zero_dma);
bail_rxbufring:
	txp_dma_free(sc, &sc->sc_rxbufring_dma);
bail_rspring:
	txp_dma_free(sc, &sc->sc_rspring_dma);
bail_cmdring:
	txp_dma_free(sc, &sc->sc_cmdring_dma);
bail_rxloring:
	txp_dma_free(sc, &sc->sc_rxloring_dma);
bail_rxhiring:
	txp_dma_free(sc, &sc->sc_rxhiring_dma);
bail_txloring:
	txp_dma_free(sc, &sc->sc_txloring_dma);
bail_txhiring:
	txp_dma_free(sc, &sc->sc_txhiring_dma);
bail_host:
	txp_dma_free(sc, &sc->sc_host_dma);
bail_boot:
	txp_dma_free(sc, &sc->sc_boot_dma);
	return (-1);
}

int
txp_dma_malloc(sc, size, dma, mapflags)
	struct txp_softc *sc;
	bus_size_t size;
	struct txp_dma_alloc *dma;
	int mapflags;
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	return (r);
}

void
txp_dma_free(sc, dma)
	struct txp_softc *sc;
	struct txp_dma_alloc *dma;
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

int
txp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

#if 0
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}
#endif

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			txp_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			txp_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			txp_set_filter(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}

void
txp_init(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	int s;

	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	if (!callout_pending(&sc->sc_tick))
		callout_reset(&sc->sc_tick, hz, txp_tick, sc);

	splx(s);
}

void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
}

void
txp_start(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	int txdidx;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *mnew;
	struct txp_swdesc *sd;
	u_int32_t firstprod, firstcnt, prod, cnt, i;
#if NVLAN > 0
	struct ifvlan		*ifv;
#endif

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

	while (1) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		mnew = NULL;

		firstprod = prod;
		firstcnt = cnt;

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
		    BUS_DMA_NOWAIT)) {
			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL)
				goto oactive1;
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					goto oactive1;
				}
			}
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			m = mnew;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
			    BUS_DMA_NOWAIT))
				goto oactive1;
		}

		if ((TX_ENTRIES - cnt) < 4)
			goto oactive;

		txd = r->r_desc + prod;
		txdidx = prod;
		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = m->m_pkthdr.len;
		txd->tx_pflags = 0;
		txd->tx_numdesc = sd->sd_map->dm_nsegs;

		if (++prod == TX_ENTRIES)
			prod = 0;

		if (++cnt >= (TX_ENTRIES - 4))
			goto oactive;

#if NVLAN > 0
		if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
		    m->m_pkthdr.rcvif != NULL) {
			ifv = m->m_pkthdr.rcvif->if_softc;
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(ifv->ifv_tag) << TX_PFLAGS_VLANTAG_S);
		}
#endif

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
#endif
#ifdef TRY_TX_UDP_CSUM
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
			if (++cnt >= (TX_ENTRIES - 4)) {
				bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
				    0, sd->sd_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				goto oactive;
			}

			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
			    FRAG_FLAGS_VALID;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
			fxd->frag_addrlo =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
			    0xffffffff;
			fxd->frag_addrhi =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
			    32;
			fxd->frag_rsvd2 = 0;

			bus_dmamap_sync(sc->sc_dmat,
			    sc->sc_txhiring_dma.dma_map,
			    prod * sizeof(struct txp_frag_desc),
			    sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;

		}

		/*
		 * if mnew isn't NULL, we already dequeued and copied
		 * the packet.
		 */
		if (mnew == NULL)
			IFQ_DEQUEUE(&ifp->if_snd, m);

		ifp->if_timer = 5;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		txd->tx_flags |= TX_FLAGS_VALID;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
		    txdidx * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

#if 0
		{
			struct mbuf *mx;
			int i;

			printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
			    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
			    txd->tx_pflags);
			for (mx = m; mx != NULL; mx = mx->m_next) {
				for (i = 0; i < mx->m_len; i++) {
					printf(":%02x",
					    (u_int8_t)m->m_data[i]);
				}
			}
			printf("\n");
		}
#endif

		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
oactive1:
	ifp->if_flags |= IFF_OACTIVE;
	r->r_prod = firstprod;
	r->r_cnt = firstcnt;
}

/*
 * Handle simple commands sent to the typhoon
 */
int
txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
	struct txp_softc *sc;
	u_int16_t id, in1, *out1;
	u_int32_t in2, in3, *out2, *out3;
	int wait;
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = le16toh(rsp->rsp_par1);
	if (out2 != NULL)
		*out2 = le32toh(rsp->rsp_par2);
	if (out3 != NULL)
		*out3 = le32toh(rsp->rsp_par3);
	free(rsp, M_DEVBUF);
	return (0);
}

int
txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
	struct txp_softc *sc;
	u_int16_t id, in1;
	u_int32_t in2, in3;
	struct txp_ext_desc *in_extp;
	u_int8_t in_extn;
	struct txp_rsp_desc **rspp;
	int wait;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	u_int32_t idx, i;
	u_int16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	seq = sc->sc_seq++;
	cmd->cmd_seq = htole16(seq);
	cmd->cmd_id = htole16(id);
	cmd->cmd_par1 = htole16(in1);
	cmd->cmd_par2 = htole32(in2);
	cmd->cmd_par3 = htole32(in3);
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;

	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);

	if (!wait)
		return (0);

	for (i = 0; i < 10000; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
		idx = le32toh(hv->hv_resp_read_idx);
		if (idx != le32toh(hv->hv_resp_write_idx)) {
			*rspp = NULL;
			if (txp_response(sc, idx, id, seq, rspp))
				return (-1);
			if (*rspp != NULL)
				break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
		DELAY(50);
	}
	if (i == 10000 || (*rspp) == NULL) {
		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
		return (-1);
	}

	return (0);
}

int
txp_response(sc, ridx, id, seq, rspp)
	struct txp_softc *sc;
	u_int32_t ridx;
	u_int16_t id;
	u_int16_t seq;
	struct txp_rsp_desc **rspp;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != le32toh(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == le16toh(rsp->rsp_id) && le16toh(rsp->rsp_seq) == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error: id 0x%x\n",
			    TXP_DEVNAME(sc), le16toh(rsp->rsp_id));
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = le32toh(hv->hv_resp_read_idx);
			continue;
		}

		switch (le16toh(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    le16toh(rsp->rsp_id));
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = le32toh(hv->hv_resp_read_idx);
		hv->hv_resp_read_idx = le32toh(ridx);
	}

	return (0);
}

void
txp_rsp_fixup(sc, rsp, dst)
	struct txp_softc *sc;
	struct txp_rsp_desc *rsp, *dst;
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = le32toh(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = ridx;
		hv->hv_resp_read_idx = htole32(ridx);
	}
	
	hv->hv_resp_read_idx = htole32(ridx);
}

int
txp_cmd_desc_numfree(sc)
	struct txp_softc *sc;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_boot_record *br = sc->sc_boot;
	u_int32_t widx, ridx, nfree;

	widx = sc->sc_cmdring.lastwrite;
	ridx = le32toh(hv->hv_cmd_read_idx);

	if (widx == ridx) {
		/* Ring is completely free */
		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = le32toh(br->br_cmd_siz) -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}

void
txp_stop(sc)
	struct txp_softc *sc;
{
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	if (callout_pending(&sc->sc_tick))
		callout_stop(&sc->sc_tick);
}

void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
txp_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t new_xcvr;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) ||
		   (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX)) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else
		return (EINVAL);

	/* nothing to do */
	if (sc->sc_xcvr == new_xcvr)
		return (0);

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, 0);
	sc->sc_xcvr = new_xcvr;

	return (0);
}

void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

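	/*
	 * The link status bit in BMSR is latched, so read the register
	 * twice; the second read reflects the current link state.
	 */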
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (!(sc->sc_flags & TXP_FIBER) && (bmcr & BMCR_AUTOEN)) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

void
txp_show_descriptor(d)
	void *d;
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	}
}

void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct ethercom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t crc, carry, hashbit, hash[2];
	u_int16_t filter;
	u_int8_t octet;
	int i, j, mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				goto again;
			}

			mcnt++;
			crc = 0xffffffff;

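			/*
			 * Compute the CRC-32 of the multicast address and
			 * use its low 6 bits to select one of the 64 bits
			 * in the hash filter.
			 */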
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];
				for (j = 0; j < 8; j++) {
					carry = ((crc & 0x80000000) ? 1 : 0) ^
					    (octet & 1);
					crc <<= 1;
					octet >>= 1;
					if (carry)
						crc = (crc ^ TXP_POLYNOMIAL) |
						    carry;
				}
			}
			hashbit = (u_int16_t)(crc & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}

void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}
#endif

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#ifdef TRY_TX_UDP_CSUM
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}

<if_txpreg.h>
/* $NetBSD: if_txpreg.h,v 1.2 2003/07/07 15:18:24 drochner Exp $ */

/*
 * Copyright (c) 2001 Aaron Campbell <aaron@monkey.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#define	TXP_PCI_LOMEM			0x14	/* pci conf, memory map BAR */
#define	TXP_PCI_LOIO			0x10	/* pci conf, IO map BAR */

/*
 * Typhoon registers.
 */
#define	TXP_SRR				0x00	/* soft reset register */
#define	TXP_ISR				0x04	/* interrupt status register */
#define	TXP_IER				0x08	/* interrupt enable register */
#define	TXP_IMR				0x0c	/* interrupt mask register */
#define	TXP_SIR				0x10	/* self interrupt register */
#define	TXP_H2A_7			0x14	/* host->arm comm 7 */
#define	TXP_H2A_6			0x18	/* host->arm comm 6 */
#define	TXP_H2A_5			0x1c	/* host->arm comm 5 */
#define	TXP_H2A_4			0x20	/* host->arm comm 4 */
#define	TXP_H2A_3			0x24	/* host->arm comm 3 */
#define	TXP_H2A_2			0x28	/* host->arm comm 2 */
#define	TXP_H2A_1			0x2c	/* host->arm comm 1 */
#define	TXP_H2A_0			0x30	/* host->arm comm 0 */
#define	TXP_A2H_3			0x34	/* arm->host comm 3 */
#define	TXP_A2H_2			0x38	/* arm->host comm 2 */
#define	TXP_A2H_1			0x3c	/* arm->host comm 1 */
#define	TXP_A2H_0			0x40	/* arm->host comm 0 */

/*
 * interrupt bits (IMR, ISR, IER)
 */
#define	TXP_INT_RESERVED	0xffff0000
#define	TXP_INT_A2H_7		0x00008000	/* arm->host comm 7 */
#define	TXP_INT_A2H_6		0x00004000	/* arm->host comm 6 */
#define	TXP_INT_A2H_5		0x00002000	/* arm->host comm 5 */
#define	TXP_INT_A2H_4		0x00001000	/* arm->host comm 4 */
#define	TXP_INT_SELF		0x00000800	/* self interrupt */
#define	TXP_INT_PCI_TABORT	0x00000400	/* pci target abort */
#define	TXP_INT_PCI_MABORT	0x00000200	/* pci master abort */
#define	TXP_INT_DMA3		0x00000100	/* dma3 done */
#define	TXP_INT_DMA2		0x00000080	/* dma2 done */
#define	TXP_INT_DMA1		0x00000040	/* dma1 done */
#define	TXP_INT_DMA0		0x00000020	/* dma0 done */
#define	TXP_INT_A2H_3		0x00000010	/* arm->host comm 3 */
#define	TXP_INT_A2H_2		0x00000008	/* arm->host comm 2 */
#define	TXP_INT_A2H_1		0x00000004	/* arm->host comm 1 */
#define	TXP_INT_A2H_0		0x00000002	/* arm->host comm 0 */
#define	TXP_INT_LATCH		0x00000001	/* interrupt latch */

/*
 * soft reset register (SRR)
 */
#define	TXP_SRR_ALL		0x0000007f	/* full reset */

/*
 * Typhoon boot commands.
 */
#define	TXP_BOOTCMD_NULL			0x00
#define	TXP_BOOTCMD_DOWNLOAD_COMPLETE		0xfb
#define	TXP_BOOTCMD_SEGMENT_AVAILABLE		0xfc
#define	TXP_BOOTCMD_RUNTIME_IMAGE		0xfd
#define	TXP_BOOTCMD_REGISTER_BOOT_RECORD	0xff

/*
 * Typhoon runtime commands.
 */
#define	TXP_CMD_GLOBAL_RESET			0x00
#define	TXP_CMD_TX_ENABLE			0x01
#define	TXP_CMD_TX_DISABLE			0x02
#define	TXP_CMD_RX_ENABLE			0x03
#define	TXP_CMD_RX_DISABLE			0x04
#define	TXP_CMD_RX_FILTER_WRITE			0x05
#define	TXP_CMD_RX_FILTER_READ			0x06
#define	TXP_CMD_READ_STATISTICS			0x07
#define	TXP_CMD_CYCLE_STATISTICS		0x08
#define	TXP_CMD_CLEAR_STATISTICS		0x09
#define	TXP_CMD_MEMORY_READ			0x0a
#define	TXP_CMD_MEMORY_WRITE_SINGLE		0x0b
#define	TXP_CMD_VARIABLE_SECTION_READ		0x0c
#define	TXP_CMD_VARIABLE_SECTION_WRITE		0x0d
#define	TXP_CMD_STATIC_SECTION_READ		0x0e
#define	TXP_CMD_STATIC_SECTION_WRITE		0x0f
#define	TXP_CMD_IMAGE_SECTION_PROGRAM		0x10
#define	TXP_CMD_NVRAM_PAGE_READ			0x11
#define	TXP_CMD_NVRAM_PAGE_WRITE		0x12
#define	TXP_CMD_XCVR_SELECT			0x13
#define	TXP_CMD_TEST_MUX			0x14
#define	TXP_CMD_PHYLOOPBACK_ENABLE		0x15
#define	TXP_CMD_PHYLOOPBACK_DISABLE		0x16
#define	TXP_CMD_MAC_CONTROL_READ		0x17
#define	TXP_CMD_MAC_CONTROL_WRITE		0x18
#define	TXP_CMD_MAX_PKT_SIZE_READ		0x19
#define	TXP_CMD_MAX_PKT_SIZE_WRITE		0x1a
#define	TXP_CMD_MEDIA_STATUS_READ		0x1b
#define	TXP_CMD_MEDIA_STATUS_WRITE		0x1c
#define	TXP_CMD_NETWORK_DIAGS_READ		0x1d
#define	TXP_CMD_NETWORK_DIAGS_WRITE		0x1e
#define	TXP_CMD_PHY_MGMT_READ			0x1f
#define	TXP_CMD_PHY_MGMT_WRITE			0x20
#define	TXP_CMD_VARIABLE_PARAMETER_READ		0x21
#define	TXP_CMD_VARIABLE_PARAMETER_WRITE	0x22
#define	TXP_CMD_GOTO_SLEEP			0x23
#define	TXP_CMD_FIREWALL_CONTROL		0x24
#define	TXP_CMD_MCAST_HASH_MASK_WRITE		0x25
#define	TXP_CMD_STATION_ADDRESS_WRITE		0x26
#define	TXP_CMD_STATION_ADDRESS_READ		0x27
#define	TXP_CMD_STATION_MASK_WRITE		0x28
#define	TXP_CMD_STATION_MASK_READ		0x29
#define	TXP_CMD_VLAN_ETHER_TYPE_READ		0x2a
#define	TXP_CMD_VLAN_ETHER_TYPE_WRITE		0x2b
#define	TXP_CMD_VLAN_MASK_READ			0x2c
#define	TXP_CMD_VLAN_MASK_WRITE			0x2d
#define	TXP_CMD_BCAST_THROTTLE_WRITE		0x2e
#define	TXP_CMD_BCAST_THROTTLE_READ		0x2f
#define	TXP_CMD_DHCP_PREVENT_WRITE		0x30
#define	TXP_CMD_DHCP_PREVENT_READ		0x31
#define	TXP_CMD_RECV_BUFFER_CONTROL		0x32
#define	TXP_CMD_SOFTWARE_RESET			0x33
#define	TXP_CMD_CREATE_SA			0x34
#define	TXP_CMD_DELETE_SA			0x35
#define	TXP_CMD_ENABLE_RX_IP_OPTION		0x36
#define	TXP_CMD_RANDOM_NUMBER_CONTROL		0x37
#define	TXP_CMD_RANDOM_NUMBER_READ		0x38
#define	TXP_CMD_MATRIX_TABLE_MODE_WRITE		0x39
#define	TXP_CMD_MATRIX_DETAIL_READ		0x3a
#define	TXP_CMD_FILTER_ARRAY_READ		0x3b
#define	TXP_CMD_FILTER_DETAIL_READ		0x3c
#define	TXP_CMD_FILTER_TABLE_MODE_WRITE		0x3d
#define	TXP_CMD_FILTER_TCL_WRITE		0x3e
#define	TXP_CMD_FILTER_TBL_READ			0x3f
#define	TXP_CMD_VERSIONS_READ			0x43
#define	TXP_CMD_FILTER_DEFINE			0x45
#define	TXP_CMD_ADD_WAKEUP_PKT			0x46
#define	TXP_CMD_ADD_SLEEP_PKT			0x47
#define	TXP_CMD_ENABLE_SLEEP_EVENTS		0x48
#define	TXP_CMD_ENABLE_WAKEUP_EVENTS		0x49
#define	TXP_CMD_GET_IP_ADDRESS			0x4a
#define	TXP_CMD_READ_PCI_REG			0x4c
#define	TXP_CMD_WRITE_PCI_REG			0x4d
#define	TXP_CMD_OFFLOAD_READ			0x4e
#define	TXP_CMD_OFFLOAD_WRITE			0x4f
#define	TXP_CMD_HELLO_RESPONSE			0x57
#define	TXP_CMD_ENABLE_RX_FILTER		0x58
#define	TXP_CMD_RX_FILTER_CAPABILITY		0x59
#define	TXP_CMD_HALT				0x5d
#define	TXP_CMD_READ_IPSEC_INFO			0x54
#define	TXP_CMD_GET_IPSEC_ENABLE		0x67
#define	TXP_CMD_INVALID				0xffff

#define	TXP_FRAGMENT		0x0000
#define	TXP_TXFRAME		0x0001
#define	TXP_COMMAND		0x0002
#define	TXP_OPTION		0x0003
#define	TXP_RECEIVE		0x0004
#define	TXP_RESPONSE		0x0005

#define	TXP_TYPE_IPSEC		0x0000
#define	TXP_TYPE_TCPSEGMENT	0x0001

#define	TXP_PFLAG_NOCRC		0x0000
#define	TXP_PFLAG_IPCKSUM	0x0001
#define	TXP_PFLAG_TCPCKSUM	0x0002
#define	TXP_PFLAG_TCPSEGMENT	0x0004
#define	TXP_PFLAG_INSERTVLAN	0x0008
#define	TXP_PFLAG_IPSEC		0x0010
#define	TXP_PFLAG_PRIORITY	0x0020
#define	TXP_PFLAG_UDPCKSUM	0x0040
#define	TXP_PFLAG_PADFRAME	0x0080

#define	TXP_MISC_FIRSTDESC	0x0000
#define	TXP_MISC_LASTDESC	0x0001

#define	TXP_ERR_INTERNAL	0x0000
#define	TXP_ERR_FIFOUNDERRUN	0x0001
#define	TXP_ERR_BADSSD		0x0002
#define	TXP_ERR_RUNT		0x0003
#define	TXP_ERR_CRC		0x0004
#define	TXP_ERR_OVERSIZE	0x0005
#define	TXP_ERR_ALIGNMENT	0x0006
#define	TXP_ERR_DRIBBLEBIT	0x0007

#define	TXP_PROTO_UNKNOWN	0x0000
#define	TXP_PROTO_IP		0x0001
#define	TXP_PROTO_IPX		0x0002
#define	TXP_PROTO_RESERVED	0x0003

#define	TXP_STAT_PROTO		0x0001
#define	TXP_STAT_VLAN		0x0002
#define	TXP_STAT_IPFRAGMENT	0x0004
#define	TXP_STAT_IPSEC		0x0008
#define	TXP_STAT_IPCKSUMBAD	0x0010
#define	TXP_STAT_TCPCKSUMBAD	0x0020
#define	TXP_STAT_UDPCKSUMBAD	0x0040
#define	TXP_STAT_IPCKSUMGOOD	0x0080
#define	TXP_STAT_TCPCKSUMGOOD	0x0100
#define	TXP_STAT_UDPCKSUMGOOD	0x0200

struct txp_tx_desc {
	volatile u_int8_t	tx_flags;	/* type/descriptor flags */
	volatile u_int8_t	tx_numdesc;	/* number of descriptors */
	volatile u_int16_t	tx_totlen;	/* total packet length */
	volatile u_int32_t	tx_addrlo;	/* virt addr low word */
	volatile u_int32_t	tx_addrhi;	/* virt addr high word */
	volatile u_int32_t	tx_pflags;	/* processing flags */
};
#define	TX_FLAGS_TYPE_M		0x07		/* type mask */
#define	TX_FLAGS_TYPE_FRAG	0x00		/* type: fragment */
#define	TX_FLAGS_TYPE_DATA	0x01		/* type: data frame */
#define	TX_FLAGS_TYPE_CMD	0x02		/* type: command frame */
#define	TX_FLAGS_TYPE_OPT	0x03		/* type: options */
#define	TX_FLAGS_TYPE_RX	0x04		/* type: receive */
#define	TX_FLAGS_TYPE_RESP	0x05		/* type: response */
#define	TX_FLAGS_RESP		0x40		/* response requested */
#define	TX_FLAGS_VALID		0x80		/* valid descriptor */

#define	TX_PFLAGS_DNAC		0x00000001	/* do not add crc */
#define	TX_PFLAGS_IPCKSUM	0x00000002	/* ip checksum */
#define	TX_PFLAGS_TCPCKSUM	0x00000004	/* tcp checksum */
#define	TX_PFLAGS_TCPSEG	0x00000008	/* tcp segmentation */
#define	TX_PFLAGS_VLAN		0x00000010	/* insert vlan */
#define	TX_PFLAGS_IPSEC		0x00000020	/* perform ipsec */
#define	TX_PFLAGS_PRIO		0x00000040	/* priority field valid */
#define	TX_PFLAGS_UDPCKSUM	0x00000080	/* udp checksum */
#define	TX_PFLAGS_PADFRAME	0x00000100	/* pad frame */
#define	TX_PFLAGS_VLANTAG_M	0x0ffff000	/* vlan tag mask */
#define	TX_PFLAGS_VLANPRI_M	0x00700000	/* vlan priority mask */
#define	TX_PFLAGS_VLANTAG_S	12		/* amount to shift tag */

struct txp_rx_desc {
	volatile u_int8_t	rx_flags;	/* type/descriptor flags */
	volatile u_int8_t	rx_numdesc;	/* number of descriptors */
	volatile u_int16_t	rx_len;		/* frame length */
	volatile u_int32_t	rx_vaddrlo;	/* virtual address, lo word */
	volatile u_int32_t	rx_vaddrhi;	/* virtual address, hi word */
	volatile u_int32_t	rx_stat;	/* status */
	volatile u_int16_t	rx_filter;	/* filter status */
	volatile u_int16_t	rx_hash;	/* hash status */
	volatile u_int32_t	rx_vlan;	/* vlan tag/priority */
};

/* txp_rx_desc.rx_flags */
#define	RX_FLAGS_TYPE_M		0x07		/* type mask */
#define	RX_FLAGS_TYPE_FRAG	0x00		/* type: fragment */
#define	RX_FLAGS_TYPE_DATA	0x01		/* type: data frame */
#define	RX_FLAGS_TYPE_CMD	0x02		/* type: command frame */
#define	RX_FLAGS_TYPE_OPT	0x03		/* type: options */
#define	RX_FLAGS_TYPE_RX	0x04		/* type: receive */
#define	RX_FLAGS_TYPE_RESP	0x05		/* type: response */
#define	RX_FLAGS_RCV_TYPE_M	0x18		/* rcvtype mask */
#define	RX_FLAGS_RCV_TYPE_RX	0x00		/* rcvtype: receive */
#define	RX_FLAGS_RCV_TYPE_RSP	0x08		/* rcvtype: response */
#define	RX_FLAGS_ERROR		0x40		/* error in packet */

/* txp_rx_desc.rx_stat (if rx_flags & RX_FLAGS_ERROR bit set) */
#define	RX_ERROR_ADAPTER	0x00000000	/* adapter internal error */
#define	RX_ERROR_FIFO		0x00000001	/* fifo underrun */
#define	RX_ERROR_BADSSD		0x00000002	/* bad ssd */
#define	RX_ERROR_RUNT		0x00000003	/* runt packet */
#define	RX_ERROR_CRC		0x00000004	/* bad crc */
#define	RX_ERROR_OVERSIZE	0x00000005	/* oversized packet */
#define	RX_ERROR_ALIGN		0x00000006	/* alignment error */
#define	RX_ERROR_DRIBBLE	0x00000007	/* dribble bit */

/* txp_rx_desc.rx_stat (if rx_flags & RX_FLAGS_ERROR bit not set) */
#define	RX_STAT_PROTO_M		0x00000003	/* protocol mask */
#define	RX_STAT_PROTO_UK	0x00000000	/* unknown protocol */
#define	RX_STAT_PROTO_IPX	0x00000001	/* IPX */
#define	RX_STAT_PROTO_IP	0x00000002	/* IP */
#define	RX_STAT_PROTO_RSV	0x00000003	/* reserved */
#define	RX_STAT_VLAN		0x00000004	/* vlan tag (in rxd) */
#define	RX_STAT_IPFRAG		0x00000008	/* fragment, ipsec not done */
#define	RX_STAT_IPSEC		0x00000010	/* ipsec decoded packet */
#define	RX_STAT_IPCKSUMBAD	0x00000020	/* ip checksum failed */
#define	RX_STAT_UDPCKSUMBAD	0x00000040	/* udp checksum failed */
#define	RX_STAT_TCPCKSUMBAD	0x00000080	/* tcp checksum failed */
#define	RX_STAT_IPCKSUMGOOD	0x00000100	/* ip checksum succeeded */
#define	RX_STAT_UDPCKSUMGOOD	0x00000200	/* udp checksum succeeded */
#define	RX_STAT_TCPCKSUMGOOD	0x00000400	/* tcp checksum succeeded */


struct txp_rxbuf_desc {
	volatile u_int32_t	rb_paddrlo;
	volatile u_int32_t	rb_paddrhi;
	volatile u_int32_t	rb_vaddrlo;
	volatile u_int32_t	rb_vaddrhi;
};

/* Extension descriptor */
struct txp_ext_desc {
	volatile u_int32_t	ext_1;
	volatile u_int32_t	ext_2;
	volatile u_int32_t	ext_3;
	volatile u_int32_t	ext_4;
};

struct txp_cmd_desc {
	volatile u_int8_t	cmd_flags;
	volatile u_int8_t	cmd_numdesc;
	volatile u_int16_t	cmd_id;
	volatile u_int16_t	cmd_seq;
	volatile u_int16_t	cmd_par1;
	volatile u_int32_t	cmd_par2;
	volatile u_int32_t	cmd_par3;
};
#define	CMD_FLAGS_TYPE_M	0x07		/* type mask */
#define	CMD_FLAGS_TYPE_FRAG	0x00		/* type: fragment */
#define	CMD_FLAGS_TYPE_DATA	0x01		/* type: data frame */
#define	CMD_FLAGS_TYPE_CMD	0x02		/* type: command frame */
#define	CMD_FLAGS_TYPE_OPT	0x03		/* type: options */
#define	CMD_FLAGS_TYPE_RX	0x04		/* type: receive */
#define	CMD_FLAGS_TYPE_RESP	0x05		/* type: response */
#define	CMD_FLAGS_RESP		0x40		/* response requested */
#define	CMD_FLAGS_VALID		0x80		/* valid descriptor */

struct txp_rsp_desc {
	volatile u_int8_t	rsp_flags;
	volatile u_int8_t	rsp_numdesc;
	volatile u_int16_t	rsp_id;
	volatile u_int16_t	rsp_seq;
	volatile u_int16_t	rsp_par1;
	volatile u_int32_t	rsp_par2;
	volatile u_int32_t	rsp_par3;
};
#define	RSP_FLAGS_TYPE_M	0x07		/* type mask */
#define	RSP_FLAGS_TYPE_FRAG	0x00		/* type: fragment */
#define	RSP_FLAGS_TYPE_DATA	0x01		/* type: data frame */
#define	RSP_FLAGS_TYPE_CMD	0x02		/* type: command frame */
#define	RSP_FLAGS_TYPE_OPT	0x03		/* type: options */
#define	RSP_FLAGS_TYPE_RX	0x04		/* type: receive */
#define	RSP_FLAGS_TYPE_RESP	0x05		/* type: response */
#define	RSP_FLAGS_ERROR		0x40		/* response error */

struct txp_frag_desc {
	volatile u_int8_t	frag_flags;	/* type/descriptor flags */
	volatile u_int8_t	frag_rsvd1;
	volatile u_int16_t	frag_len;	/* bytes in this fragment */
	volatile u_int32_t	frag_addrlo;	/* phys addr low word */
	volatile u_int32_t	frag_addrhi;	/* phys addr high word */
	volatile u_int32_t	frag_rsvd2;
};
#define	FRAG_FLAGS_TYPE_M	0x07		/* type mask */
#define	FRAG_FLAGS_TYPE_FRAG	0x00		/* type: fragment */
#define	FRAG_FLAGS_TYPE_DATA	0x01		/* type: data frame */
#define	FRAG_FLAGS_TYPE_CMD	0x02		/* type: command frame */
#define	FRAG_FLAGS_TYPE_OPT	0x03		/* type: options */
#define	FRAG_FLAGS_TYPE_RX	0x04		/* type: receive */
#define	FRAG_FLAGS_TYPE_RESP	0x05		/* type: response */
#define	FRAG_FLAGS_VALID	0x80		/* valid descriptor */

struct txp_opt_desc {
	u_int8_t		opt_desctype:3,
				opt_rsvd:1,
				opt_type:4;

	u_int8_t		opt_num;
	u_int16_t		opt_dep1;
	u_int32_t		opt_dep2;
	u_int32_t		opt_dep3;
	u_int32_t		opt_dep4;
};

struct txp_ipsec_desc {
	u_int8_t		ipsec_desctpe:3,
				ipsec_rsvd:1,
				ipsec_type:4;

	u_int8_t		ipsec_num;
	u_int16_t		ipsec_flags;
	u_int16_t		ipsec_ah1;
	u_int16_t		ipsec_esp1;
	u_int16_t		ipsec_ah2;
	u_int16_t		ipsec_esp2;
	u_int32_t		ipsec_rsvd1;
};

struct txp_tcpseg_desc {
	u_int8_t		tcpseg_desctype:3,
				tcpseg_rsvd:1,
				tcpseg_type:4;

	u_int8_t		tcpseg_num;

	u_int16_t		tcpseg_mss:12,
				tcpseg_misc:4;

	u_int32_t		tcpseg_respaddr;
	u_int32_t		tcpseg_txbytes;
	u_int32_t		tcpseg_lss;
};

/*
 * Transceiver types
 */
#define	TXP_XCVR_10_HDX		0
#define	TXP_XCVR_10_FDX		1
#define	TXP_XCVR_100_HDX	2
#define	TXP_XCVR_100_FDX	3
#define	TXP_XCVR_AUTO		4

#define TXP_MEDIA_CRC		0x0004	/* crc strip disable */
#define	TXP_MEDIA_CD		0x0010	/* collision detection */
#define	TXP_MEDIA_CS		0x0020	/* carrier sense */
#define	TXP_MEDIA_POL		0x0400	/* polarity reversed */
#define	TXP_MEDIA_NOLINK	0x0800	/* 0 = link, 1 = no link */

/*
 * receive filter bits (par1 to TXP_CMD_RX_FILTER_{READ|WRITE})
 */
#define	TXP_RXFILT_DIRECT	0x0001	/* directed packets */
#define	TXP_RXFILT_ALLMULTI	0x0002	/* all multicast packets */
#define	TXP_RXFILT_BROADCAST	0x0004	/* broadcast packets */
#define	TXP_RXFILT_PROMISC	0x0008	/* promiscuous mode */
#define	TXP_RXFILT_HASHMULTI	0x0010	/* use multicast filter */

/* multicast polynomial */
#define	TXP_POLYNOMIAL		0x04c11db7

/*
 * boot record (pointers to rings)
 */
struct txp_boot_record {
	volatile u_int32_t	br_hostvar_lo;		/* host ring pointer */
	volatile u_int32_t	br_hostvar_hi;
	volatile u_int32_t	br_txlopri_lo;		/* tx low pri ring */
	volatile u_int32_t	br_txlopri_hi;
	volatile u_int32_t	br_txlopri_siz;
	volatile u_int32_t	br_txhipri_lo;		/* tx high pri ring */
	volatile u_int32_t	br_txhipri_hi;
	volatile u_int32_t	br_txhipri_siz;
	volatile u_int32_t	br_rxlopri_lo;		/* rx low pri ring */
	volatile u_int32_t	br_rxlopri_hi;
	volatile u_int32_t	br_rxlopri_siz;
	volatile u_int32_t	br_rxbuf_lo;		/* rx buffer ring */
	volatile u_int32_t	br_rxbuf_hi;
	volatile u_int32_t	br_rxbuf_siz;
	volatile u_int32_t	br_cmd_lo;		/* command ring */
	volatile u_int32_t	br_cmd_hi;
	volatile u_int32_t	br_cmd_siz;
	volatile u_int32_t	br_resp_lo;		/* response ring */
	volatile u_int32_t	br_resp_hi;
	volatile u_int32_t	br_resp_siz;
	volatile u_int32_t	br_zero_lo;		/* zero word */
	volatile u_int32_t	br_zero_hi;
	volatile u_int32_t	br_rxhipri_lo;		/* rx high pri ring */
	volatile u_int32_t	br_rxhipri_hi;
	volatile u_int32_t	br_rxhipri_siz;
};

/*
 * hostvar structure (shared with typhoon)
 */
struct txp_hostvar {
	volatile u_int32_t	hv_rx_hi_read_idx;	/* host->arm */
	volatile u_int32_t	hv_rx_lo_read_idx;	/* host->arm */
	volatile u_int32_t	hv_rx_buf_write_idx;	/* host->arm */
	volatile u_int32_t	hv_resp_read_idx;	/* host->arm */
	volatile u_int32_t	hv_tx_lo_desc_read_idx;	/* arm->host */
	volatile u_int32_t	hv_tx_hi_desc_read_idx;	/* arm->host */
	volatile u_int32_t	hv_rx_lo_write_idx;	/* arm->host */
	volatile u_int32_t	hv_rx_buf_read_idx;	/* arm->host */
	volatile u_int32_t	hv_cmd_read_idx;	/* arm->host */
	volatile u_int32_t	hv_resp_write_idx;	/* arm->host */
	volatile u_int32_t	hv_rx_hi_write_idx;	/* arm->host */
};

/*
 * TYPHOON status register state (in TXP_A2H_0)
 */
#define	STAT_ROM_CODE			0x00000001
#define	STAT_ROM_EEPROM_LOAD		0x00000002
#define	STAT_WAITING_FOR_BOOT		0x00000007
#define	STAT_RUNNING			0x00000009
#define	STAT_WAITING_FOR_HOST_REQUEST	0x0000000d
#define	STAT_WAITING_FOR_SEGMENT	0x00000010
#define	STAT_SLEEPING			0x00000011
#define	STAT_HALTED			0x00000014

#define	TX_ENTRIES			256
#define	RX_ENTRIES			128
#define	RXBUF_ENTRIES			256
#define	CMD_ENTRIES			32
#define	RSP_ENTRIES			32

#define	OFFLOAD_TCPCKSUM		0x00000002	/* tcp checksum */
#define	OFFLOAD_UDPCKSUM		0x00000004	/* udp checksum */
#define	OFFLOAD_IPCKSUM			0x00000008	/* ip checksum */
#define	OFFLOAD_IPSEC			0x00000010	/* ipsec enable */
#define	OFFLOAD_BCAST			0x00000020	/* broadcast throttle */
#define	OFFLOAD_DHCP			0x00000040	/* dhcp prevention */
#define	OFFLOAD_VLAN			0x00000080	/* vlan enable */
#define	OFFLOAD_FILTER			0x00000100	/* filter enable */
#define	OFFLOAD_TCPSEG			0x00000200	/* tcp segmentation */
#define	OFFLOAD_MASK			0xfffffffe	/* mask off low bit */

/*
 * Macros for converting array indices to offsets within the descriptor
 * arrays.  The chip operates on offsets, but it's much easier for us
 * to operate on indices.  Assumes descriptor entries are 16 bytes.
 */
#define	TXP_IDX2OFFSET(idx)	((idx) << 4)
#define	TXP_OFFSET2IDX(off)	((off) >> 4)
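/*
 * For example, with 16-byte descriptors, ring index 3 corresponds to
 * byte offset TXP_IDX2OFFSET(3) == 48, and TXP_OFFSET2IDX(48) == 3.
 */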

struct txp_dma_alloc {
	u_int64_t		dma_paddr;
	caddr_t			dma_vaddr;
	bus_dmamap_t		dma_map;
	bus_dma_segment_t	dma_seg;
	int			dma_nseg;
};

struct txp_cmd_ring {
	struct txp_cmd_desc	*base;
	u_int32_t		lastwrite;
	u_int32_t		size;
};

struct txp_rsp_ring {
	struct txp_rsp_desc	*base;
	u_int32_t		lastwrite;
	u_int32_t		size;
};

struct txp_tx_ring {
	struct txp_tx_desc	*r_desc;	/* base address of descs */
	u_int32_t		r_reg;		/* register to activate */
	u_int32_t		r_prod;		/* producer */
	u_int32_t		r_cons;		/* consumer */
	u_int32_t		r_cnt;		/* # descs in use */
	volatile u_int32_t	*r_off;		/* hostvar index pointer */
};

struct txp_swdesc {
	struct mbuf *		sd_mbuf;
	bus_dmamap_t		sd_map;
};

struct txp_rx_ring {
	struct txp_rx_desc	*r_desc;	/* base address of descs */
	volatile u_int32_t	*r_roff;	/* hv read offset ptr */
	volatile u_int32_t	*r_woff;	/* hv write offset ptr */
};

struct txp_softc {
	struct device		sc_dev;		/* base device */
	struct ethercom		sc_arpcom;	/* ethernet common */
	struct txp_hostvar	*sc_hostvar;
	struct txp_boot_record	*sc_boot;
	bus_space_handle_t	sc_bh;		/* bus handle (regs) */
	bus_space_tag_t		sc_bt;		/* bus tag (regs) */
	bus_dma_tag_t		sc_dmat;	/* dma tag */
	struct txp_cmd_ring	sc_cmdring;
	struct txp_rsp_ring	sc_rspring;
	struct txp_swdesc	sc_txd[TX_ENTRIES];
	void *			sc_ih;
	struct callout		sc_tick;
	struct ifmedia		sc_ifmedia;
	struct txp_tx_ring	sc_txhir, sc_txlor;
	struct txp_rxbuf_desc	*sc_rxbufs;
	struct txp_rx_ring	sc_rxhir, sc_rxlor;
	u_int16_t		sc_xcvr;
	u_int16_t		sc_seq;
	struct txp_dma_alloc	sc_boot_dma, sc_host_dma, sc_zero_dma;
	struct txp_dma_alloc	sc_rxhiring_dma, sc_rxloring_dma;
	struct txp_dma_alloc	sc_txhiring_dma, sc_txloring_dma;
	struct txp_dma_alloc	sc_cmdring_dma, sc_rspring_dma;
	struct txp_dma_alloc	sc_rxbufring_dma;
	int			sc_cold;
	u_int32_t		sc_rx_capability, sc_tx_capability;
	int			sc_flags;
#define TXP_USESUBSYSTEM	0x1 /* use PCI subsys reg for detail info */
#define TXP_SERVERVERSION	0x2
#define TXP_FIBER		0x4
};

#define	TXP_DEVNAME(sc)		((sc)->sc_cold ? "" : (sc)->sc_dev.dv_xname)

struct txp_fw_file_header {
	u_int8_t	magicid[8];	/* TYPHOON\0 */
	u_int32_t	version;
	u_int32_t	nsections;
	u_int32_t	addr;
};

struct txp_fw_section_header {
	u_int32_t	nbytes;
	u_int16_t	cksum;
	u_int16_t	reserved;
	u_int32_t	addr;
};

#define	TXP_MAX_SEGLEN	0xffff
#define	TXP_MAX_PKTLEN	0x0800

#define	WRITE_REG(sc,reg,val) \
    bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, reg, val)
#define	READ_REG(sc,reg) \
    bus_space_read_4((sc)->sc_bt, (sc)->sc_bh, reg)


<txp.4>
.\"     $NetBSD: txp.4,v 1.2 2003/07/09 14:16:26 wiz Exp $
.\"
.\" Copyright (c) 2001 Jason L. Wright (jason@thought.net)
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\"    notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\"    notice, this list of conditions and the following disclaimer in the
.\"    documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
.\" DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
.\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
.\" (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
.\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
.\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd July 4, 2003
.Dt TXP 4
.Os
.Sh NAME
.Nm txp
.Nd 3Com 3XP Typhoon/Sidewinder (3CR990) Ethernet interface
.Sh SYNOPSIS
.Cd "txp* at pci? dev ? function ?"
.Sh DESCRIPTION
The
.Nm
interface provides access to the 10Mb/s and 100Mb/s Ethernet networks via the
.Tn 3Com
.Tn Typhoon/Sidewinder
chipset.
This driver supports the following cards:
.Pp
.Bl -bullet -offset indent -compact
.It
3Com 3CR990-TX-95
.It
3Com 3CR990-TX-97
.It
3Com 3CR990SVR95
.It
3Com 3CR990SVR97
.El
.Pp
Basic Ethernet functions are provided as well as support for
.\" .Xr vlan 4
.\" tag removal and insertion assistance,
receive
.Xr ip 4 ,
.Xr tcp 4 ,
and
.Xr udp 4
checksum offloading,
and
transmit
.Xr ip 4
checksum offloading.
There is currently no support for
transmit
.Xr tcp 4
or
.Xr udp 4
checksum offloading,
.Xr tcp 4
segmentation, or
.Xr ipsec 4
acceleration.
Note that hardware checksumming is only used when the interface is not
in
.Xr bridge 4
mode.
.Pp
Each of the host's network addresses
is specified at boot time with an
.Dv SIOCSIFADDR
.Xr ioctl 2 .
The
.Nm
interface employs the address resolution protocol described in
.Xr arp 4
to dynamically map between Internet and Ethernet addresses on the local
network.
.Pp
When a
.Nm
interface is brought up, by default, it will attempt to auto-negotiate the
link speed and duplex mode.
The speeds, in order of attempt, are: 100Mb/s Full Duplex, 100Mb/s Half Duplex,
10Mb/s Full Duplex, and 10Mb/s Half Duplex.
.Pp
The
.Nm
supports several media types, which are selected via the
.Xr ifconfig 8
command.
The supported media types are:
.Bl -tag -width xxxxxxxxxxxxxx -offset indent
.It media autoselect
Attempt to autoselect the media type (default)
.It media 100baseTX mediaopt full-duplex
Use 100baseTX, full duplex
.It media 100baseTX Op mediaopt half-duplex
Use 100baseTX, half duplex
.It media 10baseT mediaopt full-duplex
Use 10baseT, full duplex
.It media 10baseT Op mediaopt half-duplex
Use 10baseT, half duplex
.El
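.Pp
For example, the following
.Xr ifconfig 8
command selects 100baseTX full duplex on the first
.Nm
interface:
.Pp
.Dl ifconfig txp0 media 100baseTX mediaopt full-duplex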
.Sh SEE ALSO
.Xr arp 4 ,
.Xr ifmedia 4 ,
.Xr inet 4 ,
.Xr intro 4 ,
.Xr ip 4 ,
.Xr netintro 4 ,
.Xr pci 4 ,
.Xr tcp 4 ,
.Xr udp 4 ,
.Xr vlan 4 ,
.Xr ifconfig 8
.Sh HISTORY
The
.Nm
driver first appeared in
.Nx 2.0 .

<dmesg output from a working kernel>
NetBSD 1.6.1 (NFBNETBSD_TXP) #0: Fri Aug  1 08:25:29 PDT 2003
    buhrow@lothlorien.nfbcal.org:/usr/local/netbsd/src/sys/arch/i386/compile/NFBNETBSD_TXP
cpu0: Intel Pentium III (Coppermine) (686-class), 756.83 MHz
cpu0: I-cache 16 KB 32b/line 4-way, D-cache 16 KB 32b/line 2-way
cpu0: L2 cache 256 KB 32b/line 8-way
cpu0: features 383f9ff<FPU,VME,DE,PSE,TSC,MSR,PAE,MCE,CX8,SEP,MTRR>
cpu0: features 383f9ff<PGE,MCA,CMOV,FGPAT,PSE36,MMX>
cpu0: features 383f9ff<FXSR,SSE>
total memory = 254 MB
avail memory = 230 MB
using 3284 buffers containing 13136 KB of memory
BIOS32 rev. 0 found at 0xf06b0
PCI BIOS rev. 2.1 found at 0xf08b0
PCI IRQ Routing Table rev. 1.0 found at 0xf0e70, size 208 bytes (11 entries)
PCI Interrupt Router at 000:31:0 (Intel 82371FB PCI-to-ISA Bridge (PIIX))
mainbus0 (root)
pci0 at mainbus0 bus 0: configuration mode 1
pci0: i/o space, memory space enabled, rd/line, rd/mult, wr/inv ok
pchb0 at pci0 dev 0 function 0
pchb0: Intel 82810E Memory Controller Hub (rev. 0x03)
pchb0: random number generator enabled
agp0 at pchb0: aperture at 0xe4000000, size 0x4000000
vga1 at pci0 dev 1 function 0: Intel 82810E Graphics Controller (rev. 0x03)
wsdisplay0 at vga1 kbdmux 1
wsmux1: connecting to wsdisplay0
ppb0 at pci0 dev 30 function 0: Intel 82801AA Hub-to-PCI Bridge (rev. 0x02)
pci1 at ppb0 bus 1
pci1: i/o space, memory space enabled
clcs0 at pci1 dev 5 function 0: Cirrus Logic CS4280 CrystalClear Audio Interface (rev. 0x01)
clcs0: interrupting at irq 10
clcs0: CRY20 codec; headphone, 20 bit DAC, 18 bit ADC, Crystal Semi 3D
audio0 at clcs0: full duplex, independent
midi0 at clcs0: CS4280 MIDI UART
pciide1 at pci1 dev 8 function 0: Promise Ultra133/ATA Bus Master IDE Accelerator (rev. 0x02)
pciide1: bus-master DMA support present
pciide1: primary channel configured to native-PCI mode
pciide1: using irq 11 for native-PCI interrupt
wd1 at pciide1 channel 0 drive 0: <ST380021A>
wd1: drive supports 16-sector PIO transfers, LBA addressing
wd1: 76319 MB, 16383 cyl, 16 head, 63 sec, 512 bytes/sect x 156301488 sectors
wd1: 32-bit data port
wd1: drive supports PIO mode 4, DMA mode 2, Ultra-DMA mode 5 (Ultra/100)
wd1(pciide1:0:0): using PIO mode 4, Ultra-DMA mode 5 (Ultra/100) (using DMA data transfers)
pciide1: secondary channel configured to native-PCI mode
wd2 at pciide1 channel 1 drive 0: <ST380021A>
wd2: drive supports 16-sector PIO transfers, LBA addressing
wd2: 76319 MB, 16383 cyl, 16 head, 63 sec, 512 bytes/sect x 156301488 sectors
wd2: 32-bit data port
wd2: drive supports PIO mode 4, DMA mode 2, Ultra-DMA mode 5 (Ultra/100)
wd3 at pciide1 channel 1 drive 1: <ST380021A>
wd3: drive supports 16-sector PIO transfers, LBA addressing
wd3: 76319 MB, 16383 cyl, 16 head, 63 sec, 512 bytes/sect x 156301488 sectors
wd3: 32-bit data port
wd3: drive supports PIO mode 4, DMA mode 2, Ultra-DMA mode 5 (Ultra/100)
wd2(pciide1:1:0): using PIO mode 4, Ultra-DMA mode 5 (Ultra/100) (using DMA data transfers)
wd3(pciide1:1:1): using PIO mode 4, Ultra-DMA mode 5 (Ultra/100) (using DMA data transfers)
ahc1 at pci1 dev 9 function 0
ahc1: interrupting at irq 10
ahc1: aic7870 Single Channel A, SCSI Id=7, 16/255 SCBs
scsibus0 at ahc1: 8 targets, 8 luns per target
txp0 at pci1 dev 10 function 0: 3Com 3c990B 10/100 Ethernet with 3XP (SVR)
txp0: interrupting at irq 12
txp0: Ethernet address 00:10:23:d8:5e:e2
tlp0 at pci1 dev 11 function 0: Lite-On 82C169 Ethernet, pass 2.0
tlp0: interrupting at irq 9
tlp0: Ethernet address 00:a0:cc:60:b6:fd
bmtphy0 at tlp0 phy 1: BCM5201 10/100 media interface, rev. 2
bmtphy0: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto
pcib0 at pci0 dev 31 function 0
pcib0: Intel 82801AA LPC Interface Bridge (rev. 0x02)
pciide0 at pci0 dev 31 function 1: Intel 82801AA IDE Controller (ICH) (rev. 0x02)
pciide0: bus-master DMA support present
pciide0: primary channel wired to compatibility mode
wd0 at pciide0 channel 0 drive 0: <ST380021A>
wd0: drive supports 16-sector PIO transfers, LBA addressing
wd0: 76319 MB, 16383 cyl, 16 head, 63 sec, 512 bytes/sect x 156301488 sectors
wd0: 32-bit data port
wd0: drive supports PIO mode 4, DMA mode 2, Ultra-DMA mode 5 (Ultra/100)
pciide0: primary channel interrupting at irq 14
wd0(pciide0:0:0): using PIO mode 4, Ultra-DMA mode 4 (Ultra/66) (using DMA data transfers)
pciide0: secondary channel wired to compatibility mode
atapibus0 at pciide0 channel 1: 2 targets
cd0 at atapibus0 drive 0: <FX4820T, , D03D> type 5 cdrom removable
cd0: 32-bit data port
cd0: drive supports PIO mode 4, DMA mode 2, Ultra-DMA mode 2 (Ultra/33)
pciide0: secondary channel interrupting at irq 15
cd0(pciide0:1:0): using PIO mode 4, Ultra-DMA mode 2 (Ultra/33) (using DMA data transfers)
uhci0 at pci0 dev 31 function 2: Intel 82801AA USB Controller (rev. 0x02)
uhci0: interrupting at irq 9
usb0 at uhci0: USB revision 1.0
uhub0 at usb0
uhub0: Intel UHCI root hub, class 9/0, rev 1.00/1.00, addr 1
uhub0: 2 ports with 2 removable, self powered
Intel 82801AA SMBus Controller (SMBus serial bus, revision 0x02) at pci0 dev 31 function 3 not configured
isa0 at pcib0
com0 at isa0 port 0x3f8-0x3ff irq 4: ns16550a, working fifo
com0: console
com1 at isa0 port 0x2f8-0x2ff irq 3: ns16550a, working fifo
pckbc0 at isa0 port 0x60-0x64
lpt0 at isa0 port 0x378-0x37b irq 7
pcppi0 at isa0 port 0x61
midi1 at pcppi0: PC speaker
spkr0 at pcppi0
sysbeep0 at pcppi0
isapnp0 at isa0 port 0x279: ISA Plug 'n Play device support
npx0 at isa0 port 0xf0-0xff: using exception 16
fdc0 at isa0 port 0x3f0-0x3f7 irq 6 drq 2
fd0 at fdc0 drive 0: 1.44MB, 80 cyl, 2 head, 18 sec
isapnp0: no ISA Plug 'n Play devices found
apm0 at mainbus0: Power Management spec V1.2 (BIOS mgmt disabled)
APM power mgmt engage (device 1): power management disabled (0x10f)
biomask ef67 netmask ff67 ttymask ffe7
ahc1: Someone reset channel A
scsibus0: waiting 2 seconds for devices to settle...
Kernelized RAIDframe activated
RAID autoconfigure
Configuring raid0:
RAIDFRAME: protectedSectors is 64
RAIDFRAME: Configure (RAID Level 5): total number of sectors is 304213760 (148541 MB)
RAIDFRAME(RAID Level 5): Using 20 floating recon bufs with head sep limit 10
ugen0 at uhub0 port 2
ugen0: Keyspan USA-19W serial adapter, rev 1.00/80.01, addr 2
boot device: raid0
root on raid0a dumps on wd0b
root file system type: ffs
raid0: Device already configured!
Accounting started
>How-To-Repeat:
>Fix:
>Release-Note:
>Audit-Trail:
>Unformatted: