NetBSD-Bugs archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
kern/39094: Add et network driver (Agere ET1310/ET1301)
>Number: 39094
>Category: kern
>Synopsis: Add et (Agere ET1310/ET1301) network driver to NetBSD
>Confidential: no
>Severity: non-critical
>Priority: medium
>Responsible: kern-bug-people
>State: open
>Class: change-request
>Submitter-Id: net
>Arrival-Date: Thu Jul 03 17:30:00 +0000 2008
>Originator: Kaspar Brand
>Release: NetBSD-current
>Description:
It would be nice to see support for the Agere ET1310/ET1301
Ethernet controllers in NetBSD. A driver was originally written by
Sepherosa Ziehau for DragonFly and then ported to OpenBSD by Jonathan Gray.
I've ported it to NetBSD recently, and it works fine for me with
4.99.67 (on i386). For reference purposes, I'm also including a diff
which shows the changes compared to the versions from OpenBSD
(et_openbsd_netbsd.diff, driver files only).
Thanks for considering inclusion of this driver. If there's anything
else I can do to help with getting it imported, please let me know.
>Fix:
Import the attached patch (et.patch), which adds/touches the following
files:
sys/dev/mii/etphy.c
sys/dev/mii/miidevs
sys/dev/mii/miidevs.h
sys/dev/mii/miidevs_data.h
sys/dev/mii/files.mii
sys/dev/pci/if_et.c
sys/dev/pci/if_etreg.h
sys/dev/pci/pcidevs
sys/dev/pci/pcidevs.h
sys/dev/pci/pcidevs_data.h
sys/dev/pci/files.pci
share/man/man4/et.4
share/man/man4/etphy.4
--------------020302010700040001020308
Content-Type: text/plain;
name="et.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline;
filename="et.patch"
Index: sys/dev/mii/etphy.c
===================================================================
RCS file: sys/dev/mii/etphy.c
diff -N sys/dev/mii/etphy.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/dev/mii/etphy.c 3 Jul 2008 16:34:02 -0000
@@ -0,0 +1,349 @@
+/* $NetBSD$ */
+/* $OpenBSD: etphy.c,v 1.4 2008/04/02 20:12:58 brad Exp $ */
+
+/*
+ * Copyright (c) 2007 The DragonFly Project. All rights reserved.
+ *
+ * This code is derived from software contributed to The DragonFly Project
+ * by Sepherosa Ziehau <sepherosa%gmail.com@localhost>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of The DragonFly Project nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific, prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $DragonFly: src/sys/dev/netif/mii_layer/truephy.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/socket.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include <dev/mii/miidevs.h>
+
+#define ETPHY_INDEX 0x10 /* XXX reserved in DS */
+#define ETPHY_INDEX_MAGIC 0x402
+#define ETPHY_DATA 0x11 /* XXX reserved in DS */
+
+#define ETPHY_CTRL 0x12
+#define ETPHY_CTRL_DIAG 0x0004
+#define ETPHY_CTRL_RSV1 0x0002 /* XXX reserved */
+#define ETPHY_CTRL_RSV0 0x0001 /* XXX reserved */
+
+#define ETPHY_CONF 0x16
+#define ETPHY_CONF_TXFIFO_MASK 0x3000
+#define ETPHY_CONF_TXFIFO_8 0x0000
+#define ETPHY_CONF_TXFIFO_16 0x1000
+#define ETPHY_CONF_TXFIFO_24 0x2000
+#define ETPHY_CONF_TXFIFO_32 0x3000
+
+#define ETPHY_SR 0x1a
+#define ETPHY_SR_SPD_MASK 0x0300
+#define ETPHY_SR_SPD_1000T 0x0200
+#define ETPHY_SR_SPD_100TX 0x0100
+#define ETPHY_SR_SPD_10T 0x0000
+#define ETPHY_SR_FDX 0x0080
+
+
+int etphy_service(struct mii_softc *, struct mii_data *, int);
+void etphy_attach(device_t, device_t, void *);
+int etphy_match(device_t, cfdata_t, void *);
+void etphy_reset(struct mii_softc *);
+void etphy_status(struct mii_softc *);
+
+const struct mii_phy_funcs etphy_funcs = {
+ etphy_service, etphy_status, etphy_reset,
+};
+
+static const struct mii_phydesc etphys[] = {
+ { MII_OUI_AGERE, MII_MODEL_AGERE_ET1011,
+ MII_STR_AGERE_ET1011 },
+ { 0, 0,
+ NULL },
+};
+
+CFATTACH_DECL_NEW(etphy, sizeof(struct mii_softc),
+ etphy_match, etphy_attach, mii_phy_detach, mii_phy_activate);
+
+static const struct etphy_dsp {
+ uint16_t index;
+ uint16_t data;
+} etphy_dspcode[] = {
+ { 0x880b, 0x0926 }, /* AfeIfCreg4B1000Msbs */
+ { 0x880c, 0x0926 }, /* AfeIfCreg4B100Msbs */
+ { 0x880d, 0x0926 }, /* AfeIfCreg4B10Msbs */
+
+ { 0x880e, 0xb4d3 }, /* AfeIfCreg4B1000Lsbs */
+ { 0x880f, 0xb4d3 }, /* AfeIfCreg4B100Lsbs */
+ { 0x8810, 0xb4d3 }, /* AfeIfCreg4B10Lsbs */
+
+ { 0x8805, 0xb03e }, /* AfeIfCreg3B1000Msbs */
+ { 0x8806, 0xb03e }, /* AfeIfCreg3B100Msbs */
+ { 0x8807, 0xff00 }, /* AfeIfCreg3B10Msbs */
+
+ { 0x8808, 0xe090 }, /* AfeIfCreg3B1000Lsbs */
+ { 0x8809, 0xe110 }, /* AfeIfCreg3B100Lsbs */
+ { 0x880a, 0x0000 }, /* AfeIfCreg3B10Lsbs */
+
+ { 0x300d, 1 }, /* DisableNorm */
+
+ { 0x280c, 0x0180 }, /* LinkHoldEnd */
+
+ { 0x1c21, 0x0002 }, /* AlphaM */
+
+ { 0x3821, 6 }, /* FfeLkgTx0 */
+ { 0x381d, 1 }, /* FfeLkg1g4 */
+ { 0x381e, 1 }, /* FfeLkg1g5 */
+ { 0x381f, 1 }, /* FfeLkg1g6 */
+ { 0x3820, 1 }, /* FfeLkg1g7 */
+
+ { 0x8402, 0x01f0 }, /* Btinact */
+ { 0x800e, 20 }, /* LftrainTime */
+ { 0x800f, 24 }, /* DvguardTime */
+ { 0x8010, 46 } /* IdlguardTime */
+};
+
+int
+etphy_match(device_t parent, cfdata_t match, void *aux)
+{
+ struct mii_attach_args *ma = aux;
+
+ if (mii_phy_match(ma, etphys) != NULL)
+ return 10;
+
+ return 0;
+}
+
+void
+etphy_attach(device_t parent, device_t self, void *aux)
+{
+ struct mii_softc *sc = device_private(self);
+ struct mii_attach_args *ma = aux;
+ struct mii_data *mii = ma->mii_data;
+ const struct mii_phydesc *mpd;
+
+ mpd = mii_phy_match(ma, etphys);
+ aprint_normal(": %s, rev. %d\n", mpd->mpd_name, MII_REV(ma->mii_id2));
+
+ sc->mii_dev = self;
+ sc->mii_inst = mii->mii_instance;
+ sc->mii_phy = ma->mii_phyno;
+ sc->mii_funcs = &etphy_funcs;
+ sc->mii_mpd_model = MII_MODEL(ma->mii_id2);
+ sc->mii_pdata = mii;
+ sc->mii_flags = ma->mii_flags;
+
+ sc->mii_flags |= MIIF_NOISOLATE | MIIF_NOLOOP;
+
+ PHY_RESET(sc);
+
+ sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & ma->mii_capmask;
+ if (sc->mii_capabilities & BMSR_EXTSTAT) {
+ sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR);
+ /* No 1000baseT half-duplex support */
+ sc->mii_extcapabilities &= ~EXTSR_1000THDX;
+ }
+ aprint_normal_dev(self, "");
+ if ((sc->mii_capabilities & BMSR_MEDIAMASK) == 0)
+ aprint_error("no media present");
+ else
+ mii_phy_add_media(sc);
+ aprint_normal("\n");
+
+ if (!pmf_device_register(self, NULL, mii_phy_resume))
+ aprint_error_dev(self, "couldn't establish power handler\n");
+}
+
+int
+etphy_service(struct mii_softc *sc, struct mii_data *mii, int cmd)
+{
+ struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
+ int bmcr;
+
+ switch (cmd) {
+ case MII_POLLSTAT:
+ /*
+ * If we're not polling our PHY instance, just return.
+ */
+ if (IFM_INST(ife->ifm_media) != sc->mii_inst)
+ return 0;
+ break;
+
+ case MII_MEDIACHG:
+ /*
+ * If the media indicates a different PHY instance,
+ * isolate ourselves.
+ */
+ if (IFM_INST(ife->ifm_media) != sc->mii_inst) {
+ bmcr = PHY_READ(sc, MII_BMCR);
+ PHY_WRITE(sc, MII_BMCR, bmcr | BMCR_ISO);
+ return 0;
+ }
+
+ /*
+ * If the interface is not up, don't do anything.
+ */
+ if ((mii->mii_ifp->if_flags & IFF_UP) == 0)
+ break;
+
+ if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
+ bmcr = PHY_READ(sc, MII_BMCR) & ~BMCR_AUTOEN;
+ PHY_WRITE(sc, MII_BMCR, bmcr);
+ PHY_WRITE(sc, MII_BMCR, bmcr | BMCR_PDOWN);
+ }
+
+ mii_phy_setmedia(sc);
+
+ if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
+ bmcr = PHY_READ(sc, MII_BMCR) & ~BMCR_PDOWN;
+ PHY_WRITE(sc, MII_BMCR, bmcr);
+
+ if (IFM_SUBTYPE(ife->ifm_media) == IFM_1000_T) {
+ PHY_WRITE(sc, MII_BMCR,
+ bmcr | BMCR_AUTOEN | BMCR_STARTNEG);
+ }
+ }
+ break;
+
+ case MII_TICK:
+ /*
+ * If we're not currently selected, just return.
+ */
+ if (IFM_INST(ife->ifm_media) != sc->mii_inst)
+ return 0;
+
+ if (mii_phy_tick(sc) == EJUSTRETURN)
+ return 0;
+ break;
+ }
+
+ /* Update the media status. */
+ mii_phy_status(sc);
+
+ /* Callback if something changed. */
+ mii_phy_update(sc, cmd);
+ return 0;
+}
+
+void
+etphy_reset(struct mii_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ PHY_READ(sc, MII_PHYIDR1);
+ PHY_READ(sc, MII_PHYIDR2);
+
+ PHY_READ(sc, ETPHY_CTRL);
+ PHY_WRITE(sc, ETPHY_CTRL,
+ ETPHY_CTRL_DIAG | ETPHY_CTRL_RSV1);
+
+ PHY_WRITE(sc, ETPHY_INDEX, ETPHY_INDEX_MAGIC);
+ PHY_READ(sc, ETPHY_DATA);
+
+ PHY_WRITE(sc, ETPHY_CTRL, ETPHY_CTRL_RSV1);
+ }
+
+ PHY_READ(sc, MII_BMCR);
+ PHY_READ(sc, ETPHY_CTRL);
+ PHY_WRITE(sc, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN | BMCR_S1000);
+ PHY_WRITE(sc, ETPHY_CTRL,
+ ETPHY_CTRL_DIAG | ETPHY_CTRL_RSV1 | ETPHY_CTRL_RSV0);
+
+#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
+
+ for (i = 0; i < N(etphy_dspcode); ++i) {
+ const struct etphy_dsp *dsp = &etphy_dspcode[i];
+
+ PHY_WRITE(sc, ETPHY_INDEX, dsp->index);
+ PHY_WRITE(sc, ETPHY_DATA, dsp->data);
+
+ PHY_WRITE(sc, ETPHY_INDEX, dsp->index);
+ PHY_READ(sc, ETPHY_DATA);
+ }
+
+#undef N
+
+ PHY_READ(sc, MII_BMCR);
+ PHY_READ(sc, ETPHY_CTRL);
+ PHY_WRITE(sc, MII_BMCR, BMCR_AUTOEN | BMCR_S1000);
+ PHY_WRITE(sc, ETPHY_CTRL, ETPHY_CTRL_RSV1);
+
+ mii_phy_reset(sc);
+}
+
+void
+etphy_status(struct mii_softc *sc)
+{
+ struct mii_data *mii = sc->mii_pdata;
+ int bmsr, bmcr, sr;
+
+ mii->mii_media_status = IFM_AVALID;
+ mii->mii_media_active = IFM_ETHER;
+
+ sr = PHY_READ(sc, ETPHY_SR);
+ bmcr = PHY_READ(sc, MII_BMCR);
+
+ bmsr = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR);
+ if (bmsr & BMSR_LINK)
+ mii->mii_media_status |= IFM_ACTIVE;
+
+ if (bmcr & BMCR_AUTOEN) {
+ if ((bmsr & BMSR_ACOMP) == 0) {
+ mii->mii_media_active |= IFM_NONE;
+ return;
+ }
+ }
+
+ switch (sr & ETPHY_SR_SPD_MASK) {
+ case ETPHY_SR_SPD_1000T:
+ mii->mii_media_active |= IFM_1000_T;
+ break;
+ case ETPHY_SR_SPD_100TX:
+ mii->mii_media_active |= IFM_100_TX;
+ break;
+ case ETPHY_SR_SPD_10T:
+ mii->mii_media_active |= IFM_10_T;
+ break;
+ default:
+ mii->mii_media_active |= IFM_NONE;
+ return;
+ }
+
+ if (sr & ETPHY_SR_FDX)
+ mii->mii_media_active |= IFM_FDX;
+ else
+ mii->mii_media_active |= IFM_HDX;
+}
Index: sys/dev/mii/miidevs
===================================================================
RCS file: /cvsroot/src/sys/dev/mii/miidevs,v
retrieving revision 1.76
diff -u -p -r1.76 miidevs
--- sys/dev/mii/miidevs 28 Apr 2008 20:23:53 -0000 1.76
+++ sys/dev/mii/miidevs 3 Jul 2008 16:34:02 -0000
@@ -45,6 +45,7 @@ $NetBSD: miidevs,v 1.76 2008/04/28 20:23
* which is mangled accordingly to compensate.
*/
+oui AGERE 0x00053d Agere
oui ALTIMA 0x0010a9 Altima Communications
oui AMD 0x00001a Advanced Micro Devices
oui BROADCOM 0x001018 Broadcom Corporation
@@ -99,6 +100,9 @@ oui yyREALTEK 0x000004 Realtek
* List of known models. Grouped by oui.
*/
+/* Agere PHYs */
+model AGERE ET1011 0x0004 Agere ET1011 10/100/1000baseT PHY
+
/* Altima Communications PHYs */
/* Don't know the model for ACXXX */
model ALTIMA ACXXX 0x0001 ACXXX 10/100 media interface
Index: sys/dev/mii/miidevs.h
===================================================================
RCS file: /cvsroot/src/sys/dev/mii/miidevs.h,v
retrieving revision 1.79
diff -u -p -r1.79 miidevs.h
--- sys/dev/mii/miidevs.h 28 Apr 2008 20:23:53 -0000 1.79
+++ sys/dev/mii/miidevs.h 3 Jul 2008 16:34:02 -0000
@@ -1,10 +1,10 @@
-/* $NetBSD: miidevs.h,v 1.79 2008/04/28 20:23:53 martin Exp $ */
+/* $NetBSD$ */
/*
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
- * NetBSD: miidevs,v 1.75 2008/02/20 11:17:41 markd Exp
+ * NetBSD: miidevs,v 1.76 2008/04/28 20:23:53 martin Exp
*/
/*-
@@ -52,6 +52,7 @@
* which is mangled accordingly to compensate.
*/
+#define MII_OUI_AGERE 0x00053d /* Agere */
#define MII_OUI_ALTIMA 0x0010a9 /* Altima Communications */
#define MII_OUI_AMD 0x00001a /* Advanced Micro Devices */
#define MII_OUI_BROADCOM 0x001018 /* Broadcom Corporation */
@@ -106,6 +107,10 @@
* List of known models. Grouped by oui.
*/
+/* Agere PHYs */
+#define MII_MODEL_AGERE_ET1011 0x0004
+#define MII_STR_AGERE_ET1011 "Agere ET1011 10/100/1000baseT PHY"
+
/* Altima Communications PHYs */
/* Don't know the model for ACXXX */
#define MII_MODEL_ALTIMA_ACXXX 0x0001
Index: sys/dev/mii/miidevs_data.h
===================================================================
RCS file: /cvsroot/src/sys/dev/mii/miidevs_data.h,v
retrieving revision 1.69
diff -u -p -r1.69 miidevs_data.h
--- sys/dev/mii/miidevs_data.h 28 Apr 2008 20:23:53 -0000 1.69
+++ sys/dev/mii/miidevs_data.h 3 Jul 2008 16:34:02 -0000
@@ -1,10 +1,10 @@
-/* $NetBSD: miidevs_data.h,v 1.69 2008/04/28 20:23:53 martin Exp $ */
+/* $NetBSD$ */
/*
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
- * NetBSD: miidevs,v 1.75 2008/02/20 11:17:41 markd Exp
+ * NetBSD: miidevs,v 1.76 2008/04/28 20:23:53 martin Exp
*/
/*-
@@ -37,6 +37,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
struct mii_knowndev mii_knowndevs[] = {
+ { MII_OUI_AGERE, MII_MODEL_AGERE_ET1011, MII_STR_AGERE_ET1011 },
{ MII_OUI_ALTIMA, MII_MODEL_ALTIMA_ACXXX, MII_STR_ALTIMA_ACXXX },
{ MII_OUI_ALTIMA, MII_MODEL_ALTIMA_AC101, MII_STR_ALTIMA_AC101 },
{ MII_OUI_ALTIMA, MII_MODEL_ALTIMA_AC101L, MII_STR_ALTIMA_AC101L },
Index: sys/dev/mii/files.mii
===================================================================
RCS file: /cvsroot/src/sys/dev/mii/files.mii,v
retrieving revision 1.41
diff -u -p -r1.41 files.mii
--- sys/dev/mii/files.mii 2 Jan 2008 00:41:07 -0000 1.41
+++ sys/dev/mii/files.mii 3 Jul 2008 16:34:02 -0000
@@ -131,3 +131,7 @@ file dev/mii/rlphy.c rlphy
device mvphy: mii_phy
attach mvphy at mii
file dev/mii/mvphy.c mvphy
+
+device etphy: mii_phy
+attach etphy at mii
+file dev/mii/etphy.c etphy
Index: sys/dev/pci/if_et.c
===================================================================
RCS file: sys/dev/pci/if_et.c
diff -N sys/dev/pci/if_et.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/dev/pci/if_et.c 3 Jul 2008 16:34:02 -0000
@@ -0,0 +1,2132 @@
+/* $NetBSD$ */
+/* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */
+/*
+ * Copyright (c) 2007 The DragonFly Project. All rights reserved.
+ *
+ * This code is derived from software contributed to The DragonFly Project
+ * by Sepherosa Ziehau <sepherosa%gmail.com@localhost>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of The DragonFly Project nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific, prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD$");
+
+#include "opt_inet.h"
+#include "bpfilter.h"
+#include "vlan.h"
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/callout.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_ether.h>
+#include <net/if_arp.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_inarp.h>
+#endif
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <dev/pci/if_etreg.h>
+
+/* XXX temporary porting goop */
+#define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
+#undef KASSERT
+#define KASSERT(cond, complaint) if (!(cond)) panic complaint
+
+/* these macros in particular need to die, so gross */
+#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
+#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
+#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
+/* XXX end porting goop */
+
+int et_match(device_t, cfdata_t, void *);
+void et_attach(device_t, device_t, void *);
+int et_detach(device_t, int flags);
+int et_shutdown(device_t);
+
+int et_miibus_readreg(device_t, int, int);
+void et_miibus_writereg(device_t, int, int, int);
+void et_miibus_statchg(device_t);
+
+int et_init(struct ifnet *ifp);
+int et_ioctl(struct ifnet *, u_long, void *);
+void et_start(struct ifnet *);
+void et_watchdog(struct ifnet *);
+
+int et_intr(void *);
+void et_enable_intrs(struct et_softc *, uint32_t);
+void et_disable_intrs(struct et_softc *);
+void et_rxeof(struct et_softc *);
+void et_txeof(struct et_softc *);
+void et_txtick(void *);
+
+int et_dma_alloc(struct et_softc *);
+void et_dma_free(struct et_softc *);
+int et_dma_mem_create(struct et_softc *, bus_size_t,
+ void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
+void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
+int et_dma_mbuf_create(struct et_softc *);
+void et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
+void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
+
+int et_init_tx_ring(struct et_softc *);
+int et_init_rx_ring(struct et_softc *);
+void et_free_tx_ring(struct et_softc *);
+void et_free_rx_ring(struct et_softc *);
+int et_encap(struct et_softc *, struct mbuf **);
+int et_newbuf(struct et_rxbuf_data *, int, int, int);
+int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
+int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
+
+void et_stop(struct et_softc *);
+int et_chip_init(struct et_softc *);
+void et_chip_attach(struct et_softc *);
+void et_init_mac(struct et_softc *);
+void et_init_rxmac(struct et_softc *);
+void et_init_txmac(struct et_softc *);
+int et_init_rxdma(struct et_softc *);
+int et_init_txdma(struct et_softc *);
+int et_start_rxdma(struct et_softc *);
+int et_start_txdma(struct et_softc *);
+int et_stop_rxdma(struct et_softc *);
+int et_stop_txdma(struct et_softc *);
+int et_enable_txrx(struct et_softc *);
+void et_reset(struct et_softc *);
+int et_bus_config(struct et_softc *);
+void et_get_eaddr(struct et_softc *, uint8_t[]);
+void et_setmulti(struct et_softc *);
+void et_tick(void *);
+
+static int et_rx_intr_npkts = 32;
+static int et_rx_intr_delay = 20; /* x10 usec */
+static int et_tx_intr_nsegs = 128;
+static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
+
+struct et_bsize {
+ int bufsize;
+ et_newbuf_t newbuf;
+};
+
+static const struct et_bsize et_bufsize[ET_RX_NRING] = {
+ { .bufsize = 0, .newbuf = et_newbuf_hdr },
+ { .bufsize = 0, .newbuf = et_newbuf_cluster },
+};
+
+const struct et_product {
+ pci_vendor_id_t vendor;
+ pci_product_id_t product;
+} et_devices[] = {
+ { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
+ { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
+};
+
+CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
+ NULL);
+
+int
+et_match(device_t dev, cfdata_t match, void *aux)
+{
+ struct pci_attach_args *pa = aux;
+ const struct et_product *ep;
+ int i;
+
+ for (i = 0; i < sizeof(et_devices) / sizeof(et_devices[0]); i++) {
+ ep = &et_devices[i];
+ if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
+ PCI_PRODUCT(pa->pa_id) == ep->product)
+ return 1;
+ }
+ return 0;
+}
+
+void
+et_attach(device_t parent, device_t self, void *aux)
+{
+ struct et_softc *sc = device_private(self);
+ struct pci_attach_args *pa = aux;
+ pci_chipset_tag_t pc = pa->pa_pc;
+ pci_intr_handle_t ih;
+ const char *intrstr;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ pcireg_t memtype;
+ int error;
+ char devinfo[256];
+
+ aprint_naive(": Ethernet controller\n");
+
+ sc->sc_dev = self;
+ pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
+ aprint_normal(": %s (rev. 0x%02x)\n", devinfo, PCI_REVISION(pa->pa_class));
+
+ /*
+ * Initialize tunables
+ */
+ sc->sc_rx_intr_npkts = et_rx_intr_npkts;
+ sc->sc_rx_intr_delay = et_rx_intr_delay;
+ sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
+ sc->sc_timer = et_timer;
+
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
+ if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
+ &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
+ aprint_error_dev(self, "could not map mem space\n");
+ return;
+ }
+
+ if (pci_intr_map(pa, &ih) != 0) {
+ aprint_error_dev(self, "could not map interrupt\n");
+ return;
+ }
+
+ intrstr = pci_intr_string(pc, ih);
+ sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc);
+ if (sc->sc_irq_handle == NULL) {
+ aprint_error_dev(self, "could not establish interrupt");
+ if (intrstr != NULL)
+ aprint_error(" at %s", intrstr);
+ aprint_error("\n");
+ return;
+ }
+ aprint_normal_dev(self, "interrupting at %s\n", intrstr);
+
+ sc->sc_dmat = pa->pa_dmat;
+ sc->sc_pct = pa->pa_pc;
+ sc->sc_pcitag = pa->pa_tag;
+
+ error = et_bus_config(sc);
+ if (error)
+ return;
+
+ et_get_eaddr(sc, sc->sc_enaddr);
+
+ aprint_normal_dev(self, "Ethernet address %s\n",
+ ether_sprintf(sc->sc_enaddr));
+
+ CSR_WRITE_4(sc, ET_PM,
+ ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
+
+ et_reset(sc);
+
+ et_disable_intrs(sc);
+
+ error = et_dma_alloc(sc);
+ if (error)
+ return;
+
+ ifp->if_softc = sc;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_init = et_init;
+ ifp->if_ioctl = et_ioctl;
+ ifp->if_start = et_start;
+ ifp->if_watchdog = et_watchdog;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
+ IFQ_SET_READY(&ifp->if_snd);
+ strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
+
+ et_chip_attach(sc);
+
+ sc->sc_miibus.mii_ifp = ifp;
+ sc->sc_miibus.mii_readreg = et_miibus_readreg;
+ sc->sc_miibus.mii_writereg = et_miibus_writereg;
+ sc->sc_miibus.mii_statchg = et_miibus_statchg;
+
+ sc->sc_ethercom.ec_mii = &sc->sc_miibus;
+ ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange,
+ ether_mediastatus);
+ mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
+ MII_OFFSET_ANY, 0);
+ if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
+ aprint_error_dev(self, "no PHY found!\n");
+ ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
+ 0, NULL);
+ ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
+ } else
+ ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
+
+ if_attach(ifp);
+ ether_ifattach(ifp, sc->sc_enaddr);
+
+ callout_init(&sc->sc_tick, 0);
+ callout_setfunc(&sc->sc_tick, et_tick, sc);
+ callout_init(&sc->sc_txtick, 0);
+ callout_setfunc(&sc->sc_txtick, et_txtick, sc);
+}
+
+int
+et_detach(device_t self, int flags)
+{
+ struct et_softc *sc = device_private(self);
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ int s;
+
+ s = splnet();
+ et_stop(sc);
+ splx(s);
+
+ mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
+
+ /* Delete all remaining media. */
+ ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
+
+ ether_ifdetach(ifp);
+ if_detach(ifp);
+ et_dma_free(sc);
+
+ if (sc->sc_irq_handle != NULL) {
+ pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
+ sc->sc_irq_handle = NULL;
+ }
+
+ bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
+
+ return 0;
+}
+
+int
+et_shutdown(device_t self)
+{
+ struct et_softc *sc = device_private(self);
+ int s;
+
+ s = splnet();
+ et_stop(sc);
+ splx(s);
+
+ return 0;
+}
+
+int
+et_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct et_softc *sc = device_private(dev);
+ uint32_t val;
+ int i, ret;
+
+ /* Stop any pending operations */
+ CSR_WRITE_4(sc, ET_MII_CMD, 0);
+
+ val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
+ __SHIFTIN(reg, ET_MII_ADDR_REG);
+ CSR_WRITE_4(sc, ET_MII_ADDR, val);
+
+ /* Start reading */
+ CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
+
+#define NRETRY 50
+
+ for (i = 0; i < NRETRY; ++i) {
+ val = CSR_READ_4(sc, ET_MII_IND);
+ if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
+ break;
+ DELAY(50);
+ }
+ if (i == NRETRY) {
+ aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
+ phy, reg);
+ ret = 0;
+ goto back;
+ }
+
+#undef NRETRY
+
+ val = CSR_READ_4(sc, ET_MII_STAT);
+ ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);
+
+back:
+ /* Make sure that the current operation is stopped */
+ CSR_WRITE_4(sc, ET_MII_CMD, 0);
+ return ret;
+}
+
+void
+et_miibus_writereg(device_t dev, int phy, int reg, int val0)
+{
+ struct et_softc *sc = device_private(dev);
+ uint32_t val;
+ int i;
+
+ /* Stop any pending operations */
+ CSR_WRITE_4(sc, ET_MII_CMD, 0);
+
+ val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
+ __SHIFTIN(reg, ET_MII_ADDR_REG);
+ CSR_WRITE_4(sc, ET_MII_ADDR, val);
+
+ /* Start writing */
+ CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));
+
+#define NRETRY 100
+
+ for (i = 0; i < NRETRY; ++i) {
+ val = CSR_READ_4(sc, ET_MII_IND);
+ if ((val & ET_MII_IND_BUSY) == 0)
+ break;
+ DELAY(50);
+ }
+ if (i == NRETRY) {
+ aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
+ phy, reg);
+ et_miibus_readreg(dev, phy, reg);
+ }
+
+#undef NRETRY
+
+ /* Make sure that the current operation is stopped */
+ CSR_WRITE_4(sc, ET_MII_CMD, 0);
+}
+
+void
+et_miibus_statchg(device_t dev)
+{
+ struct et_softc *sc = device_private(dev);
+ struct mii_data *mii = &sc->sc_miibus;
+ uint32_t cfg2, ctrl;
+
+ cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
+ cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
+ ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
+ cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
+ __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
+
+ ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
+ ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
+
+ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
+ cfg2 |= ET_MAC_CFG2_MODE_GMII;
+ } else {
+ cfg2 |= ET_MAC_CFG2_MODE_MII;
+ ctrl |= ET_MAC_CTRL_MODE_MII;
+ }
+
+ if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
+ cfg2 |= ET_MAC_CFG2_FDX;
+ else
+ ctrl |= ET_MAC_CTRL_GHDX;
+
+ CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
+ CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
+}
+
+void
+et_stop(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+
+ callout_stop(&sc->sc_tick);
+ callout_stop(&sc->sc_txtick);
+
+ et_stop_rxdma(sc);
+ et_stop_txdma(sc);
+
+ et_disable_intrs(sc);
+
+ et_free_tx_ring(sc);
+ et_free_rx_ring(sc);
+
+ et_reset(sc);
+
+ sc->sc_tx = 0;
+ sc->sc_tx_intr = 0;
+
+ ifp->if_timer = 0;
+ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+}
+
+int
+et_bus_config(struct et_softc *sc)
+{
+ uint32_t val; //, max_plsz;
+// uint16_t ack_latency, replay_timer;
+
+ /*
+ * Test whether EEPROM is valid
+ * NOTE: Read twice to get the correct value
+ */
+ pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
+ val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
+
+ if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
+ aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
+ return ENXIO;
+ }
+
+ /* TODO: LED */
+#if 0
+ /*
+ * Configure ACK latency and replay timer according to
+ * max playload size
+ */
+ val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
+ max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
+
+ switch (max_plsz) {
+ case ET_PCIV_DEVICE_CAPS_PLSZ_128:
+ ack_latency = ET_PCIV_ACK_LATENCY_128;
+ replay_timer = ET_PCIV_REPLAY_TIMER_128;
+ break;
+
+ case ET_PCIV_DEVICE_CAPS_PLSZ_256:
+ ack_latency = ET_PCIV_ACK_LATENCY_256;
+ replay_timer = ET_PCIV_REPLAY_TIMER_256;
+ break;
+
+ default:
+ ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
+ ET_PCIR_ACK_LATENCY) >> 16;
+ replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
+ ET_PCIR_REPLAY_TIMER) >> 16;
+ aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
+ ack_latency, replay_timer);
+ break;
+ }
+ if (ack_latency != 0) {
+ pci_conf_write(sc->sc_pct, sc->sc_pcitag,
+ ET_PCIR_ACK_LATENCY, ack_latency << 16);
+ pci_conf_write(sc->sc_pct, sc->sc_pcitag,
+ ET_PCIR_REPLAY_TIMER, replay_timer << 16);
+ }
+
+ /*
+ * Set L0s and L1 latency timer to 2us
+ */
+ val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
+ pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
+ val << 24);
+
+ /*
+ * Set max read request size to 2048 bytes
+ */
+ val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
+ ET_PCIR_DEVICE_CTRL) >> 16;
+ val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
+ val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
+ pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
+ val << 16);
+#endif
+
+ return 0;
+}
+
+void
+et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
+{
+ uint32_t r;
+
+ r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
+ eaddr[0] = r & 0xff;
+ eaddr[1] = (r >> 8) & 0xff;
+ eaddr[2] = (r >> 16) & 0xff;
+ eaddr[3] = (r >> 24) & 0xff;
+ r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
+ eaddr[4] = r & 0xff;
+ eaddr[5] = (r >> 8) & 0xff;
+}
+
+void
+et_reset(struct et_softc *sc)
+{
+ CSR_WRITE_4(sc, ET_MAC_CFG1,
+ ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
+ ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
+ ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
+
+ CSR_WRITE_4(sc, ET_SWRST,
+ ET_SWRST_TXDMA | ET_SWRST_RXDMA |
+ ET_SWRST_TXMAC | ET_SWRST_RXMAC |
+ ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
+
+ CSR_WRITE_4(sc, ET_MAC_CFG1,
+ ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
+ ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
+ CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
+}
+
+void
+et_disable_intrs(struct et_softc *sc)
+{
+ CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
+}
+
+void
+et_enable_intrs(struct et_softc *sc, uint32_t intrs)
+{
+ CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
+}
+
+/*
+ * et_dma_alloc: allocate and map all descriptor-side DMA memory:
+ * TX ring, TX status word, both RX descriptor rings, the RX stat ring,
+ * the RX status block, and per-mbuf DMA maps.  Returns 0 or an errno;
+ * on failure previously created resources are NOT torn down here
+ * (the caller is expected to run the detach path).
+ *
+ * NOTE(review): the wrapped aprint_error_dev() string literals from the
+ * mail archive have been rejoined onto single lines.
+ */
+int
+et_dma_alloc(struct et_softc *sc)
+{
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ struct et_txstatus_data *txsd = &sc->sc_tx_status;
+ struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
+ struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
+ int i, error;
+
+ /*
+ * Create TX ring DMA stuffs
+ */
+ error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
+ (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
+ &tx_ring->tr_seg);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
+ return error;
+ }
+
+ /*
+ * Create TX status DMA stuffs
+ */
+ error = et_dma_mem_create(sc, sizeof(uint32_t),
+ (void **)&txsd->txsd_status,
+ &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
+ return error;
+ }
+
+ /*
+ * Create DMA stuffs for RX rings
+ */
+ for (i = 0; i < ET_RX_NRING; ++i) {
+ static const uint32_t rx_ring_posreg[ET_RX_NRING] =
+ { ET_RX_RING0_POS, ET_RX_RING1_POS };
+
+ struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
+
+ error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
+ (void **)&rx_ring->rr_desc,
+ &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
+ "the %d RX ring\n", i);
+ return error;
+ }
+ rx_ring->rr_posreg = rx_ring_posreg[i];
+ }
+
+ /*
+ * Create RX stat ring DMA stuffs
+ */
+ error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
+ (void **)&rxst_ring->rsr_stat,
+ &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
+ return error;
+ }
+
+ /*
+ * Create RX status DMA stuffs
+ */
+ error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
+ (void **)&rxsd->rxsd_status,
+ &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
+ return error;
+ }
+
+ /*
+ * Create mbuf DMA stuffs
+ */
+ error = et_dma_mbuf_create(sc);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/*
+ * et_dma_free: release everything et_dma_alloc() created, in the same
+ * order: TX ring, TX status, RX rings, RX stat ring, RX status, and
+ * finally the per-mbuf DMA maps.  All RX descriptors are treated as
+ * fully created (rx_done[] = ET_RX_NDESC) since this is only called
+ * after a successful et_dma_alloc().
+ */
+void
+et_dma_free(struct et_softc *sc)
+{
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ struct et_txstatus_data *txsd = &sc->sc_tx_status;
+ struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
+ struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
+ int i, rx_done[ET_RX_NRING];
+
+ /*
+ * Destroy TX ring DMA stuffs
+ */
+ et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
+
+ /*
+ * Destroy TX status DMA stuffs
+ */
+ et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);
+
+ /*
+ * Destroy DMA stuffs for RX rings
+ */
+ for (i = 0; i < ET_RX_NRING; ++i) {
+ struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
+
+ et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
+ }
+
+ /*
+ * Destroy RX stat ring DMA stuffs
+ */
+ et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);
+
+ /*
+ * Destroy RX status DMA stuffs
+ */
+ et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);
+
+ /*
+ * Destroy mbuf DMA stuffs
+ */
+ for (i = 0; i < ET_RX_NRING; ++i)
+ rx_done[i] = ET_RX_NDESC;
+ et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
+}
+
+/*
+ * et_dma_mbuf_create: create the spare RX map, one DMA map per RX
+ * descriptor in every RX ring, and one per TX descriptor.  On partial
+ * failure the maps created so far are destroyed through
+ * et_dma_mbuf_destroy() before returning the errno.
+ *
+ * NOTE(review): string literals broken by mail wrapping rejoined.
+ */
+int
+et_dma_mbuf_create(struct et_softc *sc)
+{
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+ int i, error, rx_done[ET_RX_NRING];
+
+ /*
+ * Create spare DMA map for RX mbufs
+ */
+ error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
+ BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
+ return error;
+ }
+
+ /*
+ * Create DMA maps for RX mbufs
+ */
+ bzero(rx_done, sizeof(rx_done));
+ for (i = 0; i < ET_RX_NRING; ++i) {
+ struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
+ int j;
+
+ for (j = 0; j < ET_RX_NDESC; ++j) {
+ error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
+ MCLBYTES, 0, BUS_DMA_NOWAIT,
+ &rbd->rbd_buf[j].rb_dmap);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
+ "for %d RX ring\n", j, i);
+ rx_done[i] = j;
+ et_dma_mbuf_destroy(sc, 0, rx_done);
+ return error;
+ }
+ }
+ rx_done[i] = ET_RX_NDESC;
+
+ rbd->rbd_softc = sc;
+ rbd->rbd_ring = &sc->sc_rx_ring[i];
+ }
+
+ /*
+ * Create DMA maps for TX mbufs
+ */
+ for (i = 0; i < ET_TX_NDESC; ++i) {
+ error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
+ 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
+ "DMA map\n", i);
+ et_dma_mbuf_destroy(sc, i, rx_done);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * et_dma_mbuf_destroy: destroy the first rx_done[i] RX maps of each
+ * ring, the first tx_done TX maps, and the spare map.  All mbufs must
+ * already have been freed by the caller.
+ *
+ * NOTE(review): the two-argument KASSERT(expr, (msg)) form below is the
+ * DragonFly/FreeBSD style; NetBSD's KASSERT() takes a single expression.
+ * Presumably a compat macro is provided elsewhere in this patch --
+ * verify, or these will not compile with DIAGNOSTIC.
+ */
+void
+et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
+{
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+ int i;
+
+ /*
+ * Destroy DMA maps for RX mbufs
+ */
+ for (i = 0; i < ET_RX_NRING; ++i) {
+ struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
+ int j;
+
+ for (j = 0; j < rx_done[i]; ++j) {
+ struct et_rxbuf *rb = &rbd->rbd_buf[j];
+
+ KASSERT(rb->rb_mbuf == NULL,
+ ("RX mbuf in %d RX ring is not freed yet\n", i));
+ bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
+ }
+ }
+
+ /*
+ * Destroy DMA maps for TX mbufs
+ */
+ for (i = 0; i < tx_done; ++i) {
+ struct et_txbuf *tb = &tbd->tbd_buf[i];
+
+ KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
+ bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
+ }
+
+ /*
+ * Destroy spare mbuf DMA map
+ */
+ bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
+}
+
+/*
+ * et_dma_mem_create: allocate, map and load one physically contiguous
+ * DMA area of 'size' bytes.  On success *addr is the KVA, *paddr the
+ * bus address, *dmap the loaded map and *seg the backing segment.
+ *
+ * Fixes vs. submitted patch: the load-failure path freed
+ * "(bus_dma_segment_t *)addr" -- i.e. it reinterpreted the void **
+ * pointer itself as a segment array -- instead of freeing 'seg'.
+ * Intermediate failures also leaked the map and/or the mapped memory;
+ * each error path now unwinds exactly what was set up before it.
+ */
+int
+et_dma_mem_create(struct et_softc *sc, bus_size_t size,
+ void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
+{
+ int error, nsegs;
+
+ error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
+ dmap);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
+ return error;
+ }
+
+ error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
+ 1, &nsegs, BUS_DMA_WAITOK);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
+ bus_dmamap_destroy(sc->sc_dmat, *dmap);
+ return error;
+ }
+
+ error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
+ size, (void **)addr, BUS_DMA_NOWAIT);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
+ bus_dmamem_free(sc->sc_dmat, seg, 1);
+ bus_dmamap_destroy(sc->sc_dmat, *dmap);
+ return (error);
+ }
+
+ error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
+ BUS_DMA_WAITOK);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
+ /* Free the segment we allocated, not the KVA pointer. */
+ bus_dmamem_unmap(sc->sc_dmat, *addr, size);
+ bus_dmamem_free(sc->sc_dmat, seg, 1);
+ bus_dmamap_destroy(sc->sc_dmat, *dmap);
+ return error;
+ }
+
+ memset(*addr, 0, size);
+
+ *paddr = (*dmap)->dm_segs[0].ds_addr;
+
+ return 0;
+}
+
+/*
+ * et_dma_mem_destroy: unload and free one area created by
+ * et_dma_mem_create().
+ *
+ * NOTE(review): "(bus_dma_segment_t *)&addr" passes the address of the
+ * local parameter, not the bus_dma_segment_t that was allocated; this
+ * looks wrong (the segment from et_dma_mem_create() is the object that
+ * should be freed) -- verify against bus_dma(9) before import.
+ */
+void
+et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
+{
+ bus_dmamap_unload(sc->sc_dmat, dmap);
+ bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
+}
+
+/*
+ * et_dma_ring_addr: bus_dmamap_load callback; records the single
+ * segment's bus address into *arg.  'error' is unused here.
+ */
+void
+et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ KASSERT(nseg == 1, ("too many segments\n"));
+ *((bus_addr_t *)arg) = seg->ds_addr;
+}
+
+/*
+ * et_chip_attach: minimal chip bring-up done once at attach time:
+ * disable loopback, reset the MAC, program half-duplex parameters,
+ * clear MAC control, reset the MII interface, release the MAC reset
+ * and enable the on-chip memory controllers.
+ */
+void
+et_chip_attach(struct et_softc *sc)
+{
+ uint32_t val;
+
+ /*
+ * Perform minimal initialization
+ */
+
+ /* Disable loopback */
+ CSR_WRITE_4(sc, ET_LOOPBACK, 0);
+
+ /* Reset MAC */
+ CSR_WRITE_4(sc, ET_MAC_CFG1,
+ ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
+ ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
+ ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
+
+ /*
+ * Setup half duplex mode
+ */
+ val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
+ __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
+ __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
+ ET_MAC_HDX_EXC_DEFER;
+ CSR_WRITE_4(sc, ET_MAC_HDX, val);
+
+ /* Clear MAC control */
+ CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
+
+ /* Reset MII */
+ CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
+
+ /* Bring MAC out of reset state */
+ CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
+
+ /* Enable memory controllers */
+ CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
+}
+
+/*
+ * et_intr: interrupt handler.  Returns 0 if the interrupt was not for
+ * us (interface down, no status bits, or bus gone -- 0xffffffff reads
+ * as all-ones on a detached device), 1 otherwise.  Interrupts are
+ * masked for the duration of servicing and re-enabled at 'back'.
+ */
+int
+et_intr(void *xsc)
+{
+ struct et_softc *sc = xsc;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ uint32_t intrs;
+
+ if ((ifp->if_flags & IFF_RUNNING) == 0)
+ return (0);
+
+ intrs = CSR_READ_4(sc, ET_INTR_STATUS);
+ if (intrs == 0 || intrs == 0xffffffff)
+ return (0);
+
+ et_disable_intrs(sc);
+ intrs &= ET_INTRS;
+ if (intrs == 0) /* Not interested */
+ goto back;
+
+ if (intrs & ET_INTR_RXEOF)
+ et_rxeof(sc);
+ if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
+ et_txeof(sc);
+ if (intrs & ET_INTR_TIMER)
+ /* Re-arm the periodic TX-reclaim timer */
+ CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
+back:
+ et_enable_intrs(sc, ET_INTRS);
+
+ return (1);
+}
+
+/*
+ * et_init: ifnet if_init hook.  Stops the chip, selects RX buffer
+ * sizes for the current MTU, initializes rings, chip, MAC and the DMA
+ * engines, then enables interrupts and the reclaim timer.
+ *
+ * Fix vs. submitted patch: the function always returned 0; it now
+ * propagates the initialization error (current callers ignore the
+ * return value, so this is backward compatible).
+ *
+ * NOTE(review): 'arr' is NULL when if_mtu > ETHERMTU; et_ioctl()
+ * rejects such MTUs so the dereference below is presumed unreachable
+ * -- confirm if jumbo support is ever enabled.
+ */
+int
+et_init(struct ifnet *ifp)
+{
+ struct et_softc *sc = ifp->if_softc;
+ const struct et_bsize *arr;
+ int error, i, s;
+
+ if (ifp->if_flags & IFF_RUNNING)
+ return 0;
+
+ s = splnet();
+
+ et_stop(sc);
+
+ arr = ifp->if_mtu <= ETHERMTU ? et_bufsize : NULL;
+ for (i = 0; i < ET_RX_NRING; ++i) {
+ sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
+ sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
+ }
+
+ error = et_init_tx_ring(sc);
+ if (error)
+ goto back;
+
+ error = et_init_rx_ring(sc);
+ if (error)
+ goto back;
+
+ error = et_chip_init(sc);
+ if (error)
+ goto back;
+
+ error = et_enable_txrx(sc);
+ if (error)
+ goto back;
+
+ error = et_start_rxdma(sc);
+ if (error)
+ goto back;
+
+ error = et_start_txdma(sc);
+ if (error)
+ goto back;
+
+ et_enable_intrs(sc, ET_INTRS);
+
+ callout_schedule(&sc->sc_tick, hz);
+
+ CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
+
+ ifp->if_flags |= IFF_RUNNING;
+ ifp->if_flags &= ~IFF_OACTIVE;
+back:
+ if (error)
+ et_stop(sc);
+
+ splx(s);
+
+ return (error);
+}
+
+/*
+ * et_ioctl: ifnet ioctl handler.  Handles address assignment, MTU
+ * (limited to standard Ethernet), up/down and promisc/allmulti flag
+ * changes (RX-filter-only update when possible), multicast list
+ * changes and media selection; everything else falls through to
+ * ether_ioctl().  Runs at splnet.
+ */
+int
+et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
+{
+ struct et_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ int s, error = 0;
+
+ s = splnet();
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_flags & IFF_RUNNING))
+ et_init(ifp);
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ arp_ifinit(ifp, ifa);
+#endif
+ break;
+ case SIOCSIFMTU:
+ /* No jumbo frame support: cap at standard Ethernet MTU. */
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
+ error = EINVAL;
+ else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
+ error = 0;
+ break;
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_UP) {
+ /*
+ * If only the PROMISC or ALLMULTI flag changes, then
+ * don't do a full re-init of the chip, just update
+ * the Rx filter.
+ */
+ if ((ifp->if_flags & IFF_RUNNING) &&
+ ((ifp->if_flags ^ sc->sc_if_flags) &
+ (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
+ et_setmulti(sc);
+ } else {
+ if (!(ifp->if_flags & IFF_RUNNING))
+ et_init(ifp);
+ }
+ } else {
+ if (ifp->if_flags & IFF_RUNNING)
+ et_stop(sc);
+ }
+ /* Remember flags so the next SIOCSIFFLAGS can diff them. */
+ sc->sc_if_flags = ifp->if_flags;
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
+ if (ifp->if_flags & IFF_RUNNING)
+ et_setmulti(sc);
+ error = 0;
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
+ break;
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ if (error == ENETRESET) {
+ if (ifp->if_flags & IFF_RUNNING)
+ et_setmulti(sc);
+ error = 0;
+ }
+ break;
+
+ }
+
+ splx(s);
+
+ return error;
+}
+
+/*
+ * et_start: ifnet if_start hook; drain the send queue into the TX
+ * descriptor ring.
+ *
+ * Fix vs. submitted patch: the packet was IFQ_DEQUEUE'd *before* the
+ * free-descriptor check, so a full ring silently lost the mbuf (it was
+ * neither sent, re-queued nor freed).  Use IFQ_POLL first and only
+ * dequeue once we are committed to handing it to et_encap(), which
+ * frees the mbuf itself on failure.
+ */
+void
+et_start(struct ifnet *ifp)
+{
+ struct et_softc *sc = ifp->if_softc;
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+ int trans;
+ struct mbuf *m;
+
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+ return;
+
+ trans = 0;
+ for (;;) {
+ IFQ_POLL(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+ /* Leave the packet queued if the ring can't take it. */
+ if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
+ ifp->if_flags |= IFF_OACTIVE;
+ break;
+ }
+
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+
+ /* et_encap() frees the mbuf on failure. */
+ if (et_encap(sc, &m)) {
+ ifp->if_oerrors++;
+ ifp->if_flags |= IFF_OACTIVE;
+ break;
+ }
+
+ trans = 1;
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf != NULL)
+ bpf_mtap(ifp->if_bpf, m);
+#endif
+ }
+
+ if (trans) {
+ callout_schedule(&sc->sc_txtick, hz);
+ ifp->if_timer = 5;
+ }
+}
+
+/*
+ * et_watchdog: ifnet watchdog; a transmit failed to complete within
+ * if_timer seconds.  Log it and restart the interface.
+ */
+void
+et_watchdog(struct ifnet *ifp)
+{
+ struct et_softc *sc = ifp->if_softc;
+ aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
+
+ /* Clear RUNNING so et_init() performs a full reinitialization. */
+ ifp->if_flags &= ~IFF_RUNNING;
+ et_init(ifp);
+ et_start(ifp);
+}
+
+/*
+ * et_stop_rxdma: halt the RX DMA engine; returns ETIMEDOUT if the
+ * engine does not report HALTED after a short delay.
+ */
+int
+et_stop_rxdma(struct et_softc *sc)
+{
+ CSR_WRITE_4(sc, ET_RXDMA_CTRL,
+ ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
+
+ DELAY(5);
+ if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
+ aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
+ return ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * et_stop_txdma: halt the TX DMA engine.  No completion poll is done
+ * (the hardware halts immediately); always returns 0.
+ */
+int
+et_stop_txdma(struct et_softc *sc)
+{
+ CSR_WRITE_4(sc, ET_TXDMA_CTRL,
+ ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
+ return 0;
+}
+
+/*
+ * et_free_tx_ring: unload and free every pending TX mbuf, then clear
+ * the descriptor ring and sync it for the device.
+ */
+void
+et_free_tx_ring(struct et_softc *sc)
+{
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ int i;
+
+ for (i = 0; i < ET_TX_NDESC; ++i) {
+ struct et_txbuf *tb = &tbd->tbd_buf[i];
+
+ if (tb->tb_mbuf != NULL) {
+ bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
+ m_freem(tb->tb_mbuf);
+ tb->tb_mbuf = NULL;
+ }
+ }
+
+ bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
+ bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
+ tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * et_free_rx_ring: for each RX ring, unload and free every posted
+ * mbuf, then clear the descriptor ring and sync it for the device.
+ */
+void
+et_free_rx_ring(struct et_softc *sc)
+{
+ int n;
+
+ for (n = 0; n < ET_RX_NRING; ++n) {
+ struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
+ struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
+ int i;
+
+ for (i = 0; i < ET_RX_NDESC; ++i) {
+ struct et_rxbuf *rb = &rbd->rbd_buf[i];
+
+ if (rb->rb_mbuf != NULL) {
+ bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
+ m_freem(rb->rb_mbuf);
+ rb->rb_mbuf = NULL;
+ }
+ }
+
+ bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
+ bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
+ rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ }
+}
+
+/*
+ * et_setmulti: program the RX filter.  In promisc/allmulti mode the
+ * packet filter is bypassed entirely; otherwise a 128-bit multicast
+ * hash is built from the 7 high bits of the big-endian CRC32 of each
+ * (AND-folded) multicast address.
+ *
+ * Fix vs. submitted patch: the CRC was computed over
+ * LLADDR((struct sockaddr_dl *)addr), which reinterprets the 6-byte
+ * MAC array as a sockaddr_dl and indexes past it; hash over 'addr'
+ * directly instead.
+ */
+void
+et_setmulti(struct et_softc *sc)
+{
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
+ uint32_t hash[4] = { 0, 0, 0, 0 };
+ uint32_t rxmac_ctrl, pktfilt;
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ uint8_t addr[ETHER_ADDR_LEN];
+ int i, count;
+
+ pktfilt = CSR_READ_4(sc, ET_PKTFILT);
+ rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
+
+ pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
+ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
+ goto back;
+ }
+
+ bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
+
+ count = 0;
+ ETHER_FIRST_MULTI(step, ec, enm);
+ while (enm != NULL) {
+ uint32_t *hp, h;
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ addr[i] &= enm->enm_addrlo[i];
+ }
+
+ /* Hash the raw 6-byte address, not a bogus sockaddr_dl view. */
+ h = ether_crc32_be(addr, ETHER_ADDR_LEN);
+ h = (h & 0x3f800000) >> 23;
+
+ hp = &hash[0];
+ if (h >= 32 && h < 64) {
+ h -= 32;
+ hp = &hash[1];
+ } else if (h >= 64 && h < 96) {
+ h -= 64;
+ hp = &hash[2];
+ } else if (h >= 96) {
+ h -= 96;
+ hp = &hash[3];
+ }
+ *hp |= (1 << h);
+
+ ++count;
+ ETHER_NEXT_MULTI(step, enm);
+ }
+
+ for (i = 0; i < 4; ++i)
+ CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
+
+ if (count > 0)
+ pktfilt |= ET_PKTFILT_MCAST;
+ rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
+back:
+ CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
+ CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
+}
+
+/*
+ * et_chip_init: per-up() chip initialization: partition the internal
+ * packet memory between RX and TX queues based on MTU, then bring up
+ * the MAC, RX/TX MACs and both DMA engines.  Returns 0 or an errno
+ * from the DMA engine init.
+ */
+int
+et_chip_init(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ uint32_t rxq_end;
+ int error;
+
+ /*
+ * Split internal memory between TX and RX according to MTU
+ */
+ if (ifp->if_mtu < 2048)
+ rxq_end = 0x2bc;
+ else if (ifp->if_mtu < 8192)
+ rxq_end = 0x1ff;
+ else
+ rxq_end = 0x1b3;
+ CSR_WRITE_4(sc, ET_RXQ_START, 0);
+ CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
+ CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
+ CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);
+
+ /* No loopback */
+ CSR_WRITE_4(sc, ET_LOOPBACK, 0);
+
+ /* Clear MSI configure */
+ CSR_WRITE_4(sc, ET_MSI_CFG, 0);
+
+ /* Disable timer */
+ CSR_WRITE_4(sc, ET_TIMER, 0);
+
+ /* Initialize MAC */
+ et_init_mac(sc);
+
+ /* Enable memory controllers */
+ CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
+
+ /* Initialize RX MAC */
+ et_init_rxmac(sc);
+
+ /* Initialize TX MAC */
+ et_init_txmac(sc);
+
+ /* Initialize RX DMA engine */
+ error = et_init_rxdma(sc);
+ if (error)
+ return error;
+
+ /* Initialize TX DMA engine */
+ error = et_init_txdma(sc);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/*
+ * et_init_tx_ring: zero the TX descriptor ring, reset the software
+ * ring indices and clear the DMA'd TX status word.  Always returns 0.
+ */
+int
+et_init_tx_ring(struct et_softc *sc)
+{
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ struct et_txstatus_data *txsd = &sc->sc_tx_status;
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+
+ bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
+ bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
+ tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+
+ tbd->tbd_start_index = 0;
+ tbd->tbd_start_wrap = 0;
+ tbd->tbd_used = 0;
+
+ bzero(txsd->txsd_status, sizeof(uint32_t));
+ bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
+ txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ return 0;
+}
+
+/*
+ * et_init_rx_ring: populate every descriptor of both RX rings with a
+ * fresh mbuf via the ring's rbd_newbuf callback, then clear the DMA'd
+ * RX status block and RX stat ring.  Returns 0 or the newbuf errno.
+ *
+ * NOTE(review): the aprint_error_dev() string broken by mail wrapping
+ * has been rejoined.
+ */
+int
+et_init_rx_ring(struct et_softc *sc)
+{
+ struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
+ struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
+ int n;
+
+ for (n = 0; n < ET_RX_NRING; ++n) {
+ struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
+ int i, error;
+
+ for (i = 0; i < ET_RX_NDESC; ++i) {
+ error = rbd->rbd_newbuf(rbd, i, 1);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: "
+ "%d\n", n, i, error);
+ return error;
+ }
+ }
+ }
+
+ bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
+ bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
+ rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+
+ bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
+ bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
+ rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+
+ return 0;
+}
+
+/*
+ * et_init_rxdma: halt then program the RX DMA engine: install the RX
+ * status block, the RX stat ring, both RX descriptor rings (ring 1
+ * first, then ring 0) and the interrupt moderation parameters.  The
+ * software index/wrap state is reset to match the position registers
+ * written here.  Returns 0 or ETIMEDOUT from et_stop_rxdma().
+ */
+int
+et_init_rxdma(struct et_softc *sc)
+{
+ struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
+ struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
+ struct et_rxdesc_ring *rx_ring;
+ int error;
+
+ error = et_stop_rxdma(sc);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
+ return error;
+ }
+
+ /*
+ * Install RX status
+ */
+ CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
+ CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
+
+ /*
+ * Install RX stat ring
+ */
+ CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
+ CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
+ CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
+ CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
+ /* Raise the low-watermark interrupt at 15% remaining entries. */
+ CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
+
+ /* Match ET_RXSTAT_POS */
+ rxst_ring->rsr_index = 0;
+ rxst_ring->rsr_wrap = 0;
+
+ /*
+ * Install the 2nd RX descriptor ring
+ */
+ rx_ring = &sc->sc_rx_ring[1];
+ CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
+ CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
+ CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
+ CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
+ CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
+
+ /* Match ET_RX_RING1_POS */
+ rx_ring->rr_index = 0;
+ rx_ring->rr_wrap = 1;
+
+ /*
+ * Install the 1st RX descriptor ring
+ */
+ rx_ring = &sc->sc_rx_ring[0];
+ CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
+ CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
+ CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
+ CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
+ CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
+
+ /* Match ET_RX_RING0_POS */
+ rx_ring->rr_index = 0;
+ rx_ring->rr_wrap = 1;
+
+ /*
+ * RX intr moderation
+ */
+ CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
+ CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
+
+ return 0;
+}
+
+/*
+ * et_init_txdma: halt then program the TX DMA engine with the TX
+ * descriptor ring and TX status block addresses, and reset the ready
+ * position to 0 (software state matched accordingly).
+ */
+int
+et_init_txdma(struct et_softc *sc)
+{
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ struct et_txstatus_data *txsd = &sc->sc_tx_status;
+ int error;
+
+ error = et_stop_txdma(sc);
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
+ return error;
+ }
+
+ /*
+ * Install TX descriptor ring
+ */
+ CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
+ CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
+ CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
+
+ /*
+ * Install TX status
+ */
+ CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
+ CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
+
+ CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
+
+ /* Match ET_TX_READY_POS */
+ tx_ring->tr_ready_index = 0;
+ tx_ring->tr_ready_wrap = 0;
+
+ return 0;
+}
+
+/*
+ * et_init_mac: reset the MAC, program inter-packet gap, half-duplex
+ * parameters, the station address (note the hardware's byte layout in
+ * ADDR1/ADDR2), and the maximum frame length, then release the reset.
+ *
+ * NOTE(review): the ET_MAX_FRMLEN expression broken by mail wrapping
+ * has been rejoined.
+ */
+void
+et_init_mac(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
+ uint32_t val;
+
+ /* Reset MAC */
+ CSR_WRITE_4(sc, ET_MAC_CFG1,
+ ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
+ ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
+ ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
+
+ /*
+ * Setup inter packet gap
+ */
+ val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
+ __SHIFTIN(88, ET_IPG_NONB2B_2) |
+ __SHIFTIN(80, ET_IPG_MINIFG) |
+ __SHIFTIN(96, ET_IPG_B2B);
+ CSR_WRITE_4(sc, ET_IPG, val);
+
+ /*
+ * Setup half duplex mode
+ */
+ val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
+ __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
+ __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
+ ET_MAC_HDX_EXC_DEFER;
+ CSR_WRITE_4(sc, ET_MAC_HDX, val);
+
+ /* Clear MAC control */
+ CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
+
+ /* Reset MII */
+ CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
+
+ /*
+ * Set MAC address
+ */
+ val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
+ CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
+ val = (eaddr[0] << 16) | (eaddr[1] << 24);
+ CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
+
+ /* Set max frame length */
+ CSR_WRITE_4(sc, ET_MAX_FRMLEN,
+ ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
+
+ /* Bring MAC out of reset state */
+ CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
+}
+
+/*
+ * et_init_rxmac: initialize the RX MAC: disable it and WOL, clear the
+ * WOL CRC/mask registers, program the WOL source address, clear the
+ * unicast filters, configure segment size for jumbo "cut through"
+ * mode when MTU > 8192, set runt filtering, then re-enable the RX MAC
+ * (WOL stays off) and install the multicast filter.
+ */
+void
+et_init_rxmac(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
+ uint32_t val;
+ int i;
+
+ /* Disable RX MAC and WOL */
+ CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
+
+ /*
+ * Clear all WOL related registers
+ */
+ for (i = 0; i < 3; ++i)
+ CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
+ for (i = 0; i < 20; ++i)
+ CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
+
+ /*
+ * Set WOL source address. XXX is this necessary?
+ */
+ val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
+ CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
+ val = (eaddr[0] << 8) | eaddr[1];
+ CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
+
+ /* Clear packet filters */
+ CSR_WRITE_4(sc, ET_PKTFILT, 0);
+
+ /* No ucast filtering */
+ CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
+ CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
+ CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
+
+ if (ifp->if_mtu > 8192) {
+ /*
+ * In order to transmit jumbo packets greater than 8k,
+ * the FIFO between RX MAC and RX DMA needs to be reduced
+ * in size to (16k - MTU). In order to implement this, we
+ * must use "cut through" mode in the RX MAC, which chops
+ * packets down into segments which are (max_size * 16).
+ * In this case we selected 256 bytes, since this is the
+ * size of the PCI-Express TLP's that the 1310 uses.
+ */
+ val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
+ ET_RXMAC_MC_SEGSZ_ENABLE;
+ } else {
+ val = 0;
+ }
+ CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
+
+ CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
+
+ /* Initialize RX MAC management register */
+ CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
+
+ CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
+
+ CSR_WRITE_4(sc, ET_RXMAC_MGT,
+ ET_RXMAC_MGT_PASS_ECRC |
+ ET_RXMAC_MGT_PASS_ELEN |
+ ET_RXMAC_MGT_PASS_ETRUNC |
+ ET_RXMAC_MGT_CHECK_PKT);
+
+ /*
+ * Configure runt filtering (may not work on certain chip generation)
+ */
+ val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
+ CSR_WRITE_4(sc, ET_PKTFILT, val);
+
+ /* Enable RX MAC but leave WOL disabled */
+ CSR_WRITE_4(sc, ET_RXMAC_CTRL,
+ ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
+
+ /*
+ * Setup multicast hash and allmulti/promisc mode
+ */
+ et_setmulti(sc);
+}
+
+/*
+ * et_init_txmac: initialize the TX MAC with flow control disabled,
+ * then enable it.
+ */
+void
+et_init_txmac(struct et_softc *sc)
+{
+ /* Disable TX MAC and FC(?) */
+ CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
+
+ /* No flow control yet */
+ CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
+
+ /* Enable TX MAC but leave FC(?) disabled */
+ CSR_WRITE_4(sc, ET_TXMAC_CTRL,
+ ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
+}
+
+/*
+ * et_start_rxdma: start the RX DMA engine with both rings enabled and
+ * their configured buffer-size codes.  Returns ETIMEDOUT if the engine
+ * still reports HALTED after a short delay.
+ */
+int
+et_start_rxdma(struct et_softc *sc)
+{
+ uint32_t val = 0;
+
+ val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
+ ET_RXDMA_CTRL_RING0_SIZE) |
+ ET_RXDMA_CTRL_RING0_ENABLE;
+ val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
+ ET_RXDMA_CTRL_RING1_SIZE) |
+ ET_RXDMA_CTRL_RING1_ENABLE;
+
+ CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
+
+ DELAY(5);
+
+ if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
+ aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
+ return ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * et_start_txdma: start the TX DMA engine (single-packet-epoch mode).
+ * Always returns 0.
+ */
+int
+et_start_txdma(struct et_softc *sc)
+{
+ CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
+ return 0;
+}
+
+/*
+ * et_enable_txrx: enable the MAC transmitter and receiver (flow
+ * control and loopback off), kick a media change, then poll up to
+ * NRETRY times for the hardware to acknowledge both enables.
+ * Returns 0, an ether_mediachange() errno, or ETIMEDOUT.
+ */
+int
+et_enable_txrx(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ uint32_t val;
+ int i, rc = 0;
+
+ val = CSR_READ_4(sc, ET_MAC_CFG1);
+ val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
+ val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
+ ET_MAC_CFG1_LOOPBACK);
+ CSR_WRITE_4(sc, ET_MAC_CFG1, val);
+
+ if ((rc = ether_mediachange(ifp)) != 0)
+ goto out;
+
+#define NRETRY 100
+
+ for (i = 0; i < NRETRY; ++i) {
+ val = CSR_READ_4(sc, ET_MAC_CFG1);
+ if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
+ (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
+ break;
+
+ DELAY(10);
+ }
+ if (i == NRETRY) {
+ aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
+ return ETIMEDOUT;
+ }
+
+#undef NRETRY
+ return 0;
+out:
+ return rc;
+}
+
+/*
+ * et_rxeof: RX completion.  Walk the RX stat ring from our software
+ * index up to the hardware's index/wrap (read from the DMA'd RX
+ * status block), and for each entry: acknowledge it via
+ * ET_RXSTAT_POS, validate ring/buffer indices, replace the receive
+ * mbuf and pass the old one up the stack, then advance and write back
+ * the descriptor ring position so the hardware can reuse the slot.
+ * Index/wrap pairs are the ring-full-vs-empty disambiguation.
+ */
+void
+et_rxeof(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
+ struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
+ uint32_t rxs_stat_ring;
+ int rxst_wrap, rxst_index;
+
+ bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
+ rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
+ rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+
+ rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
+ rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
+ rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
+
+ while (rxst_index != rxst_ring->rsr_index ||
+ rxst_wrap != rxst_ring->rsr_wrap) {
+ struct et_rxbuf_data *rbd;
+ struct et_rxdesc_ring *rx_ring;
+ struct et_rxstat *st;
+ struct et_rxbuf *rb;
+ struct mbuf *m;
+ int buflen, buf_idx, ring_idx;
+ uint32_t rxstat_pos, rxring_pos;
+
+ KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
+ st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
+
+ buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
+ buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
+ ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
+
+ if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
+ rxst_ring->rsr_index = 0;
+ rxst_ring->rsr_wrap ^= 1;
+ }
+ rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
+ ET_RXSTAT_POS_INDEX);
+ if (rxst_ring->rsr_wrap)
+ rxstat_pos |= ET_RXSTAT_POS_WRAP;
+ CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
+
+ if (ring_idx >= ET_RX_NRING) {
+ ifp->if_ierrors++;
+ aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
+ ring_idx);
+ continue;
+ }
+ if (buf_idx >= ET_RX_NDESC) {
+ ifp->if_ierrors++;
+ aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
+ buf_idx);
+ continue;
+ }
+
+ rbd = &sc->sc_rx_data[ring_idx];
+ rb = &rbd->rbd_buf[buf_idx];
+ m = rb->rb_mbuf;
+ bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
+ rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+
+ /* Only consume 'm' if a replacement buffer could be posted. */
+ if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
+ if (buflen < ETHER_CRC_LEN) {
+ m_freem(m);
+ ifp->if_ierrors++;
+ } else {
+ m->m_pkthdr.len = m->m_len = buflen -
+ ETHER_CRC_LEN;
+ m->m_pkthdr.rcvif = ifp;
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf != NULL)
+ bpf_mtap(ifp->if_bpf, m);
+#endif
+
+ ifp->if_ipackets++;
+ (*ifp->if_input)(ifp, m);
+ }
+ } else {
+ ifp->if_ierrors++;
+ }
+
+ rx_ring = &sc->sc_rx_ring[ring_idx];
+
+ if (buf_idx != rx_ring->rr_index) {
+ aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
+ "buf_idx %d, rr_idx %d\n",
+ ring_idx, buf_idx, rx_ring->rr_index);
+ }
+
+ KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
+ if (++rx_ring->rr_index == ET_RX_NDESC) {
+ rx_ring->rr_index = 0;
+ rx_ring->rr_wrap ^= 1;
+ }
+ rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
+ if (rx_ring->rr_wrap)
+ rxring_pos |= ET_RX_RING_POS_WRAP;
+ CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
+ }
+}
+
+/*
+ * et_encap: map *m0 for DMA (defragmenting into a single mbuf on
+ * EFBIG) and append it to the TX descriptor ring, then write the new
+ * ready position.  On success *m0 may point at a replacement mbuf; on
+ * failure the mbuf chain is freed exactly once and *m0 is set NULL.
+ *
+ * Fixes vs. submitted patch:
+ *  - the defrag error paths called m_freem(m) and then fell through to
+ *    the 'back:' label which freed m again (double free); m is now
+ *    freed only at 'back:',
+ *  - string literals broken by mail wrapping rejoined,
+ *  - missing '\n' on the "can't load TX mbuf" message.
+ */
+int
+et_encap(struct et_softc *sc, struct mbuf **m0)
+{
+ struct mbuf *m = *m0;
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+ struct et_txdesc *td;
+ bus_dmamap_t map;
+ int error, maxsegs, first_idx, last_idx, i;
+ uint32_t tx_ready_pos, last_td_ctrl2;
+
+ maxsegs = ET_TX_NDESC - tbd->tbd_used;
+ if (maxsegs > ET_NSEG_MAX)
+ maxsegs = ET_NSEG_MAX;
+ KASSERT(maxsegs >= ET_NSEG_SPARE,
+ ("not enough spare TX desc (%d)\n", maxsegs));
+
+ KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
+ first_idx = tx_ring->tr_ready_index;
+ map = tbd->tbd_buf[first_idx].tb_dmap;
+
+ error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
+ BUS_DMA_NOWAIT);
+ if (!error && map->dm_nsegs == 0) {
+ bus_dmamap_unload(sc->sc_dmat, map);
+ error = EFBIG;
+ }
+ if (error && error != EFBIG) {
+ aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
+ goto back;
+ }
+ if (error) { /* error == EFBIG; defragment into one mbuf */
+ struct mbuf *m_new;
+
+ error = 0;
+
+ MGETHDR(m_new, M_DONTWAIT, MT_DATA);
+ if (m_new == NULL) {
+ aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
+ error = ENOBUFS;
+ goto back; /* 'back' frees m exactly once */
+ }
+
+ M_COPY_PKTHDR(m_new, m);
+ if (m->m_pkthdr.len > MHLEN) {
+ MCLGET(m_new, M_DONTWAIT);
+ if (!(m_new->m_flags & M_EXT)) {
+ m_freem(m_new);
+ error = ENOBUFS;
+ }
+ }
+
+ if (error) {
+ aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
+ goto back;
+ }
+
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
+ m_freem(m);
+ m_new->m_len = m_new->m_pkthdr.len;
+ *m0 = m = m_new;
+
+ error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
+ BUS_DMA_NOWAIT);
+ if (error || map->dm_nsegs == 0) {
+ if (map->dm_nsegs == 0) {
+ bus_dmamap_unload(sc->sc_dmat, map);
+ error = EFBIG;
+ }
+ aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n");
+ goto back;
+ }
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
+ BUS_DMASYNC_PREWRITE);
+
+ last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
+ sc->sc_tx += map->dm_nsegs;
+ /* Request a TX interrupt roughly every sc_tx_intr_nsegs segments. */
+ if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
+ sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
+ last_td_ctrl2 |= ET_TDCTRL2_INTR;
+ }
+
+ last_idx = -1;
+ for (i = 0; i < map->dm_nsegs; ++i) {
+ int idx;
+
+ idx = (first_idx + i) % ET_TX_NDESC;
+ td = &tx_ring->tr_desc[idx];
+ td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
+ td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
+ td->td_ctrl1 =
+ __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);
+
+ if (i == map->dm_nsegs - 1) { /* Last frag */
+ td->td_ctrl2 = last_td_ctrl2;
+ last_idx = idx;
+ }
+
+ KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
+ if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
+ tx_ring->tr_ready_index = 0;
+ tx_ring->tr_ready_wrap ^= 1;
+ }
+ }
+ td = &tx_ring->tr_desc[first_idx];
+ td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */
+
+ KKASSERT(last_idx >= 0);
+ /* The loaded map must live with the LAST descriptor (freed there). */
+ tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
+ tbd->tbd_buf[last_idx].tb_dmap = map;
+ tbd->tbd_buf[last_idx].tb_mbuf = m;
+
+ tbd->tbd_used += map->dm_nsegs;
+ KKASSERT(tbd->tbd_used <= ET_TX_NDESC);
+
+ bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
+ tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+
+
+ tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
+ ET_TX_READY_POS_INDEX);
+ if (tx_ring->tr_ready_wrap)
+ tx_ready_pos |= ET_TX_READY_POS_WRAP;
+ CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
+
+ error = 0;
+back:
+ if (error) {
+ m_freem(m);
+ *m0 = NULL;
+ }
+ return error;
+}
+
+/*
+ * et_txeof: TX completion.  Reclaim descriptors from our start index
+ * up to the hardware's done position: clear each descriptor, free the
+ * mbuf attached to it (only the last descriptor of a packet carries
+ * one), and advance index/wrap.  When the ring drains, stop the
+ * TX-reclaim callout and watchdog; when enough slots free up, clear
+ * OACTIVE and restart transmission.
+ */
+void
+et_txeof(struct et_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
+ struct et_txbuf_data *tbd = &sc->sc_tx_data;
+ uint32_t tx_done;
+ int end, wrap;
+
+ if (tbd->tbd_used == 0)
+ return;
+
+ tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
+ end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
+ wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
+
+ while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
+ struct et_txbuf *tb;
+
+ KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
+ tb = &tbd->tbd_buf[tbd->tbd_start_index];
+
+ bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
+ sizeof(struct et_txdesc));
+ bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
+ tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+
+ if (tb->tb_mbuf != NULL) {
+ bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
+ m_freem(tb->tb_mbuf);
+ tb->tb_mbuf = NULL;
+ ifp->if_opackets++;
+ }
+
+ if (++tbd->tbd_start_index == ET_TX_NDESC) {
+ tbd->tbd_start_index = 0;
+ tbd->tbd_start_wrap ^= 1;
+ }
+
+ KKASSERT(tbd->tbd_used > 0);
+ tbd->tbd_used--;
+ }
+
+ if (tbd->tbd_used == 0) {
+ callout_stop(&sc->sc_txtick);
+ ifp->if_timer = 0;
+ }
+ if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ et_start(ifp);
+}
+
+void
+et_txtick(void *xsc)
+{
+ struct et_softc *sc = xsc;
+ int s;
+
+ s = splnet();
+ et_txeof(sc);
+ splx(s);
+}
+
+void
+et_tick(void *xsc)
+{
+ struct et_softc *sc = xsc;
+ int s;
+
+ s = splnet();
+ mii_tick(&sc->sc_miibus);
+ callout_schedule(&sc->sc_tick, hz);
+ splx(s);
+}
+
+int
+et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
+{
+ return et_newbuf(rbd, buf_idx, init, MCLBYTES);
+}
+
+int
+et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
+{
+ return et_newbuf(rbd, buf_idx, init, MHLEN);
+}
+
+int
+et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
+{
+ struct et_softc *sc = rbd->rbd_softc;
+ struct et_rxdesc_ring *rx_ring;
+ struct et_rxdesc *desc;
+ struct et_rxbuf *rb;
+ struct mbuf *m;
+ bus_dmamap_t dmap;
+ int error, len;
+
+ KKASSERT(buf_idx < ET_RX_NDESC);
+ rb = &rbd->rbd_buf[buf_idx];
+
+ if (len0 >= MINCLSIZE) {
+ MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (ENOBUFS);
+		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
+		if ((m->m_flags & M_EXT) == 0) {
+			/* no cluster attached; take the m == NULL error path */
+			m_freem(m);
+			m = NULL;
+		}
+		len = MCLBYTES;
+ } else {
+ MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
+ len = MHLEN;
+ }
+
+ if (m == NULL) {
+ error = ENOBUFS;
+
+ /* XXX for debug */
+ aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
+ if (init) {
+ return error;
+ } else {
+ goto back;
+ }
+ }
+ m->m_len = m->m_pkthdr.len = len;
+
+ /*
+ * Try load RX mbuf into temporary DMA tag
+ */
+ error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
+ init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
+	if (error || sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
+		if (sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
+ bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
+ error = EFBIG;
+ aprint_error_dev(sc->sc_dev, "too many segments?!\n");
+ }
+ m_freem(m);
+
+ /* XXX for debug */
+ aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
+ if (init) {
+ return error;
+ } else {
+ goto back;
+ }
+ }
+
+ if (!init)
+ bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
+ rb->rb_mbuf = m;
+
+ /*
+ * Swap RX buf's DMA map with the loaded temporary one
+ */
+ dmap = rb->rb_dmap;
+ rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
+ rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
+ sc->sc_mbuf_tmp_dmap = dmap;
+
+ error = 0;
+back:
+ rx_ring = rbd->rbd_ring;
+ desc = &rx_ring->rr_desc[buf_idx];
+
+ desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
+ desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
+ desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
+
+ bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
+ rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ return error;
+}
Index: sys/dev/pci/if_etreg.h
===================================================================
RCS file: sys/dev/pci/if_etreg.h
diff -N sys/dev/pci/if_etreg.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/dev/pci/if_etreg.h 3 Jul 2008 16:34:02 -0000
@@ -0,0 +1,531 @@
+/* $NetBSD$ */
+/* $OpenBSD: if_etreg.h,v 1.3 2008/06/08 06:18:07 jsg Exp $ */
+
+/*
+ * Copyright (c) 2007 The DragonFly Project. All rights reserved.
+ *
+ * This code is derived from software contributed to The DragonFly Project
+ * by Sepherosa Ziehau <sepherosa%gmail.com@localhost>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of The DragonFly Project nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific, prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $DragonFly: src/sys/dev/netif/et/if_etreg.h,v 1.1 2007/10/12 14:12:42 sephe Exp $
+ */
+
+#ifndef _IF_ETREG_H
+#define _IF_ETREG_H
+
+#define ET_INTERN_MEM_SIZE 0x400
+#define ET_INTERN_MEM_END (ET_INTERN_MEM_SIZE - 1)
+
+/*
+ * PCI registers
+ *
+ * ET_PCIV_ACK_LATENCY_{128,256} are from
+ * PCI EXPRESS BASE SPECIFICATION, REV. 1.0a, Table 3-5
+ *
+ * ET_PCIV_REPLAY_TIMER_{128,256} are from
+ * PCI EXPRESS BASE SPECIFICATION, REV. 1.0a, Table 3-4
+ */
+#define ET_PCIR_BAR 0x10
+
+#define ET_PCIR_DEVICE_CAPS 0x4c
+#define ET_PCIM_DEVICE_CAPS_MAX_PLSZ 0x7 /* Max playload size */
+#define ET_PCIV_DEVICE_CAPS_PLSZ_128 0x0
+#define ET_PCIV_DEVICE_CAPS_PLSZ_256 0x1
+
+#define ET_PCIR_DEVICE_CTRL 0x50
+#define ET_PCIM_DEVICE_CTRL_MAX_RRSZ 0x7000 /* Max read request size */
+#define ET_PCIV_DEVICE_CTRL_RRSZ_2K 0x4000
+
+#define ET_PCIR_MACADDR_LO 0xa4
+#define ET_PCIR_MACADDR_HI 0xa8
+
+#define ET_PCIR_EEPROM_MISC 0xb0
+#define ET_PCIR_EEPROM_STATUS_MASK 0x0000ff00
+#define ET_PCIM_EEPROM_STATUS_ERROR 0x00004c00
+
+#define ET_PCIR_ACK_LATENCY 0xc0
+#define ET_PCIV_ACK_LATENCY_128 237
+#define ET_PCIV_ACK_LATENCY_256 416
+
+#define ET_PCIR_REPLAY_TIMER 0xc2
+#define ET_REPLAY_TIMER_RX_L0S_ADJ 250 /* XXX infered from default */
+#define ET_PCIV_REPLAY_TIMER_128 (711 + ET_REPLAY_TIMER_RX_L0S_ADJ)
+#define ET_PCIV_REPLAY_TIMER_256 (1248 + ET_REPLAY_TIMER_RX_L0S_ADJ)
+
+#define ET_PCIR_L0S_L1_LATENCY 0xcf
+#define ET_PCIM_L0S_LATENCY (7 << 0)
+#define ET_PCIM_L1_LATENCY (7 << 3)
+
+/*
+ * CSR
+ */
+#define ET_TXQ_START 0x0000
+#define ET_TXQ_END 0x0004
+#define ET_RXQ_START 0x0008
+#define ET_RXQ_END 0x000c
+
+#define ET_PM 0x0010
+#define ET_PM_SYSCLK_GATE (1 << 3)
+#define ET_PM_TXCLK_GATE (1 << 4)
+#define ET_PM_RXCLK_GATE (1 << 5)
+
+#define ET_INTR_STATUS 0x0018
+#define ET_INTR_MASK 0x001c
+
+#define ET_SWRST 0x0028
+#define ET_SWRST_TXDMA (1 << 0)
+#define ET_SWRST_RXDMA (1 << 1)
+#define ET_SWRST_TXMAC (1 << 2)
+#define ET_SWRST_RXMAC (1 << 3)
+#define ET_SWRST_MAC (1 << 4)
+#define ET_SWRST_MAC_STAT (1 << 5)
+#define ET_SWRST_MMC (1 << 6)
+#define ET_SWRST_SELFCLR_DISABLE (1 << 31)
+
+#define ET_MSI_CFG 0x0030
+
+#define ET_LOOPBACK 0x0034
+
+#define ET_TIMER 0x0038
+
+#define ET_TXDMA_CTRL 0x1000
+#define ET_TXDMA_CTRL_HALT (1 << 0)
+#define ET_TXDMA_CTRL_CACHE_THR 0xf0
+#define ET_TXDMA_CTRL_SINGLE_EPKT (1 << 8)
+
+#define ET_TX_RING_HI 0x1004
+#define ET_TX_RING_LO 0x1008
+#define ET_TX_RING_CNT 0x100c
+
+#define ET_TX_STATUS_HI 0x101c
+#define ET_TX_STATUS_LO 0x1020
+
+#define ET_TX_READY_POS 0x1024
+#define ET_TX_READY_POS_INDEX 0x03ff
+#define ET_TX_READY_POS_WRAP (1 << 10)
+
+#define ET_TX_DONE_POS 0x1060
+#define ET_TX_DONE_POS_INDEX 0x03ff
+#define ET_TX_DONE_POS_WRAP (1 << 10)
+
+#define ET_RXDMA_CTRL 0x2000
+#define ET_RXDMA_CTRL_HALT (1 << 0)
+#define ET_RXDMA_CTRL_RING0_SIZE (3 << 8)
+#define ET_RXDMA_CTRL_RING0_ENABLE (1 << 10)
+#define ET_RXDMA_CTRL_RING1_SIZE (3 << 11)
+#define ET_RXDMA_CTRL_RING1_ENABLE (1 << 13)
+#define ET_RXDMA_CTRL_HALTED (1 << 17)
+
+#define ET_RX_STATUS_LO 0x2004
+#define ET_RX_STATUS_HI 0x2008
+
+#define ET_RX_INTR_NPKTS 0x200c
+#define ET_RX_INTR_DELAY 0x2010
+
+#define ET_RXSTAT_LO 0x2020
+#define ET_RXSTAT_HI 0x2024
+#define ET_RXSTAT_CNT 0x2028
+
+#define ET_RXSTAT_POS 0x2030
+#define ET_RXSTAT_POS_INDEX 0x0fff
+#define ET_RXSTAT_POS_WRAP (1 << 12)
+
+#define ET_RXSTAT_MINCNT 0x2038
+
+#define ET_RX_RING0_LO 0x203c
+#define ET_RX_RING0_HI 0x2040
+#define ET_RX_RING0_CNT 0x2044
+
+#define ET_RX_RING0_POS 0x204c
+#define ET_RX_RING0_POS_INDEX 0x03ff
+#define ET_RX_RING0_POS_WRAP (1 << 10)
+
+#define ET_RX_RING0_MINCNT 0x2054
+
+#define ET_RX_RING1_LO 0x2058
+#define ET_RX_RING1_HI 0x205c
+#define ET_RX_RING1_CNT 0x2060
+
+#define ET_RX_RING1_POS 0x2068
+#define ET_RX_RING1_POS_INDEX 0x03ff
+#define ET_RX_RING1_POS_WRAP (1 << 10)
+
+#define ET_RX_RING1_MINCNT 0x2070
+
+#define ET_TXMAC_CTRL 0x3000
+#define ET_TXMAC_CTRL_ENABLE (1 << 0)
+#define ET_TXMAC_CTRL_FC_DISABLE (1 << 3)
+
+#define ET_TXMAC_FLOWCTRL 0x3010
+
+#define ET_RXMAC_CTRL 0x4000
+#define ET_RXMAC_CTRL_ENABLE (1 << 0)
+#define ET_RXMAC_CTRL_NO_PKTFILT (1 << 2)
+#define ET_RXMAC_CTRL_WOL_DISABLE (1 << 3)
+
+#define ET_WOL_CRC 0x4004
+#define ET_WOL_SA_LO 0x4010
+#define ET_WOL_SA_HI 0x4014
+#define ET_WOL_MASK 0x4018
+
+#define ET_UCAST_FILTADDR1 0x4068
+#define ET_UCAST_FILTADDR2 0x406c
+#define ET_UCAST_FILTADDR3 0x4070
+
+#define ET_MULTI_HASH 0x4074
+
+#define ET_PKTFILT 0x4084
+#define ET_PKTFILT_BCAST (1 << 0)
+#define ET_PKTFILT_MCAST (1 << 1)
+#define ET_PKTFILT_UCAST (1 << 2)
+#define ET_PKTFILT_FRAG (1 << 3)
+#define ET_PKTFILT_MINLEN 0x7f0000
+
+#define ET_RXMAC_MC_SEGSZ 0x4088
+#define ET_RXMAC_MC_SEGSZ_ENABLE (1 << 0)
+#define ET_RXMAC_MC_SEGSZ_FC (1 << 1)
+#define ET_RXMAC_MC_SEGSZ_MAX 0x03fc
+
+#define ET_RXMAC_MC_WATERMARK 0x408c
+#define ET_RXMAC_SPACE_AVL 0x4094
+
+#define ET_RXMAC_MGT 0x4098
+#define ET_RXMAC_MGT_PASS_ECRC (1 << 4)
+#define ET_RXMAC_MGT_PASS_ELEN (1 << 5)
+#define ET_RXMAC_MGT_PASS_ETRUNC (1 << 16)
+#define ET_RXMAC_MGT_CHECK_PKT (1 << 17)
+
+#define ET_MAC_CFG1 0x5000
+#define ET_MAC_CFG1_TXEN (1 << 0)
+#define ET_MAC_CFG1_SYNC_TXEN (1 << 1)
+#define ET_MAC_CFG1_RXEN (1 << 2)
+#define ET_MAC_CFG1_SYNC_RXEN (1 << 3)
+#define ET_MAC_CFG1_TXFLOW (1 << 4)
+#define ET_MAC_CFG1_RXFLOW (1 << 5)
+#define ET_MAC_CFG1_LOOPBACK (1 << 8)
+#define ET_MAC_CFG1_RST_TXFUNC (1 << 16)
+#define ET_MAC_CFG1_RST_RXFUNC (1 << 17)
+#define ET_MAC_CFG1_RST_TXMC (1 << 18)
+#define ET_MAC_CFG1_RST_RXMC (1 << 19)
+#define ET_MAC_CFG1_SIM_RST (1 << 30)
+#define ET_MAC_CFG1_SOFT_RST (1 << 31)
+
+#define ET_MAC_CFG2 0x5004
+#define ET_MAC_CFG2_FDX (1 << 0)
+#define ET_MAC_CFG2_CRC (1 << 1)
+#define ET_MAC_CFG2_PADCRC (1 << 2)
+#define ET_MAC_CFG2_LENCHK (1 << 4)
+#define ET_MAC_CFG2_BIGFRM (1 << 5)
+#define ET_MAC_CFG2_MODE_MII (1 << 8)
+#define ET_MAC_CFG2_MODE_GMII (1 << 9)
+#define ET_MAC_CFG2_PREAMBLE_LEN 0xf000
+
+#define ET_IPG 0x5008
+#define ET_IPG_B2B 0x0000007f
+#define ET_IPG_MINIFG 0x0000ff00
+#define ET_IPG_NONB2B_2 0x007f0000
+#define ET_IPG_NONB2B_1 0x7f000000
+
+#define ET_MAC_HDX 0x500c
+#define ET_MAC_HDX_COLLWIN 0x0003ff
+#define ET_MAC_HDX_REXMIT_MAX		0x00f000
+#define ET_MAC_HDX_EXC_DEFER (1 << 16)
+#define ET_MAC_HDX_NOBACKOFF (1 << 17)
+#define ET_MAC_HDX_BP_NOBACKOFF (1 << 18)
+#define ET_MAC_HDX_ALT_BEB (1 << 19)
+#define ET_MAC_HDX_ALT_BEB_TRUNC 0xf00000
+
+#define ET_MAX_FRMLEN 0x5010
+
+#define ET_MII_CFG 0x5020
+#define ET_MII_CFG_CLKRST (7 << 0)
+#define ET_MII_CFG_PREAMBLE_SUP (1 << 4)
+#define ET_MII_CFG_SCAN_AUTOINC (1 << 5)
+#define ET_MII_CFG_RST (1 << 31)
+
+#define ET_MII_CMD 0x5024
+#define ET_MII_CMD_READ (1 << 0)
+
+#define ET_MII_ADDR 0x5028
+#define ET_MII_ADDR_REG 0x001f
+#define ET_MII_ADDR_PHY 0x1f00
+#define ET_MII_ADDR_SHIFT 8
+
+
+#define ET_MII_CTRL 0x502c
+#define ET_MII_CTRL_VALUE 0xffff
+
+#define ET_MII_STAT 0x5030
+#define ET_MII_STAT_VALUE 0xffff
+
+#define ET_MII_IND 0x5034
+#define ET_MII_IND_BUSY (1 << 0)
+#define ET_MII_IND_INVALID (1 << 2)
+
+#define ET_MAC_CTRL 0x5038
+#define ET_MAC_CTRL_MODE_MII (1 << 24)
+#define ET_MAC_CTRL_LHDX (1 << 25)
+#define ET_MAC_CTRL_GHDX (1 << 26)
+
+#define ET_MAC_ADDR1 0x5040
+#define ET_MAC_ADDR2 0x5044
+
+#define ET_MMC_CTRL 0x7000
+#define ET_MMC_CTRL_ENABLE (1 << 0)
+#define ET_MMC_CTRL_ARB_DISABLE (1 << 1)
+#define ET_MMC_CTRL_RXMAC_DISABLE (1 << 2)
+#define ET_MMC_CTRL_TXMAC_DISABLE (1 << 3)
+#define ET_MMC_CTRL_TXDMA_DISABLE (1 << 4)
+#define ET_MMC_CTRL_RXDMA_DISABLE (1 << 5)
+#define ET_MMC_CTRL_FORCE_CE (1 << 6)
+
+/*
+ * Interrupts
+ */
+#define ET_INTR_TXEOF (1 << 3)
+#define ET_INTR_TXDMA_ERROR (1 << 4)
+#define ET_INTR_RXEOF (1 << 5)
+#define ET_INTR_RXRING0_LOW (1 << 6)
+#define ET_INTR_RXRING1_LOW (1 << 7)
+#define ET_INTR_RXSTAT_LOW (1 << 8)
+#define ET_INTR_RXDMA_ERROR (1 << 9)
+#define ET_INTR_TIMER (1 << 10)
+#define ET_INTR_WOL (1 << 15)
+#define ET_INTR_PHY (1 << 16)
+#define ET_INTR_TXMAC (1 << 17)
+#define ET_INTR_RXMAC (1 << 18)
+#define ET_INTR_MAC_STATS (1 << 19)
+#define ET_INTR_SLAVE_TO (1 << 20)
+
+#define ET_INTRS (ET_INTR_TXEOF | \
+ ET_INTR_RXEOF | \
+ ET_INTR_TIMER)
+
+/*
+ * RX ring position uses same layout
+ */
+#define ET_RX_RING_POS_INDEX (0x03ff << 0)
+#define ET_RX_RING_POS_WRAP (1 << 10)
+
+
+/* $DragonFly: src/sys/dev/netif/et/if_etvar.h,v 1.1 2007/10/12 14:12:42 sephe Exp $ */
+
+#define ET_ALIGN 0x1000
+#define ET_NSEG_MAX 32 /* XXX no limit actually */
+#define ET_NSEG_SPARE 5
+
+#define ET_TX_NDESC 512
+#define ET_RX_NDESC 512
+#define ET_RX_NRING 2
+#define ET_RX_NSTAT (ET_RX_NRING * ET_RX_NDESC)
+
+#define ET_TX_RING_SIZE (ET_TX_NDESC * sizeof(struct et_txdesc))
+#define ET_RX_RING_SIZE (ET_RX_NDESC * sizeof(struct et_rxdesc))
+#define ET_RXSTAT_RING_SIZE (ET_RX_NSTAT * sizeof(struct et_rxstat))
+
+#define CSR_WRITE_4(sc, reg, val) \
+ bus_space_write_4((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg), (val))
+#define CSR_READ_4(sc, reg) \
+ bus_space_read_4((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg))
+
+#define ET_ADDR_HI(addr) ((uint64_t) (addr) >> 32)
+#define ET_ADDR_LO(addr) ((uint64_t) (addr) & 0xffffffff)
+
+struct et_txdesc {
+ uint32_t td_addr_hi;
+ uint32_t td_addr_lo;
+ uint32_t td_ctrl1; /* ET_TDCTRL1_ */
+ uint32_t td_ctrl2; /* ET_TDCTRL2_ */
+} __packed;
+
+#define ET_TDCTRL1_LEN 0xffff
+
+#define ET_TDCTRL2_LAST_FRAG (1 << 0)
+#define ET_TDCTRL2_FIRST_FRAG (1 << 1)
+#define ET_TDCTRL2_INTR (1 << 2)
+
+struct et_rxdesc {
+ uint32_t rd_addr_lo;
+ uint32_t rd_addr_hi;
+ uint32_t rd_ctrl; /* ET_RDCTRL_ */
+} __packed;
+
+#define ET_RDCTRL_BUFIDX 0x03ff
+
+struct et_rxstat {
+ uint32_t rxst_info1;
+ uint32_t rxst_info2; /* ET_RXST_INFO2_ */
+} __packed;
+
+#define ET_RXST_INFO2_LEN 0x000ffff
+#define ET_RXST_INFO2_BUFIDX 0x3ff0000
+#define ET_RXST_INFO2_RINGIDX (3 << 26)
+
+struct et_rxstatus {
+ uint32_t rxs_ring;
+ uint32_t rxs_stat_ring; /* ET_RXS_STATRING_ */
+} __packed;
+
+#define ET_RXS_STATRING_INDEX 0xfff0000
+#define ET_RXS_STATRING_WRAP (1 << 28)
+
+struct et_txbuf {
+ struct mbuf *tb_mbuf;
+ bus_dmamap_t tb_dmap;
+ bus_dma_segment_t tb_seg;
+};
+
+struct et_rxbuf {
+ struct mbuf *rb_mbuf;
+ bus_dmamap_t rb_dmap;
+ bus_dma_segment_t rb_seg;
+ bus_addr_t rb_paddr;
+};
+
+struct et_txstatus_data {
+ uint32_t *txsd_status;
+ bus_addr_t txsd_paddr;
+ bus_dma_tag_t txsd_dtag;
+ bus_dmamap_t txsd_dmap;
+ bus_dma_segment_t txsd_seg;
+};
+
+struct et_rxstatus_data {
+ struct et_rxstatus *rxsd_status;
+ bus_addr_t rxsd_paddr;
+ bus_dma_tag_t rxsd_dtag;
+ bus_dmamap_t rxsd_dmap;
+ bus_dma_segment_t rxsd_seg;
+};
+
+struct et_rxstat_ring {
+ struct et_rxstat *rsr_stat;
+ bus_addr_t rsr_paddr;
+ bus_dma_tag_t rsr_dtag;
+ bus_dmamap_t rsr_dmap;
+ bus_dma_segment_t rsr_seg;
+
+ int rsr_index;
+ int rsr_wrap;
+};
+
+struct et_txdesc_ring {
+ struct et_txdesc *tr_desc;
+ bus_addr_t tr_paddr;
+ bus_dma_tag_t tr_dtag;
+ bus_dmamap_t tr_dmap;
+ bus_dma_segment_t tr_seg;
+
+ int tr_ready_index;
+ int tr_ready_wrap;
+};
+
+struct et_rxdesc_ring {
+ struct et_rxdesc *rr_desc;
+ bus_addr_t rr_paddr;
+ bus_dma_tag_t rr_dtag;
+ bus_dmamap_t rr_dmap;
+ bus_dma_segment_t rr_seg;
+
+ uint32_t rr_posreg;
+ int rr_index;
+ int rr_wrap;
+};
+
+struct et_txbuf_data {
+ struct et_txbuf tbd_buf[ET_TX_NDESC];
+
+ int tbd_start_index;
+ int tbd_start_wrap;
+ int tbd_used;
+};
+
+struct et_softc;
+struct et_rxbuf_data;
+typedef int (*et_newbuf_t)(struct et_rxbuf_data *, int, int);
+
+struct et_rxbuf_data {
+ struct et_rxbuf rbd_buf[ET_RX_NDESC];
+
+ struct et_softc *rbd_softc;
+ struct et_rxdesc_ring *rbd_ring;
+
+ int rbd_bufsize;
+ et_newbuf_t rbd_newbuf;
+};
+
+struct et_softc {
+ device_t sc_dev;
+ struct ethercom sc_ethercom;
+ uint8_t sc_enaddr[ETHER_ADDR_LEN];
+ int sc_if_flags;
+
+ int sc_mem_rid;
+ struct resource *sc_mem_res;
+ bus_space_tag_t sc_mem_bt;
+ bus_space_handle_t sc_mem_bh;
+ bus_size_t sc_mem_size;
+ bus_dma_tag_t sc_dmat;
+ pci_chipset_tag_t sc_pct;
+ pcitag_t sc_pcitag;
+
+ int sc_irq_rid;
+ struct resource *sc_irq_res;
+ void *sc_irq_handle;
+
+ struct mii_data sc_miibus;
+ callout_t sc_tick;
+
+ struct et_rxdesc_ring sc_rx_ring[ET_RX_NRING];
+ struct et_rxstat_ring sc_rxstat_ring;
+ struct et_rxstatus_data sc_rx_status;
+
+ struct et_txdesc_ring sc_tx_ring;
+ struct et_txstatus_data sc_tx_status;
+ callout_t sc_txtick;
+
+ bus_dmamap_t sc_mbuf_tmp_dmap;
+ struct et_rxbuf_data sc_rx_data[ET_RX_NRING];
+ struct et_txbuf_data sc_tx_data;
+
+ uint32_t sc_tx;
+ uint32_t sc_tx_intr;
+
+ /*
+ * Sysctl variables
+ */
+ int sc_rx_intr_npkts;
+ int sc_rx_intr_delay;
+ int sc_tx_intr_nsegs;
+ uint32_t sc_timer;
+};
+
+#endif /* !_IF_ETREG_H */
Index: sys/dev/pci/pcidevs
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pcidevs,v
retrieving revision 1.942
diff -u -p -r1.942 pcidevs
--- sys/dev/pci/pcidevs 25 May 2008 15:58:48 -0000 1.942
+++ sys/dev/pci/pcidevs 3 Jul 2008 16:34:03 -0000
@@ -2697,6 +2697,8 @@ product LUCENT OR3TP12 0x5401 ORCA FPGA
product LUCENT USBHC 0x5801 USB Host Controller
product LUCENT EVDO 0x5802 Sierra Wireless AirCard 580
product LUCENT FW322_323 0x5811 FW322/323 IEEE 1394 Host Controller
+product LUCENT ET1310 0xed00 ET1310 10/100/1000 Ethernet
+product LUCENT ET1301 0xed01 ET1301 10/100 Ethernet
/* Macronix */
product MACRONIX MX98713 0x0512 MX98713 (PMAC) 10/100 Ethernet
Index: sys/dev/pci/pcidevs.h
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pcidevs.h,v
retrieving revision 1.943
diff -u -p -r1.943 pcidevs.h
--- sys/dev/pci/pcidevs.h 25 May 2008 15:59:21 -0000 1.943
+++ sys/dev/pci/pcidevs.h 3 Jul 2008 16:34:04 -0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pcidevs.h,v 1.943 2008/05/25 15:59:21 chs Exp $ */
+/* $NetBSD$ */
/*
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
@@ -2704,6 +2704,8 @@
#define PCI_PRODUCT_LUCENT_USBHC 0x5801 /* USB Host
Controller */
#define PCI_PRODUCT_LUCENT_EVDO 0x5802 /* Sierra Wireless
AirCard 580 */
#define PCI_PRODUCT_LUCENT_FW322_323 0x5811 /* FW322/323
IEEE 1394 Host Controller */
+#define	PCI_PRODUCT_LUCENT_ET1310	0xed00		/* ET1310 10/100/1000 Ethernet */
+#define	PCI_PRODUCT_LUCENT_ET1301	0xed01		/* ET1301 10/100 Ethernet */
/* Macronix */
#define PCI_PRODUCT_MACRONIX_MX98713 0x0512 /* MX98713
(PMAC) 10/100 Ethernet */
Index: sys/dev/pci/pcidevs_data.h
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pcidevs_data.h,v
retrieving revision 1.942
diff -u -p -r1.942 pcidevs_data.h
--- sys/dev/pci/pcidevs_data.h 25 May 2008 15:59:21 -0000 1.942
+++ sys/dev/pci/pcidevs_data.h 3 Jul 2008 16:34:04 -0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pcidevs_data.h,v 1.942 2008/05/25 15:59:21 chs Exp $ */
+/* $NetBSD$ */
/*
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
@@ -9524,6 +9524,14 @@ static const struct pci_product pci_prod
"FW322/323 IEEE 1394 Host Controller",
},
{
+ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
+ "ET1310 10/100/1000 Ethernet",
+ },
+ {
+ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301,
+ "ET1301 10/100 Ethernet",
+ },
+ {
PCI_VENDOR_MACRONIX, PCI_PRODUCT_MACRONIX_MX98713,
"MX98713 (PMAC) 10/100 Ethernet",
},
@@ -14252,4 +14260,4 @@ static const struct pci_product pci_prod
"Video Controller",
},
};
-const int pci_nproducts = 2963;
+const int pci_nproducts = 2965;
Index: sys/dev/pci/files.pci
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/files.pci,v
retrieving revision 1.304
diff -u -p -r1.304 files.pci
--- sys/dev/pci/files.pci 23 May 2008 21:11:40 -0000 1.304
+++ sys/dev/pci/files.pci 3 Jul 2008 16:34:04 -0000
@@ -899,6 +899,11 @@ device msk: ether, ifnet, arp, mii
attach msk at mskc
file dev/pci/if_msk.c mskc | msk
+# Agere ET1310/1301 Ethernet
+device et: ether, ifnet, arp, mii, mii_phy
+attach et at pci
+file dev/pci/if_et.c et
+
#
# Direct Rendering Manager
#
Index: share/man/man4/et.4
===================================================================
RCS file: share/man/man4/et.4
diff -N share/man/man4/et.4
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ share/man/man4/et.4 3 Jul 2008 16:34:04 -0000
@@ -0,0 +1,79 @@
+.\" $NetBSD$
+.\" $OpenBSD: et.4,v 1.2 2007/10/30 13:22:21 jmc Exp $
+.\"
+.\" Copyright (c) 2007 Jonathan Gray <jsg%openbsd.org@localhost>
+.\"
+.\" Permission to use, copy, modify, and distribute this software for any
+.\" purpose with or without fee is hereby granted, provided that the above
+.\" copyright notice and this permission notice appear in all copies.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+.\"
+.Dd July 2, 2008
+.Dt ET 4
+.Os
+.Sh NAME
+.Nm et
+.Nd Agere/LSI ET1310/ET1301 10/100/Gigabit Ethernet device
+.Sh SYNOPSIS
+.Cd "et* at pci? dev ? function ?"
+.Cd "etphy* at mii? phy ?"
+.Sh DESCRIPTION
+The
+.Nm
+driver supports PCI Express Ethernet adapters based on the Agere/LSI
+ET1310/ET1301 integrated MAC/PHY.
+.Pp
+The following
+.Ar media
+types are supported:
+.Pp
+.Bl -tag -width autoselect -compact
+.It Cm autoselect
+Enable autoselection of the media type and options.
+.It Cm 10baseT
+Set 10Mbps operation.
+.It Cm 100baseTX
+Set 100Mbps (Fast Ethernet) operation.
+.It Cm 1000baseT
+Set 1000Mbps (Gigabit Ethernet) operation (ET1310 only).
+.El
+.Sh SEE ALSO
+.Xr arp 4 ,
+.Xr etphy 4 ,
+.Xr ifmedia 4 ,
+.Xr intro 4 ,
+.Xr netintro 4 ,
+.Xr pci 4 ,
+.Xr ifconfig.if 5 ,
+.Xr ifconfig 8
+.Sh HISTORY
+The
+.Nm
+device driver first appeared in
+.Ox 4.3 .
+It was added to
+.Nx 5.0 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An Sepherosa Ziehau
+for
+DragonFlyBSD,
+ported to
+.Ox
+by
+.An Jonathan Gray
+.Aq jsg%openbsd.org@localhost ,
+and subsequently ported to
+.Nx
+by
+.An Kaspar Brand .
Index: share/man/man4/etphy.4
===================================================================
RCS file: share/man/man4/etphy.4
diff -N share/man/man4/etphy.4
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ share/man/man4/etphy.4 3 Jul 2008 16:34:04 -0000
@@ -0,0 +1,58 @@
+.\" $NetBSD$
+.\" $OpenBSD: etphy.4,v 1.2 2007/10/30 13:22:21 jmc Exp $
+.\"
+.\" Copyright (c) 2007 Jonathan Gray <jsg%openbsd.org@localhost>
+.\"
+.\" Permission to use, copy, modify, and distribute this software for any
+.\" purpose with or without fee is hereby granted, provided that the above
+.\" copyright notice and this permission notice appear in all copies.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+.\"
+.Dd July 2, 2008
+.Dt ETPHY 4
+.Os
+.Sh NAME
+.Nm etphy
+.Nd Agere/LSI ET1011 TruePHY Gigabit Ethernet PHY
+.Sh SYNOPSIS
+.Cd "etphy* at mii? phy ?"
+.Sh DESCRIPTION
+The
+.Nm
+driver supports the Agere/LSI ET1011 TruePHY 10/100/1000 Ethernet PHYs
+including the integrated TruePHY in ET1310/ET1301 based adapters.
+.Sh SEE ALSO
+.Xr ifmedia 4 ,
+.Xr intro 4 ,
+.Xr mii 4 ,
+.Xr ifconfig 8
+.Sh HISTORY
+The
+.Nm
+device driver first appeared in
+.Ox 4.3 .
+It was added to
+.Nx 5.0 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An Sepherosa Ziehau
+for DragonFlyBSD,
+ported to
+.Ox
+by
+.An Jonathan Gray
+.Aq jsg%openbsd.org@localhost ,
+and subsequently ported to
+.Nx
+by
+.An Kaspar Brand .
--------------020302010700040001020308
Content-Type: text/plain;
name="et_openbsd_netbsd.diff"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline;
filename="et_openbsd_netbsd.diff"
--- sys/dev/mii/etphy.c.orig 2008-05-31 05:45:31.000000000 +0200
+++ sys/dev/mii/etphy.c 2008-06-26 21:39:10.000000000 +0200
@@ -1,3 +1,4 @@
+/* $NetBSD$ */
/* $OpenBSD: etphy.c,v 1.4 2008/04/02 20:12:58 brad Exp $ */
/*
@@ -36,6 +37,9 @@
 * $DragonFly: src/sys/dev/netif/mii_layer/truephy.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
*/
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD$");
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -74,8 +78,8 @@
int etphy_service(struct mii_softc *, struct mii_data *, int);
-void etphy_attach(struct device *, struct device *, void *);
-int etphy_match(struct device *, void *, void *);
+void etphy_attach(device_t, device_t, void *);
+int etphy_match(device_t, cfdata_t, void *);
void etphy_reset(struct mii_softc *);
void etphy_status(struct mii_softc *);
@@ -90,14 +94,8 @@ static const struct mii_phydesc etphys[]
NULL },
};
-struct cfattach etphy_ca = {
- sizeof (struct mii_softc), etphy_match, etphy_attach,
- mii_phy_detach, mii_phy_activate
-};
-
-struct cfdriver etphy_cd = {
- NULL, "etphy", DV_DULL
-};
+CFATTACH_DECL_NEW(etphy, sizeof(struct mii_softc),
+ etphy_match, etphy_attach, mii_phy_detach, mii_phy_activate);
static const struct etphy_dsp {
uint16_t index;
@@ -138,31 +136,32 @@ static const struct etphy_dsp {
};
int
-etphy_match(struct device *parent, void *match, void *aux)
+etphy_match(device_t parent, cfdata_t match, void *aux)
{
struct mii_attach_args *ma = aux;
if (mii_phy_match(ma, etphys) != NULL)
- return (10);
+ return 10;
- return (0);
+ return 0;
}
void
-etphy_attach(struct device *parent, struct device *self, void *aux)
+etphy_attach(device_t parent, device_t self, void *aux)
{
- struct mii_softc *sc = (struct mii_softc *)self;
+ struct mii_softc *sc = device_private(self);
struct mii_attach_args *ma = aux;
struct mii_data *mii = ma->mii_data;
const struct mii_phydesc *mpd;
mpd = mii_phy_match(ma, etphys);
- printf(": %s, rev. %d\n", mpd->mpd_name, MII_REV(ma->mii_id2));
+ aprint_normal(": %s, rev. %d\n", mpd->mpd_name, MII_REV(ma->mii_id2));
+ sc->mii_dev = self;
sc->mii_inst = mii->mii_instance;
sc->mii_phy = ma->mii_phyno;
sc->mii_funcs = &etphy_funcs;
- sc->mii_model = MII_MODEL(ma->mii_id2);
+ sc->mii_mpd_model = MII_MODEL(ma->mii_id2);
sc->mii_pdata = mii;
sc->mii_flags = ma->mii_flags;
@@ -176,8 +175,15 @@ etphy_attach(struct device *parent, stru
/* No 1000baseT half-duplex support */
sc->mii_extcapabilities &= ~EXTSR_1000THDX;
}
+ aprint_normal_dev(self, "");
+ if ((sc->mii_capabilities & BMSR_MEDIAMASK) == 0)
+ aprint_error("no media present");
+ else
+ mii_phy_add_media(sc);
+ aprint_normal("\n");
- mii_phy_add_media(sc);
+ if (!pmf_device_register(self, NULL, mii_phy_resume))
+ aprint_error_dev(self, "couldn't establish power handler\n");
}
int
--- sys/dev/pci/if_et.c.orig 2008-07-01 18:02:05.000000000 +0200
+++ sys/dev/pci/if_et.c 2008-07-01 20:33:10.000000000 +0200
@@ -1,3 +1,4 @@
+/* $NetBSD$ */
/* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */
/*
* Copyright (c) 2007 The DragonFly Project. All rights reserved.
@@ -35,6 +36,10 @@
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
*/
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD$");
+
+#include "opt_inet.h"
#include "bpfilter.h"
#include "vlan.h"
@@ -47,7 +52,7 @@
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
-#include <sys/timeout.h>
+#include <sys/callout.h>
#include <sys/socket.h>
#include <machine/bus.h>
@@ -55,19 +60,20 @@
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
+#include <net/if_ether.h>
+#include <net/if_arp.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
-#include <netinet/if_ether.h>
+#include <netinet/if_inarp.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
-#include <net/if_vlan_var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
@@ -89,21 +95,19 @@
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
/* XXX end porting goop */
-int et_match(struct device *, void *, void *);
-void et_attach(struct device *, struct device *, void *);
-int et_detach(struct device *, int flags);
-int et_shutdown(struct device *);
-
-int et_miibus_readreg(struct device *, int, int);
-void et_miibus_writereg(struct device *, int, int, int);
-void et_miibus_statchg(struct device *);
+int et_match(device_t, cfdata_t, void *);
+void et_attach(device_t, device_t, void *);
+int et_detach(device_t, int flags);
+int et_shutdown(device_t);
+
+int et_miibus_readreg(device_t, int, int);
+void et_miibus_writereg(device_t, int, int, int);
+void et_miibus_statchg(device_t);
int et_init(struct ifnet *ifp);
-int et_ioctl(struct ifnet *, u_long, caddr_t);
+int et_ioctl(struct ifnet *, u_long, void *);
void et_start(struct ifnet *);
void et_watchdog(struct ifnet *);
-int et_ifmedia_upd(struct ifnet *);
-void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int et_intr(void *);
void et_enable_intrs(struct et_softc *, uint32_t);
@@ -164,37 +168,51 @@ static const struct et_bsize et_bufsize[
{ .bufsize = 0, .newbuf = et_newbuf_cluster },
};
-const struct pci_matchid et_devices[] = {
- { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE },
- { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE }
-};
-
-struct cfattach et_ca = {
- sizeof (struct et_softc), et_match, et_attach, et_detach
+const struct et_product {
+ pci_vendor_id_t vendor;
+ pci_product_id_t product;
+} et_devices[] = {
+ { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
+ { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
};
-struct cfdriver et_cd = {
- NULL, "et", DV_IFNET
-};
+CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
+ NULL);
int
-et_match(struct device *dev, void *match, void *aux)
+et_match(device_t dev, cfdata_t match, void *aux)
{
- return pci_matchbyid((struct pci_attach_args *)aux, et_devices,
- sizeof (et_devices) / sizeof (et_devices[0]));
+ struct pci_attach_args *pa = aux;
+ const struct et_product *ep;
+ int i;
+
+ for (i = 0; i < sizeof(et_devices) / sizeof(et_devices[0]); i++) {
+ ep = &et_devices[i];
+ if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
+ PCI_PRODUCT(pa->pa_id) == ep->product)
+ return 1;
+ }
+ return 0;
}
void
-et_attach(struct device *parent, struct device *self, void *aux)
+et_attach(device_t parent, device_t self, void *aux)
{
- struct et_softc *sc = (struct et_softc *)self;
+ struct et_softc *sc = device_private(self);
struct pci_attach_args *pa = aux;
pci_chipset_tag_t pc = pa->pa_pc;
pci_intr_handle_t ih;
const char *intrstr;
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
pcireg_t memtype;
int error;
+ char devinfo[256];
+
+ aprint_naive(": Ethernet controller\n");
+
+ sc->sc_dev = self;
+ pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
+ aprint_normal(": %s (rev. 0x%02x)\n", devinfo,
+ PCI_REVISION(pa->pa_class));
/*
* Initialize tunables
@@ -206,27 +224,26 @@ et_attach(struct device *parent, struct
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
- &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
- printf(": could not map mem space\n");
+ &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
+ aprint_error_dev(self, "could not map mem space\n");
return;
}
if (pci_intr_map(pa, &ih) != 0) {
- printf(": could not map interrupt\n");
+ aprint_error_dev(self, "could not map interrupt\n");
return;
}
intrstr = pci_intr_string(pc, ih);
- sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc,
- sc->sc_dev.dv_xname);
+ sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc);
if (sc->sc_irq_handle == NULL) {
- printf(": could not establish interrupt");
+ aprint_error_dev(self, "could not establish interrupt");
if (intrstr != NULL)
- printf(" at %s", intrstr);
- printf("\n");
+ aprint_error(" at %s", intrstr);
+ aprint_error("\n");
return;
}
- printf(": %s", intrstr);
+ aprint_normal_dev(self, "interrupting at %s\n", intrstr);
sc->sc_dmat = pa->pa_dmat;
sc->sc_pct = pa->pa_pc;
@@ -236,9 +253,10 @@ et_attach(struct device *parent, struct
if (error)
return;
- et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr);
+ et_get_eaddr(sc, sc->sc_enaddr);
- printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
+ aprint_normal_dev(self, "Ethernet address %s\n",
+ ether_sprintf(sc->sc_enaddr));
CSR_WRITE_4(sc, ET_PM,
ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
@@ -252,6 +270,7 @@ et_attach(struct device *parent, struct
return;
ifp->if_softc = sc;
+ ifp->if_mtu = ETHERMTU;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_init = et_init;
ifp->if_ioctl = et_ioctl;
@@ -259,9 +278,7 @@ et_attach(struct device *parent, struct
ifp->if_watchdog = et_watchdog;
IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
IFQ_SET_READY(&ifp->if_snd);
- strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
-
- ifp->if_capabilities = IFCAP_VLAN_MTU;
+ strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
et_chip_attach(sc);
@@ -270,12 +287,13 @@ et_attach(struct device *parent, struct
sc->sc_miibus.mii_writereg = et_miibus_writereg;
sc->sc_miibus.mii_statchg = et_miibus_statchg;
- ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd,
- et_ifmedia_sts);
+ sc->sc_ethercom.ec_mii = &sc->sc_miibus;
+ ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange,
+ ether_mediastatus);
mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, 0);
if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
- printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(self, "no PHY found!\n");
ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
0, NULL);
ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
@@ -283,17 +301,19 @@ et_attach(struct device *parent, struct
ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
if_attach(ifp);
- ether_ifattach(ifp);
+ ether_ifattach(ifp, sc->sc_enaddr);
- timeout_set(&sc->sc_tick, et_tick, sc);
- timeout_set(&sc->sc_txtick, et_txtick, sc);
+ callout_init(&sc->sc_tick, 0);
+ callout_setfunc(&sc->sc_tick, et_tick, sc);
+ callout_init(&sc->sc_txtick, 0);
+ callout_setfunc(&sc->sc_txtick, et_txtick, sc);
}
int
-et_detach(struct device *self, int flags)
+et_detach(device_t self, int flags)
{
- struct et_softc *sc = (struct et_softc *)self;
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct et_softc *sc = device_private(self);
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
int s;
s = splnet();
@@ -320,9 +340,9 @@ et_detach(struct device *self, int flags
}
int
-et_shutdown(struct device *self)
+et_shutdown(device_t self)
{
- struct et_softc *sc = (struct et_softc *)self;
+ struct et_softc *sc = device_private(self);
int s;
s = splnet();
@@ -333,9 +353,9 @@ et_shutdown(struct device *self)
}
int
-et_miibus_readreg(struct device *dev, int phy, int reg)
+et_miibus_readreg(device_t dev, int phy, int reg)
{
- struct et_softc *sc = (struct et_softc *)dev;
+ struct et_softc *sc = device_private(dev);
uint32_t val;
int i, ret;
@@ -358,8 +378,8 @@ et_miibus_readreg(struct device *dev, in
DELAY(50);
}
if (i == NRETRY) {
- printf("%s: read phy %d, reg %d timed out\n",
- sc->sc_dev.dv_xname, phy, reg);
+ aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
+ phy, reg);
ret = 0;
goto back;
}
@@ -376,9 +396,9 @@ back:
}
void
-et_miibus_writereg(struct device *dev, int phy, int reg, int val0)
+et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
- struct et_softc *sc = (struct et_softc *)dev;
+ struct et_softc *sc = device_private(dev);
uint32_t val;
int i;
@@ -401,8 +421,8 @@ et_miibus_writereg(struct device *dev, i
DELAY(50);
}
if (i == NRETRY) {
- printf("%s: write phy %d, reg %d timed out\n",
- sc->sc_dev.dv_xname, phy, reg);
+ aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
+ phy, reg);
et_miibus_readreg(dev, phy, reg);
}
@@ -413,9 +433,9 @@ et_miibus_writereg(struct device *dev, i
}
void
-et_miibus_statchg(struct device *dev)
+et_miibus_statchg(device_t dev)
{
- struct et_softc *sc = (struct et_softc *)dev;
+ struct et_softc *sc = device_private(dev);
struct mii_data *mii = &sc->sc_miibus;
uint32_t cfg2, ctrl;
@@ -444,41 +464,13 @@ et_miibus_statchg(struct device *dev)
CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}
-int
-et_ifmedia_upd(struct ifnet *ifp)
-{
- struct et_softc *sc = ifp->if_softc;
- struct mii_data *mii = &sc->sc_miibus;
-
- if (mii->mii_instance != 0) {
- struct mii_softc *miisc;
-
- LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
- mii_phy_reset(miisc);
- }
- mii_mediachg(mii);
-
- return 0;
-}
-
-void
-et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
-{
- struct et_softc *sc = ifp->if_softc;
- struct mii_data *mii = &sc->sc_miibus;
-
- mii_pollstat(mii);
- ifmr->ifm_active = mii->mii_media_active;
- ifmr->ifm_status = mii->mii_media_status;
-}
-
void
et_stop(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
- timeout_del(&sc->sc_tick);
- timeout_del(&sc->sc_txtick);
+ callout_stop(&sc->sc_tick);
+ callout_stop(&sc->sc_txtick);
et_stop_rxdma(sc);
et_stop_txdma(sc);
@@ -511,8 +503,7 @@ et_bus_config(struct et_softc *sc)
val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
- printf("%s: EEPROM status error 0x%02x\n",
- sc->sc_dev.dv_xname, val);
+ aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n",
+ val);
return ENXIO;
}
@@ -541,8 +532,8 @@ et_bus_config(struct et_softc *sc)
ET_PCIR_ACK_LATENCY) >> 16;
replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
ET_PCIR_REPLAY_TIMER) >> 16;
- printf("%s: ack latency %u, replay timer %u\n",
- sc->sc_dev.dv_xname, ack_latency, replay_timer);
+ aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
+ ack_latency, replay_timer);
break;
}
if (ack_latency != 0) {
@@ -635,8 +626,7 @@ et_dma_alloc(struct et_softc *sc)
(void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
&tx_ring->tr_seg);
if (error) {
- printf("%s: can't create TX ring DMA stuffs\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
return error;
}
@@ -647,8 +637,7 @@ et_dma_alloc(struct et_softc *sc)
(void **)&txsd->txsd_status,
&txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
if (error) {
- printf("%s: can't create TX status DMA stuffs\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
return error;
}
@@ -665,8 +654,8 @@ et_dma_alloc(struct et_softc *sc)
(void **)&rx_ring->rr_desc,
&rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
if (error) {
- printf("%s: can't create DMA stuffs for "
- "the %d RX ring\n", sc->sc_dev.dv_xname, i);
+ aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
+ "the %d RX ring\n", i);
return error;
}
rx_ring->rr_posreg = rx_ring_posreg[i];
@@ -679,8 +668,7 @@ et_dma_alloc(struct et_softc *sc)
(void **)&rxst_ring->rsr_stat,
&rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
if (error) {
- printf("%s: can't create RX stat ring DMA stuffs\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
return error;
}
@@ -691,8 +679,7 @@ et_dma_alloc(struct et_softc *sc)
(void **)&rxsd->rxsd_status,
&rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
if (error) {
- printf("%s: can't create RX status DMA stuffs\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
return error;
}
@@ -764,8 +751,7 @@ et_dma_mbuf_create(struct et_softc *sc)
error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
if (error) {
- printf("%s: can't create spare mbuf DMA map\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
return error;
}
@@ -782,9 +768,8 @@ et_dma_mbuf_create(struct et_softc *sc)
MCLBYTES, 0, BUS_DMA_NOWAIT,
&rbd->rbd_buf[j].rb_dmap);
if (error) {
- printf("%s: can't create %d RX mbuf "
- "for %d RX ring\n", sc->sc_dev.dv_xname,
- j, i);
+ aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
+ "for %d RX ring\n", j, i);
rx_done[i] = j;
et_dma_mbuf_destroy(sc, 0, rx_done);
return error;
@@ -803,8 +788,8 @@ et_dma_mbuf_create(struct et_softc *sc)
error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
if (error) {
- printf("%s: can't create %d TX mbuf "
- "DMA map\n", sc->sc_dev.dv_xname, i);
+ aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
+ "DMA map\n", i);
et_dma_mbuf_destroy(sc, i, rx_done);
return error;
}
@@ -860,28 +845,28 @@ et_dma_mem_create(struct et_softc *sc, b
error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
dmap);
if (error) {
- printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
return error;
}
error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
1, &nsegs, BUS_DMA_WAITOK);
if (error) {
- printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
return error;
}
error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
- size, (caddr_t *)addr, BUS_DMA_NOWAIT);
+ size, (void **)addr, BUS_DMA_NOWAIT);
if (error) {
- printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
return (error);
}
error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
BUS_DMA_WAITOK);
if (error) {
- printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
return error;
}
@@ -951,7 +936,7 @@ int
et_intr(void *xsc)
{
struct et_softc *sc = xsc;
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
uint32_t intrs;
if ((ifp->if_flags & IFF_RUNNING) == 0)
@@ -985,6 +970,9 @@ et_init(struct ifnet *ifp)
const struct et_bsize *arr;
int error, i, s;
+ if (ifp->if_flags & IFF_RUNNING)
+ return 0;
+
s = splnet();
et_stop(sc);
@@ -1021,7 +1009,7 @@ et_init(struct ifnet *ifp)
et_enable_intrs(sc, ET_INTRS);
- timeout_add(&sc->sc_tick, hz);
+ callout_schedule(&sc->sc_tick, hz);
CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
@@ -1037,7 +1025,7 @@ back:
}
int
-et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
struct et_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *)data;
@@ -1046,11 +1034,6 @@ et_ioctl(struct ifnet *ifp, u_long cmd,
s = splnet();
- if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
- splx(s);
- return error;
- }
-
switch (cmd) {
case SIOCSIFADDR:
ifp->if_flags |= IFF_UP;
@@ -1058,14 +1041,14 @@ et_ioctl(struct ifnet *ifp, u_long cmd,
et_init(ifp);
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
- arp_ifinit(&sc->sc_arpcom, ifa);
+ arp_ifinit(ifp, ifa);
#endif
break;
case SIOCSIFMTU:
- if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
error = EINVAL;
- else if (ifp->if_mtu != ifr->ifr_mtu)
- ifp->if_mtu = ifr->ifr_mtu;
+ else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
+ error = 0;
break;
case SIOCSIFFLAGS:
if (ifp->if_flags & IFF_UP) {
@@ -1090,11 +1073,7 @@ et_ioctl(struct ifnet *ifp, u_long cmd,
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
- error = (cmd == SIOCADDMULTI) ?
- ether_addmulti(ifr, &sc->sc_arpcom) :
- ether_delmulti(ifr, &sc->sc_arpcom);
-
- if (error == ENETRESET) {
+ if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
if (ifp->if_flags & IFF_RUNNING)
et_setmulti(sc);
error = 0;
@@ -1105,7 +1084,14 @@ et_ioctl(struct ifnet *ifp, u_long cmd,
error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
break;
default:
- error = ENOTTY;
+ error = ether_ioctl(ifp, cmd, data);
+ if (error == ENETRESET) {
+ if (ifp->if_flags & IFF_RUNNING)
+ et_setmulti(sc);
+ error = 0;
+ }
+ break;
+
}
splx(s);
@@ -1145,12 +1131,12 @@ et_start(struct ifnet *ifp)
#if NBPFILTER > 0
if (ifp->if_bpf != NULL)
- bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+ bpf_mtap(ifp->if_bpf, m);
#endif
}
if (trans) {
- timeout_add(&sc->sc_txtick, hz);
+ callout_schedule(&sc->sc_txtick, hz);
ifp->if_timer = 5;
}
}
@@ -1159,8 +1145,9 @@ void
et_watchdog(struct ifnet *ifp)
{
struct et_softc *sc = ifp->if_softc;
- printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
+ ifp->if_flags &= ~IFF_RUNNING;
et_init(ifp);
et_start(ifp);
}
@@ -1173,7 +1160,7 @@ et_stop_rxdma(struct et_softc *sc)
DELAY(5);
if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
- printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
return ETIMEDOUT;
}
return 0;
@@ -1238,8 +1225,8 @@ et_free_rx_ring(struct et_softc *sc)
void
et_setmulti(struct et_softc *sc)
{
- struct arpcom *ac = &sc->sc_arpcom;
- struct ifnet *ifp = &ac->ac_if;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
uint32_t hash[4] = { 0, 0, 0, 0 };
uint32_t rxmac_ctrl, pktfilt;
struct ether_multi *enm;
@@ -1259,7 +1246,7 @@ et_setmulti(struct et_softc *sc)
bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
count = 0;
- ETHER_FIRST_MULTI(step, ac, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
uint32_t *hp, h;
@@ -1302,7 +1289,7 @@ back:
int
et_chip_init(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
uint32_t rxq_end;
int error;
@@ -1389,8 +1376,8 @@ et_init_rx_ring(struct et_softc *sc)
for (i = 0; i < ET_RX_NDESC; ++i) {
error = rbd->rbd_newbuf(rbd, i, 1);
if (error) {
- printf("%s: %d ring %d buf, newbuf failed: "
- "%d\n", sc->sc_dev.dv_xname, n, i, error);
+ aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: "
+ "%d\n", n, i, error);
return error;
}
}
@@ -1417,7 +1404,7 @@ et_init_rxdma(struct et_softc *sc)
error = et_stop_rxdma(sc);
if (error) {
- printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
return error;
}
@@ -1486,7 +1473,7 @@ et_init_txdma(struct et_softc *sc)
error = et_stop_txdma(sc);
if (error) {
- printf("%s: can't init TX DMA engine\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
return error;
}
@@ -1515,8 +1502,8 @@ et_init_txdma(struct et_softc *sc)
void
et_init_mac(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
- const uint8_t *eaddr = LLADDR(ifp->if_sadl);
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
uint32_t val;
/* Reset MAC */
@@ -1559,7 +1546,7 @@ et_init_mac(struct et_softc *sc)
/* Set max frame length */
CSR_WRITE_4(sc, ET_MAX_FRMLEN,
- ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN);
+ ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
/* Bring MAC out of reset state */
CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
@@ -1568,8 +1555,8 @@ et_init_mac(struct et_softc *sc)
void
et_init_rxmac(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
- const uint8_t *eaddr = LLADDR(ifp->if_sadl);
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
uint32_t val;
int i;
@@ -1677,7 +1664,7 @@ et_start_rxdma(struct et_softc *sc)
DELAY(5);
if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
- printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
return ETIMEDOUT;
}
return 0;
@@ -1693,9 +1680,9 @@ et_start_txdma(struct et_softc *sc)
int
et_enable_txrx(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
uint32_t val;
- int i;
+ int i, rc = 0;
val = CSR_READ_4(sc, ET_MAC_CFG1);
val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
@@ -1703,7 +1690,8 @@ et_enable_txrx(struct et_softc *sc)
ET_MAC_CFG1_LOOPBACK);
CSR_WRITE_4(sc, ET_MAC_CFG1, val);
- et_ifmedia_upd(ifp);
+ if ((rc = ether_mediachange(ifp)) != 0)
+ goto out;
#define NRETRY 100
@@ -1716,18 +1704,20 @@ et_enable_txrx(struct et_softc *sc)
DELAY(10);
}
if (i == NRETRY) {
- printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
return ETIMEDOUT;
}
#undef NRETRY
return 0;
+out:
+ return rc;
}
void
et_rxeof(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
uint32_t rxs_stat_ring;
@@ -1771,14 +1761,14 @@ et_rxeof(struct et_softc *sc)
if (ring_idx >= ET_RX_NRING) {
ifp->if_ierrors++;
- printf("%s: invalid ring index %d\n",
- sc->sc_dev.dv_xname, ring_idx);
+ aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
+ ring_idx);
continue;
}
if (buf_idx >= ET_RX_NDESC) {
ifp->if_ierrors++;
- printf("%s: invalid buf index %d\n",
- sc->sc_dev.dv_xname, buf_idx);
+ aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
+ buf_idx);
continue;
}
@@ -1799,12 +1789,11 @@ et_rxeof(struct et_softc *sc)
#if NBPFILTER > 0
if (ifp->if_bpf != NULL)
- bpf_mtap(ifp->if_bpf, m,
- BPF_DIRECTION_IN);
+ bpf_mtap(ifp->if_bpf, m);
#endif
ifp->if_ipackets++;
- ether_input_mbuf(ifp, m);
+ (*ifp->if_input)(ifp, m);
}
} else {
ifp->if_ierrors++;
@@ -1813,8 +1802,8 @@ et_rxeof(struct et_softc *sc)
rx_ring = &sc->sc_rx_ring[ring_idx];
if (buf_idx != rx_ring->rr_index) {
- printf("%s: WARNING!! ring %d, "
- "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname,
+ aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
+ "buf_idx %d, rr_idx %d\n",
ring_idx, buf_idx, rx_ring->rr_index);
}
@@ -1858,7 +1847,7 @@ et_encap(struct et_softc *sc, struct mbu
error = EFBIG;
}
if (error && error != EFBIG) {
- printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't load TX mbuf");
goto back;
}
if (error) { /* error == EFBIG */
@@ -1869,13 +1858,12 @@ et_encap(struct et_softc *sc, struct mbu
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL) {
m_freem(m);
- printf("%s: can't defrag TX mbuf\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
error = ENOBUFS;
goto back;
}
- M_DUP_PKTHDR(m_new, m);
+ M_COPY_PKTHDR(m_new, m);
if (m->m_pkthdr.len > MHLEN) {
MCLGET(m_new, M_DONTWAIT);
if (!(m_new->m_flags & M_EXT)) {
@@ -1886,12 +1874,11 @@ et_encap(struct et_softc *sc, struct mbu
}
if (error) {
- printf("%s: can't defrag TX buffer\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
goto back;
}
- m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
m_freem(m);
m_new->m_len = m_new->m_pkthdr.len;
*m0 = m = m_new;
@@ -1903,8 +1890,7 @@ et_encap(struct et_softc *sc, struct mbu
bus_dmamap_unload(sc->sc_dmat, map);
error = EFBIG;
}
- printf("%s: can't load defraged TX mbuf\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n");
goto back;
}
}
@@ -1974,7 +1960,7 @@ back:
void
et_txeof(struct et_softc *sc)
{
- struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
struct et_txbuf_data *tbd = &sc->sc_tx_data;
uint32_t tx_done;
@@ -2015,7 +2001,7 @@ et_txeof(struct et_softc *sc)
}
if (tbd->tbd_used == 0) {
- timeout_del(&sc->sc_txtick);
+ callout_stop(&sc->sc_txtick);
ifp->if_timer = 0;
}
if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
@@ -2043,7 +2029,7 @@ et_tick(void *xsc)
s = splnet();
mii_tick(&sc->sc_miibus);
- timeout_add(&sc->sc_tick, hz);
+ callout_schedule(&sc->sc_tick, hz);
splx(s);
}
@@ -2088,8 +2074,7 @@ et_newbuf(struct et_rxbuf_data *rbd, int
error = ENOBUFS;
/* XXX for debug */
- printf("%s: M_CLGET failed, size %d\n", sc->sc_dev.dv_xname,
- len0);
+ aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
if (init) {
return error;
} else {
@@ -2107,13 +2092,12 @@ et_newbuf(struct et_rxbuf_data *rbd, int
if (!error) {
bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
error = EFBIG;
- printf("%s: too many segments?!\n",
- sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "too many segments?!\n");
}
m_freem(m);
/* XXX for debug */
- printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
+ aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
if (init) {
return error;
} else {
--- sys/dev/pci/if_etreg.h.orig 2008-07-01 18:51:41.000000000 +0200
+++ sys/dev/pci/if_etreg.h 2008-07-01 07:14:19.000000000 +0200
@@ -1,3 +1,4 @@
+/* $NetBSD$ */
/* $OpenBSD: if_etreg.h,v 1.3 2008/06/08 06:18:07 jsg Exp $ */
/*
@@ -482,8 +483,9 @@ struct et_rxbuf_data {
};
struct et_softc {
- struct device sc_dev;
- struct arpcom sc_arpcom;
+ device_t sc_dev;
+ struct ethercom sc_ethercom;
+ uint8_t sc_enaddr[ETHER_ADDR_LEN];
int sc_if_flags;
int sc_mem_rid;
@@ -500,7 +502,7 @@ struct et_softc {
void *sc_irq_handle;
struct mii_data sc_miibus;
- struct timeout sc_tick;
+ callout_t sc_tick;
struct et_rxdesc_ring sc_rx_ring[ET_RX_NRING];
struct et_rxstat_ring sc_rxstat_ring;
@@ -508,7 +510,7 @@ struct et_softc {
struct et_txdesc_ring sc_tx_ring;
struct et_txstatus_data sc_tx_status;
- struct timeout sc_txtick;
+ callout_t sc_txtick;
bus_dmamap_t sc_mbuf_tmp_dmap;
struct et_rxbuf_data sc_rx_data[ET_RX_NRING];
--------------020302010700040001020308--
>Unformatted:
This is a multi-part message in MIME format.
--------------020302010700040001020308
Content-Type: text/plain; charset=ISO-8859-1
Content-Transfer-Encoding: 7bit
Home |
Main Index |
Thread Index |
Old Index