Ticket #2767: fbsd_if_re_port.diff

File fbsd_if_re_port.diff, 120.8 KB (added by nopper, 16 years ago)

if_re FreeBSD driver port

  • build/jam/HaikuImage

     
    131 131   ;
    132 132   BEOS_ADD_ONS_DRIVERS_MIDI = emuxki ;
    133 133   BEOS_ADD_ONS_DRIVERS_NET = $(X86_ONLY)3com etherpci $(X86_ONLY)ipro1000
    134     -     $(X86_ONLY)rtl8139 rtl8169 sis900 $(X86_ONLY)via_rhine wb840
        134 +     $(X86_ONLY)rtl8139 rtl8169 $(X86_ONLY)re sis900 $(X86_ONLY)via_rhine wb840
    135 135       $(X86_ONLY)ipro100 $(X86_ONLY)nforce #vlance
    136 136       $(X86_ONLY)marvell_yukon $(X86_ONLY)syskonnect usb_ecm
    137 137       $(GPL_ONLY)bcm440x $(GPL_ONLY)bcm570x
  • src/add-ons/kernel/drivers/network/re/Jamfile

     
     1SubDir HAIKU_TOP src add-ons kernel drivers network re ;
     2
     3SubInclude HAIKU_TOP src add-ons kernel drivers network re pci ;
  • src/add-ons/kernel/drivers/network/re/pci/if_rlreg.h

     
     1/*-
     2 * Copyright (c) 1997, 1998-2003
     3 *  Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
     4 *
     5 * Redistribution and use in source and binary forms, with or without
     6 * modification, are permitted provided that the following conditions
     7 * are met:
     8 * 1. Redistributions of source code must retain the above copyright
     9 *    notice, this list of conditions and the following disclaimer.
     10 * 2. Redistributions in binary form must reproduce the above copyright
     11 *    notice, this list of conditions and the following disclaimer in the
     12 *    documentation and/or other materials provided with the distribution.
     13 * 3. All advertising materials mentioning features or use of this software
     14 *    must display the following acknowledgement:
     15 *  This product includes software developed by Bill Paul.
     16 * 4. Neither the name of the author nor the names of any co-contributors
     17 *    may be used to endorse or promote products derived from this software
     18 *    without specific prior written permission.
     19 *
     20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     30 * THE POSSIBILITY OF SUCH DAMAGE.
     31 *
     32 * $FreeBSD: src/sys/pci/if_rlreg.h,v 1.67.2.14 2008/08/09 02:07:30 yongari Exp $
     33 */
     34
     35/*
     36 * RealTek 8129/8139 register offsets
     37 */
     38#define RL_IDR0     0x0000      /* ID register 0 (station addr) */
     39#define RL_IDR1     0x0001      /* Must use 32-bit accesses (?) */
     40#define RL_IDR2     0x0002
     41#define RL_IDR3     0x0003
     42#define RL_IDR4     0x0004
     43#define RL_IDR5     0x0005
     44                    /* 0006-0007 reserved */
     45#define RL_MAR0     0x0008      /* Multicast hash table */
     46#define RL_MAR1     0x0009
     47#define RL_MAR2     0x000A
     48#define RL_MAR3     0x000B
     49#define RL_MAR4     0x000C
     50#define RL_MAR5     0x000D
     51#define RL_MAR6     0x000E
     52#define RL_MAR7     0x000F
     53
     54#define RL_TXSTAT0  0x0010      /* status of TX descriptor 0 */
     55#define RL_TXSTAT1  0x0014      /* status of TX descriptor 1 */
     56#define RL_TXSTAT2  0x0018      /* status of TX descriptor 2 */
     57#define RL_TXSTAT3  0x001C      /* status of TX descriptor 3 */
     58
     59#define RL_TXADDR0  0x0020      /* address of TX descriptor 0 */
     60#define RL_TXADDR1  0x0024      /* address of TX descriptor 1 */
     61#define RL_TXADDR2  0x0028      /* address of TX descriptor 2 */
     62#define RL_TXADDR3  0x002C      /* address of TX descriptor 3 */
     63
     64#define RL_RXADDR       0x0030  /* RX ring start address */
     65#define RL_RX_EARLY_BYTES   0x0034  /* RX early byte count */
     66#define RL_RX_EARLY_STAT    0x0036  /* RX early status */
     67#define RL_COMMAND  0x0037      /* command register */
     68#define RL_CURRXADDR    0x0038      /* current address of packet read */
     69#define RL_CURRXBUF 0x003A      /* current RX buffer address */
     70#define RL_IMR      0x003C      /* interrupt mask register */
     71#define RL_ISR      0x003E      /* interrupt status register */
     72#define RL_TXCFG    0x0040      /* transmit config */
     73#define RL_RXCFG    0x0044      /* receive config */
     74#define RL_TIMERCNT 0x0048      /* timer count register */
     75#define RL_MISSEDPKT    0x004C      /* missed packet counter */
     76#define RL_EECMD    0x0050      /* EEPROM command register */
     77#define RL_CFG0     0x0051      /* config register #0 */
     78#define RL_CFG1     0x0052      /* config register #1 */
     79#define RL_CFG2     0x0053      /* config register #2 */
     80#define RL_CFG3     0x0054      /* config register #3 */
     81#define RL_CFG4     0x0055      /* config register #4 */
     82#define RL_CFG5     0x0056      /* config register #5 */
     83                    /* 0057 reserved */
     84#define RL_MEDIASTAT    0x0058      /* media status register (8139) */
     85                    /* 0059-005A reserved */
     86#define RL_MII      0x005A      /* 8129 chip only */
     87#define RL_HALTCLK  0x005B
     88#define RL_MULTIINTR    0x005C      /* multiple interrupt */
     89#define RL_PCIREV   0x005E      /* PCI revision value */
     90                    /* 005F reserved */
     91#define RL_TXSTAT_ALL   0x0060      /* TX status of all descriptors */
     92
     93/* Direct PHY access registers only available on 8139 */
     94#define RL_BMCR     0x0062      /* PHY basic mode control */
     95#define RL_BMSR     0x0064      /* PHY basic mode status */
     96#define RL_ANAR     0x0066      /* PHY autoneg advert */
     97#define RL_LPAR     0x0068      /* PHY link partner ability */
     98#define RL_ANER     0x006A      /* PHY autoneg expansion */
     99
     100#define RL_DISCCNT  0x006C      /* disconnect counter */
     101#define RL_FALSECAR 0x006E      /* false carrier counter */
     102#define RL_NWAYTST  0x0070      /* NWAY test register */
     103#define RL_RX_ER    0x0072      /* RX_ER counter */
     104#define RL_CSCFG    0x0074      /* CS configuration register */
     105
     106/*
     107 * When operating in special C+ mode, some of the registers in an
     108 * 8139C+ chip have different definitions. These are also used for
     109 * the 8169 gigE chip.
     110 */
     111#define RL_DUMPSTATS_LO     0x0010  /* counter dump command register */
     112#define RL_DUMPSTATS_HI     0x0014  /* counter dump command register */
     113#define RL_TXLIST_ADDR_LO   0x0020  /* 64 bits, 256 byte alignment */
     114#define RL_TXLIST_ADDR_HI   0x0024  /* 64 bits, 256 byte alignment */
     115#define RL_TXLIST_ADDR_HPRIO_LO 0x0028  /* 64 bits, 256 byte alignment */
     116#define RL_TXLIST_ADDR_HPRIO_HI 0x002C  /* 64 bits, 256 byte alignment */
     117#define RL_CFG2         0x0053
     118#define RL_TIMERINT     0x0054  /* interrupt on timer expire */
     119#define RL_TXSTART      0x00D9  /* 8 bits */
     120#define RL_CPLUS_CMD        0x00E0  /* 16 bits */
     121#define RL_RXLIST_ADDR_LO   0x00E4  /* 64 bits, 256 byte alignment */
     122#define RL_RXLIST_ADDR_HI   0x00E8  /* 64 bits, 256 byte alignment */
     123#define RL_EARLY_TX_THRESH  0x00EC  /* 8 bits */
     124
     125/*
     126 * Registers specific to the 8169 gigE chip
     127 */
     128#define RL_TIMERINT_8169    0x0058  /* different offset than 8139 */
     129#define RL_PHYAR        0x0060
     130#define RL_TBICSR       0x0064
     131#define RL_TBI_ANAR     0x0068
     132#define RL_TBI_LPAR     0x006A
     133#define RL_GMEDIASTAT       0x006C  /* 8 bits */
     134#define RL_MAXRXPKTLEN      0x00DA  /* 16 bits, chip multiplies by 8 */
     135#define RL_GTXSTART     0x0038  /* 8 bits */
     136
     137/*
     138 * TX config register bits
     139 */
     140#define RL_TXCFG_CLRABRT    0x00000001  /* retransmit aborted pkt */
     141#define RL_TXCFG_MAXDMA     0x00000700  /* max DMA burst size */
     142#define RL_TXCFG_CRCAPPEND  0x00010000  /* CRC append (0 = yes) */
     143#define RL_TXCFG_LOOPBKTST  0x00060000  /* loopback test */
     144#define RL_TXCFG_IFG2       0x00080000  /* 8169 only */
     145#define RL_TXCFG_IFG        0x03000000  /* interframe gap */
     146#define RL_TXCFG_HWREV      0x7CC00000
     147
     148#define RL_LOOPTEST_OFF     0x00000000
     149#define RL_LOOPTEST_ON      0x00020000
     150#define RL_LOOPTEST_ON_CPLUS    0x00060000
     151
     152/* Known revision codes. */
     153
     154#define RL_HWREV_8169       0x00000000
     155#define RL_HWREV_8110S      0x00800000
     156#define RL_HWREV_8169S      0x04000000
     157#define RL_HWREV_8169_8110SB    0x10000000
     158#define RL_HWREV_8169_8110SC    0x18000000
     159#define RL_HWREV_8102EL     0x24800000
     160#define RL_HWREV_8168_SPIN1 0x30000000
     161#define RL_HWREV_8100E      0x30800000
     162#define RL_HWREV_8101E      0x34000000
     163#define RL_HWREV_8102E      0x34800000
     164#define RL_HWREV_8168_SPIN2 0x38000000
     165#define RL_HWREV_8168_SPIN3 0x38400000
     166#define RL_HWREV_8168C      0x3C000000
     167#define RL_HWREV_8168C_SPIN2    0x3C400000
     168#define RL_HWREV_8168CP     0x3C800000
     169#define RL_HWREV_8139       0x60000000
     170#define RL_HWREV_8139A      0x70000000
     171#define RL_HWREV_8139AG     0x70800000
     172#define RL_HWREV_8139B      0x78000000
     173#define RL_HWREV_8130       0x7C000000
     174#define RL_HWREV_8139C      0x74000000
     175#define RL_HWREV_8139D      0x74400000
     176#define RL_HWREV_8139CPLUS  0x74800000
     177#define RL_HWREV_8101       0x74c00000
     178#define RL_HWREV_8100       0x78800000
     179#define RL_HWREV_8169_8110SBL   0x7CC00000
     180
     181#define RL_TXDMA_16BYTES    0x00000000
     182#define RL_TXDMA_32BYTES    0x00000100
     183#define RL_TXDMA_64BYTES    0x00000200
     184#define RL_TXDMA_128BYTES   0x00000300
     185#define RL_TXDMA_256BYTES   0x00000400
     186#define RL_TXDMA_512BYTES   0x00000500
     187#define RL_TXDMA_1024BYTES  0x00000600
     188#define RL_TXDMA_2048BYTES  0x00000700
     189
     190/*
     191 * Transmit descriptor status register bits.
     192 */
     193#define RL_TXSTAT_LENMASK   0x00001FFF
     194#define RL_TXSTAT_OWN       0x00002000
     195#define RL_TXSTAT_TX_UNDERRUN   0x00004000
     196#define RL_TXSTAT_TX_OK     0x00008000
     197#define RL_TXSTAT_EARLY_THRESH  0x003F0000
     198#define RL_TXSTAT_COLLCNT   0x0F000000
     199#define RL_TXSTAT_CARR_HBEAT    0x10000000
     200#define RL_TXSTAT_OUTOFWIN  0x20000000
     201#define RL_TXSTAT_TXABRT    0x40000000
     202#define RL_TXSTAT_CARRLOSS  0x80000000
     203
     204/*
     205 * Interrupt status register bits.
     206 */
     207#define RL_ISR_RX_OK        0x0001
     208#define RL_ISR_RX_ERR       0x0002
     209#define RL_ISR_TX_OK        0x0004
     210#define RL_ISR_TX_ERR       0x0008
     211#define RL_ISR_RX_OVERRUN   0x0010
     212#define RL_ISR_PKT_UNDERRUN 0x0020
     213#define RL_ISR_LINKCHG      0x0020  /* 8169 only */
     214#define RL_ISR_FIFO_OFLOW   0x0040  /* 8139 only */
     215#define RL_ISR_TX_DESC_UNAVAIL  0x0080  /* C+ only */
     216#define RL_ISR_SWI      0x0100  /* C+ only */
     217#define RL_ISR_CABLE_LEN_CHGD   0x2000
     218#define RL_ISR_PCS_TIMEOUT  0x4000  /* 8129 only */
     219#define RL_ISR_TIMEOUT_EXPIRED  0x4000
     220#define RL_ISR_SYSTEM_ERR   0x8000
     221
     222#define RL_INTRS    \
     223    (RL_ISR_TX_OK|RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_TX_ERR|     \
     224    RL_ISR_RX_OVERRUN|RL_ISR_PKT_UNDERRUN|RL_ISR_FIFO_OFLOW|    \
     225    RL_ISR_PCS_TIMEOUT|RL_ISR_SYSTEM_ERR)
     226
     227#ifdef RE_TX_MODERATION
     228#define RL_INTRS_CPLUS  \
     229    (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_TX_ERR|          \
     230    RL_ISR_RX_OVERRUN|RL_ISR_PKT_UNDERRUN|RL_ISR_FIFO_OFLOW|    \
     231    RL_ISR_PCS_TIMEOUT|RL_ISR_SYSTEM_ERR|RL_ISR_TIMEOUT_EXPIRED)
     232#else
     233#define RL_INTRS_CPLUS  \
     234    (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_TX_ERR|RL_ISR_TX_OK|     \
     235    RL_ISR_RX_OVERRUN|RL_ISR_PKT_UNDERRUN|RL_ISR_FIFO_OFLOW|    \
     236    RL_ISR_PCS_TIMEOUT|RL_ISR_SYSTEM_ERR|RL_ISR_TIMEOUT_EXPIRED)
     237#endif
     238
     239/*
     240 * Media status register. (8139 only)
     241 */
     242#define RL_MEDIASTAT_RXPAUSE    0x01
     243#define RL_MEDIASTAT_TXPAUSE    0x02
     244#define RL_MEDIASTAT_LINK   0x04
     245#define RL_MEDIASTAT_SPEED10    0x08
      246#define RL_MEDIASTAT_RXFLOWCTL  0x40    /* RX flow control enabled */
      247#define RL_MEDIASTAT_TXFLOWCTL  0x80    /* TX flow control enabled */
     248
     249/*
     250 * Receive config register.
     251 */
     252#define RL_RXCFG_RX_ALLPHYS 0x00000001  /* accept all nodes */
     253#define RL_RXCFG_RX_INDIV   0x00000002  /* match filter */
     254#define RL_RXCFG_RX_MULTI   0x00000004  /* accept all multicast */
     255#define RL_RXCFG_RX_BROAD   0x00000008  /* accept all broadcast */
     256#define RL_RXCFG_RX_RUNT    0x00000010
     257#define RL_RXCFG_RX_ERRPKT  0x00000020
     258#define RL_RXCFG_WRAP       0x00000080
     259#define RL_RXCFG_MAXDMA     0x00000700
     260#define RL_RXCFG_BUFSZ      0x00001800
     261#define RL_RXCFG_FIFOTHRESH 0x0000E000
     262#define RL_RXCFG_EARLYTHRESH    0x07000000
     263
     264#define RL_RXDMA_16BYTES    0x00000000
     265#define RL_RXDMA_32BYTES    0x00000100
     266#define RL_RXDMA_64BYTES    0x00000200
     267#define RL_RXDMA_128BYTES   0x00000300
     268#define RL_RXDMA_256BYTES   0x00000400
     269#define RL_RXDMA_512BYTES   0x00000500
     270#define RL_RXDMA_1024BYTES  0x00000600
     271#define RL_RXDMA_UNLIMITED  0x00000700
     272
     273#define RL_RXBUF_8      0x00000000
     274#define RL_RXBUF_16     0x00000800
     275#define RL_RXBUF_32     0x00001000
     276#define RL_RXBUF_64     0x00001800
     277
     278#define RL_RXFIFO_16BYTES   0x00000000
     279#define RL_RXFIFO_32BYTES   0x00002000
     280#define RL_RXFIFO_64BYTES   0x00004000
     281#define RL_RXFIFO_128BYTES  0x00006000
     282#define RL_RXFIFO_256BYTES  0x00008000
     283#define RL_RXFIFO_512BYTES  0x0000A000
     284#define RL_RXFIFO_1024BYTES 0x0000C000
     285#define RL_RXFIFO_NOTHRESH  0x0000E000
     286
     287/*
     288 * Bits in RX status header (included with RX'ed packet
     289 * in ring buffer).
     290 */
     291#define RL_RXSTAT_RXOK      0x00000001
     292#define RL_RXSTAT_ALIGNERR  0x00000002
     293#define RL_RXSTAT_CRCERR    0x00000004
     294#define RL_RXSTAT_GIANT     0x00000008
     295#define RL_RXSTAT_RUNT      0x00000010
     296#define RL_RXSTAT_BADSYM    0x00000020
     297#define RL_RXSTAT_BROAD     0x00002000
     298#define RL_RXSTAT_INDIV     0x00004000
     299#define RL_RXSTAT_MULTI     0x00008000
     300#define RL_RXSTAT_LENMASK   0xFFFF0000
     301
     302#define RL_RXSTAT_UNFINISHED    0xFFF0      /* DMA still in progress */
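/*
 * Illustrative sketch (not part of the original FreeBSD driver or of this
 * patch): how the 32-bit status header that precedes each frame in the
 * 8139-style RX ring buffer is typically interpreted with the bits above.
 * The function and parameter names are hypothetical; only the RL_RXSTAT_*
 * macros come from this header.
 */
#if 0
static int
example_parse_rx_header(const uint8_t *rx_ring, uint16_t cur_rx, int *lenp)
{
        uint32_t rxstat;
        uint16_t rxlen;

        /* The header is stored little-endian at the current read offset. */
        memcpy(&rxstat, rx_ring + cur_rx, sizeof(rxstat));
        rxstat = le32toh(rxstat);

        rxlen = rxstat >> 16;                   /* RL_RXSTAT_LENMASK field */
        if (rxlen == RL_RXSTAT_UNFINISHED)
                return (0);                     /* DMA still in progress */
        if ((rxstat & RL_RXSTAT_RXOK) == 0 ||
            (rxstat & (RL_RXSTAT_CRCERR | RL_RXSTAT_ALIGNERR |
            RL_RXSTAT_RUNT | RL_RXSTAT_GIANT)) != 0)
                return (-1);                    /* bad frame, drop it */
        *lenp = rxlen;                          /* includes the 4-byte CRC */
        return (1);
}
#endif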
     303/*
     304 * Command register.
     305 */
     306#define RL_CMD_EMPTY_RXBUF  0x0001
     307#define RL_CMD_TX_ENB       0x0004
     308#define RL_CMD_RX_ENB       0x0008
     309#define RL_CMD_RESET        0x0010
     310
     311/*
     312 * EEPROM control register
     313 */
     314#define RL_EE_DATAOUT       0x01    /* Data out */
     315#define RL_EE_DATAIN        0x02    /* Data in */
     316#define RL_EE_CLK       0x04    /* clock */
     317#define RL_EE_SEL       0x08    /* chip select */
     318#define RL_EE_MODE      (0x40|0x80)
     319
     320#define RL_EEMODE_OFF       0x00
     321#define RL_EEMODE_AUTOLOAD  0x40
     322#define RL_EEMODE_PROGRAM   0x80
     323#define RL_EEMODE_WRITECFG  (0x80|0x40)
     324
     325/* 9346 EEPROM commands */
     326#define RL_9346_ADDR_LEN    6   /* 93C46 1K: 128x16 */
     327#define RL_9356_ADDR_LEN    8   /* 93C56 2K: 256x16 */
     328
     329#define RL_9346_WRITE          0x5
     330#define RL_9346_READ           0x6
     331#define RL_9346_ERASE          0x7
     332#define RL_9346_EWEN           0x4
     333#define RL_9346_EWEN_ADDR      0x30
     334#define RL_9456_EWDS           0x4
     335#define RL_9346_EWDS_ADDR      0x00
     336
     337#define RL_EECMD_WRITE      0x140
     338#define RL_EECMD_READ_6BIT  0x180
     339#define RL_EECMD_READ_8BIT  0x600
     340#define RL_EECMD_ERASE      0x1c0
     341
     342#define RL_EE_ID        0x00
     343#define RL_EE_PCI_VID       0x01
     344#define RL_EE_PCI_DID       0x02
     345/* Location of station address inside EEPROM */
     346#define RL_EE_EADDR     0x07
     347
     348/*
     349 * MII register (8129 only)
     350 */
     351#define RL_MII_CLK      0x01
     352#define RL_MII_DATAIN       0x02
     353#define RL_MII_DATAOUT      0x04
     354#define RL_MII_DIR      0x80    /* 0 == input, 1 == output */
     355
     356/*
     357 * Config 0 register
     358 */
     359#define RL_CFG0_ROM0        0x01
     360#define RL_CFG0_ROM1        0x02
     361#define RL_CFG0_ROM2        0x04
     362#define RL_CFG0_PL0     0x08
     363#define RL_CFG0_PL1     0x10
     364#define RL_CFG0_10MBPS      0x20    /* 10 Mbps internal mode */
     365#define RL_CFG0_PCS     0x40
     366#define RL_CFG0_SCR     0x80
     367
     368/*
     369 * Config 1 register
     370 */
     371#define RL_CFG1_PWRDWN      0x01
     372#define RL_CFG1_PME     0x01   
     373#define RL_CFG1_SLEEP       0x02
     374#define RL_CFG1_VPDEN       0x02
     375#define RL_CFG1_IOMAP       0x04
     376#define RL_CFG1_MEMMAP      0x08
     377#define RL_CFG1_RSVD        0x10
     378#define RL_CFG1_LWACT       0x10
     379#define RL_CFG1_DRVLOAD     0x20
     380#define RL_CFG1_LED0        0x40
     381#define RL_CFG1_FULLDUPLEX  0x40    /* 8129 only */
     382#define RL_CFG1_LED1        0x80
     383
     384/*
     385 * Config 2 register
     386 */
     387#define RL_CFG2_PCI33MHZ    0x00
     388#define RL_CFG2_PCI66MHZ    0x01
     389#define RL_CFG2_PCI64BIT    0x08
     390#define RL_CFG2_AUXPWR      0x10
     391#define RL_CFG2_MSI     0x20
     392
     393/*
     394 * Config 3 register
     395 */
     396#define RL_CFG3_GRANTSEL    0x80
     397#define RL_CFG3_WOL_MAGIC   0x20
     398#define RL_CFG3_WOL_LINK    0x10
     399#define RL_CFG3_FAST_B2B    0x01
     400
     401/*
     402 * Config 4 register
     403 */
     404#define RL_CFG4_LWPTN       0x04
     405#define RL_CFG4_LWPME       0x10
     406
     407/*
     408 * Config 5 register
     409 */
     410#define RL_CFG5_WOL_BCAST   0x40
     411#define RL_CFG5_WOL_MCAST   0x20
     412#define RL_CFG5_WOL_UCAST   0x10
     413#define RL_CFG5_WOL_LANWAKE 0x02
     414#define RL_CFG5_PME_STS     0x01
     415
     416/*
     417 * 8139C+ register definitions
     418 */
     419
     420/* RL_DUMPSTATS_LO register */
     421
     422#define RL_DUMPSTATS_START  0x00000008
     423
     424/* Transmit start register */
     425
     426#define RL_TXSTART_SWI      0x01    /* generate TX interrupt */
     427#define RL_TXSTART_START    0x40    /* start normal queue transmit */
     428#define RL_TXSTART_HPRIO_START  0x80    /* start hi prio queue transmit */
     429
     430/*
     431 * Config 2 register, 8139C+/8169/8169S/8110S only
     432 */
     433#define RL_CFG2_BUSFREQ     0x07
     434#define RL_CFG2_BUSWIDTH    0x08
     435#define RL_CFG2_AUXPWRSTS   0x10
     436
     437#define RL_BUSFREQ_33MHZ    0x00
     438#define RL_BUSFREQ_66MHZ    0x01
     439                                       
     440#define RL_BUSWIDTH_32BITS  0x00
     441#define RL_BUSWIDTH_64BITS  0x08
     442
     443/* C+ mode command register */
     444
     445#define RL_CPLUSCMD_TXENB   0x0001  /* enable C+ transmit mode */
     446#define RL_CPLUSCMD_RXENB   0x0002  /* enable C+ receive mode */
     447#define RL_CPLUSCMD_PCI_MRW 0x0008  /* enable PCI multi-read/write */
     448#define RL_CPLUSCMD_PCI_DAC 0x0010  /* PCI dual-address cycle only */
     449#define RL_CPLUSCMD_RXCSUM_ENB  0x0020  /* enable RX checksum offload */
     450#define RL_CPLUSCMD_VLANSTRIP   0x0040  /* enable VLAN tag stripping */
     451#define RL_CPLUSCMD_MACSTAT_DIS 0x0080  /* 8168B/C/CP */
     452#define RL_CPLUSCMD_ASF     0x0100  /* 8168C/CP */
     453#define RL_CPLUSCMD_DBG_SEL 0x0200  /* 8168C/CP */
     454#define RL_CPLUSCMD_FORCE_TXFC  0x0400  /* 8168C/CP */
     455#define RL_CPLUSCMD_FORCE_RXFC  0x0800  /* 8168C/CP */
     456#define RL_CPLUSCMD_FORCE_HDPX  0x1000  /* 8168C/CP */
     457#define RL_CPLUSCMD_NORMAL_MODE 0x2000  /* 8168C/CP */
     458#define RL_CPLUSCMD_DBG_ENB 0x4000  /* 8168C/CP */
     459#define RL_CPLUSCMD_BIST_ENB    0x8000  /* 8168C/CP */
     460
     461/* C+ early transmit threshold */
     462
     463#define RL_EARLYTXTHRESH_CNT    0x003F  /* byte count times 8 */
     464
     465/*
     466 * Gigabit PHY access register (8169 only)
     467 */
     468
     469#define RL_PHYAR_PHYDATA    0x0000FFFF
     470#define RL_PHYAR_PHYREG     0x001F0000
     471#define RL_PHYAR_BUSY       0x80000000
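/*
 * Illustrative sketch (not part of this patch): reading a GMII PHY register
 * through RL_PHYAR, roughly the sequence used by re_gmii_readreg() in
 * if_re.c. The register number is written into the RL_PHYAR_PHYREG field,
 * then the chip is polled until it sets RL_PHYAR_BUSY to indicate that
 * RL_PHYAR_PHYDATA holds the result. The function name is hypothetical.
 */
#if 0
static int
example_gmii_read(struct rl_softc *sc, int reg)
{
        uint32_t rval = 0;
        int i;

        CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
        for (i = 0; i < RL_TIMEOUT; i++) {
                rval = CSR_READ_4(sc, RL_PHYAR);
                if (rval & RL_PHYAR_BUSY)
                        break;
                DELAY(100);
        }
        return (rval & RL_PHYAR_PHYDATA);
}
#endif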
     472
     473/*
     474 * Gigabit media status (8169 only)
     475 */
     476#define RL_GMEDIASTAT_FDX   0x01    /* full duplex */
     477#define RL_GMEDIASTAT_LINK  0x02    /* link up */
      478#define RL_GMEDIASTAT_10MBPS    0x04    /* 10Mbps link */
      479#define RL_GMEDIASTAT_100MBPS   0x08    /* 100Mbps link */
     480#define RL_GMEDIASTAT_1000MBPS  0x10    /* gigE link */
     481#define RL_GMEDIASTAT_RXFLOW    0x20    /* RX flow control on */
     482#define RL_GMEDIASTAT_TXFLOW    0x40    /* TX flow control on */
     483#define RL_GMEDIASTAT_TBI   0x80    /* TBI enabled */
     484
     485/*
     486 * The RealTek doesn't use a fragment-based descriptor mechanism.
      487 * Instead, there are only four register sets, each of which represents
     488 * one 'descriptor.' Basically, each TX descriptor is just a contiguous
     489 * packet buffer (32-bit aligned!) and we place the buffer addresses in
     490 * the registers so the chip knows where they are.
     491 *
     492 * We can sort of kludge together the same kind of buffer management
     493 * used in previous drivers, but we have to do buffer copies almost all
     494 * the time, so it doesn't really buy us much.
     495 *
     496 * For reception, there's just one large buffer where the chip stores
     497 * all received packets.
     498 */
     499
     500#define RL_RX_BUF_SZ        RL_RXBUF_64
     501#define RL_RXBUFLEN     (1 << ((RL_RX_BUF_SZ >> 11) + 13))
     502#define RL_TX_LIST_CNT      4
     503#define RL_MIN_FRAMELEN     60
     504#define RL_TXTHRESH(x)      ((x) << 11)
     505#define RL_TX_THRESH_INIT   96
     506#define RL_RX_FIFOTHRESH    RL_RXFIFO_NOTHRESH
     507#define RL_RX_MAXDMA        RL_RXDMA_UNLIMITED
     508#define RL_TX_MAXDMA        RL_TXDMA_2048BYTES
     509
     510#define RL_RXCFG_CONFIG (RL_RX_FIFOTHRESH|RL_RX_MAXDMA|RL_RX_BUF_SZ)
     511#define RL_TXCFG_CONFIG (RL_TXCFG_IFG|RL_TX_MAXDMA)
     512
     513#define RL_ETHER_ALIGN  2
     514
     515/*
     516 * re(4) hardware ip4csum-tx could be mangled with 28 bytes or less IP packets.
     517 */
     518#define RL_IP4CSUMTX_MINLEN 28
     519#define RL_IP4CSUMTX_PADLEN (ETHER_HDR_LEN + RL_IP4CSUMTX_MINLEN)
     520
     521struct rl_chain_data {
     522    uint16_t        cur_rx;
     523    uint8_t         *rl_rx_buf;
     524    uint8_t         *rl_rx_buf_ptr;
     525    bus_dmamap_t        rl_rx_dmamap;
     526
     527    struct mbuf     *rl_tx_chain[RL_TX_LIST_CNT];
     528    bus_dmamap_t        rl_tx_dmamap[RL_TX_LIST_CNT];
     529    uint8_t         last_tx;
     530    uint8_t         cur_tx;
     531};
     532
     533#define RL_INC(x)       (x = (x + 1) % RL_TX_LIST_CNT)
     534#define RL_CUR_TXADDR(x)    ((x->rl_cdata.cur_tx * 4) + RL_TXADDR0)
     535#define RL_CUR_TXSTAT(x)    ((x->rl_cdata.cur_tx * 4) + RL_TXSTAT0)
     536#define RL_CUR_TXMBUF(x)    (x->rl_cdata.rl_tx_chain[x->rl_cdata.cur_tx])
     537#define RL_CUR_DMAMAP(x)    (x->rl_cdata.rl_tx_dmamap[x->rl_cdata.cur_tx])
     538#define RL_LAST_TXADDR(x)   ((x->rl_cdata.last_tx * 4) + RL_TXADDR0)
     539#define RL_LAST_TXSTAT(x)   ((x->rl_cdata.last_tx * 4) + RL_TXSTAT0)
     540#define RL_LAST_TXMBUF(x)   (x->rl_cdata.rl_tx_chain[x->rl_cdata.last_tx])
     541#define RL_LAST_DMAMAP(x)   (x->rl_cdata.rl_tx_dmamap[x->rl_cdata.last_tx])
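/*
 * Illustrative sketch (not part of this patch): how the cur_tx/last_tx
 * helper macros above are used by the old-style (non-C+) transmit path,
 * in the spirit of the legacy rl(4) driver. The frame is assumed to be
 * already zero-padded to RL_MIN_FRAMELEN; the function name and the
 * buf_pa/len parameters are hypothetical.
 */
#if 0
static void
example_legacy_tx_kick(struct rl_softc *sc, bus_addr_t buf_pa, int len)
{
        /* Point the current TX slot at the (32-bit aligned) packet buffer. */
        CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), buf_pa);
        /* Writing the length plus the early-TX threshold into the status
         * register clears the OWN bit and starts the transmission. */
        CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
            RL_TXTHRESH(sc->rl_txthresh) | len);
        RL_INC(sc->rl_cdata.cur_tx);
}
#endif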
     542
     543struct rl_type {
     544    uint16_t        rl_vid;
     545    uint16_t        rl_did;
     546    int         rl_basetype;
     547    char            *rl_name;
     548};
     549
     550struct rl_hwrev {
     551    uint32_t        rl_rev;
     552    int         rl_type;
     553    char            *rl_desc;
     554};
     555
     556struct rl_mii_frame {
     557    uint8_t     mii_stdelim;
     558    uint8_t     mii_opcode;
     559    uint8_t     mii_phyaddr;
     560    uint8_t     mii_regaddr;
     561    uint8_t     mii_turnaround;
     562    uint16_t    mii_data;
     563};
     564
     565/*
     566 * MII constants
     567 */
     568#define RL_MII_STARTDELIM   0x01
     569#define RL_MII_READOP       0x02
     570#define RL_MII_WRITEOP      0x01
     571#define RL_MII_TURNAROUND   0x02
     572
     573#define RL_8129         1
     574#define RL_8139         2
     575#define RL_8139CPLUS        3
     576#define RL_8169         4
     577
     578#define RL_ISCPLUS(x)       ((x)->rl_type == RL_8139CPLUS ||    \
     579                 (x)->rl_type == RL_8169)
     580
     581/*
      582 * The 8139C+ and 8169 gigE chips support descriptor-based TX
     583 * and RX. In fact, they even support TCP large send. Descriptors
     584 * must be allocated in contiguous blocks that are aligned on a
     585 * 256-byte boundary. The rings can hold a maximum of 64 descriptors.
     586 */
     587
     588/*
     589 * RX/TX descriptor definition. When large send mode is enabled, the
     590 * lower 11 bits of the TX rl_cmd word are used to hold the MSS, and
     591 * the checksum offload bits are disabled. The structure layout is
     592 * the same for RX and TX descriptors
     593 */
     594
     595struct rl_desc {
     596    uint32_t        rl_cmdstat;
     597    uint32_t        rl_vlanctl;
     598    uint32_t        rl_bufaddr_lo;
     599    uint32_t        rl_bufaddr_hi;
     600};
     601
     602#define RL_TDESC_CMD_FRAGLEN    0x0000FFFF
     603#define RL_TDESC_CMD_TCPCSUM    0x00010000  /* TCP checksum enable */
     604#define RL_TDESC_CMD_UDPCSUM    0x00020000  /* UDP checksum enable */
     605#define RL_TDESC_CMD_IPCSUM 0x00040000  /* IP header checksum enable */
     606#define RL_TDESC_CMD_MSSVAL 0x07FF0000  /* Large send MSS value */
     607#define RL_TDESC_CMD_MSSVAL_SHIFT   16  /* Large send MSS value shift */
     608#define RL_TDESC_CMD_LGSEND 0x08000000  /* TCP large send enb */
     609#define RL_TDESC_CMD_EOF    0x10000000  /* end of frame marker */
     610#define RL_TDESC_CMD_SOF    0x20000000  /* start of frame marker */
     611#define RL_TDESC_CMD_EOR    0x40000000  /* end of ring marker */
     612#define RL_TDESC_CMD_OWN    0x80000000  /* chip owns descriptor */
     613
     614#define RL_TDESC_VLANCTL_TAG    0x00020000  /* Insert VLAN tag */
     615#define RL_TDESC_VLANCTL_DATA   0x0000FFFF  /* TAG data */
     616/* RTL8168C/RTL8168CP/RTL8111C/RTL8111CP */
     617#define RL_TDESC_CMD_UDPCSUMV2  0x80000000
     618#define RL_TDESC_CMD_TCPCSUMV2  0x40000000 
     619#define RL_TDESC_CMD_IPCSUMV2   0x20000000 
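/*
 * Illustrative sketch (not part of this patch): filling a C+ mode TX
 * descriptor for a single-segment frame using the RL_TDESC_CMD_* bits
 * above. In a real multi-segment setup the OWN bit of the first descriptor
 * must be set last; the function name and parameters are hypothetical.
 */
#if 0
static void
example_fill_tx_desc(struct rl_desc *d, bus_addr_t pa, int len, int last)
{
        d->rl_bufaddr_lo = htole32(RL_ADDR_LO(pa));
        d->rl_bufaddr_hi = htole32(RL_ADDR_HI(pa));
        d->rl_vlanctl = 0;
        /* A single-segment frame carries both SOF and EOF; the final
         * descriptor in the ring also needs EOR so the chip wraps around. */
        d->rl_cmdstat = htole32(len | RL_TDESC_CMD_SOF | RL_TDESC_CMD_EOF |
            (last ? RL_TDESC_CMD_EOR : 0) | RL_TDESC_CMD_OWN);
}
#endif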
     620
     621/*
     622 * Error bits are valid only on the last descriptor of a frame
     623 * (i.e. RL_TDESC_CMD_EOF == 1)
     624 */
     625
     626#define RL_TDESC_STAT_COLCNT    0x000F0000  /* collision count */
     627#define RL_TDESC_STAT_EXCESSCOL 0x00100000  /* excessive collisions */
      628#define RL_TDESC_STAT_LINKFAIL  0x00200000  /* link failure */
     629#define RL_TDESC_STAT_OWINCOL   0x00400000  /* out-of-window collision */
     630#define RL_TDESC_STAT_TXERRSUM  0x00800000  /* transmit error summary */
      631#define RL_TDESC_STAT_UNDERRUN  0x02000000  /* TX underrun occurred */
     632#define RL_TDESC_STAT_OWN   0x80000000
     633
     634/*
     635 * RX descriptor cmd/vlan definitions
     636 */
     637
     638#define RL_RDESC_CMD_EOR    0x40000000
     639#define RL_RDESC_CMD_OWN    0x80000000
     640#define RL_RDESC_CMD_BUFLEN 0x00001FFF
     641
     642#define RL_RDESC_STAT_OWN   0x80000000
     643#define RL_RDESC_STAT_EOR   0x40000000
     644#define RL_RDESC_STAT_SOF   0x20000000
     645#define RL_RDESC_STAT_EOF   0x10000000
     646#define RL_RDESC_STAT_FRALIGN   0x08000000  /* frame alignment error */
     647#define RL_RDESC_STAT_MCAST 0x04000000  /* multicast pkt received */
     648#define RL_RDESC_STAT_UCAST 0x02000000  /* unicast pkt received */
     649#define RL_RDESC_STAT_BCAST 0x01000000  /* broadcast pkt received */
     650#define RL_RDESC_STAT_BUFOFLOW  0x00800000  /* out of buffer space */
     651#define RL_RDESC_STAT_FIFOOFLOW 0x00400000  /* FIFO overrun */
     652#define RL_RDESC_STAT_GIANT 0x00200000  /* pkt > 4096 bytes */
     653#define RL_RDESC_STAT_RXERRSUM  0x00100000  /* RX error summary */
     654#define RL_RDESC_STAT_RUNT  0x00080000  /* runt packet received */
     655#define RL_RDESC_STAT_CRCERR    0x00040000  /* CRC error */
     656#define RL_RDESC_STAT_PROTOID   0x00030000  /* Protocol type */
     657#define RL_RDESC_STAT_UDP   0x00020000  /* UDP, 8168C/CP, 8111C/CP */
     658#define RL_RDESC_STAT_TCP   0x00010000  /* TCP, 8168C/CP, 8111C/CP */
     659#define RL_RDESC_STAT_IPSUMBAD  0x00008000  /* IP header checksum bad */
     660#define RL_RDESC_STAT_UDPSUMBAD 0x00004000  /* UDP checksum bad */
     661#define RL_RDESC_STAT_TCPSUMBAD 0x00002000  /* TCP checksum bad */
     662#define RL_RDESC_STAT_FRAGLEN   0x00001FFF  /* RX'ed frame/frag len */
     663#define RL_RDESC_STAT_GFRAGLEN  0x00003FFF  /* RX'ed frame/frag len */
     664#define RL_RDESC_STAT_ERRS  (RL_RDESC_STAT_GIANT|RL_RDESC_STAT_RUNT| \
     665                 RL_RDESC_STAT_CRCERR)
     666
     667#define RL_RDESC_VLANCTL_TAG    0x00010000  /* VLAN tag available
     668                           (rl_vlandata valid)*/
     669#define RL_RDESC_VLANCTL_DATA   0x0000FFFF  /* TAG data */
     670/* RTL8168C/RTL8168CP/RTL8111C/RTL8111CP */
     671#define RL_RDESC_IPV6       0x80000000
     672#define RL_RDESC_IPV4       0x40000000
     673
     674#define RL_PROTOID_NONIP    0x00000000
     675#define RL_PROTOID_TCPIP    0x00010000
     676#define RL_PROTOID_UDPIP    0x00020000
     677#define RL_PROTOID_IP       0x00030000
     678#define RL_TCPPKT(x)        (((x) & RL_RDESC_STAT_PROTOID) == \
     679                 RL_PROTOID_TCPIP)
     680#define RL_UDPPKT(x)        (((x) & RL_RDESC_STAT_PROTOID) == \
     681                 RL_PROTOID_UDPIP)
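/*
 * Illustrative sketch (not part of this patch): checking a C+ mode RX
 * descriptor with the RL_RDESC_STAT_* bits above, assuming an unfragmented
 * frame (SOF and EOF in the same descriptor). The length mask differs per
 * chip (RL_RDESC_STAT_FRAGLEN on the 8139C+, RL_RDESC_STAT_GFRAGLEN on the
 * 8169); the function name and parameters are hypothetical.
 */
#if 0
static int
example_rx_desc_ready(const struct rl_desc *d, uint32_t rxlenmask, int *lenp)
{
        uint32_t rxstat = le32toh(d->rl_cmdstat);

        if (rxstat & RL_RDESC_STAT_OWN)
                return (0);                     /* still owned by the chip */
        if (rxstat & RL_RDESC_STAT_RXERRSUM)
                return (-1);                    /* error summary set, drop */
        /* The reported length includes the trailing CRC. */
        *lenp = (int)(rxstat & rxlenmask) - ETHER_CRC_LEN;
        return (1);
}
#endif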
     682
     683/*
     684 * Statistics counter structure (8139C+ and 8169 only)
     685 */
     686struct rl_stats {
     687    uint32_t        rl_tx_pkts_lo;
     688    uint32_t        rl_tx_pkts_hi;
     689    uint32_t        rl_tx_errs_lo;
     690    uint32_t        rl_tx_errs_hi;
     691    uint32_t        rl_tx_errs;
     692    uint16_t        rl_missed_pkts;
     693    uint16_t        rl_rx_framealign_errs;
     694    uint32_t        rl_tx_onecoll;
     695    uint32_t        rl_tx_multicolls;
     696    uint32_t        rl_rx_ucasts_hi;
     697    uint32_t        rl_rx_ucasts_lo;
     698    uint32_t        rl_rx_bcasts_lo;
     699    uint32_t        rl_rx_bcasts_hi;
     700    uint32_t        rl_rx_mcasts;
     701    uint16_t        rl_tx_aborts;
     702    uint16_t        rl_rx_underruns;
     703};
     704
     705/*
     706 * Rx/Tx descriptor parameters (8139C+ and 8169 only)
     707 *
     708 * 8139C+
     709 *  Number of descriptors supported : up to 64
     710 *  Descriptor alignment : 256 bytes
     711 *  Tx buffer : At least 4 bytes in length.
     712 *  Rx buffer : At least 8 bytes in length and 8 bytes alignment required.
     713 * 
     714 * 8169
     715 *  Number of descriptors supported : up to 1024
     716 *  Descriptor alignment : 256 bytes
     717 *  Tx buffer : At least 4 bytes in length.
     718 *  Rx buffer : At least 8 bytes in length and 8 bytes alignment required.
     719 */
     720#ifndef __NO_STRICT_ALIGNMENT
     721#define RE_FIXUP_RX 1
     722#endif
     723
     724#define RL_8169_TX_DESC_CNT 256
     725#define RL_8169_RX_DESC_CNT 256
     726#define RL_8139_TX_DESC_CNT 64
     727#define RL_8139_RX_DESC_CNT 64
     728#define RL_TX_DESC_CNT      RL_8169_TX_DESC_CNT
     729#define RL_RX_DESC_CNT      RL_8169_RX_DESC_CNT
     730#define RL_NTXSEGS      32
     731
     732#define RL_RING_ALIGN       256
     733#define RL_IFQ_MAXLEN       512
     734#define RL_TX_DESC_NXT(sc,x)    ((x + 1) & ((sc)->rl_ldata.rl_tx_desc_cnt - 1))
     735#define RL_TX_DESC_PRV(sc,x)    ((x - 1) & ((sc)->rl_ldata.rl_tx_desc_cnt - 1))
     736#define RL_RX_DESC_NXT(sc,x)    ((x + 1) & ((sc)->rl_ldata.rl_rx_desc_cnt - 1))
     737#define RL_OWN(x)       (le32toh((x)->rl_cmdstat) & RL_RDESC_STAT_OWN)
     738#define RL_RXBYTES(x)       (le32toh((x)->rl_cmdstat) & sc->rl_rxlenmask)
     739#define RL_PKTSZ(x)     ((x)/* >> 3*/)
     740#ifdef RE_FIXUP_RX
     741#define RE_ETHER_ALIGN  sizeof(uint64_t)
     742#define RE_RX_DESC_BUFLEN   (MCLBYTES - RE_ETHER_ALIGN)
     743#else
     744#define RE_ETHER_ALIGN  0
     745#define RE_RX_DESC_BUFLEN   MCLBYTES
     746#endif
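/*
 * Illustrative sketch (not part of this patch) of the RE_FIXUP_RX idea: on
 * strict-alignment machines the frame is received at an 8-byte
 * (RE_ETHER_ALIGN) boundary to satisfy the chip, then copied back six bytes
 * so the payload starts at offset 2 and the IP header ends up 32-bit
 * aligned. This is roughly what re_fixup_rx() in if_re.c does with a
 * word-by-word copy; the function name here is hypothetical.
 */
#if 0
static __inline void
example_fixup_rx(struct mbuf *m)
{
        bcopy(mtod(m, char *),
            mtod(m, char *) - (RE_ETHER_ALIGN - RL_ETHER_ALIGN), m->m_len);
        m->m_data -= RE_ETHER_ALIGN - RL_ETHER_ALIGN;
}
#endif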
     747
     748#define RL_MSI_MESSAGES 2
     749
     750#define RL_ADDR_LO(y)       ((uint64_t) (y) & 0xFFFFFFFF)
     751#define RL_ADDR_HI(y)       ((uint64_t) (y) >> 32)
     752
     753/*
      754 * The MSS field in RealTek controllers is only 11 bits wide. This
      755 * limits the maximum interface MTU in the TSO case, since the upper
      756 * stack must not generate TCP segments with an MSS greater than the
      757 * limit.
     758 */
     759#define RL_TSO_MTU      (2047 - ETHER_HDR_LEN - ETHER_CRC_LEN)
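/*
 * Worked example (not part of this patch): with the usual ETHER_HDR_LEN of
 * 14 and ETHER_CRC_LEN of 4, RL_TSO_MTU evaluates to 2047 - 14 - 4 = 2029,
 * so TSO is only usable when the interface MTU is 2029 bytes or less.
 */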
     760
     761/* see comment in dev/re/if_re.c */
     762#define RL_JUMBO_FRAMELEN   7440
     763#define RL_JUMBO_MTU        (RL_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
     764#define RL_MAX_FRAMELEN     \
     765    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
     766
     767struct rl_txdesc {
     768    struct mbuf     *tx_m;
     769    bus_dmamap_t        tx_dmamap;
     770};
     771
     772struct rl_rxdesc {
     773    struct mbuf     *rx_m;
     774    bus_dmamap_t        rx_dmamap;
     775    bus_size_t      rx_size;
     776};
     777
     778struct rl_list_data {
     779    struct rl_txdesc    rl_tx_desc[RL_TX_DESC_CNT];
     780    struct rl_rxdesc    rl_rx_desc[RL_RX_DESC_CNT];
     781    int         rl_tx_desc_cnt;
     782    int         rl_rx_desc_cnt;
     783    int         rl_tx_prodidx;
     784    int         rl_rx_prodidx;
     785    int         rl_tx_considx;
     786    int         rl_tx_free;
     787    bus_dma_tag_t       rl_tx_mtag; /* mbuf TX mapping tag */
     788    bus_dma_tag_t       rl_rx_mtag; /* mbuf RX mapping tag */
     789    bus_dmamap_t        rl_rx_sparemap;
     790    bus_dma_tag_t       rl_stag;    /* stats mapping tag */
     791    bus_dmamap_t        rl_smap;    /* stats map */
     792    struct rl_stats     *rl_stats;
     793    bus_addr_t      rl_stats_addr;
     794    bus_dma_tag_t       rl_rx_list_tag;
     795    bus_dmamap_t        rl_rx_list_map;
     796    struct rl_desc      *rl_rx_list;
     797    bus_addr_t      rl_rx_list_addr;
     798    bus_dma_tag_t       rl_tx_list_tag;
     799    bus_dmamap_t        rl_tx_list_map;
     800    struct rl_desc      *rl_tx_list;
     801    bus_addr_t      rl_tx_list_addr;
     802};
     803
     804struct rl_softc {
     805    struct ifnet        *rl_ifp;    /* interface info */
     806    bus_space_handle_t  rl_bhandle; /* bus space handle */
     807    bus_space_tag_t     rl_btag;    /* bus space tag */
     808    device_t        rl_dev;
     809    struct resource     *rl_res;
     810    int         rl_res_id;
     811    int         rl_res_type;
     812    struct resource     *rl_irq[RL_MSI_MESSAGES];
     813    void            *rl_intrhand[RL_MSI_MESSAGES];
     814    device_t        rl_miibus;
     815    bus_dma_tag_t       rl_parent_tag;
     816    bus_dma_tag_t       rl_tag;
     817    uint8_t         rl_type;
     818    int         rl_eecmd_read;
     819    int         rl_eewidth;
     820    uint8_t         rl_stats_no_timeout;
     821    int         rl_txthresh;
     822    struct rl_chain_data    rl_cdata;
     823    struct rl_list_data rl_ldata;
     824    struct callout      rl_stat_callout;
     825    int         rl_watchdog_timer;
     826    struct mtx      rl_mtx;
     827    struct mbuf     *rl_head;
     828    struct mbuf     *rl_tail;
     829    uint32_t        rl_hwrev;
     830    uint32_t        rl_rxlenmask;
     831    int         rl_testmode;
     832    int         rl_if_flags;
     833    int         suspended;  /* 0 = normal  1 = suspended */
     834#ifdef DEVICE_POLLING
     835    int         rxcycles;
     836#endif
     837
     838    struct task     rl_txtask;
     839    struct task     rl_inttask;
     840
     841    int         rl_txstart;
     842    uint32_t        rl_flags;
     843#define RL_FLAG_MSI     0x0001
     844#define RL_FLAG_INVMAR      0x0004
     845#define RL_FLAG_PHYWAKE     0x0008
     846#define RL_FLAG_NOJUMBO     0x0010
     847#define RL_FLAG_PAR     0x0020
     848#define RL_FLAG_DESCV2      0x0040
     849#define RL_FLAG_MACSTAT     0x0080
     850#define RL_FLAG_LINK        0x8000
     851};
     852
     853#define RL_LOCK(_sc)        mtx_lock(&(_sc)->rl_mtx)
     854#define RL_UNLOCK(_sc)      mtx_unlock(&(_sc)->rl_mtx)
     855#define RL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rl_mtx, MA_OWNED)
     856
     857/*
     858 * register space access macros
     859 */
     860#define CSR_WRITE_STREAM_4(sc, reg, val)    \
     861    bus_space_write_stream_4(sc->rl_btag, sc->rl_bhandle, reg, val)
     862#define CSR_WRITE_4(sc, reg, val)   \
     863    bus_space_write_4(sc->rl_btag, sc->rl_bhandle, reg, val)
     864#define CSR_WRITE_2(sc, reg, val)   \
     865    bus_space_write_2(sc->rl_btag, sc->rl_bhandle, reg, val)
     866#define CSR_WRITE_1(sc, reg, val)   \
     867    bus_space_write_1(sc->rl_btag, sc->rl_bhandle, reg, val)
     868
     869#define CSR_READ_4(sc, reg)     \
     870    bus_space_read_4(sc->rl_btag, sc->rl_bhandle, reg)
     871#define CSR_READ_2(sc, reg)     \
     872    bus_space_read_2(sc->rl_btag, sc->rl_bhandle, reg)
     873#define CSR_READ_1(sc, reg)     \
     874    bus_space_read_1(sc->rl_btag, sc->rl_bhandle, reg)
     875
     876#define CSR_SETBIT_1(sc, offset, val)       \
     877    CSR_WRITE_1(sc, offset, CSR_READ_1(sc, offset) | (val))
     878
     879#define CSR_CLRBIT_1(sc, offset, val)       \
     880    CSR_WRITE_1(sc, offset, CSR_READ_1(sc, offset) & ~(val))
     881
     882#define CSR_SETBIT_2(sc, offset, val)       \
     883    CSR_WRITE_2(sc, offset, CSR_READ_2(sc, offset) | (val))
     884
     885#define CSR_CLRBIT_2(sc, offset, val)       \
     886    CSR_WRITE_2(sc, offset, CSR_READ_2(sc, offset) & ~(val))
     887
     888#define CSR_SETBIT_4(sc, offset, val)       \
     889    CSR_WRITE_4(sc, offset, CSR_READ_4(sc, offset) | (val))
     890
     891#define CSR_CLRBIT_4(sc, offset, val)       \
     892    CSR_WRITE_4(sc, offset, CSR_READ_4(sc, offset) & ~(val))
     893
     894#define RL_TIMEOUT      1000
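/*
 * Illustrative sketch (not part of this patch): typical use of the CSR
 * access macros above, in the spirit of the chip reset done by re_reset()
 * in if_re.c: issue RL_CMD_RESET, wait for the self-clearing bit, then
 * enable the receiver and transmitter. The function name is hypothetical.
 */
#if 0
static void
example_reset_and_enable(struct rl_softc *sc)
{
        int i;

        CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
        for (i = 0; i < RL_TIMEOUT; i++) {
                DELAY(10);
                if ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET) == 0)
                        break;
        }
        CSR_SETBIT_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
}
#endif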
     895
     896/*
     897 * General constants that are fun to know.
     898 *
     899 * RealTek PCI vendor ID
     900 */
     901#define RT_VENDORID             0x10EC
     902
     903/*
     904 * RealTek chip device IDs.
     905 */
     906#define RT_DEVICEID_8139D           0x8039
     907#define RT_DEVICEID_8129            0x8129
     908#define RT_DEVICEID_8101E           0x8136
     909#define RT_DEVICEID_8138            0x8138
     910#define RT_DEVICEID_8139            0x8139
     911#define RT_DEVICEID_8169SC          0x8167
     912#define RT_DEVICEID_8168            0x8168
     913#define RT_DEVICEID_8169            0x8169
     914#define RT_DEVICEID_8100            0x8100
     915
     916#define RT_REVID_8139CPLUS          0x20
     917
     918/*
     919 * Accton PCI vendor ID
     920 */
     921#define ACCTON_VENDORID             0x1113
     922
     923/*
     924 * Accton MPX 5030/5038 device ID.
     925 */
     926#define ACCTON_DEVICEID_5030            0x1211
     927
     928/*
     929 * Nortel PCI vendor ID
     930 */
     931#define NORTEL_VENDORID             0x126C
     932
     933/*
     934 * Delta Electronics Vendor ID.
     935 */
     936#define DELTA_VENDORID              0x1500
     937
     938/*
     939 * Delta device IDs.
     940 */
     941#define DELTA_DEVICEID_8139         0x1360
     942
     943/*
     944 * Addtron vendor ID.
     945 */
     946#define ADDTRON_VENDORID            0x4033
     947
     948/*
     949 * Addtron device IDs.
     950 */
     951#define ADDTRON_DEVICEID_8139           0x1360
     952
     953/*
     954 * D-Link vendor ID.
     955 */
     956#define DLINK_VENDORID              0x1186
     957
     958/*
     959 * D-Link DFE-530TX+ device ID
     960 */
     961#define DLINK_DEVICEID_530TXPLUS        0x1300
     962
     963/*
     964 * D-Link DFE-5280T device ID
     965 */
     966#define DLINK_DEVICEID_528T         0x4300
     967
     968/*
     969 * D-Link DFE-690TXD device ID
     970 */
     971#define DLINK_DEVICEID_690TXD           0x1340
     972
     973/*
     974 * Corega K.K vendor ID
     975 */
     976#define COREGA_VENDORID             0x1259
     977
     978/*
     979 * Corega FEther CB-TXD device ID
     980 */
     981#define COREGA_DEVICEID_FETHERCBTXD     0xa117
     982
     983/*
     984 * Corega FEtherII CB-TXD device ID
     985 */
     986#define COREGA_DEVICEID_FETHERIICBTXD       0xa11e
     987
     988/*
     989 * Corega CG-LAPCIGT device ID
     990 */
     991#define COREGA_DEVICEID_CGLAPCIGT       0xc107
     992
     993/*
     994 * Linksys vendor ID
     995 */
     996#define LINKSYS_VENDORID            0x1737
     997
     998/*
     999 * Linksys EG1032 device ID
     1000 */
     1001#define LINKSYS_DEVICEID_EG1032         0x1032
     1002
     1003/*
     1004 * Linksys EG1032 rev 3 sub-device ID
     1005 */
     1006#define LINKSYS_SUBDEVICE_EG1032_REV3       0x0024
     1007
     1008/*
     1009 * Peppercon vendor ID
     1010 */
     1011#define PEPPERCON_VENDORID          0x1743
     1012
     1013/*
     1014 * Peppercon ROL-F device ID
     1015 */
     1016#define PEPPERCON_DEVICEID_ROLF         0x8139
     1017
     1018/*
     1019 * Planex Communications, Inc. vendor ID
     1020 */
     1021#define PLANEX_VENDORID             0x14ea
     1022
     1023/*
     1024 * Planex FNW-3603-TX device ID
     1025 */
     1026#define PLANEX_DEVICEID_FNW3603TX       0xab06
     1027
     1028/*
     1029 * Planex FNW-3800-TX device ID
     1030 */
     1031#define PLANEX_DEVICEID_FNW3800TX       0xab07
     1032
     1033/*
     1034 * LevelOne vendor ID
     1035 */
     1036#define LEVEL1_VENDORID             0x018A
     1037
     1038/*
      1039 * LevelOne FPC-0106TX device ID
     1040 */
     1041#define LEVEL1_DEVICEID_FPC0106TX       0x0106
     1042
     1043/*
     1044 * Compaq vendor ID
     1045 */
     1046#define CP_VENDORID             0x021B
     1047
     1048/*
     1049 * Edimax vendor ID
     1050 */
     1051#define EDIMAX_VENDORID             0x13D1
     1052
     1053/*
     1054 * Edimax EP-4103DL cardbus device ID
     1055 */
     1056#define EDIMAX_DEVICEID_EP4103DL        0xAB06
     1057
     1058/* US Robotics vendor ID */
     1059
     1060#define USR_VENDORID        0x16EC
     1061
     1062/* US Robotics 997902 device ID */
     1063
     1064#define USR_DEVICEID_997902 0x0116
     1065
     1066/*
     1067 * PCI low memory base and low I/O base register, and
     1068 * other PCI registers.
     1069 */
     1070
     1071#define RL_PCI_VENDOR_ID    0x00
     1072#define RL_PCI_DEVICE_ID    0x02
     1073#define RL_PCI_COMMAND      0x04
     1074#define RL_PCI_STATUS       0x06
     1075#define RL_PCI_CLASSCODE    0x09
     1076#define RL_PCI_LATENCY_TIMER    0x0D
     1077#define RL_PCI_HEADER_TYPE  0x0E
     1078#define RL_PCI_LOIO     0x10
     1079#define RL_PCI_LOMEM        0x14
     1080#define RL_PCI_BIOSROM      0x30
     1081#define RL_PCI_INTLINE      0x3C
     1082#define RL_PCI_INTPIN       0x3D
     1083#define RL_PCI_MINGNT       0x3E
     1084#define RL_PCI_MINLAT       0x0F
     1085#define RL_PCI_RESETOPT     0x48
     1086#define RL_PCI_EEPROM_DATA  0x4C
     1087
     1088#define RL_PCI_CAPID        0x50 /* 8 bits */
     1089#define RL_PCI_NEXTPTR      0x51 /* 8 bits */
     1090#define RL_PCI_PWRMGMTCAP   0x52 /* 16 bits */
     1091#define RL_PCI_PWRMGMTCTRL  0x54 /* 16 bits */
     1092
     1093#define RL_PSTATE_MASK      0x0003
     1094#define RL_PSTATE_D0        0x0000
     1095#define RL_PSTATE_D1        0x0002
     1096#define RL_PSTATE_D2        0x0002
     1097#define RL_PSTATE_D3        0x0003
     1098#define RL_PME_EN       0x0010
     1099#define RL_PME_STATUS       0x8000
     1100
  • src/add-ons/kernel/drivers/network/re/pci/if_re.c

     
     1/*-
     2 * Copyright (c) 1997, 1998-2003
     3 *  Bill Paul <wpaul@windriver.com>.  All rights reserved.
     4 *
     5 * Redistribution and use in source and binary forms, with or without
     6 * modification, are permitted provided that the following conditions
     7 * are met:
     8 * 1. Redistributions of source code must retain the above copyright
     9 *    notice, this list of conditions and the following disclaimer.
     10 * 2. Redistributions in binary form must reproduce the above copyright
     11 *    notice, this list of conditions and the following disclaimer in the
     12 *    documentation and/or other materials provided with the distribution.
     13 * 3. All advertising materials mentioning features or use of this software
     14 *    must display the following acknowledgement:
     15 *  This product includes software developed by Bill Paul.
     16 * 4. Neither the name of the author nor the names of any co-contributors
     17 *    may be used to endorse or promote products derived from this software
     18 *    without specific prior written permission.
     19 *
     20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     30 * THE POSSIBILITY OF SUCH DAMAGE.
     31 */
     32
     33#include <sys/cdefs.h>
     34__FBSDID("$FreeBSD: src/sys/dev/re/if_re.c,v 1.95.2.36 2008/09/19 03:36:53 yongari Exp $");
     35
     36/*
     37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
     38 *
     39 * Written by Bill Paul <wpaul@windriver.com>
     40 * Senior Networking Software Engineer
     41 * Wind River Systems
     42 */
     43
     44/*
     45 * This driver is designed to support RealTek's next generation of
     46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
     47 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
     48 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
     49 *
     50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
     51 * with the older 8139 family, however it also supports a special
     52 * C+ mode of operation that provides several new performance enhancing
     53 * features. These include:
     54 *
     55 *  o Descriptor based DMA mechanism. Each descriptor represents
     56 *    a single packet fragment. Data buffers may be aligned on
     57 *    any byte boundary.
     58 *
     59 *  o 64-bit DMA
     60 *
     61 *  o TCP/IP checksum offload for both RX and TX
     62 *
     63 *  o High and normal priority transmit DMA rings
     64 *
     65 *  o VLAN tag insertion and extraction
     66 *
     67 *  o TCP large send (segmentation offload)
     68 *
     69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
     70 * programming API is fairly straightforward. The RX filtering, EEPROM
     71 * access and PHY access is the same as it is on the older 8139 series
     72 * chips.
     73 *
     74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
     75 * same programming API and feature set as the 8139C+ with the following
     76 * differences and additions:
     77 *
     78 *  o 1000Mbps mode
     79 *
     80 *  o Jumbo frames
     81 *
     82 *  o GMII and TBI ports/registers for interfacing with copper
     83 *    or fiber PHYs
     84 *
     85 *  o RX and TX DMA rings can have up to 1024 descriptors
     86 *    (the 8139C+ allows a maximum of 64)
     87 *
     88 *  o Slight differences in register layout from the 8139C+
     89 *
     90 * The TX start and timer interrupt registers are at different locations
     91 * on the 8169 than they are on the 8139C+. Also, the status word in the
     92 * RX descriptor has a slightly different bit layout. The 8169 does not
     93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
     94 * copper gigE PHY.
     95 *
     96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
     97 * (the 'S' stands for 'single-chip'). These devices have the same
     98 * programming API as the older 8169, but also have some vendor-specific
     99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
     100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
     101 *
     102 * This driver takes advantage of the RX and TX checksum offload and
     103 * VLAN tag insertion/extraction features. It also implements TX
     104 * interrupt moderation using the timer interrupt registers, which
     105 * significantly reduces TX interrupt load. There is also support
      106 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
      107 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
      108 * with this driver is 7422 bytes.
     109 */
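/*
 * Worked example (not part of this patch): the 7422-byte figure above is
 * RL_JUMBO_FRAMELEN (7440) minus the 14-byte Ethernet header and the
 * 4-byte CRC, i.e. 7440 - 14 - 4 = 7422, matching the RL_JUMBO_MTU
 * definition in if_rlreg.h.
 */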
     110
     111#ifdef HAVE_KERNEL_OPTION_HEADERS
     112#include "opt_device_polling.h"
     113#endif
     114
     115#include <sys/param.h>
     116#include <sys/endian.h>
     117#include <sys/systm.h>
     118#include <sys/sockio.h>
     119#include <sys/mbuf.h>
     120#include <sys/malloc.h>
     121#include <sys/module.h>
     122#include <sys/kernel.h>
     123#include <sys/socket.h>
     124#include <sys/lock.h>
     125#include <sys/mutex.h>
     126#include <sys/taskqueue.h>
     127
     128#include <net/if.h>
     129#include <net/if_arp.h>
     130#include <net/ethernet.h>
     131#include <net/if_dl.h>
     132#include <net/if_media.h>
     133#include <net/if_types.h>
     134#include <net/if_vlan_var.h>
     135
     136#include <net/bpf.h>
     137
     138#include <machine/bus.h>
     139#include <machine/resource.h>
     140#include <sys/bus.h>
     141#include <sys/rman.h>
     142
     143#include <dev/mii/mii.h>
     144#include <dev/mii/miivar.h>
     145
     146#include <dev/pci/pcireg.h>
     147#include <dev/pci/pcivar.h>
     148
     149#include <pci/if_rlreg.h>
     150
     151MODULE_DEPEND(re, pci, 1, 1, 1);
     152MODULE_DEPEND(re, ether, 1, 1, 1);
     153MODULE_DEPEND(re, miibus, 1, 1, 1);
     154
     155/* "device miibus" required.  See GENERIC if you get errors here. */
     156#include "miibus_if.h"
     157
     158/* Tunables. */
     159static int msi_disable = 1;
     160TUNABLE_INT("hw.re.msi_disable", &msi_disable);
     161
     162#define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
     163
     164/*
     165 * Various supported device vendors/types and their names.
     166 */
     167static struct rl_type re_devs[] = {
     168    { DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
     169        "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
     170    { RT_VENDORID, RT_DEVICEID_8139, 0,
     171        "RealTek 8139C+ 10/100BaseTX" },
     172    { RT_VENDORID, RT_DEVICEID_8101E, 0,
     173        "RealTek 8101E/8102E/8102EL PCIe 10/100baseTX" },
     174    { RT_VENDORID, RT_DEVICEID_8168, 0,
     175        "RealTek 8168/8168B/8168C/8168CP/8111B/8111C/8111CP PCIe "
     176        "Gigabit Ethernet" },
     177    { RT_VENDORID, RT_DEVICEID_8169, 0,
     178        "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
     179    { RT_VENDORID, RT_DEVICEID_8169SC, 0,
     180        "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
     181    { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
     182        "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
     183    { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
     184        "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
     185    { USR_VENDORID, USR_DEVICEID_997902, 0,
     186        "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
     187};
     188
     189static struct rl_hwrev re_hwrevs[] = {
     190    { RL_HWREV_8139, RL_8139,  "" },
     191    { RL_HWREV_8139A, RL_8139, "A" },
     192    { RL_HWREV_8139AG, RL_8139, "A-G" },
     193    { RL_HWREV_8139B, RL_8139, "B" },
     194    { RL_HWREV_8130, RL_8139, "8130" },
     195    { RL_HWREV_8139C, RL_8139, "C" },
     196    { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
     197    { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
     198    { RL_HWREV_8168_SPIN1, RL_8169, "8168"},
     199    { RL_HWREV_8169, RL_8169, "8169"},
     200    { RL_HWREV_8169S, RL_8169, "8169S"},
     201    { RL_HWREV_8110S, RL_8169, "8110S"},
     202    { RL_HWREV_8169_8110SB, RL_8169, "8169SB"},
     203    { RL_HWREV_8169_8110SC, RL_8169, "8169SC"},
     204    { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL"},
     205    { RL_HWREV_8100, RL_8139, "8100"},
     206    { RL_HWREV_8101, RL_8139, "8101"},
     207    { RL_HWREV_8100E, RL_8169, "8100E"},
     208    { RL_HWREV_8101E, RL_8169, "8101E"},
     209    { RL_HWREV_8102E, RL_8169, "8102E"},
     210    { RL_HWREV_8102EL, RL_8169, "8102EL"},
     211    { RL_HWREV_8168_SPIN2, RL_8169, "8168"},
     212    { RL_HWREV_8168_SPIN3, RL_8169, "8168"},
     213    { RL_HWREV_8168C, RL_8169, "8168C/8111C"},
     214    { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C"},
     215    { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP"},
     216    { 0, 0, NULL }
     217};
     218
     219static int re_probe     (device_t);
     220static int re_attach        (device_t);
     221static int re_detach        (device_t);
     222
     223static struct mbuf *re_defrag   (struct mbuf *, int, int);
     224static int re_encap     (struct rl_softc *, struct mbuf **);
     225
     226static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
     227static int re_allocmem      (device_t, struct rl_softc *);
     228static __inline void re_discard_rxbuf
     229                (struct rl_softc *, int);
     230static int re_newbuf        (struct rl_softc *, int);
     231static int re_rx_list_init  (struct rl_softc *);
     232static int re_tx_list_init  (struct rl_softc *);
     233#ifdef RE_FIXUP_RX
     234static __inline void re_fixup_rx
     235                (struct mbuf *);
     236#endif
     237static int re_rxeof     (struct rl_softc *);
     238static void re_txeof        (struct rl_softc *);
     239#ifdef DEVICE_POLLING
     240static void re_poll     (struct ifnet *, enum poll_cmd, int);
     241static void re_poll_locked  (struct ifnet *, enum poll_cmd, int);
     242#endif
     243static int re_intr      (void *);
     244static void re_tick     (void *);
     245static void re_tx_task      (void *, int);
     246static void re_int_task     (void *, int);
     247static void re_start        (struct ifnet *);
     248static int re_ioctl     (struct ifnet *, u_long, caddr_t);
     249static void re_init     (void *);
     250static void re_init_locked  (struct rl_softc *);
     251static void re_stop     (struct rl_softc *);
     252static void re_watchdog     (struct rl_softc *);
     253static int re_suspend       (device_t);
     254static int re_resume        (device_t);
     255static int re_shutdown      (device_t);
     256static int re_ifmedia_upd   (struct ifnet *);
     257static void re_ifmedia_sts  (struct ifnet *, struct ifmediareq *);
     258
     259static void re_eeprom_putbyte   (struct rl_softc *, int);
     260static void re_eeprom_getword   (struct rl_softc *, int, u_int16_t *);
     261static void re_read_eeprom  (struct rl_softc *, caddr_t, int, int);
     262static int re_gmii_readreg  (device_t, int, int);
     263static int re_gmii_writereg (device_t, int, int, int);
     264
     265static int re_miibus_readreg    (device_t, int, int);
     266static int re_miibus_writereg   (device_t, int, int, int);
     267static void re_miibus_statchg   (device_t);
     268
     269static void re_setmulti     (struct rl_softc *);
     270static void re_reset        (struct rl_softc *);
     271static void re_setwol       (struct rl_softc *);
     272static void re_clrwol       (struct rl_softc *);
     273
     274#ifdef RE_DIAG
     275static int re_diag      (struct rl_softc *);
     276#endif
     277
     278static device_method_t re_methods[] = {
     279    /* Device interface */
     280    DEVMETHOD(device_probe,     re_probe),
     281    DEVMETHOD(device_attach,    re_attach),
     282    DEVMETHOD(device_detach,    re_detach),
     283    DEVMETHOD(device_suspend,   re_suspend),
     284    DEVMETHOD(device_resume,    re_resume),
     285    DEVMETHOD(device_shutdown,  re_shutdown),
     286
     287    /* bus interface */
     288    DEVMETHOD(bus_print_child,  bus_generic_print_child),
     289    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
     290
     291    /* MII interface */
     292    DEVMETHOD(miibus_readreg,   re_miibus_readreg),
     293    DEVMETHOD(miibus_writereg,  re_miibus_writereg),
     294    DEVMETHOD(miibus_statchg,   re_miibus_statchg),
     295
     296    { 0, 0 }
     297};
     298
     299static driver_t re_driver = {
     300    "re",
     301    re_methods,
     302    sizeof(struct rl_softc)
     303};
     304
     305static devclass_t re_devclass;
     306
     307DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
     308DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0);
     309DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
     310
     311#define EE_SET(x)                   \
     312    CSR_WRITE_1(sc, RL_EECMD,           \
     313        CSR_READ_1(sc, RL_EECMD) | x)
     314
     315#define EE_CLR(x)                   \
     316    CSR_WRITE_1(sc, RL_EECMD,           \
     317        CSR_READ_1(sc, RL_EECMD) & ~x)
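
/*
 * EE_SET() and EE_CLR() toggle individual control bits in RL_EECMD with a
 * read-modify-write, leaving the other bits untouched; they are the
 * primitives the bit-banged EEPROM access routines below are built on.
 */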
     318
     319/*
     320 * Send a read command and address to the EEPROM, check for ACK.
     321 */
     322static void
     323re_eeprom_putbyte(struct rl_softc *sc, int addr)
     324{
     325    int         d, i;
     326
     327    d = addr | (RL_9346_READ << sc->rl_eewidth);
     328
     329    /*
     330     * Feed in each bit and strobe the clock.
     331     */
     332
     333    for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
     334        if (d & i) {
     335            EE_SET(RL_EE_DATAIN);
     336        } else {
     337            EE_CLR(RL_EE_DATAIN);
     338        }
     339        DELAY(100);
     340        EE_SET(RL_EE_CLK);
     341        DELAY(150);
     342        EE_CLR(RL_EE_CLK);
     343        DELAY(100);
     344    }
     345}
     346
     347/*
     348 * Read a word of data stored in the EEPROM at address 'addr.'
     349 */
     350static void
     351re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
     352{
     353    int         i;
     354    u_int16_t       word = 0;
     355
     356    /*
     357     * Send address of word we want to read.
     358     */
     359    re_eeprom_putbyte(sc, addr);
     360
     361    /*
     362     * Start reading bits from EEPROM.
     363     */
     364    for (i = 0x8000; i; i >>= 1) {
     365        EE_SET(RL_EE_CLK);
     366        DELAY(100);
     367        if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
     368            word |= i;
     369        EE_CLR(RL_EE_CLK);
     370        DELAY(100);
     371    }
     372
     373    *dest = word;
     374}
     375
     376/*
     377 * Read a sequence of words from the EEPROM.
     378 */
     379static void
     380re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
     381{
     382    int         i;
     383    u_int16_t       word = 0, *ptr;
     384
     385    CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
     386
      387    DELAY(100);
     388
     389    for (i = 0; i < cnt; i++) {
     390        CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
     391        re_eeprom_getword(sc, off + i, &word);
     392        CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
     393        ptr = (u_int16_t *)(dest + (i * 2));
      394        *ptr = word;
     395    }
     396
     397    CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
     398}
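
/*
 * Usage sketch only; the helper below is hypothetical and not called
 * anywhere in this driver. re_attach() extracts the station address the
 * same way, reading three little-endian words starting at RL_EE_EADDR
 * once the EEPROM address width has been determined.
 */
#if 0
static void
re_get_eaddr_sketch(struct rl_softc *sc, u_char *eaddr)
{
    u_int16_t       as[ETHER_ADDR_LEN / 2];
    int         i;

    re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, ETHER_ADDR_LEN / 2);
    for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
        as[i] = le16toh(as[i]);
    bcopy(as, eaddr, ETHER_ADDR_LEN);
}
#endif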
     399
     400static int
     401re_gmii_readreg(device_t dev, int phy, int reg)
     402{
     403    struct rl_softc     *sc;
     404    u_int32_t       rval;
     405    int         i;
     406
     407    if (phy != 1)
     408        return (0);
     409
     410    sc = device_get_softc(dev);
     411
     412    /* Let the rgephy driver read the GMEDIASTAT register */
     413
     414    if (reg == RL_GMEDIASTAT) {
     415        rval = CSR_READ_1(sc, RL_GMEDIASTAT);
     416        return (rval);
     417    }
     418
     419    CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
     420    DELAY(1000);
     421
     422    for (i = 0; i < RL_TIMEOUT; i++) {
     423        rval = CSR_READ_4(sc, RL_PHYAR);
     424        if (rval & RL_PHYAR_BUSY)
     425            break;
     426        DELAY(100);
     427    }
     428
     429    if (i == RL_TIMEOUT) {
     430        device_printf(sc->rl_dev, "PHY read failed\n");
     431        return (0);
     432    }
     433
     434    return (rval & RL_PHYAR_PHYDATA);
     435}
     436
     437static int
     438re_gmii_writereg(device_t dev, int phy, int reg, int data)
     439{
     440    struct rl_softc     *sc;
     441    u_int32_t       rval;
     442    int         i;
     443
     444    sc = device_get_softc(dev);
     445
     446    CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
     447        (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
     448    DELAY(1000);
     449
     450    for (i = 0; i < RL_TIMEOUT; i++) {
     451        rval = CSR_READ_4(sc, RL_PHYAR);
     452        if (!(rval & RL_PHYAR_BUSY))
     453            break;
     454        DELAY(100);
     455    }
     456
     457    if (i == RL_TIMEOUT) {
     458        device_printf(sc->rl_dev, "PHY write failed\n");
     459        return (0);
     460    }
     461
     462    return (0);
     463}
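
/*
 * Illustrative sketch; the helper below is hypothetical and unused. Polling
 * a PHY register through the GMII accessors above follows the same pattern
 * as the accessors themselves, e.g. waiting for the BMSR link bit on the
 * 8169-class parts, whose internal PHY answers at address 1.
 */
#if 0
static int
re_gmii_wait_link_sketch(device_t dev)
{
    int         i;

    for (i = 0; i < RL_TIMEOUT; i++) {
        if (re_gmii_readreg(dev, 1, MII_BMSR) & BMSR_LINK)
            return (1);
        DELAY(1000);
    }
    return (0);
}
#endif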
     464
     465static int
     466re_miibus_readreg(device_t dev, int phy, int reg)
     467{
     468    struct rl_softc     *sc;
     469    u_int16_t       rval = 0;
     470    u_int16_t       re8139_reg = 0;
     471
     472    sc = device_get_softc(dev);
     473
     474    if (sc->rl_type == RL_8169) {
     475        rval = re_gmii_readreg(dev, phy, reg);
     476        return (rval);
     477    }
     478
     479    /* Pretend the internal PHY is only at address 0 */
     480    if (phy) {
     481        return (0);
     482    }
     483    switch (reg) {
     484    case MII_BMCR:
     485        re8139_reg = RL_BMCR;
     486        break;
     487    case MII_BMSR:
     488        re8139_reg = RL_BMSR;
     489        break;
     490    case MII_ANAR:
     491        re8139_reg = RL_ANAR;
     492        break;
     493    case MII_ANER:
     494        re8139_reg = RL_ANER;
     495        break;
     496    case MII_ANLPAR:
     497        re8139_reg = RL_LPAR;
     498        break;
     499    case MII_PHYIDR1:
     500    case MII_PHYIDR2:
     501        return (0);
     502    /*
     503     * Allow the rlphy driver to read the media status
     504     * register. If we have a link partner which does not
     505     * support NWAY, this is the register which will tell
     506     * us the results of parallel detection.
     507     */
     508    case RL_MEDIASTAT:
     509        rval = CSR_READ_1(sc, RL_MEDIASTAT);
     510        return (rval);
     511    default:
     512        device_printf(sc->rl_dev, "bad phy register\n");
     513        return (0);
     514    }
     515    rval = CSR_READ_2(sc, re8139_reg);
     516    if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
     517        /* 8139C+ has different bit layout. */
     518        rval &= ~(BMCR_LOOP | BMCR_ISO);
     519    }
     520    return (rval);
     521}
     522
     523static int
     524re_miibus_writereg(device_t dev, int phy, int reg, int data)
     525{
     526    struct rl_softc     *sc;
     527    u_int16_t       re8139_reg = 0;
     528    int         rval = 0;
     529
     530    sc = device_get_softc(dev);
     531
     532    if (sc->rl_type == RL_8169) {
     533        rval = re_gmii_writereg(dev, phy, reg, data);
     534        return (rval);
     535    }
     536
     537    /* Pretend the internal PHY is only at address 0 */
     538    if (phy)
     539        return (0);
     540
     541    switch (reg) {
     542    case MII_BMCR:
     543        re8139_reg = RL_BMCR;
     544        if (sc->rl_type == RL_8139CPLUS) {
     545            /* 8139C+ has different bit layout. */
     546            data &= ~(BMCR_LOOP | BMCR_ISO);
     547        }
     548        break;
     549    case MII_BMSR:
     550        re8139_reg = RL_BMSR;
     551        break;
     552    case MII_ANAR:
     553        re8139_reg = RL_ANAR;
     554        break;
     555    case MII_ANER:
     556        re8139_reg = RL_ANER;
     557        break;
     558    case MII_ANLPAR:
     559        re8139_reg = RL_LPAR;
     560        break;
     561    case MII_PHYIDR1:
     562    case MII_PHYIDR2:
     563        return (0);
     564        break;
     565    default:
     566        device_printf(sc->rl_dev, "bad phy register\n");
     567        return (0);
     568    }
     569    CSR_WRITE_2(sc, re8139_reg, data);
     570    return (0);
     571}
     572
     573static void
     574re_miibus_statchg(device_t dev)
     575{
     576
     577}
     578
     579/*
     580 * Program the 64-bit multicast hash filter.
     581 */
     582static void
     583re_setmulti(struct rl_softc *sc)
     584{
     585    struct ifnet        *ifp;
     586    int         h = 0;
     587    u_int32_t       hashes[2] = { 0, 0 };
     588    struct ifmultiaddr  *ifma;
     589    u_int32_t       rxfilt;
     590    int         mcnt = 0;
     591
     592    RL_LOCK_ASSERT(sc);
     593
     594    ifp = sc->rl_ifp;
     595
     596
     597    rxfilt = CSR_READ_4(sc, RL_RXCFG);
     598    rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_MULTI);
     599    if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
     600        if (ifp->if_flags & IFF_PROMISC)
     601            rxfilt |= RL_RXCFG_RX_ALLPHYS;
     602        /*
      603         * Unlike other hardware, we have to explicitly set
     604         * RL_RXCFG_RX_MULTI to receive multicast frames in
     605         * promiscuous mode.
     606         */
     607        rxfilt |= RL_RXCFG_RX_MULTI;
     608        CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
     609        CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
     610        CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
     611        return;
     612    }
     613
     614    /* first, zot all the existing hash bits */
     615    CSR_WRITE_4(sc, RL_MAR0, 0);
     616    CSR_WRITE_4(sc, RL_MAR4, 0);
     617
     618    /* now program new ones */
     619    IF_ADDR_LOCK(ifp);
     620    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
     621        if (ifma->ifma_addr->sa_family != AF_LINK)
     622            continue;
     623        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
     624            ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
     625        if (h < 32)
     626            hashes[0] |= (1 << h);
     627        else
     628            hashes[1] |= (1 << (h - 32));
     629        mcnt++;
     630    }
     631    IF_ADDR_UNLOCK(ifp);
     632
     633    if (mcnt)
     634        rxfilt |= RL_RXCFG_RX_MULTI;
     635    else
     636        rxfilt &= ~RL_RXCFG_RX_MULTI;
     637
     638    CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
     639
     640    /*
     641     * For some unfathomable reason, RealTek decided to reverse
     642     * the order of the multicast hash registers in the PCI Express
     643     * parts. This means we have to write the hash pattern in reverse
     644     * order for those devices.
     645     */
     646
     647    if ((sc->rl_flags & RL_FLAG_INVMAR) != 0) {
     648        CSR_WRITE_4(sc, RL_MAR0, bswap32(hashes[1]));
     649        CSR_WRITE_4(sc, RL_MAR4, bswap32(hashes[0]));
     650    } else {
     651        CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
     652        CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
     653    }
     654}
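
/*
 * Worked sketch of the hash arithmetic above; the helper is hypothetical
 * and unused. The top six bits of the big-endian CRC of a group address
 * select one of 64 filter bits, the low 32 of which live in RL_MAR0 and
 * the high 32 in RL_MAR4; on the PCI Express parts re_setmulti() writes
 * them in reversed, byte-swapped order.
 */
#if 0
static void
re_hash_position_sketch(const u_int8_t *maddr, int *reg, int *bit)
{
    int         h;

    h = ether_crc32_be(maddr, ETHER_ADDR_LEN) >> 26;
    *reg = (h < 32) ? RL_MAR0 : RL_MAR4;
    *bit = h & 0x1f;
}
#endif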
     655
     656static void
     657re_reset(struct rl_softc *sc)
     658{
     659    int         i;
     660
     661    RL_LOCK_ASSERT(sc);
     662
     663    CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
     664
     665    for (i = 0; i < RL_TIMEOUT; i++) {
     666        DELAY(10);
     667        if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
     668            break;
     669    }
     670    if (i == RL_TIMEOUT)
     671        device_printf(sc->rl_dev, "reset never completed!\n");
     672
     673    CSR_WRITE_1(sc, 0x82, 1);
     674}
     675
     676#ifdef RE_DIAG
     677
     678/*
     679 * The following routine is designed to test for a defect on some
     680 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
     681 * lines connected to the bus, however for a 32-bit only card, they
     682 * should be pulled high. The result of this defect is that the
     683 * NIC will not work right if you plug it into a 64-bit slot: DMA
     684 * operations will be done with 64-bit transfers, which will fail
     685 * because the 64-bit data lines aren't connected.
     686 *
      687 * There's no way to work around this (short of taking a soldering
      688 * iron to the board); however, we can detect it. The method we use
     689 * here is to put the NIC into digital loopback mode, set the receiver
     690 * to promiscuous mode, and then try to send a frame. We then compare
     691 * the frame data we sent to what was received. If the data matches,
     692 * then the NIC is working correctly, otherwise we know the user has
     693 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
     694 * slot. In the latter case, there's no way the NIC can work correctly,
     695 * so we print out a message on the console and abort the device attach.
     696 */
     697
     698static int
     699re_diag(struct rl_softc *sc)
     700{
     701    struct ifnet        *ifp = sc->rl_ifp;
     702    struct mbuf     *m0;
     703    struct ether_header *eh;
     704    struct rl_desc      *cur_rx;
     705    u_int16_t       status;
     706    u_int32_t       rxstat;
     707    int         total_len, i, error = 0, phyaddr;
     708    u_int8_t        dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
     709    u_int8_t        src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
     710
     711    /* Allocate a single mbuf */
     712    MGETHDR(m0, M_DONTWAIT, MT_DATA);
     713    if (m0 == NULL)
     714        return (ENOBUFS);
     715
     716    RL_LOCK(sc);
     717
     718    /*
     719     * Initialize the NIC in test mode. This sets the chip up
     720     * so that it can send and receive frames, but performs the
     721     * following special functions:
     722     * - Puts receiver in promiscuous mode
     723     * - Enables digital loopback mode
     724     * - Leaves interrupts turned off
     725     */
     726
     727    ifp->if_flags |= IFF_PROMISC;
     728    sc->rl_testmode = 1;
     729    re_reset(sc);
     730    re_init_locked(sc);
     731    sc->rl_flags |= RL_FLAG_LINK;
     732    if (sc->rl_type == RL_8169)
     733        phyaddr = 1;
     734    else
     735        phyaddr = 0;
     736
     737    re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
     738    for (i = 0; i < RL_TIMEOUT; i++) {
     739        status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
     740        if (!(status & BMCR_RESET))
     741            break;
     742    }
     743
     744    re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
     745    CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
     746
     747    DELAY(100000);
     748
     749    /* Put some data in the mbuf */
     750
     751    eh = mtod(m0, struct ether_header *);
     752    bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
     753    bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
     754    eh->ether_type = htons(ETHERTYPE_IP);
     755    m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
     756
     757    /*
     758     * Queue the packet, start transmission.
     759     * Note: IF_HANDOFF() ultimately calls re_start() for us.
     760     */
     761
     762    CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
     763    RL_UNLOCK(sc);
     764    /* XXX: re_diag must not be called when in ALTQ mode */
     765    IF_HANDOFF(&ifp->if_snd, m0, ifp);
     766    RL_LOCK(sc);
     767    m0 = NULL;
     768
     769    /* Wait for it to propagate through the chip */
     770
     771    DELAY(100000);
     772    for (i = 0; i < RL_TIMEOUT; i++) {
     773        status = CSR_READ_2(sc, RL_ISR);
     774        CSR_WRITE_2(sc, RL_ISR, status);
     775        if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
     776            (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
     777            break;
     778        DELAY(10);
     779    }
     780
     781    if (i == RL_TIMEOUT) {
     782        device_printf(sc->rl_dev,
     783            "diagnostic failed, failed to receive packet in"
     784            " loopback mode\n");
     785        error = EIO;
     786        goto done;
     787    }
     788
     789    /*
     790     * The packet should have been dumped into the first
     791     * entry in the RX DMA ring. Grab it from there.
     792     */
     793
     794    bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
     795        sc->rl_ldata.rl_rx_list_map,
     796        BUS_DMASYNC_POSTREAD);
     797    bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
     798        sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
     799        BUS_DMASYNC_POSTREAD);
     800    bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
     801        sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
     802
     803    m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
     804    sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
     805    eh = mtod(m0, struct ether_header *);
     806
     807    cur_rx = &sc->rl_ldata.rl_rx_list[0];
     808    total_len = RL_RXBYTES(cur_rx);
     809    rxstat = le32toh(cur_rx->rl_cmdstat);
     810
     811    if (total_len != ETHER_MIN_LEN) {
     812        device_printf(sc->rl_dev,
     813            "diagnostic failed, received short packet\n");
     814        error = EIO;
     815        goto done;
     816    }
     817
     818    /* Test that the received packet data matches what we sent. */
     819
     820    if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
     821        bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
     822        ntohs(eh->ether_type) != ETHERTYPE_IP) {
     823        device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
     824        device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
     825            dst, ":", src, ":", ETHERTYPE_IP);
     826        device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
     827            eh->ether_dhost, ":",  eh->ether_shost, ":",
     828            ntohs(eh->ether_type));
     829        device_printf(sc->rl_dev, "You may have a defective 32-bit "
     830            "NIC plugged into a 64-bit PCI slot.\n");
     831        device_printf(sc->rl_dev, "Please re-install the NIC in a "
     832            "32-bit slot for proper operation.\n");
     833        device_printf(sc->rl_dev, "Read the re(4) man page for more "
     834            "details.\n");
     835        error = EIO;
     836    }
     837
     838done:
     839    /* Turn interface off, release resources */
     840
     841    sc->rl_testmode = 0;
     842    sc->rl_flags &= ~RL_FLAG_LINK;
     843    ifp->if_flags &= ~IFF_PROMISC;
     844    re_stop(sc);
     845    if (m0 != NULL)
     846        m_freem(m0);
     847
     848    RL_UNLOCK(sc);
     849
     850    return (error);
     851}
     852
     853#endif
     854
     855/*
     856 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
     857 * IDs against our list and return a device name if we find a match.
     858 */
     859static int
     860re_probe(device_t dev)
     861{
     862    struct rl_type      *t;
     863    uint16_t        devid, vendor;
     864    uint16_t        revid, sdevid;
     865    int         i;
     866   
     867    vendor = pci_get_vendor(dev);
     868    devid = pci_get_device(dev);
     869    revid = pci_get_revid(dev);
     870    sdevid = pci_get_subdevice(dev);
     871
     872    if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
     873        if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
     874            /*
     875             * Only attach to rev. 3 of the Linksys EG1032 adapter.
     876             * Rev. 2 is supported by sk(4).
     877             */
     878            return (ENXIO);
     879        }
     880    }
     881
     882    if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
     883        if (revid != 0x20) {
     884            /* 8139, let rl(4) take care of this device. */
     885            return (ENXIO);
     886        }
     887    }
     888
     889    t = re_devs;
     890    for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
     891        if (vendor == t->rl_vid && devid == t->rl_did) {
     892            device_set_desc(dev, t->rl_name);
     893            return (BUS_PROBE_DEFAULT);
     894        }
     895    }
     896
     897    return (ENXIO);
     898}
     899
     900/*
     901 * Map a single buffer address.
     902 */
     903
     904static void
     905re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
     906{
     907    bus_addr_t      *addr;
     908
     909    if (error)
     910        return;
     911
     912    KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
     913    addr = arg;
     914    *addr = segs->ds_addr;
     915}
     916
     917static int
     918re_allocmem(device_t dev, struct rl_softc *sc)
     919{
     920    bus_size_t      rx_list_size, tx_list_size;
     921    int         error;
     922    int         i;
     923
     924    rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
     925    tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
     926
     927    /*
     928     * Allocate the parent bus DMA tag appropriate for PCI.
      929     * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
      930     * RL_CPLUS_CMD register must be set. However, some RealTek
      931     * chips are known to have buggy DAC handling, so disable DAC
      932     * by limiting the DMA address space to 32 bits. PCIe variants
      933     * may not have this limitation, but I took the safer path.
     934     */
     935    error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
     936        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
     937        BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
     938        NULL, NULL, &sc->rl_parent_tag);
     939    if (error) {
     940        device_printf(dev, "could not allocate parent DMA tag\n");
     941        return (error);
     942    }
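
    /*
     * For reference only, a sketch of the DAC path the comment above
     * declines to take: a parent tag spanning the full address space,
     * combined with setting the RL_CPLUSCMD_PCI_DAC bit when RL_CPLUS_CMD
     * is programmed. Left disabled because of the DAC bugs noted above.
     */
#if 0
    error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0,
        NULL, NULL, &sc->rl_parent_tag);
    CSR_WRITE_2(sc, RL_CPLUS_CMD,
        CSR_READ_2(sc, RL_CPLUS_CMD) | RL_CPLUSCMD_PCI_DAC);
#endif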
     943
     944    /*
     945     * Allocate map for TX mbufs.
     946     */
     947    error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
     948        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
     949        NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
     950        NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
     951    if (error) {
     952        device_printf(dev, "could not allocate TX DMA tag\n");
     953        return (error);
     954    }
     955
     956    /*
     957     * Allocate map for RX mbufs.
     958     */
     959
     960    error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
     961        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
     962        MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
     963    if (error) {
     964        device_printf(dev, "could not allocate RX DMA tag\n");
     965        return (error);
     966    }
     967
     968    /*
     969     * Allocate map for TX descriptor list.
     970     */
     971    error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
     972        0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
     973        NULL, tx_list_size, 1, tx_list_size, 0,
     974        NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
     975    if (error) {
     976        device_printf(dev, "could not allocate TX DMA ring tag\n");
     977        return (error);
     978    }
     979
     980    /* Allocate DMA'able memory for the TX ring */
     981
     982    error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
     983        (void **)&sc->rl_ldata.rl_tx_list,
     984        BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
     985        &sc->rl_ldata.rl_tx_list_map);
     986    if (error) {
     987        device_printf(dev, "could not allocate TX DMA ring\n");
     988        return (error);
     989    }
     990
     991    /* Load the map for the TX ring. */
     992
     993    sc->rl_ldata.rl_tx_list_addr = 0;
     994    error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
     995         sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
     996         tx_list_size, re_dma_map_addr,
     997         &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
     998    if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
     999        device_printf(dev, "could not load TX DMA ring\n");
     1000        return (ENOMEM);
     1001    }
     1002
     1003    /* Create DMA maps for TX buffers */
     1004
     1005    for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
     1006        error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
     1007            &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
     1008        if (error) {
     1009            device_printf(dev, "could not create DMA map for TX\n");
     1010            return (error);
     1011        }
     1012    }
     1013
     1014    /*
     1015     * Allocate map for RX descriptor list.
     1016     */
     1017    error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
     1018        0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
     1019        NULL, rx_list_size, 1, rx_list_size, 0,
     1020        NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
     1021    if (error) {
     1022        device_printf(dev, "could not create RX DMA ring tag\n");
     1023        return (error);
     1024    }
     1025
     1026    /* Allocate DMA'able memory for the RX ring */
     1027
     1028    error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
     1029        (void **)&sc->rl_ldata.rl_rx_list,
     1030        BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
     1031        &sc->rl_ldata.rl_rx_list_map);
     1032    if (error) {
     1033        device_printf(dev, "could not allocate RX DMA ring\n");
     1034        return (error);
     1035    }
     1036
     1037    /* Load the map for the RX ring. */
     1038
     1039    sc->rl_ldata.rl_rx_list_addr = 0;
     1040    error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
     1041         sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
     1042         rx_list_size, re_dma_map_addr,
     1043         &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
     1044    if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
     1045        device_printf(dev, "could not load RX DMA ring\n");
     1046        return (ENOMEM);
     1047    }
     1048
     1049    /* Create DMA maps for RX buffers */
     1050
     1051    error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
     1052        &sc->rl_ldata.rl_rx_sparemap);
     1053    if (error) {
     1054        device_printf(dev, "could not create spare DMA map for RX\n");
     1055        return (error);
     1056    }
     1057    for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
     1058        error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
     1059            &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
     1060        if (error) {
     1061            device_printf(dev, "could not create DMA map for RX\n");
     1062            return (error);
     1063        }
     1064    }
     1065
     1066    return (0);
     1067}
     1068
     1069/*
     1070 * Attach the interface. Allocate softc structures, do ifmedia
     1071 * setup and ethernet/BPF attach.
     1072 */
     1073static int
     1074re_attach(device_t dev)
     1075{
     1076    u_char          eaddr[ETHER_ADDR_LEN];
     1077    u_int16_t       as[ETHER_ADDR_LEN / 2];
     1078    struct rl_softc     *sc;
     1079    struct ifnet        *ifp;
     1080    struct rl_hwrev     *hw_rev;
     1081    int         hwrev;
     1082    u_int16_t       devid, re_did = 0;
     1083    int         error = 0, rid, i;
     1084    int         msic, reg;
     1085    uint8_t         cfg;
     1086
     1087    sc = device_get_softc(dev);
     1088    sc->rl_dev = dev;
     1089
     1090    mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
     1091        MTX_DEF);
     1092    callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
     1093
     1094    /*
     1095     * Map control/status registers.
     1096     */
     1097    pci_enable_busmaster(dev);
     1098
     1099    devid = pci_get_device(dev);
     1100    /* Prefer memory space register mapping over IO space. */
     1101    sc->rl_res_id = PCIR_BAR(1);
     1102    sc->rl_res_type = SYS_RES_MEMORY;
     1103    /* RTL8168/8101E seems to use different BARs. */
     1104    if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
     1105        sc->rl_res_id = PCIR_BAR(2);
     1106    sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
     1107        &sc->rl_res_id, RF_ACTIVE);
     1108
     1109    if (sc->rl_res == NULL) {
     1110        sc->rl_res_id = PCIR_BAR(0);
     1111        sc->rl_res_type = SYS_RES_IOPORT;
     1112        sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
     1113            &sc->rl_res_id, RF_ACTIVE);
     1114        if (sc->rl_res == NULL) {
     1115            device_printf(dev, "couldn't map ports/memory\n");
     1116            error = ENXIO;
     1117            goto fail;
     1118        }
     1119    }
     1120
     1121    sc->rl_btag = rman_get_bustag(sc->rl_res);
     1122    sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
     1123
     1124    msic = 0;
     1125    if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
     1126        msic = pci_msi_count(dev);
     1127        if (bootverbose)
      1128            device_printf(dev, "MSI count: %d\n", msic);
     1129    }
      1130    if (msic == RL_MSI_MESSAGES && msi_disable == 0) {
     1131        if (pci_alloc_msi(dev, &msic) == 0) {
     1132            if (msic == RL_MSI_MESSAGES) {
     1133                device_printf(dev, "Using %d MSI messages\n",
     1134                    msic);
     1135                sc->rl_flags |= RL_FLAG_MSI;
     1136                /* Explicitly set MSI enable bit. */
     1137                CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
     1138                cfg = CSR_READ_1(sc, RL_CFG2);
     1139                cfg |= RL_CFG2_MSI;
     1140                CSR_WRITE_1(sc, RL_CFG2, cfg);
     1141                CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
     1142            } else
     1143                pci_release_msi(dev);
     1144        }
     1145    }
     1146
     1147    /* Allocate interrupt */
     1148    if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
     1149        rid = 0;
     1150        sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
     1151            RF_SHAREABLE | RF_ACTIVE);
     1152        if (sc->rl_irq[0] == NULL) {
     1153            device_printf(dev, "couldn't allocate IRQ resources\n");
     1154            error = ENXIO;
     1155            goto fail;
     1156        }
     1157    } else {
     1158        for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
     1159            sc->rl_irq[i] = bus_alloc_resource_any(dev,
     1160                SYS_RES_IRQ, &rid, RF_ACTIVE);
     1161            if (sc->rl_irq[i] == NULL) {
     1162                device_printf(dev,
      1163                    "couldn't allocate IRQ resources for "
     1164                    "message %d\n", rid);
     1165                error = ENXIO;
     1166                goto fail;
     1167            }
     1168        }
     1169    }
     1170
     1171    if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
     1172        CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
     1173        cfg = CSR_READ_1(sc, RL_CFG2);
     1174        if ((cfg & RL_CFG2_MSI) != 0) {
     1175            device_printf(dev, "turning off MSI enable bit.\n");
     1176            cfg &= ~RL_CFG2_MSI;
     1177            CSR_WRITE_1(sc, RL_CFG2, cfg);
     1178        }
     1179        CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
     1180    }
     1181
     1182    /* Reset the adapter. */
     1183    RL_LOCK(sc);
     1184    re_reset(sc);
     1185    RL_UNLOCK(sc);
     1186
     1187    hw_rev = re_hwrevs;
     1188    hwrev = CSR_READ_4(sc, RL_TXCFG);
     1189    device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
     1190    device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
     1191    hwrev &= RL_TXCFG_HWREV;
     1192    while (hw_rev->rl_desc != NULL) {
     1193        if (hw_rev->rl_rev == hwrev) {
     1194            sc->rl_type = hw_rev->rl_type;
     1195            break;
     1196        }
     1197        hw_rev++;
     1198    }
     1199    if (hw_rev->rl_desc == NULL) {
     1200        device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
     1201        error = ENXIO;
     1202        goto fail;
     1203    }
     1204
     1205    switch (hw_rev->rl_rev) {
     1206    case RL_HWREV_8139CPLUS:
     1207        sc->rl_flags |= RL_FLAG_NOJUMBO;
     1208        break;
     1209    case RL_HWREV_8100E:
     1210    case RL_HWREV_8101E:
     1211        sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR |
     1212            RL_FLAG_PHYWAKE;
     1213        break;
     1214    case RL_HWREV_8102E:
     1215    case RL_HWREV_8102EL:
     1216        sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR |
     1217            RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
     1218            RL_FLAG_MACSTAT;
     1219        break;
     1220    case RL_HWREV_8168_SPIN1:
     1221    case RL_HWREV_8168_SPIN2:
     1222    case RL_HWREV_8168_SPIN3:
     1223        sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE |
     1224            RL_FLAG_MACSTAT;
     1225        break;
     1226    case RL_HWREV_8168C:
     1227    case RL_HWREV_8168C_SPIN2:
     1228    case RL_HWREV_8168CP:
     1229        sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE |
     1230            RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT;
     1231        /*
      1232         * These controllers support jumbo frames, but it seems
      1233         * that enabling the feature requires touching additional
      1234         * magic registers. Depending on the MAC revision, some
      1235         * controllers need checksum offload disabled. So leave
      1236         * jumbo frames disabled until I have a better idea of
      1237         * what is really required to support them.
      1238         * RTL8168C/CP: supports up to 6KB jumbo frames.
      1239         * RTL8111C/CP: supports up to 9KB jumbo frames.
      1240         */
     1241        sc->rl_flags |= RL_FLAG_NOJUMBO;
     1242        break;
     1243    case RL_HWREV_8169_8110SB:
     1244    case RL_HWREV_8169_8110SC:
     1245    case RL_HWREV_8169_8110SBL:
     1246        sc->rl_flags |= RL_FLAG_PHYWAKE;
     1247        break;
     1248    default:
     1249        break;
     1250    }
     1251
     1252    /* Enable PME. */
     1253    CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
     1254    cfg = CSR_READ_1(sc, RL_CFG1);
     1255    cfg |= RL_CFG1_PME;
     1256    CSR_WRITE_1(sc, RL_CFG1, cfg);
     1257    cfg = CSR_READ_1(sc, RL_CFG5);
     1258    cfg &= RL_CFG5_PME_STS;
     1259    CSR_WRITE_1(sc, RL_CFG5, cfg);
     1260    CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
     1261
     1262    if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
     1263        /*
     1264         * XXX Should have a better way to extract station
     1265         * address from EEPROM.
     1266         */
     1267        for (i = 0; i < ETHER_ADDR_LEN; i++)
     1268            eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
     1269    } else {
     1270        sc->rl_eewidth = RL_9356_ADDR_LEN;
     1271        re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
     1272        if (re_did != 0x8129)
     1273            sc->rl_eewidth = RL_9346_ADDR_LEN;
     1274
     1275        /*
     1276         * Get station address from the EEPROM.
     1277         */
     1278        re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
     1279        for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
     1280            as[i] = le16toh(as[i]);
     1281        bcopy(as, eaddr, sizeof(eaddr));
     1282    }
     1283
     1284    if (sc->rl_type == RL_8169) {
     1285        /* Set RX length mask and number of descriptors. */
     1286        sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
     1287        sc->rl_txstart = RL_GTXSTART;
     1288        sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
     1289        sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
     1290    } else {
     1291        /* Set RX length mask and number of descriptors. */
     1292        sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
     1293        sc->rl_txstart = RL_TXSTART;
     1294        sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
     1295        sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
     1296    }
     1297
     1298    error = re_allocmem(dev, sc);
     1299    if (error)
     1300        goto fail;
     1301
     1302    ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
     1303    if (ifp == NULL) {
      1304        device_printf(dev, "cannot if_alloc()\n");
     1305        error = ENOSPC;
     1306        goto fail;
     1307    }
     1308
     1309    /* Take PHY out of power down mode. */
     1310    if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
     1311        re_gmii_writereg(dev, 1, 0x1f, 0);
     1312        re_gmii_writereg(dev, 1, 0x0e, 0);
     1313    }
     1314
     1315    /* Do MII setup */
     1316    if (mii_phy_probe(dev, &sc->rl_miibus,
     1317        re_ifmedia_upd, re_ifmedia_sts)) {
     1318        device_printf(dev, "MII without any phy!\n");
     1319        error = ENXIO;
     1320        goto fail;
     1321    }
     1322
     1323    ifp->if_softc = sc;
     1324    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
     1325    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
     1326    ifp->if_ioctl = re_ioctl;
     1327    ifp->if_start = re_start;
     1328    ifp->if_hwassist = RE_CSUM_FEATURES;
     1329    ifp->if_capabilities = IFCAP_HWCSUM;
     1330    ifp->if_capenable = ifp->if_capabilities;
     1331    ifp->if_init = re_init;
     1332    IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
     1333    ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
     1334    IFQ_SET_READY(&ifp->if_snd);
     1335
     1336    TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp);
     1337    TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
     1338
     1339    /*
     1340     * XXX
     1341     * Still have no idea how to make TSO work on 8168C, 8168CP,
     1342     * 8111C and 8111CP.
     1343     */
     1344    if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
     1345        ifp->if_hwassist |= CSUM_TSO;
     1346        ifp->if_capabilities |= IFCAP_TSO4;
     1347    }
     1348
     1349    /*
     1350     * Call MI attach routine.
     1351     */
     1352    ether_ifattach(ifp, eaddr);
     1353
     1354    /* VLAN capability setup */
     1355    ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
     1356    if (ifp->if_capabilities & IFCAP_HWCSUM)
     1357        ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
     1358#ifdef ENABLE_WOL
     1359    /* Enable WOL if PM is supported. */
     1360    if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0)
     1361        ifp->if_capabilities |= IFCAP_WOL;
     1362#endif
     1363    ifp->if_capenable = ifp->if_capabilities;
     1364    /*
     1365     * Don't enable TSO by default. Under certain
      1366     * circumstances the controller generates corrupted
      1367     * packets when TSO is used.
     1368     */
     1369    ifp->if_hwassist &= ~CSUM_TSO;
     1370    ifp->if_capenable &= ~IFCAP_TSO4;
     1371#ifdef DEVICE_POLLING
     1372    ifp->if_capabilities |= IFCAP_POLLING;
     1373#endif
     1374    /*
     1375     * Tell the upper layer(s) we support long frames.
     1376     * Must appear after the call to ether_ifattach() because
     1377     * ether_ifattach() sets ifi_hdrlen to the default value.
     1378     */
     1379    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
     1380
     1381#ifdef RE_DIAG
     1382    /*
     1383     * Perform hardware diagnostic on the original RTL8169.
     1384     * Some 32-bit cards were incorrectly wired and would
     1385     * malfunction if plugged into a 64-bit slot.
     1386     */
     1387
     1388    if (hwrev == RL_HWREV_8169) {
     1389        error = re_diag(sc);
     1390        if (error) {
     1391            device_printf(dev,
     1392                "attach aborted due to hardware diag failure\n");
     1393            ether_ifdetach(ifp);
     1394            goto fail;
     1395        }
     1396    }
     1397#endif
     1398
     1399    /* Hook interrupt last to avoid having to lock softc */
     1400    if ((sc->rl_flags & RL_FLAG_MSI) == 0)
     1401        error = bus_setup_intr(dev, sc->rl_irq[0],
     1402            INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
     1403            &sc->rl_intrhand[0]);
     1404    else {
     1405        for (i = 0; i < RL_MSI_MESSAGES; i++) {
     1406            error = bus_setup_intr(dev, sc->rl_irq[i],
     1407                INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
     1408                    &sc->rl_intrhand[i]);
     1409            if (error != 0)
     1410                break;
     1411        }
     1412    }
     1413    if (error) {
     1414        device_printf(dev, "couldn't set up irq\n");
     1415        ether_ifdetach(ifp);
     1416    }
     1417
     1418fail:
     1419
     1420    if (error)
     1421        re_detach(dev);
     1422
     1423    return (error);
     1424}
     1425
     1426/*
     1427 * Shutdown hardware and free up resources. This can be called any
     1428 * time after the mutex has been initialized. It is called in both
     1429 * the error case in attach and the normal detach case so it needs
     1430 * to be careful about only freeing resources that have actually been
     1431 * allocated.
     1432 */
     1433static int
     1434re_detach(device_t dev)
     1435{
     1436    struct rl_softc     *sc;
     1437    struct ifnet        *ifp;
     1438    int         i, rid;
     1439
     1440    sc = device_get_softc(dev);
     1441    ifp = sc->rl_ifp;
     1442    KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
     1443
     1444    /* These should only be active if attach succeeded */
     1445    if (device_is_attached(dev)) {
     1446#ifdef DEVICE_POLLING
     1447        if (ifp->if_capenable & IFCAP_POLLING)
     1448            ether_poll_deregister(ifp);
     1449#endif
     1450        RL_LOCK(sc);
     1451#if 0
     1452        sc->suspended = 1;
     1453#endif
     1454        re_stop(sc);
     1455        RL_UNLOCK(sc);
     1456        callout_drain(&sc->rl_stat_callout);
     1457        taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
     1458        taskqueue_drain(taskqueue_fast, &sc->rl_txtask);
     1459        /*
     1460         * Force off the IFF_UP flag here, in case someone
     1461         * still had a BPF descriptor attached to this
     1462         * interface. If they do, ether_ifdetach() will cause
     1463         * the BPF code to try and clear the promisc mode
     1464         * flag, which will bubble down to re_ioctl(),
     1465         * which will try to call re_init() again. This will
     1466         * turn the NIC back on and restart the MII ticker,
     1467         * which will panic the system when the kernel tries
     1468         * to invoke the re_tick() function that isn't there
     1469         * anymore.
     1470         */
     1471        ifp->if_flags &= ~IFF_UP;
     1472        ether_ifdetach(ifp);
     1473    }
     1474    if (sc->rl_miibus)
     1475        device_delete_child(dev, sc->rl_miibus);
     1476    bus_generic_detach(dev);
     1477
     1478    /*
     1479     * The rest is resource deallocation, so we should already be
     1480     * stopped here.
     1481     */
     1482
     1483    for (i = 0; i < RL_MSI_MESSAGES; i++) {
     1484        if (sc->rl_intrhand[i] != NULL) {
     1485            bus_teardown_intr(dev, sc->rl_irq[i],
     1486                sc->rl_intrhand[i]);
     1487            sc->rl_intrhand[i] = NULL;
     1488        }
     1489    }
     1490    if (ifp != NULL)
     1491        if_free(ifp);
     1492    if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
     1493        if (sc->rl_irq[0] != NULL) {
     1494            bus_release_resource(dev, SYS_RES_IRQ, 0,
     1495                sc->rl_irq[0]);
     1496            sc->rl_irq[0] = NULL;
     1497        }
     1498    } else {
     1499        for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
     1500            if (sc->rl_irq[i] != NULL) {
     1501                bus_release_resource(dev, SYS_RES_IRQ, rid,
     1502                    sc->rl_irq[i]);
     1503                sc->rl_irq[i] = NULL;
     1504            }
     1505        }
     1506        pci_release_msi(dev);
     1507    }
     1508    if (sc->rl_res)
     1509        bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
     1510            sc->rl_res);
     1511
     1512    /* Unload and free the RX DMA ring memory and map */
     1513
     1514    if (sc->rl_ldata.rl_rx_list_tag) {
     1515        bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
     1516            sc->rl_ldata.rl_rx_list_map);
     1517        bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
     1518            sc->rl_ldata.rl_rx_list,
     1519            sc->rl_ldata.rl_rx_list_map);
     1520        bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
     1521    }
     1522
     1523    /* Unload and free the TX DMA ring memory and map */
     1524
     1525    if (sc->rl_ldata.rl_tx_list_tag) {
     1526        bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
     1527            sc->rl_ldata.rl_tx_list_map);
     1528        bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
     1529            sc->rl_ldata.rl_tx_list,
     1530            sc->rl_ldata.rl_tx_list_map);
     1531        bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
     1532    }
     1533
     1534    /* Destroy all the RX and TX buffer maps */
     1535
     1536    if (sc->rl_ldata.rl_tx_mtag) {
     1537        for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
     1538            bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
     1539                sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
     1540        bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
     1541    }
     1542    if (sc->rl_ldata.rl_rx_mtag) {
     1543        for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++)
     1544            bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
     1545                sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
     1546        if (sc->rl_ldata.rl_rx_sparemap)
     1547            bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
     1548                sc->rl_ldata.rl_rx_sparemap);
     1549        bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
     1550    }
     1551
     1552    /* Unload and free the stats buffer and map */
     1553
     1554    if (sc->rl_ldata.rl_stag) {
     1555        bus_dmamap_unload(sc->rl_ldata.rl_stag,
     1556            sc->rl_ldata.rl_rx_list_map);
     1557        bus_dmamem_free(sc->rl_ldata.rl_stag,
     1558            sc->rl_ldata.rl_stats,
     1559            sc->rl_ldata.rl_smap);
     1560        bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
     1561    }
     1562
     1563    if (sc->rl_parent_tag)
     1564        bus_dma_tag_destroy(sc->rl_parent_tag);
     1565
     1566    mtx_destroy(&sc->rl_mtx);
     1567
     1568    return (0);
     1569}
     1570
     1571static __inline void
     1572re_discard_rxbuf(struct rl_softc *sc, int idx)
     1573{
     1574    struct rl_desc      *desc;
     1575    struct rl_rxdesc    *rxd;
     1576    uint32_t        cmdstat;
     1577
     1578    rxd = &sc->rl_ldata.rl_rx_desc[idx];
     1579    desc = &sc->rl_ldata.rl_rx_list[idx];
     1580    desc->rl_vlanctl = 0;
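    /*
     * Hand the descriptor back to the chip with its original buffer
     * length, keeping the end-of-ring mark if this is the last slot.
     */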
     1581    cmdstat = rxd->rx_size;
     1582    if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
     1583        cmdstat |= RL_RDESC_CMD_EOR;
     1584    desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
     1585}
     1586
     1587static int
     1588re_newbuf(struct rl_softc *sc, int idx)
     1589{
     1590    struct mbuf     *m;
     1591    struct rl_rxdesc    *rxd;
     1592    bus_dma_segment_t   segs[1];
     1593    bus_dmamap_t        map;
     1594    struct rl_desc      *desc;
     1595    uint32_t        cmdstat;
     1596    int         error, nsegs;
     1597
     1598    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
     1599    if (m == NULL)
     1600        return (ENOBUFS);
     1601
     1602    m->m_len = m->m_pkthdr.len = MCLBYTES;
     1603#ifdef RE_FIXUP_RX
     1604    /*
     1605     * This is part of an evil trick to deal with non-x86 platforms.
     1606     * The RealTek chip requires RX buffers to be aligned on 64-bit
     1607     * boundaries, but that will hose non-x86 machines. To get around
     1608     * this, we leave some empty space at the start of each buffer
     1609     * and for non-x86 hosts, we copy the buffer back six bytes
     1610     * to achieve word alignment. This is slightly more efficient
     1611     * than allocating a new buffer, copying the contents, and
     1612     * discarding the old buffer.
     1613     */
     1614    m_adj(m, RE_ETHER_ALIGN);
     1615#endif
     1616    error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
     1617        sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
     1618    if (error != 0) {
     1619        m_freem(m);
     1620        return (ENOBUFS);
     1621    }
     1622    KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs));
     1623
     1624    rxd = &sc->rl_ldata.rl_rx_desc[idx];
     1625    if (rxd->rx_m != NULL) {
     1626        bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
     1627            BUS_DMASYNC_POSTREAD);
     1628        bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
     1629    }
     1630
     1631    rxd->rx_m = m;
     1632    map = rxd->rx_dmamap;
     1633    rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
     1634    rxd->rx_size = segs[0].ds_len;
     1635    sc->rl_ldata.rl_rx_sparemap = map;
     1636    bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
     1637        BUS_DMASYNC_PREREAD);
     1638
     1639    desc = &sc->rl_ldata.rl_rx_list[idx];
     1640    desc->rl_vlanctl = 0;
     1641    desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
     1642    desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
     1643    cmdstat = segs[0].ds_len;
     1644    if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
     1645        cmdstat |= RL_RDESC_CMD_EOR;
     1646    desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
     1647
     1648    return (0);
     1649}
     1650
     1651#ifdef RE_FIXUP_RX
     1652static __inline void
     1653re_fixup_rx(struct mbuf *m)
     1654{
     1655    int                     i;
     1656    uint16_t                *src, *dst;
     1657
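    /*
     * Copy the frame back by (RE_ETHER_ALIGN - ETHER_ALIGN) bytes, one
     * 16-bit word at a time, then pull m_data back by the same amount so
     * the payload ends up at the standard 2-byte ETHER_ALIGN offset out
     * of the RE_ETHER_ALIGN bytes that re_newbuf() skipped with m_adj().
     */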
     1658    src = mtod(m, uint16_t *);
     1659    dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
     1660
     1661    for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
     1662        *dst++ = *src++;
     1663
     1664    m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
     1665}
     1666#endif
     1667
     1668static int
     1669re_tx_list_init(struct rl_softc *sc)
     1670{
     1671    struct rl_desc      *desc;
     1672    int         i;
     1673
     1674    RL_LOCK_ASSERT(sc);
     1675
     1676    bzero(sc->rl_ldata.rl_tx_list,
     1677        sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
     1678    for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
     1679        sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
     1680    /* Set EOR. */
     1681    desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
     1682    desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
     1683
     1684    bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
     1685        sc->rl_ldata.rl_tx_list_map,
     1686        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
     1687
     1688    sc->rl_ldata.rl_tx_prodidx = 0;
     1689    sc->rl_ldata.rl_tx_considx = 0;
     1690    sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
     1691
     1692    return (0);
     1693}
     1694
     1695static int
     1696re_rx_list_init(struct rl_softc *sc)
     1697{
     1698    int         error, i;
     1699
     1700    bzero(sc->rl_ldata.rl_rx_list,
     1701        sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
     1702    for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
     1703        sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
     1704        if ((error = re_newbuf(sc, i)) != 0)
     1705            return (error);
     1706    }
     1707
     1708    /* Flush the RX descriptors */
     1709
     1710    bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
     1711        sc->rl_ldata.rl_rx_list_map,
     1712        BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
     1713
     1714    sc->rl_ldata.rl_rx_prodidx = 0;
     1715    sc->rl_head = sc->rl_tail = NULL;
     1716
     1717    return (0);
     1718}
     1719
     1720/*
     1721 * RX handler for C+ and 8169. For the gigE chips, we support
     1722 * the reception of jumbo frames that have been fragmented
     1723 * across multiple 2K mbuf cluster buffers.
     1724 */
     1725static int
     1726re_rxeof(struct rl_softc *sc)
     1727{
     1728    struct mbuf     *m;
     1729    struct ifnet        *ifp;
     1730    int         i, total_len;
     1731    struct rl_desc      *cur_rx;
     1732    u_int32_t       rxstat, rxvlan;
     1733    int         maxpkt = 16;
     1734
     1735    RL_LOCK_ASSERT(sc);
     1736
     1737    ifp = sc->rl_ifp;
     1738
     1739    /* Invalidate the descriptor memory */
     1740
     1741    bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
     1742        sc->rl_ldata.rl_rx_list_map,
     1743        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
     1744
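              /*
               * Deliver at most 'maxpkt' frames per call so one pass
               * through the RX ring stays bounded; re_int_task() rechecks
               * the interrupt status afterwards and reschedules itself if
               * more work is pending.
               */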
     1745    for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
     1746        i = RL_RX_DESC_NXT(sc, i)) {
     1747        cur_rx = &sc->rl_ldata.rl_rx_list[i];
     1748        rxstat = le32toh(cur_rx->rl_cmdstat);
     1749        if ((rxstat & RL_RDESC_STAT_OWN) != 0)
     1750            break;
     1751        total_len = rxstat & sc->rl_rxlenmask;
     1752        rxvlan = le32toh(cur_rx->rl_vlanctl);
     1753        m = sc->rl_ldata.rl_rx_desc[i].rx_m;
     1754
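                  /*
                   * A frame larger than one RX buffer spans several
                   * descriptors; accumulate the pieces on the
                   * rl_head/rl_tail chain until the descriptor carrying
                   * EOF arrives.
                   */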
     1755        if (!(rxstat & RL_RDESC_STAT_EOF)) {
     1756            if (re_newbuf(sc, i) != 0) {
     1757                /*
     1758                 * If this is part of a multi-fragment packet,
     1759                 * discard all the pieces.
     1760                 */
     1761                if (sc->rl_head != NULL) {
     1762                    m_freem(sc->rl_head);
     1763                    sc->rl_head = sc->rl_tail = NULL;
     1764                }
     1765                re_discard_rxbuf(sc, i);
     1766                continue;
     1767            }
     1768            m->m_len = RE_RX_DESC_BUFLEN;
     1769            if (sc->rl_head == NULL)
     1770                sc->rl_head = sc->rl_tail = m;
     1771            else {
     1772                m->m_flags &= ~M_PKTHDR;
     1773                sc->rl_tail->m_next = m;
     1774                sc->rl_tail = m;
     1775            }
     1776            continue;
     1777        }
     1778
     1779        /*
     1780         * NOTE: for the 8139C+, the frame length field
     1781         * is always 12 bits in size, but for the gigE chips,
     1782         * it is 13 bits (since the max RX frame length is 16K).
     1783         * Unfortunately, all 32 bits in the status word
     1784         * were already used, so to make room for the extra
     1785         * length bit, RealTek took out the 'frame alignment
     1786         * error' bit and shifted the other status bits
     1787         * over one slot. The OWN, EOR, FS and LS bits are
     1788         * still in the same places. We have already extracted
     1789         * the frame length and checked the OWN bit, so rather
     1790         * than using an alternate bit mapping, we shift the
     1791         * status bits one space to the right so we can evaluate
     1792         * them using the 8169 status as though it was in the
     1793         * same format as that of the 8139C+.
     1794         */
     1795        if (sc->rl_type == RL_8169)
     1796            rxstat >>= 1;
     1797
     1798        /*
     1799         * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
     1800         * set, but if CRC is clear, it will still be a valid frame.
     1801         */
     1802        if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
     1803            (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
     1804            ifp->if_ierrors++;
     1805            /*
     1806             * If this is part of a multi-fragment packet,
     1807             * discard all the pieces.
     1808             */
     1809            if (sc->rl_head != NULL) {
     1810                m_freem(sc->rl_head);
     1811                sc->rl_head = sc->rl_tail = NULL;
     1812            }
     1813            re_discard_rxbuf(sc, i);
     1814            continue;
     1815        }
     1816
     1817        /*
     1818         * If allocating a replacement mbuf fails,
     1819         * reload the current one.
     1820         */
     1821
     1822        if (re_newbuf(sc, i) != 0) {
     1823            ifp->if_iqdrops++;
     1824            if (sc->rl_head != NULL) {
     1825                m_freem(sc->rl_head);
     1826                sc->rl_head = sc->rl_tail = NULL;
     1827            }
     1828            re_discard_rxbuf(sc, i);
     1829            continue;
     1830        }
     1831
     1832        if (sc->rl_head != NULL) {
     1833            m->m_len = total_len % RE_RX_DESC_BUFLEN;
     1834            if (m->m_len == 0)
     1835                m->m_len = RE_RX_DESC_BUFLEN;
     1836            /*
     1837             * Special case: if there's 4 bytes or less
     1838             * in this buffer, the mbuf can be discarded:
     1839             * the last 4 bytes is the CRC, which we don't
     1840             * care about anyway.
     1841             */
     1842            if (m->m_len <= ETHER_CRC_LEN) {
     1843                sc->rl_tail->m_len -=
     1844                    (ETHER_CRC_LEN - m->m_len);
     1845                m_freem(m);
     1846            } else {
     1847                m->m_len -= ETHER_CRC_LEN;
     1848                m->m_flags &= ~M_PKTHDR;
     1849                sc->rl_tail->m_next = m;
     1850            }
     1851            m = sc->rl_head;
     1852            sc->rl_head = sc->rl_tail = NULL;
     1853            m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
     1854        } else
     1855            m->m_pkthdr.len = m->m_len =
     1856                (total_len - ETHER_CRC_LEN);
     1857
     1858#ifdef RE_FIXUP_RX
     1859        re_fixup_rx(m);
     1860#endif
     1861        ifp->if_ipackets++;
     1862        m->m_pkthdr.rcvif = ifp;
     1863
     1864        /* Do RX checksumming if enabled */
     1865
     1866        if (ifp->if_capenable & IFCAP_RXCSUM) {
     1867            if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
     1868                /* Check IP header checksum */
     1869                if (rxstat & RL_RDESC_STAT_PROTOID)
     1870                    m->m_pkthdr.csum_flags |=
     1871                        CSUM_IP_CHECKED;
     1872                if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
     1873                    m->m_pkthdr.csum_flags |=
     1874                        CSUM_IP_VALID;
     1875
     1876                /* Check TCP/UDP checksum */
     1877                if ((RL_TCPPKT(rxstat) &&
     1878                    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
     1879                    (RL_UDPPKT(rxstat) &&
     1880                     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
     1881                    m->m_pkthdr.csum_flags |=
     1882                        CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
     1883                    m->m_pkthdr.csum_data = 0xffff;
     1884                }
     1885            } else {
     1886                /*
      1887                 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
     1888                 */
     1889                if ((rxstat & RL_RDESC_STAT_PROTOID) &&
     1890                    (rxvlan & RL_RDESC_IPV4))
     1891                    m->m_pkthdr.csum_flags |=
     1892                        CSUM_IP_CHECKED;
     1893                if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
     1894                    (rxvlan & RL_RDESC_IPV4))
     1895                    m->m_pkthdr.csum_flags |=
     1896                        CSUM_IP_VALID;
     1897                if (((rxstat & RL_RDESC_STAT_TCP) &&
     1898                    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
     1899                    ((rxstat & RL_RDESC_STAT_UDP) &&
     1900                    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
     1901                    m->m_pkthdr.csum_flags |=
     1902                        CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
     1903                    m->m_pkthdr.csum_data = 0xffff;
     1904                }
     1905            }
     1906        }
     1907        maxpkt--;
     1908        if (rxvlan & RL_RDESC_VLANCTL_TAG) {
     1909            m->m_pkthdr.ether_vtag =
     1910                bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
     1911            m->m_flags |= M_VLANTAG;
     1912        }
     1913        RL_UNLOCK(sc);
     1914        (*ifp->if_input)(ifp, m);
     1915        RL_LOCK(sc);
     1916    }
     1917
     1918    /* Flush the RX DMA ring */
     1919
     1920    bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
     1921        sc->rl_ldata.rl_rx_list_map,
     1922        BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
     1923
     1924    sc->rl_ldata.rl_rx_prodidx = i;
     1925
     1926    if (maxpkt)
     1927        return(EAGAIN);
     1928
     1929    return(0);
     1930}
     1931
     1932static void
     1933re_txeof(struct rl_softc *sc)
     1934{
     1935    struct ifnet        *ifp;
     1936    struct rl_txdesc    *txd;
     1937    u_int32_t       txstat;
     1938    int         cons;
     1939
     1940    cons = sc->rl_ldata.rl_tx_considx;
     1941    if (cons == sc->rl_ldata.rl_tx_prodidx)
     1942        return;
     1943
     1944    ifp = sc->rl_ifp;
     1945    /* Invalidate the TX descriptor list */
     1946    bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
     1947        sc->rl_ldata.rl_tx_list_map,
     1948        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
     1949
     1950    for (; cons != sc->rl_ldata.rl_tx_prodidx;
     1951        cons = RL_TX_DESC_NXT(sc, cons)) {
     1952        txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
     1953        if (txstat & RL_TDESC_STAT_OWN)
     1954            break;
     1955        /*
     1956         * We only stash mbufs in the last descriptor
     1957         * in a fragment chain, which also happens to
     1958         * be the only place where the TX status bits
     1959         * are valid.
     1960         */
     1961        if (txstat & RL_TDESC_CMD_EOF) {
     1962            txd = &sc->rl_ldata.rl_tx_desc[cons];
     1963            bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
     1964                txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
     1965            bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
     1966                txd->tx_dmamap);
     1967            KASSERT(txd->tx_m != NULL,
     1968                ("%s: freeing NULL mbufs!", __func__));
     1969            m_freem(txd->tx_m);
     1970            txd->tx_m = NULL;
     1971            if (txstat & (RL_TDESC_STAT_EXCESSCOL|
     1972                RL_TDESC_STAT_COLCNT))
     1973                ifp->if_collisions++;
     1974            if (txstat & RL_TDESC_STAT_TXERRSUM)
     1975                ifp->if_oerrors++;
     1976            else
     1977                ifp->if_opackets++;
     1978        }
     1979        sc->rl_ldata.rl_tx_free++;
     1980        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
     1981    }
     1982    sc->rl_ldata.rl_tx_considx = cons;
     1983
     1984    /* No changes made to the TX ring, so no flush needed */
     1985
     1986    if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
     1987        /*
     1988         * Some chips will ignore a second TX request issued
     1989         * while an existing transmission is in progress. If
     1990         * the transmitter goes idle but there are still
     1991         * packets waiting to be sent, we need to restart the
     1992         * channel here to flush them out. This only seems to
     1993         * be required with the PCIe devices.
     1994         */
     1995        CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
     1996
     1997#ifdef RE_TX_MODERATION
     1998        /*
     1999         * If not all descriptors have been reaped yet, reload
     2000         * the timer so that we will eventually get another
     2001         * interrupt that will cause us to re-enter this routine.
     2002         * This is done in case the transmitter has gone idle.
     2003         */
     2004        CSR_WRITE_4(sc, RL_TIMERCNT, 1);
     2005#endif
     2006    } else
     2007        sc->rl_watchdog_timer = 0;
     2008}
     2009
     2010static void
     2011re_tick(void *xsc)
     2012{
     2013    struct rl_softc     *sc;
     2014    struct mii_data     *mii;
     2015    struct ifnet        *ifp;
     2016
     2017    sc = xsc;
     2018    ifp = sc->rl_ifp;
     2019
     2020    RL_LOCK_ASSERT(sc);
     2021
     2022    re_watchdog(sc);
     2023
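              /*
               * Track link state from the PHY: clear RL_FLAG_LINK when the
               * link drops, and when it comes back up kick the TX task so
               * packets queued while the link was down get sent.
               */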
     2024    mii = device_get_softc(sc->rl_miibus);
     2025    mii_tick(mii);
     2026    if ((sc->rl_flags & RL_FLAG_LINK) != 0) {
     2027        if (!(mii->mii_media_status & IFM_ACTIVE))
     2028            sc->rl_flags &= ~RL_FLAG_LINK;
     2029    } else {
     2030        if (mii->mii_media_status & IFM_ACTIVE &&
     2031            IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
     2032            sc->rl_flags |= RL_FLAG_LINK;
     2033            if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
     2034                taskqueue_enqueue_fast(taskqueue_fast,
     2035                    &sc->rl_txtask);
     2036        }
     2037    }
     2038
     2039    callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
     2040}
     2041
     2042#ifdef DEVICE_POLLING
     2043static void
     2044re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
     2045{
     2046    struct rl_softc *sc = ifp->if_softc;
     2047
     2048    RL_LOCK(sc);
     2049    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
     2050        re_poll_locked(ifp, cmd, count);
     2051    RL_UNLOCK(sc);
     2052}
     2053
     2054static void
     2055re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
     2056{
     2057    struct rl_softc *sc = ifp->if_softc;
     2058
     2059    RL_LOCK_ASSERT(sc);
     2060
     2061    sc->rxcycles = count;
     2062    re_rxeof(sc);
     2063    re_txeof(sc);
     2064
     2065    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
     2066        taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
     2067
     2068    if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
     2069        u_int16_t       status;
     2070
     2071        status = CSR_READ_2(sc, RL_ISR);
     2072        if (status == 0xffff)
     2073            return;
     2074        if (status)
     2075            CSR_WRITE_2(sc, RL_ISR, status);
     2076
     2077        /*
     2078         * XXX check behaviour on receiver stalls.
     2079         */
     2080
     2081        if (status & RL_ISR_SYSTEM_ERR) {
     2082            re_reset(sc);
     2083            re_init_locked(sc);
     2084        }
     2085    }
     2086}
     2087#endif /* DEVICE_POLLING */
     2088
     2089static int
     2090re_intr(void *arg)
     2091{
     2092    struct rl_softc     *sc;
     2093    uint16_t        status;
     2094
     2095    sc = arg;
     2096
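              /*
               * Interrupt filter: check whether this device raised the
               * interrupt and, if so, mask further interrupts and defer
               * the real work to the fast taskqueue.  re_int_task()
               * re-enables interrupts once the pending events are drained.
               */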
     2097    status = CSR_READ_2(sc, RL_ISR);
     2098    if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
      2099        return (FILTER_STRAY);
     2100    CSR_WRITE_2(sc, RL_IMR, 0);
     2101
     2102    taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
     2103
     2104    return (FILTER_HANDLED);
     2105}
     2106
     2107static void
     2108re_int_task(void *arg, int npending)
     2109{
     2110    struct rl_softc     *sc;
     2111    struct ifnet        *ifp;
     2112    u_int16_t       status;
     2113    int         rval = 0;
     2114
     2115    sc = arg;
     2116    ifp = sc->rl_ifp;
     2117
     2118    RL_LOCK(sc);
     2119
     2120    status = CSR_READ_2(sc, RL_ISR);
      2121    CSR_WRITE_2(sc, RL_ISR, status);
     2122
     2123    if (sc->suspended ||
     2124        (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
     2125        RL_UNLOCK(sc);
     2126        return;
     2127    }
     2128
     2129#ifdef DEVICE_POLLING
      2130    if (ifp->if_capenable & IFCAP_POLLING) {
     2131        RL_UNLOCK(sc);
     2132        return;
     2133    }
     2134#endif
     2135
     2136    if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
     2137        rval = re_rxeof(sc);
     2138
     2139    if (status & (
     2140#ifdef RE_TX_MODERATION
     2141        RL_ISR_TIMEOUT_EXPIRED|
     2142#else
     2143        RL_ISR_TX_OK|
     2144#endif
     2145        RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
     2146        re_txeof(sc);
     2147
     2148    if (status & RL_ISR_SYSTEM_ERR) {
     2149        re_reset(sc);
     2150        re_init_locked(sc);
     2151    }
     2152
     2153    if (status & RL_ISR_LINKCHG) {
     2154        callout_stop(&sc->rl_stat_callout);
     2155        re_tick(sc);
     2156    }
     2157
     2158    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
     2159        taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
     2160
     2161    RL_UNLOCK(sc);
     2162
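              /*
               * If the chip already has new events pending, or the RX
               * handler returned a non-zero status, run the task again
               * with interrupts still masked; otherwise unmask them.
               */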
      2163    if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
     2164        taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
     2165        return;
     2166    }
     2167
     2168    CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
     2169}
     2170
     2171/*
      2172 * This is a copy of ath_defrag() from ath(4).
      2173 *
      2174 * Defragment an mbuf chain, returning at most maxfrags separate
      2175 * mbufs+clusters.  If this is not possible NULL is returned and
      2176 * the original mbuf chain is left in its present (potentially
      2177 * modified) state.  We use two techniques: collapsing consecutive
     2178 * mbufs and replacing consecutive mbufs by a cluster.
     2179 */
     2180static struct mbuf *
     2181re_defrag(m0, how, maxfrags)
     2182    struct mbuf *m0;
     2183    int how;
     2184    int maxfrags;
     2185{
     2186    struct mbuf *m, *n, *n2, **prev;
     2187    u_int curfrags;
     2188
     2189    /*
     2190     * Calculate the current number of frags.
     2191     */
     2192    curfrags = 0;
     2193    for (m = m0; m != NULL; m = m->m_next)
     2194        curfrags++;
     2195    /*
     2196     * First, try to collapse mbufs.  Note that we always collapse
     2197     * towards the front so we don't need to deal with moving the
     2198     * pkthdr.  This may be suboptimal if the first mbuf has much
     2199     * less data than the following.
     2200     */
     2201    m = m0;
     2202again:
     2203    for (;;) {
     2204        n = m->m_next;
     2205        if (n == NULL)
     2206            break;
     2207        if ((m->m_flags & M_RDONLY) == 0 &&
     2208            n->m_len < M_TRAILINGSPACE(m)) {
     2209            bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
     2210                n->m_len);
     2211            m->m_len += n->m_len;
     2212            m->m_next = n->m_next;
     2213            m_free(n);
     2214            if (--curfrags <= maxfrags)
     2215                return (m0);
     2216        } else
     2217            m = n;
     2218    }
     2219    KASSERT(maxfrags > 1,
     2220        ("maxfrags %u, but normal collapse failed", maxfrags));
     2221    /*
     2222     * Collapse consecutive mbufs to a cluster.
     2223     */
     2224    prev = &m0->m_next;     /* NB: not the first mbuf */
     2225    while ((n = *prev) != NULL) {
     2226        if ((n2 = n->m_next) != NULL &&
     2227            n->m_len + n2->m_len < MCLBYTES) {
     2228            m = m_getcl(how, MT_DATA, 0);
     2229            if (m == NULL)
     2230                goto bad;
     2231            bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
     2232            bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
     2233                n2->m_len);
     2234            m->m_len = n->m_len + n2->m_len;
     2235            m->m_next = n2->m_next;
     2236            *prev = m;
     2237            m_free(n);
     2238            m_free(n2);
     2239            if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
     2240                return m0;
     2241            /*
     2242             * Still not there, try the normal collapse
     2243             * again before we allocate another cluster.
     2244             */
     2245            goto again;
     2246        }
     2247        prev = &n->m_next;
     2248    }
     2249    /*
     2250     * No place where we can collapse to a cluster; punt.
     2251     * This can occur if, for example, you request 2 frags
     2252     * but the packet requires that both be clusters (we
     2253     * never reallocate the first mbuf to avoid moving the
     2254     * packet header).
     2255     */
     2256bad:
     2257    return (NULL);
     2258}
     2259
     2260
     2261
     2262static int
     2263re_encap(struct rl_softc *sc, struct mbuf **m_head)
     2264{
     2265    struct rl_txdesc    *txd, *txd_last;
     2266    bus_dma_segment_t   segs[RL_NTXSEGS];
     2267    bus_dmamap_t        map;
     2268    struct mbuf     *m_new;
     2269    struct rl_desc      *desc;
     2270    int         nsegs, prod;
     2271    int         i, error, ei, si;
     2272    int         padlen;
     2273    uint32_t        cmdstat, csum_flags, vlanctl;
     2274
     2275    RL_LOCK_ASSERT(sc);
     2276    M_ASSERTPKTHDR((*m_head));
     2277
     2278    /*
     2279     * With some of the RealTek chips, using the checksum offload
     2280     * support in conjunction with the autopadding feature results
     2281     * in the transmission of corrupt frames. For example, if we
     2282     * need to send a really small IP fragment that's less than 60
     2283     * bytes in size, and IP header checksumming is enabled, the
     2284     * resulting ethernet frame that appears on the wire will
     2285     * have garbled payload. To work around this, if TX IP checksum
     2286     * offload is enabled, we always manually pad short frames out
     2287     * to the minimum ethernet frame size.
     2288     */
     2289    if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 &&
     2290        (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
     2291        ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
     2292        padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
     2293        if (M_WRITABLE(*m_head) == 0) {
     2294            /* Get a writable copy. */
     2295            m_new = m_dup(*m_head, M_DONTWAIT);
     2296            m_freem(*m_head);
     2297            if (m_new == NULL) {
     2298                *m_head = NULL;
     2299                return (ENOBUFS);
     2300            }
     2301            *m_head = m_new;
     2302        }
     2303        if ((*m_head)->m_next != NULL ||
     2304            M_TRAILINGSPACE(*m_head) < padlen) {
     2305            m_new = m_defrag(*m_head, M_DONTWAIT);
     2306            if (m_new == NULL) {
     2307                m_freem(*m_head);
     2308                *m_head = NULL;
     2309                return (ENOBUFS);
     2310            }
     2311        } else
     2312            m_new = *m_head;
     2313
     2314        /*
     2315         * Manually pad short frames, and zero the pad space
     2316         * to avoid leaking data.
     2317         */
     2318        bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
     2319        m_new->m_pkthdr.len += padlen;
     2320        m_new->m_len = m_new->m_pkthdr.len;
     2321        *m_head = m_new;
     2322    }
     2323
     2324    prod = sc->rl_ldata.rl_tx_prodidx;
     2325    txd = &sc->rl_ldata.rl_tx_desc[prod];
     2326    error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
     2327        *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
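              /*
               * EFBIG means the mbuf chain maps to more DMA segments than
               * RL_NTXSEGS; collapse it with re_defrag() and retry the
               * mapping once.
               */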
     2328    if (error == EFBIG) {
     2329        m_new = re_defrag(*m_head, M_DONTWAIT, RL_NTXSEGS);
     2330        if (m_new == NULL) {
     2331            m_freem(*m_head);
     2332            *m_head = NULL;
     2333            return (ENOBUFS);
     2334        }
     2335        *m_head = m_new;
     2336        error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
     2337            txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
     2338        if (error != 0) {
     2339            m_freem(*m_head);
     2340            *m_head = NULL;
     2341            return (error);
     2342        }
     2343    } else if (error != 0)
     2344        return (error);
     2345    if (nsegs == 0) {
     2346        m_freem(*m_head);
     2347        *m_head = NULL;
     2348        return (EIO);
     2349    }
     2350
     2351    /* Check for number of available descriptors. */
     2352    if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
     2353        bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
     2354        return (ENOBUFS);
     2355    }
     2356
     2357    bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
     2358        BUS_DMASYNC_PREWRITE);
     2359
     2360    /*
     2361     * Set up checksum offload. Note: checksum offload bits must
     2362     * appear in all descriptors of a multi-descriptor transmit
      2363     * attempt; testing with an 8169 chip shows this is a hard
      2364     * requirement.
     2365     */
     2366    vlanctl = 0;
     2367    csum_flags = 0;
     2368    if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0)
     2369        csum_flags = RL_TDESC_CMD_LGSEND |
     2370            ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
     2371            RL_TDESC_CMD_MSSVAL_SHIFT);
     2372    else {
     2373        /*
     2374         * Unconditionally enable IP checksum if TCP or UDP
      2375         * checksum is required. Otherwise, the TCP/UDP checksum
      2376         * offload has no effect.
     2377         */
     2378        if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
     2379            if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
     2380                csum_flags |= RL_TDESC_CMD_IPCSUM;
     2381                if (((*m_head)->m_pkthdr.csum_flags &
     2382                    CSUM_TCP) != 0)
     2383                    csum_flags |= RL_TDESC_CMD_TCPCSUM;
     2384                if (((*m_head)->m_pkthdr.csum_flags &
     2385                    CSUM_UDP) != 0)
     2386                    csum_flags |= RL_TDESC_CMD_UDPCSUM;
     2387            } else {
     2388                vlanctl |= RL_TDESC_CMD_IPCSUMV2;
     2389                if (((*m_head)->m_pkthdr.csum_flags &
     2390                    CSUM_TCP) != 0)
     2391                    vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
     2392                if (((*m_head)->m_pkthdr.csum_flags &
     2393                    CSUM_UDP) != 0)
     2394                    vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
     2395            }
     2396        }
     2397    }
     2398
     2399    /*
     2400     * Set up hardware VLAN tagging. Note: vlan tag info must
     2401     * appear in all descriptors of a multi-descriptor
     2402     * transmission attempt.
     2403     */
     2404    if ((*m_head)->m_flags & M_VLANTAG)
     2405        vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
     2406            RL_TDESC_VLANCTL_TAG;
     2407
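              /*
               * Fill a descriptor for each DMA segment.  OWN is set on all
               * but the first descriptor here; the first one receives
               * SOF|OWN last (below) so the chip never starts on a
               * half-built chain.
               */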
     2408    si = prod;
     2409    for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
     2410        desc = &sc->rl_ldata.rl_tx_list[prod];
     2411        desc->rl_vlanctl = htole32(vlanctl);
     2412        desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
     2413        desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
     2414        cmdstat = segs[i].ds_len;
     2415        if (i != 0)
     2416            cmdstat |= RL_TDESC_CMD_OWN;
     2417        if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
     2418            cmdstat |= RL_TDESC_CMD_EOR;
     2419        desc->rl_cmdstat = htole32(cmdstat | csum_flags);
     2420        sc->rl_ldata.rl_tx_free--;
     2421    }
     2422    /* Update producer index. */
     2423    sc->rl_ldata.rl_tx_prodidx = prod;
     2424
     2425    /* Set EOF on the last descriptor. */
     2426    ei = RL_TX_DESC_PRV(sc, prod);
     2427    desc = &sc->rl_ldata.rl_tx_list[ei];
     2428    desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
     2429
     2430    desc = &sc->rl_ldata.rl_tx_list[si];
     2431    /* Set SOF and transfer ownership of packet to the chip. */
     2432    desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
     2433
     2434    /*
      2435     * Ensure that the map for this transmission
     2436     * is placed at the array index of the last descriptor
     2437     * in this chain.  (Swap last and first dmamaps.)
     2438     */
     2439    txd_last = &sc->rl_ldata.rl_tx_desc[ei];
     2440    map = txd->tx_dmamap;
     2441    txd->tx_dmamap = txd_last->tx_dmamap;
     2442    txd_last->tx_dmamap = map;
     2443    txd_last->tx_m = *m_head;
     2444
     2445    return (0);
     2446}
     2447
     2448static void
     2449re_tx_task(void *arg, int npending)
     2450{
     2451    struct ifnet        *ifp;
     2452
     2453    ifp = arg;
     2454    re_start(ifp);
     2455}
     2456
     2457/*
     2458 * Main transmit routine for C+ and gigE NICs.
     2459 */
     2460static void
     2461re_start(struct ifnet *ifp)
     2462{
     2463    struct rl_softc     *sc;
     2464    struct mbuf     *m_head;
     2465    int         queued;
     2466
     2467    sc = ifp->if_softc;
     2468
     2469    RL_LOCK(sc);
     2470
     2471    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
     2472        IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) {
     2473        RL_UNLOCK(sc);
     2474        return;
     2475    }
     2476
     2477    for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
     2478        sc->rl_ldata.rl_tx_free > 1;) {
     2479        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
     2480        if (m_head == NULL)
     2481            break;
     2482
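                  /*
                   * re_encap() frees the mbuf and NULLs m_head on
                   * unrecoverable errors; otherwise the packet is put back
                   * at the head of the queue and the interface is marked
                   * busy until descriptors are reclaimed.
                   */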
     2483        if (re_encap(sc, &m_head) != 0) {
     2484            if (m_head == NULL)
     2485                break;
     2486            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
     2487            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
     2488            break;
     2489        }
     2490
     2491        /*
     2492         * If there's a BPF listener, bounce a copy of this frame
     2493         * to him.
     2494         */
     2495        ETHER_BPF_MTAP(ifp, m_head);
     2496
     2497        queued++;
     2498    }
     2499
     2500    if (queued == 0) {
     2501#ifdef RE_TX_MODERATION
     2502        if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
     2503            CSR_WRITE_4(sc, RL_TIMERCNT, 1);
     2504#endif
     2505        RL_UNLOCK(sc);
     2506        return;
     2507    }
     2508
     2509    /* Flush the TX descriptors */
     2510
     2511    bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
     2512        sc->rl_ldata.rl_tx_list_map,
     2513        BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
     2514
     2515    CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
     2516
     2517#ifdef RE_TX_MODERATION
     2518    /*
     2519     * Use the countdown timer for interrupt moderation.
     2520     * 'TX done' interrupts are disabled. Instead, we reset the
     2521     * countdown timer, which will begin counting until it hits
     2522     * the value in the TIMERINT register, and then trigger an
     2523     * interrupt. Each time we write to the TIMERCNT register,
     2524     * the timer count is reset to 0.
     2525     */
     2526    CSR_WRITE_4(sc, RL_TIMERCNT, 1);
     2527#endif
     2528
     2529    /*
     2530     * Set a timeout in case the chip goes out to lunch.
     2531     */
     2532    sc->rl_watchdog_timer = 5;
     2533
     2534    RL_UNLOCK(sc);
     2535}
     2536
     2537static void
     2538re_init(void *xsc)
     2539{
     2540    struct rl_softc     *sc = xsc;
     2541
     2542    RL_LOCK(sc);
     2543    re_init_locked(sc);
     2544    RL_UNLOCK(sc);
     2545}
     2546
     2547static void
     2548re_init_locked(struct rl_softc *sc)
     2549{
     2550    struct ifnet        *ifp = sc->rl_ifp;
     2551    struct mii_data     *mii;
     2552    u_int32_t       rxcfg = 0;
     2553    uint16_t        cfg;
     2554    union {
     2555        uint32_t align_dummy;
     2556        u_char eaddr[ETHER_ADDR_LEN];
     2557        } eaddr;
     2558
     2559    RL_LOCK_ASSERT(sc);
     2560
     2561    mii = device_get_softc(sc->rl_miibus);
     2562
     2563    /*
     2564     * Cancel pending I/O and free all RX/TX buffers.
     2565     */
     2566    re_stop(sc);
     2567
     2568    /*
     2569     * Enable C+ RX and TX mode, as well as VLAN stripping and
     2570     * RX checksum offload. We must configure the C+ register
     2571     * before all others.
     2572     */
     2573    cfg = RL_CPLUSCMD_PCI_MRW;
     2574    if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
     2575        cfg |= RL_CPLUSCMD_RXCSUM_ENB;
     2576    if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
     2577        cfg |= RL_CPLUSCMD_VLANSTRIP;
     2578    if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
     2579        cfg |= RL_CPLUSCMD_MACSTAT_DIS;
     2580        /* XXX magic. */
     2581        cfg |= 0x0001;
     2582    } else
     2583        cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
     2584    CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
     2585    /*
     2586     * Disable TSO if interface MTU size is greater than MSS
     2587     * allowed in controller.
     2588     */
     2589    if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
     2590        ifp->if_capenable &= ~IFCAP_TSO4;
     2591        ifp->if_hwassist &= ~CSUM_TSO;
     2592    }
     2593
     2594    /*
     2595     * Init our MAC address.  Even though the chipset
     2596     * documentation doesn't mention it, we need to enter "Config
     2597     * register write enable" mode to modify the ID registers.
     2598     */
     2599    /* Copy MAC address on stack to align. */
     2600    bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
     2601    CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
     2602    CSR_WRITE_4(sc, RL_IDR0,
     2603        htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
     2604    CSR_WRITE_4(sc, RL_IDR4,
     2605        htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
     2606    CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
     2607
     2608    /*
     2609     * For C+ mode, initialize the RX descriptors and mbufs.
     2610     */
     2611    re_rx_list_init(sc);
     2612    re_tx_list_init(sc);
     2613
     2614    /*
     2615     * Load the addresses of the RX and TX lists into the chip.
     2616     */
     2617
     2618    CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
     2619        RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
     2620    CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
     2621        RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
     2622
     2623    CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
     2624        RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
     2625    CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
     2626        RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
     2627
     2628    /*
     2629     * Enable transmit and receive.
     2630     */
     2631    CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
     2632
     2633    /*
     2634     * Set the initial TX and RX configuration.
     2635     */
     2636    if (sc->rl_testmode) {
     2637        if (sc->rl_type == RL_8169)
     2638            CSR_WRITE_4(sc, RL_TXCFG,
     2639                RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
     2640        else
     2641            CSR_WRITE_4(sc, RL_TXCFG,
     2642                RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
     2643    } else
     2644        CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
     2645
     2646    CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
     2647
     2648    CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
     2649
     2650    /* Set the individual bit to receive frames for this host only. */
     2651    rxcfg = CSR_READ_4(sc, RL_RXCFG);
     2652    rxcfg |= RL_RXCFG_RX_INDIV;
     2653
     2654    /* If we want promiscuous mode, set the allframes bit. */
     2655    if (ifp->if_flags & IFF_PROMISC)
     2656        rxcfg |= RL_RXCFG_RX_ALLPHYS;
     2657    else
     2658        rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
     2659    CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
     2660
     2661    /*
     2662     * Set capture broadcast bit to capture broadcast frames.
     2663     */
     2664    if (ifp->if_flags & IFF_BROADCAST)
     2665        rxcfg |= RL_RXCFG_RX_BROAD;
     2666    else
     2667        rxcfg &= ~RL_RXCFG_RX_BROAD;
     2668    CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
     2669
     2670    /*
     2671     * Program the multicast filter, if necessary.
     2672     */
     2673    re_setmulti(sc);
     2674
     2675#ifdef DEVICE_POLLING
     2676    /*
     2677     * Disable interrupts if we are polling.
     2678     */
     2679    if (ifp->if_capenable & IFCAP_POLLING)
     2680        CSR_WRITE_2(sc, RL_IMR, 0);
     2681    else    /* otherwise ... */
     2682#endif
     2683
     2684    /*
     2685     * Enable interrupts.
     2686     */
     2687    if (sc->rl_testmode)
     2688        CSR_WRITE_2(sc, RL_IMR, 0);
     2689    else
     2690        CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
     2691    CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
     2692
     2693    /* Set initial TX threshold */
     2694    sc->rl_txthresh = RL_TX_THRESH_INIT;
     2695
     2696    /* Start RX/TX process. */
     2697    CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
     2698#ifdef notdef
     2699    /* Enable receiver and transmitter. */
     2700    CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
     2701#endif
     2702
     2703#ifdef RE_TX_MODERATION
     2704    /*
     2705     * Initialize the timer interrupt register so that
     2706     * a timer interrupt will be generated once the timer
     2707     * reaches a certain number of ticks. The timer is
     2708     * reloaded on each transmit. This gives us TX interrupt
     2709     * moderation, which dramatically improves TX frame rate.
     2710     */
     2711    if (sc->rl_type == RL_8169)
     2712        CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
     2713    else
     2714        CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
     2715#endif
     2716
     2717    /*
     2718     * For 8169 gigE NICs, set the max allowed RX packet
     2719     * size so we can receive jumbo frames.
     2720     */
     2721    if (sc->rl_type == RL_8169)
     2722        CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
     2723
     2724    if (sc->rl_testmode)
     2725        return;
     2726
     2727    mii_mediachg(mii);
     2728
     2729    CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);
     2730
     2731    ifp->if_drv_flags |= IFF_DRV_RUNNING;
     2732    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
     2733
     2734    sc->rl_flags &= ~RL_FLAG_LINK;
     2735    sc->rl_watchdog_timer = 0;
     2736    callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
     2737}
     2738
     2739/*
     2740 * Set media options.
     2741 */
     2742static int
     2743re_ifmedia_upd(struct ifnet *ifp)
     2744{
     2745    struct rl_softc     *sc;
     2746    struct mii_data     *mii;
     2747
     2748    sc = ifp->if_softc;
     2749    mii = device_get_softc(sc->rl_miibus);
     2750    RL_LOCK(sc);
     2751    mii_mediachg(mii);
     2752    RL_UNLOCK(sc);
     2753
     2754    return (0);
     2755}
     2756
     2757/*
     2758 * Report current media status.
     2759 */
     2760static void
     2761re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
     2762{
     2763    struct rl_softc     *sc;
     2764    struct mii_data     *mii;
     2765
     2766    sc = ifp->if_softc;
     2767    mii = device_get_softc(sc->rl_miibus);
     2768
     2769    RL_LOCK(sc);
     2770    mii_pollstat(mii);
     2771    RL_UNLOCK(sc);
     2772    ifmr->ifm_active = mii->mii_media_active;
     2773    ifmr->ifm_status = mii->mii_media_status;
     2774}
     2775
     2776static int
     2777re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
     2778{
     2779    struct rl_softc     *sc = ifp->if_softc;
     2780    struct ifreq        *ifr = (struct ifreq *) data;
     2781    struct mii_data     *mii;
     2782    int         error = 0;
     2783
     2784    switch (command) {
     2785    case SIOCSIFMTU:
     2786        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RL_JUMBO_MTU) {
     2787            error = EINVAL;
     2788            break;
     2789        }
     2790        if ((sc->rl_flags & RL_FLAG_NOJUMBO) != 0 &&
     2791            ifr->ifr_mtu > RL_MAX_FRAMELEN) {
     2792            error = EINVAL;
     2793            break;
     2794        }
     2795        RL_LOCK(sc);
     2796        if (ifp->if_mtu != ifr->ifr_mtu)
     2797            ifp->if_mtu = ifr->ifr_mtu;
     2798        if (ifp->if_mtu > RL_TSO_MTU &&
     2799            (ifp->if_capenable & IFCAP_TSO4) != 0) {
     2800            ifp->if_capenable &= ~IFCAP_TSO4;
     2801            ifp->if_hwassist &= ~CSUM_TSO;
     2802        }
     2803        RL_UNLOCK(sc);
     2804        break;
     2805    case SIOCSIFFLAGS:
     2806        RL_LOCK(sc);
     2807        if ((ifp->if_flags & IFF_UP) != 0) {
     2808            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
     2809                if (((ifp->if_flags ^ sc->rl_if_flags)
     2810                    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
     2811                    re_setmulti(sc);
     2812            } else
     2813                re_init_locked(sc);
     2814        } else {
     2815            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
     2816                re_stop(sc);
     2817        }
     2818        sc->rl_if_flags = ifp->if_flags;
     2819        RL_UNLOCK(sc);
     2820        break;
     2821    case SIOCADDMULTI:
     2822    case SIOCDELMULTI:
     2823        RL_LOCK(sc);
     2824        re_setmulti(sc);
     2825        RL_UNLOCK(sc);
     2826        break;
     2827    case SIOCGIFMEDIA:
     2828    case SIOCSIFMEDIA:
     2829        mii = device_get_softc(sc->rl_miibus);
     2830        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
     2831        break;
     2832    case SIOCSIFCAP:
     2833        {
     2834        int mask, reinit;
     2835
     2836        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
     2837        reinit = 0;
     2838#ifdef DEVICE_POLLING
     2839        if (mask & IFCAP_POLLING) {
     2840            if (ifr->ifr_reqcap & IFCAP_POLLING) {
     2841                error = ether_poll_register(re_poll, ifp);
     2842                if (error)
     2843                    return(error);
     2844                RL_LOCK(sc);
     2845                /* Disable interrupts */
     2846                CSR_WRITE_2(sc, RL_IMR, 0x0000);
     2847                ifp->if_capenable |= IFCAP_POLLING;
     2848                RL_UNLOCK(sc);
     2849            } else {
     2850                error = ether_poll_deregister(ifp);
     2851                /* Enable interrupts. */
     2852                RL_LOCK(sc);
     2853                CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
     2854                ifp->if_capenable &= ~IFCAP_POLLING;
     2855                RL_UNLOCK(sc);
     2856            }
     2857        }
     2858#endif /* DEVICE_POLLING */
     2859        if (mask & IFCAP_HWCSUM) {
     2860            ifp->if_capenable ^= IFCAP_HWCSUM;
     2861            if (ifp->if_capenable & IFCAP_TXCSUM)
     2862                ifp->if_hwassist |= RE_CSUM_FEATURES;
     2863            else
     2864                ifp->if_hwassist &= ~RE_CSUM_FEATURES;
     2865            reinit = 1;
     2866        }
     2867        if (mask & IFCAP_VLAN_HWTAGGING) {
     2868            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
     2869            reinit = 1;
     2870        }
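                  /*
                   * Keep CSUM_TSO in if_hwassist consistent with the
                   * capability, and leave TSO disabled when the MTU
                   * exceeds the largest MSS the chip supports (RL_TSO_MTU).
                   */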
     2871        if (mask & IFCAP_TSO4) {
     2872            ifp->if_capenable ^= IFCAP_TSO4;
     2873            if ((IFCAP_TSO4 & ifp->if_capenable) &&
     2874                (IFCAP_TSO4 & ifp->if_capabilities))
     2875                ifp->if_hwassist |= CSUM_TSO;
     2876            else
     2877                ifp->if_hwassist &= ~CSUM_TSO;
     2878            if (ifp->if_mtu > RL_TSO_MTU &&
     2879                (ifp->if_capenable & IFCAP_TSO4) != 0) {
     2880                ifp->if_capenable &= ~IFCAP_TSO4;
     2881                ifp->if_hwassist &= ~CSUM_TSO;
     2882            }
     2883        }
     2884#ifdef ENABLE_WOL
     2885        if ((mask & IFCAP_WOL) != 0 &&
     2886            (ifp->if_capabilities & IFCAP_WOL) != 0) {
     2887            if ((mask & IFCAP_WOL_UCAST) != 0)
     2888                ifp->if_capenable ^= IFCAP_WOL_UCAST;
     2889            if ((mask & IFCAP_WOL_MCAST) != 0)
     2890                ifp->if_capenable ^= IFCAP_WOL_MCAST;
     2891            if ((mask & IFCAP_WOL_MAGIC) != 0)
     2892                ifp->if_capenable ^= IFCAP_WOL_MAGIC;
     2893        }
     2894#endif
     2895        if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING)
     2896            re_init(sc);
     2897        VLAN_CAPABILITIES(ifp);
     2898        }
     2899        break;
     2900    default:
     2901        error = ether_ioctl(ifp, command, data);
     2902        break;
     2903    }
     2904
     2905    return (error);
     2906}
     2907
     2908static void
     2909re_watchdog(struct rl_softc *sc)
     2910{
     2911
     2912    RL_LOCK_ASSERT(sc);
     2913
     2914    if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
     2915        return;
     2916
     2917    device_printf(sc->rl_dev, "watchdog timeout\n");
     2918    sc->rl_ifp->if_oerrors++;
     2919
     2920    re_txeof(sc);
     2921    re_rxeof(sc);
     2922    re_init_locked(sc);
     2923}
     2924
     2925/*
     2926 * Stop the adapter and free any mbufs allocated to the
     2927 * RX and TX lists.
     2928 */
     2929static void
     2930re_stop(struct rl_softc *sc)
     2931{
     2932    int         i;
     2933    struct ifnet        *ifp;
     2934    struct rl_txdesc    *txd;
     2935    struct rl_rxdesc    *rxd;
     2936
     2937    RL_LOCK_ASSERT(sc);
     2938
     2939    ifp = sc->rl_ifp;
     2940
     2941    sc->rl_watchdog_timer = 0;
     2942    callout_stop(&sc->rl_stat_callout);
     2943    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
     2944
     2945    CSR_WRITE_1(sc, RL_COMMAND, 0x00);
     2946    CSR_WRITE_2(sc, RL_IMR, 0x0000);
     2947    CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
     2948
     2949    if (sc->rl_head != NULL) {
     2950        m_freem(sc->rl_head);
     2951        sc->rl_head = sc->rl_tail = NULL;
     2952    }
     2953
     2954    /* Free the TX list buffers. */
     2955
     2956    for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
     2957        txd = &sc->rl_ldata.rl_tx_desc[i];
     2958        if (txd->tx_m != NULL) {
     2959            bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
     2960                txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
     2961            bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
     2962                txd->tx_dmamap);
     2963            m_freem(txd->tx_m);
     2964            txd->tx_m = NULL;
     2965        }
     2966    }
     2967
     2968    /* Free the RX list buffers. */
     2969
     2970    for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
     2971        rxd = &sc->rl_ldata.rl_rx_desc[i];
     2972        if (rxd->rx_m != NULL) {
      2973            bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
     2974                rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
     2975            bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
     2976                rxd->rx_dmamap);
     2977            m_freem(rxd->rx_m);
     2978            rxd->rx_m = NULL;
     2979        }
     2980    }
     2981}
     2982
     2983/*
     2984 * Device suspend routine.  Stop the interface and save some PCI
     2985 * settings in case the BIOS doesn't restore them properly on
     2986 * resume.
     2987 */
     2988static int
     2989re_suspend(device_t dev)
     2990{
     2991    struct rl_softc     *sc;
     2992
     2993    sc = device_get_softc(dev);
     2994
     2995    RL_LOCK(sc);
     2996    re_stop(sc);
     2997    re_setwol(sc);
     2998    sc->suspended = 1;
     2999    RL_UNLOCK(sc);
     3000
     3001    return (0);
     3002}
     3003
     3004/*
     3005 * Device resume routine.  Restore some PCI settings in case the BIOS
     3006 * doesn't, re-enable busmastering, and restart the interface if
     3007 * appropriate.
     3008 */
     3009static int
     3010re_resume(device_t dev)
     3011{
     3012    struct rl_softc     *sc;
     3013    struct ifnet        *ifp;
     3014
     3015    sc = device_get_softc(dev);
     3016
     3017    RL_LOCK(sc);
     3018
     3019    ifp = sc->rl_ifp;
     3020
     3021    /* reinitialize interface if necessary */
     3022    if (ifp->if_flags & IFF_UP)
     3023        re_init_locked(sc);
     3024
     3025    /*
     3026     * Clear WOL matching such that normal Rx filtering
     3027     * wouldn't interfere with WOL patterns.
     3028     */
     3029    re_clrwol(sc);
     3030    sc->suspended = 0;
     3031    RL_UNLOCK(sc);
     3032
     3033    return (0);
     3034}
     3035
     3036/*
     3037 * Stop all chip I/O so that the kernel's probe routines don't
     3038 * get confused by errant DMAs when rebooting.
     3039 */
     3040static int
     3041re_shutdown(device_t dev)
     3042{
     3043    struct rl_softc     *sc;
     3044
     3045    sc = device_get_softc(dev);
     3046
     3047    RL_LOCK(sc);
     3048    re_stop(sc);
     3049    /*
     3050     * Mark interface as down since otherwise we will panic if
     3051     * interrupt comes in later on, which can happen in some
     3052     * cases.
     3053     */
     3054    sc->rl_ifp->if_flags &= ~IFF_UP;
     3055    re_setwol(sc);
     3056    RL_UNLOCK(sc);
     3057
     3058    return (0);
     3059}
     3060
     3061static void
     3062re_setwol(struct rl_softc *sc)
     3063{
     3064#ifdef ENABLE_WOL
     3065    struct ifnet        *ifp;
     3066    int         pmc;
     3067    uint16_t        pmstat;
     3068    uint8_t         v;
     3069
     3070    RL_LOCK_ASSERT(sc);
     3071
     3072    if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
     3073        return;
     3074
     3075    ifp = sc->rl_ifp;
     3076    /* Enable config register write. */
     3077    CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
     3078
     3079    /* Enable PME. */
     3080    v = CSR_READ_1(sc, RL_CFG1);
     3081    v &= ~RL_CFG1_PME;
     3082    if ((ifp->if_capenable & IFCAP_WOL) != 0)
     3083        v |= RL_CFG1_PME;
     3084    CSR_WRITE_1(sc, RL_CFG1, v);
     3085
     3086    v = CSR_READ_1(sc, RL_CFG3);
     3087    v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
     3088    if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
     3089        v |= RL_CFG3_WOL_MAGIC;
     3090    CSR_WRITE_1(sc, RL_CFG3, v);
     3091
     3092    /* Config register write done. */
     3093    CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
     3094
     3095    v = CSR_READ_1(sc, RL_CFG5);
     3096    v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
     3097    v &= ~RL_CFG5_WOL_LANWAKE;
     3098    if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
     3099        v |= RL_CFG5_WOL_UCAST;
     3100    if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
     3101        v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
     3102    if ((ifp->if_capenable & IFCAP_WOL) != 0)
     3103        v |= RL_CFG5_WOL_LANWAKE;
     3104    CSR_WRITE_1(sc, RL_CFG5, v);
     3105
     3106    /*
     3107     * It seems that hardware resets its link speed to 100Mbps in
     3108     * power down mode so switching to 100Mbps in driver is not
     3109     * needed.
     3110     */
     3111
     3112    /* Request PME if WOL is requested. */
     3113    pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
     3114    pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
     3115    if ((ifp->if_capenable & IFCAP_WOL) != 0)
     3116        pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
     3117    pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
     3118#endif
     3119}
     3120
     3121static void
     3122re_clrwol(struct rl_softc *sc)
     3123{
     3124#ifdef ENABLE_WOL
     3125    int         pmc;
     3126    uint8_t         v;
     3127
     3128    RL_LOCK_ASSERT(sc);
     3129
     3130    if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
     3131        return;
     3132
     3133    /* Enable config register write. */
     3134    CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
     3135
     3136    v = CSR_READ_1(sc, RL_CFG3);
     3137    v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
     3138    CSR_WRITE_1(sc, RL_CFG3, v);
     3139
     3140    /* Config register write done. */
     3141    CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
     3142
     3143    v = CSR_READ_1(sc, RL_CFG5);
     3144    v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
     3145    v &= ~RL_CFG5_WOL_LANWAKE;
     3146    CSR_WRITE_1(sc, RL_CFG5, v);
     3147#endif
     3148}
     3149
  • src/add-ons/kernel/drivers/network/re/pci/Jamfile

     
     1SubDir HAIKU_TOP src add-ons kernel drivers network re pci ;
     2
     3UsePrivateHeaders kernel net ;
     4
     5UseHeaders [ FDirName $(SUBDIR) .. ] : true ;
     6UseHeaders [ FDirName $(HAIKU_TOP) src libs compat freebsd_network compat ] : true ;
     7UseHeaders [ FDirName $(HAIKU_TOP) src add-ons kernel drivers network rtl8139 ] : true ;
     8
     9SubDirCcFlags [ FDefines _KERNEL=1 FBSD_DRIVER=1 ] ;
     10
     11KernelAddon re :
     12    if_re.c
     13    glue.c
     14    : libfreebsd_network.a rtl8139_mii.a
     15    ;
     16
  • src/add-ons/kernel/drivers/network/re/pci/glue.c

     
     1#include <sys/bus.h>
     2#include <pci/if_rlreg.h>
     3
     4HAIKU_FBSD_DRIVER_GLUE(re, re, pci);
     5HAIKU_FBSD_MII_DRIVER(rlphy);
     6HAIKU_DRIVER_REQUIREMENTS(FBSD_TASKQUEUES | FBSD_FAST_TASKQUEUE);
     7
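        /*
         * Interrupt check hook for Haiku's FreeBSD compatibility layer:
         * decide whether the pending interrupt belongs to this device.
         * A nonzero return masks further chip interrupts and lets the
         * compat layer schedule the driver's deferred interrupt handling.
         */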
     8int
     9HAIKU_CHECK_DISABLE_INTERRUPTS(device_t dev)
     10{
     11    struct rl_softc *sc = device_get_softc(dev);
     12    uint16_t status;
     13
     14    status = CSR_READ_2(sc, RL_ISR);
     15    if (status == 0xffff)
     16        return 0;
     17    if (status != 0 && (status & RL_INTRS) == 0) {
     18        CSR_WRITE_2(sc, RL_ISR, status);
     19        return 0;
     20    }
     21    if ((status & RL_INTRS) == 0)
     22        return 0;
     23
     24    CSR_WRITE_2(sc, RL_IMR, 0);
     25    return 1;
     26}
     27
     28
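        /*
         * Counterpart hook: unmask the chip's interrupts once the deferred
         * interrupt handling has finished.
         */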
     29void
     30HAIKU_REENABLE_INTERRUPTS(device_t dev)
     31{
     32    struct rl_softc *sc = device_get_softc(dev);
     33    RL_LOCK(sc);
     34    CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
     35    RL_UNLOCK(sc);
     36}
     37
  • src/add-ons/kernel/drivers/network/Jamfile

     
    2121SubInclude HAIKU_TOP src add-ons kernel drivers network pcnet ;
    2222SubInclude HAIKU_TOP src add-ons kernel drivers network syskonnect ;
    2323SubInclude HAIKU_TOP src add-ons kernel drivers network attansic_l2 ;
     24SubInclude HAIKU_TOP src add-ons kernel drivers network re ;
    2425
    2526SubIncludeGPL HAIKU_TOP src add-ons kernel drivers network bcm440x ;
    2627SubIncludeGPL HAIKU_TOP src add-ons kernel drivers network bcm570x ;