FreeBSD/Linux Kernel Cross Reference
sys/dev/ti/if_ti.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-4-Clause
    3  *
    4  * Copyright (c) 1997, 1998, 1999
    5  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. All advertising materials mentioning features or use of this software
   16  *    must display the following acknowledgement:
   17  *      This product includes software developed by Bill Paul.
   18  * 4. Neither the name of the author nor the names of any co-contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   32  * THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 
   35 /*
   36  * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
   37  * Manuals, sample driver and firmware source kits are available
   38  * from http://www.alteon.com/support/openkits.
   39  *
   40  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   41  * Electrical Engineering Department
   42  * Columbia University, New York City
   43  */
   44 
   45 /*
   46  * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
   47  * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
   48  * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
   50  * Tigon supports hardware IP, TCP and UDP checksumming, multicast
   50  * filtering and jumbo (9014 byte) frames. The hardware is largely
   51  * controlled by firmware, which must be loaded into the NIC during
   52  * initialization.
   53  *
   54  * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
   55  * revision, which supports new features such as extended commands,
   56  * extended jumbo receive ring descriptors and a mini receive ring.
   57  *
   58  * Alteon Networks is to be commended for releasing such a vast amount
   59  * of development material for the Tigon NIC without requiring an NDA
   60  * (although they really should have done it a long time ago). With
   61  * any luck, the other vendors will finally wise up and follow Alteon's
   62  * stellar example.
   63  *
   64  * The firmware for the Tigon 1 and 2 NICs is compiled directly into
   65  * this driver by #including it as a C header file. This bloats the
   66  * driver somewhat, but it's the easiest method considering that the
   67  * driver code and firmware code need to be kept in sync. The source
   68  * for the firmware is not provided with the FreeBSD distribution since
   69  * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
   70  *
   71  * The following people deserve special thanks:
   72  * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
   73  *   for testing
   74  * - Raymond Lee of Netgear, for providing a pair of Netgear
   75  *   GA620 Tigon 2 boards for testing
   76  * - Ulf Zimmermann, for bringing the GA260 to my attention and
   77  *   convincing me to write this driver.
   78  * - Andrew Gallatin for providing FreeBSD/Alpha support.
   79  */
   80 
   81 #include <sys/cdefs.h>
   82 __FBSDID("$FreeBSD$");
   83 
   84 #include "opt_ti.h"
   85 
   86 #include <sys/param.h>
   87 #include <sys/systm.h>
   88 #include <sys/sockio.h>
   89 #include <sys/mbuf.h>
   90 #include <sys/malloc.h>
   91 #include <sys/kernel.h>
   92 #include <sys/module.h>
   93 #include <sys/socket.h>
   94 #include <sys/queue.h>
   95 #include <sys/conf.h>
   96 #include <sys/sf_buf.h>
   97 
   98 #include <net/if.h>
   99 #include <net/if_var.h>
  100 #include <net/if_arp.h>
  101 #include <net/ethernet.h>
  102 #include <net/if_dl.h>
  103 #include <net/if_media.h>
  104 #include <net/if_types.h>
  105 #include <net/if_vlan_var.h>
  106 
  107 #include <net/bpf.h>
  108 
  109 #include <netinet/in_systm.h>
  110 #include <netinet/in.h>
  111 #include <netinet/ip.h>
  112 
  113 #include <machine/bus.h>
  114 #include <machine/resource.h>
  115 #include <sys/bus.h>
  116 #include <sys/rman.h>
  117 
  118 #ifdef TI_SF_BUF_JUMBO
  119 #include <vm/vm.h>
  120 #include <vm/vm_page.h>
  121 #endif
  122 
  123 #include <dev/pci/pcireg.h>
  124 #include <dev/pci/pcivar.h>
  125 
  126 #include <sys/tiio.h>
  127 #include <dev/ti/if_tireg.h>
  128 #include <dev/ti/ti_fw.h>
  129 #include <dev/ti/ti_fw2.h>
  130 
  131 #include <sys/sysctl.h>
  132 
  133 #define TI_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP)
  134 /*
  135  * We can only turn on header splitting if we're using extended receive
  136  * BDs.
  137  */
  138 #if defined(TI_JUMBO_HDRSPLIT) && !defined(TI_SF_BUF_JUMBO)
  139 #error "options TI_JUMBO_HDRSPLIT requires TI_SF_BUF_JUMBO"
  140 #endif /* TI_JUMBO_HDRSPLIT && !TI_SF_BUF_JUMBO */
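      /*
       * Both TI_SF_BUF_JUMBO and TI_JUMBO_HDRSPLIT come from opt_ti.h and are
       * set in the kernel configuration file.  A minimal sketch of a config
       * fragment that satisfies the check above (header splitting requires
       * the sf_buf based jumbo allocator):
       *
       *      device          ti
       *      options         TI_SF_BUF_JUMBO
       *      options         TI_JUMBO_HDRSPLIT
       */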
  141 
  142 typedef enum {
  143         TI_SWAP_HTON,
  144         TI_SWAP_NTOH
  145 } ti_swap_type;
  146 
  147 /*
  148  * Various supported device vendors/types and their names.
  149  */
  150 
  151 static const struct ti_type ti_devs[] = {
  152         { ALT_VENDORID, ALT_DEVICEID_ACENIC,
  153                 "Alteon AceNIC 1000baseSX Gigabit Ethernet" },
  154         { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER,
  155                 "Alteon AceNIC 1000baseT Gigabit Ethernet" },
  156         { TC_VENDORID,  TC_DEVICEID_3C985,
  157                 "3Com 3c985-SX Gigabit Ethernet" },
  158         { NG_VENDORID, NG_DEVICEID_GA620,
  159                 "Netgear GA620 1000baseSX Gigabit Ethernet" },
  160         { NG_VENDORID, NG_DEVICEID_GA620T,
  161                 "Netgear GA620 1000baseT Gigabit Ethernet" },
  162         { SGI_VENDORID, SGI_DEVICEID_TIGON,
  163                 "Silicon Graphics Gigabit Ethernet" },
  164         { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX,
  165                 "Farallon PN9000SX Gigabit Ethernet" },
  166         { 0, 0, NULL }
  167 };
  168 
  169 static  d_open_t        ti_open;
  170 static  d_close_t       ti_close;
  171 static  d_ioctl_t       ti_ioctl2;
  172 
  173 static struct cdevsw ti_cdevsw = {
  174         .d_version =    D_VERSION,
  175         .d_flags =      0,
  176         .d_open =       ti_open,
  177         .d_close =      ti_close,
  178         .d_ioctl =      ti_ioctl2,
  179         .d_name =       "ti",
  180 };
  181 
  182 static int ti_probe(device_t);
  183 static int ti_attach(device_t);
  184 static int ti_detach(device_t);
  185 static void ti_txeof(struct ti_softc *);
  186 static void ti_rxeof(struct ti_softc *);
  187 
  188 static int ti_encap(struct ti_softc *, struct mbuf **);
  189 
  190 static void ti_intr(void *);
  191 static void ti_start(struct ifnet *);
  192 static void ti_start_locked(struct ifnet *);
  193 static int ti_ioctl(struct ifnet *, u_long, caddr_t);
  194 static uint64_t ti_get_counter(struct ifnet *, ift_counter);
  195 static void ti_init(void *);
  196 static void ti_init_locked(void *);
  197 static void ti_init2(struct ti_softc *);
  198 static void ti_stop(struct ti_softc *);
  199 static void ti_watchdog(void *);
  200 static int ti_shutdown(device_t);
  201 static int ti_ifmedia_upd(struct ifnet *);
  202 static int ti_ifmedia_upd_locked(struct ti_softc *);
  203 static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  204 
  205 static uint32_t ti_eeprom_putbyte(struct ti_softc *, int);
  206 static uint8_t  ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *);
  207 static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);
  208 
  209 static u_int ti_add_mcast(void *, struct sockaddr_dl *, u_int);
  210 static u_int ti_del_mcast(void *, struct sockaddr_dl *, u_int);
  211 static void ti_setmulti(struct ti_softc *);
  212 
  213 static void ti_mem_read(struct ti_softc *, uint32_t, uint32_t, void *);
  214 static void ti_mem_write(struct ti_softc *, uint32_t, uint32_t, void *);
  215 static void ti_mem_zero(struct ti_softc *, uint32_t, uint32_t);
  216 static int ti_copy_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t, int,
  217     int);
  218 static int ti_copy_scratch(struct ti_softc *, uint32_t, uint32_t, caddr_t,
  219     int, int, int);
  220 static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type);
  221 static void ti_loadfw(struct ti_softc *);
  222 static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
  223 static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int);
  224 static void ti_handle_events(struct ti_softc *);
  225 static void ti_dma_map_addr(void *, bus_dma_segment_t *, int, int);
  226 static int ti_dma_alloc(struct ti_softc *);
  227 static void ti_dma_free(struct ti_softc *);
  228 static int ti_dma_ring_alloc(struct ti_softc *, bus_size_t, bus_size_t,
  229     bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
  230 static void ti_dma_ring_free(struct ti_softc *, bus_dma_tag_t *, uint8_t **,
  231     bus_dmamap_t, bus_addr_t *);
  232 static int ti_newbuf_std(struct ti_softc *, int);
  233 static int ti_newbuf_mini(struct ti_softc *, int);
  234 static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
  235 static int ti_init_rx_ring_std(struct ti_softc *);
  236 static void ti_free_rx_ring_std(struct ti_softc *);
  237 static int ti_init_rx_ring_jumbo(struct ti_softc *);
  238 static void ti_free_rx_ring_jumbo(struct ti_softc *);
  239 static int ti_init_rx_ring_mini(struct ti_softc *);
  240 static void ti_free_rx_ring_mini(struct ti_softc *);
  241 static void ti_free_tx_ring(struct ti_softc *);
  242 static int ti_init_tx_ring(struct ti_softc *);
  243 static void ti_discard_std(struct ti_softc *, int);
  244 #ifndef TI_SF_BUF_JUMBO
  245 static void ti_discard_jumbo(struct ti_softc *, int);
  246 #endif
  247 static void ti_discard_mini(struct ti_softc *, int);
  248 
  249 static int ti_64bitslot_war(struct ti_softc *);
  250 static int ti_chipinit(struct ti_softc *);
  251 static int ti_gibinit(struct ti_softc *);
  252 
  253 #ifdef TI_JUMBO_HDRSPLIT
  254 static __inline void ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len,
  255     int idx);
  256 #endif /* TI_JUMBO_HDRSPLIT */
  257 
  258 static void ti_sysctl_node(struct ti_softc *);
  259 
  260 static device_method_t ti_methods[] = {
  261         /* Device interface */
  262         DEVMETHOD(device_probe,         ti_probe),
  263         DEVMETHOD(device_attach,        ti_attach),
  264         DEVMETHOD(device_detach,        ti_detach),
  265         DEVMETHOD(device_shutdown,      ti_shutdown),
  266         { 0, 0 }
  267 };
  268 
  269 static driver_t ti_driver = {
  270         "ti",
  271         ti_methods,
  272         sizeof(struct ti_softc)
  273 };
  274 
  275 DRIVER_MODULE(ti, pci, ti_driver, 0, 0);
  276 MODULE_DEPEND(ti, pci, 1, 1, 1);
  277 MODULE_DEPEND(ti, ether, 1, 1, 1);
  278 
  279 /*
  280  * Send an instruction or address to the EEPROM, check for ACK.
  281  */
  282 static uint32_t
  283 ti_eeprom_putbyte(struct ti_softc *sc, int byte)
  284 {
  285         int i, ack = 0;
  286 
  287         /*
  288          * Make sure we're in TX mode.
  289          */
  290         TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
  291 
  292         /*
  293          * Feed in each bit and strobe the clock.
  294          */
  295         for (i = 0x80; i; i >>= 1) {
  296                 if (byte & i) {
  297                         TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
  298                 } else {
  299                         TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
  300                 }
  301                 DELAY(1);
  302                 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
  303                 DELAY(1);
  304                 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
  305         }
  306 
  307         /*
  308          * Turn off TX mode.
  309          */
  310         TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
  311 
  312         /*
  313          * Check for ack.
  314          */
  315         TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
  316         ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
  317         TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
  318 
  319         return (ack);
  320 }
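      /*
       * Note: the value returned above is the raw EE_DIN bit sampled during
       * the ACK clock, so a nonzero return means the EEPROM did not
       * acknowledge the byte; the callers below treat any nonzero value as
       * a failure.
       */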
  321 
  322 /*
  323  * Read a byte of data stored in the EEPROM at address 'addr.'
  324  * We have to send two address bytes since the EEPROM can hold
  325  * more than 256 bytes of data.
  326  */
  327 static uint8_t
  328 ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
  329 {
  330         int i;
  331         uint8_t byte = 0;
  332 
  333         EEPROM_START;
  334 
  335         /*
  336          * Send write control code to EEPROM.
  337          */
  338         if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
  339                 device_printf(sc->ti_dev,
  340                     "failed to send write command, status: %x\n",
  341                     CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
  342                 return (1);
  343         }
  344 
  345         /*
  346          * Send first byte of address of byte we want to read.
  347          */
  348         if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
  349                 device_printf(sc->ti_dev, "failed to send address, status: %x\n",
  350                     CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
  351                 return (1);
  352         }
  353         /*
  354          * Send second byte of address of byte we want to read.
  355          */
  356         if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
  357                 device_printf(sc->ti_dev, "failed to send address, status: %x\n",
  358                     CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
  359                 return (1);
  360         }
  361 
  362         EEPROM_STOP;
  363         EEPROM_START;
  364         /*
  365          * Send read control code to EEPROM.
  366          */
  367         if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
  368                 device_printf(sc->ti_dev,
  369                     "failed to send read command, status: %x\n",
  370                     CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
  371                 return (1);
  372         }
  373 
  374         /*
  375          * Start reading bits from EEPROM.
  376          */
  377         TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
  378         for (i = 0x80; i; i >>= 1) {
  379                 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
  380                 DELAY(1);
  381                 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
  382                         byte |= i;
  383                 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
  384                 DELAY(1);
  385         }
  386 
  387         EEPROM_STOP;
  388 
  389         /*
  390          * No ACK generated for read, so just return byte.
  391          */
  392 
  393         *dest = byte;
  394 
  395         return (0);
  396 }
  397 
  398 /*
  399  * Read a sequence of bytes from the EEPROM.
  400  */
  401 static int
  402 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
  403 {
  404         int err = 0, i;
  405         uint8_t byte = 0;
  406 
  407         for (i = 0; i < cnt; i++) {
  408                 err = ti_eeprom_getbyte(sc, off + i, &byte);
  409                 if (err)
  410                         break;
  411                 *(dest + i) = byte;
  412         }
  413 
  414         return (err ? 1 : 0);
  415 }
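      /*
       * A typical use of ti_read_eeprom() is pulling the station address out
       * of the EEPROM during attach.  A minimal sketch, assuming the
       * TI_EE_MAC_OFFSET constant from if_tireg.h:
       *
       *      u_char eaddr[ETHER_ADDR_LEN];
       *
       *      if (ti_read_eeprom(sc, (caddr_t)eaddr,
       *          TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) != 0)
       *              device_printf(sc->ti_dev,
       *                  "failed to read station address\n");
       */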
  416 
  417 /*
  418  * NIC memory read function.
  419  * Can be used to copy data from NIC local memory.
  420  */
  421 static void
  422 ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
  423 {
  424         int segptr, segsize, cnt;
  425         char *ptr;
  426 
  427         segptr = addr;
  428         cnt = len;
  429         ptr = buf;
  430 
  431         while (cnt) {
  432                 if (cnt < TI_WINLEN)
  433                         segsize = cnt;
  434                 else
  435                         segsize = TI_WINLEN - (segptr % TI_WINLEN);
  436                 CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
  437                 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
  438                     TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
  439                     segsize / 4);
  440                 ptr += segsize;
  441                 segptr += segsize;
  442                 cnt -= segsize;
  443         }
  444 }
  445 
  446 /*
  447  * NIC memory write function.
  448  * Can be used to copy data into NIC local memory.
  449  */
  450 static void
  451 ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
  452 {
  453         int segptr, segsize, cnt;
  454         char *ptr;
  455 
  456         segptr = addr;
  457         cnt = len;
  458         ptr = buf;
  459 
  460         while (cnt) {
  461                 if (cnt < TI_WINLEN)
  462                         segsize = cnt;
  463                 else
  464                         segsize = TI_WINLEN - (segptr % TI_WINLEN);
  465                 CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
  466                 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
  467                     TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
  468                     segsize / 4);
  469                 ptr += segsize;
  470                 segptr += segsize;
  471                 cnt -= segsize;
  472         }
  473 }
  474 
  475 /*
  476  * NIC memory zero function.
  477  * Can be used to clear a section of NIC local memory.
  478  */
  479 static void
  480 ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
  481 {
  482         int segptr, segsize, cnt;
  483 
  484         segptr = addr;
  485         cnt = len;
  486 
  487         while (cnt) {
  488                 if (cnt < TI_WINLEN)
  489                         segsize = cnt;
  490                 else
  491                         segsize = TI_WINLEN - (segptr % TI_WINLEN);
  492                 CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
  493                 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
  494                     TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
  495                 segptr += segsize;
  496                 cnt -= segsize;
  497         }
  498 }
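      /*
       * ti_mem_read(), ti_mem_write() and ti_mem_zero() all go through the
       * same TI_WINLEN byte shared-memory window: TI_WINBASE selects which
       * window-aligned chunk of NIC SRAM is currently visible, and TI_WINDOW
       * is where that chunk appears in the register space.  The per-pass
       * segsize computation keeps each bus_space access from running past
       * the end of the currently selected window.
       */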
  499 
  500 static int
  501 ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
  502     caddr_t buf, int useraddr, int readdata)
  503 {
  504         int segptr, segsize, cnt;
  505         caddr_t ptr;
  506         uint32_t origwin;
  507         int resid, segresid;
  508         int first_pass;
  509 
  510         TI_LOCK_ASSERT(sc);
  511 
  512         /*
  513          * At the moment, we don't handle non-aligned cases; we just bail.
  514          * If this proves to be a problem, it will be fixed.
  515          */
  516         if (readdata == 0 && (tigon_addr & 0x3) != 0) {
  517                 device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
  518                     "word-aligned\n", __func__, tigon_addr);
  519                 device_printf(sc->ti_dev, "%s: unaligned writes aren't "
  520                     "yet supported\n", __func__);
  521                 return (EINVAL);
  522         }
  523 
  524         segptr = tigon_addr & ~0x3;
  525         segresid = tigon_addr - segptr;
  526 
  527         /*
  528          * This is the non-aligned amount left over that we'll need to
  529          * copy.
  530          */
  531         resid = len & 0x3;
  532 
  533         /* Add in the left over amount at the front of the buffer */
  534         resid += segresid;
  535 
  536         cnt = len & ~0x3;
  537         /*
  538          * If resid + segresid is >= 4, add multiples of 4 to the count and
  539          * decrease the residual by that much.
  540          */
  541         cnt += resid & ~0x3;
  542         resid -= resid & ~0x3;
  543 
  544         ptr = buf;
  545 
  546         first_pass = 1;
  547 
  548         /*
  549          * Save the old window base value.
  550          */
  551         origwin = CSR_READ_4(sc, TI_WINBASE);
  552 
  553         while (cnt) {
  554                 bus_size_t ti_offset;
  555 
  556                 if (cnt < TI_WINLEN)
  557                         segsize = cnt;
  558                 else
  559                         segsize = TI_WINLEN - (segptr % TI_WINLEN);
  560                 CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
  561 
  562                 ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));
  563 
  564                 if (readdata) {
  565                         bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
  566                             ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
  567                         if (useraddr) {
  568                                 /*
  569                                  * Yeah, this is a little on the kludgy
  570                                  * side, but at least this code is only
  571                                  * used for debugging.
  572                                  */
  573                                 ti_bcopy_swap(sc->ti_membuf, sc->ti_membuf2,
  574                                     segsize, TI_SWAP_NTOH);
  575 
  576                                 TI_UNLOCK(sc);
  577                                 if (first_pass) {
  578                                         copyout(&sc->ti_membuf2[segresid], ptr,
  579                                             segsize - segresid);
  580                                         first_pass = 0;
  581                                 } else
  582                                         copyout(sc->ti_membuf2, ptr, segsize);
  583                                 TI_LOCK(sc);
  584                         } else {
  585                                 if (first_pass) {
  586                                         ti_bcopy_swap(sc->ti_membuf,
  587                                             sc->ti_membuf2, segsize,
  588                                             TI_SWAP_NTOH);
  589                                         TI_UNLOCK(sc);
  590                                         bcopy(&sc->ti_membuf2[segresid], ptr,
  591                                             segsize - segresid);
  592                                         TI_LOCK(sc);
  593                                         first_pass = 0;
  594                                 } else
  595                                         ti_bcopy_swap(sc->ti_membuf, ptr,
  596                                             segsize, TI_SWAP_NTOH);
  597                         }
  598 
  599                 } else {
  600                         if (useraddr) {
  601                                 TI_UNLOCK(sc);
  602                                 copyin(ptr, sc->ti_membuf2, segsize);
  603                                 TI_LOCK(sc);
  604                                 ti_bcopy_swap(sc->ti_membuf2, sc->ti_membuf,
  605                                     segsize, TI_SWAP_HTON);
  606                         } else
  607                                 ti_bcopy_swap(ptr, sc->ti_membuf, segsize,
  608                                     TI_SWAP_HTON);
  609 
  610                         bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
  611                             ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
  612                 }
  613                 segptr += segsize;
  614                 ptr += segsize;
  615                 cnt -= segsize;
  616         }
  617 
  618         /*
  619          * Handle leftover, non-word-aligned bytes.
  620          */
  621         if (resid != 0) {
  622                 uint32_t tmpval, tmpval2;
  623                 bus_size_t ti_offset;
  624 
  625                 /*
  626                  * Set the segment pointer.
  627                  */
  628                 CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
  629 
  630                 ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1));
  631 
  632                 /*
  633                  * First, grab whatever is in our source/destination.
  634                  * We'll obviously need this for reads, but also for
  635                  * writes, since we'll be doing read/modify/write.
  636                  */
  637                 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
  638                     ti_offset, &tmpval, 1);
  639 
  640                 /*
  641                  * Next, translate this from little-endian to big-endian
  642                  * (at least on i386 boxes).
  643                  */
  644                 tmpval2 = ntohl(tmpval);
  645 
  646                 if (readdata) {
  647                         /*
  648                          * If we're reading, just copy the leftover number
  649                          * of bytes from the host byte order buffer to
  650                          * the user's buffer.
  651                          */
  652                         if (useraddr) {
  653                                 TI_UNLOCK(sc);
  654                                 copyout(&tmpval2, ptr, resid);
  655                                 TI_LOCK(sc);
  656                         } else
  657                                 bcopy(&tmpval2, ptr, resid);
  658                 } else {
  659                         /*
  660                          * If we're writing, first copy the bytes to be
  661                          * written into the network byte order buffer,
  662                          * leaving the rest of the buffer with whatever was
  663                          * originally in there.  Then, swap the bytes
  664                          * around into host order and write them out.
  665                          *
  666                          * XXX KDM the read side of this has been verified
  667                          * to work, but the write side of it has not been
  668                          * verified.  So user beware.
  669                          */
  670                         if (useraddr) {
  671                                 TI_UNLOCK(sc);
  672                                 copyin(ptr, &tmpval2, resid);
  673                                 TI_LOCK(sc);
  674                         } else
  675                                 bcopy(ptr, &tmpval2, resid);
  676 
  677                         tmpval = htonl(tmpval2);
  678 
  679                         bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
  680                             ti_offset, &tmpval, 1);
  681                 }
  682         }
  683 
  684         CSR_WRITE_4(sc, TI_WINBASE, origwin);
  685 
  686         return (0);
  687 }
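      /*
       * ti_copy_mem() backs the debugging interface exposed through the ti
       * character device (ti_ioctl2() and <sys/tiio.h>): with useraddr set
       * it copies between NIC SRAM and a user buffer, dropping the driver
       * lock around copyin()/copyout().  The read-modify-write tail above is
       * what allows transfer lengths that are not a multiple of four; as the
       * XXX comment notes, only the read side of that path has been
       * verified.
       */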
  688 
  689 static int
  690 ti_copy_scratch(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
  691     caddr_t buf, int useraddr, int readdata, int cpu)
  692 {
  693         uint32_t segptr;
  694         int cnt;
  695         uint32_t tmpval, tmpval2;
  696         caddr_t ptr;
  697 
  698         TI_LOCK_ASSERT(sc);
  699 
  700         /*
  701          * At the moment, we don't handle non-aligned cases; we just bail.
  702          * If this proves to be a problem, it will be fixed.
  703          */
  704         if (tigon_addr & 0x3) {
  705                 device_printf(sc->ti_dev, "%s: tigon address %#x "
  706                     "isn't word-aligned\n", __func__, tigon_addr);
  707                 return (EINVAL);
  708         }
  709 
  710         if (len & 0x3) {
  711                 device_printf(sc->ti_dev, "%s: transfer length %d "
  712                     "isn't word-aligned\n", __func__, len);
  713                 return (EINVAL);
  714         }
  715 
  716         segptr = tigon_addr;
  717         cnt = len;
  718         ptr = buf;
  719 
  720         while (cnt) {
  721                 CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr);
  722 
  723                 if (readdata) {
  724                         tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu));
  725 
  726                         tmpval = ntohl(tmpval2);
  727 
  728                         /*
  729                          * Note:  I've used this debugging interface
  730                          * extensively with Alteon's 12.3.15 firmware,
  731                          * compiled with GCC 2.7.2.1 and binutils 2.9.1.
  732                          *
  733                          * When you compile the firmware without
  734                          * optimization, which is necessary sometimes in
  735                          * order to properly step through it, you sometimes
  736                          * read out a bogus value of 0xc0017c instead of
  737                          * whatever was supposed to be in that scratchpad
  738                          * location.  That value is on the stack somewhere,
  739                          * but I've never been able to figure out what was
  740                          * causing the problem.
  741                          *
  742                          * The address seems to pop up in random places,
  743                          * often not in the same place on two subsequent
  744                          * reads.
  745                          *
  746                          * In any case, the underlying data doesn't seem
  747                          * to be affected, just the value read out.
  748                          *
  749                          * KDM, 3/7/2000
  750                          */
  751 
  752                         if (tmpval2 == 0xc0017c)
  753                                 device_printf(sc->ti_dev, "found 0xc0017c at "
  754                                     "%#x (tmpval2)\n", segptr);
  755 
  756                         if (tmpval == 0xc0017c)
  757                                 device_printf(sc->ti_dev, "found 0xc0017c at "
  758                                     "%#x (tmpval)\n", segptr);
  759 
  760                         if (useraddr)
  761                                 copyout(&tmpval, ptr, 4);
  762                         else
  763                                 bcopy(&tmpval, ptr, 4);
  764                 } else {
  765                         if (useraddr)
  766                                 copyin(ptr, &tmpval2, 4);
  767                         else
  768                                 bcopy(ptr, &tmpval2, 4);
  769 
  770                         tmpval = htonl(tmpval2);
  771 
  772                         CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval);
  773                 }
  774 
  775                 cnt -= 4;
  776                 segptr += 4;
  777                 ptr += 4;
  778         }
  779 
  780         return (0);
  781 }
  782 
  783 static int
  784 ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type)
  785 {
  786         const uint8_t *tmpsrc;
  787         uint8_t *tmpdst;
  788         size_t tmplen;
  789 
  790         if (len & 0x3) {
  791                 printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n", len);
  792                 return (-1);
  793         }
  794 
  795         tmpsrc = src;
  796         tmpdst = dst;
  797         tmplen = len;
  798 
  799         while (tmplen) {
  800                 if (swap_type == TI_SWAP_NTOH)
  801                         *(uint32_t *)tmpdst = ntohl(*(const uint32_t *)tmpsrc);
  802                 else
  803                         *(uint32_t *)tmpdst = htonl(*(const uint32_t *)tmpsrc);
  804                 tmpsrc += 4;
  805                 tmpdst += 4;
  806                 tmplen -= 4;
  807         }
  808 
  809         return (0);
  810 }
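      /*
       * Example: ti_bcopy_swap(src, dst, 16, TI_SWAP_HTON) copies four
       * 32-bit words, converting each from host to network byte order on
       * the way; TI_SWAP_NTOH performs the opposite conversion.  Lengths
       * that are not a multiple of four are rejected above.
       */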
  811 
  812 /*
  813  * Load firmware image into the NIC. Check that the firmware revision
  814  * is acceptable and see if we want the firmware for the Tigon 1 or
  815  * Tigon 2.
  816  */
  817 static void
  818 ti_loadfw(struct ti_softc *sc)
  819 {
  820 
  821         TI_LOCK_ASSERT(sc);
  822 
  823         switch (sc->ti_hwrev) {
  824         case TI_HWREV_TIGON:
  825                 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
  826                     tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
  827                     tigonFwReleaseFix != TI_FIRMWARE_FIX) {
  828                         device_printf(sc->ti_dev, "firmware revision mismatch; "
  829                             "want %d.%d.%d, got %d.%d.%d\n",
  830                             TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
  831                             TI_FIRMWARE_FIX, tigonFwReleaseMajor,
  832                             tigonFwReleaseMinor, tigonFwReleaseFix);
  833                         return;
  834                 }
  835                 ti_mem_write(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText);
  836                 ti_mem_write(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData);
  837                 ti_mem_write(sc, tigonFwRodataAddr, tigonFwRodataLen,
  838                     tigonFwRodata);
  839                 ti_mem_zero(sc, tigonFwBssAddr, tigonFwBssLen);
  840                 ti_mem_zero(sc, tigonFwSbssAddr, tigonFwSbssLen);
  841                 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
  842                 break;
  843         case TI_HWREV_TIGON_II:
  844                 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
  845                     tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
  846                     tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
  847                         device_printf(sc->ti_dev, "firmware revision mismatch; "
  848                             "want %d.%d.%d, got %d.%d.%d\n",
  849                             TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
  850                             TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
  851                             tigon2FwReleaseMinor, tigon2FwReleaseFix);
  852                         return;
  853                 }
  854                 ti_mem_write(sc, tigon2FwTextAddr, tigon2FwTextLen,
  855                     tigon2FwText);
  856                 ti_mem_write(sc, tigon2FwDataAddr, tigon2FwDataLen,
  857                     tigon2FwData);
  858                 ti_mem_write(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
  859                     tigon2FwRodata);
  860                 ti_mem_zero(sc, tigon2FwBssAddr, tigon2FwBssLen);
  861                 ti_mem_zero(sc, tigon2FwSbssAddr, tigon2FwSbssLen);
  862                 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
  863                 break;
  864         default:
  865                 device_printf(sc->ti_dev,
  866                     "can't load firmware: unknown hardware rev\n");
  867                 break;
  868         }
  869 }
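      /*
       * The tigonFw* and tigon2Fw* arrays and their ReleaseMajor/Minor/Fix
       * version stamps referenced above come from the generated headers
       * ti_fw.h and ti_fw2.h included near the top of this file, which is
       * how the firmware images end up compiled directly into the driver.
       */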
  870 
  871 /*
  872  * Send the NIC a command via the command ring.
  873  */
  874 static void
  875 ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
  876 {
  877         int index;
  878 
  879         index = sc->ti_cmd_saved_prodidx;
  880         CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
  881         TI_INC(index, TI_CMD_RING_CNT);
  882         CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
  883         sc->ti_cmd_saved_prodidx = index;
  884 }
  885 
  886 /*
  887  * Send the NIC an extended command. The 'len' parameter specifies the
  888  * number of command slots to include after the initial command.
  889  */
  890 static void
  891 ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len)
  892 {
  893         int index;
  894         int i;
  895 
  896         index = sc->ti_cmd_saved_prodidx;
  897         CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
  898         TI_INC(index, TI_CMD_RING_CNT);
  899         for (i = 0; i < len; i++) {
  900                 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
  901                     *(uint32_t *)(&arg[i * 4]));
  902                 TI_INC(index, TI_CMD_RING_CNT);
  903         }
  904         CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
  905         sc->ti_cmd_saved_prodidx = index;
  906 }
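      /*
       * Callers elsewhere in this file normally go through the TI_DO_CMD()
       * family of wrapper macros from if_tireg.h rather than filling in a
       * struct ti_cmd_desc and calling ti_cmd()/ti_cmd_ext() by hand.  Note
       * that the command ring lives in the NIC's shared register space
       * (TI_GCR_CMDRING), which is why ti_dma_alloc() below does not set up
       * any host DMA memory for it.
       */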
  907 
  908 /*
  909  * Handle events that have triggered interrupts.
  910  */
  911 static void
  912 ti_handle_events(struct ti_softc *sc)
  913 {
  914         struct ti_event_desc *e;
  915 
  916         if (sc->ti_rdata.ti_event_ring == NULL)
  917                 return;
  918 
  919         bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
  920             sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_POSTREAD);
  921         while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
  922                 e = &sc->ti_rdata.ti_event_ring[sc->ti_ev_saved_considx];
  923                 switch (TI_EVENT_EVENT(e)) {
  924                 case TI_EV_LINKSTAT_CHANGED:
  925                         sc->ti_linkstat = TI_EVENT_CODE(e);
  926                         if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
  927                                 if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
  928                                 sc->ti_ifp->if_baudrate = IF_Mbps(100);
  929                                 if (bootverbose)
  930                                         device_printf(sc->ti_dev,
  931                                             "10/100 link up\n");
  932                         } else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
  933                                 if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
  934                                 sc->ti_ifp->if_baudrate = IF_Gbps(1UL);
  935                                 if (bootverbose)
  936                                         device_printf(sc->ti_dev,
  937                                             "gigabit link up\n");
  938                         } else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
  939                                 if_link_state_change(sc->ti_ifp,
  940                                     LINK_STATE_DOWN);
  941                                 sc->ti_ifp->if_baudrate = 0;
  942                                 if (bootverbose)
  943                                         device_printf(sc->ti_dev,
  944                                             "link down\n");
  945                         }
  946                         break;
  947                 case TI_EV_ERROR:
  948                         if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
  949                                 device_printf(sc->ti_dev, "invalid command\n");
  950                         else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
  951                                 device_printf(sc->ti_dev, "unknown command\n");
  952                         else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
  953                                 device_printf(sc->ti_dev, "bad config data\n");
  954                         break;
  955                 case TI_EV_FIRMWARE_UP:
  956                         ti_init2(sc);
  957                         break;
  958                 case TI_EV_STATS_UPDATED:
  959                 case TI_EV_RESET_JUMBO_RING:
  960                 case TI_EV_MCAST_UPDATED:
  961                         /* Who cares. */
  962                         break;
  963                 default:
  964                         device_printf(sc->ti_dev, "unknown event: %d\n",
  965                             TI_EVENT_EVENT(e));
  966                         break;
  967                 }
  968                 /* Advance the consumer index. */
  969                 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
  970                 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
  971         }
  972         bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
  973             sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_PREREAD);
  974 }
  975 
  976 struct ti_dmamap_arg {
  977         bus_addr_t      ti_busaddr;
  978 };
  979 
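      /*
       * bus_dmamap_load() callback used by ti_dma_ring_alloc() below: the
       * ring tags are created with nsegments = 1, so the single segment's
       * bus address is simply handed back through the ti_dmamap_arg cookie.
       */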
  980 static void
  981 ti_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  982 {
  983         struct ti_dmamap_arg *ctx;
  984 
  985         if (error)
  986                 return;
  987 
  988         KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
  989 
  990         ctx = arg;
  991         ctx->ti_busaddr = segs->ds_addr;
  992 }
  993 
  994 static int
  995 ti_dma_ring_alloc(struct ti_softc *sc, bus_size_t alignment, bus_size_t maxsize,
  996     bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
  997     const char *msg)
  998 {
  999         struct ti_dmamap_arg ctx;
 1000         int error;
 1001 
 1002         error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag,
 1003             alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
 1004             NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
 1005         if (error != 0) {
 1006                 device_printf(sc->ti_dev,
 1007                     "could not create %s dma tag\n", msg);
 1008                 return (error);
 1009         }
 1010         /* Allocate DMA'able memory for ring. */
 1011         error = bus_dmamem_alloc(*tag, (void **)ring,
 1012             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
 1013         if (error != 0) {
 1014                 device_printf(sc->ti_dev,
 1015                     "could not allocate DMA'able memory for %s\n", msg);
 1016                 return (error);
 1017         }
 1018         /* Load the address of the ring. */
 1019         ctx.ti_busaddr = 0;
 1020         error = bus_dmamap_load(*tag, *map, *ring, maxsize, ti_dma_map_addr,
 1021             &ctx, BUS_DMA_NOWAIT);
 1022         if (error != 0) {
 1023                 device_printf(sc->ti_dev,
 1024                     "could not load DMA'able memory for %s\n", msg);
 1025                 return (error);
 1026         }
 1027         *paddr = ctx.ti_busaddr;
 1028         return (0);
 1029 }
 1030 
 1031 static void
 1032 ti_dma_ring_free(struct ti_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
 1033     bus_dmamap_t map, bus_addr_t *paddr)
 1034 {
 1035 
 1036         if (*paddr != 0) {
 1037                 bus_dmamap_unload(*tag, map);
 1038                 *paddr = 0;
 1039         }
 1040         if (*ring != NULL) {
 1041                 bus_dmamem_free(*tag, *ring, map);
 1042                 *ring = NULL;
 1043         }
 1044         if (*tag) {
 1045                 bus_dma_tag_destroy(*tag);
 1046                 *tag = NULL;
 1047         }
 1048 }
 1049 
 1050 static int
 1051 ti_dma_alloc(struct ti_softc *sc)
 1052 {
 1053         bus_addr_t lowaddr;
 1054         int i, error;
 1055 
 1056         lowaddr = BUS_SPACE_MAXADDR;
 1057         if (sc->ti_dac == 0)
 1058                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
 1059 
 1060         error = bus_dma_tag_create(bus_get_dma_tag(sc->ti_dev), 1, 0, lowaddr,
 1061             BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
 1062             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
 1063             &sc->ti_cdata.ti_parent_tag);
 1064         if (error != 0) {
 1065                 device_printf(sc->ti_dev,
 1066                     "could not allocate parent dma tag\n");
 1067                 return (ENOMEM);
 1068         }
 1069 
 1070         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_gib),
 1071             &sc->ti_cdata.ti_gib_tag, (uint8_t **)&sc->ti_rdata.ti_info,
 1072             &sc->ti_cdata.ti_gib_map, &sc->ti_rdata.ti_info_paddr, "GIB");
 1073         if (error)
 1074                 return (error);
 1075 
 1076         /* Producer/consumer status */
 1077         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_status),
 1078             &sc->ti_cdata.ti_status_tag, (uint8_t **)&sc->ti_rdata.ti_status,
 1079             &sc->ti_cdata.ti_status_map, &sc->ti_rdata.ti_status_paddr,
 1080             "status block");
 1081         if (error)
 1082                 return (error);
 1083 
 1084         /* Event ring */
 1085         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_EVENT_RING_SZ,
 1086             &sc->ti_cdata.ti_event_ring_tag,
 1087             (uint8_t **)&sc->ti_rdata.ti_event_ring,
 1088             &sc->ti_cdata.ti_event_ring_map, &sc->ti_rdata.ti_event_ring_paddr,
 1089             "event ring");
 1090         if (error)
 1091                 return (error);
 1092 
 1093         /* Command ring lives in shared memory so no need to create DMA area. */
 1094 
 1095         /* Standard RX ring */
 1096         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_STD_RX_RING_SZ,
 1097             &sc->ti_cdata.ti_rx_std_ring_tag,
 1098             (uint8_t **)&sc->ti_rdata.ti_rx_std_ring,
 1099             &sc->ti_cdata.ti_rx_std_ring_map,
 1100             &sc->ti_rdata.ti_rx_std_ring_paddr, "RX ring");
 1101         if (error)
 1102                 return (error);
 1103 
 1104         /* Jumbo RX ring */
 1105         error = ti_dma_ring_alloc(sc, TI_JUMBO_RING_ALIGN, TI_JUMBO_RX_RING_SZ,
 1106             &sc->ti_cdata.ti_rx_jumbo_ring_tag,
 1107             (uint8_t **)&sc->ti_rdata.ti_rx_jumbo_ring,
 1108             &sc->ti_cdata.ti_rx_jumbo_ring_map,
 1109             &sc->ti_rdata.ti_rx_jumbo_ring_paddr, "jumbo RX ring");
 1110         if (error)
 1111                 return (error);
 1112 
 1113         /* RX return ring */
 1114         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_RX_RETURN_RING_SZ,
 1115             &sc->ti_cdata.ti_rx_return_ring_tag,
 1116             (uint8_t **)&sc->ti_rdata.ti_rx_return_ring,
 1117             &sc->ti_cdata.ti_rx_return_ring_map,
 1118             &sc->ti_rdata.ti_rx_return_ring_paddr, "RX return ring");
 1119         if (error)
 1120                 return (error);
 1121 
 1122         /* Create DMA tag for standard RX mbufs. */
 1123         error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
 1124             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
 1125             MCLBYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_std_tag);
 1126         if (error) {
 1127                 device_printf(sc->ti_dev, "could not allocate RX dma tag\n");
 1128                 return (error);
 1129         }
 1130 
 1131         /* Create DMA tag for jumbo RX mbufs. */
 1132 #ifdef TI_SF_BUF_JUMBO
 1133         /*
 1134          * The VM system will take care of providing aligned pages.  Alignment
 1135          * is set to 1 here so that busdma resources won't be wasted.
 1136          */
 1137         error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
 1138             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE * 4, 4,
 1139             PAGE_SIZE, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag);
 1140 #else
 1141         error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
 1142             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1,
 1143             MJUM9BYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag);
 1144 #endif
 1145         if (error) {
 1146                 device_printf(sc->ti_dev,
 1147                     "could not allocate jumbo RX dma tag\n");
 1148                 return (error);
 1149         }
 1150 
 1151         /* Create DMA tag for TX mbufs. */
 1152         error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1,
 1153             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 1154             MCLBYTES * TI_MAXTXSEGS, TI_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
 1155             &sc->ti_cdata.ti_tx_tag);
 1156         if (error) {
 1157                 device_printf(sc->ti_dev, "could not allocate TX dma tag\n");
 1158                 return (ENOMEM);
 1159         }
 1160 
 1161         /* Create DMA maps for RX buffers. */
 1162         for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
 1163                 error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0,
 1164                     &sc->ti_cdata.ti_rx_std_maps[i]);
 1165                 if (error) {
 1166                         device_printf(sc->ti_dev,
 1167                             "could not create DMA map for RX\n");
 1168                         return (error);
 1169                 }
 1170         }
 1171         error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0,
 1172             &sc->ti_cdata.ti_rx_std_sparemap);
 1173         if (error) {
 1174                 device_printf(sc->ti_dev,
 1175                     "could not create spare DMA map for RX\n");
 1176                 return (error);
 1177         }
 1178 
 1179         /* Create DMA maps for jumbo RX buffers. */
 1180         for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
 1181                 error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0,
 1182                     &sc->ti_cdata.ti_rx_jumbo_maps[i]);
 1183                 if (error) {
 1184                         device_printf(sc->ti_dev,
 1185                             "could not create DMA map for jumbo RX\n");
 1186                         return (error);
 1187                 }
 1188         }
 1189         error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0,
 1190             &sc->ti_cdata.ti_rx_jumbo_sparemap);
 1191         if (error) {
 1192                 device_printf(sc->ti_dev,
 1193                     "could not create spare DMA map for jumbo RX\n");
 1194                 return (error);
 1195         }
 1196 
 1197         /* Create DMA maps for TX buffers. */
 1198         for (i = 0; i < TI_TX_RING_CNT; i++) {
 1199                 error = bus_dmamap_create(sc->ti_cdata.ti_tx_tag, 0,
 1200                     &sc->ti_cdata.ti_txdesc[i].tx_dmamap);
 1201                 if (error) {
 1202                         device_printf(sc->ti_dev,
 1203                             "could not create DMA map for TX\n");
 1204                         return (ENOMEM);
 1205                 }
 1206         }
 1207 
 1208         /* Mini ring and TX ring are not available on Tigon 1. */
 1209         if (sc->ti_hwrev == TI_HWREV_TIGON)
 1210                 return (0);
 1211 
 1212         /* TX ring */
 1213         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_TX_RING_SZ,
 1214             &sc->ti_cdata.ti_tx_ring_tag, (uint8_t **)&sc->ti_rdata.ti_tx_ring,
 1215             &sc->ti_cdata.ti_tx_ring_map, &sc->ti_rdata.ti_tx_ring_paddr,
 1216             "TX ring");
 1217         if (error)
 1218                 return (error);
 1219 
 1220         /* Mini RX ring */
 1221         error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_MINI_RX_RING_SZ,
 1222             &sc->ti_cdata.ti_rx_mini_ring_tag,
 1223             (uint8_t **)&sc->ti_rdata.ti_rx_mini_ring,
 1224             &sc->ti_cdata.ti_rx_mini_ring_map,
 1225             &sc->ti_rdata.ti_rx_mini_ring_paddr, "mini RX ring");
 1226         if (error)
 1227                 return (error);
 1228 
 1229         /* Create DMA tag for mini RX mbufs. */
 1230         error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
 1231             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
 1232             MHLEN, 0, NULL, NULL, &sc->ti_cdata.ti_rx_mini_tag);
 1233         if (error) {
 1234                 device_printf(sc->ti_dev,
 1235                     "could not allocate mini RX dma tag\n");
 1236                 return (error);
 1237         }
 1238 
 1239         /* Create DMA maps for mini RX buffers. */
 1240         for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
 1241                 error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0,
 1242                     &sc->ti_cdata.ti_rx_mini_maps[i]);
 1243                 if (error) {
 1244                         device_printf(sc->ti_dev,
 1245                             "could not create DMA map for mini RX\n");
 1246                         return (error);
 1247                 }
 1248         }
 1249         error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0,
 1250             &sc->ti_cdata.ti_rx_mini_sparemap);
 1251         if (error) {
 1252                 device_printf(sc->ti_dev,
 1253                     "could not create spare DMA map for mini RX\n");
 1254                 return (error);
 1255         }
 1256 
 1257         return (0);
 1258 }
 1259 
 1260 static void
 1261 ti_dma_free(struct ti_softc *sc)
 1262 {
 1263         int i;
 1264 
 1265         /* Destroy DMA maps for RX buffers. */
 1266         for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
 1267                 if (sc->ti_cdata.ti_rx_std_maps[i]) {
 1268                         bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag,
 1269                             sc->ti_cdata.ti_rx_std_maps[i]);
 1270                         sc->ti_cdata.ti_rx_std_maps[i] = NULL;
 1271                 }
 1272         }
 1273         if (sc->ti_cdata.ti_rx_std_sparemap) {
 1274                 bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag,
 1275                     sc->ti_cdata.ti_rx_std_sparemap);
 1276                 sc->ti_cdata.ti_rx_std_sparemap = NULL;
 1277         }
 1278         if (sc->ti_cdata.ti_rx_std_tag) {
 1279                 bus_dma_tag_destroy(sc->ti_cdata.ti_rx_std_tag);
 1280                 sc->ti_cdata.ti_rx_std_tag = NULL;
 1281         }
 1282 
 1283         /* Destroy DMA maps for jumbo RX buffers. */
 1284         for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
 1285                 if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
 1286                         bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag,
 1287                             sc->ti_cdata.ti_rx_jumbo_maps[i]);
 1288                         sc->ti_cdata.ti_rx_jumbo_maps[i] = NULL;
 1289                 }
 1290         }
 1291         if (sc->ti_cdata.ti_rx_jumbo_sparemap) {
 1292                 bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag,
 1293                     sc->ti_cdata.ti_rx_jumbo_sparemap);
 1294                 sc->ti_cdata.ti_rx_jumbo_sparemap = NULL;
 1295         }
 1296         if (sc->ti_cdata.ti_rx_jumbo_tag) {
 1297                 bus_dma_tag_destroy(sc->ti_cdata.ti_rx_jumbo_tag);
 1298                 sc->ti_cdata.ti_rx_jumbo_tag = NULL;
 1299         }
 1300 
 1301         /* Destroy DMA maps for mini RX buffers. */
 1302         for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
 1303                 if (sc->ti_cdata.ti_rx_mini_maps[i]) {
 1304                         bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag,
 1305                             sc->ti_cdata.ti_rx_mini_maps[i]);
 1306                         sc->ti_cdata.ti_rx_mini_maps[i] = NULL;
 1307                 }
 1308         }
 1309         if (sc->ti_cdata.ti_rx_mini_sparemap) {
 1310                 bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag,
 1311                     sc->ti_cdata.ti_rx_mini_sparemap);
 1312                 sc->ti_cdata.ti_rx_mini_sparemap = NULL;
 1313         }
 1314         if (sc->ti_cdata.ti_rx_mini_tag) {
 1315                 bus_dma_tag_destroy(sc->ti_cdata.ti_rx_mini_tag);
 1316                 sc->ti_cdata.ti_rx_mini_tag = NULL;
 1317         }
 1318 
 1319         /* Destroy DMA maps for TX buffers. */
 1320         for (i = 0; i < TI_TX_RING_CNT; i++) {
 1321                 if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
 1322                         bus_dmamap_destroy(sc->ti_cdata.ti_tx_tag,
 1323                             sc->ti_cdata.ti_txdesc[i].tx_dmamap);
 1324                         sc->ti_cdata.ti_txdesc[i].tx_dmamap = NULL;
 1325                 }
 1326         }
 1327         if (sc->ti_cdata.ti_tx_tag) {
 1328                 bus_dma_tag_destroy(sc->ti_cdata.ti_tx_tag);
 1329                 sc->ti_cdata.ti_tx_tag = NULL;
 1330         }
 1331 
 1332         /* Destroy standard RX ring. */
 1333         ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_std_ring_tag,
 1334             (void *)&sc->ti_rdata.ti_rx_std_ring,
 1335             sc->ti_cdata.ti_rx_std_ring_map,
 1336             &sc->ti_rdata.ti_rx_std_ring_paddr);
 1337         /* Destroy jumbo RX ring. */
 1338         ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_jumbo_ring_tag,
 1339             (void *)&sc->ti_rdata.ti_rx_jumbo_ring,
 1340             sc->ti_cdata.ti_rx_jumbo_ring_map,
 1341             &sc->ti_rdata.ti_rx_jumbo_ring_paddr);
 1342         /* Destroy mini RX ring. */
 1343         ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_mini_ring_tag,
 1344             (void *)&sc->ti_rdata.ti_rx_mini_ring,
 1345             sc->ti_cdata.ti_rx_mini_ring_map,
 1346             &sc->ti_rdata.ti_rx_mini_ring_paddr);
 1347         /* Destroy RX return ring. */
 1348         ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_return_ring_tag,
 1349             (void *)&sc->ti_rdata.ti_rx_return_ring,
 1350             sc->ti_cdata.ti_rx_return_ring_map,
 1351             &sc->ti_rdata.ti_rx_return_ring_paddr);
 1352         /* Destroy TX ring. */
 1353         ti_dma_ring_free(sc, &sc->ti_cdata.ti_tx_ring_tag,
 1354             (void *)&sc->ti_rdata.ti_tx_ring, sc->ti_cdata.ti_tx_ring_map,
 1355             &sc->ti_rdata.ti_tx_ring_paddr);
 1356         /* Destroy status block. */
 1357         ti_dma_ring_free(sc, &sc->ti_cdata.ti_status_tag,
 1358             (void *)&sc->ti_rdata.ti_status, sc->ti_cdata.ti_status_map,
 1359             &sc->ti_rdata.ti_status_paddr);
 1360         /* Destroy event ring. */
 1361         ti_dma_ring_free(sc, &sc->ti_cdata.ti_event_ring_tag,
 1362             (void *)&sc->ti_rdata.ti_event_ring,
 1363             sc->ti_cdata.ti_event_ring_map, &sc->ti_rdata.ti_event_ring_paddr);
 1364         /* Destroy GIB */
 1365         ti_dma_ring_free(sc, &sc->ti_cdata.ti_gib_tag,
 1366             (void *)&sc->ti_rdata.ti_info, sc->ti_cdata.ti_gib_map,
 1367             &sc->ti_rdata.ti_info_paddr);
 1368 
 1369         /* Destroy the parent tag. */
 1370         if (sc->ti_cdata.ti_parent_tag) {
 1371                 bus_dma_tag_destroy(sc->ti_cdata.ti_parent_tag);
 1372                 sc->ti_cdata.ti_parent_tag = NULL;
 1373         }
 1374 }
 1375 
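      /*
       * Note on the refill scheme used by ti_newbuf_std(), ti_newbuf_mini()
       * and the default (non-TI_SF_BUF_JUMBO) ti_newbuf_jumbo() below: each
       * receive ring keeps one spare DMA map.  A new mbuf is loaded into the
       * spare map first, and only if that load succeeds is the old buffer
       * unloaded and the two maps swapped, so a failed allocation leaves the
       * ring slot's previous mbuf intact.
       */
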
 1376 /*
 1377  * Initialize a standard receive ring descriptor.
 1378  */
 1379 static int
 1380 ti_newbuf_std(struct ti_softc *sc, int i)
 1381 {
 1382         bus_dmamap_t map;
 1383         bus_dma_segment_t segs[1];
 1384         struct mbuf *m;
 1385         struct ti_rx_desc *r;
 1386         int error, nsegs;
 1387 
 1388         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1389         if (m == NULL)
 1390                 return (ENOBUFS);
 1391         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1392         m_adj(m, ETHER_ALIGN);
 1393 
 1394         error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_std_tag,
 1395             sc->ti_cdata.ti_rx_std_sparemap, m, segs, &nsegs, 0);
 1396         if (error != 0) {
 1397                 m_freem(m);
 1398                 return (error);
 1399         }
 1400         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1401 
 1402         if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
 1403                 bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
 1404                     sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_POSTREAD);
 1405                 bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag,
 1406                     sc->ti_cdata.ti_rx_std_maps[i]);
 1407         }
 1408 
 1409         map = sc->ti_cdata.ti_rx_std_maps[i];
 1410         sc->ti_cdata.ti_rx_std_maps[i] = sc->ti_cdata.ti_rx_std_sparemap;
 1411         sc->ti_cdata.ti_rx_std_sparemap = map;
 1412         sc->ti_cdata.ti_rx_std_chain[i] = m;
 1413 
 1414         r = &sc->ti_rdata.ti_rx_std_ring[i];
 1415         ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
 1416         r->ti_len = segs[0].ds_len;
 1417         r->ti_type = TI_BDTYPE_RECV_BD;
 1418         r->ti_flags = 0;
 1419         r->ti_vlan_tag = 0;
 1420         r->ti_tcp_udp_cksum = 0;
 1421         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 1422                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
 1423         r->ti_idx = i;
 1424 
 1425         bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
 1426             sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_PREREAD);
 1427         return (0);
 1428 }
 1429 
 1430 /*
 1431  * Initialize a mini receive ring descriptor. This only applies to
 1432  * the Tigon 2.
 1433  */
 1434 static int
 1435 ti_newbuf_mini(struct ti_softc *sc, int i)
 1436 {
 1437         bus_dmamap_t map;
 1438         bus_dma_segment_t segs[1];
 1439         struct mbuf *m;
 1440         struct ti_rx_desc *r;
 1441         int error, nsegs;
 1442 
 1443         MGETHDR(m, M_NOWAIT, MT_DATA);
 1444         if (m == NULL)
 1445                 return (ENOBUFS);
 1446         m->m_len = m->m_pkthdr.len = MHLEN;
 1447         m_adj(m, ETHER_ALIGN);
 1448 
 1449         error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_mini_tag,
 1450             sc->ti_cdata.ti_rx_mini_sparemap, m, segs, &nsegs, 0);
 1451         if (error != 0) {
 1452                 m_freem(m);
 1453                 return (error);
 1454         }
 1455         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1456 
 1457         if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
 1458                 bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag,
 1459                     sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_POSTREAD);
 1460                 bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag,
 1461                     sc->ti_cdata.ti_rx_mini_maps[i]);
 1462         }
 1463 
 1464         map = sc->ti_cdata.ti_rx_mini_maps[i];
 1465         sc->ti_cdata.ti_rx_mini_maps[i] = sc->ti_cdata.ti_rx_mini_sparemap;
 1466         sc->ti_cdata.ti_rx_mini_sparemap = map;
 1467         sc->ti_cdata.ti_rx_mini_chain[i] = m;
 1468 
 1469         r = &sc->ti_rdata.ti_rx_mini_ring[i];
 1470         ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
 1471         r->ti_len = segs[0].ds_len;
 1472         r->ti_type = TI_BDTYPE_RECV_BD;
 1473         r->ti_flags = TI_BDFLAG_MINI_RING;
 1474         r->ti_vlan_tag = 0;
 1475         r->ti_tcp_udp_cksum = 0;
 1476         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 1477                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
 1478         r->ti_idx = i;
 1479 
 1480         bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag,
 1481             sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_PREREAD);
 1482         return (0);
 1483 }
 1484 
 1485 #ifndef TI_SF_BUF_JUMBO
 1486 
 1487 /*
 1488  * Initialize a jumbo receive ring descriptor. This allocates
 1489  * a 9K (MJUM9BYTES) mbuf cluster from the system mbuf allocator.
 1490  */
 1491 static int
 1492 ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *dummy)
 1493 {
 1494         bus_dmamap_t map;
 1495         bus_dma_segment_t segs[1];
 1496         struct mbuf *m;
 1497         struct ti_rx_desc *r;
 1498         int error, nsegs;
 1499 
 1500         (void)dummy;
 1501 
 1502         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 1503         if (m == NULL)
 1504                 return (ENOBUFS);
 1505         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
 1506         m_adj(m, ETHER_ALIGN);
 1507 
 1508         error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag,
 1509             sc->ti_cdata.ti_rx_jumbo_sparemap, m, segs, &nsegs, 0);
 1510         if (error != 0) {
 1511                 m_freem(m);
 1512                 return (error);
 1513         }
 1514         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1515 
 1516         if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
 1517                 bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag,
 1518                     sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_POSTREAD);
 1519                 bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag,
 1520                     sc->ti_cdata.ti_rx_jumbo_maps[i]);
 1521         }
 1522 
 1523         map = sc->ti_cdata.ti_rx_jumbo_maps[i];
 1524         sc->ti_cdata.ti_rx_jumbo_maps[i] = sc->ti_cdata.ti_rx_jumbo_sparemap;
 1525         sc->ti_cdata.ti_rx_jumbo_sparemap = map;
 1526         sc->ti_cdata.ti_rx_jumbo_chain[i] = m;
 1527 
 1528         r = &sc->ti_rdata.ti_rx_jumbo_ring[i];
 1529         ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
 1530         r->ti_len = segs[0].ds_len;
 1531         r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
 1532         r->ti_flags = TI_BDFLAG_JUMBO_RING;
 1533         r->ti_vlan_tag = 0;
 1534         r->ti_tcp_udp_cksum = 0;
 1535         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 1536                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
 1537         r->ti_idx = i;
 1538 
 1539         bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag,
 1540             sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_PREREAD);
 1541         return (0);
 1542 }
 1543 
 1544 #else
 1545 
 1546 #if (PAGE_SIZE == 4096)
 1547 #define NPAYLOAD 2
 1548 #else
 1549 #define NPAYLOAD 1
 1550 #endif
 1551 
 1552 #define TCP_HDR_LEN (52 + sizeof(struct ether_header))
 1553 #define UDP_HDR_LEN (28 + sizeof(struct ether_header))
 1554 #define NFS_HDR_LEN (UDP_HDR_LEN)
 1555 static int HDR_LEN = TCP_HDR_LEN;
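
      /*
       * Worked breakdown of the constants above (assuming an IPv4 header
       * with no options): 28 = 20 (IP) + 8 (UDP), and 52 = 20 (IP) + 32
       * (TCP header carrying 12 bytes of options, e.g. timestamps).  With
       * sizeof(struct ether_header) == 14 added on, TCP_HDR_LEN works out
       * to 66 bytes and UDP_HDR_LEN to 42.
       */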
 1556 
 1557 /*
 1558  * Initialize a jumbo receive ring descriptor. This allocates
 1559  * a jumbo buffer chain from sf_buf-backed page mbufs and a cluster.
 1560  */
 1561 static int
 1562 ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
 1563 {
 1564         bus_dmamap_t map;
 1565         struct mbuf *cur, *m_new = NULL;
 1566         struct mbuf *m[3] = {NULL, NULL, NULL};
 1567         struct ti_rx_desc_ext *r;
 1568         vm_page_t frame;
 1569         /* One extra buf to make the nobufs cleanup easy. */
 1570         struct sf_buf *sf[3] = {NULL, NULL, NULL};
 1571         int i;
 1572         bus_dma_segment_t segs[4];
 1573         int nsegs;
 1574 
 1575         if (m_old != NULL) {
 1576                 m_new = m_old;
 1577                 cur = m_old->m_next;
 1578                 for (i = 0; i <= NPAYLOAD; i++){
 1579                         m[i] = cur;
 1580                         cur = cur->m_next;
 1581                 }
 1582         } else {
 1583                 /* Allocate the mbufs. */
 1584                 MGETHDR(m_new, M_NOWAIT, MT_DATA);
 1585                 if (m_new == NULL) {
 1586                         device_printf(sc->ti_dev, "mbuf allocation failed "
 1587                             "-- packet dropped!\n");
 1588                         goto nobufs;
 1589                 }
 1590                 MGET(m[NPAYLOAD], M_NOWAIT, MT_DATA);
 1591                 if (m[NPAYLOAD] == NULL) {
 1592                         device_printf(sc->ti_dev, "cluster mbuf allocation "
 1593                             "failed -- packet dropped!\n");
 1594                         goto nobufs;
 1595                 }
 1596                 if (!(MCLGET(m[NPAYLOAD], M_NOWAIT))) {
 1597                         device_printf(sc->ti_dev, "mbuf allocation failed "
 1598                             "-- packet dropped!\n");
 1599                         goto nobufs;
 1600                 }
 1601                 m[NPAYLOAD]->m_len = MCLBYTES;
 1602 
 1603                 for (i = 0; i < NPAYLOAD; i++){
 1604                         MGET(m[i], M_NOWAIT, MT_DATA);
 1605                         if (m[i] == NULL) {
 1606                                 device_printf(sc->ti_dev, "mbuf allocation "
 1607                                     "failed -- packet dropped!\n");
 1608                                 goto nobufs;
 1609                         }
 1610                         frame = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 1611                             VM_ALLOC_WIRED);
 1612                         if (frame == NULL) {
 1613                                 device_printf(sc->ti_dev, "buffer allocation "
 1614                                     "failed -- packet dropped!\n");
 1615                                 printf("      index %d page %d\n", idx, i);
 1616                                 goto nobufs;
 1617                         }
 1618                         sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
 1619                         if (sf[i] == NULL) {
 1620                                 vm_page_unwire_noq(frame);
 1621                                 vm_page_free(frame);
 1622                                 device_printf(sc->ti_dev, "buffer allocation "
 1623                                     "failed -- packet dropped!\n");
 1624                                 printf("      index %d page %d\n", idx, i);
 1625                                 goto nobufs;
 1626                         }
 1627                 }
 1628                 for (i = 0; i < NPAYLOAD; i++){
 1629                 /* Attach the buffer to the mbuf. */
 1630                         m[i]->m_data = (void *)sf_buf_kva(sf[i]);
 1631                         m[i]->m_len = PAGE_SIZE;
 1632                         MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE,
 1633                             sf_mext_free, (void*)sf_buf_kva(sf[i]), sf[i],
 1634                             0, EXT_DISPOSABLE);
 1635                         m[i]->m_next = m[i+1];
 1636                 }
 1637                 /* link the buffers to the header */
 1638                 m_new->m_next = m[0];
 1639                 m_new->m_data += ETHER_ALIGN;
 1640                 if (sc->ti_hdrsplit)
 1641                         m_new->m_len = MHLEN - ETHER_ALIGN;
 1642                 else
 1643                         m_new->m_len = HDR_LEN;
 1644                 m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len;
 1645         }
 1646 
 1647         /* Set up the descriptor. */
 1648         r = &sc->ti_rdata.ti_rx_jumbo_ring[idx];
 1649         sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new;
 1650         map = sc->ti_cdata.ti_rx_jumbo_maps[idx];
 1651         if (bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag, map, m_new,
 1652             segs, &nsegs, 0))
 1653                 return (ENOBUFS);
 1654         if ((nsegs < 1) || (nsegs > 4))
 1655                 return (ENOBUFS);
 1656         ti_hostaddr64(&r->ti_addr0, segs[0].ds_addr);
 1657         r->ti_len0 = m_new->m_len;
 1658 
 1659         ti_hostaddr64(&r->ti_addr1, segs[1].ds_addr);
 1660         r->ti_len1 = PAGE_SIZE;
 1661 
 1662         ti_hostaddr64(&r->ti_addr2, segs[2].ds_addr);
 1663         r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */
 1664 
 1665         if (PAGE_SIZE == 4096) {
 1666                 ti_hostaddr64(&r->ti_addr3, segs[3].ds_addr);
 1667                 r->ti_len3 = MCLBYTES;
 1668         } else {
 1669                 r->ti_len3 = 0;
 1670         }
 1671         r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
 1672 
 1673         r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD;
 1674 
 1675         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 1676                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM;
 1677 
 1678         r->ti_idx = idx;
 1679 
 1680         bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map, BUS_DMASYNC_PREREAD);
 1681         return (0);
 1682 
 1683 nobufs:
 1684 
 1685         /*
 1686          * Warning:
 1687          * This cleanup may only run before the mbufs are strung together.
 1688          * Once they are linked, m_freem() on the head frees the whole
 1689          * chain, and the individual mbufs below would be freed twice.
 1690          */
 1691         if (m_new)
 1692                 m_freem(m_new);
 1693 
 1694         for (i = 0; i < 3; i++) {
 1695                 if (m[i])
 1696                         m_freem(m[i]);
 1697                 if (sf[i])
 1698                         sf_mext_free((void *)sf_buf_kva(sf[i]), sf[i]);
 1699         }
 1700         return (ENOBUFS);
 1701 }
 1702 #endif
 1703 
 1704 /*
 1705  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 1706  * that's 1MB of memory, which is a lot. The loop below fills the entire
 1707  * ring and relies on the host CPU being fast enough to keep up with
 1708  * the NIC.
 1709  */
 1710 static int
 1711 ti_init_rx_ring_std(struct ti_softc *sc)
 1712 {
 1713         int i;
 1714         struct ti_cmd_desc cmd;
 1715 
 1716         for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
 1717                 if (ti_newbuf_std(sc, i) != 0)
 1718                         return (ENOBUFS);
 1719         }
 1720 
 1721         sc->ti_std = TI_STD_RX_RING_CNT - 1;
 1722         TI_UPDATE_STDPROD(sc, TI_STD_RX_RING_CNT - 1);
 1723 
 1724         return (0);
 1725 }
 1726 
 1727 static void
 1728 ti_free_rx_ring_std(struct ti_softc *sc)
 1729 {
 1730         bus_dmamap_t map;
 1731         int i;
 1732 
 1733         for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
 1734                 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
 1735                         map = sc->ti_cdata.ti_rx_std_maps[i];
 1736                         bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag, map,
 1737                             BUS_DMASYNC_POSTREAD);
 1738                         bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag, map);
 1739                         m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
 1740                         sc->ti_cdata.ti_rx_std_chain[i] = NULL;
 1741                 }
 1742         }
 1743         bzero(sc->ti_rdata.ti_rx_std_ring, TI_STD_RX_RING_SZ);
 1744         bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
 1745             sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
 1746 }
 1747 
 1748 static int
 1749 ti_init_rx_ring_jumbo(struct ti_softc *sc)
 1750 {
 1751         struct ti_cmd_desc cmd;
 1752         int i;
 1753 
 1754         for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
 1755                 if (ti_newbuf_jumbo(sc, i, NULL) != 0)
 1756                         return (ENOBUFS);
 1757         }
 1758 
 1759         sc->ti_jumbo = TI_JUMBO_RX_RING_CNT - 1;
 1760         TI_UPDATE_JUMBOPROD(sc, TI_JUMBO_RX_RING_CNT - 1);
 1761 
 1762         return (0);
 1763 }
 1764 
 1765 static void
 1766 ti_free_rx_ring_jumbo(struct ti_softc *sc)
 1767 {
 1768         bus_dmamap_t map;
 1769         int i;
 1770 
 1771         for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
 1772                 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
 1773                         map = sc->ti_cdata.ti_rx_jumbo_maps[i];
 1774                         bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map,
 1775                             BUS_DMASYNC_POSTREAD);
 1776                         bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map);
 1777                         m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
 1778                         sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
 1779                 }
 1780         }
 1781         bzero(sc->ti_rdata.ti_rx_jumbo_ring, TI_JUMBO_RX_RING_SZ);
 1782         bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
 1783             sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
 1784 }
 1785 
 1786 static int
 1787 ti_init_rx_ring_mini(struct ti_softc *sc)
 1788 {
 1789         int i;
 1790 
 1791         for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
 1792                 if (ti_newbuf_mini(sc, i) != 0)
 1793                         return (ENOBUFS);
 1794         }
 1795 
 1796         sc->ti_mini = TI_MINI_RX_RING_CNT - 1;
 1797         TI_UPDATE_MINIPROD(sc, TI_MINI_RX_RING_CNT - 1);
 1798 
 1799         return (0);
 1800 }
 1801 
 1802 static void
 1803 ti_free_rx_ring_mini(struct ti_softc *sc)
 1804 {
 1805         bus_dmamap_t map;
 1806         int i;
 1807 
 1808         if (sc->ti_rdata.ti_rx_mini_ring == NULL)
 1809                 return;
 1810 
 1811         for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
 1812                 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
 1813                         map = sc->ti_cdata.ti_rx_mini_maps[i];
 1814                         bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag, map,
 1815                             BUS_DMASYNC_POSTREAD);
 1816                         bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag, map);
 1817                         m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
 1818                         sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
 1819                 }
 1820         }
 1821         bzero(sc->ti_rdata.ti_rx_mini_ring, TI_MINI_RX_RING_SZ);
 1822         bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
 1823             sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE);
 1824 }
 1825 
 1826 static void
 1827 ti_free_tx_ring(struct ti_softc *sc)
 1828 {
 1829         struct ti_txdesc *txd;
 1830         int i;
 1831 
 1832         if (sc->ti_rdata.ti_tx_ring == NULL)
 1833                 return;
 1834 
 1835         for (i = 0; i < TI_TX_RING_CNT; i++) {
 1836                 txd = &sc->ti_cdata.ti_txdesc[i];
 1837                 if (txd->tx_m != NULL) {
 1838                         bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
 1839                             BUS_DMASYNC_POSTWRITE);
 1840                         bus_dmamap_unload(sc->ti_cdata.ti_tx_tag,
 1841                             txd->tx_dmamap);
 1842                         m_freem(txd->tx_m);
 1843                         txd->tx_m = NULL;
 1844                 }
 1845         }
 1846         bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ);
 1847         bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
 1848             sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
 1849 }
 1850 
 1851 static int
 1852 ti_init_tx_ring(struct ti_softc *sc)
 1853 {
 1854         struct ti_txdesc *txd;
 1855         int i;
 1856 
 1857         STAILQ_INIT(&sc->ti_cdata.ti_txfreeq);
 1858         STAILQ_INIT(&sc->ti_cdata.ti_txbusyq);
 1859         for (i = 0; i < TI_TX_RING_CNT; i++) {
 1860                 txd = &sc->ti_cdata.ti_txdesc[i];
 1861                 STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
 1862         }
 1863         sc->ti_txcnt = 0;
 1864         sc->ti_tx_saved_considx = 0;
 1865         sc->ti_tx_saved_prodidx = 0;
 1866         CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
 1867         return (0);
 1868 }
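
      /*
       * Rough summary based on the queue names (the transmit start and
       * completion paths live elsewhere in this file): descriptors begin on
       * ti_txfreeq, move to ti_txbusyq while the frame they describe is
       * owned by the NIC, and return to ti_txfreeq once the hardware reports
       * the send complete and the DMA map has been unloaded.
       */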
 1869 
 1870 /*
 1871  * The Tigon 2 firmware has a new way to add/delete multicast addresses,
 1872  * but we have to support the old way too so that Tigon 1 cards will
 1873  * work.
 1874  */
 1875 static u_int
 1876 ti_add_mcast(void *arg, struct sockaddr_dl *sdl, u_int count)
 1877 {
 1878         struct ti_softc *sc = arg;
 1879         struct ti_cmd_desc cmd;
 1880         uint16_t *m;
 1881         uint32_t ext[2] = {0, 0};
 1882 
 1883         m = (uint16_t *)LLADDR(sdl);
 1884 
 1885         switch (sc->ti_hwrev) {
 1886         case TI_HWREV_TIGON:
 1887                 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
 1888                 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
 1889                 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
 1890                 break;
 1891         case TI_HWREV_TIGON_II:
 1892                 ext[0] = htons(m[0]);
 1893                 ext[1] = (htons(m[1]) << 16) | htons(m[2]);
 1894                 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
 1895                 break;
 1896         default:
 1897                 device_printf(sc->ti_dev, "unknown hwrev\n");
 1898                 return (0);
 1899         }
 1900         return (1);
 1901 }
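
      /*
       * Worked example (illustrative address): for 00:a0:c9:12:34:56,
       * LLADDR() yields the byte string 00 a0 c9 12 34 56, so
       * htons(m[0]) == 0x00a0, htons(m[1]) == 0xc912 and
       * htons(m[2]) == 0x3456.  The chip therefore sees MAR0 (or ext[0])
       * == 0x000000a0 and MAR1 (or ext[1]) == 0xc9123456, regardless of
       * host byte order.
       */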
 1902 
 1903 static u_int
 1904 ti_del_mcast(void *arg, struct sockaddr_dl *sdl, u_int count)
 1905 {
 1906         struct ti_softc *sc = arg;
 1907         struct ti_cmd_desc cmd;
 1908         uint16_t *m;
 1909         uint32_t ext[2] = {0, 0};
 1910 
 1911         m = (uint16_t *)LLADDR(sdl);
 1912 
 1913         switch (sc->ti_hwrev) {
 1914         case TI_HWREV_TIGON:
 1915                 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
 1916                 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
 1917                 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
 1918                 break;
 1919         case TI_HWREV_TIGON_II:
 1920                 ext[0] = htons(m[0]);
 1921                 ext[1] = (htons(m[1]) << 16) | htons(m[2]);
 1922                 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
 1923                 break;
 1924         default:
 1925                 device_printf(sc->ti_dev, "unknown hwrev\n");
 1926                 return (0);
 1927         }
 1928 
 1929         return (1);
 1930 }
 1931 
 1932 /*
 1933  * Configure the Tigon's multicast address filter.
 1934  *
 1935  * The actual multicast table management is a bit of a pain, thanks to
 1936  * slight brain damage on the part of both Alteon and us. With our
 1937  * multicast code, we are only alerted when the multicast address table
 1938  * changes and at that point we only have the current list of addresses:
 1939  * we only know the current state, not the previous state, so we don't
 1940  * actually know what addresses were removed or added. The firmware has
 1941  * state, but we can't get our grubby mitts on it, and there is no 'delete
 1942  * all multicast addresses' command. Hence, we have to maintain our own
 1943  * state so we know what addresses have been programmed into the NIC at
 1944  * any given time.
 1945  */
 1946 static void
 1947 ti_setmulti(struct ti_softc *sc)
 1948 {
 1949         struct ifnet *ifp;
 1950         struct ti_cmd_desc cmd;
 1951         uint32_t intrs;
 1952 
 1953         TI_LOCK_ASSERT(sc);
 1954 
 1955         ifp = sc->ti_ifp;
 1956 
 1957         if (ifp->if_flags & IFF_ALLMULTI) {
 1958                 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
 1959                 return;
 1960         } else {
 1961                 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
 1962         }
 1963 
 1964         /* Disable interrupts. */
 1965         intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
 1966         CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
 1967 
 1968         /* First, zot all the existing filters. */
 1969         if_foreach_llmaddr(ifp, ti_del_mcast, sc);
 1970 
 1971         /* Now program new ones. */
 1972         if_foreach_llmaddr(ifp, ti_add_mcast, sc);
 1973 
 1974         /* Re-enable interrupts. */
 1975         CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
 1976 }
 1977 
 1978 /*
 1979  * Check to see if the BIOS has configured us for a 64 bit slot when
 1980  * we aren't actually in one. If we detect this condition, we can work
 1981  * around it on the Tigon 2 by setting a bit in the PCI state register,
 1982  * but for the Tigon 1 we must give up and abort the interface attach.
 1983  */
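      /*
       * In concrete terms: when the PCI state register claims a 64-bit bus,
       * a test pattern is written to scratch offset 0x600 and read back from
       * 0x604.  If the pattern shows up there, the mismatch is real: a
       * Tigon 1 gets EINVAL and the attach is aborted, while a Tigon 2 is
       * forced back to 32-bit mode via TI_PCISTATE_32BIT_BUS.
       */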
 1984 static int
 1985 ti_64bitslot_war(struct ti_softc *sc)
 1986 {
 1987 
 1988         if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
 1989                 CSR_WRITE_4(sc, 0x600, 0);
 1990                 CSR_WRITE_4(sc, 0x604, 0);
 1991                 CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
 1992                 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
 1993                         if (sc->ti_hwrev == TI_HWREV_TIGON)
 1994                                 return (EINVAL);
 1995                         else {
 1996                                 TI_SETBIT(sc, TI_PCI_STATE,
 1997                                     TI_PCISTATE_32BIT_BUS);
 1998                                 return (0);
 1999                         }
 2000                 }
 2001         }
 2002 
 2003         return (0);
 2004 }
 2005 
 2006 /*
 2007  * Do endian, PCI and DMA initialization. Also check the on-board ROM
 2008  * self-test results.
 2009  */
 2010 static int
 2011 ti_chipinit(struct ti_softc *sc)
 2012 {
 2013         uint32_t cacheline;
 2014         uint32_t pci_writemax = 0;
 2015         uint32_t hdrsplit;
 2016 
 2017         /* Initialize link to down state. */
 2018         sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;
 2019 
 2020         /* Set endianness before we access any non-PCI registers. */
 2021 #if 0 && BYTE_ORDER == BIG_ENDIAN
 2022         CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
 2023             TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
 2024 #else
 2025         CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
 2026             TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
 2027 #endif
 2028 
 2029         /* Check the ROM failed bit to see if self-tests passed. */
 2030         if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
 2031                 device_printf(sc->ti_dev, "board self-diagnostics failed!\n");
 2032                 return (ENODEV);
 2033         }
 2034 
 2035         /* Halt the CPU. */
 2036         TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);
 2037 
 2038         /* Figure out the hardware revision. */
 2039         switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
 2040         case TI_REV_TIGON_I:
 2041                 sc->ti_hwrev = TI_HWREV_TIGON;
 2042                 break;
 2043         case TI_REV_TIGON_II:
 2044                 sc->ti_hwrev = TI_HWREV_TIGON_II;
 2045                 break;
 2046         default:
 2047                 device_printf(sc->ti_dev, "unsupported chip revision\n");
 2048                 return (ENODEV);
 2049         }
 2050 
 2051         /* Do special setup for Tigon 2. */
 2052         if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
 2053                 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
 2054                 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
 2055                 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
 2056         }
 2057 
 2058         /*
 2059          * We don't have firmware source for the Tigon 1, so Tigon 1 boards
 2060          * can't do header splitting.
 2061          */
 2062 #ifdef TI_JUMBO_HDRSPLIT
 2063         if (sc->ti_hwrev != TI_HWREV_TIGON)
 2064                 sc->ti_hdrsplit = 1;
 2065         else
 2066                 device_printf(sc->ti_dev,
 2067                     "can't do header splitting on a Tigon I board\n");
 2068 #endif /* TI_JUMBO_HDRSPLIT */
 2069 
 2070         /* Set up the PCI state register. */
 2071         CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
 2072         if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
 2073                 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
 2074         }
 2075 
 2076         /* Clear the read/write max DMA parameters. */
 2077         TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
 2078             TI_PCISTATE_READ_MAXDMA));
 2079 
 2080         /* Get cache line size. */
 2081         cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;
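              /*
               * Note: the PCI cache line size field is specified in 32-bit
               * words, so the values accepted below span 4 to 256 bytes; the
               * common x86 value of 16 corresponds to a 64-byte cache line.
               */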
 2082 
 2083         /*
 2084          * If the system has enabled the PCI memory write
 2085          * and invalidate command in the command register, set
 2086          * the write max parameter accordingly. This is necessary
 2087          * to use MWI with the Tigon 2.
 2088          */
 2089         if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
 2090                 switch (cacheline) {
 2091                 case 1:
 2092                 case 4:
 2093                 case 8:
 2094                 case 16:
 2095                 case 32:
 2096                 case 64:
 2097                         break;
 2098                 default:
 2099                         /* Disable PCI memory write and invalidate. */
 2100                         if (bootverbose)
 2101                                 device_printf(sc->ti_dev, "cache line size %d"
 2102                                     " not supported; disabling PCI MWI\n",
 2103                                     cacheline);
 2104                         CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
 2105                             TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
 2106                         break;
 2107                 }
 2108         }
 2109 
 2110         TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
 2111 
 2112         /* This sets the min dma param all the way up (0xff). */
 2113         TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);
 2114 
 2115         if (sc->ti_hdrsplit)
 2116                 hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT;
 2117         else
 2118                 hdrsplit = 0;
 2119 
 2120         /* Configure DMA variables. */
 2121 #if BYTE_ORDER == BIG_ENDIAN
 2122         CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
 2123             TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
 2124             TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
 2125             TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit);
 2126 #else /* BYTE_ORDER */
 2127         CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
 2128             TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
 2129             TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit);
 2130 #endif /* BYTE_ORDER */
 2131 
 2132         /*
 2133          * Only allow 1 DMA channel to be active at a time.
 2134          * I don't think this is a good idea, but without it
 2135          * the firmware racks up lots of nicDmaReadRingFull
 2136          * errors.  This is not compatible with hardware checksums.
 2137          */
 2138         if ((sc->ti_ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_RXCSUM)) == 0)
 2139                 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);
 2140 
 2141         /* Recommended settings from Tigon manual. */
 2142         CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
 2143         CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);
 2144 
 2145         if (ti_64bitslot_war(sc)) {
 2146                 device_printf(sc->ti_dev, "bios thinks we're in a 64 bit slot, "
 2147                     "but we aren't");
 2148                 return (EINVAL);
 2149         }
 2150 
 2151         return (0);
 2152 }
 2153 
 2154 /*
 2155  * Initialize the general information block and firmware, and
 2156  * start the CPU(s) running.
 2157  */
 2158 static int
 2159 ti_gibinit(struct ti_softc *sc)
 2160 {
 2161         struct ifnet *ifp;
 2162         struct ti_rcb *rcb;
 2163         int i;
 2164 
 2165         TI_LOCK_ASSERT(sc);
 2166 
 2167         ifp = sc->ti_ifp;
 2168 
 2169         /* Disable interrupts for now. */
 2170         CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
 2171 
 2172         /* Tell the chip where to find the general information block. */
 2173         CSR_WRITE_4(sc, TI_GCR_GENINFO_HI,
 2174             (uint64_t)sc->ti_rdata.ti_info_paddr >> 32);
 2175         CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
 2176             sc->ti_rdata.ti_info_paddr & 0xFFFFFFFF);
 2177 
 2178         /* Load the firmware into SRAM. */
 2179         ti_loadfw(sc);
 2180 
 2181         /* Set up the contents of the general info and ring control blocks. */
 2182 
 2183         /* Set up the event ring and producer pointer. */
 2184         bzero(sc->ti_rdata.ti_event_ring, TI_EVENT_RING_SZ);
 2185         rcb = &sc->ti_rdata.ti_info->ti_ev_rcb;
 2186         ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_event_ring_paddr);
 2187         rcb->ti_flags = 0;
 2188         ti_hostaddr64(&sc->ti_rdata.ti_info->ti_ev_prodidx_ptr,
 2189             sc->ti_rdata.ti_status_paddr +
 2190             offsetof(struct ti_status, ti_ev_prodidx_r));
 2191         sc->ti_ev_prodidx.ti_idx = 0;
 2192         CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
 2193         sc->ti_ev_saved_considx = 0;
 2194 
 2195         /* Set up the command ring and producer mailbox. */
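              /*
               * Note: unlike the host-resident rings set up elsewhere in this
               * function, the command ring lives in the NIC's own memory
               * window (TI_GCR_NIC_ADDR(TI_GCR_CMDRING)), which is why its
               * entries are cleared below with CSR writes rather than bzero().
               */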
 2196         rcb = &sc->ti_rdata.ti_info->ti_cmd_rcb;
 2197         ti_hostaddr64(&rcb->ti_hostaddr, TI_GCR_NIC_ADDR(TI_GCR_CMDRING));
 2198         rcb->ti_flags = 0;
 2199         rcb->ti_max_len = 0;
 2200         for (i = 0; i < TI_CMD_RING_CNT; i++) {
 2201                 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
 2202         }
 2203         CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
 2204         CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
 2205         sc->ti_cmd_saved_prodidx = 0;
 2206 
 2207         /*
 2208          * Assign the address of the stats refresh buffer.
 2209          * We re-use the current stats buffer for this to
 2210          * conserve memory.
 2211          */
 2212         bzero(&sc->ti_rdata.ti_info->ti_stats, sizeof(struct ti_stats));
 2213         ti_hostaddr64(&sc->ti_rdata.ti_info->ti_refresh_stats_ptr,
 2214             sc->ti_rdata.ti_info_paddr + offsetof(struct ti_gib, ti_stats));
 2215 
 2216         /* Set up the standard receive ring. */
 2217         rcb = &sc->ti_rdata.ti_info->ti_std_rx_rcb;
 2218         ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_std_ring_paddr);
 2219         rcb->ti_max_len = TI_FRAMELEN;
 2220         rcb->ti_flags = 0;
 2221         if (ifp->if_capenable & IFCAP_RXCSUM)
 2222                 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
 2223                      TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
 2224         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
 2225                 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
 2226 
 2227         /* Set up the jumbo receive ring. */
 2228         rcb = &sc->ti_rdata.ti_info->ti_jumbo_rx_rcb;
 2229         ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_jumbo_ring_paddr);
 2230 
 2231 #ifndef TI_SF_BUF_JUMBO
 2232         rcb->ti_max_len = MJUM9BYTES - ETHER_ALIGN;
 2233         rcb->ti_flags = 0;
 2234 #else
 2235         rcb->ti_max_len = PAGE_SIZE;
 2236         rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
 2237 #endif
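              /*
               * Note: in the default (!TI_SF_BUF_JUMBO) build each jumbo slot
               * is a single 9K mbuf cluster; with TI_SF_BUF_JUMBO, the
               * TI_RCB_FLAG_USE_EXT_RX_BD flag selects the extended descriptor
               * format (struct ti_rx_desc_ext, four address/length pairs) that
               * the sf_buf version of ti_newbuf_jumbo() fills in.
               */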
 2238         if (ifp->if_capenable & IFCAP_RXCSUM)
 2239                 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
 2240                      TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
 2241         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
 2242                 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
 2243 
 2244         /*
 2245          * Set up the mini ring. Only activated on the
 2246          * Tigon 2 but the slot in the config block is
 2247          * still there on the Tigon 1.
 2248          */
 2249         rcb = &sc->ti_rdata.ti_info->ti_mini_rx_rcb;
 2250         ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_mini_ring_paddr);
 2251         rcb->ti_max_len = MHLEN - ETHER_ALIGN;
 2252         if (sc->ti_hwrev == TI_HWREV_TIGON)
 2253                 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
 2254         else
 2255                 rcb->ti_flags = 0;
 2256         if (ifp->if_capenable & IFCAP_RXCSUM)
 2257                 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
 2258                      TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
 2259         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
 2260                 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
 2261 
 2262         /*
 2263          * Set up the receive return ring.
 2264          */
 2265         rcb = &sc->ti_rdata.ti_info->ti_return_rcb;
 2266         ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_return_ring_paddr);
 2267         rcb->ti_flags = 0;
 2268         rcb->ti_max_len = TI_RETURN_RING_CNT;
 2269         ti_hostaddr64(&sc->ti_rdata.ti_info->ti_return_prodidx_ptr,
 2270             sc->ti_rdata.ti_status_paddr +
 2271             offsetof(struct ti_status, ti_return_prodidx_r));
 2272 
 2273         /*
 2274          * Set up the tx ring. Note: for the Tigon 2, we have the option
 2275          * of putting the transmit ring in the host's address space and
 2276          * letting the chip DMA it instead of leaving the ring in the NIC's
 2277          * memory and accessing it through the shared memory region. We
 2278          * do this for the Tigon 2, but it doesn't work on the Tigon 1,
 2279          * so we have to revert to the shared memory scheme if we detect
 2280          * a Tigon 1 chip.
 2281          */
 2282         CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
 2283         if (sc->ti_rdata.ti_tx_ring != NULL)
 2284                 bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ);
 2285         rcb = &sc->ti_rdata.ti_info->ti_tx_rcb;
 2286         if (sc->ti_hwrev == TI_HWREV_TIGON)
 2287                 rcb->ti_flags = 0;
 2288         else
 2289                 rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
 2290         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
 2291                 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
 2292         if (ifp->if_capenable & IFCAP_TXCSUM)
 2293                 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
 2294                      TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
 2295         rcb->ti_max_len = TI_TX_RING_CNT;
 2296         if (sc->ti_hwrev == TI_HWREV_TIGON)
 2297                 ti_hostaddr64(&rcb->ti_hostaddr, TI_TX_RING_BASE);
 2298         else
 2299                 ti_hostaddr64(&rcb->ti_hostaddr,
 2300                     sc->ti_rdata.ti_tx_ring_paddr);
 2301         ti_hostaddr64(&sc->ti_rdata.ti_info->ti_tx_considx_ptr,
 2302             sc->ti_rdata.ti_status_paddr +
 2303             offsetof(struct ti_status, ti_tx_considx_r));
 2304 
 2305         bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map,
 2306             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2307         bus_dmamap_sync(sc->ti_cdata.ti_status_tag, sc->ti_cdata.ti_status_map,
 2308             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2309         bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
 2310             sc->ti_cdata.ti_event_ring_map,
 2311             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2312         if (sc->ti_rdata.ti_tx_ring != NULL)
 2313                 bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
 2314                     sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
 2315 
 2316         /* Set up tunables */
 2317 #if 0
 2318         if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
 2319                 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
 2320                     (sc->ti_rx_coal_ticks / 10));
 2321         else
 2322 #endif
 2323                 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
 2324         CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
 2325         CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
 2326         CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
 2327         CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
 2328         CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);
 2329 
 2330         /* Turn interrupts on. */
 2331         CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
 2332         CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
 2333 
 2334         /* Start CPU. */
 2335         TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));
 2336 
 2337         return (0);
 2338 }
 2339 
 2340 /*
 2341  * Probe for a Tigon chip. Check the PCI vendor and device IDs
 2342  * against our list and return its name if we find a match.
 2343  */
 2344 static int
 2345 ti_probe(device_t dev)
 2346 {
 2347         const struct ti_type *t;
 2348 
 2349         t = ti_devs;
 2350 
 2351         while (t->ti_name != NULL) {
 2352                 if ((pci_get_vendor(dev) == t->ti_vid) &&
 2353                     (pci_get_device(dev) == t->ti_did)) {
 2354                         device_set_desc(dev, t->ti_name);
 2355                         return (BUS_PROBE_DEFAULT);
 2356                 }
 2357                 t++;
 2358         }
 2359 
 2360         return (ENXIO);
 2361 }
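
      /*
       * Illustrative sketch only (not compiled; placeholder entries, not
       * copied from this file): ti_devs[] is a table of PCI vendor ID /
       * device ID / description triples, terminated by a NULL name, which
       * the loop in ti_probe() above walks.
       */
      #if 0
      static const struct ti_type ti_devs_example[] = {
              { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER,
                  "Alteon AceNIC 1000baseT Gigabit Ethernet (example)" },
              { NG_VENDORID, NG_DEVICEID_GA620T,
                  "Netgear GA620T 1000baseT Gigabit Ethernet (example)" },
              { 0, 0, NULL }
      };
      #endif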
 2362 
 2363 static int
 2364 ti_attach(device_t dev)
 2365 {
 2366         struct ifnet *ifp;
 2367         struct ti_softc *sc;
 2368         int error = 0, rid;
 2369         u_char eaddr[6];
 2370 
 2371         sc = device_get_softc(dev);
 2372         sc->ti_dev = dev;
 2373 
 2374         mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 2375             MTX_DEF);
 2376         callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0);
 2377         ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
 2378         ifp = sc->ti_ifp = if_alloc(IFT_ETHER);
 2379         if (ifp == NULL) {
 2380                 device_printf(dev, "can not if_alloc()\n");
 2381                 error = ENOSPC;
 2382                 goto fail;
 2383         }
 2384         sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
 2385         sc->ti_ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
 2386         sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities;
 2387 
 2388         /*
 2389          * Map control/status registers.
 2390          */
 2391         pci_enable_busmaster(dev);
 2392 
 2393         rid = PCIR_BAR(0);
 2394         sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 2395             RF_ACTIVE);
 2396 
 2397         if (sc->ti_res == NULL) {
 2398                 device_printf(dev, "couldn't map memory\n");
 2399                 error = ENXIO;
 2400                 goto fail;
 2401         }
 2402 
 2403         sc->ti_btag = rman_get_bustag(sc->ti_res);
 2404         sc->ti_bhandle = rman_get_bushandle(sc->ti_res);
 2405 
 2406         /* Allocate interrupt */
 2407         rid = 0;
 2408 
 2409         sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 2410             RF_SHAREABLE | RF_ACTIVE);
 2411 
 2412         if (sc->ti_irq == NULL) {
 2413                 device_printf(dev, "couldn't map interrupt\n");
 2414                 error = ENXIO;
 2415                 goto fail;
 2416         }
 2417 
 2418         if (ti_chipinit(sc)) {
 2419                 device_printf(dev, "chip initialization failed\n");
 2420                 error = ENXIO;
 2421                 goto fail;
 2422         }
 2423 
 2424         /* Zero out the NIC's on-board SRAM. */
 2425         ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
 2426 
 2427         /* Init again -- zeroing memory may have clobbered some registers. */
 2428         if (ti_chipinit(sc)) {
 2429                 device_printf(dev, "chip initialization failed\n");
 2430                 error = ENXIO;
 2431                 goto fail;
 2432         }
 2433 
 2434         /*
 2435          * Get station address from the EEPROM. Note: the manual states
 2436  * that the MAC address is at offset 0x8c; however, the data is
 2437          * stored as two longwords (since that's how it's loaded into
 2438          * the NIC). This means the MAC address is actually preceded
 2439          * by two zero bytes. We need to skip over those.
 2440          */
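              /*
               * Layout sketch (assuming TI_EE_MAC_OFFSET is the 0x8c
               * mentioned above): the EEPROM bytes there are
               *
               *      0x8c: 00 00 aa bb    0x90: cc dd ee ff
               *
               * for a station address aa:bb:cc:dd:ee:ff, which is why the
               * read below starts at TI_EE_MAC_OFFSET + 2.
               */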
 2441         if (ti_read_eeprom(sc, eaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
 2442                 device_printf(dev, "failed to read station address\n");
 2443                 error = ENXIO;
 2444                 goto fail;
 2445         }
 2446 
 2447         /* Allocate working area for memory dump. */
 2448         sc->ti_membuf = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF, M_NOWAIT);
 2449         sc->ti_membuf2 = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF,
 2450             M_NOWAIT);
 2451         if (sc->ti_membuf == NULL || sc->ti_membuf2 == NULL) {
 2452                 device_printf(dev, "cannot allocate memory buffer\n");
 2453                 error = ENOMEM;
 2454                 goto fail;
 2455         }
 2456         if ((error = ti_dma_alloc(sc)) != 0)
 2457                 goto fail;
 2458 
 2459         /*
 2460          * We really need a better way to tell a 1000baseTX card
 2461          * from a 1000baseSX one, since in theory there could be
 2462          * OEMed 1000baseTX cards from lame vendors who aren't
 2463          * clever enough to change the PCI ID. For the moment
 2464          * though, the AceNIC is the only copper card available.
 2465          */
 2466         if (pci_get_vendor(dev) == ALT_VENDORID &&
 2467             pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER)
 2468                 sc->ti_copper = 1;
 2469         /* Ok, it's not the only copper card available. */
 2470         if (pci_get_vendor(dev) == NG_VENDORID &&
 2471             pci_get_device(dev) == NG_DEVICEID_GA620T)
 2472                 sc->ti_copper = 1;
 2473 
 2474         /* Set default tunable values. */
 2475         ti_sysctl_node(sc);
 2476 
 2477         /* Set up ifnet structure */
 2478         ifp->if_softc = sc;
 2479         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 2480         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 2481         ifp->if_ioctl = ti_ioctl;
 2482         ifp->if_start = ti_start;
 2483         ifp->if_init = ti_init;
 2484         ifp->if_get_counter = ti_get_counter;
 2485         ifp->if_baudrate = IF_Gbps(1UL);
 2486         ifp->if_snd.ifq_drv_maxlen = TI_TX_RING_CNT - 1;
 2487         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 2488         IFQ_SET_READY(&ifp->if_snd);
 2489 
 2490         /* Set up ifmedia support. */
 2491         if (sc->ti_copper) {
 2492                 /*
 2493                  * Copper cards allow manual 10/100 mode selection,
 2494                  * but not manual 1000baseTX mode selection. Why?
 2495                  * Because currently there's no way to specify the
 2496                  * master/slave setting through the firmware interface,
 2497                  * so Alteon decided to just bag it and handle it
 2498                  * via autonegotiation.
 2499                  */
 2500                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
 2501                 ifmedia_add(&sc->ifmedia,
 2502                     IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
 2503                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
 2504                 ifmedia_add(&sc->ifmedia,
 2505                     IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
 2506                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
 2507                 ifmedia_add(&sc->ifmedia,
 2508                     IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
 2509         } else {
 2510                 /* Fiber cards don't support 10/100 modes. */
 2511                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
 2512                 ifmedia_add(&sc->ifmedia,
 2513                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
 2514         }
 2515         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 2516         ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
 2517 
 2518         /*
 2519          * We're assuming here that card initialization is a sequential
 2520          * thing.  If it isn't, multiple cards probing at the same time
 2521          * could stomp on the list of softcs here.
 2522          */
 2523 
 2524         /* Register the device */
 2525         sc->dev = make_dev(&ti_cdevsw, device_get_unit(dev), UID_ROOT,
 2526             GID_OPERATOR, 0600, "ti%d", device_get_unit(dev));
 2527         sc->dev->si_drv1 = sc;
 2528 
 2529         /*
 2530          * Call MI attach routine.
 2531          */
 2532         ether_ifattach(ifp, eaddr);
 2533 
 2534         /* VLAN capability setup. */
 2535         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM |
 2536             IFCAP_VLAN_HWTAGGING;
 2537         ifp->if_capenable = ifp->if_capabilities;
 2538         /* Tell the upper layer we support VLAN over-sized frames. */
 2539         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
 2540 
 2541         /* Driver supports link state tracking. */
 2542         ifp->if_capabilities |= IFCAP_LINKSTATE;
 2543         ifp->if_capenable |= IFCAP_LINKSTATE;
 2544 
 2545         /* Hook interrupt last to avoid having to lock softc */
 2546         error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
 2547            NULL, ti_intr, sc, &sc->ti_intrhand);
 2548 
 2549         if (error) {
 2550                 device_printf(dev, "couldn't set up irq\n");
 2551                 goto fail;
 2552         }
 2553 
 2554 fail:
 2555         if (error)
 2556                 ti_detach(dev);
 2557 
 2558         return (error);
 2559 }
 2560 
 2561 /*
 2562  * Shutdown hardware and free up resources. This can be called any
 2563  * time after the mutex has been initialized. It is called in both
 2564  * the error case in attach and the normal detach case so it needs
 2565  * to be careful about only freeing resources that have actually been
 2566  * allocated.
 2567  */
 2568 static int
 2569 ti_detach(device_t dev)
 2570 {
 2571         struct ti_softc *sc;
 2572         struct ifnet *ifp;
 2573 
 2574         sc = device_get_softc(dev);
 2575         if (sc->dev)
 2576                 destroy_dev(sc->dev);
 2577         KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized"));
 2578         ifp = sc->ti_ifp;
 2579         if (device_is_attached(dev)) {
 2580                 ether_ifdetach(ifp);
 2581                 TI_LOCK(sc);
 2582                 ti_stop(sc);
 2583                 TI_UNLOCK(sc);
 2584         }
 2585 
 2586         /* These should only be active if attach succeeded */
 2587         callout_drain(&sc->ti_watchdog);
 2588         bus_generic_detach(dev);
 2589         ti_dma_free(sc);
 2590         ifmedia_removeall(&sc->ifmedia);
 2591 
 2592         if (sc->ti_intrhand)
 2593                 bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
 2594         if (sc->ti_irq)
 2595                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
 2596         if (sc->ti_res) {
 2597                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
 2598                     sc->ti_res);
 2599         }
 2600         if (ifp)
 2601                 if_free(ifp);
 2602         if (sc->ti_membuf)
 2603                 free(sc->ti_membuf, M_DEVBUF);
 2604         if (sc->ti_membuf2)
 2605                 free(sc->ti_membuf2, M_DEVBUF);
 2606 
 2607         mtx_destroy(&sc->ti_mtx);
 2608 
 2609         return (0);
 2610 }
 2611 
 2612 #ifdef TI_JUMBO_HDRSPLIT
 2613 /*
 2614  * If hdr_len is 0, that means that header splitting wasn't done on
 2615  * this packet for some reason.  The two most likely reasons are that
 2616  * the protocol isn't a supported protocol for splitting, or this
 2617  * packet had a fragment offset that wasn't 0.
 2618  *
 2619  * The header length, if it is non-zero, will always be the length of
 2620  * the headers on the packet, but that length could be longer than the
 2621  * first mbuf.  So we take the minimum of the two as the actual
 2622  * length.
 2623  */
 2624 static __inline void
 2625 ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
 2626 {
 2627         int i = 0;
 2628         int lengths[4] = {0, 0, 0, 0};
 2629         struct mbuf *m, *mp;
 2630 
 2631         if (hdr_len != 0)
 2632                 top->m_len = min(hdr_len, top->m_len);
 2633         pkt_len -= top->m_len;
 2634         lengths[i++] = top->m_len;
 2635 
 2636         mp = top;
 2637         for (m = top->m_next; m && pkt_len; m = m->m_next) {
 2638                 m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len);
 2639                 pkt_len -= m->m_len;
 2640                 lengths[i++] = m->m_len;
 2641                 mp = m;
 2642         }
 2643 
 2644 #if 0
 2645         if (hdr_len != 0)
 2646                 printf("got split packet: ");
 2647         else
 2648                 printf("got non-split packet: ");
 2649 
 2650         printf("%d,%d,%d,%d = %d\n", lengths[0],
 2651             lengths[1], lengths[2], lengths[3],
 2652             lengths[0] + lengths[1] + lengths[2] +
 2653             lengths[3]);
 2654 #endif
 2655 
 2656         if (pkt_len)
 2657                 panic("header splitting didn't");
 2658 
 2659         if (m) {
 2660                 m_freem(m);
 2661                 mp->m_next = NULL;
 2662         }
 2663         if (mp->m_next != NULL)
 2664                 panic("ti_hdr_split: last mbuf in chain should be null");
 2665 }
 2666 #endif /* TI_JUMBO_HDRSPLIT */
 2667 
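       /*
        * Note on the ti_discard_*() helpers below: they recycle the mbuf
        * that is already attached to ring slot i.  Instead of allocating a
        * replacement buffer, they simply rewrite the receive descriptor so
        * the chip can reuse the old one, which is how errored frames and
        * buffer-allocation failures are handled without losing ring slots.
        */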
 2668 static void
 2669 ti_discard_std(struct ti_softc *sc, int i)
 2670 {
 2671 
 2672         struct ti_rx_desc *r;
 2673 
 2674         r = &sc->ti_rdata.ti_rx_std_ring[i];
 2675         r->ti_len = MCLBYTES - ETHER_ALIGN;
 2676         r->ti_type = TI_BDTYPE_RECV_BD;
 2677         r->ti_flags = 0;
 2678         r->ti_vlan_tag = 0;
 2679         r->ti_tcp_udp_cksum = 0;
 2680         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 2681                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
 2682         r->ti_idx = i;
 2683 }
 2684 
 2685 static void
 2686 ti_discard_mini(struct ti_softc *sc, int i)
 2687 {
 2688 
 2689         struct ti_rx_desc *r;
 2690 
 2691         r = &sc->ti_rdata.ti_rx_mini_ring[i];
 2692         r->ti_len = MHLEN - ETHER_ALIGN;
 2693         r->ti_type = TI_BDTYPE_RECV_BD;
 2694         r->ti_flags = TI_BDFLAG_MINI_RING;
 2695         r->ti_vlan_tag = 0;
 2696         r->ti_tcp_udp_cksum = 0;
 2697         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 2698                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
 2699         r->ti_idx = i;
 2700 }
 2701 
 2702 #ifndef TI_SF_BUF_JUMBO
 2703 static void
 2704 ti_discard_jumbo(struct ti_softc *sc, int i)
 2705 {
 2706 
 2707         struct ti_rx_desc *r;
 2708 
 2709         r = &sc->ti_rdata.ti_rx_jumbo_ring[i];
 2710         r->ti_len = MJUM9BYTES - ETHER_ALIGN;
 2711         r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
 2712         r->ti_flags = TI_BDFLAG_JUMBO_RING;
 2713         r->ti_vlan_tag = 0;
 2714         r->ti_tcp_udp_cksum = 0;
 2715         if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
 2716                 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
 2717         r->ti_idx = i;
 2718 }
 2719 #endif
 2720 
 2721 /*
 2722  * Frame reception handling. This is called if there's a frame
 2723  * on the receive return list.
 2724  *
 2725  * Note: we have to be able to handle three possibilities here:
  2726  * 1) the frame is from the mini receive ring (can only happen
 2727  *    on Tigon 2 boards)
 2728  * 2) the frame is from the jumbo receive ring
 2729  * 3) the frame is from the standard receive ring
 2730  */
 2731 
 2732 static void
 2733 ti_rxeof(struct ti_softc *sc)
 2734 {
 2735         struct ifnet *ifp;
 2736 #ifdef TI_SF_BUF_JUMBO
 2737         bus_dmamap_t map;
 2738 #endif
 2739         struct ti_cmd_desc cmd;
 2740         int jumbocnt, minicnt, stdcnt, ti_len;
 2741 
 2742         TI_LOCK_ASSERT(sc);
 2743 
 2744         ifp = sc->ti_ifp;
 2745 
 2746         bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
 2747             sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
 2748         if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
 2749                 bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
 2750                     sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
 2751         if (sc->ti_rdata.ti_rx_mini_ring != NULL)
 2752                 bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
 2753                     sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_POSTWRITE);
 2754         bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag,
 2755             sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
 2756 
 2757         jumbocnt = minicnt = stdcnt = 0;
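               /*
                * Walk the RX return ring from our saved consumer index up to
                * the producer index last reported by the firmware.  Each
                * return descriptor identifies which of the three buffer
                * rings (standard, mini or jumbo) the frame arrived on, so
                * per-ring counters are kept to update the producers below.
                */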
 2758         while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
 2759                 struct ti_rx_desc *cur_rx;
 2760                 uint32_t rxidx;
 2761                 struct mbuf *m = NULL;
 2762                 uint16_t vlan_tag = 0;
 2763                 int have_tag = 0;
 2764 
 2765                 cur_rx =
 2766                     &sc->ti_rdata.ti_rx_return_ring[sc->ti_rx_saved_considx];
 2767                 rxidx = cur_rx->ti_idx;
 2768                 ti_len = cur_rx->ti_len;
 2769                 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);
 2770 
 2771                 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
 2772                         have_tag = 1;
 2773                         vlan_tag = cur_rx->ti_vlan_tag;
 2774                 }
 2775 
 2776                 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
 2777                         jumbocnt++;
 2778                         TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
 2779                         m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
 2780 #ifndef TI_SF_BUF_JUMBO
 2781                         if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
 2782                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2783                                 ti_discard_jumbo(sc, rxidx);
 2784                                 continue;
 2785                         }
 2786                         if (ti_newbuf_jumbo(sc, rxidx, NULL) != 0) {
 2787                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2788                                 ti_discard_jumbo(sc, rxidx);
 2789                                 continue;
 2790                         }
 2791                         m->m_len = ti_len;
 2792 #else /* !TI_SF_BUF_JUMBO */
 2793                         sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
 2794                         map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx];
 2795                         bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map,
 2796                             BUS_DMASYNC_POSTREAD);
 2797                         bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map);
 2798                         if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
 2799                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2800                                 ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
 2801                                 continue;
 2802                         }
 2803                         if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) {
 2804                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2805                                 ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
 2806                                 continue;
 2807                         }
 2808 #ifdef TI_JUMBO_HDRSPLIT
 2809                         if (sc->ti_hdrsplit)
 2810                                 ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
 2811                                              ti_len, rxidx);
 2812                         else
 2813 #endif /* TI_JUMBO_HDRSPLIT */
 2814                         m_adj(m, ti_len - m->m_pkthdr.len);
 2815 #endif /* TI_SF_BUF_JUMBO */
 2816                 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
 2817                         minicnt++;
 2818                         TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
 2819                         m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
 2820                         if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
 2821                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2822                                 ti_discard_mini(sc, rxidx);
 2823                                 continue;
 2824                         }
 2825                         if (ti_newbuf_mini(sc, rxidx) != 0) {
 2826                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2827                                 ti_discard_mini(sc, rxidx);
 2828                                 continue;
 2829                         }
 2830                         m->m_len = ti_len;
 2831                 } else {
 2832                         stdcnt++;
 2833                         TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
 2834                         m = sc->ti_cdata.ti_rx_std_chain[rxidx];
 2835                         if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
 2836                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 2837                                 ti_discard_std(sc, rxidx);
 2838                                 continue;
 2839                         }
 2840                         if (ti_newbuf_std(sc, rxidx) != 0) {
 2841                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 2842                                 ti_discard_std(sc, rxidx);
 2843                                 continue;
 2844                         }
 2845                         m->m_len = ti_len;
 2846                 }
 2847 
 2848                 m->m_pkthdr.len = ti_len;
 2849                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 2850                 m->m_pkthdr.rcvif = ifp;
 2851 
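                       /*
                        * Checksum offload: the chip reports the IP header
                        * checksum in one's complement form, so a value that
                        * XORs with 0xffff to zero indicates a valid header.
                        * For TCP/UDP only the raw hardware checksum is passed
                        * up in csum_data (CSUM_DATA_VALID without
                        * CSUM_PSEUDO_HDR), leaving the pseudo-header folding
                        * to the protocol layer.
                        */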
 2852                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2853                         if (cur_rx->ti_flags & TI_BDFLAG_IP_CKSUM) {
 2854                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2855                                 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
 2856                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2857                         }
 2858                         if (cur_rx->ti_flags & TI_BDFLAG_TCP_UDP_CKSUM) {
 2859                                 m->m_pkthdr.csum_data =
 2860                                     cur_rx->ti_tcp_udp_cksum;
 2861                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 2862                         }
 2863                 }
 2864 
 2865                 /*
 2866                  * If we received a packet with a vlan tag,
 2867                  * tag it before passing the packet upward.
 2868                  */
 2869                 if (have_tag) {
 2870                         m->m_pkthdr.ether_vtag = vlan_tag;
 2871                         m->m_flags |= M_VLANTAG;
 2872                 }
 2873                 TI_UNLOCK(sc);
 2874                 (*ifp->if_input)(ifp, m);
 2875                 TI_LOCK(sc);
 2876         }
 2877 
 2878         bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag,
 2879             sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_PREREAD);
 2880         /* Only necessary on the Tigon 1. */
 2881         if (sc->ti_hwrev == TI_HWREV_TIGON)
 2882                 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
 2883                     sc->ti_rx_saved_considx);
 2884 
 2885         if (stdcnt > 0) {
 2886                 bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
 2887                     sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
 2888                 TI_UPDATE_STDPROD(sc, sc->ti_std);
 2889         }
 2890         if (minicnt > 0) {
 2891                 bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
 2892                     sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE);
 2893                 TI_UPDATE_MINIPROD(sc, sc->ti_mini);
 2894         }
 2895         if (jumbocnt > 0) {
 2896                 bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
 2897                     sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
 2898                 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
 2899         }
 2900 }
 2901 
 2902 static void
 2903 ti_txeof(struct ti_softc *sc)
 2904 {
 2905         struct ti_txdesc *txd;
 2906         struct ti_tx_desc txdesc;
 2907         struct ti_tx_desc *cur_tx = NULL;
 2908         struct ifnet *ifp;
 2909         int idx;
 2910 
 2911         ifp = sc->ti_ifp;
 2912 
 2913         txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
 2914         if (txd == NULL)
 2915                 return;
 2916 
 2917         if (sc->ti_rdata.ti_tx_ring != NULL)
 2918                 bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
 2919                     sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_POSTWRITE);
 2920         /*
 2921          * Go through our tx ring and free mbufs for those
 2922          * frames that have been sent.
 2923          */
 2924         for (idx = sc->ti_tx_saved_considx; idx != sc->ti_tx_considx.ti_idx;
 2925             TI_INC(idx, TI_TX_RING_CNT)) {
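                       /*
                        * On the Tigon 1 the TX ring lives in NIC SRAM, so the
                        * descriptor must be copied back into a local buffer
                        * with ti_mem_read(); on the Tigon 2 the ring sits in
                        * host memory and can be referenced directly.
                        */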
 2926                 if (sc->ti_hwrev == TI_HWREV_TIGON) {
 2927                         ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
 2928                             sizeof(txdesc), &txdesc);
 2929                         cur_tx = &txdesc;
 2930                 } else
 2931                         cur_tx = &sc->ti_rdata.ti_tx_ring[idx];
 2932                 sc->ti_txcnt--;
 2933                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2934                 if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0)
 2935                         continue;
 2936                 bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
 2937                     BUS_DMASYNC_POSTWRITE);
 2938                 bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap);
 2939 
 2940                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 2941                 m_freem(txd->tx_m);
 2942                 txd->tx_m = NULL;
 2943                 STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txbusyq, tx_q);
 2944                 STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
 2945                 txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
 2946         }
 2947         sc->ti_tx_saved_considx = idx;
 2948         if (sc->ti_txcnt == 0)
 2949                 sc->ti_timer = 0;
 2950 }
 2951 
 2952 static void
 2953 ti_intr(void *xsc)
 2954 {
 2955         struct ti_softc *sc;
 2956         struct ifnet *ifp;
 2957 
 2958         sc = xsc;
 2959         TI_LOCK(sc);
 2960         ifp = sc->ti_ifp;
 2961 
 2962         /* Make sure this is really our interrupt. */
 2963         if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
 2964                 TI_UNLOCK(sc);
 2965                 return;
 2966         }
 2967 
 2968         /* Ack interrupt and stop others from occurring. */
 2969         CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
 2970 
 2971         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2972                 bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
 2973                     sc->ti_cdata.ti_status_map, BUS_DMASYNC_POSTREAD);
 2974                 /* Check RX return ring producer/consumer */
 2975                 ti_rxeof(sc);
 2976 
 2977                 /* Check TX ring producer/consumer */
 2978                 ti_txeof(sc);
 2979                 bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
 2980                     sc->ti_cdata.ti_status_map, BUS_DMASYNC_PREREAD);
 2981         }
 2982 
 2983         ti_handle_events(sc);
 2984 
 2985         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2986                 /* Re-enable interrupts. */
 2987                 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
 2988                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2989                         ti_start_locked(ifp);
 2990         }
 2991 
 2992         TI_UNLOCK(sc);
 2993 }
 2994 
 2995 static uint64_t
 2996 ti_get_counter(struct ifnet *ifp, ift_counter cnt)
 2997 {
 2998 
 2999         switch (cnt) {
 3000         case IFCOUNTER_COLLISIONS:
 3001             {
 3002                 struct ti_softc *sc;
 3003                 struct ti_stats *s;
 3004                 uint64_t rv;
 3005 
 3006                 sc = if_getsoftc(ifp);
 3007                 s = &sc->ti_rdata.ti_info->ti_stats;
 3008 
 3009                 TI_LOCK(sc);
 3010                 bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
 3011                     sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD);
 3012                 rv = s->dot3StatsSingleCollisionFrames +
 3013                     s->dot3StatsMultipleCollisionFrames +
 3014                     s->dot3StatsExcessiveCollisions +
 3015                     s->dot3StatsLateCollisions;
 3016                 bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
 3017                     sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD);
 3018                 TI_UNLOCK(sc);
 3019                 return (rv);
 3020             }
 3021         default:
 3022                 return (if_get_counter_default(ifp, cnt));
 3023         }
 3024 }
 3025 
 3026 /*
  3027  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 3028  * pointers to descriptors.
 3029  */
 3030 static int
 3031 ti_encap(struct ti_softc *sc, struct mbuf **m_head)
 3032 {
 3033         struct ti_txdesc *txd;
 3034         struct ti_tx_desc *f;
 3035         struct ti_tx_desc txdesc;
 3036         struct mbuf *m;
 3037         bus_dma_segment_t txsegs[TI_MAXTXSEGS];
 3038         uint16_t csum_flags;
 3039         int error, frag, i, nseg;
 3040 
 3041         if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL)
 3042                 return (ENOBUFS);
 3043 
 3044         error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
 3045             *m_head, txsegs, &nseg, 0);
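               /*
                * EFBIG means the chain maps to more segments than the DMA tag
                * allows; collapse it into a shorter chain with m_defrag() and
                * retry the load once before giving up.
                */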
 3046         if (error == EFBIG) {
 3047                 m = m_defrag(*m_head, M_NOWAIT);
 3048                 if (m == NULL) {
 3049                         m_freem(*m_head);
 3050                         *m_head = NULL;
 3051                         return (ENOMEM);
 3052                 }
 3053                 *m_head = m;
 3054                 error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag,
 3055                     txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
 3056                 if (error) {
 3057                         m_freem(*m_head);
 3058                         *m_head = NULL;
 3059                         return (error);
 3060                 }
 3061         } else if (error != 0)
 3062                 return (error);
 3063         if (nseg == 0) {
 3064                 m_freem(*m_head);
 3065                 *m_head = NULL;
 3066                 return (EIO);
 3067         }
 3068 
 3069         if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) {
 3070                 bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap);
 3071                 return (ENOBUFS);
 3072         }
 3073         bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
 3074             BUS_DMASYNC_PREWRITE);
 3075 
 3076         m = *m_head;
 3077         csum_flags = 0;
 3078         if (m->m_pkthdr.csum_flags & CSUM_IP)
 3079                 csum_flags |= TI_BDFLAG_IP_CKSUM;
 3080         if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
 3081                 csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
 3082 
 3083         frag = sc->ti_tx_saved_prodidx;
 3084         for (i = 0; i < nseg; i++) {
 3085                 if (sc->ti_hwrev == TI_HWREV_TIGON) {
 3086                         bzero(&txdesc, sizeof(txdesc));
 3087                         f = &txdesc;
 3088                 } else
 3089                         f = &sc->ti_rdata.ti_tx_ring[frag];
 3090                 ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr);
 3091                 f->ti_len = txsegs[i].ds_len;
 3092                 f->ti_flags = csum_flags;
 3093                 if (m->m_flags & M_VLANTAG) {
 3094                         f->ti_flags |= TI_BDFLAG_VLAN_TAG;
 3095                         f->ti_vlan_tag = m->m_pkthdr.ether_vtag;
 3096                 } else {
 3097                         f->ti_vlan_tag = 0;
 3098                 }
 3099 
 3100                 if (sc->ti_hwrev == TI_HWREV_TIGON)
 3101                         ti_mem_write(sc, TI_TX_RING_BASE + frag *
 3102                             sizeof(txdesc), sizeof(txdesc), &txdesc);
 3103                 TI_INC(frag, TI_TX_RING_CNT);
 3104         }
 3105 
 3106         sc->ti_tx_saved_prodidx = frag;
 3107         /* set TI_BDFLAG_END on the last descriptor */
 3108         frag = (frag + TI_TX_RING_CNT - 1) % TI_TX_RING_CNT;
 3109         if (sc->ti_hwrev == TI_HWREV_TIGON) {
 3110                 txdesc.ti_flags |= TI_BDFLAG_END;
 3111                 ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
 3112                     sizeof(txdesc), &txdesc);
 3113         } else
 3114                 sc->ti_rdata.ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;
 3115 
 3116         STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q);
 3117         STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q);
 3118         txd->tx_m = m;
 3119         sc->ti_txcnt += nseg;
 3120 
 3121         return (0);
 3122 }
 3123 
 3124 static void
 3125 ti_start(struct ifnet *ifp)
 3126 {
 3127         struct ti_softc *sc;
 3128 
 3129         sc = ifp->if_softc;
 3130         TI_LOCK(sc);
 3131         ti_start_locked(ifp);
 3132         TI_UNLOCK(sc);
 3133 }
 3134 
 3135 /*
 3136  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 3137  * to the mbuf data regions directly in the transmit descriptors.
 3138  */
 3139 static void
 3140 ti_start_locked(struct ifnet *ifp)
 3141 {
 3142         struct ti_softc *sc;
 3143         struct mbuf *m_head = NULL;
 3144         int enq = 0;
 3145 
 3146         sc = ifp->if_softc;
 3147 
 3148         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 3149             sc->ti_txcnt < (TI_TX_RING_CNT - 16);) {
 3150                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 3151                 if (m_head == NULL)
 3152                         break;
 3153 
 3154                 /*
 3155                  * Pack the data into the transmit ring. If we
 3156                  * don't have room, set the OACTIVE flag and wait
 3157                  * for the NIC to drain the ring.
 3158                  */
 3159                 if (ti_encap(sc, &m_head)) {
 3160                         if (m_head == NULL)
 3161                                 break;
 3162                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 3163                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 3164                         break;
 3165                 }
 3166 
 3167                 enq++;
 3168                 /*
 3169                  * If there's a BPF listener, bounce a copy of this frame
 3170                  * to him.
 3171                  */
 3172                 ETHER_BPF_MTAP(ifp, m_head);
 3173         }
 3174 
 3175         if (enq > 0) {
 3176                 if (sc->ti_rdata.ti_tx_ring != NULL)
 3177                         bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
 3178                             sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
 3179                 /* Transmit */
 3180                 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx);
 3181 
 3182                 /*
 3183                  * Set a timeout in case the chip goes out to lunch.
 3184                  */
 3185                 sc->ti_timer = 5;
 3186         }
 3187 }
 3188 
 3189 static void
 3190 ti_init(void *xsc)
 3191 {
 3192         struct ti_softc *sc;
 3193 
 3194         sc = xsc;
 3195         TI_LOCK(sc);
 3196         ti_init_locked(sc);
 3197         TI_UNLOCK(sc);
 3198 }
 3199 
 3200 static void
 3201 ti_init_locked(void *xsc)
 3202 {
 3203         struct ti_softc *sc = xsc;
 3204 
 3205         if (sc->ti_ifp->if_drv_flags & IFF_DRV_RUNNING)
 3206                 return;
 3207 
 3208         /* Cancel pending I/O and flush buffers. */
 3209         ti_stop(sc);
 3210 
 3211         /* Init the gen info block, ring control blocks and firmware. */
 3212         if (ti_gibinit(sc)) {
 3213                 device_printf(sc->ti_dev, "initialization failure\n");
 3214                 return;
 3215         }
 3216 }
 3217 
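       /*
        * Second-stage initialization.  ti_init_locked() above only loads the
        * firmware via ti_gibinit(); the remaining setup below is performed
        * once the firmware signals that it is up (see ti_handle_events()).
        */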
  3218 static void
       ti_init2(struct ti_softc *sc)
 3219 {
 3220         struct ti_cmd_desc cmd;
 3221         struct ifnet *ifp;
 3222         uint8_t *ea;
 3223         struct ifmedia *ifm;
 3224         int tmp;
 3225 
 3226         TI_LOCK_ASSERT(sc);
 3227 
 3228         ifp = sc->ti_ifp;
 3229 
 3230         /* Specify MTU and interface index. */
 3231         CSR_WRITE_4(sc, TI_GCR_IFINDEX, device_get_unit(sc->ti_dev));
 3232         CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
 3233             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
 3234         TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);
 3235 
 3236         /* Load our MAC address. */
 3237         ea = IF_LLADDR(sc->ti_ifp);
 3238         CSR_WRITE_4(sc, TI_GCR_PAR0, (ea[0] << 8) | ea[1]);
 3239         CSR_WRITE_4(sc, TI_GCR_PAR1,
 3240             (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]);
 3241         TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);
 3242 
 3243         /* Enable or disable promiscuous mode as needed. */
 3244         if (ifp->if_flags & IFF_PROMISC) {
 3245                 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
 3246         } else {
 3247                 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
 3248         }
 3249 
 3250         /* Program multicast filter. */
 3251         ti_setmulti(sc);
 3252 
 3253         /*
 3254          * If this is a Tigon 1, we should tell the
 3255          * firmware to use software packet filtering.
 3256          */
 3257         if (sc->ti_hwrev == TI_HWREV_TIGON) {
 3258                 TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
 3259         }
 3260 
 3261         /* Init RX ring. */
 3262         if (ti_init_rx_ring_std(sc) != 0) {
 3263                 /* XXX */
 3264                 device_printf(sc->ti_dev, "no memory for std Rx buffers.\n");
 3265                 return;
 3266         }
 3267 
 3268         /* Init jumbo RX ring. */
 3269         if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) {
 3270                 if (ti_init_rx_ring_jumbo(sc) != 0) {
 3271                         /* XXX */
 3272                         device_printf(sc->ti_dev,
 3273                             "no memory for jumbo Rx buffers.\n");
 3274                         return;
 3275                 }
 3276         }
 3277 
 3278         /*
 3279          * If this is a Tigon 2, we can also configure the
 3280          * mini ring.
 3281          */
 3282         if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
 3283                 if (ti_init_rx_ring_mini(sc) != 0) {
 3284                         /* XXX */
 3285                         device_printf(sc->ti_dev,
 3286                             "no memory for mini Rx buffers.\n");
 3287                         return;
 3288                 }
 3289         }
 3290 
 3291         CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
 3292         sc->ti_rx_saved_considx = 0;
 3293 
 3294         /* Init TX ring. */
 3295         ti_init_tx_ring(sc);
 3296 
 3297         /* Tell firmware we're alive. */
 3298         TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);
 3299 
 3300         /* Enable host interrupts. */
 3301         CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
 3302 
 3303         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 3304         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3305         callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);
 3306 
 3307         /*
 3308          * Make sure to set media properly. We have to do this
 3309          * here since we have to issue commands in order to set
 3310          * the link negotiation and we can't issue commands until
 3311          * the firmware is running.
 3312          */
 3313         ifm = &sc->ifmedia;
 3314         tmp = ifm->ifm_media;
 3315         ifm->ifm_media = ifm->ifm_cur->ifm_media;
 3316         ti_ifmedia_upd_locked(sc);
 3317         ifm->ifm_media = tmp;
 3318 }
 3319 
 3320 /*
 3321  * Set media options.
 3322  */
 3323 static int
 3324 ti_ifmedia_upd(struct ifnet *ifp)
 3325 {
 3326         struct ti_softc *sc;
 3327         int error;
 3328 
 3329         sc = ifp->if_softc;
 3330         TI_LOCK(sc);
 3331         error = ti_ifmedia_upd_locked(sc);
 3332         TI_UNLOCK(sc);
 3333 
 3334         return (error);
 3335 }
 3336 
 3337 static int
 3338 ti_ifmedia_upd_locked(struct ti_softc *sc)
 3339 {
 3340         struct ifmedia *ifm;
 3341         struct ti_cmd_desc cmd;
 3342         uint32_t flowctl;
 3343 
 3344         ifm = &sc->ifmedia;
 3345 
 3346         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 3347                 return (EINVAL);
 3348 
 3349         flowctl = 0;
 3350 
 3351         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 3352         case IFM_AUTO:
 3353                 /*
 3354                  * Transmit flow control doesn't work on the Tigon 1.
 3355                  */
 3356                 flowctl = TI_GLNK_RX_FLOWCTL_Y;
 3357 
 3358                 /*
 3359                  * Transmit flow control can also cause problems on the
 3360                  * Tigon 2, apparently with both the copper and fiber
 3361                  * boards.  The symptom is that the interface will just
 3362                  * hang.  This was reproduced with Alteon 180 switches.
 3363                  */
 3364 #if 0
 3365                 if (sc->ti_hwrev != TI_HWREV_TIGON)
 3366                         flowctl |= TI_GLNK_TX_FLOWCTL_Y;
 3367 #endif
 3368 
 3369                 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
 3370                     TI_GLNK_FULL_DUPLEX| flowctl |
 3371                     TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
 3372 
 3373                 flowctl = TI_LNK_RX_FLOWCTL_Y;
 3374 #if 0
 3375                 if (sc->ti_hwrev != TI_HWREV_TIGON)
 3376                         flowctl |= TI_LNK_TX_FLOWCTL_Y;
 3377 #endif
 3378 
 3379                 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
 3380                     TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl |
 3381                     TI_LNK_AUTONEGENB|TI_LNK_ENB);
 3382                 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
 3383                     TI_CMD_CODE_NEGOTIATE_BOTH, 0);
 3384                 break;
 3385         case IFM_1000_SX:
 3386         case IFM_1000_T:
 3387                 flowctl = TI_GLNK_RX_FLOWCTL_Y;
 3388 #if 0
 3389                 if (sc->ti_hwrev != TI_HWREV_TIGON)
 3390                         flowctl |= TI_GLNK_TX_FLOWCTL_Y;
 3391 #endif
 3392 
 3393                 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
 3394                     flowctl |TI_GLNK_ENB);
 3395                 CSR_WRITE_4(sc, TI_GCR_LINK, 0);
 3396                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3397                         TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
 3398                 }
 3399                 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
 3400                     TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
 3401                 break;
 3402         case IFM_100_FX:
 3403         case IFM_10_FL:
 3404         case IFM_100_TX:
 3405         case IFM_10_T:
 3406                 flowctl = TI_LNK_RX_FLOWCTL_Y;
 3407 #if 0
 3408                 if (sc->ti_hwrev != TI_HWREV_TIGON)
 3409                         flowctl |= TI_LNK_TX_FLOWCTL_Y;
 3410 #endif
 3411 
 3412                 CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
 3413                 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl);
 3414                 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
 3415                     IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
 3416                         TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
 3417                 } else {
 3418                         TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
 3419                 }
 3420                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
 3421                         TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
 3422                 } else {
 3423                         TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
 3424                 }
 3425                 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
 3426                     TI_CMD_CODE_NEGOTIATE_10_100, 0);
 3427                 break;
 3428         }
 3429 
 3430         return (0);
 3431 }
 3432 
 3433 /*
 3434  * Report current media status.
 3435  */
 3436 static void
 3437 ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 3438 {
 3439         struct ti_softc *sc;
 3440         uint32_t media = 0;
 3441 
 3442         sc = ifp->if_softc;
 3443 
 3444         TI_LOCK(sc);
 3445 
 3446         ifmr->ifm_status = IFM_AVALID;
 3447         ifmr->ifm_active = IFM_ETHER;
 3448 
 3449         if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
 3450                 TI_UNLOCK(sc);
 3451                 return;
 3452         }
 3453 
 3454         ifmr->ifm_status |= IFM_ACTIVE;
 3455 
 3456         if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
 3457                 media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
 3458                 if (sc->ti_copper)
 3459                         ifmr->ifm_active |= IFM_1000_T;
 3460                 else
 3461                         ifmr->ifm_active |= IFM_1000_SX;
 3462                 if (media & TI_GLNK_FULL_DUPLEX)
 3463                         ifmr->ifm_active |= IFM_FDX;
 3464                 else
 3465                         ifmr->ifm_active |= IFM_HDX;
 3466         } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
 3467                 media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
 3468                 if (sc->ti_copper) {
 3469                         if (media & TI_LNK_100MB)
 3470                                 ifmr->ifm_active |= IFM_100_TX;
 3471                         if (media & TI_LNK_10MB)
 3472                                 ifmr->ifm_active |= IFM_10_T;
 3473                 } else {
 3474                         if (media & TI_LNK_100MB)
 3475                                 ifmr->ifm_active |= IFM_100_FX;
 3476                         if (media & TI_LNK_10MB)
 3477                                 ifmr->ifm_active |= IFM_10_FL;
 3478                 }
 3479                 if (media & TI_LNK_FULL_DUPLEX)
 3480                         ifmr->ifm_active |= IFM_FDX;
 3481                 if (media & TI_LNK_HALF_DUPLEX)
 3482                         ifmr->ifm_active |= IFM_HDX;
 3483         }
 3484         TI_UNLOCK(sc);
 3485 }
 3486 
 3487 static int
 3488 ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 3489 {
 3490         struct ti_softc *sc = ifp->if_softc;
 3491         struct ifreq *ifr = (struct ifreq *) data;
 3492         struct ti_cmd_desc cmd;
 3493         int mask, error = 0;
 3494 
 3495         switch (command) {
 3496         case SIOCSIFMTU:
 3497                 TI_LOCK(sc);
 3498                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > TI_JUMBO_MTU)
 3499                         error = EINVAL;
 3500                 else {
 3501                         ifp->if_mtu = ifr->ifr_mtu;
 3502                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3503                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3504                                 ti_init_locked(sc);
 3505                         }
 3506                 }
 3507                 TI_UNLOCK(sc);
 3508                 break;
 3509         case SIOCSIFFLAGS:
 3510                 TI_LOCK(sc);
 3511                 if (ifp->if_flags & IFF_UP) {
 3512                         /*
 3513                          * If only the state of the PROMISC flag changed,
 3514                          * then just use the 'set promisc mode' command
 3515                          * instead of reinitializing the entire NIC. Doing
 3516                          * a full re-init means reloading the firmware and
 3517                          * waiting for it to start up, which may take a
 3518                          * second or two.
 3519                          */
 3520                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3521                             ifp->if_flags & IFF_PROMISC &&
 3522                             !(sc->ti_if_flags & IFF_PROMISC)) {
 3523                                 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
 3524                                     TI_CMD_CODE_PROMISC_ENB, 0);
 3525                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3526                             !(ifp->if_flags & IFF_PROMISC) &&
 3527                             sc->ti_if_flags & IFF_PROMISC) {
 3528                                 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
 3529                                     TI_CMD_CODE_PROMISC_DIS, 0);
 3530                         } else
 3531                                 ti_init_locked(sc);
 3532                 } else {
 3533                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3534                                 ti_stop(sc);
 3535                         }
 3536                 }
 3537                 sc->ti_if_flags = ifp->if_flags;
 3538                 TI_UNLOCK(sc);
 3539                 break;
 3540         case SIOCADDMULTI:
 3541         case SIOCDELMULTI:
 3542                 TI_LOCK(sc);
 3543                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3544                         ti_setmulti(sc);
 3545                 TI_UNLOCK(sc);
 3546                 break;
 3547         case SIOCSIFMEDIA:
 3548         case SIOCGIFMEDIA:
 3549                 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
 3550                 break;
 3551         case SIOCSIFCAP:
 3552                 TI_LOCK(sc);
 3553                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3554                 if ((mask & IFCAP_TXCSUM) != 0 &&
 3555                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 3556                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3557                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 3558                                 ifp->if_hwassist |= TI_CSUM_FEATURES;
 3559                         else
 3560                                 ifp->if_hwassist &= ~TI_CSUM_FEATURES;
 3561                 }
 3562                 if ((mask & IFCAP_RXCSUM) != 0 &&
 3563                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
 3564                         ifp->if_capenable ^= IFCAP_RXCSUM;
 3565                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 3566                     (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
 3567                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 3568                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 3569                     (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
 3570                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 3571                 if ((mask & (IFCAP_TXCSUM | IFCAP_RXCSUM |
 3572                     IFCAP_VLAN_HWTAGGING)) != 0) {
 3573                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3574                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3575                                 ti_init_locked(sc);
 3576                         }
 3577                 }
 3578                 TI_UNLOCK(sc);
 3579                 VLAN_CAPABILITIES(ifp);
 3580                 break;
 3581         default:
 3582                 error = ether_ioctl(ifp, command, data);
 3583                 break;
 3584         }
 3585 
 3586         return (error);
 3587 }
 3588 
 3589 static int
 3590 ti_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 3591 {
 3592         struct ti_softc *sc;
 3593 
 3594         sc = dev->si_drv1;
 3595         if (sc == NULL)
 3596                 return (ENODEV);
 3597 
 3598         TI_LOCK(sc);
 3599         sc->ti_flags |= TI_FLAG_DEBUGING;
 3600         TI_UNLOCK(sc);
 3601 
 3602         return (0);
 3603 }
 3604 
 3605 static int
 3606 ti_close(struct cdev *dev, int flag, int fmt, struct thread *td)
 3607 {
 3608         struct ti_softc *sc;
 3609 
 3610         sc = dev->si_drv1;
 3611         if (sc == NULL)
 3612                 return (ENODEV);
 3613 
 3614         TI_LOCK(sc);
 3615         sc->ti_flags &= ~TI_FLAG_DEBUGING;
 3616         TI_UNLOCK(sc);
 3617 
 3618         return (0);
 3619 }
 3620 
 3621 /*
 3622  * This ioctl routine goes along with the Tigon character device.
 3623  */
 3624 static int
 3625 ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 3626     struct thread *td)
 3627 {
 3628         struct ti_softc *sc;
 3629         int error;
 3630 
 3631         sc = dev->si_drv1;
 3632         if (sc == NULL)
 3633                 return (ENODEV);
 3634 
 3635         error = 0;
 3636 
 3637         switch (cmd) {
 3638         case TIIOCGETSTATS:
 3639         {
 3640                 struct ti_stats *outstats;
 3641 
 3642                 outstats = (struct ti_stats *)addr;
 3643 
 3644                 TI_LOCK(sc);
 3645                 bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
 3646                     sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD);
 3647                 bcopy(&sc->ti_rdata.ti_info->ti_stats, outstats,
 3648                     sizeof(struct ti_stats));
 3649                 bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
 3650                     sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD);
 3651                 TI_UNLOCK(sc);
 3652                 break;
 3653         }
 3654         case TIIOCGETPARAMS:
 3655         {
 3656                 struct ti_params *params;
 3657 
 3658                 params = (struct ti_params *)addr;
 3659 
 3660                 TI_LOCK(sc);
 3661                 params->ti_stat_ticks = sc->ti_stat_ticks;
 3662                 params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks;
 3663                 params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks;
 3664                 params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds;
 3665                 params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds;
 3666                 params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio;
 3667                 params->param_mask = TI_PARAM_ALL;
 3668                 TI_UNLOCK(sc);
 3669                 break;
 3670         }
 3671         case TIIOCSETPARAMS:
 3672         {
 3673                 struct ti_params *params;
 3674 
 3675                 params = (struct ti_params *)addr;
 3676 
 3677                 TI_LOCK(sc);
 3678                 if (params->param_mask & TI_PARAM_STAT_TICKS) {
 3679                         sc->ti_stat_ticks = params->ti_stat_ticks;
 3680                         CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
 3681                 }
 3682 
 3683                 if (params->param_mask & TI_PARAM_RX_COAL_TICKS) {
 3684                         sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks;
 3685                         CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
 3686                                     sc->ti_rx_coal_ticks);
 3687                 }
 3688 
 3689                 if (params->param_mask & TI_PARAM_TX_COAL_TICKS) {
 3690                         sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks;
 3691                         CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS,
 3692                                     sc->ti_tx_coal_ticks);
 3693                 }
 3694 
 3695                 if (params->param_mask & TI_PARAM_RX_COAL_BDS) {
 3696                         sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds;
 3697                         CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD,
 3698                                     sc->ti_rx_max_coal_bds);
 3699                 }
 3700 
 3701                 if (params->param_mask & TI_PARAM_TX_COAL_BDS) {
 3702                         sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds;
 3703                         CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD,
 3704                                     sc->ti_tx_max_coal_bds);
 3705                 }
 3706 
 3707                 if (params->param_mask & TI_PARAM_TX_BUF_RATIO) {
 3708                         sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio;
 3709                         CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO,
 3710                                     sc->ti_tx_buf_ratio);
 3711                 }
 3712                 TI_UNLOCK(sc);
 3713                 break;
 3714         }
 3715         case TIIOCSETTRACE: {
 3716                 ti_trace_type trace_type;
 3717 
 3718                 trace_type = *(ti_trace_type *)addr;
 3719 
 3720                 /*
 3721                  * Set tracing to whatever the user asked for.  Setting
 3722                  * this register to 0 should have the effect of disabling
 3723                  * tracing.
 3724                  */
 3725                 TI_LOCK(sc);
 3726                 CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type);
 3727                 TI_UNLOCK(sc);
 3728                 break;
 3729         }
 3730         case TIIOCGETTRACE: {
 3731                 struct ti_trace_buf *trace_buf;
 3732                 uint32_t trace_start, cur_trace_ptr, trace_len;
 3733 
 3734                 trace_buf = (struct ti_trace_buf *)addr;
 3735 
 3736                 TI_LOCK(sc);
 3737                 trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START);
 3738                 cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR);
 3739                 trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN);
 3740 #if 0
 3741                 if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, "
 3742                        "trace_len = %d\n", trace_start,
 3743                        cur_trace_ptr, trace_len);
 3744                 if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n",
 3745                        trace_buf->buf_len);
 3746 #endif
 3747                 error = ti_copy_mem(sc, trace_start, min(trace_len,
 3748                     trace_buf->buf_len), (caddr_t)trace_buf->buf, 1, 1);
 3749                 if (error == 0) {
 3750                         trace_buf->fill_len = min(trace_len,
 3751                             trace_buf->buf_len);
 3752                         if (cur_trace_ptr < trace_start)
 3753                                 trace_buf->cur_trace_ptr =
 3754                                     trace_start - cur_trace_ptr;
 3755                         else
 3756                                 trace_buf->cur_trace_ptr =
 3757                                     cur_trace_ptr - trace_start;
 3758                 } else
 3759                         trace_buf->fill_len = 0;
 3760                 TI_UNLOCK(sc);
 3761                 break;
 3762         }
 3763 
 3764         /*
 3765          * For debugging, five ioctls are needed:
 3766          * ALT_ATTACH
 3767          * ALT_READ_TG_REG
 3768          * ALT_WRITE_TG_REG
 3769          * ALT_READ_TG_MEM
 3770          * ALT_WRITE_TG_MEM
 3771          */
 3772         case ALT_ATTACH:
 3773                 /*
 3774                  * From what I can tell, Alteon's Solaris Tigon driver
 3775                  * only has one character device, so you have to attach
 3776                  * to the Tigon board you're interested in.  This seems
 3777                  * like a not-so-good way to do things, since unless you
 3778                  * subsequently specify the unit number of the device
  3779                  * you're interested in with every ioctl, you'll only be
 3780                  * able to debug one board at a time.
 3781                  */
 3782                 break;
 3783         case ALT_READ_TG_MEM:
 3784         case ALT_WRITE_TG_MEM:
 3785         {
 3786                 struct tg_mem *mem_param;
 3787                 uint32_t sram_end, scratch_end;
 3788 
 3789                 mem_param = (struct tg_mem *)addr;
 3790 
 3791                 if (sc->ti_hwrev == TI_HWREV_TIGON) {
 3792                         sram_end = TI_END_SRAM_I;
 3793                         scratch_end = TI_END_SCRATCH_I;
 3794                 } else {
 3795                         sram_end = TI_END_SRAM_II;
 3796                         scratch_end = TI_END_SCRATCH_II;
 3797                 }
 3798 
 3799                 /*
 3800                  * For now, we'll only handle accessing regular SRAM,
 3801                  * nothing else.
 3802                  */
 3803                 TI_LOCK(sc);
 3804                 if (mem_param->tgAddr >= TI_BEG_SRAM &&
 3805                     mem_param->tgAddr + mem_param->len <= sram_end) {
 3806                         /*
 3807                          * In this instance, we always copy to/from user
 3808                          * space, so the user space argument is set to 1.
 3809                          */
 3810                         error = ti_copy_mem(sc, mem_param->tgAddr,
 3811                             mem_param->len, mem_param->userAddr, 1,
 3812                             cmd == ALT_READ_TG_MEM ? 1 : 0);
 3813                 } else if (mem_param->tgAddr >= TI_BEG_SCRATCH &&
 3814                     mem_param->tgAddr <= scratch_end) {
 3815                         error = ti_copy_scratch(sc, mem_param->tgAddr,
 3816                             mem_param->len, mem_param->userAddr, 1,
 3817                             cmd == ALT_READ_TG_MEM ?  1 : 0, TI_PROCESSOR_A);
 3818                 } else if (mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG &&
 3819                     mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG) {
 3820                         if (sc->ti_hwrev == TI_HWREV_TIGON) {
 3821                                 if_printf(sc->ti_ifp,
 3822                                     "invalid memory range for Tigon I\n");
 3823                                 error = EINVAL;
 3824                                 break;
 3825                         }
 3826                         error = ti_copy_scratch(sc, mem_param->tgAddr -
 3827                             TI_SCRATCH_DEBUG_OFF, mem_param->len,
 3828                             mem_param->userAddr, 1,
 3829                             cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_B);
 3830                 } else {
 3831                         if_printf(sc->ti_ifp, "memory address %#x len %d is "
 3832                                 "out of supported range\n",
 3833                                 mem_param->tgAddr, mem_param->len);
 3834                         error = EINVAL;
 3835                 }
 3836                 TI_UNLOCK(sc);
 3837                 break;
 3838         }
 3839         case ALT_READ_TG_REG:
 3840         case ALT_WRITE_TG_REG:
 3841         {
 3842                 struct tg_reg *regs;
 3843                 uint32_t tmpval;
 3844 
 3845                 regs = (struct tg_reg *)addr;
 3846 
 3847                 /*
 3848                  * Make sure the address in question isn't out of range.
 3849                  */
 3850                 if (regs->addr > TI_REG_MAX) {
 3851                         error = EINVAL;
 3852                         break;
 3853                 }
 3854                 TI_LOCK(sc);
 3855                 if (cmd == ALT_READ_TG_REG) {
 3856                         bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
 3857                             regs->addr, &tmpval, 1);
 3858                         regs->data = ntohl(tmpval);
 3859 #if 0
 3860                         if ((regs->addr == TI_CPU_STATE)
 3861                          || (regs->addr == TI_CPU_CTL_B)) {
 3862                                 if_printf(sc->ti_ifp, "register %#x = %#x\n",
 3863                                        regs->addr, tmpval);
 3864                         }
 3865 #endif
 3866                 } else {
 3867                         tmpval = htonl(regs->data);
 3868                         bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
 3869                             regs->addr, &tmpval, 1);
 3870                 }
 3871                 TI_UNLOCK(sc);
 3872                 break;
 3873         }
 3874         default:
 3875                 error = ENOTTY;
 3876                 break;
 3877         }
 3878         return (error);
 3879 }
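       /*
        * Illustrative sketch (not part of the driver): a minimal userland
        * program that exercises the TIIOCGETSTATS ioctl handled above.  The
        * device path "/dev/ti0" and the <sys/tiio.h> header name are
        * assumptions; adjust them to the installed headers and unit number.
        */
       #if 0
       #include <sys/types.h>
       #include <sys/ioctl.h>
       #include <sys/tiio.h>           /* assumed: TIIOCGETSTATS, struct ti_stats */
       #include <fcntl.h>
       #include <stdio.h>
       #include <unistd.h>

       int
       main(void)
       {
               struct ti_stats stats;
               int fd;

               /* Open the Tigon character device (unit 0 assumed). */
               fd = open("/dev/ti0", O_RDONLY);
               if (fd == -1)
                       return (1);
               /* Fetch a snapshot of the firmware-maintained statistics. */
               if (ioctl(fd, TIIOCGETSTATS, &stats) == -1) {
                       close(fd);
                       return (1);
               }
               printf("single collision frames: %llu\n",
                   (unsigned long long)stats.dot3StatsSingleCollisionFrames);
               close(fd);
               return (0);
       }
       #endif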
 3880 
 3881 static void
 3882 ti_watchdog(void *arg)
 3883 {
 3884         struct ti_softc *sc;
 3885         struct ifnet *ifp;
 3886 
 3887         sc = arg;
 3888         TI_LOCK_ASSERT(sc);
 3889         callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);
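               /*
                * sc->ti_timer is armed to 5 (seconds) by ti_start_locked()
                * whenever frames are queued and cleared by ti_txeof() once
                * the ring drains, so the reset below only happens when the
                * chip stops making progress on the TX ring.
                */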
 3890         if (sc->ti_timer == 0 || --sc->ti_timer > 0)
 3891                 return;
 3892 
 3893         /*
 3894          * When we're debugging, the chip is often stopped for long periods
 3895          * of time, and that would normally cause the watchdog timer to fire.
 3896          * Since that impedes debugging, we don't want to do that.
 3897          */
 3898         if (sc->ti_flags & TI_FLAG_DEBUGING)
 3899                 return;
 3900 
 3901         ifp = sc->ti_ifp;
 3902         if_printf(ifp, "watchdog timeout -- resetting\n");
 3903         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3904         ti_init_locked(sc);
 3905 
 3906         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 3907 }
 3908 
 3909 /*
 3910  * Stop the adapter and free any mbufs allocated to the
 3911  * RX and TX lists.
 3912  */
 3913 static void
 3914 ti_stop(struct ti_softc *sc)
 3915 {
 3916         struct ifnet *ifp;
 3917         struct ti_cmd_desc cmd;
 3918 
 3919         TI_LOCK_ASSERT(sc);
 3920 
 3921         ifp = sc->ti_ifp;
 3922 
 3923         /* Disable host interrupts. */
 3924         CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
 3925         /*
 3926          * Tell firmware we're shutting down.
 3927          */
 3928         TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);
 3929 
 3930         /* Halt and reinitialize. */
 3931         if (ti_chipinit(sc) == 0) {
 3932                 ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
 3933                 /* XXX ignore init errors. */
 3934                 ti_chipinit(sc);
 3935         }
 3936 
 3937         /* Free the RX lists. */
 3938         ti_free_rx_ring_std(sc);
 3939 
 3940         /* Free jumbo RX list. */
 3941         ti_free_rx_ring_jumbo(sc);
 3942 
 3943         /* Free mini RX list. */
 3944         ti_free_rx_ring_mini(sc);
 3945 
 3946         /* Free TX buffers. */
 3947         ti_free_tx_ring(sc);
 3948 
 3949         sc->ti_ev_prodidx.ti_idx = 0;
 3950         sc->ti_return_prodidx.ti_idx = 0;
 3951         sc->ti_tx_considx.ti_idx = 0;
 3952         sc->ti_tx_saved_considx = TI_TXCONS_UNSET;
 3953 
 3954         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3955         callout_stop(&sc->ti_watchdog);
 3956 }
 3957 
 3958 /*
 3959  * Stop all chip I/O so that the kernel's probe routines don't
 3960  * get confused by errant DMAs when rebooting.
 3961  */
 3962 static int
 3963 ti_shutdown(device_t dev)
 3964 {
 3965         struct ti_softc *sc;
 3966 
 3967         sc = device_get_softc(dev);
 3968         TI_LOCK(sc);
 3969         ti_chipinit(sc);
 3970         TI_UNLOCK(sc);
 3971 
 3972         return (0);
 3973 }
 3974 
 3975 static void
 3976 ti_sysctl_node(struct ti_softc *sc)
 3977 {
 3978         struct sysctl_ctx_list *ctx;
 3979         struct sysctl_oid_list *child;
 3980         char tname[32];
 3981 
 3982         ctx = device_get_sysctl_ctx(sc->ti_dev);
 3983         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ti_dev));
 3984 
  3985         /* Use DAC (64-bit DMA addressing) unless the tunable says otherwise. */
 3986         sc->ti_dac = 1;
 3987         snprintf(tname, sizeof(tname), "dev.ti.%d.dac",
 3988             device_get_unit(sc->ti_dev));
 3989         TUNABLE_INT_FETCH(tname, &sc->ti_dac);
 3990 
 3991         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_coal_ticks", CTLFLAG_RW,
  3992             &sc->ti_rx_coal_ticks, 0, "Receive coalesced ticks");
 3993         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_max_coal_bds", CTLFLAG_RW,
  3994             &sc->ti_rx_max_coal_bds, 0, "Receive max coalesced BDs");
 3995 
 3996         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_coal_ticks", CTLFLAG_RW,
  3997             &sc->ti_tx_coal_ticks, 0, "Send coalesced ticks");
 3998         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_max_coal_bds", CTLFLAG_RW,
  3999             &sc->ti_tx_max_coal_bds, 0, "Send max coalesced BDs");
 4000         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_buf_ratio", CTLFLAG_RW,
 4001             &sc->ti_tx_buf_ratio, 0,
 4002             "Ratio of NIC memory devoted to TX buffer");
 4003 
 4004         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stat_ticks", CTLFLAG_RW,
 4005             &sc->ti_stat_ticks, 0,
 4006             "Number of clock ticks for statistics update interval");
 4007 
 4008         /* Pull in device tunables. */
 4009         sc->ti_rx_coal_ticks = 170;
 4010         resource_int_value(device_get_name(sc->ti_dev),
 4011             device_get_unit(sc->ti_dev), "rx_coal_ticks",
 4012             &sc->ti_rx_coal_ticks);
 4013         sc->ti_rx_max_coal_bds = 64;
 4014         resource_int_value(device_get_name(sc->ti_dev),
 4015             device_get_unit(sc->ti_dev), "rx_max_coal_bds",
 4016             &sc->ti_rx_max_coal_bds);
 4017 
 4018         sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
 4019         resource_int_value(device_get_name(sc->ti_dev),
 4020             device_get_unit(sc->ti_dev), "tx_coal_ticks",
 4021             &sc->ti_tx_coal_ticks);
 4022         sc->ti_tx_max_coal_bds = 32;
 4023         resource_int_value(device_get_name(sc->ti_dev),
 4024             device_get_unit(sc->ti_dev), "tx_max_coal_bds",
 4025             &sc->ti_tx_max_coal_bds);
 4026         sc->ti_tx_buf_ratio = 21;
 4027         resource_int_value(device_get_name(sc->ti_dev),
 4028             device_get_unit(sc->ti_dev), "tx_buf_ratio",
 4029             &sc->ti_tx_buf_ratio);
 4030 
 4031         sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
 4032         resource_int_value(device_get_name(sc->ti_dev),
 4033             device_get_unit(sc->ti_dev), "stat_ticks",
 4034             &sc->ti_stat_ticks);
 4035 }
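       /*
        * Illustrative example (not part of the driver): the coalescing knobs
        * registered above can be inspected and adjusted through the device
        * sysctl tree, e.g.
        *
        *      sysctl dev.ti.0.rx_coal_ticks=170
        *
        * while the resource_int_value() defaults can be overridden at boot
        * with device hints of the form
        *
        *      hint.ti.0.rx_coal_ticks="170"
        *
        * in /boot/device.hints or loader.conf.  The unit number 0 is assumed
        * here, and changed values take effect when the interface is next
        * (re)initialized.
        */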
