The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_txp.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* $NetBSD: if_txp.c,v 1.5.2.1 2004/07/10 09:24:34 tron Exp $ */
    2 
    3 /*
    4  * Copyright (c) 2001
    5  *      Jason L. Wright <jason@thought.net>, Theo de Raadt, and
    6  *      Aaron Campbell <aaron@monkey.org>.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
   21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   27  * THE POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * Driver for 3c990 (Typhoon) Ethernet ASIC
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __KERNEL_RCSID(0, "$NetBSD: if_txp.c,v 1.5.2.1 2004/07/10 09:24:34 tron Exp $");
   36 
   37 #include "bpfilter.h"
   38 #include "opt_inet.h"
   39 
   40 #include <sys/param.h>
   41 #include <sys/systm.h>
   42 #include <sys/sockio.h>
   43 #include <sys/mbuf.h>
   44 #include <sys/malloc.h>
   45 #include <sys/kernel.h>
   46 #include <sys/socket.h>
   47 #include <sys/device.h>
   48 #include <sys/callout.h>
   49 
   50 #include <net/if.h>
   51 #include <net/if_dl.h>
   52 #include <net/if_types.h>
   53 #include <net/if_ether.h>
   54 #include <net/if_arp.h>
   55 
   56 #ifdef INET
   57 #include <netinet/in.h>
   58 #include <netinet/in_systm.h>
   59 #include <netinet/in_var.h>
   60 #include <netinet/ip.h>
   61 #include <netinet/if_inarp.h>
   62 #endif
   63 
   64 #include <net/if_media.h>
   65 
   66 #if NBPFILTER > 0
   67 #include <net/bpf.h>
   68 #endif
   69 
   70 #include <uvm/uvm_extern.h>              /* for vtophys */
   71 #include <machine/bus.h>
   72 
   73 #include <dev/mii/mii.h>
   74 #include <dev/mii/miivar.h>
   75 #include <dev/pci/pcireg.h>
   76 #include <dev/pci/pcivar.h>
   77 #include <dev/pci/pcidevs.h>
   78 
   79 #include <dev/pci/if_txpreg.h>
   80 
   81 #include <dev/microcode/typhoon/3c990img.h>
   82 
   83 /*
   84  * These currently break the 3c990 firmware, hopefully will be resolved
   85  * at some point.
   86  */
   87 #undef  TRY_TX_UDP_CSUM
   88 #undef  TRY_TX_TCP_CSUM
   89 
   90 int txp_probe(struct device *, struct cfdata *, void *);
   91 void txp_attach(struct device *, struct device *, void *);
   92 int txp_intr(void *);
   93 void txp_tick(void *);
   94 void txp_shutdown(void *);
   95 int txp_ioctl(struct ifnet *, u_long, caddr_t);
   96 void txp_start(struct ifnet *);
   97 void txp_stop(struct txp_softc *);
   98 void txp_init(struct txp_softc *);
   99 void txp_watchdog(struct ifnet *);
  100 
  101 int txp_chip_init(struct txp_softc *);
  102 int txp_reset_adapter(struct txp_softc *);
  103 int txp_download_fw(struct txp_softc *);
  104 int txp_download_fw_wait(struct txp_softc *);
  105 int txp_download_fw_section(struct txp_softc *,
  106     struct txp_fw_section_header *, int);
  107 int txp_alloc_rings(struct txp_softc *);
  108 void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
  109 int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int);
  110 void txp_set_filter(struct txp_softc *);
  111 
  112 int txp_cmd_desc_numfree(struct txp_softc *);
  113 int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
  114     u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
  115 int txp_command2(struct txp_softc *, u_int16_t, u_int16_t,
  116     u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
  117     struct txp_rsp_desc **, int);
  118 int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
  119     struct txp_rsp_desc **);
  120 void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
  121     struct txp_rsp_desc *);
  122 void txp_capabilities(struct txp_softc *);
  123 
  124 void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  125 int txp_ifmedia_upd(struct ifnet *);
  126 void txp_show_descriptor(void *);
  127 void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
  128     struct txp_dma_alloc *);
  129 void txp_rxbuf_reclaim(struct txp_softc *);
  130 void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
  131     struct txp_dma_alloc *);
  132 
/*
 * Autoconfiguration glue: register the "txp" driver with a softc of
 * sizeof(struct txp_softc), using txp_probe/txp_attach for match/attach
 * (no detach or activate handlers).
 */
CFATTACH_DECL(txp, sizeof(struct txp_softc), txp_probe, txp_attach,
              NULL, NULL);
  135 
/*
 * Table of supported 3Com PCI vendor/product IDs.  The flags word marks
 * boards that need the server firmware image (TXP_SERVERVERSION) or whose
 * feature set must be refined from the PCI subsystem ID (TXP_USESUBSYSTEM,
 * decoded via txp_subsysinfo below).
 */
const struct txp_pci_match {
        int vid, did, flags;
} txp_devices[] = {
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990, 0 },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95, 0 },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97, 0 },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95, TXP_SERVERVERSION },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97, TXP_SERVERVERSION },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B, TXP_USESUBSYSTEM },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR, TXP_SERVERVERSION },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX, TXP_USESUBSYSTEM },
};
  148 
  149 static const struct txp_pci_match *txp_pcilookup(pcireg_t);
  150 
/*
 * Decoding table for the PCI subsystem ID (used only for boards flagged
 * TXP_USESUBSYSTEM): every entry whose mask/value pair matches the
 * subsystem ID contributes its flags to the softc.
 */
static const struct {
        u_int16_t mask, value;
        int flags;
} txp_subsysinfo[] = {
        {0xf000, 0x2000, TXP_SERVERVERSION},
        {0x0100, 0x0100, TXP_FIBER},
#if 0 /* information from 3com header, unused */
        {0x0010, 0x0010, /* secured firmware */},
        {0x0003, 0x0000, /* variable DES */},
        {0x0003, 0x0001, /* single DES - "95" */},
        {0x0003, 0x0002, /* triple DES - "97" */},
#endif
};
  164 
  165 static const struct txp_pci_match *
  166 txp_pcilookup(id)
  167         pcireg_t id;
  168 {
  169         int i;
  170 
  171         for (i = 0; i < sizeof(txp_devices) / sizeof(txp_devices[0]); i++)
  172                 if ((PCI_VENDOR(id) == txp_devices[i].vid) &&
  173                     (PCI_PRODUCT(id) == txp_devices[i].did))
  174                         return (&txp_devices[i]);
  175         return (0);
  176 }
  177 
  178 int
  179 txp_probe(parent, match, aux)
  180         struct device *parent;
  181         struct cfdata *match;
  182         void *aux;
  183 {
  184         struct pci_attach_args *pa = aux;
  185 
  186         if (txp_pcilookup(pa->pa_id))
  187                         return (1);
  188         return (0);
  189 }
  190 
/*
 * Attach routine: determine board flags, map registers, hook up the
 * interrupt, reset the chip and download the Typhoon runtime firmware,
 * allocate the DMA rings, read the station address, and finally attach
 * the interface to the network stack.  On firmware/ring failures the
 * interrupt is disestablished and attach aborts.
 */
void
txp_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct txp_softc *sc = (struct txp_softc *)self;
        struct pci_attach_args *pa = aux;
        pci_chipset_tag_t pc = pa->pa_pc;
        pci_intr_handle_t ih;
        const char *intrstr = NULL;
        struct ifnet *ifp = &sc->sc_arpcom.ec_if;
        u_int32_t command;
        u_int16_t p1;
        u_int32_t p2;
        u_char enaddr[6];
        const struct txp_pci_match *pcimatch;
        u_int16_t subsys;
        int i, flags;
        char devinfo[256];

        /* While sc_cold is set, txp_command() callers must poll, not sleep. */
        sc->sc_cold = 1;

        /*
         * Start from the match-table flags; for boards flagged
         * TXP_USESUBSYSTEM, fold in per-feature flags decoded from the
         * PCI subsystem ID via txp_subsysinfo.
         * NOTE(review): pcimatch is assumed non-NULL because txp_probe
         * already matched this device — confirm attach can't race.
         */
        pcimatch = txp_pcilookup(pa->pa_id);
        flags = pcimatch->flags;
        if (pcimatch->flags & TXP_USESUBSYSTEM) {
                subsys = PCI_PRODUCT(pci_conf_read(pc, pa->pa_tag,
                                                   PCI_SUBSYS_ID_REG));
                for (i = 0;
                     i < sizeof(txp_subsysinfo)/sizeof(txp_subsysinfo[0]);
                     i++)
                        if ((subsys & txp_subsysinfo[i].mask) ==
                            txp_subsysinfo[i].value)
                                flags |= txp_subsysinfo[i].flags;
        }
        sc->sc_flags = flags;

        pci_devinfo(pa->pa_id, 0, 0, devinfo);
/* Annotate server-firmware boards in the attach banner. */
#define TXP_EXTRAINFO ((flags & (TXP_USESUBSYSTEM|TXP_SERVERVERSION)) == \
  (TXP_USESUBSYSTEM|TXP_SERVERVERSION) ? " (SVR)" : "")
        printf(": %s%s\n%s", devinfo, TXP_EXTRAINFO, sc->sc_dev.dv_xname);

        /* Bus mastering and memory space must already be enabled. */
        command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

        if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
                printf(": failed to enable bus mastering\n");
                return;
        }

        if (!(command & PCI_COMMAND_MEM_ENABLE)) {
                printf(": failed to enable memory mapping\n");
                return;
        }
        if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
            &sc->sc_bt, &sc->sc_bh, NULL, NULL)) {
                printf(": can't map mem space %d\n", 0);
                return;
        }

        sc->sc_dmat = pa->pa_dmat;

        /*
         * Allocate our interrupt.
         */
        if (pci_intr_map(pa, &ih)) {
                printf(": couldn't map interrupt\n");
                return;
        }

        intrstr = pci_intr_string(pc, ih);
        sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc);
        if (sc->sc_ih == NULL) {
                printf(": couldn't establish interrupt");
                if (intrstr != NULL)
                        printf(" at %s", intrstr);
                printf("\n");
                return;
        }
        printf(": interrupting at %s\n", intrstr);

        /* Reset the chip and push the runtime firmware image into it. */
        if (txp_chip_init(sc))
                goto cleanupintr;

        if (txp_download_fw(sc))
                goto cleanupintr;

        if (txp_alloc_rings(sc))
                goto cleanupintr;

        if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
            NULL, NULL, NULL, 1))
                goto cleanupintr;

        if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
            &p1, &p2, NULL, 1))
                goto cleanupintr;

        txp_set_filter(sc);

        /*
         * The station address comes back packed into the two command
         * response words; unpack it byte-reversed into enaddr[].
         */
        p1 = htole16(p1);
        enaddr[0] = ((u_int8_t *)&p1)[1];
        enaddr[1] = ((u_int8_t *)&p1)[0];
        p2 = htole32(p2);
        enaddr[2] = ((u_int8_t *)&p2)[3];
        enaddr[3] = ((u_int8_t *)&p2)[2];
        enaddr[4] = ((u_int8_t *)&p2)[1];
        enaddr[5] = ((u_int8_t *)&p2)[0];

        printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
               ether_sprintf(enaddr));
        /* From here on txp_command() may sleep instead of spinning. */
        sc->sc_cold = 0;

        /* Fiber boards advertise only 100baseFX media; copper gets 10/100. */
        ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
        if (flags & TXP_FIBER) {
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_HDX,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_FDX,
                            0, NULL);
        } else {
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX,
                            0, NULL);
                ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX,
                            0, NULL);
        }
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

        /* Default to autonegotiation. */
        sc->sc_xcvr = TXP_XCVR_AUTO;
        txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
            NULL, NULL, NULL, 0);
        ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

        ifp->if_softc = sc;
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = txp_ioctl;
        ifp->if_start = txp_start;
        ifp->if_watchdog = txp_watchdog;
        ifp->if_baudrate = 10000000;
        IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
        IFQ_SET_READY(&ifp->if_snd);
        ifp->if_capabilities = 0;
        bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

        /* Probe firmware for checksum-offload/VLAN capabilities. */
        txp_capabilities(sc);

        callout_init(&sc->sc_tick);
        callout_setfunc(&sc->sc_tick, txp_tick, sc);

        /*
         * Attach us everywhere
         */
        if_attach(ifp);
        ether_ifattach(ifp, enaddr);

        shutdownhook_establish(txp_shutdown, sc);


        return;

cleanupintr:
        /*
         * NOTE(review): only the interrupt is torn down here; the mapped
         * register space (and rings, if txp_alloc_rings succeeded) are
         * leaked on this path — confirm whether that is acceptable for
         * a failed attach.
         */
        pci_intr_disestablish(pc,sc->sc_ih);

        return;

}
  365 
/*
 * Put the chip into a known quiescent state: mask and acknowledge all
 * interrupt sources, soft-reset the adapter, then mask and acknowledge
 * again (the reset sequence is bracketed by the same IER/IMR/ISR setup
 * on both sides).  Returns 0 on success, -1 if the reset never
 * completes.
 */
int
txp_chip_init(sc)
        struct txp_softc *sc;
{
        /* disable interrupts */
        WRITE_REG(sc, TXP_IER, 0);
        WRITE_REG(sc, TXP_IMR,
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_LATCH);

        /* ack all interrupts */
        WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

        if (txp_reset_adapter(sc))
                return (-1);

        /* disable interrupts */
        WRITE_REG(sc, TXP_IER, 0);
        WRITE_REG(sc, TXP_IMR,
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_LATCH);

        /* ack all interrupts */
        WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

        return (0);
}
  403 
  404 int
  405 txp_reset_adapter(sc)
  406         struct txp_softc *sc;
  407 {
  408         u_int32_t r;
  409         int i;
  410 
  411         WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
  412         DELAY(1000);
  413         WRITE_REG(sc, TXP_SRR, 0);
  414 
  415         /* Should wait max 6 seconds */
  416         for (i = 0; i < 6000; i++) {
  417                 r = READ_REG(sc, TXP_A2H_0);
  418                 if (r == STAT_WAITING_FOR_HOST_REQUEST)
  419                         break;
  420                 DELAY(1000);
  421         }
  422 
  423         if (r != STAT_WAITING_FOR_HOST_REQUEST) {
  424                 printf("%s: reset hung\n", TXP_DEVNAME(sc));
  425                 return (-1);
  426         }
  427 
  428         return (0);
  429 }
  430 
/*
 * Download the Typhoon runtime firmware (tc990image, linked into the
 * kernel) into the adapter: wait for the boot ROM's host-request state,
 * announce the image, push each section via txp_download_fw_section(),
 * then wait for the "waiting for boot" state.  A2H_0 interrupt
 * enable/mask bits are borrowed for the handshake and restored on the
 * way out.  Returns 0 on success, -1 on any handshake failure.
 */
int
txp_download_fw(sc)
        struct txp_softc *sc;
{
        struct txp_fw_file_header *fileheader;
        struct txp_fw_section_header *secthead;
        int sect;
        u_int32_t r, i, ier, imr;

        /* Temporarily enable/unmask the A2H_0 doorbell used for handshaking. */
        ier = READ_REG(sc, TXP_IER);
        WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

        imr = READ_REG(sc, TXP_IMR);
        WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

        /* Poll (up to ~500ms) for the boot ROM to ask for a host request. */
        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_WAITING_FOR_HOST_REQUEST)
                        break;
                DELAY(50);
        }
        if (r != STAT_WAITING_FOR_HOST_REQUEST) {
                printf(": not waiting for host request\n");
                return (-1);
        }

        /* Ack the status */
        WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

        /* Sanity-check the embedded firmware image's magic string. */
        fileheader = (struct txp_fw_file_header *)tc990image;
        if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
                printf(": fw invalid magic\n");
                return (-1);
        }

        /* Tell boot firmware to get ready for image */
        WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

        if (txp_download_fw_wait(sc)) {
                printf("%s: fw wait failed, initial\n", sc->sc_dev.dv_xname);
                return (-1);
        }

        /* Walk the variable-length section headers following the file header. */
        secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
            sizeof(struct txp_fw_file_header));

        for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
                if (txp_download_fw_section(sc, secthead, sect))
                        return (-1);
                secthead = (struct txp_fw_section_header *)
                    (((u_int8_t *)secthead) + le32toh(secthead->nbytes) +
                        sizeof(*secthead));
        }

        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

        /* Wait for the freshly-downloaded firmware to reach its boot state. */
        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_WAITING_FOR_BOOT)
                        break;
                DELAY(50);
        }
        if (r != STAT_WAITING_FOR_BOOT) {
                printf(": not waiting for boot\n");
                return (-1);
        }

        /* Restore the caller's interrupt enable/mask state. */
        WRITE_REG(sc, TXP_IER, ier);
        WRITE_REG(sc, TXP_IMR, imr);

        return (0);
}
  504 
  505 int
  506 txp_download_fw_wait(sc)
  507         struct txp_softc *sc;
  508 {
  509         u_int32_t i, r;
  510 
  511         for (i = 0; i < 10000; i++) {
  512                 r = READ_REG(sc, TXP_ISR);
  513                 if (r & TXP_INT_A2H_0)
  514                         break;
  515                 DELAY(50);
  516         }
  517 
  518         if (!(r & TXP_INT_A2H_0)) {
  519                 printf(": fw wait failed comm0\n");
  520                 return (-1);
  521         }
  522 
  523         WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
  524 
  525         r = READ_REG(sc, TXP_A2H_0);
  526         if (r != STAT_WAITING_FOR_SEGMENT) {
  527                 printf(": fw not waiting for segment\n");
  528                 return (-1);
  529         }
  530         return (0);
  531 }
  532 
/*
 * Download one firmware section: bounds-check it against the embedded
 * image, copy it into a fresh DMA buffer, verify its Internet checksum
 * via a dummy mbuf, then hand the buffer's physical address to the boot
 * ROM and wait for it to consume the segment.  The DMA buffer is always
 * freed before returning.  Returns 0 on success (or for an empty
 * section), -1 on any failure.
 */
int
txp_download_fw_section(sc, sect, sectnum)
        struct txp_softc *sc;
        struct txp_fw_section_header *sect;
        int sectnum;
{
        struct txp_dma_alloc dma;
        int rseg, err = 0;
        struct mbuf m;
        u_int16_t csum;

        /* Skip zero length sections */
        if (sect->nbytes == 0)
                return (0);

        /* Make sure we aren't past the end of the image */
        rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
        if (rseg >= sizeof(tc990image)) {
                printf(": fw invalid section address, section %d\n", sectnum);
                return (-1);
        }

        /*
         * Make sure this section doesn't go past the end
         * NOTE(review): ">=" also rejects a section that ends exactly at
         * the image end, and the section header size is not included in
         * rseg — confirm these bounds are as intended.
         */
        rseg += le32toh(sect->nbytes);
        if (rseg >= sizeof(tc990image)) {
                printf(": fw truncated section %d\n", sectnum);
                return (-1);
        }

        /* map a buffer, copy segment to it, get physaddr */
        if (txp_dma_malloc(sc, le32toh(sect->nbytes), &dma, 0)) {
                printf(": fw dma malloc failed, section %d\n", sectnum);
                return (-1);
        }

        bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr,
            le32toh(sect->nbytes));

        /*
         * dummy up mbuf and verify section checksum
         */
        m.m_type = MT_DATA;
        m.m_next = m.m_nextpkt = NULL;
        m.m_len = le32toh(sect->nbytes);
        m.m_data = dma.dma_vaddr;
        m.m_flags = 0;
        /* NOTE(review): csum is compared against sect->cksum without a
         * byte-order conversion — confirm the image's cksum field layout. */
        csum = in_cksum(&m, le32toh(sect->nbytes));
        if (csum != sect->cksum) {
                printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
                    sectnum, sect->cksum, csum);
                err = -1;
                goto bail;
        }

        /* Flush the copy to memory before the device DMAs it in. */
        bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
            dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

        /* Describe the segment (length, cksum, load address, 64-bit
         * physical source address) and kick the boot ROM. */
        WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
        WRITE_REG(sc, TXP_H2A_2, le32toh(sect->cksum));
        WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
        WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
        WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

        if (txp_download_fw_wait(sc)) {
                printf("%s: fw wait failed, section %d\n",
                    sc->sc_dev.dv_xname, sectnum);
                err = -1;
        }

        bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
            dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
        txp_dma_free(sc, &dma);

        return (err);
}
  611 
/*
 * Interrupt handler: mask everything, then loop acking and servicing
 * pending sources — RX ring reclaim (high/low priority), RX buffer
 * replenish, and TX completion reclaim — until the ISR reads clear.
 * Finally unmask (leaving only A2H_3 masked) and kick the transmit
 * path.  Returns nonzero iff the interrupt was ours.
 */
int
txp_intr(vsc)
        void *vsc;
{
        struct txp_softc *sc = vsc;
        struct txp_hostvar *hv = sc->sc_hostvar;
        u_int32_t isr;
        int claimed = 0;

        /* mask all interrupts */
        WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);

        /* Pull the device-updated host variable block into view. */
        bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
            sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

        isr = READ_REG(sc, TXP_ISR);
        while (isr) {
                claimed = 1;
                WRITE_REG(sc, TXP_ISR, isr);

                /* RX rings: service any descriptors between read and write offsets. */
                if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
                        txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
                if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
                        txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

                if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
                        txp_rxbuf_reclaim(sc);

                /* TX rings: reclaim completed descriptors, if any are outstanding. */
                if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
                    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
                        txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

                if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
                    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
                        txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

                isr = READ_REG(sc, TXP_ISR);
        }

        /* NOTE(review): this post-loop sync also uses POST* operations;
         * PRE* ops would be typical before handing the block back to the
         * device — confirm intended. */
        bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
            sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

        /* unmask all interrupts */
        WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

        /* Reclaimed TX slots may allow more packets to be queued. */
        txp_start(&sc->sc_arpcom.ec_if);

        return (claimed);
}
  665 
  666 void
  667 txp_rx_reclaim(sc, r, dma)
  668         struct txp_softc *sc;
  669         struct txp_rx_ring *r;
  670         struct txp_dma_alloc *dma;
  671 {
  672         struct ifnet *ifp = &sc->sc_arpcom.ec_if;
  673         struct txp_rx_desc *rxd;
  674         struct mbuf *m;
  675         struct txp_swdesc *sd;
  676         u_int32_t roff, woff;
  677         int sumflags = 0;
  678         int idx;
  679 
  680         roff = le32toh(*r->r_roff);
  681         woff = le32toh(*r->r_woff);
  682         idx = roff / sizeof(struct txp_rx_desc);
  683         rxd = r->r_desc + idx;
  684 
  685         while (roff != woff) {
  686 
  687                 bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
  688                     idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
  689                     BUS_DMASYNC_POSTREAD);
  690 
  691                 if (rxd->rx_flags & RX_FLAGS_ERROR) {
  692                         printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
  693                             le32toh(rxd->rx_stat));
  694                         ifp->if_ierrors++;
  695                         goto next;
  696                 }
  697 
  698                 /* retrieve stashed pointer */
  699                 bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
  700 
  701                 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
  702                     sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
  703                 bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
  704                 bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
  705                 m = sd->sd_mbuf;
  706                 free(sd, M_DEVBUF);
  707                 m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
  708 
  709 #ifdef __STRICT_ALIGNMENT
  710                 {
  711                         /*
  712                          * XXX Nice chip, except it won't accept "off by 2"
  713                          * buffers, so we're force to copy.  Supposedly
  714                          * this will be fixed in a newer firmware rev
  715                          * and this will be temporary.
  716                          */
  717                         struct mbuf *mnew;
  718 
  719                         MGETHDR(mnew, M_DONTWAIT, MT_DATA);
  720                         if (mnew == NULL) {
  721                                 m_freem(m);
  722                                 goto next;
  723                         }
  724                         if (m->m_len > (MHLEN - 2)) {
  725                                 MCLGET(mnew, M_DONTWAIT);
  726                                 if (!(mnew->m_flags & M_EXT)) {
  727                                         m_freem(mnew);
  728                                         m_freem(m);
  729                                         goto next;
  730                                 }
  731                         }
  732                         mnew->m_pkthdr.rcvif = ifp;
  733                         mnew->m_pkthdr.len = mnew->m_len = m->m_len;
  734                         mnew->m_data += 2;
  735                         bcopy(m->m_data, mnew->m_data, m->m_len);
  736                         m_freem(m);
  737                         m = mnew;
  738                 }
  739 #endif
  740 
  741 #if NBPFILTER > 0
  742                 /*
  743                  * Handle BPF listeners. Let the BPF user see the packet.
  744                  */
  745                 if (ifp->if_bpf)
  746                         bpf_mtap(ifp->if_bpf, m);
  747 #endif
  748 
  749                 if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
  750                         sumflags |= (M_CSUM_IPv4|M_CSUM_IPv4_BAD);
  751                 else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
  752                         sumflags |= M_CSUM_IPv4;
  753 
  754                 if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
  755                         sumflags |= (M_CSUM_TCPv4|M_CSUM_TCP_UDP_BAD);
  756                 else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
  757                         sumflags |= M_CSUM_TCPv4;
  758 
  759                 if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
  760                         sumflags |= (M_CSUM_UDPv4|M_CSUM_TCP_UDP_BAD);
  761                 else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
  762                         sumflags |= M_CSUM_UDPv4;
  763 
  764                 m->m_pkthdr.csum_flags = sumflags;
  765 
  766                 if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
  767                         struct m_tag *mtag;
  768 
  769                         mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
  770                                          M_NOWAIT);
  771                         if (!m) {
  772                                 printf("%s: no mbuf for tag\n",
  773                                        sc->sc_dev.dv_xname);
  774                                 m_freem(m);
  775                                 goto next;
  776                         }
  777                         *(u_int *)(mtag + 1) = htons(rxd->rx_vlan >> 16);
  778                         m_tag_prepend(m, mtag);
  779                 }
  780 
  781                 (*ifp->if_input)(ifp, m);
  782 
  783 next:
  784                 bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
  785                     idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
  786                     BUS_DMASYNC_PREREAD);
  787 
  788                 roff += sizeof(struct txp_rx_desc);
  789                 if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
  790                         idx = 0;
  791                         roff = 0;
  792                         rxd = r->r_desc;
  793                 } else {
  794                         idx++;
  795                         rxd++;
  796                 }
  797                 woff = le32toh(*r->r_woff);
  798         }
  799 
  800         *r->r_roff = htole32(woff);
  801 }
  802 
/*
 * Replenish the receive buffer ring: walk from one past the firmware's
 * write index up to (but not including) its read index, attaching a
 * fresh cluster mbuf and DMA map to each free descriptor.  Called from
 * txp_tick() and the rx path.  Allocation failures simply stop the
 * refill early; the ring is left at whatever fill level was reached.
 */
void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	/* hv indices are byte offsets into the ring; convert to indices */
	end = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	i = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_write_idx));

	/* start at the slot after the last one handed to the firmware */
	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		/* software descriptor to track the mbuf and its DMA map */
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		/* give the whole cluster to the NIC; trimmed on receive */
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		/* take ownership of the descriptor before rewriting it */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);
		    
		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		/* 64-bit bus address split across the two 32-bit fields */
		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		/* push the updated descriptor back to the device */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		/* publish the new write index after each filled slot */
		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}
  879 
  880 /*
  881  * Reclaim mbufs and entries from a transmit ring.
  882  */
  883 void
  884 txp_tx_reclaim(sc, r, dma)
  885         struct txp_softc *sc;
  886         struct txp_tx_ring *r;
  887         struct txp_dma_alloc *dma;
  888 {
  889         struct ifnet *ifp = &sc->sc_arpcom.ec_if;
  890         u_int32_t idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
  891         u_int32_t cons = r->r_cons, cnt = r->r_cnt;
  892         struct txp_tx_desc *txd = r->r_desc + cons;
  893         struct txp_swdesc *sd = sc->sc_txd + cons;
  894         struct mbuf *m;
  895 
  896         while (cons != idx) {
  897                 if (cnt == 0)
  898                         break;
  899 
  900                 bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
  901                     cons * sizeof(struct txp_tx_desc),
  902                     sizeof(struct txp_tx_desc),
  903                     BUS_DMASYNC_POSTWRITE);
  904 
  905                 if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
  906                     TX_FLAGS_TYPE_DATA) {
  907                         bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
  908                             sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
  909                         bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
  910                         m = sd->sd_mbuf;
  911                         if (m != NULL) {
  912                                 m_freem(m);
  913                                 txd->tx_addrlo = 0;
  914                                 txd->tx_addrhi = 0;
  915                                 ifp->if_opackets++;
  916                         }
  917                 }
  918                 ifp->if_flags &= ~IFF_OACTIVE;
  919 
  920                 if (++cons == TX_ENTRIES) {
  921                         txd = r->r_desc;
  922                         cons = 0;
  923                         sd = sc->sc_txd;
  924                 } else {
  925                         txd++;
  926                         sd++;
  927                 }
  928 
  929                 cnt--;
  930         }
  931 
  932         r->r_cons = cons;
  933         r->r_cnt = cnt;
  934         if (cnt == 0)
  935                 ifp->if_timer = 0;
  936 }
  937 
/*
 * Shutdown hook: mask all interrupt sources and quiesce the firmware so
 * the adapter stops DMAing into host memory across a reboot.  The final
 * argument to txp_command() is 0 here (presumably "don't wait for a
 * response", unlike txp_init() which passes 1 — TODO confirm against
 * txp_command()).
 */
void
txp_shutdown(vsc)
	void *vsc;
{
	struct txp_softc *sc = (struct txp_softc *)vsc;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* stop both DMA engines, then halt the on-board firmware */
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
}
  954 
  955 int
  956 txp_alloc_rings(sc)
  957         struct txp_softc *sc;
  958 {
  959         struct ifnet *ifp = &sc->sc_arpcom.ec_if;
  960         struct txp_boot_record *boot;
  961         struct txp_swdesc *sd;
  962         u_int32_t r;
  963         int i, j;
  964 
  965         /* boot record */
  966         if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma,
  967             BUS_DMA_COHERENT)) {
  968                 printf(": can't allocate boot record\n");
  969                 return (-1);
  970         }
  971         boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
  972         bzero(boot, sizeof(*boot));
  973         sc->sc_boot = boot;
  974 
  975         /* host variables */
  976         if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
  977             BUS_DMA_COHERENT)) {
  978                 printf(": can't allocate host ring\n");
  979                 goto bail_boot;
  980         }
  981         bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
  982         boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff);
  983         boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32);
  984         sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;
  985 
  986         /* high priority tx ring */
  987         if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
  988             &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
  989                 printf(": can't allocate high tx ring\n");
  990                 goto bail_host;
  991         }
  992         bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
  993         boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff);
  994         boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32);
  995         boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
  996         sc->sc_txhir.r_reg = TXP_H2A_1;
  997         sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
  998         sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
  999         sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
 1000         for (i = 0; i < TX_ENTRIES; i++) {
 1001                 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
 1002                     TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
 1003                     BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
 1004                         for (j = 0; j < i; j++) {
 1005                                 bus_dmamap_destroy(sc->sc_dmat,
 1006                                     sc->sc_txd[j].sd_map);
 1007                                 sc->sc_txd[j].sd_map = NULL;
 1008                         }
 1009                         goto bail_txhiring;
 1010                 }
 1011         }
 1012 
 1013         /* low priority tx ring */
 1014         if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
 1015             &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
 1016                 printf(": can't allocate low tx ring\n");
 1017                 goto bail_txhiring;
 1018         }
 1019         bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
 1020         boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff);
 1021         boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32);
 1022         boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
 1023         sc->sc_txlor.r_reg = TXP_H2A_3;
 1024         sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
 1025         sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
 1026         sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
 1027 
 1028         /* high priority rx ring */
 1029         if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
 1030             &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
 1031                 printf(": can't allocate high rx ring\n");
 1032                 goto bail_txloring;
 1033         }
 1034         bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
 1035         boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff);
 1036         boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32);
 1037         boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
 1038         sc->sc_rxhir.r_desc =
 1039             (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
 1040         sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
 1041         sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
 1042         bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
 1043             0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
 1044 
 1045         /* low priority ring */
 1046         if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
 1047             &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
 1048                 printf(": can't allocate low rx ring\n");
 1049                 goto bail_rxhiring;
 1050         }
 1051         bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
 1052         boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff);
 1053         boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32);
 1054         boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
 1055         sc->sc_rxlor.r_desc =
 1056             (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
 1057         sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
 1058         sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
 1059         bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
 1060             0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
 1061 
 1062         /* command ring */
 1063         if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
 1064             &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
 1065                 printf(": can't allocate command ring\n");
 1066                 goto bail_rxloring;
 1067         }
 1068         bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
 1069         boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff);
 1070         boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32);
 1071         boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
 1072         sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
 1073         sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
 1074         sc->sc_cmdring.lastwrite = 0;
 1075 
 1076         /* response ring */
 1077         if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
 1078             &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
 1079                 printf(": can't allocate response ring\n");
 1080                 goto bail_cmdring;
 1081         }
 1082         bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
 1083         boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff);
 1084         boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32);
 1085         boot->br_resp_siz = htole32(CMD_ENTRIES * sizeof(struct txp_rsp_desc));
 1086         sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
 1087         sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
 1088         sc->sc_rspring.lastwrite = 0;
 1089 
 1090         /* receive buffer ring */
 1091         if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
 1092             &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
 1093                 printf(": can't allocate rx buffer ring\n");
 1094                 goto bail_rspring;
 1095         }
 1096         bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
 1097         boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff);
 1098         boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32);
 1099         boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
 1100         sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
 1101         for (i = 0; i < RXBUF_ENTRIES; i++) {
 1102                 sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
 1103                     M_DEVBUF, M_NOWAIT);
 1104                 if (sd == NULL)
 1105                         break;
 1106 
 1107                 MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
 1108                 if (sd->sd_mbuf == NULL) {
 1109                         goto bail_rxbufring;
 1110                 }
 1111 
 1112                 MCLGET(sd->sd_mbuf, M_DONTWAIT);
 1113                 if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
 1114                         goto bail_rxbufring;
 1115                 }
 1116                 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
 1117                 sd->sd_mbuf->m_pkthdr.rcvif = ifp;
 1118                 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
 1119                     TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
 1120                         goto bail_rxbufring;
 1121                 }
 1122                 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
 1123                     BUS_DMA_NOWAIT)) {
 1124                         bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
 1125                         goto bail_rxbufring;
 1126                 }
 1127                 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
 1128                     sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
 1129 
 1130                 /* stash away pointer */
 1131                 bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));
 1132 
 1133                 sc->sc_rxbufs[i].rb_paddrlo =
 1134                     ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
 1135                 sc->sc_rxbufs[i].rb_paddrhi =
 1136                     ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
 1137         }
 1138         bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
 1139             0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
 1140             BUS_DMASYNC_PREWRITE);
 1141         sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
 1142             sizeof(struct txp_rxbuf_desc));
 1143 
 1144         /* zero dma */
 1145         if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
 1146             BUS_DMA_COHERENT)) {
 1147                 printf(": can't allocate response ring\n");
 1148                 goto bail_rxbufring;
 1149         }
 1150         bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
 1151         boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff);
 1152         boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32);
 1153 
 1154         /* See if it's waiting for boot, and try to boot it */
 1155         for (i = 0; i < 10000; i++) {
 1156                 r = READ_REG(sc, TXP_A2H_0);
 1157                 if (r == STAT_WAITING_FOR_BOOT)
 1158                         break;
 1159                 DELAY(50);
 1160         }
 1161         if (r != STAT_WAITING_FOR_BOOT) {
 1162                 printf(": not waiting for boot\n");
 1163                 goto bail;
 1164         }
 1165         WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
 1166         WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
 1167         WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
 1168 
 1169         /* See if it booted */
 1170         for (i = 0; i < 10000; i++) {
 1171                 r = READ_REG(sc, TXP_A2H_0);
 1172                 if (r == STAT_RUNNING)
 1173                         break;
 1174                 DELAY(50);
 1175         }
 1176         if (r != STAT_RUNNING) {
 1177                 printf(": fw not running\n");
 1178                 goto bail;
 1179         }
 1180 
 1181         /* Clear TX and CMD ring write registers */
 1182         WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
 1183         WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
 1184         WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
 1185         WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
 1186 
 1187         return (0);
 1188 
 1189 bail:
 1190         txp_dma_free(sc, &sc->sc_zero_dma);
 1191 bail_rxbufring:
 1192         txp_dma_free(sc, &sc->sc_rxbufring_dma);
 1193 bail_rspring:
 1194         txp_dma_free(sc, &sc->sc_rspring_dma);
 1195 bail_cmdring:
 1196         txp_dma_free(sc, &sc->sc_cmdring_dma);
 1197 bail_rxloring:
 1198         txp_dma_free(sc, &sc->sc_rxloring_dma);
 1199 bail_rxhiring:
 1200         txp_dma_free(sc, &sc->sc_rxhiring_dma);
 1201 bail_txloring:
 1202         txp_dma_free(sc, &sc->sc_txloring_dma);
 1203 bail_txhiring:
 1204         txp_dma_free(sc, &sc->sc_txhiring_dma);
 1205 bail_host:
 1206         txp_dma_free(sc, &sc->sc_host_dma);
 1207 bail_boot:
 1208         txp_dma_free(sc, &sc->sc_boot_dma);
 1209         return (-1);
 1210 }
 1211 
 1212 int
 1213 txp_dma_malloc(sc, size, dma, mapflags)
 1214         struct txp_softc *sc;
 1215         bus_size_t size;
 1216         struct txp_dma_alloc *dma;
 1217         int mapflags;
 1218 {
 1219         int r;
 1220 
 1221         if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 1222             &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
 1223                 goto fail_0;
 1224 
 1225         if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
 1226             size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
 1227                 goto fail_1;
 1228 
 1229         if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 1230             BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
 1231                 goto fail_2;
 1232 
 1233         if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
 1234             size, NULL, BUS_DMA_NOWAIT)) != 0)
 1235                 goto fail_3;
 1236 
 1237         dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
 1238         return (0);
 1239 
 1240 fail_3:
 1241         bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
 1242 fail_2:
 1243         bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
 1244 fail_1:
 1245         bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
 1246 fail_0:
 1247         return (r);
 1248 }
 1249 
/*
 * Release a DMA area created by txp_dma_malloc().  The order matters:
 * the unmap reads dm_mapsize from the map, so the map itself must be
 * destroyed last.
 */
void
txp_dma_free(sc, dma)
	struct txp_softc *sc;
	struct txp_dma_alloc *dma;
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}
 1260 
/*
 * Interface ioctl handler.  Runs at splnet.  Handles address
 * assignment, up/down flag changes, multicast list updates and media
 * selection; anything else is rejected with EINVAL.
 */
int
txp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

#if 0
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}
#endif

	switch(command) {
	case SIOCSIFADDR:
		/* assigning an address implicitly brings the interface up */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			txp_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			txp_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		/* note: re-inits even if already running */
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			txp_set_filter(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}
 1332 
/*
 * Bring the interface up: stop any current activity, program the rx
 * filter, enable the tx/rx engines, unmask interrupts and start the
 * periodic tick callout.  Note txp_stop() is called before splnet is
 * raised, matching the original ordering.
 */
void
txp_init(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	int s;

	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	/* last arg 1: wait for the firmware to acknowledge each command */
	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	/* enable all interrupt sources, then mask only A2H_3 */
	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	/* arm the once-per-second statistics/refill tick */
	if (!callout_pending(&sc->sc_tick))
		callout_schedule(&sc->sc_tick, hz);

	splx(s);
}
 1365 
/*
 * Once-per-second callout: replenish rx buffers and fold the adapter's
 * statistics into the interface counters.  The READ_STATISTICS response
 * is expected to carry exactly 6 extension descriptors; the ext[n]
 * field selections below presumably map to specific firmware counters
 * (per the Typhoon stats layout) — TODO confirm against the firmware
 * documentation.
 */
void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	/* rsp is allocated by txp_command2 on success; freed at out: */
	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	/* counters are cumulative on the NIC; clear after each read */
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	/* re-arm for the next second */
	callout_schedule(&sc->sc_tick, hz);
}
 1405 
 1406 void
 1407 txp_start(ifp)
 1408         struct ifnet *ifp;
 1409 {
 1410         struct txp_softc *sc = ifp->if_softc;
 1411         struct txp_tx_ring *r = &sc->sc_txhir;
 1412         struct txp_tx_desc *txd;
 1413         int txdidx;
 1414         struct txp_frag_desc *fxd;
 1415         struct mbuf *m, *mnew;
 1416         struct txp_swdesc *sd;
 1417         u_int32_t firstprod, firstcnt, prod, cnt, i;
 1418         struct m_tag *mtag;
 1419 
 1420         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 1421                 return;
 1422 
 1423         prod = r->r_prod;
 1424         cnt = r->r_cnt;
 1425 
 1426         while (1) {
 1427                 IFQ_POLL(&ifp->if_snd, m);
 1428                 if (m == NULL)
 1429                         break;
 1430                 mnew = NULL;
 1431 
 1432                 firstprod = prod;
 1433                 firstcnt = cnt;
 1434 
 1435                 sd = sc->sc_txd + prod;
 1436                 sd->sd_mbuf = m;
 1437 
 1438                 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
 1439                     BUS_DMA_NOWAIT)) {
 1440                         MGETHDR(mnew, M_DONTWAIT, MT_DATA);
 1441                         if (mnew == NULL)
 1442                                 goto oactive1;
 1443                         if (m->m_pkthdr.len > MHLEN) {
 1444                                 MCLGET(mnew, M_DONTWAIT);
 1445                                 if ((mnew->m_flags & M_EXT) == 0) {
 1446                                         m_freem(mnew);
 1447                                         goto oactive1;
 1448                                 }
 1449                         }
 1450                         m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
 1451                         mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
 1452                         IFQ_DEQUEUE(&ifp->if_snd, m);
 1453                         m_freem(m);
 1454                         m = mnew;
 1455                         if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
 1456                             BUS_DMA_NOWAIT))
 1457                                 goto oactive1;
 1458                 }
 1459 
 1460                 if ((TX_ENTRIES - cnt) < 4)
 1461                         goto oactive;
 1462 
 1463                 txd = r->r_desc + prod;
 1464                 txdidx = prod;
 1465                 txd->tx_flags = TX_FLAGS_TYPE_DATA;
 1466                 txd->tx_numdesc = 0;
 1467                 txd->tx_addrlo = 0;
 1468                 txd->tx_addrhi = 0;
 1469                 txd->tx_totlen = m->m_pkthdr.len;
 1470                 txd->tx_pflags = 0;
 1471                 txd->tx_numdesc = sd->sd_map->dm_nsegs;
 1472 
 1473                 if (++prod == TX_ENTRIES)
 1474                         prod = 0;
 1475 
 1476                 if (++cnt >= (TX_ENTRIES - 4))
 1477                         goto oactive;
 1478 
 1479                 mtag = m_tag_find(m, PACKET_TAG_VLAN, NULL);
 1480                 if (mtag)
 1481                         txd->tx_pflags = TX_PFLAGS_VLAN |
 1482                           (htons(*(u_int *)(mtag + 1)) << TX_PFLAGS_VLANTAG_S);
 1483 
 1484                 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
 1485                         txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
 1486 #ifdef TRY_TX_TCP_CSUM
 1487                 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
 1488                         txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
 1489 #endif
 1490 #ifdef TRY_TX_UDP_CSUM
 1491                 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
 1492                         txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
 1493 #endif
 1494 
 1495                 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
 1496                     sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
 1497 
 1498                 fxd = (struct txp_frag_desc *)(r->r_desc + prod);
 1499                 for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
 1500                         if (++cnt >= (TX_ENTRIES - 4)) {
 1501                                 bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
 1502                                     0, sd->sd_map->dm_mapsize,
 1503                                     BUS_DMASYNC_POSTWRITE);
 1504                                 goto oactive;
 1505                         }
 1506 
 1507                         fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
 1508                             FRAG_FLAGS_VALID;
 1509                         fxd->frag_rsvd1 = 0;
 1510                         fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
 1511                         fxd->frag_addrlo =
 1512                             ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
 1513                             0xffffffff;
 1514                         fxd->frag_addrhi =
 1515                             ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
 1516                             32;
 1517                         fxd->frag_rsvd2 = 0;
 1518 
 1519                         bus_dmamap_sync(sc->sc_dmat,
 1520                             sc->sc_txhiring_dma.dma_map,
 1521                             prod * sizeof(struct txp_frag_desc),
 1522                             sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);
 1523 
 1524                         if (++prod == TX_ENTRIES) {
 1525                                 fxd = (struct txp_frag_desc *)r->r_desc;
 1526                                 prod = 0;
 1527                         } else
 1528                                 fxd++;
 1529 
 1530                 }
 1531 
 1532                 /*
 1533                  * if mnew isn't NULL, we already dequeued and copied
 1534                  * the packet.
 1535                  */
 1536                 if (mnew == NULL)
 1537                         IFQ_DEQUEUE(&ifp->if_snd, m);
 1538 
 1539                 ifp->if_timer = 5;
 1540 
 1541 #if NBPFILTER > 0
 1542                 if (ifp->if_bpf)
 1543                         bpf_mtap(ifp->if_bpf, m);
 1544 #endif
 1545 
 1546                 txd->tx_flags |= TX_FLAGS_VALID;
 1547                 bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
 1548                     txdidx * sizeof(struct txp_tx_desc),
 1549                     sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);
 1550 
 1551 #if 0
 1552                 {
 1553                         struct mbuf *mx;
 1554                         int i;
 1555 
 1556                         printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
 1557                             txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
 1558                             txd->tx_pflags);
 1559                         for (mx = m; mx != NULL; mx = mx->m_next) {
 1560                                 for (i = 0; i < mx->m_len; i++) {
 1561                                         printf(":%02x",
 1562                                             (u_int8_t)m->m_data[i]);
 1563                                 }
 1564                         }
 1565                         printf("\n");
 1566                 }
 1567 #endif
 1568 
 1569                 WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
 1570         }
 1571 
 1572         r->r_prod = prod;
 1573         r->r_cnt = cnt;
 1574         return;
 1575 
 1576 oactive:
 1577         bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
 1578 oactive1:
 1579         ifp->if_flags |= IFF_OACTIVE;
 1580         r->r_prod = firstprod;
 1581         r->r_cnt = firstcnt;
 1582 }
 1583 
 1584 /*
 1585  * Handle simple commands sent to the typhoon
 1586  */
 1587 int
 1588 txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
 1589         struct txp_softc *sc;
 1590         u_int16_t id, in1, *out1;
 1591         u_int32_t in2, in3, *out2, *out3;
 1592         int wait;
 1593 {
 1594         struct txp_rsp_desc *rsp = NULL;
 1595 
 1596         if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
 1597                 return (-1);
 1598 
 1599         if (!wait)
 1600                 return (0);
 1601 
 1602         if (out1 != NULL)
 1603                 *out1 = le16toh(rsp->rsp_par1);
 1604         if (out2 != NULL)
 1605                 *out2 = le32toh(rsp->rsp_par2);
 1606         if (out3 != NULL)
 1607                 *out3 = le32toh(rsp->rsp_par3);
 1608         free(rsp, M_DEVBUF);
 1609         return (0);
 1610 }
 1611 
 1612 int
 1613 txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
 1614         struct txp_softc *sc;
 1615         u_int16_t id, in1;
 1616         u_int32_t in2, in3;
 1617         struct txp_ext_desc *in_extp;
 1618         u_int8_t in_extn;
 1619         struct txp_rsp_desc **rspp;
 1620         int wait;
 1621 {
 1622         struct txp_hostvar *hv = sc->sc_hostvar;
 1623         struct txp_cmd_desc *cmd;
 1624         struct txp_ext_desc *ext;
 1625         u_int32_t idx, i;
 1626         u_int16_t seq;
 1627 
 1628         if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
 1629                 printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
 1630                 return (-1);
 1631         }
 1632 
 1633         idx = sc->sc_cmdring.lastwrite;
 1634         cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
 1635         bzero(cmd, sizeof(*cmd));
 1636 
 1637         cmd->cmd_numdesc = in_extn;
 1638         seq = sc->sc_seq++;
 1639         cmd->cmd_seq = htole16(seq);
 1640         cmd->cmd_id = htole16(id);
 1641         cmd->cmd_par1 = htole16(in1);
 1642         cmd->cmd_par2 = htole32(in2);
 1643         cmd->cmd_par3 = htole32(in3);
 1644         cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
 1645             (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
 1646 
 1647         idx += sizeof(struct txp_cmd_desc);
 1648         if (idx == sc->sc_cmdring.size)
 1649                 idx = 0;
 1650 
 1651         for (i = 0; i < in_extn; i++) {
 1652                 ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
 1653                 bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
 1654                 in_extp++;
 1655                 idx += sizeof(struct txp_cmd_desc);
 1656                 if (idx == sc->sc_cmdring.size)
 1657                         idx = 0;
 1658         }
 1659 
 1660         sc->sc_cmdring.lastwrite = idx;
 1661 
 1662         WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
 1663         bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
 1664             sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
 1665 
 1666         if (!wait)
 1667                 return (0);
 1668 
 1669         for (i = 0; i < 10000; i++) {
 1670                 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
 1671                     sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
 1672                 idx = le32toh(hv->hv_resp_read_idx);
 1673                 if (idx != le32toh(hv->hv_resp_write_idx)) {
 1674                         *rspp = NULL;
 1675                         if (txp_response(sc, idx, id, seq, rspp))
 1676                                 return (-1);
 1677                         if (*rspp != NULL)
 1678                                 break;
 1679                 }
 1680                 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
 1681                     sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
 1682                 DELAY(50);
 1683         }
 1684         if (i == 1000 || (*rspp) == NULL) {
 1685                 printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
 1686                 return (-1);
 1687         }
 1688 
 1689         return (0);
 1690 }
 1691 
 1692 int
 1693 txp_response(sc, ridx, id, seq, rspp)
 1694         struct txp_softc *sc;
 1695         u_int32_t ridx;
 1696         u_int16_t id;
 1697         u_int16_t seq;
 1698         struct txp_rsp_desc **rspp;
 1699 {
 1700         struct txp_hostvar *hv = sc->sc_hostvar;
 1701         struct txp_rsp_desc *rsp;
 1702 
 1703         while (ridx != le32toh(hv->hv_resp_write_idx)) {
 1704                 rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);
 1705 
 1706                 if (id == le16toh(rsp->rsp_id) && le16toh(rsp->rsp_seq) == seq) {
 1707                         *rspp = (struct txp_rsp_desc *)malloc(
 1708                             sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
 1709                             M_DEVBUF, M_NOWAIT);
 1710                         if ((*rspp) == NULL)
 1711                                 return (-1);
 1712                         txp_rsp_fixup(sc, rsp, *rspp);
 1713                         return (0);
 1714                 }
 1715 
 1716                 if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
 1717                         printf("%s: response error: id 0x%x\n",
 1718                             TXP_DEVNAME(sc), le16toh(rsp->rsp_id));
 1719                         txp_rsp_fixup(sc, rsp, NULL);
 1720                         ridx = le32toh(hv->hv_resp_read_idx);
 1721                         continue;
 1722                 }
 1723 
 1724                 switch (le16toh(rsp->rsp_id)) {
 1725                 case TXP_CMD_CYCLE_STATISTICS:
 1726                 case TXP_CMD_MEDIA_STATUS_READ:
 1727                         break;
 1728                 case TXP_CMD_HELLO_RESPONSE:
 1729                         printf("%s: hello\n", TXP_DEVNAME(sc));
 1730                         break;
 1731                 default:
 1732                         printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
 1733                             le16toh(rsp->rsp_id));
 1734                 }
 1735 
 1736                 txp_rsp_fixup(sc, rsp, NULL);
 1737                 ridx = le32toh(hv->hv_resp_read_idx);
 1738                 hv->hv_resp_read_idx = le32toh(ridx);
 1739         }
 1740 
 1741         return (0);
 1742 }
 1743 
 1744 void
 1745 txp_rsp_fixup(sc, rsp, dst)
 1746         struct txp_softc *sc;
 1747         struct txp_rsp_desc *rsp, *dst;
 1748 {
 1749         struct txp_rsp_desc *src = rsp;
 1750         struct txp_hostvar *hv = sc->sc_hostvar;
 1751         u_int32_t i, ridx;
 1752 
 1753         ridx = le32toh(hv->hv_resp_read_idx);
 1754 
 1755         for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
 1756                 if (dst != NULL)
 1757                         bcopy(src, dst++, sizeof(struct txp_rsp_desc));
 1758                 ridx += sizeof(struct txp_rsp_desc);
 1759                 if (ridx == sc->sc_rspring.size) {
 1760                         src = sc->sc_rspring.base;
 1761                         ridx = 0;
 1762                 } else
 1763                         src++;
 1764                 sc->sc_rspring.lastwrite = ridx;
 1765                 hv->hv_resp_read_idx = htole32(ridx);
 1766         }
 1767         
 1768         hv->hv_resp_read_idx = htole32(ridx);
 1769 }
 1770 
 1771 int
 1772 txp_cmd_desc_numfree(sc)
 1773         struct txp_softc *sc;
 1774 {
 1775         struct txp_hostvar *hv = sc->sc_hostvar;
 1776         struct txp_boot_record *br = sc->sc_boot;
 1777         u_int32_t widx, ridx, nfree;
 1778 
 1779         widx = sc->sc_cmdring.lastwrite;
 1780         ridx = le32toh(hv->hv_cmd_read_idx);
 1781 
 1782         if (widx == ridx) {
 1783                 /* Ring is completely free */
 1784                 nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
 1785         } else {
 1786                 if (widx > ridx)
 1787                         nfree = le32toh(br->br_cmd_siz) -
 1788                             (widx - ridx + sizeof(struct txp_cmd_desc));
 1789                 else
 1790                         nfree = ridx - widx - sizeof(struct txp_cmd_desc);
 1791         }
 1792 
 1793         return (nfree / sizeof(struct txp_cmd_desc));
 1794 }
 1795 
/*
 * Stop the interface: ask the firmware to disable transmit, then
 * receive, and cancel the periodic tick callout if one is pending.
 */
void
txp_stop(sc)
	struct txp_softc *sc;
{
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	if (callout_pending(&sc->sc_tick))
		callout_stop(&sc->sc_tick);
}
 1806 
/*
 * if_watchdog hook.  Intentionally a no-op: no transmit-timeout
 * recovery action is implemented for this driver.
 */
void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}
 1812 
 1813 int
 1814 txp_ifmedia_upd(ifp)
 1815         struct ifnet *ifp;
 1816 {
 1817         struct txp_softc *sc = ifp->if_softc;
 1818         struct ifmedia *ifm = &sc->sc_ifmedia;
 1819         u_int16_t new_xcvr;
 1820 
 1821         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1822                 return (EINVAL);
 1823 
 1824         if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
 1825                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1826                         new_xcvr = TXP_XCVR_10_FDX;
 1827                 else
 1828                         new_xcvr = TXP_XCVR_10_HDX;
 1829         } else if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) ||
 1830                    (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX)) {
 1831                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1832                         new_xcvr = TXP_XCVR_100_FDX;
 1833                 else
 1834                         new_xcvr = TXP_XCVR_100_HDX;
 1835         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
 1836                 new_xcvr = TXP_XCVR_AUTO;
 1837         } else
 1838                 return (EINVAL);
 1839 
 1840         /* nothing to do */
 1841         if (sc->sc_xcvr == new_xcvr)
 1842                 return (0);
 1843 
 1844         txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
 1845             NULL, NULL, NULL, 0);
 1846         sc->sc_xcvr = new_xcvr;
 1847 
 1848         return (0);
 1849 }
 1850 
/*
 * ifmedia status callback: report link state and the active media,
 * derived from PHY registers read through firmware
 * TXP_CMD_PHY_MGMT_READ commands.
 */
void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/*
	 * BMSR is read twice on purpose: its link bit is latched, so
	 * the first read clears a stale indication and the second
	 * reflects the current state (standard MII idiom).
	 */
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	/* PHY isolated: report no media, status invalid. */
	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	/*
	 * For copper with autonegotiation enabled, decode the link
	 * partner abilities (highest common capability first).
	 */
	if (!(sc->sc_flags & TXP_FIBER) && (bmcr & BMCR_AUTOEN)) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			/* Autonegotiation not yet complete. */
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		/* Fixed media (or fiber): report what was configured. */
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	/* A PHY read failed; report no media and mark status invalid. */
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}
 1916 
/*
 * Debug helper: decode and print a ring descriptor according to the
 * type field in its flags word.  All four struct pointers alias the
 * same memory; only the interpretation matching the type tag is used.
 */
void
txp_show_descriptor(d)
	void *d;
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		/* unrecognized type tag; dump raw fields as if a command */
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	}
}
 1962 
/*
 * Program the typhoon's receive filter from the interface flags and
 * the multicast list: promiscuous, broadcast, all-multicast, or a
 * 64-bit hash built from the specific multicast addresses.
 */
void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct ethercom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t crc, carry, hashbit, hash[2];
	u_int16_t filter;
	u_int8_t octet;
	int i, j, mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				goto again;
			}

			mcnt++;
			/* Bit-serial CRC over the 6-byte address. */
			crc = 0xffffffff;

			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];
				for (j = 0; j < 8; j++) {
					carry = ((crc & 0x80000000) ? 1 : 0) ^
					    (octet & 1);
					crc <<= 1;
					octet >>= 1;
					if (carry)
						crc = (crc ^ TXP_POLYNOMIAL) |
						    carry;
				}
			}
			/* Low 6 CRC bits select one of 64 hash-table bits. */
			hashbit = (u_int16_t)(crc & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			/* Upload the two 32-bit hash words to the firmware. */
			filter |= TXP_RXFILT_HASHMULTI;
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}
 2040 
/*
 * Query the firmware's offload capabilities (TXP_CMD_OFFLOAD_READ),
 * record the subsets this driver supports, advertise them on the
 * interface, and write the chosen set back via TXP_CMD_OFFLOAD_WRITE.
 */
void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	/* The offload response carries exactly one extension descriptor. */
	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

	sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	/* Enable a feature only if it is set in both rsp_par2 and rsp_par3. */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	}

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	/* TCP/UDP transmit checksumming is gated behind compile-time flags. */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#ifdef TRY_TX_UDP_CSUM
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}

Cache object: 4c516755c000f119fe86f9dbc91d5628


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.