FreeBSD/Linux Kernel Cross Reference
sys/dev/jme/if_jme.c

    1 /*-
    2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD$");
   30 
   31 #include <sys/param.h>
   32 #include <sys/systm.h>
   33 #include <sys/bus.h>
   34 #include <sys/endian.h>
   35 #include <sys/kernel.h>
   36 #include <sys/malloc.h>
   37 #include <sys/mbuf.h>
   38 #include <sys/rman.h>
   39 #include <sys/module.h>
   40 #include <sys/proc.h>
   41 #include <sys/queue.h>
   42 #include <sys/socket.h>
   43 #include <sys/sockio.h>
   44 #include <sys/sysctl.h>
   45 #include <sys/taskqueue.h>
   46 
   47 #include <net/bpf.h>
   48 #include <net/if.h>
   49 #include <net/if_arp.h>
   50 #include <net/ethernet.h>
   51 #include <net/if_dl.h>
   52 #include <net/if_media.h>
   53 #include <net/if_types.h>
   54 #include <net/if_vlan_var.h>
   55 
   56 #include <netinet/in.h>
   57 #include <netinet/in_systm.h>
   58 #include <netinet/ip.h>
   59 #include <netinet/tcp.h>
   60 
   61 #include <dev/mii/mii.h>
   62 #include <dev/mii/miivar.h>
   63 
   64 #include <dev/pci/pcireg.h>
   65 #include <dev/pci/pcivar.h>
   66 
   67 #include <machine/atomic.h>
   68 #include <machine/bus.h>
   69 #include <machine/in_cksum.h>
   70 
   71 #include <dev/jme/if_jmereg.h>
   72 #include <dev/jme/if_jmevar.h>
   73 
   74 /* "device miibus" required.  See GENERIC if you get errors here. */
   75 #include "miibus_if.h"
   76 
   77 #ifndef IFCAP_WOL
   78 #define IFCAP_WOL               0
   79 #define IFCAP_WOL_MAGIC         0
   80 #endif
   81 #ifndef IFCAP_TSO4
   82 #define IFCAP_TSO4              0
   83 #define CSUM_TSO                0
   84 #endif
   85 #ifndef IFCAP_VLAN_HWCSUM
   86 #define IFCAP_VLAN_HWCSUM       0
   87 #endif
   88 #ifndef VLAN_CAPABILITIES
   89 #define VLAN_CAPABILITIES(x)
   90 #endif
   91 #if __FreeBSD_version < 700000
   92 #define FILTER_STRAY
   93 #define FILTER_HANDLED
   94 #define m_collapse(x, y, z)     m_defrag(x, y)
   95 #endif
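       /*
        * Compatibility shims for pre-7.0 kernels: the filter-capable
        * bus_setup_intr(9) variant (with its FILTER_* return values) and
        * m_collapse(9) are not available there, so older kernels get a
        * plain interrupt handler and fall back to m_defrag(9).
        */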
   96 
    97 /* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
   98 #undef  JME_SHOW_ERRORS
   99 
  100 #define JME_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
  101 
  102 MODULE_DEPEND(jme, pci, 1, 1, 1);
  103 MODULE_DEPEND(jme, ether, 1, 1, 1);
  104 MODULE_DEPEND(jme, miibus, 1, 1, 1);
  105 
  106 /* Tunables. */
  107 static int msi_disable = 0;
  108 static int msix_disable = 0;
  109 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
  110 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
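       /*
        * Both knobs are boot-time loader tunables; on a typical system
        * they would be set from /boot/loader.conf before the module is
        * loaded, for example:
        *
        *   hw.jme.msi_disable="1"
        *   hw.jme.msix_disable="1"
        */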
  111 
  112 /*
  113  * Devices supported by this driver.
  114  */
  115 static struct jme_dev {
  116         uint16_t        jme_vendorid;
  117         uint16_t        jme_deviceid;
  118         const char      *jme_name;
  119 } jme_devs[] = {
  120         { VENDORID_JMICRON, DEVICEID_JMC250,
  121             "JMicron Inc, JMC250 Gigabit Ethernet" },
  122         { VENDORID_JMICRON, DEVICEID_JMC260,
  123             "JMicron Inc, JMC260 Fast Ethernet" },
  124 };
  125 
  126 static int jme_miibus_readreg(device_t, int, int);
  127 static int jme_miibus_writereg(device_t, int, int, int);
  128 static void jme_miibus_statchg(device_t);
  129 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
  130 static int jme_mediachange(struct ifnet *);
  131 static int jme_probe(device_t);
  132 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
  133 static int jme_eeprom_macaddr(struct jme_softc *);
  134 static void jme_reg_macaddr(struct jme_softc *);
  135 static void jme_map_intr_vector(struct jme_softc *);
  136 static int jme_attach(device_t);
  137 static int jme_detach(device_t);
  138 static void jme_sysctl_node(struct jme_softc *);
  139 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  140 static int jme_dma_alloc(struct jme_softc *);
  141 static void jme_dma_free(struct jme_softc *);
  142 static int jme_shutdown(device_t);
  143 static void jme_setlinkspeed(struct jme_softc *);
  144 static void jme_setwol(struct jme_softc *);
  145 static int jme_suspend(device_t);
  146 static int jme_resume(device_t);
  147 static int jme_encap(struct jme_softc *, struct mbuf **);
  148 static void jme_tx_task(void *, int);
  149 static void jme_start(struct ifnet *);
  150 static void jme_watchdog(struct jme_softc *);
  151 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
  152 static void jme_mac_config(struct jme_softc *);
  153 static void jme_link_task(void *, int);
  154 #if __FreeBSD_version < 700000
  155 static void jme_intr(void *);
  156 #else
  157 static int jme_intr(void *);
  158 #endif
  159 static void jme_int_task(void *, int);
  160 static void jme_txeof(struct jme_softc *);
  161 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
  162 static void jme_rxeof(struct jme_softc *);
  163 static int jme_rxintr(struct jme_softc *, int);
  164 static void jme_tick(void *);
  165 static void jme_reset(struct jme_softc *);
  166 static void jme_init(void *);
  167 static void jme_init_locked(struct jme_softc *);
  168 static void jme_stop(struct jme_softc *);
  169 static void jme_stop_tx(struct jme_softc *);
  170 static void jme_stop_rx(struct jme_softc *);
  171 static int jme_init_rx_ring(struct jme_softc *);
  172 static void jme_init_tx_ring(struct jme_softc *);
  173 static void jme_init_ssb(struct jme_softc *);
  174 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
  175 static void jme_set_vlan(struct jme_softc *);
  176 static void jme_set_filter(struct jme_softc *);
  177 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  178 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
  179 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
  180 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
  181 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
  182 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
  183 
  184 
  185 static device_method_t jme_methods[] = {
  186         /* Device interface. */
  187         DEVMETHOD(device_probe,         jme_probe),
  188         DEVMETHOD(device_attach,        jme_attach),
  189         DEVMETHOD(device_detach,        jme_detach),
  190         DEVMETHOD(device_shutdown,      jme_shutdown),
  191         DEVMETHOD(device_suspend,       jme_suspend),
  192         DEVMETHOD(device_resume,        jme_resume),
  193 
  194         /* MII interface. */
  195         DEVMETHOD(miibus_readreg,       jme_miibus_readreg),
  196         DEVMETHOD(miibus_writereg,      jme_miibus_writereg),
  197         DEVMETHOD(miibus_statchg,       jme_miibus_statchg),
  198 
  199         { NULL, NULL }
  200 };
  201 
  202 static driver_t jme_driver = {
  203         "jme",
  204         jme_methods,
  205         sizeof(struct jme_softc)
  206 };
  207 
  208 static devclass_t jme_devclass;
  209 
  210 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
  211 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
  212 
  213 static struct resource_spec jme_res_spec_mem[] = {
  214         { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
  215         { -1,                   0,              0 }
  216 };
  217 
  218 static struct resource_spec jme_irq_spec_legacy[] = {
  219         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  220         { -1,                   0,              0 }
  221 };
  222 
  223 static struct resource_spec jme_irq_spec_msi[] = {
  224         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  225         { SYS_RES_IRQ,          2,              RF_ACTIVE },
  226         { SYS_RES_IRQ,          3,              RF_ACTIVE },
  227         { SYS_RES_IRQ,          4,              RF_ACTIVE },
  228         { SYS_RES_IRQ,          5,              RF_ACTIVE },
  229         { SYS_RES_IRQ,          6,              RF_ACTIVE },
  230         { SYS_RES_IRQ,          7,              RF_ACTIVE },
  231         { SYS_RES_IRQ,          8,              RF_ACTIVE },
  232         { -1,                   0,              0 }
  233 };
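       /*
        * MSI/MSI-X interrupt resources are numbered from rid 1 (rid 0 is
        * the legacy INTx line), which is why the spec above lists rids 1
        * through 8 while the legacy spec uses rid 0.
        */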
  234 
  235 /*
  236  *      Read a PHY register on the MII of the JMC250.
  237  */
  238 static int
  239 jme_miibus_readreg(device_t dev, int phy, int reg)
  240 {
  241         struct jme_softc *sc;
  242         uint32_t val;
  243         int i;
  244 
  245         sc = device_get_softc(dev);
  246 
  247         /* For FPGA version, PHY address 0 should be ignored. */
  248         if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
  249                 if (phy == 0)
  250                         return (0);
  251         } else {
  252                 if (sc->jme_phyaddr != phy)
  253                         return (0);
  254         }
  255 
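               /*
                * Kick off an SMI read cycle and busy-wait for the
                * controller to clear SMI_OP_EXECUTE; with a 1us delay per
                * iteration the wait is bounded by roughly JME_PHY_TIMEOUT
                * microseconds.
                */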
  256         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
  257             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
  258         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
  259                 DELAY(1);
  260                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
  261                         break;
  262         }
  263 
  264         if (i == 0) {
  265                 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
  266                 return (0);
  267         }
  268 
  269         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
  270 }
  271 
  272 /*
  273  *      Write a PHY register on the MII of the JMC250.
  274  */
  275 static int
  276 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
  277 {
  278         struct jme_softc *sc;
  279         int i;
  280 
  281         sc = device_get_softc(dev);
  282 
  283         /* For FPGA version, PHY address 0 should be ignored. */
  284         if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
  285                 if (phy == 0)
  286                         return (0);
  287         } else {
  288                 if (sc->jme_phyaddr != phy)
  289                         return (0);
  290         }
  291 
  292         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
  293             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
  294             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
  295         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
  296                 DELAY(1);
  297                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
  298                         break;
  299         }
  300 
  301         if (i == 0)
  302                 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
  303 
  304         return (0);
  305 }
  306 
  307 /*
  308  *      Callback from MII layer when media changes.
  309  */
  310 static void
  311 jme_miibus_statchg(device_t dev)
  312 {
  313         struct jme_softc *sc;
  314 
  315         sc = device_get_softc(dev);
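               /*
                * The real work for a link state change is done in
                * jme_link_task(); here we only queue that task on
                * taskqueue_swi so the miibus callback stays short.
                */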
  316         taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
  317 }
  318 
  319 /*
  320  *      Get the current interface media status.
  321  */
  322 static void
  323 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
  324 {
  325         struct jme_softc *sc;
  326         struct mii_data *mii;
  327 
  328         sc = ifp->if_softc;
  329         JME_LOCK(sc);
  330         mii = device_get_softc(sc->jme_miibus);
  331 
  332         mii_pollstat(mii);
  333         ifmr->ifm_status = mii->mii_media_status;
  334         ifmr->ifm_active = mii->mii_media_active;
  335         JME_UNLOCK(sc);
  336 }
  337 
  338 /*
  339  *      Set hardware to newly-selected media.
  340  */
  341 static int
  342 jme_mediachange(struct ifnet *ifp)
  343 {
  344         struct jme_softc *sc;
  345         struct mii_data *mii;
  346         struct mii_softc *miisc;
  347         int error;
  348 
  349         sc = ifp->if_softc;
  350         JME_LOCK(sc);
  351         mii = device_get_softc(sc->jme_miibus);
  352         if (mii->mii_instance != 0) {
  353                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
  354                         mii_phy_reset(miisc);
  355         }
  356         error = mii_mediachg(mii);
  357         JME_UNLOCK(sc);
  358 
  359         return (error);
  360 }
  361 
  362 static int
  363 jme_probe(device_t dev)
  364 {
  365         struct jme_dev *sp;
  366         int i;
  367         uint16_t vendor, devid;
  368 
  369         vendor = pci_get_vendor(dev);
  370         devid = pci_get_device(dev);
  371         sp = jme_devs;
  372         for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
  373             i++, sp++) {
  374                 if (vendor == sp->jme_vendorid &&
  375                     devid == sp->jme_deviceid) {
  376                         device_set_desc(dev, sp->jme_name);
  377                         return (BUS_PROBE_DEFAULT);
  378                 }
  379         }
  380 
  381         return (ENXIO);
  382 }
  383 
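       /*
        * Read a single byte from the EEPROM through the SMB interface:
        * wait for the controller to go idle, write the byte address with
        * a read trigger to JME_SMBINTF, poll until the trigger bit clears
        * and then extract the returned data.  Both waits are bounded by
        * JME_TIMEOUT 1us polls.
        */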
  384 static int
  385 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
  386 {
  387         uint32_t reg;
  388         int i;
  389 
  390         *val = 0;
  391         for (i = JME_TIMEOUT; i > 0; i--) {
  392                 reg = CSR_READ_4(sc, JME_SMBCSR);
  393                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
  394                         break;
  395                 DELAY(1);
  396         }
  397 
  398         if (i == 0) {
  399                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
  400                 return (ETIMEDOUT);
  401         }
  402 
  403         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
  404         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
  405         for (i = JME_TIMEOUT; i > 0; i--) {
  406                 DELAY(1);
  407                 reg = CSR_READ_4(sc, JME_SMBINTF);
  408                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
  409                         break;
  410         }
  411 
  412         if (i == 0) {
  413                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
  414                 return (ETIMEDOUT);
  415         }
  416 
  417         reg = CSR_READ_4(sc, JME_SMBINTF);
  418         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
  419 
  420         return (0);
  421 }
  422 
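       /*
        * Recover the station address from the EEPROM: after verifying the
        * two signature bytes, walk the JME_EEPROM_DESC_BYTES-sized
        * descriptors and collect the bytes destined for the PAR0..PAR0+5
        * registers of function 0, stopping at the end-of-descriptor marker
        * or once all six bytes have been seen.
        */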
  423 static int
  424 jme_eeprom_macaddr(struct jme_softc *sc)
  425 {
  426         uint8_t eaddr[ETHER_ADDR_LEN];
  427         uint8_t fup, reg, val;
  428         uint32_t offset;
  429         int match;
  430 
  431         offset = 0;
  432         if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
  433             fup != JME_EEPROM_SIG0)
  434                 return (ENOENT);
  435         if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
  436             fup != JME_EEPROM_SIG1)
  437                 return (ENOENT);
  438         match = 0;
  439         do {
  440                 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
  441                         break;
  442                 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
  443                     (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
  444                         if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
  445                                 break;
  446                         if (reg >= JME_PAR0 &&
  447                             reg < JME_PAR0 + ETHER_ADDR_LEN) {
  448                                 if (jme_eeprom_read_byte(sc, offset + 2,
  449                                     &val) != 0)
  450                                         break;
  451                                 eaddr[reg - JME_PAR0] = val;
  452                                 match++;
  453                         }
  454                 }
  455                 /* Check for the end of EEPROM descriptor. */
  456                 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
  457                         break;
  458                 /* Try next eeprom descriptor. */
  459                 offset += JME_EEPROM_DESC_BYTES;
  460         } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
  461 
  462         if (match == ETHER_ADDR_LEN) {
  463                 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
  464                 return (0);
  465         }
  466 
  467         return (ENOENT);
  468 }
  469 
  470 static void
  471 jme_reg_macaddr(struct jme_softc *sc)
  472 {
  473         uint32_t par0, par1;
  474 
  475         /* Read station address. */
  476         par0 = CSR_READ_4(sc, JME_PAR0);
  477         par1 = CSR_READ_4(sc, JME_PAR1);
  478         par1 &= 0xFFFF;
  479         if ((par0 == 0 && par1 == 0) ||
  480             (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
  481                 device_printf(sc->jme_dev,
  482                     "generating fake ethernet address.\n");
  483                 par0 = arc4random();
  484                 /* Set OUI to JMicron. */
  485                 sc->jme_eaddr[0] = 0x00;
  486                 sc->jme_eaddr[1] = 0x1B;
  487                 sc->jme_eaddr[2] = 0x8C;
  488                 sc->jme_eaddr[3] = (par0 >> 16) & 0xff;
  489                 sc->jme_eaddr[4] = (par0 >> 8) & 0xff;
  490                 sc->jme_eaddr[5] = par0 & 0xff;
  491         } else {
  492                 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
  493                 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
  494                 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
  495                 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
  496                 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
  497                 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
  498         }
  499 }
  500 
  501 static void
  502 jme_map_intr_vector(struct jme_softc *sc)
  503 {
  504         uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
  505 
  506         bzero(map, sizeof(map));
  507 
   508         /* Map Tx interrupt sources to MSI/MSIX vector 2. */
  509         map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
  510             MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
  511         map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
  512             MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
  513         map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
  514             MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
  515         map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
  516             MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
  517         map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
  518             MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
   519         map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
  520             MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
  521         map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
  522             MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
  523         map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
  524             MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
  525         map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
  526             MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
  527         map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
  528             MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
  529 
   530         /* Map Rx interrupt sources to MSI/MSIX vector 1. */
  531         map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
  532             MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
  533         map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
  534             MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
  535         map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
  536             MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
  537         map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
  538             MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
  539         map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
  540             MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
  541         map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
  542             MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
  543         map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
  544             MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
  545         map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
  546             MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
  547         map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
  548             MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
  549         map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
  550             MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
  551         map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
  552             MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
  553         map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
  554             MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
  555         map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
  556             MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
  557         map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
  558             MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
  559         map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
  560             MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
  561         map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
  562             MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
  563 
   564         /* Map all other interrupt sources to MSI/MSIX vector 0. */
  565         CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
  566         CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
  567         CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
  568         CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
  569 }
  570 
  571 static int
  572 jme_attach(device_t dev)
  573 {
  574         struct jme_softc *sc;
  575         struct ifnet *ifp;
  576         struct mii_softc *miisc;
  577         struct mii_data *mii;
  578         uint32_t reg;
  579         uint16_t burst;
  580         int error, i, msic, msixc, pmc;
  581 
  582         error = 0;
  583         sc = device_get_softc(dev);
  584         sc->jme_dev = dev;
  585 
  586         mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  587             MTX_DEF);
  588         callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
  589         TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
  590         TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
  591 
   592         /*
   593          * Map the device. The JMC250 supports both memory-mapped and
   594          * I/O register space access, but I/O access would require using
   595          * separate BARs to reach all of the registers, so it is not
   596          * worth bothering with. The JMC250 maps its entire register
   597          * space in a single 16K memory BAR.
   598          */
  599         pci_enable_busmaster(dev);
  600         sc->jme_res_spec = jme_res_spec_mem;
  601         sc->jme_irq_spec = jme_irq_spec_legacy;
  602         error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
  603         if (error != 0) {
  604                 device_printf(dev, "cannot allocate memory resources.\n");
  605                 goto fail;
  606         }
  607 
  608         /* Allocate IRQ resources. */
  609         msixc = pci_msix_count(dev);
  610         msic = pci_msi_count(dev);
  611         if (bootverbose) {
  612                 device_printf(dev, "MSIX count : %d\n", msixc);
  613                 device_printf(dev, "MSI count : %d\n", msic);
  614         }
  615 
  616         /* Prefer MSIX over MSI. */
  617         if (msix_disable == 0 || msi_disable == 0) {
  618                 if (msix_disable == 0 && msixc == JME_MSIX_MESSAGES &&
  619                     pci_alloc_msix(dev, &msixc) == 0) {
   620                         if (msixc == JME_MSIX_MESSAGES) {
  621                                 device_printf(dev, "Using %d MSIX messages.\n",
  622                                     msixc);
  623                                 sc->jme_flags |= JME_FLAG_MSIX;
  624                                 sc->jme_irq_spec = jme_irq_spec_msi;
  625                         } else
  626                                 pci_release_msi(dev);
  627                 }
  628                 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
  629                     msic == JME_MSI_MESSAGES &&
  630                     pci_alloc_msi(dev, &msic) == 0) {
  631                         if (msic == JME_MSI_MESSAGES) {
  632                                 device_printf(dev, "Using %d MSI messages.\n",
  633                                     msic);
  634                                 sc->jme_flags |= JME_FLAG_MSI;
  635                                 sc->jme_irq_spec = jme_irq_spec_msi;
  636                         } else
  637                                 pci_release_msi(dev);
  638                 }
  639                 /* Map interrupt vector 0, 1 and 2. */
  640                 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
  641                     (sc->jme_flags & JME_FLAG_MSIX) != 0)
  642                         jme_map_intr_vector(sc);
  643         }
  644 
  645         error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
  646         if (error != 0) {
  647                 device_printf(dev, "cannot allocate IRQ resources.\n");
  648                 goto fail;
  649         }
  650 
  651         sc->jme_rev = pci_get_device(dev);
  652         if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
  653                 sc->jme_flags |= JME_FLAG_FASTETH;
  654                 sc->jme_flags |= JME_FLAG_NOJUMBO;
  655         }
  656         reg = CSR_READ_4(sc, JME_CHIPMODE);
  657         sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
  658         if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
  659             CHIPMODE_NOT_FPGA)
  660                 sc->jme_flags |= JME_FLAG_FPGA;
  661         if (bootverbose) {
  662                 device_printf(dev, "PCI device revision : 0x%04x\n",
  663                     sc->jme_rev);
  664                 device_printf(dev, "Chip revision : 0x%02x\n",
  665                     sc->jme_chip_rev);
  666                 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
  667                         device_printf(dev, "FPGA revision : 0x%04x\n",
  668                             (reg & CHIPMODE_FPGA_REV_MASK) >>
  669                             CHIPMODE_FPGA_REV_SHIFT);
  670         }
  671         if (sc->jme_chip_rev == 0xFF) {
  672                 device_printf(dev, "Unknown chip revision : 0x%02x\n",
  673                     sc->jme_rev);
  674                 error = ENXIO;
  675                 goto fail;
  676         }
  677 
  678         /* Reset the ethernet controller. */
  679         jme_reset(sc);
  680 
  681         /* Get station address. */
  682         reg = CSR_READ_4(sc, JME_SMBCSR);
  683         if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
  684                 error = jme_eeprom_macaddr(sc);
  685         if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
  686                 if (error != 0 && (bootverbose))
  687                         device_printf(sc->jme_dev,
  688                             "ethernet hardware address not found in EEPROM.\n");
  689                 jme_reg_macaddr(sc);
  690         }
  691 
   692         /*
   693          * Save the PHY address.
   694          * The integrated JR0211 has a fixed PHY address, whereas the
   695          * FPGA version requires PHY probing to find the correct address.
   696          */
  697         if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
  698                 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
  699                     GPREG0_PHY_ADDR_MASK;
  700                 if (bootverbose)
  701                         device_printf(dev, "PHY is at address %d.\n",
  702                             sc->jme_phyaddr);
  703         } else
  704                 sc->jme_phyaddr = 0;
  705 
  706         /* Set max allowable DMA size. */
  707         if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
  708                 sc->jme_flags |= JME_FLAG_PCIE;
  709                 burst = pci_read_config(dev, i + 0x08, 2);
  710                 if (bootverbose) {
  711                         device_printf(dev, "Read request size : %d bytes.\n",
  712                             128 << ((burst >> 12) & 0x07));
  713                         device_printf(dev, "TLP payload size : %d bytes.\n",
  714                             128 << ((burst >> 5) & 0x07));
  715                 }
  716                 switch ((burst >> 12) & 0x07) {
  717                 case 0:
  718                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
  719                         break;
  720                 case 1:
  721                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
  722                         break;
  723                 default:
  724                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
  725                         break;
  726                 }
  727                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
  728         } else {
  729                 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
  730                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
  731         }
  732         /* Create coalescing sysctl node. */
  733         jme_sysctl_node(sc);
   734         if ((error = jme_dma_alloc(sc)) != 0)
  735                 goto fail;
  736 
  737         ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
  738         if (ifp == NULL) {
  739                 device_printf(dev, "cannot allocate ifnet structure.\n");
  740                 error = ENXIO;
  741                 goto fail;
  742         }
  743 
  744         ifp->if_softc = sc;
  745         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  746         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  747         ifp->if_ioctl = jme_ioctl;
  748         ifp->if_start = jme_start;
  749         ifp->if_init = jme_init;
  750         ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
  751         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  752         IFQ_SET_READY(&ifp->if_snd);
  753         /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
  754         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
  755         ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
  756         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
  757                 sc->jme_flags |= JME_FLAG_PMCAP;
  758                 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
  759         }
  760         ifp->if_capenable = ifp->if_capabilities;
  761 
  762         /* Set up MII bus. */
  763         if ((error = mii_phy_probe(dev, &sc->jme_miibus, jme_mediachange,
  764             jme_mediastatus)) != 0) {
  765                 device_printf(dev, "no PHY found!\n");
  766                 goto fail;
  767         }
  768 
  769         /*
  770          * Force PHY to FPGA mode.
  771          */
  772         if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
  773                 mii = device_get_softc(sc->jme_miibus);
  774                 if (mii->mii_instance != 0) {
  775                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
  776                                 if (miisc->mii_phy != 0) {
  777                                         sc->jme_phyaddr = miisc->mii_phy;
  778                                         break;
  779                                 }
  780                         }
  781                         if (sc->jme_phyaddr != 0) {
  782                                 device_printf(sc->jme_dev,
  783                                     "FPGA PHY is at %d\n", sc->jme_phyaddr);
  784                                 /* vendor magic. */
  785                                 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
  786                                     0x0004);
  787                         }
  788                 }
  789         }
  790 
  791         ether_ifattach(ifp, sc->jme_eaddr);
  792 
  793         /* VLAN capability setup */
  794         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
  795             IFCAP_VLAN_HWCSUM;
  796         ifp->if_capenable = ifp->if_capabilities;
  797 
  798         /* Tell the upper layer(s) we support long frames. */
  799         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  800 
  801         /* Create local taskq. */
  802         TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
  803         sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
  804             taskqueue_thread_enqueue, &sc->jme_tq);
  805         if (sc->jme_tq == NULL) {
  806                 device_printf(dev, "could not create taskqueue.\n");
  807                 ether_ifdetach(ifp);
  808                 error = ENXIO;
  809                 goto fail;
  810         }
  811         taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
  812             device_get_nameunit(sc->jme_dev));
  813 
  814         if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
  815                 msic = JME_MSIX_MESSAGES;
  816         else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
  817                 msic = JME_MSI_MESSAGES;
  818         else
  819                 msic = 1;
  820         for (i = 0; i < msic; i++) {
  821 #if __FreeBSD_version > 700000
  822                 error = bus_setup_intr(dev, sc->jme_irq[i],
  823                     INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
  824                     &sc->jme_intrhand[i]);
  825 #else
  826                 error = bus_setup_intr(dev, sc->jme_irq[i],
  827                     INTR_TYPE_NET | INTR_MPSAFE, jme_intr, sc,
  828                     &sc->jme_intrhand[i]);
  829 #endif
  830                 if (error != 0)
  831                         break;
  832         }
  833 
  834         if (error != 0) {
  835                 device_printf(dev, "could not set up interrupt handler.\n");
  836                 taskqueue_free(sc->jme_tq);
  837                 sc->jme_tq = NULL;
  838                 ether_ifdetach(ifp);
  839                 goto fail;
  840         }
  841 
  842 fail:
  843         if (error != 0)
  844                 jme_detach(dev);
  845 
  846         return (error);
  847 }
  848 
  849 static int
  850 jme_detach(device_t dev)
  851 {
  852         struct jme_softc *sc;
  853         struct ifnet *ifp;
  854         int i, msic;
  855 
  856         sc = device_get_softc(dev);
  857 
  858         ifp = sc->jme_ifp;
  859         if (device_is_attached(dev)) {
  860                 JME_LOCK(sc);
  861                 sc->jme_flags |= JME_FLAG_DETACH;
  862                 jme_stop(sc);
  863                 JME_UNLOCK(sc);
  864                 callout_drain(&sc->jme_tick_ch);
  865                 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
  866                 taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
  867                 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
  868                 ether_ifdetach(ifp);
  869         }
  870 
  871         if (sc->jme_tq != NULL) {
  872                 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
  873                 taskqueue_free(sc->jme_tq);
  874                 sc->jme_tq = NULL;
  875         }
  876 
  877         if (sc->jme_miibus != NULL) {
  878                 device_delete_child(dev, sc->jme_miibus);
  879                 sc->jme_miibus = NULL;
  880         }
  881         bus_generic_detach(dev);
  882         jme_dma_free(sc);
  883 
  884         if (ifp != NULL) {
  885                 if_free(ifp);
  886                 sc->jme_ifp = NULL;
  887         }
  888 
  889         msic = 1;
  890         if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
  891                 msic = JME_MSIX_MESSAGES;
  892         else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
  893                 msic = JME_MSI_MESSAGES;
  894         else
  895                 msic = 1;
  896         for (i = 0; i < msic; i++) {
  897                 if (sc->jme_intrhand[i] != NULL) {
  898                         bus_teardown_intr(dev, sc->jme_irq[i],
  899                             sc->jme_intrhand[i]);
  900                         sc->jme_intrhand[i] = NULL;
  901                 }
  902         }
  903 
  904         bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
  905         if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
  906                 pci_release_msi(dev);
  907         bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
  908         mtx_destroy(&sc->jme_mtx);
  909 
  910         return (0);
  911 }
  912 
  913 static void
  914 jme_sysctl_node(struct jme_softc *sc)
  915 {
  916         int error;
  917 
  918         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
  919             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
  920             "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to,
  921             0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
  922 
  923         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
  924             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
  925             "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt,
  926             0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
  927 
  928         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
  929             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
  930             "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to,
  931             0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
  932 
  933         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
  934             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
  935             "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt,
  936             0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
  937 
  938         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
  939             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
  940             "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit,
  941             0, sysctl_hw_jme_proc_limit, "I",
  942             "max number of Rx events to process");
  943 
  944         /* Pull in device tunables. */
  945         sc->jme_process_limit = JME_PROC_DEFAULT;
  946         error = resource_int_value(device_get_name(sc->jme_dev),
  947             device_get_unit(sc->jme_dev), "process_limit",
  948             &sc->jme_process_limit);
  949         if (error == 0) {
  950                 if (sc->jme_process_limit < JME_PROC_MIN ||
  951                     sc->jme_process_limit > JME_PROC_MAX) {
  952                         device_printf(sc->jme_dev,
  953                             "process_limit value out of range; "
  954                             "using default: %d\n", JME_PROC_DEFAULT);
  955                         sc->jme_process_limit = JME_PROC_DEFAULT;
  956                 }
  957         }
  958 
  959         sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
  960         error = resource_int_value(device_get_name(sc->jme_dev),
  961             device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
  962         if (error == 0) {
  963                 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
  964                     sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
  965                         device_printf(sc->jme_dev,
  966                             "tx_coal_to value out of range; "
  967                             "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
  968                         sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
  969                 }
  970         }
  971 
  972         sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
  973         error = resource_int_value(device_get_name(sc->jme_dev),
   974             device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
  975         if (error == 0) {
  976                 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
  977                     sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
  978                         device_printf(sc->jme_dev,
  979                             "tx_coal_pkt value out of range; "
  980                             "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
  981                         sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
  982                 }
  983         }
  984 
  985         sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
  986         error = resource_int_value(device_get_name(sc->jme_dev),
  987             device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
  988         if (error == 0) {
  989                 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
  990                     sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
  991                         device_printf(sc->jme_dev,
  992                             "rx_coal_to value out of range; "
  993                             "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
  994                         sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
  995                 }
  996         }
  997 
  998         sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
  999         error = resource_int_value(device_get_name(sc->jme_dev),
  1000             device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
 1001         if (error == 0) {
 1002                 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
 1003                     sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
 1004                         device_printf(sc->jme_dev,
  1005                             "rx_coal_pkt value out of range; "
 1006                             "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
 1007                         sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
 1008                 }
 1009         }
 1010 }
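       /*
        * The nodes created above live under the device's sysctl tree, and
        * the same names can be pre-seeded through device hints.
        * Illustrative examples for the first instance (the
        * sysctl_int_range()-based handlers reject values outside their
        * allowed range):
        *
        *   # sysctl dev.jme.0.process_limit=<n>
        *   # sysctl dev.jme.0.rx_coal_to=<value>
        *
        * or in /boot/device.hints:
        *
        *   hint.jme.0.process_limit="<n>"
        */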
 1011 
 1012 struct jme_dmamap_arg {
 1013         bus_addr_t      jme_busaddr;
 1014 };
 1015 
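       /*
        * bus_dmamap_load() callback: busdma reports the segment list once
        * a mapping has completed.  The loads that use this callback all
        * request a single segment (the KASSERT below enforces that), so
        * we simply record the bus address of segs[0] for the caller.
        */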
 1016 static void
 1017 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1018 {
 1019         struct jme_dmamap_arg *ctx;
 1020 
 1021         if (error != 0)
 1022                 return;
 1023 
 1024         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1025 
 1026         ctx = (struct jme_dmamap_arg *)arg;
 1027         ctx->jme_busaddr = segs[0].ds_addr;
 1028 }
 1029 
 1030 static int
 1031 jme_dma_alloc(struct jme_softc *sc)
 1032 {
 1033         struct jme_dmamap_arg ctx;
 1034         struct jme_txdesc *txd;
 1035         struct jme_rxdesc *rxd;
 1036         bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
 1037         int error, i;
 1038 
 1039         lowaddr = BUS_SPACE_MAXADDR;
 1040 
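               /*
                * Ring allocation is first attempted with an unrestricted
                * DMA window; if the rings end up straddling a 4GB boundary
                * the allocations are torn down and retried from here with
                * lowaddr limited to 32 bits.
                */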
 1041 again:
 1042         /* Create parent ring tag. */
 1043         error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
 1044             1, 0,                       /* algnmnt, boundary */
 1045             lowaddr,                    /* lowaddr */
 1046             BUS_SPACE_MAXADDR,          /* highaddr */
 1047             NULL, NULL,                 /* filter, filterarg */
 1048             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1049             0,                          /* nsegments */
 1050             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1051             0,                          /* flags */
 1052             NULL, NULL,                 /* lockfunc, lockarg */
 1053             &sc->jme_cdata.jme_ring_tag);
 1054         if (error != 0) {
 1055                 device_printf(sc->jme_dev,
 1056                     "could not create parent ring DMA tag.\n");
 1057                 goto fail;
 1058         }
 1059         /* Create tag for Tx ring. */
 1060         error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
 1061             JME_TX_RING_ALIGN, 0,       /* algnmnt, boundary */
 1062             BUS_SPACE_MAXADDR,          /* lowaddr */
 1063             BUS_SPACE_MAXADDR,          /* highaddr */
 1064             NULL, NULL,                 /* filter, filterarg */
 1065             JME_TX_RING_SIZE,           /* maxsize */
 1066             1,                          /* nsegments */
 1067             JME_TX_RING_SIZE,           /* maxsegsize */
 1068             0,                          /* flags */
 1069             NULL, NULL,                 /* lockfunc, lockarg */
 1070             &sc->jme_cdata.jme_tx_ring_tag);
 1071         if (error != 0) {
 1072                 device_printf(sc->jme_dev,
 1073                     "could not allocate Tx ring DMA tag.\n");
 1074                 goto fail;
 1075         }
 1076 
 1077         /* Create tag for Rx ring. */
 1078         error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
 1079             JME_RX_RING_ALIGN, 0,       /* algnmnt, boundary */
 1080             lowaddr,                    /* lowaddr */
 1081             BUS_SPACE_MAXADDR,          /* highaddr */
 1082             NULL, NULL,                 /* filter, filterarg */
 1083             JME_RX_RING_SIZE,           /* maxsize */
 1084             1,                          /* nsegments */
 1085             JME_RX_RING_SIZE,           /* maxsegsize */
 1086             0,                          /* flags */
 1087             NULL, NULL,                 /* lockfunc, lockarg */
 1088             &sc->jme_cdata.jme_rx_ring_tag);
 1089         if (error != 0) {
 1090                 device_printf(sc->jme_dev,
 1091                     "could not allocate Rx ring DMA tag.\n");
 1092                 goto fail;
 1093         }
 1094 
 1095         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
 1096         error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
 1097             (void **)&sc->jme_rdata.jme_tx_ring,
 1098             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1099             &sc->jme_cdata.jme_tx_ring_map);
 1100         if (error != 0) {
 1101                 device_printf(sc->jme_dev,
 1102                     "could not allocate DMA'able memory for Tx ring.\n");
 1103                 goto fail;
 1104         }
 1105 
 1106         ctx.jme_busaddr = 0;
 1107         error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
 1108             sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
 1109             JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 1110         if (error != 0 || ctx.jme_busaddr == 0) {
 1111                 device_printf(sc->jme_dev,
 1112                     "could not load DMA'able memory for Tx ring.\n");
 1113                 goto fail;
 1114         }
 1115         sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
 1116 
 1117         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 1118         error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
 1119             (void **)&sc->jme_rdata.jme_rx_ring,
 1120             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1121             &sc->jme_cdata.jme_rx_ring_map);
 1122         if (error != 0) {
 1123                 device_printf(sc->jme_dev,
 1124                     "could not allocate DMA'able memory for Rx ring.\n");
 1125                 goto fail;
 1126         }
 1127 
 1128         ctx.jme_busaddr = 0;
 1129         error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
 1130             sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
 1131             JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 1132         if (error != 0 || ctx.jme_busaddr == 0) {
 1133                 device_printf(sc->jme_dev,
 1134                     "could not load DMA'able memory for Rx ring.\n");
 1135                 goto fail;
 1136         }
 1137         sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
 1138 
  1139         /* The Tx/Rx descriptor rings must not cross a 4GB boundary. */
 1140         tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
 1141         rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
 1142         if ((JME_ADDR_HI(tx_ring_end) !=
 1143             JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
 1144             (JME_ADDR_HI(rx_ring_end) !=
 1145             JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
 1146                 device_printf(sc->jme_dev, "4GB boundary crossed, "
 1147                     "switching to 32bit DMA address mode.\n");
 1148                 jme_dma_free(sc);
 1149                 /* Limit DMA address space to 32bit and try again. */
 1150                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
 1151                 goto again;
 1152         }
 1153 
 1154         /* Create parent buffer tag. */
 1155         error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
 1156             1, 0,                       /* algnmnt, boundary */
 1157             BUS_SPACE_MAXADDR,          /* lowaddr */
 1158             BUS_SPACE_MAXADDR,          /* highaddr */
 1159             NULL, NULL,                 /* filter, filterarg */
 1160             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1161             0,                          /* nsegments */
 1162             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1163             0,                          /* flags */
 1164             NULL, NULL,                 /* lockfunc, lockarg */
 1165             &sc->jme_cdata.jme_buffer_tag);
 1166         if (error != 0) {
 1167                 device_printf(sc->jme_dev,
 1168                     "could not create parent buffer DMA tag.\n");
 1169                 goto fail;
 1170         }
 1171 
 1172         /* Create shadow status block tag. */
 1173         error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
 1174             JME_SSB_ALIGN, 0,           /* algnmnt, boundary */
 1175             BUS_SPACE_MAXADDR,          /* lowaddr */
 1176             BUS_SPACE_MAXADDR,          /* highaddr */
 1177             NULL, NULL,                 /* filter, filterarg */
 1178             JME_SSB_SIZE,               /* maxsize */
 1179             1,                          /* nsegments */
 1180             JME_SSB_SIZE,               /* maxsegsize */
 1181             0,                          /* flags */
 1182             NULL, NULL,                 /* lockfunc, lockarg */
 1183             &sc->jme_cdata.jme_ssb_tag);
 1184         if (error != 0) {
 1185                 device_printf(sc->jme_dev,
 1186                     "could not create shared status block DMA tag.\n");
 1187                 goto fail;
 1188         }
 1189 
 1190         /* Create tag for Tx buffers. */
 1191         error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
 1192             1, 0,                       /* algnmnt, boundary */
 1193             BUS_SPACE_MAXADDR,          /* lowaddr */
 1194             BUS_SPACE_MAXADDR,          /* highaddr */
 1195             NULL, NULL,                 /* filter, filterarg */
 1196             JME_TSO_MAXSIZE,            /* maxsize */
 1197             JME_MAXTXSEGS,              /* nsegments */
 1198             JME_TSO_MAXSEGSIZE,         /* maxsegsize */
 1199             0,                          /* flags */
 1200             NULL, NULL,                 /* lockfunc, lockarg */
 1201             &sc->jme_cdata.jme_tx_tag);
 1202         if (error != 0) {
 1203                 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
 1204                 goto fail;
 1205         }
 1206 
 1207         /* Create tag for Rx buffers. */
 1208         error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
 1209             JME_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
 1210             BUS_SPACE_MAXADDR,          /* lowaddr */
 1211             BUS_SPACE_MAXADDR,          /* highaddr */
 1212             NULL, NULL,                 /* filter, filterarg */
 1213             MCLBYTES,                   /* maxsize */
 1214             1,                          /* nsegments */
 1215             MCLBYTES,                   /* maxsegsize */
 1216             0,                          /* flags */
 1217             NULL, NULL,                 /* lockfunc, lockarg */
 1218             &sc->jme_cdata.jme_rx_tag);
 1219         if (error != 0) {
 1220                 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
 1221                 goto fail;
 1222         }
 1223 
 1224         /*
 1225          * Allocate DMA'able memory and load the DMA map for shared
 1226          * status block.
 1227          */
 1228         error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
 1229             (void **)&sc->jme_rdata.jme_ssb_block,
 1230             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1231             &sc->jme_cdata.jme_ssb_map);
 1232         if (error != 0) {
 1233                 device_printf(sc->jme_dev, "could not allocate DMA'able "
 1234                     "memory for shared status block.\n");
 1235                 goto fail;
 1236         }
 1237 
 1238         ctx.jme_busaddr = 0;
 1239         error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
 1240             sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
 1241             JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 1242         if (error != 0 || ctx.jme_busaddr == 0) {
 1243                 device_printf(sc->jme_dev, "could not load DMA'able memory "
 1244                     "for shared status block.\n");
 1245                 goto fail;
 1246         }
 1247         sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
 1248 
 1249         /* Create DMA maps for Tx buffers. */
 1250         for (i = 0; i < JME_TX_RING_CNT; i++) {
 1251                 txd = &sc->jme_cdata.jme_txdesc[i];
 1252                 txd->tx_m = NULL;
 1253                 txd->tx_dmamap = NULL;
 1254                 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
 1255                     &txd->tx_dmamap);
 1256                 if (error != 0) {
 1257                         device_printf(sc->jme_dev,
 1258                             "could not create Tx dmamap.\n");
 1259                         goto fail;
 1260                 }
 1261         }
 1262         /* Create DMA maps for Rx buffers. */
 1263         if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
 1264             &sc->jme_cdata.jme_rx_sparemap)) != 0) {
 1265                 device_printf(sc->jme_dev,
 1266                     "could not create spare Rx dmamap.\n");
 1267                 goto fail;
 1268         }
 1269         for (i = 0; i < JME_RX_RING_CNT; i++) {
 1270                 rxd = &sc->jme_cdata.jme_rxdesc[i];
 1271                 rxd->rx_m = NULL;
 1272                 rxd->rx_dmamap = NULL;
 1273                 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
 1274                     &rxd->rx_dmamap);
 1275                 if (error != 0) {
 1276                         device_printf(sc->jme_dev,
 1277                             "could not create Rx dmamap.\n");
 1278                         goto fail;
 1279                 }
 1280         }
 1281 
 1282 fail:
 1283         return (error);
 1284 }
 1285 
 1286 static void
 1287 jme_dma_free(struct jme_softc *sc)
 1288 {
 1289         struct jme_txdesc *txd;
 1290         struct jme_rxdesc *rxd;
 1291         int i;
 1292 
 1293         /* Tx ring */
 1294         if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
 1295                 if (sc->jme_cdata.jme_tx_ring_map)
 1296                         bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
 1297                             sc->jme_cdata.jme_tx_ring_map);
 1298                 if (sc->jme_cdata.jme_tx_ring_map &&
 1299                     sc->jme_rdata.jme_tx_ring)
 1300                         bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
 1301                             sc->jme_rdata.jme_tx_ring,
 1302                             sc->jme_cdata.jme_tx_ring_map);
 1303                 sc->jme_rdata.jme_tx_ring = NULL;
 1304                 sc->jme_cdata.jme_tx_ring_map = NULL;
 1305                 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
 1306                 sc->jme_cdata.jme_tx_ring_tag = NULL;
 1307         }
 1308         /* Rx ring */
 1309         if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
 1310                 if (sc->jme_cdata.jme_rx_ring_map)
 1311                         bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
 1312                             sc->jme_cdata.jme_rx_ring_map);
 1313                 if (sc->jme_cdata.jme_rx_ring_map &&
 1314                     sc->jme_rdata.jme_rx_ring)
 1315                         bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
 1316                             sc->jme_rdata.jme_rx_ring,
 1317                             sc->jme_cdata.jme_rx_ring_map);
 1318                 sc->jme_rdata.jme_rx_ring = NULL;
 1319                 sc->jme_cdata.jme_rx_ring_map = NULL;
 1320                 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
 1321                 sc->jme_cdata.jme_rx_ring_tag = NULL;
 1322         }
 1323         /* Tx buffers */
 1324         if (sc->jme_cdata.jme_tx_tag != NULL) {
 1325                 for (i = 0; i < JME_TX_RING_CNT; i++) {
 1326                         txd = &sc->jme_cdata.jme_txdesc[i];
 1327                         if (txd->tx_dmamap != NULL) {
 1328                                 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
 1329                                     txd->tx_dmamap);
 1330                                 txd->tx_dmamap = NULL;
 1331                         }
 1332                 }
 1333                 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
 1334                 sc->jme_cdata.jme_tx_tag = NULL;
 1335         }
 1336         /* Rx buffers */
 1337         if (sc->jme_cdata.jme_rx_tag != NULL) {
 1338                 for (i = 0; i < JME_RX_RING_CNT; i++) {
 1339                         rxd = &sc->jme_cdata.jme_rxdesc[i];
 1340                         if (rxd->rx_dmamap != NULL) {
 1341                                 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
 1342                                     rxd->rx_dmamap);
 1343                                 rxd->rx_dmamap = NULL;
 1344                         }
 1345                 }
 1346                 if (sc->jme_cdata.jme_rx_sparemap != NULL) {
 1347                         bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
 1348                             sc->jme_cdata.jme_rx_sparemap);
 1349                         sc->jme_cdata.jme_rx_sparemap = NULL;
 1350                 }
 1351                 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
 1352                 sc->jme_cdata.jme_rx_tag = NULL;
 1353         }
 1354 
 1355         /* Shared status block. */
 1356         if (sc->jme_cdata.jme_ssb_tag != NULL) {
 1357                 if (sc->jme_cdata.jme_ssb_map)
 1358                         bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
 1359                             sc->jme_cdata.jme_ssb_map);
 1360                 if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
 1361                         bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
 1362                             sc->jme_rdata.jme_ssb_block,
 1363                             sc->jme_cdata.jme_ssb_map);
 1364                 sc->jme_rdata.jme_ssb_block = NULL;
 1365                 sc->jme_cdata.jme_ssb_map = NULL;
 1366                 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
 1367                 sc->jme_cdata.jme_ssb_tag = NULL;
 1368         }
 1369 
 1370         if (sc->jme_cdata.jme_buffer_tag != NULL) {
 1371                 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
 1372                 sc->jme_cdata.jme_buffer_tag = NULL;
 1373         }
 1374         if (sc->jme_cdata.jme_ring_tag != NULL) {
 1375                 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
 1376                 sc->jme_cdata.jme_ring_tag = NULL;
 1377         }
 1378 }
 1379 
 1380 /*
 1381  *      Make sure the interface is stopped at reboot time.
 1382  */
 1383 static int
 1384 jme_shutdown(device_t dev)
 1385 {
 1386 
 1387         return (jme_suspend(dev));
 1388 }
 1389 
 1390 /*
 1391  * Unlike other ethernet controllers, the JMC250 requires the link
 1392  * speed to be explicitly reset to 10/100Mbps because a gigabit
 1393  * link consumes more than 375mA.
 1394  * Note, we reset the link speed to 10/100Mbps with
 1395  * auto-negotiation, but we don't know whether that operation
 1396  * will succeed, as we have no control after powering
 1397  * off.  If the renegotiation fails, WOL may not work.  Running
 1398  * at 1Gbps draws more power than the 375mA at 3.3V that is
 1399  * specified in the PCI specification, and that would result in
 1400  * power to the ethernet controller being shut down completely.
 1401  *
 1402  * TODO
 1403  *  Save the currently negotiated media speed/duplex/flow-control
 1404  *  in the softc and restore the same link after resuming.
 1405  *  PHY handling such as powering down/resetting to 100Mbps
 1406  *  may be better handled in the suspend method of the phy driver.
 1407  */
 1408 static void
 1409 jme_setlinkspeed(struct jme_softc *sc)
 1410 {
 1411         struct mii_data *mii;
 1412         int aneg, i;
 1413 
 1414         JME_LOCK_ASSERT(sc);
 1415 
 1416         mii = device_get_softc(sc->jme_miibus);
 1417         mii_pollstat(mii);
 1418         aneg = 0;
 1419         if ((mii->mii_media_status & IFM_AVALID) != 0) {
 1420                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 1421                 case IFM_10_T:
 1422                 case IFM_100_TX:
 1423                         return;
 1424                 case IFM_1000_T:
 1425                         aneg++;
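                        /* FALLTHROUGH */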
 1426                 default:
 1427                         break;
 1428                 }
 1429         }
 1430         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
 1431         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
 1432             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
 1433         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
 1434             BMCR_AUTOEN | BMCR_STARTNEG);
 1435         DELAY(1000);
 1436         if (aneg != 0) {
 1437                 /* Poll link state until jme(4) gets a 10/100 link. */
 1438                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
 1439                         mii_pollstat(mii);
 1440                         if ((mii->mii_media_status & IFM_AVALID) != 0) {
 1441                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 1442                                 case IFM_10_T:
 1443                                 case IFM_100_TX:
 1444                                         jme_mac_config(sc);
 1445                                         return;
 1446                                 default:
 1447                                         break;
 1448                                 }
 1449                         }
 1450 #if __FreeBSD_version < 700000
 1451                         msleep(sc, &sc->jme_mtx, PPAUSE, "jmelnk", hz);
 1452 #else
 1453                         JME_UNLOCK(sc);
 1454                         pause("jmelnk", hz);
 1455                         JME_LOCK(sc);
 1456 #endif
 1457                 }
 1458                 if (i == MII_ANEGTICKS_GIGE)
 1459                         device_printf(sc->jme_dev, "establishing link failed, "
 1460                             "WOL may not work!\n");
 1461         }
 1462         /*
 1463          * No link; force the MAC to a 100Mbps, full-duplex link.
 1464          * This is the last resort and may or may not work.
 1465          */
 1466         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
 1467         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
 1468         jme_mac_config(sc);
 1469 }
 1470 
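/*
 * Program wake-on-LAN.  If the device has no PME capability or WOL is
 * not enabled, the PHY is simply powered down.  Otherwise magic-frame
 * detection and PME messaging are enabled and, on gigabit parts, the
 * link is dropped to 10/100Mbps to stay within the PCI power budget.
 */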
 1471 static void
 1472 jme_setwol(struct jme_softc *sc)
 1473 {
 1474         struct ifnet *ifp;
 1475         uint32_t gpr, pmcs;
 1476         uint16_t pmstat;
 1477         int pmc;
 1478 
 1479         JME_LOCK_ASSERT(sc);
 1480 
 1481         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
 1482                 /* No PME capability, PHY power down. */
 1483                 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
 1484                     MII_BMCR, BMCR_PDOWN);
 1485                 return;
 1486         }
 1487 
 1488         ifp = sc->jme_ifp;
 1489         gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
 1490         pmcs = CSR_READ_4(sc, JME_PMCS);
 1491         pmcs &= ~PMCS_WOL_ENB_MASK;
 1492         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
 1493                 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
 1494                 /* Enable PME message. */
 1495                 gpr |= GPREG0_PME_ENB;
 1496                 /* For gigabit controllers, reset link speed to 10/100. */
 1497                 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
 1498                         jme_setlinkspeed(sc);
 1499         }
 1500 
 1501         CSR_WRITE_4(sc, JME_PMCS, pmcs);
 1502         CSR_WRITE_4(sc, JME_GPREG0, gpr);
 1503 
 1504         /* Request PME. */
 1505         pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
 1506         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 1507         if ((ifp->if_capenable & IFCAP_WOL) != 0)
 1508                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 1509         pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 1510         if ((ifp->if_capenable & IFCAP_WOL) == 0) {
 1511                 /* No WOL, PHY power down. */
 1512                 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
 1513                     MII_BMCR, BMCR_PDOWN);
 1514         }
 1515 }
 1516 
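/*
 * Suspend handler: stop the controller and arm wake-on-LAN with the
 * driver lock held.  jme_shutdown() reuses this path at reboot time.
 */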
 1517 static int
 1518 jme_suspend(device_t dev)
 1519 {
 1520         struct jme_softc *sc;
 1521 
 1522         sc = device_get_softc(dev);
 1523 
 1524         JME_LOCK(sc);
 1525         jme_stop(sc);
 1526         jme_setwol(sc);
 1527         JME_UNLOCK(sc);
 1528 
 1529         return (0);
 1530 }
 1531 
 1532 static int
 1533 jme_resume(device_t dev)
 1534 {
 1535         struct jme_softc *sc;
 1536         struct ifnet *ifp;
 1537         uint16_t pmstat;
 1538         int pmc;
 1539 
 1540         sc = device_get_softc(dev);
 1541 
 1542         JME_LOCK(sc);
 1543         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
 1544                 pmstat = pci_read_config(sc->jme_dev,
 1545                     pmc + PCIR_POWER_STATUS, 2);
 1546                 /* Disable PME and clear PME status. */
 1547                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
 1548                 pci_write_config(sc->jme_dev,
 1549                     pmc + PCIR_POWER_STATUS, pmstat, 2);
 1550         }
 1551         ifp = sc->jme_ifp;
 1552         if ((ifp->if_flags & IFF_UP) != 0)
 1553                 jme_init_locked(sc);
 1554 
 1555         JME_UNLOCK(sc);
 1556 
 1557         return (0);
 1558 }
 1559 
 1560 static int
 1561 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
 1562 {
 1563         struct jme_txdesc *txd;
 1564         struct jme_desc *desc;
 1565         struct mbuf *m;
 1566 #if __FreeBSD_version < 700000
 1567         struct m_tag *mtag;
 1568 #endif
 1569         bus_dma_segment_t txsegs[JME_MAXTXSEGS];
 1570         int error, i, nsegs, prod;
 1571         uint32_t cflags, tso_segsz;
 1572 
 1573         JME_LOCK_ASSERT(sc);
 1574 
 1575         M_ASSERTPKTHDR((*m_head));
 1576 
 1577         if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 1578                 /*
 1579                  * Due to its adherence to the NDIS specification, the
 1580                  * JMC250 assumes the upper stack computed the TCP pseudo
 1581                  * checksum without including the payload length.  This
 1582                  * breaks checksum offload for the TSO case, so recompute
 1583                  * the TCP pseudo checksum for the JMC250.  Hopefully this
 1584                  * isn't much of a burden on modern CPUs.
 1585                  */
 1586                 struct ether_header *eh;
 1587                 struct ip *ip;
 1588                 struct tcphdr *tcp;
 1589                 uint32_t ip_off, poff;
 1590 
 1591                 if (M_WRITABLE(*m_head) == 0) {
 1592                         /* Get a writable copy. */
 1593                         m = m_dup(*m_head, M_DONTWAIT);
 1594                         m_freem(*m_head);
 1595                         if (m == NULL) {
 1596                                 *m_head = NULL;
 1597                                 return (ENOBUFS);
 1598                         }
 1599                         *m_head = m;
 1600                 }
 1601                 ip_off = sizeof(struct ether_header);
 1602                 m = m_pullup(*m_head, ip_off);
 1603                 if (m == NULL) {
 1604                         *m_head = NULL;
 1605                         return (ENOBUFS);
 1606                 }
 1607                 eh = mtod(m, struct ether_header *);
 1608                 /* Check for the presence of a VLAN tag. */
 1609                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 1610                         ip_off = sizeof(struct ether_vlan_header);
 1611                         m = m_pullup(m, ip_off);
 1612                         if (m == NULL) {
 1613                                 *m_head = NULL;
 1614                                 return (ENOBUFS);
 1615                         }
 1616                 }
 1617                 m = m_pullup(m, ip_off + sizeof(struct ip));
 1618                 if (m == NULL) {
 1619                         *m_head = NULL;
 1620                         return (ENOBUFS);
 1621                 }
 1622                 ip = (struct ip *)(mtod(m, char *) + ip_off);
 1623                 poff = ip_off + (ip->ip_hl << 2);
 1624                 m = m_pullup(m, poff + sizeof(struct tcphdr));
 1625                 if (m == NULL) {
 1626                         *m_head = NULL;
 1627                         return (ENOBUFS);
 1628                 }
 1629                 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 1630                 /*
 1631                  * Reset IP checksum and recompute TCP pseudo
 1632                  * checksum that NDIS specification requires.
 1633                  */
 1634                 ip->ip_sum = 0;
 1635                 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
 1636                         tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
 1637                             ip->ip_dst.s_addr,
 1638                             htons((tcp->th_off << 2) + IPPROTO_TCP));
 1639                         /* No need to TSO, force IP checksum offload. */
 1640                         (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
 1641                         (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
 1642                 } else
 1643                         tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
 1644                             ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 1645                 *m_head = m;
 1646         }
 1647 
 1648         prod = sc->jme_cdata.jme_tx_prod;
 1649         txd = &sc->jme_cdata.jme_txdesc[prod];
 1650 
 1651         error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
 1652             txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
 1653         if (error == EFBIG) {
 1654                 m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
 1655                 if (m == NULL) {
 1656                         m_freem(*m_head);
 1657                         *m_head = NULL;
 1658                         return (ENOMEM);
 1659                 }
 1660                 *m_head = m;
 1661                 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
 1662                     txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
 1663                 if (error != 0) {
 1664                         m_freem(*m_head);
 1665                         *m_head = NULL;
 1666                         return (error);
 1667                 }
 1668         } else if (error != 0)
 1669                 return (error);
 1670         if (nsegs == 0) {
 1671                 m_freem(*m_head);
 1672                 *m_head = NULL;
 1673                 return (EIO);
 1674         }
 1675 
 1676         /*
 1677          * Check for descriptor overrun.  Leave one free descriptor.
 1678          * Since we always use 64bit address mode for transmitting,
 1679          * each Tx request requires one extra dummy descriptor.
 1680          */
 1681         if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
 1682                 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
 1683                 return (ENOBUFS);
 1684         }
 1685 
 1686         m = *m_head;
 1687         cflags = 0;
 1688         tso_segsz = 0;
 1689         /* Configure checksum offload and TSO. */
 1690         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 1691 #if __FreeBSD_version >= 700000
 1692                 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
 1693                     JME_TD_MSS_SHIFT;
 1694 #endif
 1695                 cflags |= JME_TD_TSO;
 1696         } else {
 1697                 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
 1698                         cflags |= JME_TD_IPCSUM;
 1699                 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
 1700                         cflags |= JME_TD_TCPCSUM;
 1701                 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 1702                         cflags |= JME_TD_UDPCSUM;
 1703         }
 1704         /* Configure VLAN. */
 1705 #if __FreeBSD_version < 700000
 1706         mtag = VLAN_OUTPUT_TAG(sc->jme_ifp, m);
 1707         if (mtag != NULL) {
 1708                 cflags |= (VLAN_TAG_VALUE(mtag) & JME_TD_VLAN_MASK);
 1709                 cflags |= JME_TD_VLAN_TAG;
 1710         }
 1711 #else
 1712         if ((m->m_flags & M_VLANTAG) != 0) {
 1713                 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
 1714                 cflags |= JME_TD_VLAN_TAG;
 1715         }
 1716 #endif
 1717 
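        /*
         * Set up the head descriptor.  It carries the checksum/TSO/VLAN
         * control flags, the TSO segment size (in buflen) and the total
         * packet length (in addr_hi); the data segments follow in the
         * subsequent descriptors.
         */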
 1718         desc = &sc->jme_rdata.jme_tx_ring[prod];
 1719         desc->flags = htole32(cflags);
 1720         desc->buflen = htole32(tso_segsz);
 1721         desc->addr_hi = htole32(m->m_pkthdr.len);
 1722         desc->addr_lo = 0;
 1723         sc->jme_cdata.jme_tx_cnt++;
 1724         JME_DESC_INC(prod, JME_TX_RING_CNT);
 1725         for (i = 0; i < nsegs; i++) {
 1726                 desc = &sc->jme_rdata.jme_tx_ring[prod];
 1727                 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
 1728                 desc->buflen = htole32(txsegs[i].ds_len);
 1729                 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
 1730                 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
 1731                 sc->jme_cdata.jme_tx_cnt++;
 1732                 JME_DESC_INC(prod, JME_TX_RING_CNT);
 1733         }
 1734 
 1735         /* Update producer index. */
 1736         sc->jme_cdata.jme_tx_prod = prod;
 1737         /*
 1738          * Finally, request an interrupt and give ownership of the
 1739          * first descriptor to the hardware.
 1740          */
 1741         desc = txd->tx_desc;
 1742         desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
 1743 
 1744         txd->tx_m = m;
 1745         txd->tx_ndesc = nsegs + 1;
 1746 
 1747         /* Sync descriptors. */
 1748         bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
 1749             BUS_DMASYNC_PREWRITE);
 1750         bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
 1751             sc->jme_cdata.jme_tx_ring_map,
 1752             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1753 
 1754         return (0);
 1755 }
 1756 
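/*
 * Deferred start handler run from the driver taskqueue; it simply
 * hands the interface off to jme_start().
 */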
 1757 static void
 1758 jme_tx_task(void *arg, int pending)
 1759 {
 1760         struct ifnet *ifp;
 1761 
 1762         ifp = (struct ifnet *)arg;
 1763         jme_start(ifp);
 1764 }
 1765 
 1766 static void
 1767 jme_start(struct ifnet *ifp)
 1768 {
 1769         struct jme_softc *sc;
 1770         struct mbuf *m_head;
 1771         int enq;
 1772 
 1773         sc = ifp->if_softc;
 1774 
 1775         JME_LOCK(sc);
 1776 
 1777         if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
 1778                 jme_txeof(sc);
 1779 
 1780         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1781             IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
 1782                 JME_UNLOCK(sc);
 1783                 return;
 1784         }
 1785 
 1786         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
 1787                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1788                 if (m_head == NULL)
 1789                         break;
 1790                 /*
 1791                  * Pack the data into the transmit ring. If we
 1792                  * don't have room, set the OACTIVE flag and wait
 1793                  * for the NIC to drain the ring.
 1794                  */
 1795                 if (jme_encap(sc, &m_head)) {
 1796                         if (m_head == NULL)
 1797                                 break;
 1798                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1799                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1800                         break;
 1801                 }
 1802 
 1803                 enq++;
 1804                 /*
 1805                  * If there's a BPF listener, bounce a copy of this frame
 1806                  * to him.
 1807                  */
 1808                 ETHER_BPF_MTAP(ifp, m_head);
 1809         }
 1810 
 1811         if (enq > 0) {
 1812                 /*
 1813                  * Reading TXCSR takes a very long time under heavy load,
 1814                  * so cache the TXCSR value and write the ORed value with
 1815                  * the kick command to TXCSR.  This saves one register
 1816                  * access cycle.
 1817                  */
 1818                 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
 1819                     TXCSR_TXQ_N_START(TXCSR_TXQ0));
 1820                 /* Set a timeout in case the chip goes out to lunch. */
 1821                 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
 1822         }
 1823 
 1824         JME_UNLOCK(sc);
 1825 }
 1826 
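/*
 * Transmit watchdog, run from jme_tick().  If the transmit timer
 * expires, try to recover: reinitialize on a lost link, requeue work
 * if all descriptors were already reclaimed, otherwise reset the
 * controller and restart transmission.
 */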
 1827 static void
 1828 jme_watchdog(struct jme_softc *sc)
 1829 {
 1830         struct ifnet *ifp;
 1831 
 1832         JME_LOCK_ASSERT(sc);
 1833 
 1834         if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
 1835                 return;
 1836 
 1837         ifp = sc->jme_ifp;
 1838         if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
 1839                 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
 1840                 ifp->if_oerrors++;
 1841                 jme_init_locked(sc);
 1842                 return;
 1843         }
 1844         jme_txeof(sc);
 1845         if (sc->jme_cdata.jme_tx_cnt == 0) {
 1846                 if_printf(sc->jme_ifp,
 1847                     "watchdog timeout (missed Tx interrupts) -- recovering\n");
 1848                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1849                         taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
 1850                 return;
 1851         }
 1852 
 1853         if_printf(sc->jme_ifp, "watchdog timeout\n");
 1854         ifp->if_oerrors++;
 1855         jme_init_locked(sc);
 1856         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1857                 taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
 1858 }
 1859 
 1860 static int
 1861 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1862 {
 1863         struct jme_softc *sc;
 1864         struct ifreq *ifr;
 1865         struct mii_data *mii;
 1866         uint32_t reg;
 1867         int error, mask;
 1868 
 1869         sc = ifp->if_softc;
 1870         ifr = (struct ifreq *)data;
 1871         error = 0;
 1872         switch (cmd) {
 1873         case SIOCSIFMTU:
 1874                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
 1875                     ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
 1876                     ifr->ifr_mtu > JME_MAX_MTU)) {
 1877                         error = EINVAL;
 1878                         break;
 1879                 }
 1880 
 1881                 if (ifp->if_mtu != ifr->ifr_mtu) {
 1882                         /*
 1883                          * No special configuration is required when the
 1884                          * interface MTU is changed, but the availability of
 1885                          * TSO/Tx checksum offload should be checked against
 1886                          * the new MTU size as the Tx FIFO size is just 2K.
 1887                          */
 1888                         JME_LOCK(sc);
 1889                         if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
 1890                                 ifp->if_capenable &=
 1891                                     ~(IFCAP_TXCSUM | IFCAP_TSO4);
 1892                                 ifp->if_hwassist &=
 1893                                     ~(JME_CSUM_FEATURES | CSUM_TSO);
 1894                                 VLAN_CAPABILITIES(ifp);
 1895                         }
 1896                         ifp->if_mtu = ifr->ifr_mtu;
 1897                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1898                                 jme_init_locked(sc);
 1899                         JME_UNLOCK(sc);
 1900                 }
 1901                 break;
 1902         case SIOCSIFFLAGS:
 1903                 JME_LOCK(sc);
 1904                 if ((ifp->if_flags & IFF_UP) != 0) {
 1905                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1906                                 if (((ifp->if_flags ^ sc->jme_if_flags)
 1907                                     & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
 1908                                         jme_set_filter(sc);
 1909                         } else {
 1910                                 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
 1911                                         jme_init_locked(sc);
 1912                         }
 1913                 } else {
 1914                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1915                                 jme_stop(sc);
 1916                 }
 1917                 sc->jme_if_flags = ifp->if_flags;
 1918                 JME_UNLOCK(sc);
 1919                 break;
 1920         case SIOCADDMULTI:
 1921         case SIOCDELMULTI:
 1922                 JME_LOCK(sc);
 1923                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1924                         jme_set_filter(sc);
 1925                 JME_UNLOCK(sc);
 1926                 break;
 1927         case SIOCSIFMEDIA:
 1928         case SIOCGIFMEDIA:
 1929                 mii = device_get_softc(sc->jme_miibus);
 1930                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1931                 break;
 1932         case SIOCSIFCAP:
 1933                 JME_LOCK(sc);
 1934                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1935                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1936                     ifp->if_mtu < JME_TX_FIFO_SIZE) {
 1937                         if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
 1938                                 ifp->if_capenable ^= IFCAP_TXCSUM;
 1939                                 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
 1940                                         ifp->if_hwassist |= JME_CSUM_FEATURES;
 1941                                 else
 1942                                         ifp->if_hwassist &= ~JME_CSUM_FEATURES;
 1943                         }
 1944                 }
 1945                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1946                     (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
 1947                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1948                         reg = CSR_READ_4(sc, JME_RXMAC);
 1949                         reg &= ~RXMAC_CSUM_ENB;
 1950                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 1951                                 reg |= RXMAC_CSUM_ENB;
 1952                         CSR_WRITE_4(sc, JME_RXMAC, reg);
 1953                 }
 1954                 if ((mask & IFCAP_TSO4) != 0 &&
 1955                     ifp->if_mtu < JME_TX_FIFO_SIZE) {
 1956                         if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
 1957                                 ifp->if_capenable ^= IFCAP_TSO4;
 1958                                 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
 1959                                         ifp->if_hwassist |= CSUM_TSO;
 1960                                 else
 1961                                         ifp->if_hwassist &= ~CSUM_TSO;
 1962                         }
 1963                 }
 1964                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 1965                     (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
 1966                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 1967                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 1968                     (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
 1969                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 1970                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1971                     (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
 1972                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1973                         jme_set_vlan(sc);
 1974                 }
 1975                 JME_UNLOCK(sc);
 1976                 VLAN_CAPABILITIES(ifp);
 1977                 break;
 1978         default:
 1979                 error = ether_ioctl(ifp, cmd, data);
 1980                 break;
 1981         }
 1982 
 1983         return (error);
 1984 }
 1985 
 1986 static void
 1987 jme_mac_config(struct jme_softc *sc)
 1988 {
 1989         struct mii_data *mii;
 1990         uint32_t ghc, gpreg, rxmac, txmac, txpause;
 1991 
 1992         JME_LOCK_ASSERT(sc);
 1993 
 1994         mii = device_get_softc(sc->jme_miibus);
 1995 
 1996         CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
 1997         DELAY(10);
 1998         CSR_WRITE_4(sc, JME_GHC, 0);
 1999         ghc = 0;
 2000         rxmac = CSR_READ_4(sc, JME_RXMAC);
 2001         rxmac &= ~RXMAC_FC_ENB;
 2002         txmac = CSR_READ_4(sc, JME_TXMAC);
 2003         txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
 2004         txpause = CSR_READ_4(sc, JME_TXPFC);
 2005         txpause &= ~TXPFC_PAUSE_ENB;
 2006         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
 2007                 ghc |= GHC_FULL_DUPLEX;
 2008                 rxmac &= ~RXMAC_COLL_DET_ENB;
 2009                 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
 2010                     TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
 2011                     TXMAC_FRAME_BURST);
 2012 #ifdef notyet
 2013                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
 2014                         txpause |= TXPFC_PAUSE_ENB;
 2015                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
 2016                         rxmac |= RXMAC_FC_ENB;
 2017 #endif
 2018                 /* Disable retry transmit timer/retry limit. */
 2019                 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
 2020                     ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
 2021         } else {
 2022                 rxmac |= RXMAC_COLL_DET_ENB;
 2023                 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
 2024                 /* Enable retry transmit timer/retry limit. */
 2025                 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
 2026                     TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
 2027         }
 2028         /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
 2029         switch (IFM_SUBTYPE(mii->mii_media_active)) {
 2030         case IFM_10_T:
 2031                 ghc |= GHC_SPEED_10;
 2032                 break;
 2033         case IFM_100_TX:
 2034                 ghc |= GHC_SPEED_100;
 2035                 break;
 2036         case IFM_1000_T:
 2037                 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
 2038                         break;
 2039                 ghc |= GHC_SPEED_1000;
 2040                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
 2041                         txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
 2042                 break;
 2043         default:
 2044                 break;
 2045         }
 2046         if (sc->jme_rev == DEVICEID_JMC250 &&
 2047             sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
 2048                 /*
 2049                  * Work around an occasional packet loss issue of the
 2050                  * JMC250 A2 when it runs on half-duplex media.
 2051                  */
 2052                 gpreg = CSR_READ_4(sc, JME_GPREG1);
 2053                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
 2054                         gpreg &= ~GPREG1_HDPX_FIX;
 2055                 else
 2056                         gpreg |= GPREG1_HDPX_FIX;
 2057                 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
 2058                 /* Work around CRC errors at 100Mbps on the JMC250 A2. */
 2059                 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
 2060                         /* Extend interface FIFO depth. */
 2061                         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
 2062                             0x1B, 0x0000);
 2063                 } else {
 2064                         /* Select default interface FIFO depth. */
 2065                         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
 2066                             0x1B, 0x0004);
 2067                 }
 2068         }
 2069         CSR_WRITE_4(sc, JME_GHC, ghc);
 2070         CSR_WRITE_4(sc, JME_RXMAC, rxmac);
 2071         CSR_WRITE_4(sc, JME_TXMAC, txmac);
 2072         CSR_WRITE_4(sc, JME_TXPFC, txpause);
 2073 }
 2074 
 2075 static void
 2076 jme_link_task(void *arg, int pending)
 2077 {
 2078         struct jme_softc *sc;
 2079         struct mii_data *mii;
 2080         struct ifnet *ifp;
 2081         struct jme_txdesc *txd;
 2082         bus_addr_t paddr;
 2083         int i;
 2084 
 2085         sc = (struct jme_softc *)arg;
 2086 
 2087         JME_LOCK(sc);
 2088         mii = device_get_softc(sc->jme_miibus);
 2089         ifp = sc->jme_ifp;
 2090         if (mii == NULL || ifp == NULL ||
 2091             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 2092                 JME_UNLOCK(sc);
 2093                 return;
 2094         }
 2095 
 2096         sc->jme_flags &= ~JME_FLAG_LINK;
 2097         if ((mii->mii_media_status & IFM_AVALID) != 0) {
 2098                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 2099                 case IFM_10_T:
 2100                 case IFM_100_TX:
 2101                         sc->jme_flags |= JME_FLAG_LINK;
 2102                         break;
 2103                 case IFM_1000_T:
 2104                         if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
 2105                                 break;
 2106                         sc->jme_flags |= JME_FLAG_LINK;
 2107                         break;
 2108                 default:
 2109                         break;
 2110                 }
 2111         }
 2112 
 2113         /*
 2114          * Disabling the Rx/Tx MACs has the side-effect of resetting
 2115          * the JME_TXNDA/JME_RXNDA registers to the first address of
 2116          * the Tx/Rx descriptor rings.  So the driver should reset its
 2117          * internal producer/consumer pointers and reclaim any
 2118          * allocated resources.  Note, just saving the values of the
 2119          * JME_TXNDA and JME_RXNDA registers before stopping the MAC
 2120          * and restoring them afterwards is not sufficient to
 2121          * guarantee a correct MAC state, because stopping the MAC
 2122          * can take a while and the hardware might have updated the
 2123          * JME_TXNDA/JME_RXNDA registers during the stop
 2124          * operation.
 2125          */
 2126 #ifdef notyet
 2127         /* Block execution of task. */
 2128         taskqueue_block(sc->jme_tq);
 2129 #endif
 2130         /* Disable interrupts and stop driver. */
 2131         CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
 2132         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2133         callout_stop(&sc->jme_tick_ch);
 2134         sc->jme_watchdog_timer = 0;
 2135 
 2136         /* Stop receiver/transmitter. */
 2137         jme_stop_rx(sc);
 2138         jme_stop_tx(sc);
 2139 
 2140         /* XXX Drain all queued tasks. */
 2141         JME_UNLOCK(sc);
 2142         taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
 2143         taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
 2144         JME_LOCK(sc);
 2145 
 2146         jme_rxintr(sc, JME_RX_RING_CNT);
 2147         if (sc->jme_cdata.jme_rxhead != NULL)
 2148                 m_freem(sc->jme_cdata.jme_rxhead);
 2149         JME_RXCHAIN_RESET(sc);
 2150         jme_txeof(sc);
 2151         if (sc->jme_cdata.jme_tx_cnt != 0) {
 2152                 /* Remove queued packets for transmit. */
 2153                 for (i = 0; i < JME_TX_RING_CNT; i++) {
 2154                         txd = &sc->jme_cdata.jme_txdesc[i];
 2155                         if (txd->tx_m != NULL) {
 2156                                 bus_dmamap_sync(
 2157                                     sc->jme_cdata.jme_tx_tag,
 2158                                     txd->tx_dmamap,
 2159                                     BUS_DMASYNC_POSTWRITE);
 2160                                 bus_dmamap_unload(
 2161                                     sc->jme_cdata.jme_tx_tag,
 2162                                     txd->tx_dmamap);
 2163                                 m_freem(txd->tx_m);
 2164                                 txd->tx_m = NULL;
 2165                                 txd->tx_ndesc = 0;
 2166                                 ifp->if_oerrors++;
 2167                         }
 2168                 }
 2169         }
 2170 
 2171         /*
 2172          * Reuse configured Rx descriptors and reset
 2173          * producer/consumer index.
 2174          */
 2175         sc->jme_cdata.jme_rx_cons = 0;
 2176         atomic_set_int(&sc->jme_morework, 0);
 2177         jme_init_tx_ring(sc);
 2178         /* Initialize shadow status block. */
 2179         jme_init_ssb(sc);
 2180 
 2181         /* Program MAC with resolved speed/duplex/flow-control. */
 2182         if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
 2183                 jme_mac_config(sc);
 2184 
 2185                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
 2186                 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
 2187 
 2188                 /* Set Tx ring address to the hardware. */
 2189                 paddr = JME_TX_RING_ADDR(sc, 0);
 2190                 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
 2191                 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
 2192 
 2193                 /* Set Rx ring address to the hardware. */
 2194                 paddr = JME_RX_RING_ADDR(sc, 0);
 2195                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
 2196                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
 2197 
 2198                 /* Restart receiver/transmitter. */
 2199                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
 2200                     RXCSR_RXQ_START);
 2201                 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
 2202         }
 2203 
 2204         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2205         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2206         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
 2207 #ifdef notyet
 2208         /* Unblock execution of task. */
 2209         taskqueue_unblock(sc->jme_tq);
 2210 #endif
 2211         /* Reenable interrupts. */
 2212         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
 2213 
 2214         JME_UNLOCK(sc);
 2215 }
 2216 
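/*
 * Interrupt filter.  It only masks further interrupts and defers the
 * real work to jme_int_task() on the driver taskqueue.
 */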
 2217 #if __FreeBSD_version < 700000
 2218 static void
 2219 #else
 2220 static int
 2221 #endif
 2222 jme_intr(void *arg)
 2223 {
 2224         struct jme_softc *sc;
 2225         uint32_t status;
 2226 
 2227         sc = (struct jme_softc *)arg;
 2228 
 2229         status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
 2230         if (status == 0 || status == 0xFFFFFFFF)
 2231                 return (FILTER_STRAY);
 2232         /* Disable interrupts. */
 2233         CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
 2234         taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
 2235 
 2236         return (FILTER_HANDLED);
 2237 }
 2238 
 2239 static void
 2240 jme_int_task(void *arg, int pending)
 2241 {
 2242         struct jme_softc *sc;
 2243         struct ifnet *ifp;
 2244         uint32_t status;
 2245         int more;
 2246 
 2247         sc = (struct jme_softc *)arg;
 2248         ifp = sc->jme_ifp;
 2249 
 2250         status = CSR_READ_4(sc, JME_INTR_STATUS);
 2251         more = atomic_readandclear_int(&sc->jme_morework);
 2252         if (more != 0) {
 2253                 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
 2254                 more = 0;
 2255         }
 2256         if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
 2257                 goto done;
 2258         /* Reset PCC counter/timer and Ack interrupts. */
 2259         status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
 2260         if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
 2261                 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
 2262         if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
 2263                 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
 2264         CSR_WRITE_4(sc, JME_INTR_STATUS, status);
 2265         more = 0;
 2266         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 2267                 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
 2268                         more = jme_rxintr(sc, sc->jme_process_limit);
 2269                         if (more != 0)
 2270                                 atomic_set_int(&sc->jme_morework, 1);
 2271                 }
 2272                 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
 2273                         /*
 2274                          * Notify the hardware of the availability of new
 2275                          * Rx buffers.
 2276                          * Reading RXCSR takes a very long time under
 2277                          * heavy load, so cache the RXCSR value and write
 2278                          * the ORed value with the kick command to
 2279                          * RXCSR.  This saves one register access
 2280                          * cycle.
 2281                          */
 2282                         CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
 2283                             RXCSR_RX_ENB | RXCSR_RXQ_START);
 2284                 }
 2285                 /*
 2286                  * Reclaiming Tx buffers is deferred so that jme(4) runs
 2287                  * without locks held.
 2288                  */
 2289                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2290                         taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
 2291         }
 2292 
 2293         if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
 2294                 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
 2295                 return;
 2296         }
 2297 done:
 2298         /* Reenable interrupts. */
 2299         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
 2300 }
 2301 
 2302 static void
 2303 jme_txeof(struct jme_softc *sc)
 2304 {
 2305         struct ifnet *ifp;
 2306         struct jme_txdesc *txd;
 2307         uint32_t status;
 2308         int cons, nsegs;
 2309 
 2310         JME_LOCK_ASSERT(sc);
 2311 
 2312         ifp = sc->jme_ifp;
 2313 
 2314         cons = sc->jme_cdata.jme_tx_cons;
 2315         if (cons == sc->jme_cdata.jme_tx_prod)
 2316                 return;
 2317 
 2318         bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
 2319             sc->jme_cdata.jme_tx_ring_map,
 2320             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2321 
 2322         /*
 2323          * Go through our Tx list and free mbufs for those
 2324          * frames which have been transmitted.
 2325          */
 2326         for (; cons != sc->jme_cdata.jme_tx_prod;) {
 2327                 txd = &sc->jme_cdata.jme_txdesc[cons];
 2328                 status = le32toh(txd->tx_desc->flags);
 2329                 if ((status & JME_TD_OWN) == JME_TD_OWN)
 2330                         break;
 2331 
 2332                 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
 2333                         ifp->if_oerrors++;
 2334                 else {
 2335                         ifp->if_opackets++;
 2336                         if ((status & JME_TD_COLLISION) != 0)
 2337                                 ifp->if_collisions +=
 2338                                     le32toh(txd->tx_desc->buflen) &
 2339                                     JME_TD_BUF_LEN_MASK;
 2340                 }
 2341                 /*
 2342                  * Only the first descriptor of a multi-descriptor
 2343                  * transmission is updated, so the driver has to skip the
 2344                  * entire chain of buffers for the transmitted frame.  In
 2345                  * other words, the JME_TD_OWN bit is valid only in the
 2346                  * first descriptor of a multi-descriptor transmission.
 2347                  */
 2348                 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
 2349                         sc->jme_rdata.jme_tx_ring[cons].flags = 0;
 2350                         JME_DESC_INC(cons, JME_TX_RING_CNT);
 2351                 }
 2352 
 2353                 /* Reclaim transferred mbufs. */
 2354                 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
 2355                     BUS_DMASYNC_POSTWRITE);
 2356                 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
 2357 
 2358                 KASSERT(txd->tx_m != NULL,
 2359                     ("%s: freeing NULL mbuf!\n", __func__));
 2360                 m_freem(txd->tx_m);
 2361                 txd->tx_m = NULL;
 2362                 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
 2363                 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
 2364                     ("%s: Active Tx desc counter was garbled\n", __func__));
 2365                 txd->tx_ndesc = 0;
 2366                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2367         }
 2368         sc->jme_cdata.jme_tx_cons = cons;
 2369         /* Disarm the watchdog timer when no descriptors are pending. */
 2370         if (sc->jme_cdata.jme_tx_cnt == 0)
 2371                 sc->jme_watchdog_timer = 0;
 2372 
 2373         bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
 2374             sc->jme_cdata.jme_tx_ring_map,
 2375             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2376 }
 2377 
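/*
 * Return an Rx descriptor to the hardware with its existing mbuf so
 * the buffer can be reused after an error or allocation failure.
 */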
 2378 static __inline void
 2379 jme_discard_rxbuf(struct jme_softc *sc, int cons)
 2380 {
 2381         struct jme_desc *desc;
 2382 
 2383         desc = &sc->jme_rdata.jme_rx_ring[cons];
 2384         desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
 2385         desc->buflen = htole32(MCLBYTES);
 2386 }
 2387 
 2388 /* Receive a frame. */
 2389 static void
 2390 jme_rxeof(struct jme_softc *sc)
 2391 {
 2392         struct ifnet *ifp;
 2393         struct jme_desc *desc;
 2394         struct jme_rxdesc *rxd;
 2395         struct mbuf *mp, *m;
 2396         uint32_t flags, status;
 2397         int cons, count, nsegs;
 2398 
 2399         ifp = sc->jme_ifp;
 2400 
 2401         cons = sc->jme_cdata.jme_rx_cons;
 2402         desc = &sc->jme_rdata.jme_rx_ring[cons];
 2403         flags = le32toh(desc->flags);
 2404         status = le32toh(desc->buflen);
 2405         nsegs = JME_RX_NSEGS(status);
 2406         sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
 2407         if ((status & JME_RX_ERR_STAT) != 0) {
 2408                 ifp->if_ierrors++;
 2409                 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
 2410 #ifdef JME_SHOW_ERRORS
 2411                 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
 2412                     __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
 2413 #endif
 2414                 sc->jme_cdata.jme_rx_cons += nsegs;
 2415                 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
 2416                 return;
 2417         }
 2418 
 2419         for (count = 0; count < nsegs; count++,
 2420             JME_DESC_INC(cons, JME_RX_RING_CNT)) {
 2421                 rxd = &sc->jme_cdata.jme_rxdesc[cons];
 2422                 mp = rxd->rx_m;
 2423                 /* Add a new receive buffer to the ring. */
 2424                 if (jme_newbuf(sc, rxd) != 0) {
 2425                         ifp->if_iqdrops++;
 2426                         /* Reuse buffer. */
 2427                         for (; count < nsegs; count++) {
 2428                                 jme_discard_rxbuf(sc, cons);
 2429                                 JME_DESC_INC(cons, JME_RX_RING_CNT);
 2430                         }
 2431                         if (sc->jme_cdata.jme_rxhead != NULL) {
 2432                                 m_freem(sc->jme_cdata.jme_rxhead);
 2433                                 JME_RXCHAIN_RESET(sc);
 2434                         }
 2435                         break;
 2436                 }
 2437 
 2438                 /*
 2439                  * Assume we've received a full-sized frame.
 2440                  * The actual size is fixed up when we encounter the end
 2441                  * of a multi-segmented frame.
 2442                  */
 2443                 mp->m_len = MCLBYTES;
 2444 
 2445                 /* Chain received mbufs. */
 2446                 if (sc->jme_cdata.jme_rxhead == NULL) {
 2447                         sc->jme_cdata.jme_rxhead = mp;
 2448                         sc->jme_cdata.jme_rxtail = mp;
 2449                 } else {
 2450                         /*
 2451                          * Receive processor can receive a maximum frame
 2452                          * size of 65535 bytes.
 2453                          */
 2454                         mp->m_flags &= ~M_PKTHDR;
 2455                         sc->jme_cdata.jme_rxtail->m_next = mp;
 2456                         sc->jme_cdata.jme_rxtail = mp;
 2457                 }
 2458 
 2459                 if (count == nsegs - 1) {
 2460                         /* Last desc. for this frame. */
 2461                         m = sc->jme_cdata.jme_rxhead;
 2462                         m->m_flags |= M_PKTHDR;
 2463                         m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
 2464                         if (nsegs > 1) {
 2465                                 /* Set first mbuf size. */
 2466                                 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
 2467                                 /* Set last mbuf size. */
 2468                                 mp->m_len = sc->jme_cdata.jme_rxlen -
 2469                                     ((MCLBYTES - JME_RX_PAD_BYTES) +
 2470                                     (MCLBYTES * (nsegs - 2)));
 2471                         } else
 2472                                 m->m_len = sc->jme_cdata.jme_rxlen;
 2473                         m->m_pkthdr.rcvif = ifp;
 2474 
 2475                         /*
 2476                          * Account for the 10 bytes of auto padding used
 2477                          * to align the IP header on a 32bit boundary.
 2478                          * Also note, the CRC bytes are automatically
 2479                          * removed by the hardware.
 2480                          */
 2481                         m->m_data += JME_RX_PAD_BYTES;
 2482 
 2483                         /* Set checksum information. */
 2484                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
 2485                             (flags & JME_RD_IPV4) != 0) {
 2486                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2487                                 if ((flags & JME_RD_IPCSUM) != 0)
 2488                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2489                                 if (((flags & JME_RD_MORE_FRAG) == 0) &&
 2490                                     ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
 2491                                     (JME_RD_TCP | JME_RD_TCPCSUM) ||
 2492                                     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
 2493                                     (JME_RD_UDP | JME_RD_UDPCSUM))) {
 2494                                         m->m_pkthdr.csum_flags |=
 2495                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2496                                         m->m_pkthdr.csum_data = 0xffff;
 2497                                 }
 2498                         }
 2499 
 2500                         /* Check for VLAN tagged packets. */
 2501                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
 2502                             (flags & JME_RD_VLAN_TAG) != 0) {
 2503 #if __FreeBSD_version < 700000
 2504                                 VLAN_INPUT_TAG_NEW(ifp, m,
 2505                                     (flags & JME_RD_VLAN_MASK));
 2506 #else
 2507                                 m->m_pkthdr.ether_vtag =
 2508                                     flags & JME_RD_VLAN_MASK;
 2509                                 m->m_flags |= M_VLANTAG;
 2510 #endif
 2511                         }
 2512 
 2513                         ifp->if_ipackets++;
 2514                         /* Pass it on. */
 2515                         (*ifp->if_input)(ifp, m);
 2516 
 2517                         /* Reset mbuf chains. */
 2518                         JME_RXCHAIN_RESET(sc);
 2519                 }
 2520         }
 2521 
 2522         sc->jme_cdata.jme_rx_cons += nsegs;
 2523         sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
 2524 }
 2525 
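/*
 * Process received frames until the descriptor budget 'count' is
 * exhausted.  Returns EAGAIN when the budget runs out so that
 * jme_int_task() reschedules itself.
 */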
 2526 static int
 2527 jme_rxintr(struct jme_softc *sc, int count)
 2528 {
 2529         struct jme_desc *desc;
 2530         int nsegs, prog, pktlen;
 2531 
 2532         bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
 2533             sc->jme_cdata.jme_rx_ring_map,
 2534             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2535 
 2536         for (prog = 0; count > 0; prog++) {
 2537                 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
 2538                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
 2539                         break;
 2540                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
 2541                         break;
 2542                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
 2543                 /*
 2544                  * Check the number of segments against the received
 2545                  * bytes.  A non-matching value would indicate that the
 2546                  * hardware is still updating the Rx descriptors.  I'm
 2547                  * not sure whether this check is needed.
 2548                  */
 2549                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
 2550                 if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
 2551                         break;
 2552                 prog++;
 2553                 /* Received a frame. */
 2554                 jme_rxeof(sc);
 2555                 count -= nsegs;
 2556         }
 2557 
 2558         if (prog > 0)
 2559                 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
 2560                     sc->jme_cdata.jme_rx_ring_map,
 2561                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2562 
 2563         return (count > 0 ? 0 : EAGAIN);
 2564 }
 2565 
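      /*
       * Once-a-second callout: drive the MII state machine, opportunistically
       * reclaim completed Tx buffers, run the watchdog and reschedule itself
       * one hz later.
       */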
 2566 static void
 2567 jme_tick(void *arg)
 2568 {
 2569         struct jme_softc *sc;
 2570         struct mii_data *mii;
 2571 
 2572         sc = (struct jme_softc *)arg;
 2573 
 2574         JME_LOCK_ASSERT(sc);
 2575 
 2576         mii = device_get_softc(sc->jme_miibus);
 2577         mii_tick(mii);
 2578         /*
 2579          * Reclaim Tx buffers that have been completed.  This is not
 2580          * strictly needed here, but it releases allocated mbuf chains
 2581          * sooner and bounds the reclamation delay to one hz tick.
 2582          */
 2583         jme_txeof(sc);
 2584         jme_watchdog(sc);
 2585         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
 2586 }
 2587 
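      /*
       * Reset the chip: stop the receiver and transmitter, then assert the
       * global reset bit in GHC briefly and release it.
       */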
 2588 static void
 2589 jme_reset(struct jme_softc *sc)
 2590 {
 2591 
 2592         /* Stop receiver, transmitter. */
 2593         jme_stop_rx(sc);
 2594         jme_stop_tx(sc);
 2595         CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
 2596         DELAY(10);
 2597         CSR_WRITE_4(sc, JME_GHC, 0);
 2598 }
 2599 
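      /*
       * Locked wrapper around jme_init_locked(); presumably installed as the
       * interface's if_init handler elsewhere in the driver.
       */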
 2600 static void
 2601 jme_init(void *xsc)
 2602 {
 2603         struct jme_softc *sc;
 2604 
 2605         sc = (struct jme_softc *)xsc;
 2606         JME_LOCK(sc);
 2607         jme_init_locked(sc);
 2608         JME_UNLOCK(sc);
 2609 }
 2610 
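      /*
       * Bring the interface up: stop and reset the chip, rebuild the Rx/Tx
       * rings and the shadow status block, reprogram the station address,
       * configure the Tx/Rx queues, MACs, coalescing and interrupt mask, and
       * finally start autonegotiation via mii_mediachg().  The DMA engines
       * themselves are enabled later, from the link task, once a valid link
       * has been detected (see the comment near the end of this function).
       */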
 2611 static void
 2612 jme_init_locked(struct jme_softc *sc)
 2613 {
 2614         struct ifnet *ifp;
 2615         struct mii_data *mii;
 2616         uint8_t eaddr[ETHER_ADDR_LEN];
 2617         bus_addr_t paddr;
 2618         uint32_t reg;
 2619         int error;
 2620 
 2621         JME_LOCK_ASSERT(sc);
 2622 
 2623         ifp = sc->jme_ifp;
 2624         mii = device_get_softc(sc->jme_miibus);
 2625 
 2626         /*
 2627          * Cancel any pending I/O.
 2628          */
 2629         jme_stop(sc);
 2630 
 2631         /*
 2632          * Reset the chip to a known state.
 2633          */
 2634         jme_reset(sc);
 2635 
 2636         /* Init descriptors. */
 2637         error = jme_init_rx_ring(sc);
 2638         if (error != 0) {
 2639                 device_printf(sc->jme_dev,
 2640                     "%s: initialization failed: no memory for Rx buffers.\n",
 2641                     __func__);
 2642                 jme_stop(sc);
 2643                 return;
 2644         }
 2645         jme_init_tx_ring(sc);
 2646         /* Initialize shadow status block. */
 2647         jme_init_ssb(sc);
 2648 
 2649         /* Reprogram the station address. */
 2650         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
 2651         CSR_WRITE_4(sc, JME_PAR0,
 2652             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
 2653         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
 2654 
 2655         /*
 2656          * Configure Tx queue.
 2657          *  Tx priority queue weight value : 0
 2658          *  Tx FIFO threshold for processing next packet : 16QW
 2659          *  Maximum Tx DMA length : 512
 2660          *  Allow Tx DMA burst.
 2661          */
 2662         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
 2663         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
 2664         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
 2665         sc->jme_txcsr |= sc->jme_tx_dma_size;
 2666         sc->jme_txcsr |= TXCSR_DMA_BURST;
 2667         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
 2668 
 2669         /* Set Tx descriptor counter. */
 2670         CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
 2671 
 2672         /* Set Tx ring address to the hardware. */
 2673         paddr = JME_TX_RING_ADDR(sc, 0);
 2674         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
 2675         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
 2676 
 2677         /* Configure TxMAC parameters. */
 2678         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
 2679         reg |= TXMAC_THRESH_1_PKT;
 2680         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
 2681         CSR_WRITE_4(sc, JME_TXMAC, reg);
 2682 
 2683         /*
 2684          * Configure Rx queue.
 2685          *  FIFO full threshold for transmitting Tx pause packet : 128T
 2686          *  FIFO threshold for processing next packet : 128QW
 2687          *  Rx queue 0 select
 2688          *  Max Rx DMA length : 128
 2689          *  Rx descriptor retry : 32
 2690          *  Rx descriptor retry time gap : 256ns
 2691          *  Don't receive runt/bad frame.
 2692          */
 2693         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
 2694         /*
 2695          * Since the Rx FIFO is 4K bytes, frames larger than 4K
 2696          * bytes will suffer from Rx FIFO overruns, so lower the
 2697          * FIFO threshold to reduce overruns for frames larger
 2698          * than 4000 bytes.
 2699          * For best performance with standard MTU-sized frames,
 2700          * use the maximum allowable FIFO threshold, 128QW.
 2701          */
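              /*
               * For example, with the standard 1500-byte MTU the frame size
               * used here is 1500 + ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN
               * (4) + ETHER_CRC_LEN (4) = 1522 bytes, well below the 4K FIFO,
               * so the 128QW threshold is chosen; only MTUs above roughly
               * 4074 bytes fall back to the 16QW threshold.
               */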
 2702         if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
 2703             ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
 2704                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
 2705         else
 2706                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
 2707         sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
 2708         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
 2709         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
 2710         CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
 2711 
 2712         /* Set Rx descriptor counter. */
 2713         CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
 2714 
 2715         /* Set Rx ring address to the hardware. */
 2716         paddr = JME_RX_RING_ADDR(sc, 0);
 2717         CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
 2718         CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
 2719 
 2720         /* Clear receive filter. */
 2721         CSR_WRITE_4(sc, JME_RXMAC, 0);
 2722         /* Set up the receive filter. */
 2723         jme_set_filter(sc);
 2724         jme_set_vlan(sc);
 2725 
 2726         /*
 2727          * Disable all WOL bits, as WOL can interfere with normal Rx
 2728          * operation.  Also clear the WOL detection status bits.
 2729          */
 2730         reg = CSR_READ_4(sc, JME_PMCS);
 2731         reg &= ~PMCS_WOL_ENB_MASK;
 2732         CSR_WRITE_4(sc, JME_PMCS, reg);
 2733 
 2734         reg = CSR_READ_4(sc, JME_RXMAC);
 2735         /*
 2736          * Pad 10 bytes right before the received frame.  This greatly
 2737          * helps Rx performance on strict-alignment architectures, as the
 2738          * driver does not need to copy the frame to align the payload.
 2739          */
 2740         reg |= RXMAC_PAD_10BYTES;
 2741         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 2742                 reg |= RXMAC_CSUM_ENB;
 2743         CSR_WRITE_4(sc, JME_RXMAC, reg);
 2744 
 2745         /* Configure general purpose reg0 */
 2746         reg = CSR_READ_4(sc, JME_GPREG0);
 2747         reg &= ~GPREG0_PCC_UNIT_MASK;
 2748         /* Set PCC timer resolution to micro-seconds unit. */
 2749         reg |= GPREG0_PCC_UNIT_US;
 2750         /*
 2751          * Disable all shadow register posting, as we have to read the
 2752          * JME_INTR_STATUS register in jme_int_task.  It also seems
 2753          * hard to keep the interrupt status synchronized between
 2754          * hardware and software with shadow posting, due to the
 2755          * requirements of bus_dmamap_sync(9).
 2756          */
 2757         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
 2758             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
 2759             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
 2760             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
 2761         /* Disable posting of DW0. */
 2762         reg &= ~GPREG0_POST_DW0_ENB;
 2763         /* Clear PME message. */
 2764         reg &= ~GPREG0_PME_ENB;
 2765         /* Set PHY address. */
 2766         reg &= ~GPREG0_PHY_ADDR_MASK;
 2767         reg |= sc->jme_phyaddr;
 2768         CSR_WRITE_4(sc, JME_GPREG0, reg);
 2769 
 2770         /* Configure Tx queue 0 packet completion coalescing. */
 2771         reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
 2772             PCCTX_COAL_TO_MASK;
 2773         reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
 2774             PCCTX_COAL_PKT_MASK;
 2775         reg |= PCCTX_COAL_TXQ0;
 2776         CSR_WRITE_4(sc, JME_PCCTX, reg);
 2777 
 2778         /* Configure Rx queue 0 packet completion coalescing. */
 2779         reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
 2780             PCCRX_COAL_TO_MASK;
 2781         reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
 2782             PCCRX_COAL_PKT_MASK;
 2783         CSR_WRITE_4(sc, JME_PCCRX0, reg);
 2784 
 2785         /* Configure shadow status block but don't enable posting. */
 2786         paddr = sc->jme_rdata.jme_ssb_block_paddr;
 2787         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
 2788         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
 2789 
 2790         /* Disable Timer 1 and Timer 2. */
 2791         CSR_WRITE_4(sc, JME_TIMER1, 0);
 2792         CSR_WRITE_4(sc, JME_TIMER2, 0);
 2793 
 2794         /* Configure retry transmit period, retry limit value. */
 2795         CSR_WRITE_4(sc, JME_TXTRHD,
 2796             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
 2797             TXTRHD_RT_PERIOD_MASK) |
 2798             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
 2799             TXTRHD_RT_LIMIT_MASK));
 2800 
 2801         /* Disable RSS. */
 2802         CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
 2803 
 2804         /* Initialize the interrupt mask. */
 2805         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
 2806         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
 2807 
 2808         /*
 2809          * Enabling Tx/Rx DMA engines and Rx queue processing is
 2810          * done after detection of valid link in jme_link_task.
 2811          */
 2812 
 2813         sc->jme_flags &= ~JME_FLAG_LINK;
 2814         /* Set the current media. */
 2815         mii_mediachg(mii);
 2816 
 2817         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
 2818 
 2819         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2820         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2821 }
 2822 
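      /*
       * Tear down a running interface: mark it down, stop the callout and
       * watchdog, mask interrupts, disable shadow status posting and both
       * DMA engines, then reclaim and free every mbuf still attached to the
       * Rx and Tx rings.
       */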
 2823 static void
 2824 jme_stop(struct jme_softc *sc)
 2825 {
 2826         struct ifnet *ifp;
 2827         struct jme_txdesc *txd;
 2828         struct jme_rxdesc *rxd;
 2829         int i;
 2830 
 2831         JME_LOCK_ASSERT(sc);
 2832         /*
 2833          * Mark the interface down and cancel the watchdog timer.
 2834          */
 2835         ifp = sc->jme_ifp;
 2836         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2837         sc->jme_flags &= ~JME_FLAG_LINK;
 2838         callout_stop(&sc->jme_tick_ch);
 2839         sc->jme_watchdog_timer = 0;
 2840 
 2841         /*
 2842          * Disable interrupts.
 2843          */
 2844         CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
 2845         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
 2846 
 2847         /* Disable updating shadow status block. */
 2848         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
 2849             CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
 2850 
 2851         /* Stop receiver, transmitter. */
 2852         jme_stop_rx(sc);
 2853         jme_stop_tx(sc);
 2854 
 2855         /* Reclaim Rx/Tx buffers that have been completed. */
 2856         jme_rxintr(sc, JME_RX_RING_CNT);
 2857         if (sc->jme_cdata.jme_rxhead != NULL)
 2858                 m_freem(sc->jme_cdata.jme_rxhead);
 2859         JME_RXCHAIN_RESET(sc);
 2860         jme_txeof(sc);
 2861         /*
 2862          * Free RX and TX mbufs still in the queues.
 2863          */
 2864         for (i = 0; i < JME_RX_RING_CNT; i++) {
 2865                 rxd = &sc->jme_cdata.jme_rxdesc[i];
 2866                 if (rxd->rx_m != NULL) {
 2867                         bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
 2868                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 2869                         bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
 2870                             rxd->rx_dmamap);
 2871                         m_freem(rxd->rx_m);
 2872                         rxd->rx_m = NULL;
 2873                 }
 2874         }
 2875         for (i = 0; i < JME_TX_RING_CNT; i++) {
 2876                 txd = &sc->jme_cdata.jme_txdesc[i];
 2877                 if (txd->tx_m != NULL) {
 2878                         bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
 2879                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 2880                         bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
 2881                             txd->tx_dmamap);
 2882                         m_freem(txd->tx_m);
 2883                         txd->tx_m = NULL;
 2884                         txd->tx_ndesc = 0;
 2885                 }
 2886         }
 2887 }
 2888 
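      /*
       * Clear the Tx enable bit and poll TXCSR roughly once a microsecond
       * until the transmitter reports it has stopped or JME_TIMEOUT
       * iterations elapse; jme_stop_rx() below does the same for the
       * receiver.
       */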
 2889 static void
 2890 jme_stop_tx(struct jme_softc *sc)
 2891 {
 2892         uint32_t reg;
 2893         int i;
 2894 
 2895         reg = CSR_READ_4(sc, JME_TXCSR);
 2896         if ((reg & TXCSR_TX_ENB) == 0)
 2897                 return;
 2898         reg &= ~TXCSR_TX_ENB;
 2899         CSR_WRITE_4(sc, JME_TXCSR, reg);
 2900         for (i = JME_TIMEOUT; i > 0; i--) {
 2901                 DELAY(1);
 2902                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
 2903                         break;
 2904         }
 2905         if (i == 0)
 2906                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
 2907 }
 2908 
 2909 static void
 2910 jme_stop_rx(struct jme_softc *sc)
 2911 {
 2912         uint32_t reg;
 2913         int i;
 2914 
 2915         reg = CSR_READ_4(sc, JME_RXCSR);
 2916         if ((reg & RXCSR_RX_ENB) == 0)
 2917                 return;
 2918         reg &= ~RXCSR_RX_ENB;
 2919         CSR_WRITE_4(sc, JME_RXCSR, reg);
 2920         for (i = JME_TIMEOUT; i > 0; i--) {
 2921                 DELAY(1);
 2922                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
 2923                         break;
 2924         }
 2925         if (i == 0)
 2926                 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
 2927 }
 2928 
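      /*
       * Reset the Tx ring to an empty state: clear the producer/consumer
       * indices, zero the descriptors, detach any stale mbuf pointers and
       * sync the ring for device access.
       */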
 2929 static void
 2930 jme_init_tx_ring(struct jme_softc *sc)
 2931 {
 2932         struct jme_ring_data *rd;
 2933         struct jme_txdesc *txd;
 2934         int i;
 2935 
 2936         sc->jme_cdata.jme_tx_prod = 0;
 2937         sc->jme_cdata.jme_tx_cons = 0;
 2938         sc->jme_cdata.jme_tx_cnt = 0;
 2939 
 2940         rd = &sc->jme_rdata;
 2941         bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
 2942         for (i = 0; i < JME_TX_RING_CNT; i++) {
 2943                 txd = &sc->jme_cdata.jme_txdesc[i];
 2944                 txd->tx_m = NULL;
 2945                 txd->tx_desc = &rd->jme_tx_ring[i];
 2946                 txd->tx_ndesc = 0;
 2947         }
 2948 
 2949         bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
 2950             sc->jme_cdata.jme_tx_ring_map,
 2951             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2952 }
 2953 
 2954 static void
 2955 jme_init_ssb(struct jme_softc *sc)
 2956 {
 2957         struct jme_ring_data *rd;
 2958 
 2959         rd = &sc->jme_rdata;
 2960         bzero(rd->jme_ssb_block, JME_SSB_SIZE);
 2961         bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
 2962             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2963 }
 2964 
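      /*
       * Populate the Rx ring: reset the consumer index and reassembly chain,
       * then attach a fresh mbuf cluster to every descriptor via
       * jme_newbuf().  Returns ENOBUFS if any slot cannot be filled.
       */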
 2965 static int
 2966 jme_init_rx_ring(struct jme_softc *sc)
 2967 {
 2968         struct jme_ring_data *rd;
 2969         struct jme_rxdesc *rxd;
 2970         int i;
 2971 
 2972         sc->jme_cdata.jme_rx_cons = 0;
 2973         JME_RXCHAIN_RESET(sc);
 2974         atomic_set_int(&sc->jme_morework, 0);
 2975 
 2976         rd = &sc->jme_rdata;
 2977         bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
 2978         for (i = 0; i < JME_RX_RING_CNT; i++) {
 2979                 rxd = &sc->jme_cdata.jme_rxdesc[i];
 2980                 rxd->rx_m = NULL;
 2981                 rxd->rx_desc = &rd->jme_rx_ring[i];
 2982                 if (jme_newbuf(sc, rxd) != 0)
 2983                         return (ENOBUFS);
 2984         }
 2985 
 2986         bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
 2987             sc->jme_cdata.jme_rx_ring_map,
 2988             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2989 
 2990         return (0);
 2991 }
 2992 
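      /*
       * Attach a new mbuf cluster to an Rx descriptor.  The cluster is
       * loaded into the spare DMA map first; only on success are the spare
       * map and the descriptor's map swapped, so a failed allocation or
       * load leaves any previously attached buffer untouched.
       */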
 2993 static int
 2994 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
 2995 {
 2996         struct jme_desc *desc;
 2997         struct mbuf *m;
 2998         bus_dma_segment_t segs[1];
 2999         bus_dmamap_t map;
 3000         int nsegs;
 3001 
 3002         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 3003         if (m == NULL)
 3004                 return (ENOBUFS);
 3005         /*
 3006          * The JMC250 has a 64-bit boundary alignment limitation, so
 3007          * jme(4) takes advantage of the hardware's 10-byte padding
 3008          * feature to avoid copying the entire frame just to align
 3009          * the IP header on a 32-bit boundary.
 3010          */
 3011         m->m_len = m->m_pkthdr.len = MCLBYTES;
 3012 
 3013         if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
 3014             sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
 3015                 m_freem(m);
 3016                 return (ENOBUFS);
 3017         }
 3018         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 3019 
 3020         if (rxd->rx_m != NULL) {
 3021                 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
 3022                     BUS_DMASYNC_POSTREAD);
 3023                 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
 3024         }
 3025         map = rxd->rx_dmamap;
 3026         rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
 3027         sc->jme_cdata.jme_rx_sparemap = map;
 3028         bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
 3029             BUS_DMASYNC_PREREAD);
 3030         rxd->rx_m = m;
 3031 
 3032         desc = rxd->rx_desc;
 3033         desc->buflen = htole32(segs[0].ds_len);
 3034         desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
 3035         desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
 3036         desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
 3037 
 3038         return (0);
 3039 }
 3040 
 3041 static void
 3042 jme_set_vlan(struct jme_softc *sc)
 3043 {
 3044         struct ifnet *ifp;
 3045         uint32_t reg;
 3046 
 3047         JME_LOCK_ASSERT(sc);
 3048 
 3049         ifp = sc->jme_ifp;
 3050         reg = CSR_READ_4(sc, JME_RXMAC);
 3051         reg &= ~RXMAC_VLAN_ENB;
 3052         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3053                 reg |= RXMAC_VLAN_ENB;
 3054         CSR_WRITE_4(sc, JME_RXMAC, reg);
 3055 }
 3056 
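      /*
       * Program the Rx filter: unicast is always accepted, broadcast when
       * IFF_BROADCAST is set; promiscuous/allmulti mode opens the multicast
       * hash completely, otherwise the 64-bit hash table in MAR0/MAR1 is
       * rebuilt from the interface's multicast address list.
       */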
 3057 static void
 3058 jme_set_filter(struct jme_softc *sc)
 3059 {
 3060         struct ifnet *ifp;
 3061         struct ifmultiaddr *ifma;
 3062         uint32_t crc;
 3063         uint32_t mchash[2];
 3064         uint32_t rxcfg;
 3065 
 3066         JME_LOCK_ASSERT(sc);
 3067 
 3068         ifp = sc->jme_ifp;
 3069 
 3070         rxcfg = CSR_READ_4(sc, JME_RXMAC);
 3071         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
 3072             RXMAC_ALLMULTI);
 3073         /* Always accept frames destined to our station address. */
 3074         rxcfg |= RXMAC_UNICAST;
 3075         if ((ifp->if_flags & IFF_BROADCAST) != 0)
 3076                 rxcfg |= RXMAC_BROADCAST;
 3077         if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
 3078                 if ((ifp->if_flags & IFF_PROMISC) != 0)
 3079                         rxcfg |= RXMAC_PROMISC;
 3080                 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
 3081                         rxcfg |= RXMAC_ALLMULTI;
 3082                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
 3083                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
 3084                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
 3085                 return;
 3086         }
 3087 
 3088         /*
 3089          * Set up the multicast address filter by passing all multicast
 3090          * addresses through a CRC generator and then using the low-order
 3091          * 6 bits as an index into the 64-bit multicast hash table.  The
 3092          * high-order bit of that index selects the register, while the
 3093          * low 5 bits select the bit within the register.
 3094          */
 3095         rxcfg |= RXMAC_MULTICAST;
 3096         bzero(mchash, sizeof(mchash));
 3097 
 3098         IF_ADDR_LOCK(ifp);
 3099         TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
 3100                 if (ifma->ifma_addr->sa_family != AF_LINK)
 3101                         continue;
 3102                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
 3103                     ifma->ifma_addr), ETHER_ADDR_LEN);
 3104 
 3105                 /* Just want the 6 least significant bits. */
 3106                 crc &= 0x3f;
 3107 
 3108                 /* Set the corresponding bit in the hash table. */
 3109                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
 3110         }
 3111         IF_ADDR_UNLOCK(ifp);
 3112 
 3113         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
 3114         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
 3115         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
 3116 }
 3117 
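      /*
       * Common helper for the coalescing and processing-limit sysctl
       * handlers below: accept a new value only if it lies within
       * [low, high].
       */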
 3118 static int
 3119 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3120 {
 3121         int error, value;
 3122 
 3123         if (arg1 == NULL)
 3124                 return (EINVAL);
 3125         value = *(int *)arg1;
 3126         error = sysctl_handle_int(oidp, &value, 0, req);
 3127         if (error || req->newptr == NULL)
 3128                 return (error);
 3129         if (value < low || value > high)
 3130                 return (EINVAL);
 3131         *(int *)arg1 = value;
 3132 
 3133         return (0);
 3134 }
 3135 
 3136 static int
 3137 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
 3138 {
 3139         return (sysctl_int_range(oidp, arg1, arg2, req,
 3140             PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
 3141 }
 3142 
 3143 static int
 3144 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
 3145 {
 3146         return (sysctl_int_range(oidp, arg1, arg2, req,
 3147             PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
 3148 }
 3149 
 3150 static int
 3151 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
 3152 {
 3153         return (sysctl_int_range(oidp, arg1, arg2, req,
 3154             PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
 3155 }
 3156 
 3157 static int
 3158 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
 3159 {
 3160         return (sysctl_int_range(oidp, arg1, arg2, req,
 3161             PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
 3162 }
 3163 
 3164 static int
 3165 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
 3166 {
 3167         return (sysctl_int_range(oidp, arg1, arg2, req,
 3168             JME_PROC_MIN, JME_PROC_MAX));
 3169 }
