FreeBSD/Linux Kernel Cross Reference
sys/dev/neta/if_mvneta.c


    1 /*
    2  * Copyright (c) 2017 Stormshield.
    3  * Copyright (c) 2017 Semihalf.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
   19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
   23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
   24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  * POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 #include "opt_platform.h"
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/endian.h>
   35 #include <sys/mbuf.h>
   36 #include <sys/lock.h>
   37 #include <sys/mutex.h>
   38 #include <sys/kernel.h>
   39 #include <sys/module.h>
   40 #include <sys/socket.h>
   41 #include <sys/sysctl.h>
   42 #include <sys/smp.h>
   43 #include <sys/taskqueue.h>
   44 #ifdef MVNETA_KTR
   45 #include <sys/ktr.h>
   46 #endif
   47 
   48 #include <net/ethernet.h>
   49 #include <net/bpf.h>
   50 #include <net/if.h>
   51 #include <net/if_arp.h>
   52 #include <net/if_dl.h>
   53 #include <net/if_media.h>
   54 #include <net/if_types.h>
   55 #include <net/if_vlan_var.h>
   56 
   57 #include <netinet/in_systm.h>
   58 #include <netinet/in.h>
   59 #include <netinet/ip.h>
   60 #include <netinet/tcp_lro.h>
   61 
   62 #include <sys/sockio.h>
   63 #include <sys/bus.h>
   64 #include <machine/bus.h>
   65 #include <sys/rman.h>
   66 #include <machine/resource.h>
   67 
   68 #include <dev/extres/clk/clk.h>
   69 
   70 #include <dev/mii/mii.h>
   71 #include <dev/mii/miivar.h>
   72 
   73 #include <dev/mdio/mdio.h>
   74 
   75 #include <arm/mv/mvvar.h>
   76 
   77 #if !defined(__aarch64__)
   78 #include <arm/mv/mvreg.h>
   79 #include <arm/mv/mvwin.h>
   80 #endif
   81 
   82 #include "if_mvnetareg.h"
   83 #include "if_mvnetavar.h"
   84 
   85 #include "miibus_if.h"
   86 #include "mdio_if.h"
   87 
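       /*
        * With MVNETA_DEBUG the driver's internal functions are built with
        * external linkage instead of 'static', presumably so they remain
        * visible to the debugger and in stack traces.
        */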
   88 #ifdef MVNETA_DEBUG
   89 #define STATIC /* nothing */
   90 #else
   91 #define STATIC static
   92 #endif
   93 
   94 #define DASSERT(x) KASSERT((x), (#x))
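       /*
        * DASSERT() stringifies its argument for the panic message, e.g.
        * DASSERT(p != NULL) expands to KASSERT((p != NULL), ("p != NULL")).
        */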
   95 
   96 #define A3700_TCLK_250MHZ               250000000
   97 
   98 /* Device Register Initialization */
   99 STATIC int mvneta_initreg(if_t);
  100 
   101 /* Descriptor Ring Control for each queue */
  102 STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
  103 STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
  104 STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
  105 STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
  106 STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
  107 STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
  108 STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
  109 STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
  110 STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  111 STATIC int mvneta_dma_create(struct mvneta_softc *);
  112 
  113 /* Rx/Tx Queue Control */
  114 STATIC int mvneta_rx_queue_init(if_t, int);
  115 STATIC int mvneta_tx_queue_init(if_t, int);
  116 STATIC int mvneta_rx_queue_enable(if_t, int);
  117 STATIC int mvneta_tx_queue_enable(if_t, int);
  118 STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
  119 STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
  120 STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
  121 STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
  122 
  123 /* Interrupt Handlers */
  124 STATIC void mvneta_disable_intr(struct mvneta_softc *);
  125 STATIC void mvneta_enable_intr(struct mvneta_softc *);
  126 STATIC void mvneta_rxtxth_intr(void *);
  127 STATIC int mvneta_misc_intr(struct mvneta_softc *);
  128 STATIC void mvneta_tick(void *);
   129 /* struct ifnet and mii callbacks */
  130 STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
  131 STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
  132 #ifdef MVNETA_MULTIQUEUE
  133 STATIC int mvneta_transmit(if_t, struct mbuf *);
  134 #else /* !MVNETA_MULTIQUEUE */
  135 STATIC void mvneta_start(if_t);
  136 #endif
  137 STATIC void mvneta_qflush(if_t);
  138 STATIC void mvneta_tx_task(void *, int);
  139 STATIC int mvneta_ioctl(if_t, u_long, caddr_t);
  140 STATIC void mvneta_init(void *);
  141 STATIC void mvneta_init_locked(void *);
  142 STATIC void mvneta_stop(struct mvneta_softc *);
  143 STATIC void mvneta_stop_locked(struct mvneta_softc *);
  144 STATIC int mvneta_mediachange(if_t);
  145 STATIC void mvneta_mediastatus(if_t, struct ifmediareq *);
  146 STATIC void mvneta_portup(struct mvneta_softc *);
  147 STATIC void mvneta_portdown(struct mvneta_softc *);
  148 
  149 /* Link State Notify */
  150 STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
  151 STATIC int mvneta_update_media(struct mvneta_softc *, int);
  152 STATIC void mvneta_adjust_link(struct mvneta_softc *);
  153 STATIC void mvneta_update_eee(struct mvneta_softc *);
  154 STATIC void mvneta_update_fc(struct mvneta_softc *);
  155 STATIC void mvneta_link_isr(struct mvneta_softc *);
  156 STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
  157 STATIC void mvneta_linkup(struct mvneta_softc *);
  158 STATIC void mvneta_linkdown(struct mvneta_softc *);
  159 STATIC void mvneta_linkreset(struct mvneta_softc *);
  160 
  161 /* Tx Subroutines */
  162 STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
  163 STATIC void mvneta_tx_set_csumflag(if_t,
  164     struct mvneta_tx_desc *, struct mbuf *);
  165 STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
  166 STATIC void mvneta_tx_drain(struct mvneta_softc *);
  167 
  168 /* Rx Subroutines */
  169 STATIC int mvneta_rx(struct mvneta_softc *, int, int);
  170 STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
  171 STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
  172 STATIC void mvneta_rx_set_csumflag(if_t,
  173     struct mvneta_rx_desc *, struct mbuf *);
  174 STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
  175 
  176 /* MAC address filter */
  177 STATIC void mvneta_filter_setup(struct mvneta_softc *);
  178 
  179 /* sysctl(9) */
  180 STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
  181 STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
  182 STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
  183 STATIC void sysctl_mvneta_init(struct mvneta_softc *);
  184 
  185 /* MIB */
  186 STATIC void mvneta_clear_mib(struct mvneta_softc *);
  187 STATIC uint64_t mvneta_read_mib(struct mvneta_softc *, int);
  188 STATIC void mvneta_update_mib(struct mvneta_softc *);
  189 
  190 /* Switch */
  191 STATIC boolean_t mvneta_has_switch(device_t);
  192 
  193 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
  194 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
  195 
  196 STATIC struct mtx mii_mutex;
  197 STATIC int mii_init = 0;
  198 
  199 /* Device */
  200 STATIC int mvneta_detach(device_t);
  201 /* MII */
  202 STATIC int mvneta_miibus_readreg(device_t, int, int);
  203 STATIC int mvneta_miibus_writereg(device_t, int, int, int);
  204 
  205 static device_method_t mvneta_methods[] = {
  206         /* Device interface */
  207         DEVMETHOD(device_detach,        mvneta_detach),
  208         /* MII interface */
  209         DEVMETHOD(miibus_readreg,       mvneta_miibus_readreg),
  210         DEVMETHOD(miibus_writereg,      mvneta_miibus_writereg),
  211         /* MDIO interface */
  212         DEVMETHOD(mdio_readreg,         mvneta_miibus_readreg),
  213         DEVMETHOD(mdio_writereg,        mvneta_miibus_writereg),
  214 
  215         /* End */
  216         DEVMETHOD_END
  217 };
  218 
  219 DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
  220 
  221 DRIVER_MODULE(miibus, mvneta, miibus_driver, 0, 0);
  222 DRIVER_MODULE(mdio, mvneta, mdio_driver, 0, 0);
  223 MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
  224 MODULE_DEPEND(mvneta, ether, 1, 1, 1);
  225 MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
  226 MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
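       /*
        * The DRIVER_MODULE() lines above let both a miibus (for directly
        * attached PHYs) and an mdio bus (e.g. for an MDIO-connected switch,
        * see mvneta_has_switch()) attach below this driver; the
        * MODULE_DEPEND() entries make the loader pull those modules in first.
        */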
  227 
  228 /*
   229  * List of MIB registers and names
  230  */
  231 enum mvneta_mib_idx
  232 {
  233         MVNETA_MIB_RX_GOOD_OCT_IDX,
  234         MVNETA_MIB_RX_BAD_OCT_IDX,
  235         MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
  236         MVNETA_MIB_RX_GOOD_FRAME_IDX,
  237         MVNETA_MIB_RX_BAD_FRAME_IDX,
  238         MVNETA_MIB_RX_BCAST_FRAME_IDX,
  239         MVNETA_MIB_RX_MCAST_FRAME_IDX,
  240         MVNETA_MIB_RX_FRAME64_OCT_IDX,
  241         MVNETA_MIB_RX_FRAME127_OCT_IDX,
  242         MVNETA_MIB_RX_FRAME255_OCT_IDX,
  243         MVNETA_MIB_RX_FRAME511_OCT_IDX,
  244         MVNETA_MIB_RX_FRAME1023_OCT_IDX,
  245         MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
  246         MVNETA_MIB_TX_GOOD_OCT_IDX,
  247         MVNETA_MIB_TX_GOOD_FRAME_IDX,
  248         MVNETA_MIB_TX_EXCES_COL_IDX,
  249         MVNETA_MIB_TX_MCAST_FRAME_IDX,
  250         MVNETA_MIB_TX_BCAST_FRAME_IDX,
  251         MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
  252         MVNETA_MIB_FC_SENT_IDX,
  253         MVNETA_MIB_FC_GOOD_IDX,
  254         MVNETA_MIB_FC_BAD_IDX,
  255         MVNETA_MIB_PKT_UNDERSIZE_IDX,
  256         MVNETA_MIB_PKT_FRAGMENT_IDX,
  257         MVNETA_MIB_PKT_OVERSIZE_IDX,
  258         MVNETA_MIB_PKT_JABBER_IDX,
  259         MVNETA_MIB_MAC_RX_ERR_IDX,
  260         MVNETA_MIB_MAC_CRC_ERR_IDX,
  261         MVNETA_MIB_MAC_COL_IDX,
  262         MVNETA_MIB_MAC_LATE_COL_IDX,
  263 };
  264 
  265 STATIC struct mvneta_mib_def {
  266         uint32_t regnum;
  267         int reg64;
  268         const char *sysctl_name;
  269         const char *desc;
  270 } mvneta_mib_list[] = {
  271         [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
  272             "rx_good_oct", "Good Octets Rx"},
  273         [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
  274             "rx_bad_oct", "Bad  Octets Rx"},
  275         [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
  276             "tx_mac_err", "MAC Transmit Error"},
  277         [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
  278             "rx_good_frame", "Good Frames Rx"},
  279         [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
  280             "rx_bad_frame", "Bad Frames Rx"},
  281         [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
  282             "rx_bcast_frame", "Broadcast Frames Rx"},
  283         [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
  284             "rx_mcast_frame", "Multicast Frames Rx"},
  285         [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
  286             "rx_frame_1_64", "Frame Size    1 -   64"},
  287         [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
  288             "rx_frame_65_127", "Frame Size   65 -  127"},
  289         [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
  290             "rx_frame_128_255", "Frame Size  128 -  255"},
  291         [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
  292             "rx_frame_256_511", "Frame Size  256 -  511"},
  293         [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
  294             "rx_frame_512_1023", "Frame Size  512 - 1023"},
  295         [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
   296             "rx_frame_1024_max", "Frame Size 1024 -  Max"},
  297         [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
  298             "tx_good_oct", "Good Octets Tx"},
  299         [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
  300             "tx_good_frame", "Good Frames Tx"},
  301         [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
  302             "tx_exces_collision", "Excessive Collision"},
  303         [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
  304             "tx_mcast_frame", "Multicast Frames Tx"},
  305         [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
  306             "tx_bcast_frame", "Broadcast Frames Tx"},
  307         [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
  308             "tx_mac_ctl_err", "Unknown MAC Control"},
  309         [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
  310             "fc_tx", "Flow Control Tx"},
  311         [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
  312             "fc_rx_good", "Good Flow Control Rx"},
  313         [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
  314             "fc_rx_bad", "Bad Flow Control Rx"},
  315         [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
  316             "pkt_undersize", "Undersized Packets Rx"},
  317         [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
  318             "pkt_fragment", "Fragmented Packets Rx"},
  319         [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
  320             "pkt_oversize", "Oversized Packets Rx"},
  321         [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
  322             "pkt_jabber", "Jabber Packets Rx"},
  323         [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
  324             "mac_rx_err", "MAC Rx Errors"},
  325         [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
  326             "mac_crc_err", "MAC CRC Errors"},
  327         [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
  328             "mac_collision", "MAC Collision"},
  329         [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
  330             "mac_late_collision", "MAC Late Collision"},
  331 };
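       /*
        * Each entry above describes one hardware MIB counter; they are
        * presumably exported as read-only sysctl nodes by
        * sysctl_mvneta_init() and served by sysctl_read_mib().  An
        * illustrative sketch of consuming the table, assuming
        * mvneta_read_mib() takes one of the *_IDX values as its int
        * argument (as its prototype suggests):
        *
        *      uint64_t v;
        *
        *      v = mvneta_read_mib(sc, MVNETA_MIB_RX_GOOD_FRAME_IDX);
        *      printf("%s: %ju\n",
        *          mvneta_mib_list[MVNETA_MIB_RX_GOOD_FRAME_IDX].desc,
        *          (uintmax_t)v);
        */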
  332 
  333 static struct resource_spec res_spec[] = {
  334         { SYS_RES_MEMORY, 0, RF_ACTIVE },
  335         { SYS_RES_IRQ, 0, RF_ACTIVE },
  336         { -1, 0}
  337 };
  338 
  339 static struct {
  340         driver_intr_t *handler;
  341         char * description;
  342 } mvneta_intrs[] = {
  343         { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
  344 };
  345 
  346 static int
  347 mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
  348 {
  349         unsigned int mac_h;
  350         unsigned int mac_l;
  351 
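               /*
                * The station address is split across two registers:
                * MACAH takes the four most significant bytes and MACAL
                * takes the remaining two in its low 16 bits.
                */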
  352         mac_l = (addr[4] << 8) | (addr[5]);
  353         mac_h = (addr[0] << 24) | (addr[1] << 16) |
  354             (addr[2] << 8) | (addr[3] << 0);
  355 
  356         MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
  357         MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
  358         return (0);
  359 }
  360 
  361 static int
  362 mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
  363 {
  364         uint32_t mac_l, mac_h;
  365 
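               /*
                * Lookup order: an FDT-provided address first, then whatever
                * is already programmed into MACAL/MACAH (e.g. by the
                * bootloader), and finally a randomly generated fallback.
                */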
  366 #ifdef FDT
  367         if (mvneta_fdt_mac_address(sc, addr) == 0)
  368                 return (0);
  369 #endif
  370         /*
  371          * Fall back -- use the currently programmed address.
  372          */
  373         mac_l = MVNETA_READ(sc, MVNETA_MACAL);
  374         mac_h = MVNETA_READ(sc, MVNETA_MACAH);
  375         if (mac_l == 0 && mac_h == 0) {
  376                 /*
  377                  * Generate pseudo-random MAC.
  378                  * Set lower part to random number | unit number.
  379                  */
  380                 mac_l = arc4random() & ~0xff;
  381                 mac_l |= device_get_unit(sc->dev) & 0xff;
  382                 mac_h = arc4random();
  383                 mac_h &= ~(3 << 24);    /* Clear multicast and LAA bits */
  384                 if (bootverbose) {
  385                         device_printf(sc->dev,
  386                             "Could not acquire MAC address. "
   387                             "Using a randomized one.\n");
  388                 }
  389         }
  390 
  391         addr[0] = (mac_h & 0xff000000) >> 24;
  392         addr[1] = (mac_h & 0x00ff0000) >> 16;
  393         addr[2] = (mac_h & 0x0000ff00) >> 8;
  394         addr[3] = (mac_h & 0x000000ff);
  395         addr[4] = (mac_l & 0x0000ff00) >> 8;
  396         addr[5] = (mac_l & 0x000000ff);
  397         return (0);
  398 }
  399 
  400 STATIC boolean_t
  401 mvneta_has_switch(device_t self)
  402 {
  403 #ifdef FDT
  404         return (mvneta_has_switch_fdt(self));
  405 #endif
  406 
  407         return (false);
  408 }
  409 
  410 STATIC int
  411 mvneta_dma_create(struct mvneta_softc *sc)
  412 {
  413         size_t maxsize, maxsegsz;
  414         size_t q;
  415         int error;
  416 
  417         /*
  418          * Create Tx DMA
  419          */
  420         maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
  421 
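               /*
                * lowaddr is capped at BUS_SPACE_MAXADDR_32BIT in the tags
                * below because the NETA descriptors hold 32-bit buffer
                * addresses.
                */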
  422         error = bus_dma_tag_create(
  423             bus_get_dma_tag(sc->dev),           /* parent */
  424             16, 0,                              /* alignment, boundary */
  425             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  426             BUS_SPACE_MAXADDR,                  /* highaddr */
  427             NULL, NULL,                         /* filtfunc, filtfuncarg */
  428             maxsize,                            /* maxsize */
  429             1,                                  /* nsegments */
  430             maxsegsz,                           /* maxsegsz */
  431             0,                                  /* flags */
  432             NULL, NULL,                         /* lockfunc, lockfuncarg */
  433             &sc->tx_dtag);                      /* dmat */
  434         if (error != 0) {
  435                 device_printf(sc->dev,
  436                     "Failed to create DMA tag for Tx descriptors.\n");
  437                 goto fail;
  438         }
  439         error = bus_dma_tag_create(
  440             bus_get_dma_tag(sc->dev),           /* parent */
  441             1, 0,                               /* alignment, boundary */
  442             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  443             BUS_SPACE_MAXADDR,                  /* highaddr */
  444             NULL, NULL,                         /* filtfunc, filtfuncarg */
  445             MVNETA_MAX_FRAME,                   /* maxsize */
  446             MVNETA_TX_SEGLIMIT,                 /* nsegments */
  447             MVNETA_MAX_FRAME,                   /* maxsegsz */
  448             BUS_DMA_ALLOCNOW,                   /* flags */
  449             NULL, NULL,                         /* lockfunc, lockfuncarg */
  450             &sc->txmbuf_dtag);
  451         if (error != 0) {
  452                 device_printf(sc->dev,
  453                     "Failed to create DMA tag for Tx mbufs.\n");
  454                 goto fail;
  455         }
  456 
  457         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
  458                 error = mvneta_ring_alloc_tx_queue(sc, q);
  459                 if (error != 0) {
  460                         device_printf(sc->dev,
  461                             "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
  462                         goto fail;
  463                 }
  464         }
  465 
  466         /*
  467          * Create Rx DMA.
  468          */
   469         /* Create tag for Rx descriptors */
  470         error = bus_dma_tag_create(
  471             bus_get_dma_tag(sc->dev),           /* parent */
  472             32, 0,                              /* alignment, boundary */
  473             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  474             BUS_SPACE_MAXADDR,                  /* highaddr */
  475             NULL, NULL,                         /* filtfunc, filtfuncarg */
  476             sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
  477             1,                                  /* nsegments */
  478             sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
  479             0,                                  /* flags */
  480             NULL, NULL,                         /* lockfunc, lockfuncarg */
  481             &sc->rx_dtag);                      /* dmat */
  482         if (error != 0) {
  483                 device_printf(sc->dev,
  484                     "Failed to create DMA tag for Rx descriptors.\n");
  485                 goto fail;
  486         }
  487 
  488         /* Create tag for Rx buffers */
  489         error = bus_dma_tag_create(
  490             bus_get_dma_tag(sc->dev),           /* parent */
  491             32, 0,                              /* alignment, boundary */
  492             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  493             BUS_SPACE_MAXADDR,                  /* highaddr */
  494             NULL, NULL,                         /* filtfunc, filtfuncarg */
  495             MVNETA_MAX_FRAME, 1,                /* maxsize, nsegments */
  496             MVNETA_MAX_FRAME,                   /* maxsegsz */
  497             0,                                  /* flags */
  498             NULL, NULL,                         /* lockfunc, lockfuncarg */
  499             &sc->rxbuf_dtag);                   /* dmat */
  500         if (error != 0) {
  501                 device_printf(sc->dev,
  502                     "Failed to create DMA tag for Rx buffers.\n");
  503                 goto fail;
  504         }
  505 
  506         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
  507                 if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
  508                         device_printf(sc->dev,
  509                             "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
  510                         goto fail;
  511                 }
  512         }
  513 
  514         return (0);
  515 fail:
  516         mvneta_detach(sc->dev);
  517 
  518         return (error);
  519 }
  520 
  521 /* ARGSUSED */
  522 int
  523 mvneta_attach(device_t self)
  524 {
  525         struct mvneta_softc *sc;
  526         if_t ifp;
  527         device_t child;
  528         int ifm_target;
  529         int q, error;
  530 #if !defined(__aarch64__)
  531         uint32_t reg;
  532 #endif
  533         clk_t clk;
  534 
  535         sc = device_get_softc(self);
  536         sc->dev = self;
  537 
  538         mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
  539 
  540         error = bus_alloc_resources(self, res_spec, sc->res);
  541         if (error) {
  542                 device_printf(self, "could not allocate resources\n");
  543                 return (ENXIO);
  544         }
  545 
  546         sc->version = MVNETA_READ(sc, MVNETA_PV);
  547         device_printf(self, "version is %x\n", sc->version);
  548         callout_init(&sc->tick_ch, 0);
  549 
  550         /*
   551          * Make sure the DMA engines are in reset state.
  552          */
  553         MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
  554         MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
  555 
  556         error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0,
  557             &clk);
  558         if (error != 0) {
  559 #if defined(__aarch64__)
  560                 device_printf(sc->dev,
  561                         "Cannot get clock, using default frequency: %d\n",
  562                         A3700_TCLK_250MHZ);
  563                 sc->clk_freq = A3700_TCLK_250MHZ;
  564 #else
  565                 device_printf(sc->dev,
  566                         "Cannot get clock, using get_tclk()\n");
  567                 sc->clk_freq = get_tclk();
  568 #endif
  569         } else {
  570                 error = clk_get_freq(clk, &sc->clk_freq);
  571                 if (error != 0) {
  572                         device_printf(sc->dev,
  573                                 "Cannot obtain frequency from parent clock\n");
  574                         bus_release_resources(sc->dev, res_spec, sc->res);
  575                         return (error);
  576                 }
  577         }
  578 
  579 #if !defined(__aarch64__)
  580         /*
  581          * Disable port snoop for buffers and descriptors
  582          * to avoid L2 caching of both without DRAM copy.
  583          * Obtain coherency settings from the first MBUS
  584          * window attribute.
  585          */
  586         if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
  587                 reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
  588                 reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
  589                 reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
  590                 MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
  591         }
  592 #endif
  593 
  594         error = bus_setup_intr(self, sc->res[1],
  595             INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
  596             &sc->ih_cookie[0]);
  597         if (error) {
  598                 device_printf(self, "could not setup %s\n",
  599                     mvneta_intrs[0].description);
  600                 mvneta_detach(self);
  601                 return (error);
  602         }
  603 
  604         /*
  605          * MAC address
  606          */
  607         if (mvneta_get_mac_address(sc, sc->enaddr)) {
  608                 device_printf(self, "no mac address.\n");
  609                 return (ENXIO);
  610         }
  611         mvneta_set_mac_address(sc, sc->enaddr);
  612 
  613         mvneta_disable_intr(sc);
  614 
  615         /* Allocate network interface */
  616         ifp = sc->ifp = if_alloc(IFT_ETHER);
  617         if (ifp == NULL) {
  618                 device_printf(self, "if_alloc() failed\n");
  619                 mvneta_detach(self);
  620                 return (ENOMEM);
  621         }
  622         if_initname(ifp, device_get_name(self), device_get_unit(self));
  623 
  624         /*
  625          * We can support 802.1Q VLAN-sized frames and jumbo
  626          * Ethernet frames.
  627          */
  628         if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
  629 
  630         if_setsoftc(ifp, sc);
  631         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
  632 #ifdef MVNETA_MULTIQUEUE
  633         if_settransmitfn(ifp, mvneta_transmit);
  634         if_setqflushfn(ifp, mvneta_qflush);
  635 #else /* !MVNETA_MULTIQUEUE */
  636         if_setstartfn(ifp, mvneta_start);
  637         if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1);
  638         if_setsendqready(ifp);
  639 #endif
  640         if_setinitfn(ifp, mvneta_init);
  641         if_setioctlfn(ifp, mvneta_ioctl);
  642 
  643         /*
  644          * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
  645          */
  646         if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
  647 
  648         /*
   649          * VLAN hardware tagging is not supported by this controller,
   650          * but it is required for VLAN hardware checksums, so the
   651          * tagging is handled in the driver.
  652          */
  653         if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
  654 
  655         /*
  656          * Currently IPv6 HW checksum is broken, so make sure it is disabled.
  657          */
  658         if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM_IPV6);
  659         if_setcapenable(ifp, if_getcapabilities(ifp));
  660 
  661         /*
  662          * Disabled option(s):
  663          * - Support for Large Receive Offload
  664          */
  665         if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
  666 
  667         if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
  668 
  669         sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */
  670 
  671         /*
  672          * Device DMA Buffer allocation.
  673          * Handles resource deallocation in case of failure.
  674          */
  675         error = mvneta_dma_create(sc);
  676         if (error != 0) {
  677                 mvneta_detach(self);
  678                 return (error);
  679         }
  680 
  681         /* Initialize queues */
  682         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
  683                 error = mvneta_ring_init_tx_queue(sc, q);
  684                 if (error != 0) {
  685                         mvneta_detach(self);
  686                         return (error);
  687                 }
  688         }
  689 
  690         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
  691                 error = mvneta_ring_init_rx_queue(sc, q);
  692                 if (error != 0) {
  693                         mvneta_detach(self);
  694                         return (error);
  695                 }
  696         }
  697 
  698         /*
  699          * Enable DMA engines and Initialize Device Registers.
  700          */
  701         MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
  702         MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
  703         MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
  704         mvneta_sc_lock(sc);
  705         mvneta_filter_setup(sc);
  706         mvneta_sc_unlock(sc);
  707         mvneta_initreg(ifp);
  708 
  709         /*
  710          * Now MAC is working, setup MII.
  711          */
  712         if (mii_init == 0) {
  713                 /*
   714                  * The MII bus is shared by all MACs and all PHYs in the SoC,
   715                  * so serializing the bus access should be safe.
  716                  */
  717                 mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
  718                 mii_init = 1;
  719         }
  720 
  721         /* Attach PHY(s) */
  722         if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
  723                 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
  724                     mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
  725                     MII_OFFSET_ANY, 0);
  726                 if (error != 0) {
  727                         device_printf(self, "MII attach failed, error: %d\n",
  728                             error);
  729                         ether_ifdetach(sc->ifp);
  730                         mvneta_detach(self);
  731                         return (error);
  732                 }
  733                 sc->mii = device_get_softc(sc->miibus);
  734                 sc->phy_attached = 1;
  735 
  736                 /* Disable auto-negotiation in MAC - rely on PHY layer */
  737                 mvneta_update_autoneg(sc, FALSE);
  738         } else if (sc->use_inband_status == TRUE) {
  739                 /* In-band link status */
  740                 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
  741                     mvneta_mediastatus);
  742 
  743                 /* Configure media */
  744                 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
  745                     0, NULL);
  746                 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
  747                 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
  748                     0, NULL);
  749                 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
  750                 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
  751                     0, NULL);
  752                 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
  753                 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
  754 
  755                 /* Enable auto-negotiation */
  756                 mvneta_update_autoneg(sc, TRUE);
  757 
  758                 mvneta_sc_lock(sc);
  759                 if (MVNETA_IS_LINKUP(sc))
  760                         mvneta_linkup(sc);
  761                 else
  762                         mvneta_linkdown(sc);
  763                 mvneta_sc_unlock(sc);
  764 
  765         } else {
  766                 /* Fixed-link, use predefined values */
  767                 mvneta_update_autoneg(sc, FALSE);
  768                 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
  769                     mvneta_mediastatus);
  770 
  771                 ifm_target = IFM_ETHER;
  772                 switch (sc->phy_speed) {
  773                 case 2500:
  774                         if (sc->phy_mode != MVNETA_PHY_SGMII &&
  775                             sc->phy_mode != MVNETA_PHY_QSGMII) {
  776                                 device_printf(self,
  777                                     "2.5G speed can work only in (Q)SGMII mode\n");
  778                                 ether_ifdetach(sc->ifp);
  779                                 mvneta_detach(self);
  780                                 return (ENXIO);
  781                         }
  782                         ifm_target |= IFM_2500_T;
  783                         break;
  784                 case 1000:
  785                         ifm_target |= IFM_1000_T;
  786                         break;
  787                 case 100:
  788                         ifm_target |= IFM_100_TX;
  789                         break;
  790                 case 10:
  791                         ifm_target |= IFM_10_T;
  792                         break;
  793                 default:
  794                         ether_ifdetach(sc->ifp);
  795                         mvneta_detach(self);
  796                         return (ENXIO);
  797                 }
  798 
  799                 if (sc->phy_fdx)
  800                         ifm_target |= IFM_FDX;
  801                 else
  802                         ifm_target |= IFM_HDX;
  803 
  804                 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
  805                 ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
  806                 if_link_state_change(sc->ifp, LINK_STATE_UP);
  807 
  808                 if (mvneta_has_switch(self)) {
  809                         if (bootverbose)
  810                                 device_printf(self, "This device is attached to a switch\n");
  811                         child = device_add_child(sc->dev, "mdio", -1);
  812                         if (child == NULL) {
  813                                 ether_ifdetach(sc->ifp);
  814                                 mvneta_detach(self);
  815                                 return (ENXIO);
  816                         }
  817                         bus_generic_attach(sc->dev);
  818                         bus_generic_attach(child);
  819                 }
  820 
  821                 /* Configure MAC media */
  822                 mvneta_update_media(sc, ifm_target);
  823         }
  824 
  825         ether_ifattach(ifp, sc->enaddr);
  826 
  827         callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
  828 
  829         sysctl_mvneta_init(sc);
  830 
  831         return (0);
  832 }
  833 
  834 STATIC int
  835 mvneta_detach(device_t dev)
  836 {
  837         struct mvneta_softc *sc;
  838         int q;
  839 
  840         sc = device_get_softc(dev);
  841 
  842         if (device_is_attached(dev)) {
  843                 mvneta_stop(sc);
  844                 callout_drain(&sc->tick_ch);
  845                 ether_ifdetach(sc->ifp);
  846         }
  847 
  848         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
  849                 mvneta_ring_dealloc_rx_queue(sc, q);
  850         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
  851                 mvneta_ring_dealloc_tx_queue(sc, q);
  852 
  853         device_delete_children(dev);
  854 
  855         if (sc->ih_cookie[0] != NULL)
  856                 bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);
  857 
  858         if (sc->tx_dtag != NULL)
  859                 bus_dma_tag_destroy(sc->tx_dtag);
  860         if (sc->rx_dtag != NULL)
  861                 bus_dma_tag_destroy(sc->rx_dtag);
  862         if (sc->txmbuf_dtag != NULL)
  863                 bus_dma_tag_destroy(sc->txmbuf_dtag);
  864         if (sc->rxbuf_dtag != NULL)
  865                 bus_dma_tag_destroy(sc->rxbuf_dtag);
  866 
  867         bus_release_resources(dev, res_spec, sc->res);
  868 
  869         if (sc->ifp)
  870                 if_free(sc->ifp);
  871 
  872         if (mtx_initialized(&sc->mtx))
  873                 mtx_destroy(&sc->mtx);
  874 
  875         return (0);
  876 }
  877 
  878 /*
  879  * MII
  880  */
  881 STATIC int
  882 mvneta_miibus_readreg(device_t dev, int phy, int reg)
  883 {
  884         struct mvneta_softc *sc;
  885         if_t ifp;
  886         uint32_t smi, val;
  887         int i;
  888 
  889         sc = device_get_softc(dev);
  890         ifp = sc->ifp;
  891 
  892         mtx_lock(&mii_mutex);
  893 
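               /*
                * SMI read handshake: wait for the controller to go idle,
                * post the read command, wait for BUSY to clear again and
                * then poll until READVALID flags the returned data word.
                */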
  894         for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
  895                 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
  896                         break;
  897                 DELAY(1);
  898         }
  899         if (i == MVNETA_PHY_TIMEOUT) {
  900                 if_printf(ifp, "SMI busy timeout\n");
  901                 mtx_unlock(&mii_mutex);
  902                 return (-1);
  903         }
  904 
  905         smi = MVNETA_SMI_PHYAD(phy) |
  906             MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
  907         MVNETA_WRITE(sc, MVNETA_SMI, smi);
  908 
  909         for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
  910                 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
  911                         break;
  912                 DELAY(1);
  913         }
  914 
  915         if (i == MVNETA_PHY_TIMEOUT) {
  916                 if_printf(ifp, "SMI busy timeout\n");
  917                 mtx_unlock(&mii_mutex);
  918                 return (-1);
  919         }
  920         for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
  921                 smi = MVNETA_READ(sc, MVNETA_SMI);
  922                 if (smi & MVNETA_SMI_READVALID)
  923                         break;
  924                 DELAY(1);
  925         }
  926 
  927         if (i == MVNETA_PHY_TIMEOUT) {
  928                 if_printf(ifp, "SMI busy timeout\n");
  929                 mtx_unlock(&mii_mutex);
  930                 return (-1);
  931         }
  932 
  933         mtx_unlock(&mii_mutex);
  934 
  935 #ifdef MVNETA_KTR
   936         CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", if_name(ifp), i,
  937             MVNETA_PHY_TIMEOUT);
  938 #endif
  939 
  940         val = smi & MVNETA_SMI_DATA_MASK;
  941 
  942 #ifdef MVNETA_KTR
   943         CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_name(ifp), phy,
  944             reg, val);
  945 #endif
  946         return (val);
  947 }
  948 
  949 STATIC int
  950 mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
  951 {
  952         struct mvneta_softc *sc;
  953         if_t ifp;
  954         uint32_t smi;
  955         int i;
  956 
  957         sc = device_get_softc(dev);
  958         ifp = sc->ifp;
  959 #ifdef MVNETA_KTR
  960         CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_name(ifp),
  961             phy, reg, val);
  962 #endif
  963 
  964         mtx_lock(&mii_mutex);
  965 
  966         for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
  967                 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
  968                         break;
  969                 DELAY(1);
  970         }
  971         if (i == MVNETA_PHY_TIMEOUT) {
  972                 if_printf(ifp, "SMI busy timeout\n");
  973                 mtx_unlock(&mii_mutex);
  974                 return (0);
  975         }
  976 
  977         smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
  978             MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
  979         MVNETA_WRITE(sc, MVNETA_SMI, smi);
  980 
  981         for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
  982                 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
  983                         break;
  984                 DELAY(1);
  985         }
  986 
  987         mtx_unlock(&mii_mutex);
  988 
  989         if (i == MVNETA_PHY_TIMEOUT)
  990                 if_printf(ifp, "phy write timed out\n");
  991 
  992         return (0);
  993 }
  994 
  995 STATIC void
  996 mvneta_portup(struct mvneta_softc *sc)
  997 {
  998         int q;
  999 
 1000         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 1001                 mvneta_rx_lockq(sc, q);
 1002                 mvneta_rx_queue_enable(sc->ifp, q);
 1003                 mvneta_rx_unlockq(sc, q);
 1004         }
 1005 
 1006         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 1007                 mvneta_tx_lockq(sc, q);
 1008                 mvneta_tx_queue_enable(sc->ifp, q);
 1009                 mvneta_tx_unlockq(sc, q);
 1010         }
 1011 
 1012 }
 1013 
 1014 STATIC void
 1015 mvneta_portdown(struct mvneta_softc *sc)
 1016 {
 1017         struct mvneta_rx_ring *rx;
 1018         struct mvneta_tx_ring *tx;
 1019         int q, cnt;
 1020         uint32_t reg;
 1021 
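               /*
                * Mark every queue disabled first so the driver stops feeding
                * the rings, then ask the hardware to stop and poll until it
                * reports the queues idle.
                */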
 1022         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 1023                 rx = MVNETA_RX_RING(sc, q);
 1024                 mvneta_rx_lockq(sc, q);
 1025                 rx->queue_status = MVNETA_QUEUE_DISABLED;
 1026                 mvneta_rx_unlockq(sc, q);
 1027         }
 1028 
 1029         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 1030                 tx = MVNETA_TX_RING(sc, q);
 1031                 mvneta_tx_lockq(sc, q);
 1032                 tx->queue_status = MVNETA_QUEUE_DISABLED;
 1033                 mvneta_tx_unlockq(sc, q);
 1034         }
 1035 
 1036         /* Wait for all Rx activity to terminate. */
 1037         reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
 1038         reg = MVNETA_RQC_DIS(reg);
 1039         MVNETA_WRITE(sc, MVNETA_RQC, reg);
 1040         cnt = 0;
 1041         do {
 1042                 if (cnt >= RX_DISABLE_TIMEOUT) {
 1043                         if_printf(sc->ifp,
  1044                             "timeout waiting for Rx to stop. rqc 0x%x\n", reg);
 1045                         break;
 1046                 }
 1047                 cnt++;
 1048                 reg = MVNETA_READ(sc, MVNETA_RQC);
 1049         } while ((reg & MVNETA_RQC_EN_MASK) != 0);
 1050 
 1051         /* Wait for all Tx activity to terminate. */
 1052         reg  = MVNETA_READ(sc, MVNETA_PIE);
 1053         reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
 1054         MVNETA_WRITE(sc, MVNETA_PIE, reg);
 1055 
 1056         reg  = MVNETA_READ(sc, MVNETA_PRXTXTIM);
 1057         reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
 1058         MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
 1059 
 1060         reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
 1061         reg = MVNETA_TQC_DIS(reg);
 1062         MVNETA_WRITE(sc, MVNETA_TQC, reg);
 1063         cnt = 0;
 1064         do {
 1065                 if (cnt >= TX_DISABLE_TIMEOUT) {
 1066                         if_printf(sc->ifp,
  1067                             "timeout waiting for Tx to stop. tqc 0x%x\n", reg);
 1068                         break;
 1069                 }
 1070                 cnt++;
 1071                 reg = MVNETA_READ(sc, MVNETA_TQC);
 1072         } while ((reg & MVNETA_TQC_EN_MASK) != 0);
 1073 
  1074         /* Wait until the Tx FIFO is empty. */
 1075         cnt = 0;
 1076         do {
 1077                 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
 1078                         if_printf(sc->ifp,
  1079                             "timeout waiting for Tx FIFO to drain. ps0 0x%x\n", reg);
 1080                         break;
 1081                 }
 1082                 cnt++;
 1083                 reg = MVNETA_READ(sc, MVNETA_PS0);
 1084         } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
 1085             ((reg & MVNETA_PS0_TXINPROG) != 0));
 1086 }
 1087 
 1088 /*
 1089  * Device Register Initialization
  1090  *  Reset the device registers to the driver's default values.
  1091  *  The device itself is not enabled here.
 1092  */
 1093 STATIC int
 1094 mvneta_initreg(if_t ifp)
 1095 {
 1096         struct mvneta_softc *sc;
 1097         int q;
 1098         uint32_t reg;
 1099 
 1100         sc = if_getsoftc(ifp);
 1101 #ifdef MVNETA_KTR
 1102         CTR1(KTR_SPARE2, "%s initializing device register", if_name(ifp));
 1103 #endif
 1104 
 1105         /* Disable Legacy WRR, Disable EJP, Release from reset. */
 1106         MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
 1107         /* Enable mbus retry. */
 1108         MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
 1109 
 1110         /* Init TX/RX Queue Registers */
 1111         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 1112                 mvneta_rx_lockq(sc, q);
 1113                 if (mvneta_rx_queue_init(ifp, q) != 0) {
 1114                         device_printf(sc->dev,
  1115                             "initialization failed: cannot initialize Rx queue\n");
 1116                         mvneta_rx_unlockq(sc, q);
 1117                         return (ENOBUFS);
 1118                 }
 1119                 mvneta_rx_unlockq(sc, q);
 1120         }
 1121         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 1122                 mvneta_tx_lockq(sc, q);
 1123                 if (mvneta_tx_queue_init(ifp, q) != 0) {
 1124                         device_printf(sc->dev,
  1125                             "initialization failed: cannot initialize Tx queue\n");
 1126                         mvneta_tx_unlockq(sc, q);
 1127                         return (ENOBUFS);
 1128                 }
 1129                 mvneta_tx_unlockq(sc, q);
 1130         }
 1131 
 1132         /*
 1133          * Ethernet Unit Control - disable automatic PHY management by HW.
  1134          * If the port uses an SMI-controlled PHY, poll its status with
 1135          * mii_tick() and update MAC settings accordingly.
 1136          */
 1137         reg = MVNETA_READ(sc, MVNETA_EUC);
 1138         reg &= ~MVNETA_EUC_POLLING;
 1139         MVNETA_WRITE(sc, MVNETA_EUC, reg);
 1140 
 1141         /* EEE: Low Power Idle */
 1142         reg  = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
 1143         reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
 1144         MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
 1145 
 1146         reg  = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
 1147         MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
 1148 
 1149         reg = MVNETA_LPIC2_MUSTSET;
 1150         MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
 1151 
 1152         /* Port MAC Control set 0 */
 1153         reg  = MVNETA_PMACC0_MUSTSET;   /* must write 0x1 */
 1154         reg &= ~MVNETA_PMACC0_PORTEN;   /* port is still disabled */
 1155         reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
 1156         MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
 1157 
 1158         /* Port MAC Control set 2 */
 1159         reg = MVNETA_READ(sc, MVNETA_PMACC2);
 1160         switch (sc->phy_mode) {
 1161         case MVNETA_PHY_QSGMII:
 1162                 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
 1163                 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
 1164                 break;
 1165         case MVNETA_PHY_SGMII:
 1166                 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
 1167                 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
 1168                 break;
 1169         case MVNETA_PHY_RGMII:
 1170         case MVNETA_PHY_RGMII_ID:
 1171                 reg |= MVNETA_PMACC2_RGMIIEN;
 1172                 break;
 1173         }
 1174         reg |= MVNETA_PMACC2_MUSTSET;
 1175         reg &= ~MVNETA_PMACC2_PORTMACRESET;
 1176         MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
 1177 
 1178         /* Port Configuration Extended: enable Tx CRC generation */
 1179         reg = MVNETA_READ(sc, MVNETA_PXCX);
 1180         reg &= ~MVNETA_PXCX_TXCRCDIS;
 1181         MVNETA_WRITE(sc, MVNETA_PXCX, reg);
 1182 
  1183         /* Clear the MIB counter registers (they are cleared on read). */
 1184         mvneta_sc_lock(sc);
 1185         mvneta_clear_mib(sc);
 1186         mvneta_sc_unlock(sc);
 1187 
 1188         /* Set SDC register except IPGINT bits */
 1189         reg  = MVNETA_SDC_RXBSZ_16_64BITWORDS;
 1190         reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
 1191         reg |= MVNETA_SDC_BLMR;
 1192         reg |= MVNETA_SDC_BLMT;
 1193         MVNETA_WRITE(sc, MVNETA_SDC, reg);
 1194 
 1195         return (0);
 1196 }
 1197 
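       /*
        * bus_dmamap_load() callback: the rings are loaded as a single
        * contiguous segment (the tags use nsegments = 1), so simply record
        * that segment's bus address for the caller.
        */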
 1198 STATIC void
 1199 mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
 1200 {
 1201 
 1202         if (error != 0)
 1203                 return;
 1204         *(bus_addr_t *)arg = segs->ds_addr;
 1205 }
 1206 
 1207 STATIC int
 1208 mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
 1209 {
 1210         struct mvneta_rx_ring *rx;
 1211         struct mvneta_buf *rxbuf;
 1212         bus_dmamap_t dmap;
 1213         int i, error;
 1214 
 1215         if (q >= MVNETA_RX_QNUM_MAX)
 1216                 return (EINVAL);
 1217 
 1218         rx = MVNETA_RX_RING(sc, q);
 1219         mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
 1220         /* Allocate DMA memory for Rx descriptors */
 1221         error = bus_dmamem_alloc(sc->rx_dtag,
 1222             (void**)&(rx->desc),
 1223             BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1224             &rx->desc_map);
 1225         if (error != 0 || rx->desc == NULL)
 1226                 goto fail;
 1227         error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
 1228             rx->desc,
 1229             sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
 1230             mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
 1231         if (error != 0)
 1232                 goto fail;
 1233 
 1234         for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
 1235                 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
 1236                 if (error != 0) {
 1237                         device_printf(sc->dev,
 1238                             "Failed to create DMA map for Rx buffer num: %d\n", i);
 1239                         goto fail;
 1240                 }
 1241                 rxbuf = &rx->rxbuf[i];
 1242                 rxbuf->dmap = dmap;
 1243                 rxbuf->m = NULL;
 1244         }
 1245 
 1246         return (0);
 1247 fail:
 1248         mvneta_rx_lockq(sc, q);
 1249         mvneta_ring_flush_rx_queue(sc, q);
 1250         mvneta_rx_unlockq(sc, q);
 1251         mvneta_ring_dealloc_rx_queue(sc, q);
 1252         device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
 1253         return (error);
 1254 }
 1255 
 1256 STATIC int
 1257 mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
 1258 {
 1259         struct mvneta_tx_ring *tx;
 1260         int error;
 1261 
 1262         if (q >= MVNETA_TX_QNUM_MAX)
 1263                 return (EINVAL);
 1264         tx = MVNETA_TX_RING(sc, q);
 1265         mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
 1266         error = bus_dmamem_alloc(sc->tx_dtag,
 1267             (void**)&(tx->desc),
 1268             BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1269             &tx->desc_map);
 1270         if (error != 0 || tx->desc == NULL)
 1271                 goto fail;
 1272         error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
 1273             tx->desc,
 1274             sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
 1275             mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
 1276         if (error != 0)
 1277                 goto fail;
 1278 
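               /*
                * In multiqueue mode each TxQ gets its own buf_ring;
                * mvneta_transmit() queues mbufs into it and the per-queue
                * Tx task drains it into the descriptor ring.
                */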
 1279 #ifdef MVNETA_MULTIQUEUE
 1280         tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
 1281             &tx->ring_mtx);
 1282         if (tx->br == NULL) {
 1283                 device_printf(sc->dev,
 1284                     "Could not setup buffer ring for TxQ(%d)\n", q);
 1285                 error = ENOMEM;
 1286                 goto fail;
 1287         }
 1288 #endif
 1289 
 1290         return (0);
 1291 fail:
 1292         mvneta_tx_lockq(sc, q);
 1293         mvneta_ring_flush_tx_queue(sc, q);
 1294         mvneta_tx_unlockq(sc, q);
 1295         mvneta_ring_dealloc_tx_queue(sc, q);
 1296         device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
 1297         return (error);
 1298 }
 1299 
 1300 STATIC void
 1301 mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
 1302 {
 1303         struct mvneta_tx_ring *tx;
 1304         struct mvneta_buf *txbuf;
 1305         void *kva;
 1306         int error;
 1307         int i;
 1308 
 1309         if (q >= MVNETA_TX_QNUM_MAX)
 1310                 return;
 1311         tx = MVNETA_TX_RING(sc, q);
 1312 
 1313         if (tx->taskq != NULL) {
  1314                 /* Cancel the Tx task; drain while it is still running. */
 1315                 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
 1316                         taskqueue_drain(tx->taskq, &tx->task);
 1317         }
 1318 #ifdef MVNETA_MULTIQUEUE
 1319         if (tx->br != NULL)
 1320                 drbr_free(tx->br, M_DEVBUF);
 1321 #endif
 1322 
 1323         if (sc->txmbuf_dtag != NULL) {
 1324                 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
 1325                         txbuf = &tx->txbuf[i];
 1326                         if (txbuf->dmap != NULL) {
 1327                                 error = bus_dmamap_destroy(sc->txmbuf_dtag,
 1328                                     txbuf->dmap);
 1329                                 if (error != 0) {
 1330                                         panic("%s: map busy for Tx descriptor (Q%d, %d)",
 1331                                             __func__, q, i);
 1332                                 }
 1333                         }
 1334                 }
 1335         }
 1336 
 1337         if (tx->desc_pa != 0)
 1338                 bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
 1339 
 1340         kva = (void *)tx->desc;
 1341         if (kva != NULL)
 1342                 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
 1343 
 1344         if (mtx_name(&tx->ring_mtx) != NULL)
 1345                 mtx_destroy(&tx->ring_mtx);
 1346 
 1347         memset(tx, 0, sizeof(*tx));
 1348 }
 1349 
 1350 STATIC void
 1351 mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
 1352 {
 1353         struct mvneta_rx_ring *rx;
 1354         struct lro_ctrl *lro;
 1355         void *kva;
 1356 
 1357         if (q >= MVNETA_RX_QNUM_MAX)
 1358                 return;
 1359 
 1360         rx = MVNETA_RX_RING(sc, q);
 1361 
 1362         if (rx->desc_pa != 0)
 1363                 bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
 1364 
 1365         kva = (void *)rx->desc;
 1366         if (kva != NULL)
 1367                 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
 1368 
 1369         lro = &rx->lro;
 1370         tcp_lro_free(lro);
 1371 
 1372         if (mtx_name(&rx->ring_mtx) != NULL)
 1373                 mtx_destroy(&rx->ring_mtx);
 1374 
 1375         memset(rx, 0, sizeof(*rx));
 1376 }
 1377 
 1378 STATIC int
 1379 mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
 1380 {
 1381         struct mvneta_rx_ring *rx;
 1382         struct lro_ctrl *lro;
 1383         int error;
 1384 
 1385         if (q >= MVNETA_RX_QNUM_MAX)
 1386                 return (0);
 1387 
 1388         rx = MVNETA_RX_RING(sc, q);
 1389         rx->dma = rx->cpu = 0;
 1390         rx->queue_th_received = MVNETA_RXTH_COUNT;
 1391         rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */
 1392 
 1393         /* Initialize LRO */
 1394         rx->lro_enabled = FALSE;
 1395         if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) {
 1396                 lro = &rx->lro;
 1397                 error = tcp_lro_init(lro);
 1398                 if (error != 0)
 1399                         device_printf(sc->dev, "LRO Initialization failed!\n");
 1400                 else {
 1401                         rx->lro_enabled = TRUE;
 1402                         lro->ifp = sc->ifp;
 1403                 }
 1404         }
 1405 
 1406         return (0);
 1407 }
 1408 
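      /*
       * Create one DMA map per Tx ring slot for the later
       * bus_dmamap_load_mbuf_sg() calls, reset the software ring state,
       * and set up the per-queue fast taskqueue used for deferred
       * transmission.
       */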
 1409 STATIC int
 1410 mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
 1411 {
 1412         struct mvneta_tx_ring *tx;
 1413         struct mvneta_buf *txbuf;
 1414         int i, error;
 1415 
 1416         if (q >= MVNETA_TX_QNUM_MAX)
 1417                 return (0);
 1418 
 1419         tx = MVNETA_TX_RING(sc, q);
 1420 
 1421         /* Tx handle */
 1422         for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
 1423                 txbuf = &tx->txbuf[i];
 1424                 txbuf->m = NULL;
 1425                 /* Tx handle needs a DMA map for bus_dmamap_load_mbuf_sg() */
 1426                 error = bus_dmamap_create(sc->txmbuf_dtag, 0,
 1427                     &txbuf->dmap);
 1428                 if (error != 0) {
 1429                         device_printf(sc->dev,
 1430                             "can't create dma map (tx ring %d)\n", i);
 1431                         return (error);
 1432                 }
 1433         }
 1434         tx->dma = tx->cpu = 0;
 1435         tx->used = 0;
 1436         tx->drv_error = 0;
 1437         tx->queue_status = MVNETA_QUEUE_DISABLED;
 1438         tx->queue_hung = FALSE;
 1439 
 1440         tx->ifp = sc->ifp;
 1441         tx->qidx = q;
 1442         TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
 1443         tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
 1444             taskqueue_thread_enqueue, &tx->taskq);
 1445         taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
 1446             device_get_nameunit(sc->dev), q);
 1447 
 1448         return (0);
 1449 }
 1450 
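      /*
       * Drop every pending Tx mbuf on queue q and reset the ring indexes.
       * The caller must hold the queue lock, as asserted below.
       */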
 1451 STATIC void
 1452 mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
 1453 {
 1454         struct mvneta_tx_ring *tx;
 1455         struct mvneta_buf *txbuf;
 1456         int i;
 1457 
 1458         tx = MVNETA_TX_RING(sc, q);
 1459         KASSERT_TX_MTX(sc, q);
 1460 
 1461         /* Tx handle */
 1462         for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
 1463                 txbuf = &tx->txbuf[i];
 1464                 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
 1465                 if (txbuf->m != NULL) {
 1466                         m_freem(txbuf->m);
 1467                         txbuf->m = NULL;
 1468                 }
 1469         }
 1470         tx->dma = tx->cpu = 0;
 1471         tx->used = 0;
 1472 }
 1473 
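      /*
       * Free every Rx buffer on queue q and reset the ring indexes.
       * The caller must hold the queue lock, as asserted below.
       */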
 1474 STATIC void
 1475 mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
 1476 {
 1477         struct mvneta_rx_ring *rx;
 1478         struct mvneta_buf *rxbuf;
 1479         int i;
 1480 
 1481         rx = MVNETA_RX_RING(sc, q);
 1482         KASSERT_RX_MTX(sc, q);
 1483 
 1484         /* Rx handle */
 1485         for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
 1486                 rxbuf = &rx->rxbuf[i];
 1487                 mvneta_rx_buf_free(sc, rxbuf);
 1488         }
 1489         rx->dma = rx->cpu = 0;
 1490 }
 1491 
 1492 /*
 1493  * Rx/Tx Queue Control
 1494  */
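      /*
       * Program the hardware view of Rx queue q: the descriptor ring
       * physical address, the ring size, the receive buffer size and the
       * packet offset.  The buffer size and offset are written shifted
       * right by three, which suggests the hardware takes these fields
       * in 8-byte units.
       */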
 1495 STATIC int
 1496 mvneta_rx_queue_init(if_t ifp, int q)
 1497 {
 1498         struct mvneta_softc *sc;
 1499         struct mvneta_rx_ring *rx;
 1500         uint32_t reg;
 1501 
 1502         sc = if_getsoftc(ifp);
 1503         KASSERT_RX_MTX(sc, q);
 1504         rx = MVNETA_RX_RING(sc, q);
 1505         DASSERT(rx->desc_pa != 0);
 1506 
 1507         /* descriptor address */
 1508         MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
 1509 
 1510         /* Rx buffer size and descriptor ring size */
 1511         reg  = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
 1512         reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
 1513         MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
 1514 #ifdef MVNETA_KTR
 1515         CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", if_name(ifp), q,
 1516             MVNETA_READ(sc, MVNETA_PRXDQS(q)));
 1517 #endif
 1518         /* Rx packet offset address */
 1519         reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
 1520         MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
 1521 #ifdef MVNETA_KTR
 1522         CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", if_name(ifp), q,
 1523             MVNETA_READ(sc, MVNETA_PRXC(q)));
 1524 #endif
 1525 
 1526         /* If DMA is not working, the register is not updated. */
 1527         DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
 1528         return (0);
 1529 }
 1530 
 1531 STATIC int
 1532 mvneta_tx_queue_init(if_t ifp, int q)
 1533 {
 1534         struct mvneta_softc *sc;
 1535         struct mvneta_tx_ring *tx;
 1536         uint32_t reg;
 1537 
 1538         sc = if_getsoftc(ifp);
 1539         KASSERT_TX_MTX(sc, q);
 1540         tx = MVNETA_TX_RING(sc, q);
 1541         DASSERT(tx->desc_pa != 0);
 1542 
 1543         /* descriptor address */
 1544         MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
 1545 
 1546         /* descriptor ring size */
 1547         reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
 1548         MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
 1549 
 1550         /* If DMA is not working, the register is not updated. */
 1551         DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
 1552         return (0);
 1553 }
 1554 
 1555 STATIC int
 1556 mvneta_rx_queue_enable(if_t ifp, int q)
 1557 {
 1558         struct mvneta_softc *sc;
 1559         struct mvneta_rx_ring *rx;
 1560         uint32_t reg;
 1561 
 1562         sc = if_getsoftc(ifp);
 1563         rx = MVNETA_RX_RING(sc, q);
 1564         KASSERT_RX_MTX(sc, q);
 1565 
 1566         /* Set Rx interrupt threshold */
 1567         reg  = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
 1568         MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
 1569 
 1570         reg  = MVNETA_PRXITTH_RITT(rx->queue_th_time);
 1571         MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
 1572 
 1573         /* Unmask RXTX_TH Intr. */
 1574         reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
 1575         reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
 1576         MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
 1577 
 1578         /* Enable Rx queue */
 1579         reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
 1580         reg |= MVNETA_RQC_ENQ(q);
 1581         MVNETA_WRITE(sc, MVNETA_RQC, reg);
 1582 
 1583         rx->queue_status = MVNETA_QUEUE_WORKING;
 1584         return (0);
 1585 }
 1586 
 1587 STATIC int
 1588 mvneta_tx_queue_enable(if_t ifp, int q)
 1589 {
 1590         struct mvneta_softc *sc;
 1591         struct mvneta_tx_ring *tx;
 1592 
 1593         sc = if_getsoftc(ifp);
 1594         tx = MVNETA_TX_RING(sc, q);
 1595         KASSERT_TX_MTX(sc, q);
 1596 
 1597         /* Enable Tx queue */
 1598         MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
 1599 
 1600         tx->queue_status = MVNETA_QUEUE_IDLE;
 1601         tx->queue_hung = FALSE;
 1602         return (0);
 1603 }
 1604 
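      /*
       * Per-queue lock helpers.  The DASSERTs bound-check the queue index
       * in debug builds; the mutexes themselves live in the ring structures.
       */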
 1605 STATIC __inline void
 1606 mvneta_rx_lockq(struct mvneta_softc *sc, int q)
 1607 {
 1608 
 1609         DASSERT(q >= 0);
 1610         DASSERT(q < MVNETA_RX_QNUM_MAX);
 1611         mtx_lock(&sc->rx_ring[q].ring_mtx);
 1612 }
 1613 
 1614 STATIC __inline void
 1615 mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
 1616 {
 1617 
 1618         DASSERT(q >= 0);
 1619         DASSERT(q < MVNETA_RX_QNUM_MAX);
 1620         mtx_unlock(&sc->rx_ring[q].ring_mtx);
 1621 }
 1622 
 1623 STATIC __inline int __unused
 1624 mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
 1625 {
 1626 
 1627         DASSERT(q >= 0);
 1628         DASSERT(q < MVNETA_TX_QNUM_MAX);
 1629         return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
 1630 }
 1631 
 1632 STATIC __inline void
 1633 mvneta_tx_lockq(struct mvneta_softc *sc, int q)
 1634 {
 1635 
 1636         DASSERT(q >= 0);
 1637         DASSERT(q < MVNETA_TX_QNUM_MAX);
 1638         mtx_lock(&sc->tx_ring[q].ring_mtx);
 1639 }
 1640 
 1641 STATIC __inline void
 1642 mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
 1643 {
 1644 
 1645         DASSERT(q >= 0);
 1646         DASSERT(q < MVNETA_TX_QNUM_MAX);
 1647         mtx_unlock(&sc->tx_ring[q].ring_mtx);
 1648 }
 1649 
 1650 /*
 1651  * Interrupt Handlers
 1652  */
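      /*
       * Mask every interrupt group (EU, RXTX_TH, RXTX and MISC), clear
       * the pending-cause registers, and clear the per-port interrupt
       * enable register.
       */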
 1653 STATIC void
 1654 mvneta_disable_intr(struct mvneta_softc *sc)
 1655 {
 1656 
 1657         MVNETA_WRITE(sc, MVNETA_EUIM, 0);
 1658         MVNETA_WRITE(sc, MVNETA_EUIC, 0);
 1659         MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
 1660         MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
 1661         MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
 1662         MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
 1663         MVNETA_WRITE(sc, MVNETA_PMIM, 0);
 1664         MVNETA_WRITE(sc, MVNETA_PMIC, 0);
 1665         MVNETA_WRITE(sc, MVNETA_PIE, 0);
 1666 }
 1667 
 1668 STATIC void
 1669 mvneta_enable_intr(struct mvneta_softc *sc)
 1670 {
 1671         uint32_t reg;
 1672 
 1673         /* Enable the summary bit to check all interrupt causes. */
 1674         reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
 1675         reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
 1676         MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
 1677 
 1678         if (!sc->phy_attached || sc->use_inband_status) {
 1679                 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
 1680                 MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
 1681                     MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
 1682         }
 1683 
 1684         /* Enable all queue interrupts */
 1685         reg  = MVNETA_READ(sc, MVNETA_PIE);
 1686         reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
 1687         reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
 1688         MVNETA_WRITE(sc, MVNETA_PIE, reg);
 1689 }
 1690 
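      /*
       * Top-half handler for the RXTX_TH interrupt: read and acknowledge
       * the cause register, service the MISC summary first (link events),
       * then process received packets on queue 0.
       */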
 1691 STATIC void
 1692 mvneta_rxtxth_intr(void *arg)
 1693 {
 1694         struct mvneta_softc *sc;
 1695         if_t ifp;
 1696         uint32_t ic, queues;
 1697 
 1698         sc = arg;
 1699         ifp = sc->ifp;
 1700 #ifdef MVNETA_KTR
 1701         CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", if_name(ifp));
 1702 #endif
 1703         ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
 1704         if (ic == 0)
 1705                 return;
 1706         MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
 1707 
 1708         /* Ack maintenance interrupt first */
 1709         if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
 1710             (!sc->phy_attached || sc->use_inband_status))) {
 1711                 mvneta_sc_lock(sc);
 1712                 mvneta_misc_intr(sc);
 1713                 mvneta_sc_unlock(sc);
 1714         }
 1715         if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
 1716                 return;
 1717         /* RxTxTH interrupt */
 1718         queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
 1719         if (__predict_true(queues)) {
 1720 #ifdef MVNETA_KTR
 1721                 CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", if_name(ifp));
 1722 #endif
 1723                 /* At the moment the driver supports only one Rx queue. */
 1724                 DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
 1725                 mvneta_rx(sc, 0, 0);
 1726         }
 1727 }
 1728 
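      /*
       * Drain the port MISC cause register until it reads zero,
       * dispatching PHY/link-related causes to mvneta_link_isr().
       * Returns nonzero if any cause was claimed.
       */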
 1729 STATIC int
 1730 mvneta_misc_intr(struct mvneta_softc *sc)
 1731 {
 1732         uint32_t ic;
 1733         int claimed = 0;
 1734 
 1735 #ifdef MVNETA_KTR
 1736         CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp));
 1737 #endif
 1738         KASSERT_SC_MTX(sc);
 1739 
 1740         for (;;) {
 1741                 ic = MVNETA_READ(sc, MVNETA_PMIC);
 1742                 ic &= MVNETA_READ(sc, MVNETA_PMIM);
 1743                 if (ic == 0)
 1744                         break;
 1745                 MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
 1746                 claimed = 1;
 1747 
 1748                 if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
 1749                     MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
 1750                         mvneta_link_isr(sc);
 1751         }
 1752         return (claimed);
 1753 }
 1754 
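      /*
       * Periodic housekeeping, rescheduled every hz ticks: complete stale
       * Tx work, snapshot the good-flow-control-frame MIB counter around
       * the MIB update, poll the PHY, refill any Rx ring that ran out of
       * mbufs, and run the Tx watchdog.
       */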
 1755 STATIC void
 1756 mvneta_tick(void *arg)
 1757 {
 1758         struct mvneta_softc *sc;
 1759         struct mvneta_tx_ring *tx;
 1760         struct mvneta_rx_ring *rx;
 1761         int q;
 1762         uint32_t fc_prev, fc_curr;
 1763 
 1764         sc = arg;
 1765 
 1766         /*
 1767          * Drain Tx completions before the MIB update so that the
 1768          * statistics for this tick are accurate.
 1769          */
 1770         mvneta_tx_drain(sc);
 1771 
 1772         /* Extract previous flow-control frame received counter. */
 1773         fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
 1774         /* Read mib registers (clear by read). */
 1775         mvneta_update_mib(sc);
 1776         /* Extract current flow-control frame received counter. */
 1777         fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
 1778 
 1779 
 1780         if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) {
 1781                 mvneta_sc_lock(sc);
 1782                 mii_tick(sc->mii);
 1783 
 1784                 /* Adjust MAC settings */
 1785                 mvneta_adjust_link(sc);
 1786                 mvneta_sc_unlock(sc);
 1787         }
 1788 
 1789         /*
 1790          * If we were unable to refill the Rx queue and left the Rx function,
 1791          * the ring has no mbufs and nothing else calls the refill; do it here.
 1792          */
 1793         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 1794                 rx = MVNETA_RX_RING(sc, q);
 1795                 if (rx->needs_refill == TRUE) {
 1796                         mvneta_rx_lockq(sc, q);
 1797                         mvneta_rx_queue_refill(sc, q);
 1798                         mvneta_rx_unlockq(sc, q);
 1799                 }
 1800         }
 1801 
 1802         /*
 1803          * Watchdog:
 1804          * - check whether the queue is marked as hung.
 1805          * - ignore the hung status if we received pause frames, as the
 1806          *   hardware may have paused packet transmission.
 1807          */
 1808         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 1809                 /*
 1810                  * We should take the queue lock, but since we only read
 1811                  * the queue status we can do without it; at worst we
 1812                  * misdetect the queue status for one tick.
 1813                  */
 1814                 tx = MVNETA_TX_RING(sc, q);
 1815 
 1816                 if (tx->queue_hung && (fc_curr - fc_prev) == 0)
 1817                         goto timeout;
 1818         }
 1819 
 1820         callout_schedule(&sc->tick_ch, hz);
 1821         return;
 1822 
 1823 timeout:
 1824         if_printf(sc->ifp, "watchdog timeout\n");
 1825 
 1826         mvneta_sc_lock(sc);
 1827         sc->counter_watchdog++;
 1828         sc->counter_watchdog_mib++;
 1829         /* Trigger reinitialize sequence. */
 1830         mvneta_stop_locked(sc);
 1831         mvneta_init_locked(sc);
 1832         mvneta_sc_unlock(sc);
 1833 }
 1834 
 1835 STATIC void
 1836 mvneta_qflush(if_t ifp)
 1837 {
 1838 #ifdef MVNETA_MULTIQUEUE
 1839         struct mvneta_softc *sc;
 1840         struct mvneta_tx_ring *tx;
 1841         struct mbuf *m;
 1842         size_t q;
 1843 
 1844         sc = if_getsoftc(ifp);
 1845 
 1846         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 1847                 tx = MVNETA_TX_RING(sc, q);
 1848                 mvneta_tx_lockq(sc, q);
 1849                 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
 1850                         m_freem(m);
 1851                 mvneta_tx_unlockq(sc, q);
 1852         }
 1853 #endif
 1854         if_qflush(ifp);
 1855 }
 1856 
 1857 STATIC void
 1858 mvneta_tx_task(void *arg, int pending)
 1859 {
 1860         struct mvneta_softc *sc;
 1861         struct mvneta_tx_ring *tx;
 1862         if_t ifp;
 1863         int error;
 1864 
 1865         tx = arg;
 1866         ifp = tx->ifp;
 1867         sc = if_getsoftc(ifp);
 1868 
 1869         mvneta_tx_lockq(sc, tx->qidx);
 1870         error = mvneta_xmit_locked(sc, tx->qidx);
 1871         mvneta_tx_unlockq(sc, tx->qidx);
 1872 
 1873         /* Try again */
 1874         if (__predict_false(error != 0 && error != ENETDOWN)) {
 1875                 pause("mvneta_tx_task_sleep", 1);
 1876                 taskqueue_enqueue(tx->taskq, &tx->task);
 1877         }
 1878 }
 1879 
 1880 STATIC int
 1881 mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
 1882 {
 1883         struct mvneta_tx_ring *tx;
 1884         if_t ifp;
 1885         int error;
 1886 
 1887         KASSERT_TX_MTX(sc, q);
 1888         tx = MVNETA_TX_RING(sc, q);
 1889         error = 0;
 1890 
 1891         ifp = sc->ifp;
 1892 
 1893         /* Don't enqueue the packet if the queue is disabled. */
 1894         if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
 1895                 m_freem(*m);
 1896                 *m = NULL;
 1897                 return (ENETDOWN);
 1898         }
 1899 
 1900         /* Reclaim mbufs once the used count exceeds the threshold. */
 1901         if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
 1902                 mvneta_tx_queue_complete(sc, q);
 1903 
 1904         /* Do not call transmit path if queue is already too full. */
 1905         if (__predict_false(tx->used >
 1906             MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
 1907                 return (ENOBUFS);
 1908 
 1909         error = mvneta_tx_queue(sc, m, q);
 1910         if (__predict_false(error != 0))
 1911                 return (error);
 1912 
 1913         /* Send a copy of the frame to the BPF listener */
 1914         ETHER_BPF_MTAP(ifp, *m);
 1915 
 1916         /* Set watchdog on */
 1917         tx->watchdog_time = ticks;
 1918         tx->queue_status = MVNETA_QUEUE_WORKING;
 1919 
 1920         return (error);
 1921 }
 1922 
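      /*
       * Multiqueue transmit strategy: map the flow ID onto a Tx queue by
       * modulo, bypass the buf_ring when it is empty and the queue lock
       * is uncontended (no reordering is possible in that case), and
       * otherwise enqueue on the buf_ring and let the per-queue taskqueue
       * push the packets out.
       */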
 1923 #ifdef MVNETA_MULTIQUEUE
 1924 STATIC int
 1925 mvneta_transmit(if_t ifp, struct mbuf *m)
 1926 {
 1927         struct mvneta_softc *sc;
 1928         struct mvneta_tx_ring *tx;
 1929         int error;
 1930         int q;
 1931 
 1932         sc = if_getsoftc(ifp);
 1933 
 1934         /* Use the default queue if there is no flow ID, as threads can migrate. */
 1935         if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
 1936                 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
 1937         else
 1938                 q = 0;
 1939 
 1940         tx = MVNETA_TX_RING(sc, q);
 1941 
 1942         /* If the buf_ring is full, start transmitting immediately. */
 1943         if (buf_ring_full(tx->br)) {
 1944                 mvneta_tx_lockq(sc, q);
 1945                 mvneta_xmit_locked(sc, q);
 1946                 mvneta_tx_unlockq(sc, q);
 1947         }
 1948 
 1949         /*
 1950          * If the buf_ring is empty we will not reorder packets, so if the
 1951          * lock is available, transmit without using the buf_ring.
 1952          */
 1953         if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
 1954                 error = mvneta_xmitfast_locked(sc, q, &m);
 1955                 mvneta_tx_unlockq(sc, q);
 1956                 if (__predict_true(error == 0))
 1957                         return (0);
 1958 
 1959                 /* Transmit can fail in fastpath. */
 1960                 if (__predict_false(m == NULL))
 1961                         return (error);
 1962         }
 1963 
 1964         /* Enqueue then schedule taskqueue. */
 1965         error = drbr_enqueue(ifp, tx->br, m);
 1966         if (__predict_false(error != 0))
 1967                 return (error);
 1968 
 1969         taskqueue_enqueue(tx->taskq, &tx->task);
 1970         return (0);
 1971 }
 1972 
 1973 STATIC int
 1974 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
 1975 {
 1976         if_t ifp;
 1977         struct mvneta_tx_ring *tx;
 1978         struct mbuf *m;
 1979         int error;
 1980 
 1981         KASSERT_TX_MTX(sc, q);
 1982         ifp = sc->ifp;
 1983         tx = MVNETA_TX_RING(sc, q);
 1984         error = 0;
 1985 
 1986         while ((m = drbr_peek(ifp, tx->br)) != NULL) {
 1987                 error = mvneta_xmitfast_locked(sc, q, &m);
 1988                 if (__predict_false(error != 0)) {
 1989                         if (m != NULL)
 1990                                 drbr_putback(ifp, tx->br, m);
 1991                         else
 1992                                 drbr_advance(ifp, tx->br);
 1993                         break;
 1994                 }
 1995                 drbr_advance(ifp, tx->br);
 1996         }
 1997 
 1998         return (error);
 1999 }
 2000 #else /* !MVNETA_MULTIQUEUE */
 2001 STATIC void
 2002 mvneta_start(if_t ifp)
 2003 {
 2004         struct mvneta_softc *sc;
 2005         struct mvneta_tx_ring *tx;
 2006         int error;
 2007 
 2008         sc = if_getsoftc(ifp);
 2009         tx = MVNETA_TX_RING(sc, 0);
 2010 
 2011         mvneta_tx_lockq(sc, 0);
 2012         error = mvneta_xmit_locked(sc, 0);
 2013         mvneta_tx_unlockq(sc, 0);
 2014         /* Handle retransmit in the background taskq. */
 2015         if (__predict_false(error != 0 && error != ENETDOWN))
 2016                 taskqueue_enqueue(tx->taskq, &tx->task);
 2017 }
 2018 
 2019 STATIC int
 2020 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
 2021 {
 2022         if_t ifp;
 2023         struct mbuf *m;
 2024         int error;
 2025 
 2026         KASSERT_TX_MTX(sc, q);
 2027         ifp = sc->ifp;
 2028         error = 0;
 2029 
 2030         while (!if_sendq_empty(ifp)) {
 2031                 m = if_dequeue(ifp);
 2032                 if (m == NULL)
 2033                         break;
 2034 
 2035                 error = mvneta_xmitfast_locked(sc, q, &m);
 2036                 if (__predict_false(error != 0)) {
 2037                         if (m != NULL)
 2038                                 if_sendq_prepend(ifp, m);
 2039                         break;
 2040                 }
 2041         }
 2042 
 2043         return (error);
 2044 }
 2045 #endif
 2046 
 2047 STATIC int
 2048 mvneta_ioctl(if_t ifp, u_long cmd, caddr_t data)
 2049 {
 2050         struct mvneta_softc *sc;
 2051         struct mvneta_rx_ring *rx;
 2052         struct ifreq *ifr;
 2053         int error, mask;
 2054         uint32_t flags;
 2055         bool reinit;
 2056         int q;
 2057 
 2058         error = 0;
 2059         reinit = false;
 2060         sc = if_getsoftc(ifp);
 2061         ifr = (struct ifreq *)data;
 2062         switch (cmd) {
 2063         case SIOCSIFFLAGS:
 2064                 mvneta_sc_lock(sc);
 2065                 if (if_getflags(ifp) & IFF_UP) {
 2066                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 2067                                 flags = if_getflags(ifp) ^ sc->mvneta_if_flags;
 2068 
 2069                                 if (flags != 0)
 2070                                         sc->mvneta_if_flags = if_getflags(ifp);
 2071 
 2072                                 if ((flags & IFF_PROMISC) != 0)
 2073                                         mvneta_filter_setup(sc);
 2074                         } else {
 2075                                 mvneta_init_locked(sc);
 2076                                 sc->mvneta_if_flags = if_getflags(ifp);
 2077                                 if (sc->phy_attached)
 2078                                         mii_mediachg(sc->mii);
 2079                                 mvneta_sc_unlock(sc);
 2080                                 break;
 2081                         }
 2082                 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 2083                         mvneta_stop_locked(sc);
 2084 
 2085                 sc->mvneta_if_flags = if_getflags(ifp);
 2086                 mvneta_sc_unlock(sc);
 2087                 break;
 2088         case SIOCSIFCAP:
 2089                 if (if_getmtu(ifp) > sc->tx_csum_limit &&
 2090                     ifr->ifr_reqcap & IFCAP_TXCSUM)
 2091                         ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
 2092                 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
 2093                 if (mask & IFCAP_HWCSUM) {
 2094                         if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap,
 2095                             IFCAP_HWCSUM);
 2096                         if (if_getcapenable(ifp) & IFCAP_TXCSUM)
 2097                                 if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
 2098                                     CSUM_UDP);
 2099                         else
 2100                                 if_sethwassist(ifp, 0);
 2101                 }
 2102                 if (mask & IFCAP_LRO) {
 2103                         mvneta_sc_lock(sc);
 2104                         if_togglecapenable(ifp, IFCAP_LRO);
 2105                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 2106                                 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 2107                                         rx = MVNETA_RX_RING(sc, q);
 2108                                         rx->lro_enabled = !rx->lro_enabled;
 2109                                 }
 2110                         }
 2111                         mvneta_sc_unlock(sc);
 2112                 }
 2113                 VLAN_CAPABILITIES(ifp);
 2114                 break;
 2115         case SIOCSIFMEDIA:
 2116                 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
 2117                     IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
 2118                     (ifr->ifr_media & IFM_FDX) == 0) {
 2119                         device_printf(sc->dev,
 2120                             "%s half-duplex unsupported\n",
 2121                             IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
 2122                             "1000Base-T" :
 2123                             "2500Base-T");
 2124                         error = EINVAL;
 2125                         break;
 2126                 }
 2127         case SIOCGIFMEDIA: /* FALLTHROUGH */
 2128         case SIOCGIFXMEDIA:
 2129                 if (!sc->phy_attached)
 2130                         error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
 2131                             cmd);
 2132                 else
 2133                         error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
 2134                             cmd);
 2135                 break;
 2136         case SIOCSIFMTU:
 2137                 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
 2138                     MVNETA_ETHER_SIZE) {
 2139                         error = EINVAL;
 2140                 } else {
 2141                         if_setmtu(ifp, ifr->ifr_mtu);
 2142                         mvneta_sc_lock(sc);
 2143                         if (if_getmtu(ifp) + MVNETA_ETHER_SIZE <= MCLBYTES) {
 2144                                 sc->rx_frame_size = MCLBYTES;
 2145                         } else {
 2146                                 sc->rx_frame_size = MJUM9BYTES;
 2147                         }
 2148                         if (if_getmtu(ifp) > sc->tx_csum_limit) {
 2149                                 if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
 2150                                 if_sethwassist(ifp, 0);
 2151                         } else {
 2152                                 if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
 2153                                 if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
 2154                                         CSUM_UDP);
 2155                         }
 2156                         /*
 2157                          * Reinitialize RX queues.
 2158                          * We need to update RX descriptor size.
 2159                          */
 2160                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 2161                                 reinit = true;
 2162                                 mvneta_stop_locked(sc);
 2163                         }
 2164 
 2165                         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 2166                                 mvneta_rx_lockq(sc, q);
 2167                                 if (mvneta_rx_queue_init(ifp, q) != 0) {
 2168                                         device_printf(sc->dev,
 2169                                             "initialization failed:"
 2170                                             " cannot initialize queue\n");
 2171                                         mvneta_rx_unlockq(sc, q);
 2172                                         error = ENOBUFS;
 2173                                         break;
 2174                                 }
 2175                                 mvneta_rx_unlockq(sc, q);
 2176                         }
 2177                         if (reinit)
 2178                                 mvneta_init_locked(sc);
 2179 
 2180                         mvneta_sc_unlock(sc);
 2181                 }
 2182                 break;
 2183 
 2184         default:
 2185                 error = ether_ioctl(ifp, cmd, data);
 2186                 break;
 2187         }
 2188 
 2189         return (error);
 2190 }
 2191 
 2192 STATIC void
 2193 mvneta_init_locked(void *arg)
 2194 {
 2195         struct mvneta_softc *sc;
 2196         if_t ifp;
 2197         uint32_t reg;
 2198         int q, cpu;
 2199 
 2200         sc = arg;
 2201         ifp = sc->ifp;
 2202 
 2203         if (!device_is_attached(sc->dev) ||
 2204             (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
 2205                 return;
 2206 
 2207         mvneta_disable_intr(sc);
 2208         callout_stop(&sc->tick_ch);
 2209 
 2210         /* Get the latest MAC address */
 2211         bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN);
 2212         mvneta_set_mac_address(sc, sc->enaddr);
 2213         mvneta_filter_setup(sc);
 2214 
 2215         /* Start DMA Engine */
 2216         MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
 2217         MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
 2218         MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
 2219 
 2220         /* Enable port */
 2221         reg  = MVNETA_READ(sc, MVNETA_PMACC0);
 2222         reg |= MVNETA_PMACC0_PORTEN;
 2223         reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK;
 2224         reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
 2225         MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
 2226 
 2227         /* Allow access to each TXQ/RXQ from all CPUs */
 2228         for (cpu = 0; cpu < mp_ncpus; ++cpu)
 2229                 MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
 2230                     MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
 2231 
 2232         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 2233                 mvneta_rx_lockq(sc, q);
 2234                 mvneta_rx_queue_refill(sc, q);
 2235                 mvneta_rx_unlockq(sc, q);
 2236         }
 2237 
 2238         if (!sc->phy_attached)
 2239                 mvneta_linkup(sc);
 2240 
 2241         /* Enable interrupt */
 2242         mvneta_enable_intr(sc);
 2243 
 2244         /* Arm the periodic tick callout. */
 2245         callout_schedule(&sc->tick_ch, hz);
 2246 
 2247         if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
 2248 }
 2249 
 2250 STATIC void
 2251 mvneta_init(void *arg)
 2252 {
 2253         struct mvneta_softc *sc;
 2254 
 2255         sc = arg;
 2256         mvneta_sc_lock(sc);
 2257         mvneta_init_locked(sc);
 2258         if (sc->phy_attached)
 2259                 mii_mediachg(sc->mii);
 2260         mvneta_sc_unlock(sc);
 2261 }
 2262 
 2263 /* ARGSUSED */
 2264 STATIC void
 2265 mvneta_stop_locked(struct mvneta_softc *sc)
 2266 {
 2267         if_t ifp;
 2268         uint32_t reg;
 2269         int q;
 2270 
 2271         ifp = sc->ifp;
 2272         if (ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
 2273                 return;
 2274 
 2275         mvneta_disable_intr(sc);
 2276 
 2277         callout_stop(&sc->tick_ch);
 2278 
 2279         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 2280 
 2281         /* Link down */
 2282         if (sc->linkup == TRUE)
 2283                 mvneta_linkdown(sc);
 2284 
 2285         /* Reset the MAC Port Enable bit */
 2286         reg = MVNETA_READ(sc, MVNETA_PMACC0);
 2287         reg &= ~MVNETA_PMACC0_PORTEN;
 2288         MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
 2289 
 2290         /* Flush each Rx queue */
 2291         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 2292                 mvneta_rx_lockq(sc, q);
 2293                 mvneta_ring_flush_rx_queue(sc, q);
 2294                 mvneta_rx_unlockq(sc, q);
 2295         }
 2296 
 2297         /*
 2298          * Hold Reset state of DMA Engine
 2299          * (must write 0x0 to restart it)
 2300          */
 2301         MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
 2302         MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
 2303 
 2304         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 2305                 mvneta_tx_lockq(sc, q);
 2306                 mvneta_ring_flush_tx_queue(sc, q);
 2307                 mvneta_tx_unlockq(sc, q);
 2308         }
 2309 }
 2310 
 2311 STATIC void
 2312 mvneta_stop(struct mvneta_softc *sc)
 2313 {
 2314 
 2315         mvneta_sc_lock(sc);
 2316         mvneta_stop_locked(sc);
 2317         mvneta_sc_unlock(sc);
 2318 }
 2319 
 2320 STATIC int
 2321 mvneta_mediachange(if_t ifp)
 2322 {
 2323         struct mvneta_softc *sc;
 2324 
 2325         sc = if_getsoftc(ifp);
 2326 
 2327         if (!sc->phy_attached && !sc->use_inband_status) {
 2328                 /* We shouldn't be here */
 2329                 if_printf(ifp, "Cannot change media in fixed-link mode!\n");
 2330                 return (0);
 2331         }
 2332 
 2333         if (sc->use_inband_status) {
 2334                 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
 2335                 return (0);
 2336         }
 2337 
 2338         mvneta_sc_lock(sc);
 2339 
 2340         /* Update PHY */
 2341         mii_mediachg(sc->mii);
 2342 
 2343         mvneta_sc_unlock(sc);
 2344 
 2345         return (0);
 2346 }
 2347 
 2348 STATIC void
 2349 mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
 2350 {
 2351         uint32_t psr;
 2352 
 2353         psr = MVNETA_READ(sc, MVNETA_PSR);
 2354 
 2355         /* Speed */
 2356         if (psr & MVNETA_PSR_GMIISPEED)
 2357                 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
 2358         else if (psr & MVNETA_PSR_MIISPEED)
 2359                 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
 2360         else if (psr & MVNETA_PSR_LINKUP)
 2361                 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
 2362 
 2363         /* Duplex */
 2364         if (psr & MVNETA_PSR_FULLDX)
 2365                 ifmr->ifm_active |= IFM_FDX;
 2366 
 2367         /* Link */
 2368         ifmr->ifm_status = IFM_AVALID;
 2369         if (psr & MVNETA_PSR_LINKUP)
 2370                 ifmr->ifm_status |= IFM_ACTIVE;
 2371 }
 2372 
 2373 STATIC void
 2374 mvneta_mediastatus(if_t ifp, struct ifmediareq *ifmr)
 2375 {
 2376         struct mvneta_softc *sc;
 2377         struct mii_data *mii;
 2378 
 2379         sc = if_getsoftc(ifp);
 2380 
 2381         if (!sc->phy_attached && !sc->use_inband_status) {
 2382                 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
 2383                 return;
 2384         }
 2385 
 2386         mvneta_sc_lock(sc);
 2387 
 2388         if (sc->use_inband_status) {
 2389                 mvneta_get_media(sc, ifmr);
 2390                 mvneta_sc_unlock(sc);
 2391                 return;
 2392         }
 2393 
 2394         mii = sc->mii;
 2395         mii_pollstat(mii);
 2396 
 2397         ifmr->ifm_active = mii->mii_media_active;
 2398         ifmr->ifm_status = mii->mii_media_status;
 2399 
 2400         mvneta_sc_unlock(sc);
 2401 }
 2402 
 2403 /*
 2404  * Link State Notify
 2405  */
 2406 STATIC void
 2407 mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
 2408 {
 2409         int reg;
 2410 
 2411         if (enable) {
 2412                 reg = MVNETA_READ(sc, MVNETA_PANC);
 2413                 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
 2414                     MVNETA_PANC_ANFCEN);
 2415                 reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
 2416                     MVNETA_PANC_INBANDANEN;
 2417                 MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2418 
 2419                 reg = MVNETA_READ(sc, MVNETA_PMACC2);
 2420                 reg |= MVNETA_PMACC2_INBANDANMODE;
 2421                 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
 2422 
 2423                 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
 2424                 reg |= MVNETA_PSOMSCD_ENABLE;
 2425                 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
 2426         } else {
 2427                 reg = MVNETA_READ(sc, MVNETA_PANC);
 2428                 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
 2429                     MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
 2430                     MVNETA_PANC_INBANDANEN);
 2431                 MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2432 
 2433                 reg = MVNETA_READ(sc, MVNETA_PMACC2);
 2434                 reg &= ~MVNETA_PMACC2_INBANDANMODE;
 2435                 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
 2436 
 2437                 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
 2438                 reg &= ~MVNETA_PSOMSCD_ENABLE;
 2439                 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
 2440         }
 2441 }
 2442 
 2443 STATIC int
 2444 mvneta_update_media(struct mvneta_softc *sc, int media)
 2445 {
 2446         int reg, err;
 2447         boolean_t running;
 2448 
 2449         err = 0;
 2450 
 2451         mvneta_sc_lock(sc);
 2452 
 2453         mvneta_linkreset(sc);
 2454 
 2455         running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0;
 2456         if (running)
 2457                 mvneta_stop_locked(sc);
 2458 
 2459         sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
 2460 
 2461         if (!sc->phy_attached || sc->use_inband_status)
 2462                 mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
 2463 
 2464         mvneta_update_eee(sc);
 2465         mvneta_update_fc(sc);
 2466 
 2467         if (IFM_SUBTYPE(media) != IFM_AUTO) {
 2468                 reg = MVNETA_READ(sc, MVNETA_PANC);
 2469                 reg &= ~(MVNETA_PANC_SETGMIISPEED |
 2470                     MVNETA_PANC_SETMIISPEED |
 2471                     MVNETA_PANC_SETFULLDX);
 2472                 if (IFM_SUBTYPE(media) == IFM_1000_T ||
 2473                     IFM_SUBTYPE(media) == IFM_2500_T) {
 2474                         if ((media & IFM_FDX) == 0) {
 2475                                 device_printf(sc->dev,
 2476                                     "%s half-duplex unsupported\n",
 2477                                     IFM_SUBTYPE(media) == IFM_1000_T ?
 2478                                     "1000Base-T" :
 2479                                     "2500Base-T");
 2480                                 err = EINVAL;
 2481                                 goto out;
 2482                         }
 2483                         reg |= MVNETA_PANC_SETGMIISPEED;
 2484                 } else if (IFM_SUBTYPE(media) == IFM_100_TX)
 2485                         reg |= MVNETA_PANC_SETMIISPEED;
 2486 
 2487                 if (media & IFM_FDX)
 2488                         reg |= MVNETA_PANC_SETFULLDX;
 2489 
 2490                 MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2491         }
 2492 out:
 2493         if (running)
 2494                 mvneta_init_locked(sc);
 2495         mvneta_sc_unlock(sc);
 2496         return (err);
 2497 }
 2498 
 2499 STATIC void
 2500 mvneta_adjust_link(struct mvneta_softc *sc)
 2501 {
 2502         boolean_t phy_linkup;
 2503         int reg;
 2504 
 2505         /* Update eee/fc */
 2506         mvneta_update_eee(sc);
 2507         mvneta_update_fc(sc);
 2508 
 2509         /* Check for link change */
 2510         phy_linkup = (sc->mii->mii_media_status &
 2511             (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
 2512 
 2513         if (sc->linkup != phy_linkup)
 2514                 mvneta_linkupdate(sc, phy_linkup);
 2515 
 2516         /* Don't update media on disabled link */
 2517         if (!phy_linkup)
 2518                 return;
 2519 
 2520         /* Check for media type change */
 2521         if (sc->mvneta_media != sc->mii->mii_media_active) {
 2522                 sc->mvneta_media = sc->mii->mii_media_active;
 2523 
 2524                 reg = MVNETA_READ(sc, MVNETA_PANC);
 2525                 reg &= ~(MVNETA_PANC_SETGMIISPEED |
 2526                     MVNETA_PANC_SETMIISPEED |
 2527                     MVNETA_PANC_SETFULLDX);
 2528                 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
 2529                     IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
 2530                         reg |= MVNETA_PANC_SETGMIISPEED;
 2531                 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
 2532                         reg |= MVNETA_PANC_SETMIISPEED;
 2533 
 2534                 if (sc->mvneta_media & IFM_FDX)
 2535                         reg |= MVNETA_PANC_SETFULLDX;
 2536 
 2537                 MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2538         }
 2539 }
 2540 
 2541 STATIC void
 2542 mvneta_link_isr(struct mvneta_softc *sc)
 2543 {
 2544         int linkup;
 2545 
 2546         KASSERT_SC_MTX(sc);
 2547 
 2548         linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
 2549         if (sc->linkup == linkup)
 2550                 return;
 2551 
 2552         if (linkup == TRUE)
 2553                 mvneta_linkup(sc);
 2554         else
 2555                 mvneta_linkdown(sc);
 2556 
 2557 #ifdef DEBUG
 2558         device_printf(sc->dev,
 2559             "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
 2560 #endif
 2561 }
 2562 
 2563 STATIC void
 2564 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
 2565 {
 2566 
 2567         KASSERT_SC_MTX(sc);
 2568 
 2569         if (linkup == TRUE)
 2570                 mvneta_linkup(sc);
 2571         else
 2572                 mvneta_linkdown(sc);
 2573 
 2574 #ifdef DEBUG
 2575         device_printf(sc->dev,
 2576             "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
 2577 #endif
 2578 }
 2579 
 2580 STATIC void
 2581 mvneta_update_eee(struct mvneta_softc *sc)
 2582 {
 2583         uint32_t reg;
 2584 
 2585         KASSERT_SC_MTX(sc);
 2586 
 2587         /* set EEE parameters */
 2588         reg = MVNETA_READ(sc, MVNETA_LPIC1);
 2589         if (sc->cf_lpi)
 2590                 reg |= MVNETA_LPIC1_LPIRE;
 2591         else
 2592                 reg &= ~MVNETA_LPIC1_LPIRE;
 2593         MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
 2594 }
 2595 
 2596 STATIC void
 2597 mvneta_update_fc(struct mvneta_softc *sc)
 2598 {
 2599         uint32_t reg;
 2600 
 2601         KASSERT_SC_MTX(sc);
 2602 
 2603         reg  = MVNETA_READ(sc, MVNETA_PANC);
 2604         if (sc->cf_fc) {
 2605                 /* Flow control negotiation */
 2606                 reg |= MVNETA_PANC_PAUSEADV;
 2607                 reg |= MVNETA_PANC_ANFCEN;
 2608         } else {
 2609                 /* Disable flow control negotiation */
 2610                 reg &= ~MVNETA_PANC_PAUSEADV;
 2611                 reg &= ~MVNETA_PANC_ANFCEN;
 2612         }
 2613 
 2614         MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2615 }
 2616 
 2617 STATIC void
 2618 mvneta_linkup(struct mvneta_softc *sc)
 2619 {
 2620         uint32_t reg;
 2621 
 2622         KASSERT_SC_MTX(sc);
 2623 
 2624         if (!sc->phy_attached || !sc->use_inband_status) {
 2625                 reg  = MVNETA_READ(sc, MVNETA_PANC);
 2626                 reg |= MVNETA_PANC_FORCELINKPASS;
 2627                 reg &= ~MVNETA_PANC_FORCELINKFAIL;
 2628                 MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2629         }
 2630 
 2631         mvneta_qflush(sc->ifp);
 2632         mvneta_portup(sc);
 2633         sc->linkup = TRUE;
 2634         if_link_state_change(sc->ifp, LINK_STATE_UP);
 2635 }
 2636 
 2637 STATIC void
 2638 mvneta_linkdown(struct mvneta_softc *sc)
 2639 {
 2640         uint32_t reg;
 2641 
 2642         KASSERT_SC_MTX(sc);
 2643 
 2644         if (!sc->phy_attached || !sc->use_inband_status) {
 2645                 reg  = MVNETA_READ(sc, MVNETA_PANC);
 2646                 reg &= ~MVNETA_PANC_FORCELINKPASS;
 2647                 reg |= MVNETA_PANC_FORCELINKFAIL;
 2648                 MVNETA_WRITE(sc, MVNETA_PANC, reg);
 2649         }
 2650 
 2651         mvneta_portdown(sc);
 2652         mvneta_qflush(sc->ifp);
 2653         sc->linkup = FALSE;
 2654         if_link_state_change(sc->ifp, LINK_STATE_DOWN);
 2655 }
 2656 
 2657 STATIC void
 2658 mvneta_linkreset(struct mvneta_softc *sc)
 2659 {
 2660         struct mii_softc *mii;
 2661 
 2662         if (sc->phy_attached) {
 2663                 /* Force a PHY reset */
 2664                 mii = LIST_FIRST(&sc->mii->mii_phys);
 2665                 if (mii)
 2666                         mii_phy_reset(mii);
 2667         }
 2668 }
 2669 
 2670 /*
 2671  * Tx Subroutines
 2672  */
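      /*
       * Enqueue one frame on Tx queue q: perform software VLAN
       * encapsulation if needed, duplicate non-writable mbuf chains when
       * checksum offload must modify them, DMA-load the chain, and fill
       * one descriptor per segment, flagging the first (F) and last (L)
       * descriptors.  The pending-descriptor count is then handed to the
       * hardware through PTXSU in chunks, as the update loop below never
       * writes more than 255 at a time; 300 new descriptors, for
       * instance, would be posted as 255 + 45.
       */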
 2673 STATIC int
 2674 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
 2675 {
 2676         if_t ifp;
 2677         bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
 2678         struct mbuf *mtmp, *mbuf;
 2679         struct mvneta_tx_ring *tx;
 2680         struct mvneta_buf *txbuf;
 2681         struct mvneta_tx_desc *t;
 2682         uint32_t ptxsu;
 2683         int used, error, i, txnsegs;
 2684 
 2685         mbuf = *mbufp;
 2686         tx = MVNETA_TX_RING(sc, q);
 2687         DASSERT(tx->used >= 0);
 2688         DASSERT(tx->used <= MVNETA_TX_RING_CNT);
 2689         t = NULL;
 2690         ifp = sc->ifp;
 2691 
 2692         if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
 2693                 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
 2694                 if (mbuf == NULL) {
 2695                         tx->drv_error++;
 2696                         *mbufp = NULL;
 2697                         return (ENOBUFS);
 2698                 }
 2699                 mbuf->m_flags &= ~M_VLANTAG;
 2700                 *mbufp = mbuf;
 2701         }
 2702 
 2703         if (__predict_false(mbuf->m_next != NULL &&
 2704             (mbuf->m_pkthdr.csum_flags &
 2705             (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
 2706                 if (M_WRITABLE(mbuf) == 0) {
 2707                         mtmp = m_dup(mbuf, M_NOWAIT);
 2708                         m_freem(mbuf);
 2709                         if (mtmp == NULL) {
 2710                                 tx->drv_error++;
 2711                                 *mbufp = NULL;
 2712                                 return (ENOBUFS);
 2713                         }
 2714                         *mbufp = mbuf = mtmp;
 2715                 }
 2716         }
 2717 
 2718         /* load mbuf using dmamap of 1st descriptor */
 2719         txbuf = &tx->txbuf[tx->cpu];
 2720         error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
 2721             txbuf->dmap, mbuf, txsegs, &txnsegs,
 2722             BUS_DMA_NOWAIT);
 2723         if (__predict_false(error != 0)) {
 2724 #ifdef MVNETA_KTR
 2725                 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", if_name(ifp), q, error);
 2726 #endif
 2727                 /* ENOMEM is the only recoverable error here (EFBIG aside). */
 2728                 if (error != ENOMEM) {
 2729                         tx->drv_error++;
 2730                         m_freem(mbuf);
 2731                         *mbufp = NULL;
 2732                         return (ENOBUFS);
 2733                 }
 2734                 return (error);
 2735         }
 2736 
 2737         if (__predict_false(txnsegs <= 0
 2738             || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
 2739                 /* Not enough descriptors, or the mbuf is broken. */
 2740 #ifdef MVNETA_KTR
 2741                 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
 2742                     if_name(ifp), q, txnsegs);
 2743 #endif
 2744                 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
 2745                 return (ENOBUFS);
 2746         }
 2747         DASSERT(txbuf->m == NULL);
 2748 
 2749         /* remember mbuf using 1st descriptor */
 2750         txbuf->m = mbuf;
 2751         bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
 2752             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 2753 
 2754         /* load to tx descriptors */
 2755         used = 0;
 2756         for (i = 0; i < txnsegs; i++) {
 2757                 t = &tx->desc[tx->cpu];
 2758                 t->command = 0;
 2759                 t->l4ichk = 0;
 2760                 t->flags = 0;
 2761                 if (__predict_true(i == 0)) {
 2762                         /* 1st descriptor */
 2763                         t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
 2764                         t->command |= MVNETA_TX_CMD_F;
 2765                         mvneta_tx_set_csumflag(ifp, t, mbuf);
 2766                 }
 2767                 t->bufptr_pa = txsegs[i].ds_addr;
 2768                 t->bytecnt = txsegs[i].ds_len;
 2769                 tx->cpu = tx_counter_adv(tx->cpu, 1);
 2770 
 2771                 tx->used++;
 2772                 used++;
 2773         }
 2774         /* t is last descriptor here */
 2775         DASSERT(t != NULL);
 2776         t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
 2777 
 2778         bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
 2779             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 2780 
 2781         while (__predict_false(used > 255)) {
 2782                 ptxsu = MVNETA_PTXSU_NOWD(255);
 2783                 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
 2784                 used -= 255;
 2785         }
 2786         if (__predict_true(used > 0)) {
 2787                 ptxsu = MVNETA_PTXSU_NOWD(used);
 2788                 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
 2789         }
 2790         return (0);
 2791 }
 2792 
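      /*
       * Translate the mbuf's checksum-offload flags into Tx descriptor
       * command bits: locate the IPv4 header (skipping one or two VLAN
       * tags), record its offset and header length, and request IP and/or
       * TCP/UDP checksum generation as appropriate.
       */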
 2793 STATIC void
 2794 mvneta_tx_set_csumflag(if_t ifp,
 2795     struct mvneta_tx_desc *t, struct mbuf *m)
 2796 {
 2797         struct ether_header *eh;
 2798         struct ether_vlan_header *evh;
 2799         int csum_flags;
 2800         uint32_t iphl, ipoff;
 2801         struct ip *ip;
 2802 
 2803         iphl = ipoff = 0;
 2804         csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags;
 2805         eh = mtod(m, struct ether_header *);
 2806 
 2807         switch (ntohs(eh->ether_type)) {
 2808         case ETHERTYPE_IP:
 2809                 ipoff = ETHER_HDR_LEN;
 2810                 break;
 2811         case ETHERTYPE_VLAN:
 2812                 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 2813                 evh = mtod(m, struct ether_vlan_header *);
 2814                 if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
 2815                         ipoff += ETHER_VLAN_ENCAP_LEN;
 2816                 break;
 2817         default:
 2818                 csum_flags = 0;
 2819         }
 2820 
 2821         if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
 2822                 ip = (struct ip *)(m->m_data + ipoff);
 2823                 iphl = ip->ip_hl<<2;
 2824                 t->command |= MVNETA_TX_CMD_L3_IP4;
 2825         } else {
 2826                 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
 2827                 return;
 2828         }
 2829 
 2830 
 2831         /* L3 */
 2832         if (csum_flags & CSUM_IP) {
 2833                 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
 2834         }
 2835 
 2836         /* L4 */
 2837         if (csum_flags & CSUM_IP_TCP) {
 2838                 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
 2839                 t->command |= MVNETA_TX_CMD_L4_TCP;
 2840         } else if (csum_flags & CSUM_IP_UDP) {
 2841                 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
 2842                 t->command |= MVNETA_TX_CMD_L4_UDP;
 2843         } else
 2844                 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
 2845 
 2846         t->l4ichk = 0;
 2847         t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
 2848         t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
 2849 }
 2850 
 2851 STATIC void
 2852 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
 2853 {
 2854         struct mvneta_tx_ring *tx;
 2855         struct mvneta_buf *txbuf;
 2856         struct mvneta_tx_desc *t __diagused;
 2857         uint32_t ptxs, ptxsu, ndesc;
 2858         int i;
 2859 
 2860         KASSERT_TX_MTX(sc, q);
 2861 
 2862         tx = MVNETA_TX_RING(sc, q);
 2863         if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
 2864                 return;
 2865 
 2866         ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
 2867         ndesc = MVNETA_PTXS_GET_TBC(ptxs);
 2868 
 2869         if (__predict_false(ndesc == 0)) {
 2870                 if (tx->used == 0)
 2871                         tx->queue_status = MVNETA_QUEUE_IDLE;
 2872                 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
 2873                     ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
 2874                         tx->queue_hung = TRUE;
 2875                 return;
 2876         }
 2877 
 2878 #ifdef MVNETA_KTR
 2879         CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
 2880             if_name(sc->ifp), q, ndesc);
 2881 #endif
 2882 
 2883         bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
 2884             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 2885 
 2886         for (i = 0; i < ndesc; i++) {
 2887                 t = &tx->desc[tx->dma];
 2888 #ifdef MVNETA_KTR
 2889                 if (t->flags & MVNETA_TX_F_ES)
 2890                         CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
 2891                             if_name(sc->ifp), q, tx->dma);
 2892 #endif
 2893                 txbuf = &tx->txbuf[tx->dma];
 2894                 if (__predict_true(txbuf->m != NULL)) {
 2895                         DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
 2896                         bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
 2897                         m_freem(txbuf->m);
 2898                         txbuf->m = NULL;
 2899                 }
 2900                 else
 2901                         DASSERT((t->command & MVNETA_TX_CMD_F) == 0);
 2902                 tx->dma = tx_counter_adv(tx->dma, 1);
 2903                 tx->used--;
 2904         }
 2905         DASSERT(tx->used >= 0);
 2906         DASSERT(tx->used <= MVNETA_TX_RING_CNT);
 2907         while (__predict_false(ndesc > 255)) {
 2908                 ptxsu = MVNETA_PTXSU_NORB(255);
 2909                 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
 2910                 ndesc -= 255;
 2911         }
 2912         if (__predict_true(ndesc > 0)) {
 2913                 ptxsu = MVNETA_PTXSU_NORB(ndesc);
 2914                 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
 2915         }
 2916 #ifdef MVNETA_KTR
 2917         CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
 2918             if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
 2919 #endif
 2920 
 2921         tx->watchdog_time = ticks;
 2922 
 2923         if (tx->used == 0)
 2924                 tx->queue_status = MVNETA_QUEUE_IDLE;
 2925 }
 2926 
 2927 /*
 2928  * Do a final TX complete when TX is idle.
 2929  */
 2930 STATIC void
 2931 mvneta_tx_drain(struct mvneta_softc *sc)
 2932 {
 2933         struct mvneta_tx_ring *tx;
 2934         int q;
 2935 
 2936         /*
 2937          * Handle trailing mbufs on the Tx queue.
 2938          * The check is done locklessly to avoid Tx path contention.
 2939          */
 2940         for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
 2941                 tx = MVNETA_TX_RING(sc, q);
 2942                 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
 2943                     tx->used > 0) {
 2944                         mvneta_tx_lockq(sc, q);
 2945                         mvneta_tx_queue_complete(sc, q);
 2946                         mvneta_tx_unlockq(sc, q);
 2947                 }
 2948         }
 2949 }
 2950 
 2951 /*
 2952  * Rx Subroutines
 2953  */
 2954 STATIC int
 2955 mvneta_rx(struct mvneta_softc *sc, int q, int count)
 2956 {
 2957         uint32_t prxs, npkt;
 2958         int more;
 2959 
 2960         more = 0;
 2961         mvneta_rx_lockq(sc, q);
 2962         prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
 2963         npkt = MVNETA_PRXS_GET_ODC(prxs);
 2964         if (__predict_false(npkt == 0))
 2965                 goto out;
 2966 
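              /*
               * Honor the caller's budget: if more packets are pending
               * than "count" allows, process only the budget and return 1
               * so the caller can schedule another pass.
               */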
 2967         if (count > 0 && npkt > count) {
 2968                 more = 1;
 2969                 npkt = count;
 2970         }
 2971         mvneta_rx_queue(sc, q, npkt);
 2972 out:
 2973         mvneta_rx_unlockq(sc, q);
 2974         return (more);
 2975 }
 2976 
 2977 /*
 2978  * Helper routine for updating the PRXSU register of a given queue.
 2979  * Splits processed-descriptor counts larger than the field maximum (255).
 2980  */
 2981 STATIC __inline void
 2982 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
 2983 {
 2984         uint32_t prxsu;
 2985 
 2986         while (__predict_false(processed > 255)) {
 2987                 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
 2988                 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
 2989                 processed -= 255;
 2990         }
 2991         prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
 2992         MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
 2993 }
 2994 
 2995 static __inline void
 2996 mvneta_prefetch(void *p)
 2997 {
 2998 
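              /* Compiler hint to start pulling the line at p into cache. */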
 2999         __builtin_prefetch(p);
 3000 }
 3001 
 3002 STATIC void
 3003 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
 3004 {
 3005         if_t ifp;
 3006         struct mvneta_rx_ring *rx;
 3007         struct mvneta_rx_desc *r;
 3008         struct mvneta_buf *rxbuf;
 3009         struct mbuf *m;
 3010         struct lro_ctrl *lro;
 3011         struct lro_entry *queued;
 3012         void *pktbuf;
 3013         int i, pktlen, processed, ndma;
 3014 
 3015         KASSERT_RX_MTX(sc, q);
 3016 
 3017         ifp = sc->ifp;
 3018         rx = MVNETA_RX_RING(sc, q);
 3019         processed = 0;
 3020 
 3021         if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
 3022                 return;
 3023 
 3024         bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
 3025             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 3026 
 3027         for (i = 0; i < npkt; i++) {
 3028                 /* Prefetch next desc, rxbuf. */
 3029                 ndma = rx_counter_adv(rx->dma, 1);
 3030                 mvneta_prefetch(&rx->desc[ndma]);
 3031                 mvneta_prefetch(&rx->rxbuf[ndma]);
 3032 
 3033                 /* get descriptor and packet */
 3034                 r = &rx->desc[rx->dma];
 3035                 rxbuf = &rx->rxbuf[rx->dma];
 3036                 m = rxbuf->m;
 3037                 rxbuf->m = NULL;
 3038                 DASSERT(m != NULL);
 3039                 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
 3040                     BUS_DMASYNC_POSTREAD);
 3041                 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
 3042                 /* Prefetch mbuf header. */
 3043                 mvneta_prefetch(m);
 3044 
 3045                 processed++;
 3046                 /* Drop desc with error status or not in a single buffer. */
 3047                 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
 3048                     (MVNETA_RX_F|MVNETA_RX_L));
 3049                 if (__predict_false((r->status & MVNETA_RX_ES) ||
 3050                     (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
 3051                     (MVNETA_RX_F|MVNETA_RX_L)))
 3052                         goto rx_error;
 3053 
 3054                 /*
 3055                  * Buffer layout: [ OFF | MH | PKT | CRC ];
 3056                  * bytecnt covers MH, PKT and CRC.
 3057                  */
 3058                 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
 3059                 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
 3060                     MVNETA_HWHEADER_SIZE;
 3061 
 3062                 /* Prefetch mbuf data. */
 3063                 mvneta_prefetch(pktbuf);
 3064 
 3065                 /* Write value to mbuf (avoid read). */
 3066                 m->m_data = pktbuf;
 3067                 m->m_len = m->m_pkthdr.len = pktlen;
 3068                 m->m_pkthdr.rcvif = ifp;
 3069                 mvneta_rx_set_csumflag(ifp, r, m);
 3070 
 3071                 /* Increase rx_dma before releasing the lock. */
 3072                 rx->dma = ndma;
 3073 
 3074                 if (__predict_false(rx->lro_enabled &&
 3075                     ((r->status & MVNETA_RX_L3_IP) != 0) &&
 3076                     ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
 3077                     (m->m_pkthdr.csum_flags &
 3078                     (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 3079                     (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
 3080                         if (rx->lro.lro_cnt != 0) {
 3081                                 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
 3082                                         goto rx_done;
 3083                         }
 3084                 }
 3085 
 3086                 mvneta_rx_unlockq(sc, q);
 3087                 if_input(ifp, m);
 3088                 mvneta_rx_lockq(sc, q);
 3089                 /*
 3090                  * Check whether this queue has been disabled in the
 3091                  * meantime. If yes, then clear LRO and exit.
 3092                  */
 3093                 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
 3094                         goto rx_lro;
 3095 rx_done:
 3096                 /* Refresh receive ring to avoid stall and minimize jitter. */
 3097                 if (processed >= MVNETA_RX_REFILL_COUNT) {
 3098                         mvneta_prxsu_update(sc, q, processed);
 3099                         mvneta_rx_queue_refill(sc, q);
 3100                         processed = 0;
 3101                 }
 3102                 continue;
 3103 rx_error:
 3104                 m_freem(m);
 3105                 rx->dma = ndma;
 3106                 /* Refresh receive ring to avoid stall and minimize jitter. */
 3107                 if (processed >= MVNETA_RX_REFILL_COUNT) {
 3108                         mvneta_prxsu_update(sc, q, processed);
 3109                         mvneta_rx_queue_refill(sc, q);
 3110                         processed = 0;
 3111                 }
 3112         }
 3113 #ifdef MVNETA_KTR
 3114         CTR3(KTR_SPARE2, "%s:%u %u packets received", if_name(ifp), q, npkt);
 3115 #endif
 3116         /* DMA status update */
 3117         mvneta_prxsu_update(sc, q, processed);
 3118         /* Refill the rest of buffers if there are any to refill */
 3119         mvneta_rx_queue_refill(sc, q);
 3120 
 3121 rx_lro:
 3122         /*
 3123          * Flush any outstanding LRO work
 3124          */
 3125         lro = &rx->lro;
 3126         while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
 3127                 LIST_REMOVE(queued, next);
 3128                 tcp_lro_flush(lro, queued);
 3129         }
 3130 }
 3131 
 3132 STATIC void
 3133 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
 3134 {
 3135 
 3136         bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
 3137         /* This will remove all data at once */
 3138         m_freem(rxbuf->m);
 3139 }
 3140 
 3141 STATIC void
 3142 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
 3143 {
 3144         struct mvneta_rx_ring *rx;
 3145         struct mvneta_rx_desc *r;
 3146         struct mvneta_buf *rxbuf;
 3147         bus_dma_segment_t segs;
 3148         struct mbuf *m;
 3149         uint32_t prxs, prxsu, ndesc;
 3150         int npkt, refill, nsegs, error;
 3151 
 3152         KASSERT_RX_MTX(sc, q);
 3153 
 3154         rx = MVNETA_RX_RING(sc, q);
 3155         prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
 3156         ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
 3157         refill = MVNETA_RX_RING_CNT - ndesc;
 3158 #ifdef MVNETA_KTR
 3159         CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
 3160             refill);
 3161 #endif
 3162         if (__predict_false(refill <= 0))
 3163                 return;
 3164 
 3165         for (npkt = 0; npkt < refill; npkt++) {
 3166                 rxbuf = &rx->rxbuf[rx->cpu];
 3167                 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
 3168                 if (__predict_false(m == NULL)) {
 3169                         error = ENOBUFS;
 3170                         break;
 3171                 }
 3172                 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
 3173 
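                      /*
                       * A NETA RX descriptor addresses a single physical
                       * buffer, so the cluster must map to exactly one
                       * DMA segment.
                       */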
 3174                 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
 3175                     m, &segs, &nsegs, BUS_DMA_NOWAIT);
 3176                 if (__predict_false(error != 0 || nsegs != 1)) {
 3177                         KASSERT(0, ("Failed to load Rx mbuf DMA map"));
 3178                         m_freem(m);
 3179                         break;
 3180                 }
 3181 
 3182                 /* Add the packet to the ring */
 3183                 rxbuf->m = m;
 3184                 r = &rx->desc[rx->cpu];
 3185                 r->bufptr_pa = segs.ds_addr;
 3186                 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
 3187 
 3188                 rx->cpu = rx_counter_adv(rx->cpu, 1);
 3189         }
 3190         if (npkt == 0) {
 3191                 if (refill == MVNETA_RX_RING_CNT)
 3192                         rx->needs_refill = TRUE;
 3193                 return;
 3194         }
 3195 
 3196         rx->needs_refill = FALSE;
 3197         bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 3198 
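              /*
               * As on the TX side, the PRXSU new-descriptor field accepts
               * at most 255 per write; post larger refills in chunks.
               */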
 3199         while (__predict_false(npkt > 255)) {
 3200                 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
 3201                 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
 3202                 npkt -= 255;
 3203         }
 3204         if (__predict_true(npkt > 0)) {
 3205                 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
 3206                 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
 3207         }
 3208 }
 3209 
 3210 STATIC __inline void
 3211 mvneta_rx_set_csumflag(if_t ifp,
 3212     struct mvneta_rx_desc *r, struct mbuf *m)
 3213 {
 3214         uint32_t csum_flags;
 3215 
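              /*
               * Map the RX descriptor's L3/L4 status bits onto mbuf(9)
               * csum_flags so the stack can skip software checksum
               * verification for frames the hardware already checked.
               */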
 3216         csum_flags = 0;
 3217         if (__predict_false((r->status &
 3218             (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
 3219                 return; /* not an IP packet */
 3220 
 3221         /* L3 */
 3222         if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
 3223             MVNETA_RX_IP_HEADER_OK))
 3224                 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
 3225 
 3226         if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
 3227             (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
 3228                 /* L4 */
 3229                 switch (r->status & MVNETA_RX_L4_MASK) {
 3230                 case MVNETA_RX_L4_TCP:
 3231                 case MVNETA_RX_L4_UDP:
 3232                         csum_flags |= CSUM_L4_CALC;
 3233                         if (__predict_true((r->status &
 3234                             MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
 3235                                 csum_flags |= CSUM_L4_VALID;
 3236                                 m->m_pkthdr.csum_data = htons(0xffff);
 3237                         }
 3238                         break;
 3239                 case MVNETA_RX_L4_OTH:
 3240                 default:
 3241                         break;
 3242                 }
 3243         }
 3244         m->m_pkthdr.csum_flags = csum_flags;
 3245 }
 3246 
 3247 /*
 3248  * MAC address filter
 3249  */
 3250 STATIC void
 3251 mvneta_filter_setup(struct mvneta_softc *sc)
 3252 {
 3253         if_t ifp;
 3254         uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
 3255         uint32_t pxc;
 3256         int i;
 3257 
 3258         KASSERT_SC_MTX(sc);
 3259 
 3260         memset(dfut, 0, sizeof(dfut));
 3261         memset(dfsmt, 0, sizeof(dfsmt));
 3262         memset(dfomt, 0, sizeof(dfomt));
 3263 
 3264         ifp = sc->ifp;
 3265         if_setflagbits(ifp, IFF_ALLMULTI, 0);
 3266         if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
 3267                 for (i = 0; i < MVNETA_NDFSMT; i++) {
 3268                         dfsmt[i] = dfomt[i] =
 3269                             MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
 3270                             MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
 3271                             MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
 3272                             MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
 3273                 }
 3274         }
 3275 
 3276         pxc = MVNETA_READ(sc, MVNETA_PXC);
 3277         pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
 3278             MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
 3279         pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
 3280         pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
 3281         pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
 3282         pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
 3283         pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
 3284         pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
 3285         if (if_getflags(ifp) & IFF_BROADCAST) {
 3286                 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
 3287         }
 3288         if (if_getflags(ifp) & IFF_PROMISC) {
 3289                 pxc |= MVNETA_PXC_UPM;
 3290         }
 3291         MVNETA_WRITE(sc, MVNETA_PXC, pxc);
 3292 
 3293         /* Set Destination Address Filter Unicast Table */
 3294         if (if_getflags(ifp) & IFF_PROMISC) {
 3295                 /* pass all unicast addresses */
 3296                 for (i = 0; i < MVNETA_NDFUT; i++) {
 3297                         dfut[i] =
 3298                             MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
 3299                             MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
 3300                             MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
 3301                             MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
 3302                 }
 3303         } else {
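                      /*
                       * The unicast table is indexed by the low nibble of
                       * the last MAC address byte; each 32-bit word holds
                       * four entries, so i>>2 selects the word and i&3 the
                       * byte lane within it.
                       */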
 3304                 i = sc->enaddr[5] & 0xf;                /* last nibble */
 3305                 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
 3306         }
 3307         MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
 3308 
 3309         /* Set Destination Address Filter Multicast Tables */
 3310         MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
 3311         MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
 3312 }
 3313 
 3314 /*
 3315  * sysctl(9)
 3316  */
 3317 STATIC int
 3318 sysctl_read_mib(SYSCTL_HANDLER_ARGS)
 3319 {
 3320         struct mvneta_sysctl_mib *arg;
 3321         struct mvneta_softc *sc;
 3322         uint64_t val;
 3323 
 3324         arg = (struct mvneta_sysctl_mib *)arg1;
 3325         if (arg == NULL)
 3326                 return (EINVAL);
 3327 
 3328         sc = arg->sc;
 3329         if (sc == NULL)
 3330                 return (EINVAL);
 3331         if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
 3332                 return (EINVAL);
 3333 
 3334         mvneta_sc_lock(sc);
 3335         val = arg->counter;
 3336         mvneta_sc_unlock(sc);
 3337         return (sysctl_handle_64(oidp, &val, 0, req));
 3338 }
 3339 
 3341 STATIC int
 3342 sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
 3343 {
 3344         struct mvneta_softc *sc;
 3345         int err, val;
 3346 
 3347         val = 0;
 3348         sc = (struct mvneta_softc *)arg1;
 3349         if (sc == NULL)
 3350                 return (EINVAL);
 3351 
 3352         err = sysctl_handle_int(oidp, &val, 0, req);
 3353         if (err != 0)
 3354                 return (err);
 3355 
 3356         if (val < 0 || val > 1)
 3357                 return (EINVAL);
 3358 
 3359         if (val == 1) {
 3360                 mvneta_sc_lock(sc);
 3361                 mvneta_clear_mib(sc);
 3362                 mvneta_sc_unlock(sc);
 3363         }
 3364 
 3365         return (0);
 3366 }
 3367 
 3368 STATIC int
 3369 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
 3370 {
 3371         struct mvneta_sysctl_queue *arg;
 3372         struct mvneta_rx_ring *rx;
 3373         struct mvneta_softc *sc;
 3374         uint32_t reg, time_mvtclk;
 3375         int err, time_us;
 3376 
 3377         rx = NULL;
 3378         arg = (struct mvneta_sysctl_queue *)arg1;
 3379         if (arg == NULL)
 3380                 return (EINVAL);
 3381         if (arg->queue < 0 || arg->queue >= MVNETA_RX_QNUM_MAX)
 3382                 return (EINVAL);
 3383         if (arg->rxtx != MVNETA_SYSCTL_RX)
 3384                 return (EINVAL);
 3385 
 3386         sc = arg->sc;
 3387         if (sc == NULL)
 3388                 return (EINVAL);
 3389 
 3390         /* Read the current threshold time for this queue. */
 3391         mvneta_sc_lock(sc);
 3392         mvneta_rx_lockq(sc, arg->queue);
 3393         rx = MVNETA_RX_RING(sc, arg->queue);
 3394         time_mvtclk = rx->queue_th_time;
 3395         time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq;
 3396         mvneta_rx_unlockq(sc, arg->queue);
 3397         mvneta_sc_unlock(sc);
 3398 
 3399         err = sysctl_handle_int(oidp, &time_us, 0, req);
 3400         if (err != 0)
 3401                 return (err);
 3402 
 3403         mvneta_sc_lock(sc);
 3404         mvneta_rx_lockq(sc, arg->queue);
 3405 
 3406         /* Update the threshold time; the valid range is 0 to 1 second. */
 3407         if (time_us < 0 || time_us > (1000 * 1000)) {
 3408                 mvneta_rx_unlockq(sc, arg->queue);
 3409                 mvneta_sc_unlock(sc);
 3410                 return (EINVAL);
 3411         }
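              /*
               * Convert microseconds back into core-clock ticks, the unit
               * the PRXITTH register counts in (for example, at a 250 MHz
               * clk_freq, 1000 us corresponds to 250000 ticks).
               */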
 3412         time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL);
 3413         rx->queue_th_time = time_mvtclk;
 3414         reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
 3415         MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
 3416         mvneta_rx_unlockq(sc, arg->queue);
 3417         mvneta_sc_unlock(sc);
 3418 
 3419         return (0);
 3420 }
 3421 
 3422 STATIC void
 3423 sysctl_mvneta_init(struct mvneta_softc *sc)
 3424 {
 3425         struct sysctl_ctx_list *ctx;
 3426         struct sysctl_oid_list *children;
 3427         struct sysctl_oid_list *rxchildren;
 3428         struct sysctl_oid_list *qchildren, *mchildren;
 3429         struct sysctl_oid *tree;
 3430         int i, q;
 3431         struct mvneta_sysctl_queue *rxarg;
 3432 #define MVNETA_SYSCTL_NAME(num) "queue" # num
 3433         static const char *sysctl_queue_names[] = {
 3434                 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
 3435                 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
 3436                 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
 3437                 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
 3438         };
 3439 #undef MVNETA_SYSCTL_NAME
 3440 
 3441 #ifndef NO_SYSCTL_DESCR
 3442 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
 3443         static const char *sysctl_queue_descrs[] = {
 3444                 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
 3445                 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
 3446                 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
 3447                 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
 3448         };
 3449 #undef MVNETA_SYSCTL_DESCR
 3450 #endif
 3451 
 3453         ctx = device_get_sysctl_ctx(sc->dev);
 3454         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3455 
 3456         tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
 3457             CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
 3458         rxchildren = SYSCTL_CHILDREN(tree);
 3459         tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
 3460             CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
 3461         mchildren = SYSCTL_CHILDREN(tree);
 3462 
 3464         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
 3465             CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
 3466         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
 3467             CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
 3468 
 3469         /*
 3470          * MIB access
 3471          */
 3472         /* dev.mvneta.[unit].mib.<mibs> */
 3473         for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
 3474                 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
 3475 
 3476                 mib_arg->sc = sc;
 3477                 mib_arg->index = i;
 3478                 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
 3479                     mvneta_mib_list[i].sysctl_name,
 3480                     CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
 3481             (void *)mib_arg, 0, sysctl_read_mib, "QU",
 3482                     mvneta_mib_list[i].desc);
 3483         }
 3484         SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
 3485             CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
 3486         SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
 3487             CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
 3488         SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
 3489             CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
 3490 
 3491         SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
 3492             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
 3493             (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");
 3494 
 3495         for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
 3496                 rxarg = &sc->sysctl_rx_queue[q];
 3497 
 3498                 rxarg->sc = sc;
 3499                 rxarg->queue = q;
 3500                 rxarg->rxtx = MVNETA_SYSCTL_RX;
 3501 
 3502                 /* dev.mvneta.[unit].rx.queue[q] */
 3503                 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
 3504                     sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
 3505                     sysctl_queue_descrs[q]);
 3506                 qchildren = SYSCTL_CHILDREN(tree);
 3507 
 3508                 /* dev.mvneta.[unit].rx.queue[q].threshold_timer_us */
 3509                 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
 3510                     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, rxarg, 0,
 3511                     sysctl_set_queue_rxthtime, "I",
 3512                     "interrupt coalescing threshold timer [us]");
 3513         }
 3514 }
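      /*
       * Example (hypothetical unit and queue numbers): the nodes created
       * above are reachable from userland with sysctl(8), e.g.:
       *
       *   # sysctl dev.mvneta.0.rx.queue0.threshold_timer_us=100
       *   # sysctl dev.mvneta.0.mib.reset=1
       */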
 3515 
 3516 /*
 3517  * MIB
 3518  */
 3519 STATIC uint64_t
 3520 mvneta_read_mib(struct mvneta_softc *sc, int index)
 3521 {
 3522         struct mvneta_mib_def *mib;
 3523         uint64_t val;
 3524 
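              /*
               * 64-bit MIB counters span two consecutive 32-bit registers:
               * read the low word, then fold in the high word.
               */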
 3525         mib = &mvneta_mib_list[index];
 3526         val = MVNETA_READ_MIB(sc, mib->regnum);
 3527         if (mib->reg64)
 3528                 val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32;
 3529         return (val);
 3530 }
 3531 
 3532 STATIC void
 3533 mvneta_clear_mib(struct mvneta_softc *sc)
 3534 {
 3535         int i;
 3536 
 3537         KASSERT_SC_MTX(sc);
 3538 
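              /*
               * The hardware MIB counters are clear-on-read: read each one
               * (discarding the value) to reset it, then zero the cached
               * software copies.
               */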
 3539         for (i = 0; i < nitems(mvneta_mib_list); i++) {
 3540                 (void)mvneta_read_mib(sc, i);
 3541                 sc->sysctl_mib[i].counter = 0;
 3542         }
 3543         MVNETA_READ(sc, MVNETA_PDFC);
 3544         sc->counter_pdfc = 0;
 3545         MVNETA_READ(sc, MVNETA_POFC);
 3546         sc->counter_pofc = 0;
 3547         sc->counter_watchdog = 0;
 3548 }
 3549 
 3550 STATIC void
 3551 mvneta_update_mib(struct mvneta_softc *sc)
 3552 {
 3553         struct mvneta_tx_ring *tx;
 3554         int i;
 3555         uint64_t val;
 3556         uint32_t reg;
 3557 
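              /*
               * Fold the clear-on-read hardware counters into the 64-bit
               * software MIB copies and feed the interface-level counters
               * exported to the network stack.
               */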
 3558         for (i = 0; i < nitems(mvneta_mib_list); i++) {
 3560                 val = mvneta_read_mib(sc, i);
 3561                 if (val == 0)
 3562                         continue;
 3563 
 3564                 sc->sysctl_mib[i].counter += val;
 3565                 switch (mvneta_mib_list[i].regnum) {
 3566                         case MVNETA_MIB_RX_GOOD_OCT:
 3567                                 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
 3568                                 break;
 3569                         case MVNETA_MIB_RX_BAD_FRAME:
 3570                                 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
 3571                                 break;
 3572                         case MVNETA_MIB_RX_GOOD_FRAME:
 3573                                 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
 3574                                 break;
 3575                         case MVNETA_MIB_RX_MCAST_FRAME:
 3576                                 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
 3577                                 break;
 3578                         case MVNETA_MIB_TX_GOOD_OCT:
 3579                                 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
 3580                                 break;
 3581                         case MVNETA_MIB_TX_GOOD_FRAME:
 3582                                 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
 3583                                 break;
 3584                         case MVNETA_MIB_TX_MCAST_FRAME:
 3585                                 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
 3586                                 break;
 3587                         case MVNETA_MIB_MAC_COL:
 3588                                 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
 3589                                 break;
 3590                         case MVNETA_MIB_TX_MAC_TRNS_ERR:
 3591                         case MVNETA_MIB_TX_EXCES_COL:
 3592                         case MVNETA_MIB_MAC_LATE_COL:
 3593                                 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
 3594                                 break;
 3595                 }
 3596         }
 3597 
 3598         reg = MVNETA_READ(sc, MVNETA_PDFC);
 3599         sc->counter_pdfc += reg;
 3600         if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
 3601         reg = MVNETA_READ(sc, MVNETA_POFC);
 3602         sc->counter_pofc += reg;
 3603         if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
 3604 
 3605         /* TX watchdog. */
 3606         if (sc->counter_watchdog_mib > 0) {
 3607                 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
 3608                 sc->counter_watchdog_mib = 0;
 3609         }
 3610         /*
 3611          * TX driver errors:
 3612          * We do not take queue locks to not disrupt TX path.
 3613          * We may only miss one drv error which will be fixed at
 3614          * next mib update. We may also clear counter when TX path
 3615          * is incrementing it but we only do it if counter was not zero
 3616          * thus we may only loose one error.
 3617          */
 3618         for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
 3619                 tx = MVNETA_TX_RING(sc, i);
 3620 
 3621                 if (tx->drv_error > 0) {
 3622                         if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
 3623                         tx->drv_error = 0;
 3624                 }
 3625         }
 3626 }
