FreeBSD/Linux Kernel Cross Reference
sys/dev/et/if_et.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007 Sepherosa Ziehau. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/bus.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/proc.h>
50 #include <sys/rman.h>
51 #include <sys/module.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55
56 #include <net/ethernet.h>
57 #include <net/if.h>
58 #include <net/if_var.h>
59 #include <net/if_dl.h>
60 #include <net/if_types.h>
61 #include <net/bpf.h>
62 #include <net/if_arp.h>
63 #include <net/if_media.h>
64 #include <net/if_vlan_var.h>
65
66 #include <machine/bus.h>
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73
74 #include <dev/et/if_etreg.h>
75 #include <dev/et/if_etvar.h>
76
77 #include "miibus_if.h"
78
79 MODULE_DEPEND(et, pci, 1, 1, 1);
80 MODULE_DEPEND(et, ether, 1, 1, 1);
81 MODULE_DEPEND(et, miibus, 1, 1, 1);
82
83 /* Tunables. */
84 static int msi_disable = 0;
85 TUNABLE_INT("hw.et.msi_disable", &msi_disable);
86
87 #define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
88
89 static int et_probe(device_t);
90 static int et_attach(device_t);
91 static int et_detach(device_t);
92 static int et_shutdown(device_t);
93 static int et_suspend(device_t);
94 static int et_resume(device_t);
95
96 static int et_miibus_readreg(device_t, int, int);
97 static int et_miibus_writereg(device_t, int, int, int);
98 static void et_miibus_statchg(device_t);
99
100 static void et_init_locked(struct et_softc *);
101 static void et_init(void *);
102 static int et_ioctl(struct ifnet *, u_long, caddr_t);
103 static void et_start_locked(struct ifnet *);
104 static void et_start(struct ifnet *);
105 static int et_watchdog(struct et_softc *);
106 static int et_ifmedia_upd_locked(struct ifnet *);
107 static int et_ifmedia_upd(struct ifnet *);
108 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
109 static uint64_t et_get_counter(struct ifnet *, ift_counter);
110
111 static void et_add_sysctls(struct et_softc *);
112 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
113 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
114
115 static void et_intr(void *);
116 static void et_rxeof(struct et_softc *);
117 static void et_txeof(struct et_softc *);
118
119 static int et_dma_alloc(struct et_softc *);
120 static void et_dma_free(struct et_softc *);
121 static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
122 static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
123 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
124 const char *);
125 static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
126 bus_dmamap_t, bus_addr_t *);
127 static void et_init_tx_ring(struct et_softc *);
128 static int et_init_rx_ring(struct et_softc *);
129 static void et_free_tx_ring(struct et_softc *);
130 static void et_free_rx_ring(struct et_softc *);
131 static int et_encap(struct et_softc *, struct mbuf **);
132 static int et_newbuf_cluster(struct et_rxbuf_data *, int);
133 static int et_newbuf_hdr(struct et_rxbuf_data *, int);
134 static void et_rxbuf_discard(struct et_rxbuf_data *, int);
135
136 static void et_stop(struct et_softc *);
137 static int et_chip_init(struct et_softc *);
138 static void et_chip_attach(struct et_softc *);
139 static void et_init_mac(struct et_softc *);
140 static void et_init_rxmac(struct et_softc *);
141 static void et_init_txmac(struct et_softc *);
142 static int et_init_rxdma(struct et_softc *);
143 static int et_init_txdma(struct et_softc *);
144 static int et_start_rxdma(struct et_softc *);
145 static int et_start_txdma(struct et_softc *);
146 static int et_stop_rxdma(struct et_softc *);
147 static int et_stop_txdma(struct et_softc *);
148 static void et_reset(struct et_softc *);
149 static int et_bus_config(struct et_softc *);
150 static void et_get_eaddr(device_t, uint8_t[]);
151 static void et_setmulti(struct et_softc *);
152 static void et_tick(void *);
153 static void et_stats_update(struct et_softc *);
154
/* Table of supported PCI vendor/device IDs, terminated by a NULL entry. */
static const struct et_dev {
	uint16_t vid;		/* PCI vendor ID */
	uint16_t did;		/* PCI device ID */
	const char *desc;	/* probe description string */
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	    "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	    "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

/* Device and miibus method dispatch table. */
static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

DRIVER_MODULE(et, pci, et_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, et, et_devices,
    nitems(et_devices) - 1);
DRIVER_MODULE(miibus, et, miibus_driver, 0, 0);
192
193 static int et_rx_intr_npkts = 32;
194 static int et_rx_intr_delay = 20; /* x10 usec */
195 static int et_tx_intr_nsegs = 126;
196 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
197
198 TUNABLE_INT("hw.et.timer", &et_timer);
199 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
200 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
201 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
202
203 static int
204 et_probe(device_t dev)
205 {
206 const struct et_dev *d;
207 uint16_t did, vid;
208
209 vid = pci_get_vendor(dev);
210 did = pci_get_device(dev);
211
212 for (d = et_devices; d->desc != NULL; ++d) {
213 if (vid == d->vid && did == d->did) {
214 device_set_desc(dev, d->desc);
215 return (BUS_PROBE_DEFAULT);
216 }
217 }
218 return (ENXIO);
219 }
220
/*
 * Attach: allocate bus resources, bring the PHY out of COMA, set up
 * DMA rings, attach the MII bus and register the network interface.
 * On failure, partially initialized state is unwound via et_detach().
 */
static int
et_attach(device_t dev)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t pmcfg;
	int cap, error, msic;

	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = PCIR_BAR(0);
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}

	/* Probe the PCIe capability; MSI is only attempted on PCIe parts. */
	msic = 0;
	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->sc_expcap = cap;
		sc->sc_flags |= ET_FLAG_PCIE;
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count: %d\n", msic);
	}
	/* Try to allocate a single MSI message unless disabled by tunable. */
	if (msic > 0 && msi_disable == 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->sc_flags |= ET_FLAG_MSI;
			} else
				pci_release_msi(dev);
		}
	}

	/*
	 * Allocate IRQ
	 */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
		/* Legacy INTx: rid 0, may be shared. */
		sc->sc_irq_rid = 0;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	} else {
		/* MSI: rid 1, exclusive. */
		sc->sc_irq_rid = 1;
		sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_ACTIVE);
	}
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
		sc->sc_flags |= ET_FLAG_FASTETHER;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);

	et_reset(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	/* Fill in the ifnet and hand it to the network stack. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_get_counter = et_get_counter;
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
	    et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook the interrupt last, once everything else is ready. */
	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, et_intr, sc, &sc->sc_irq_handle);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	et_add_sysctls(sc);

	return (0);
fail:
	et_detach(dev);
	return (error);
}
367
/*
 * Detach: reverse everything done in et_attach().  Also used as the
 * error-unwind path of et_attach(), so every resource is NULL-checked
 * before being released.
 */
static int
et_detach(device_t dev)
{
	struct et_softc *sc;

	sc = device_get_softc(dev);
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		ET_LOCK(sc);
		et_stop(sc);
		ET_UNLOCK(sc);
		/* Wait for the tick callout to finish before teardown. */
		callout_drain(&sc->sc_tick);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_handle != NULL)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
	if (sc->sc_irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
		pci_release_msi(dev);
	if (sc->sc_mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	et_dma_free(sc);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}
406
407 static int
408 et_shutdown(device_t dev)
409 {
410 struct et_softc *sc;
411
412 sc = device_get_softc(dev);
413 ET_LOCK(sc);
414 et_stop(sc);
415 ET_UNLOCK(sc);
416 return (0);
417 }
418
/*
 * Read a PHY register through the MII management interface.
 * Returns the register value, or 0 if the access timed out.
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc;
	uint32_t val;
	int i, ret;

	sc = device_get_softc(dev);
	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	/* Select the PHY address and register to access. */
	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY 50

	/* Poll until the management interface is idle and the data valid. */
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = val & ET_MII_STAT_VALUE_MASK;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (ret);
}
462
/*
 * Write a PHY register through the MII management interface.
 * Always returns 0; a timeout is reported but not propagated.
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	/* Select the PHY address and register to access. */
	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL,
	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define NRETRY 100

	/* Poll until the management interface is no longer busy. */
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
		    "write phy %d, reg %d timed out\n", phy, reg);
		/* A read restarts the interface after a stuck write. */
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (0);
}
502
/*
 * miibus link-state change callback: reprogram the MAC for the
 * resolved speed, duplex and flow-control settings, then re-enable
 * the TX/RX MAC engines.
 */
static void
et_miibus_statchg(device_t dev)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t cfg1, cfg2, ctrl;
	int i;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->sc_miibus);
	ifp = sc->ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Re-derive link state; gigabit is only valid on non-Fast parts. */
	sc->sc_flags &= ~ET_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= ET_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
				sc->sc_flags |= ET_FLAG_LINK;
			break;
		}
	}

	/* XXX Stop TX/RX MAC? */
	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
		return;

	/* Program MACs with resolved speed/duplex/flow-control. */
	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
	    ET_MAC_CFG2_PREAMBLE_LEN_MASK);

	/* Select GMII for gigabit, MII otherwise. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * Controller lacks automatic TX pause frame
		 * generation so it should be handled by driver.
		 * Even though driver can send pause frame with
		 * arbitrary pause time, controller does not
		 * provide a way that tells how many free RX
		 * buffers are available in controller.  This
		 * limitation makes it hard to generate XON frame
		 * in time on driver side so don't enable TX flow
		 * control.
		 */
#ifdef notyet
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
			cfg1 |= ET_MAC_CFG1_TXFLOW;
#endif
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
			cfg1 |= ET_MAC_CFG1_RXFLOW;
	} else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);

#define NRETRY 50

	/* Wait for the MAC to acknowledge TX/RX enable. */
	for (i = 0; i < NRETRY; ++i) {
		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;
		DELAY(100);
	}
	if (i == NRETRY)
		if_printf(ifp, "can't enable RX/TX\n");
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY
}
601
602 static int
603 et_ifmedia_upd_locked(struct ifnet *ifp)
604 {
605 struct et_softc *sc;
606 struct mii_data *mii;
607 struct mii_softc *miisc;
608
609 sc = ifp->if_softc;
610 mii = device_get_softc(sc->sc_miibus);
611 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
612 PHY_RESET(miisc);
613 return (mii_mediachg(mii));
614 }
615
616 static int
617 et_ifmedia_upd(struct ifnet *ifp)
618 {
619 struct et_softc *sc;
620 int res;
621
622 sc = ifp->if_softc;
623 ET_LOCK(sc);
624 res = et_ifmedia_upd_locked(ifp);
625 ET_UNLOCK(sc);
626
627 return (res);
628 }
629
630 static void
631 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
632 {
633 struct et_softc *sc;
634 struct mii_data *mii;
635
636 sc = ifp->if_softc;
637 ET_LOCK(sc);
638 if ((ifp->if_flags & IFF_UP) == 0) {
639 ET_UNLOCK(sc);
640 return;
641 }
642
643 mii = device_get_softc(sc->sc_miibus);
644 mii_pollstat(mii);
645 ifmr->ifm_active = mii->mii_media_active;
646 ifmr->ifm_status = mii->mii_media_status;
647 ET_UNLOCK(sc);
648 }
649
/*
 * Stop the interface: mask interrupts, disable the MAC and DMA
 * engines, reclaim ring buffers and mark the interface down.
 * Called with the softc lock held.
 */
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	callout_stop(&sc->sc_tick);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	/* Disable the TX/RX MAC engines and let in-flight activity settle. */
	CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
	    ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
	DELAY(100);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);
	/* Fold current hardware statistics into the softc counters. */
	et_stats_update(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
680
/*
 * Validate the EEPROM and, on PCIe parts, tune link-layer parameters
 * (ACK latency, replay timer, L0s/L1 exit latency, max read request).
 * Returns 0 on success, ENXIO if the EEPROM reports an error.
 */
static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
		return (ENXIO);
	}

	/* TODO: LED */

	/* The remaining setup only applies to PCIe controllers. */
	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
		return (0);

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIER_DEVICE_CAP, 4);
	max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		/* Unknown payload size: keep whatever is configured. */
		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(sc->dev,
		    ET_PCIR_REPLAY_TIMER, 2);
		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
		    2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
	val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
	/* L0s exit latency : 2us */
	val |= 0x00005000;
	/* L1 exit latency : 2us */
	val |= 0x00028000;
	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

	/*
	 * Set max read request size to 2048 bytes
	 */
	pci_set_max_read_req(sc->dev, 2048);

	return (0);
}
754
755 static void
756 et_get_eaddr(device_t dev, uint8_t eaddr[])
757 {
758 uint32_t val;
759 int i;
760
761 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
762 for (i = 0; i < 4; ++i)
763 eaddr[i] = (val >> (8 * i)) & 0xff;
764
765 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
766 for (; i < ETHER_ADDR_LEN; ++i)
767 eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
768 }
769
/*
 * Hard-reset the MAC and DMA engines and leave the chip with
 * interrupts masked and MAC configuration cleared.
 */
static void
et_reset(struct et_softc *sc)
{

	/* Hold every MAC sub-block in reset. */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/* Soft-reset the DMA engines, MACs, statistics and MMC blocks. */
	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	/* Release soft reset, then clear the MAC configuration entirely. */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}
791
/* Argument block for et_dma_map_addr(): receives the mapped bus address. */
struct et_dmamap_arg {
	bus_addr_t et_busaddr;
};

/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * into the caller-supplied et_dmamap_arg.  Does nothing on error.
 */
static void
et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct et_dmamap_arg *ctx;

	if (error)
		return;

	/* Ring tags are created with nsegments == 1. */
	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = arg;
	ctx->et_busaddr = segs->ds_addr;
}
809
/*
 * Allocate one DMA ring: create a tag with the given alignment and
 * size, allocate zeroed coherent memory for it and load its bus
 * address into *paddr.  'msg' names the ring in error messages.
 * Partial allocations are cleaned up later by et_dma_ring_free().
 */
static int
et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
    const char *msg)
{
	struct et_dmamap_arg ctx;
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
	    tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create %s dma tag\n", msg);
		return (error);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (error);
	}
	/* Load the address of the ring. */
	ctx.et_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (error);
	}
	*paddr = ctx.et_busaddr;
	return (0);
}
845
846 static void
847 et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
848 bus_dmamap_t map, bus_addr_t *paddr)
849 {
850
851 if (*paddr != 0) {
852 bus_dmamap_unload(*tag, map);
853 *paddr = 0;
854 }
855 if (*ring != NULL) {
856 bus_dmamem_free(*tag, *ring, map);
857 *ring = NULL;
858 }
859 if (*tag) {
860 bus_dma_tag_destroy(*tag);
861 *tag = NULL;
862 }
863 }
864
865 static int
866 et_dma_alloc(struct et_softc *sc)
867 {
868 struct et_txdesc_ring *tx_ring;
869 struct et_rxdesc_ring *rx_ring;
870 struct et_rxstat_ring *rxst_ring;
871 struct et_rxstatus_data *rxsd;
872 struct et_rxbuf_data *rbd;
873 struct et_txbuf_data *tbd;
874 struct et_txstatus_data *txsd;
875 int i, error;
876
877 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
878 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
879 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
880 &sc->sc_dtag);
881 if (error != 0) {
882 device_printf(sc->dev, "could not allocate parent dma tag\n");
883 return (error);
884 }
885
886 /* TX ring. */
887 tx_ring = &sc->sc_tx_ring;
888 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
889 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
890 &tx_ring->tr_paddr, "TX ring");
891 if (error)
892 return (error);
893
894 /* TX status block. */
895 txsd = &sc->sc_tx_status;
896 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
897 &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
898 &txsd->txsd_paddr, "TX status block");
899 if (error)
900 return (error);
901
902 /* RX ring 0, used as to recive small sized frames. */
903 rx_ring = &sc->sc_rx_ring[0];
904 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
905 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
906 &rx_ring->rr_paddr, "RX ring 0");
907 rx_ring->rr_posreg = ET_RX_RING0_POS;
908 if (error)
909 return (error);
910
911 /* RX ring 1, used as to store normal sized frames. */
912 rx_ring = &sc->sc_rx_ring[1];
913 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
914 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
915 &rx_ring->rr_paddr, "RX ring 1");
916 rx_ring->rr_posreg = ET_RX_RING1_POS;
917 if (error)
918 return (error);
919
920 /* RX stat ring. */
921 rxst_ring = &sc->sc_rxstat_ring;
922 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
923 &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
924 &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
925 if (error)
926 return (error);
927
928 /* RX status block. */
929 rxsd = &sc->sc_rx_status;
930 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
931 sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
932 (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
933 &rxsd->rxsd_paddr, "RX status block");
934 if (error)
935 return (error);
936
937 /* Create parent DMA tag for mbufs. */
938 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
939 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
940 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
941 &sc->sc_mbuf_dtag);
942 if (error != 0) {
943 device_printf(sc->dev,
944 "could not allocate parent dma tag for mbuf\n");
945 return (error);
946 }
947
948 /* Create DMA tag for mini RX mbufs to use RX ring 0. */
949 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
950 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
951 MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
952 if (error) {
953 device_printf(sc->dev, "could not create mini RX dma tag\n");
954 return (error);
955 }
956
957 /* Create DMA tag for standard RX mbufs to use RX ring 1. */
958 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
959 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
960 MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
961 if (error) {
962 device_printf(sc->dev, "could not create RX dma tag\n");
963 return (error);
964 }
965
966 /* Create DMA tag for TX mbufs. */
967 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
968 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
969 MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
970 &sc->sc_tx_tag);
971 if (error) {
972 device_printf(sc->dev, "could not create TX dma tag\n");
973 return (error);
974 }
975
976 /* Initialize RX ring 0. */
977 rbd = &sc->sc_rx_data[0];
978 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
979 rbd->rbd_newbuf = et_newbuf_hdr;
980 rbd->rbd_discard = et_rxbuf_discard;
981 rbd->rbd_softc = sc;
982 rbd->rbd_ring = &sc->sc_rx_ring[0];
983 /* Create DMA maps for mini RX buffers, ring 0. */
984 for (i = 0; i < ET_RX_NDESC; i++) {
985 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
986 &rbd->rbd_buf[i].rb_dmap);
987 if (error) {
988 device_printf(sc->dev,
989 "could not create DMA map for mini RX mbufs\n");
990 return (error);
991 }
992 }
993
994 /* Create a spare DMA map for mini RX buffers, ring 0. */
995 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
996 &sc->sc_rx_mini_sparemap);
997 if (error) {
998 device_printf(sc->dev,
999 "could not create spare DMA map for mini RX mbuf\n");
1000 return (error);
1001 }
1002
1003 /* Initialize RX ring 1. */
1004 rbd = &sc->sc_rx_data[1];
1005 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
1006 rbd->rbd_newbuf = et_newbuf_cluster;
1007 rbd->rbd_discard = et_rxbuf_discard;
1008 rbd->rbd_softc = sc;
1009 rbd->rbd_ring = &sc->sc_rx_ring[1];
1010 /* Create DMA maps for standard RX buffers, ring 1. */
1011 for (i = 0; i < ET_RX_NDESC; i++) {
1012 error = bus_dmamap_create(sc->sc_rx_tag, 0,
1013 &rbd->rbd_buf[i].rb_dmap);
1014 if (error) {
1015 device_printf(sc->dev,
1016 "could not create DMA map for mini RX mbufs\n");
1017 return (error);
1018 }
1019 }
1020
1021 /* Create a spare DMA map for standard RX buffers, ring 1. */
1022 error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
1023 if (error) {
1024 device_printf(sc->dev,
1025 "could not create spare DMA map for RX mbuf\n");
1026 return (error);
1027 }
1028
1029 /* Create DMA maps for TX buffers. */
1030 tbd = &sc->sc_tx_data;
1031 for (i = 0; i < ET_TX_NDESC; i++) {
1032 error = bus_dmamap_create(sc->sc_tx_tag, 0,
1033 &tbd->tbd_buf[i].tb_dmap);
1034 if (error) {
1035 device_printf(sc->dev,
1036 "could not create DMA map for TX mbufs\n");
1037 return (error);
1038 }
1039 }
1040
1041 return (0);
1042 }
1043
1044 static void
1045 et_dma_free(struct et_softc *sc)
1046 {
1047 struct et_txdesc_ring *tx_ring;
1048 struct et_rxdesc_ring *rx_ring;
1049 struct et_txstatus_data *txsd;
1050 struct et_rxstat_ring *rxst_ring;
1051 struct et_rxbuf_data *rbd;
1052 struct et_txbuf_data *tbd;
1053 int i;
1054
1055 /* Destroy DMA maps for mini RX buffers, ring 0. */
1056 rbd = &sc->sc_rx_data[0];
1057 for (i = 0; i < ET_RX_NDESC; i++) {
1058 if (rbd->rbd_buf[i].rb_dmap) {
1059 bus_dmamap_destroy(sc->sc_rx_mini_tag,
1060 rbd->rbd_buf[i].rb_dmap);
1061 rbd->rbd_buf[i].rb_dmap = NULL;
1062 }
1063 }
1064 if (sc->sc_rx_mini_sparemap) {
1065 bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1066 sc->sc_rx_mini_sparemap = NULL;
1067 }
1068 if (sc->sc_rx_mini_tag) {
1069 bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1070 sc->sc_rx_mini_tag = NULL;
1071 }
1072
1073 /* Destroy DMA maps for standard RX buffers, ring 1. */
1074 rbd = &sc->sc_rx_data[1];
1075 for (i = 0; i < ET_RX_NDESC; i++) {
1076 if (rbd->rbd_buf[i].rb_dmap) {
1077 bus_dmamap_destroy(sc->sc_rx_tag,
1078 rbd->rbd_buf[i].rb_dmap);
1079 rbd->rbd_buf[i].rb_dmap = NULL;
1080 }
1081 }
1082 if (sc->sc_rx_sparemap) {
1083 bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1084 sc->sc_rx_sparemap = NULL;
1085 }
1086 if (sc->sc_rx_tag) {
1087 bus_dma_tag_destroy(sc->sc_rx_tag);
1088 sc->sc_rx_tag = NULL;
1089 }
1090
1091 /* Destroy DMA maps for TX buffers. */
1092 tbd = &sc->sc_tx_data;
1093 for (i = 0; i < ET_TX_NDESC; i++) {
1094 if (tbd->tbd_buf[i].tb_dmap) {
1095 bus_dmamap_destroy(sc->sc_tx_tag,
1096 tbd->tbd_buf[i].tb_dmap);
1097 tbd->tbd_buf[i].tb_dmap = NULL;
1098 }
1099 }
1100 if (sc->sc_tx_tag) {
1101 bus_dma_tag_destroy(sc->sc_tx_tag);
1102 sc->sc_tx_tag = NULL;
1103 }
1104
1105 /* Destroy mini RX ring, ring 0. */
1106 rx_ring = &sc->sc_rx_ring[0];
1107 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1108 rx_ring->rr_dmap, &rx_ring->rr_paddr);
1109 /* Destroy standard RX ring, ring 1. */
1110 rx_ring = &sc->sc_rx_ring[1];
1111 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1112 rx_ring->rr_dmap, &rx_ring->rr_paddr);
1113 /* Destroy RX stat ring. */
1114 rxst_ring = &sc->sc_rxstat_ring;
1115 et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1116 rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1117 /* Destroy RX status block. */
1118 et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1119 rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1120 /* Destroy TX ring. */
1121 tx_ring = &sc->sc_tx_ring;
1122 et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1123 tx_ring->tr_dmap, &tx_ring->tr_paddr);
1124 /* Destroy TX status block. */
1125 txsd = &sc->sc_tx_status;
1126 et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1127 txsd->txsd_dmap, &txsd->txsd_paddr);
1128
1129 /* Destroy the parent tag. */
1130 if (sc->sc_dtag) {
1131 bus_dma_tag_destroy(sc->sc_dtag);
1132 sc->sc_dtag = NULL;
1133 }
1134 }
1135
/*
 * Minimal one-time chip setup performed at attach: reset the MAC,
 * program half-duplex parameters, reset the MII and bring the MAC
 * back out of reset so the PHY can be probed.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
1175
/*
 * Interrupt handler.  Reads and dispatches the interrupt status:
 * RX/TX completions and the timer tick.  A TX/RX DMA error triggers
 * a full reinitialization of the chip.  Interrupts are masked for
 * the duration of the handler and re-enabled on the way out only if
 * the interface is still running.
 */
static void
et_intr(void *xsc)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = xsc;
	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* Spurious interrupt check: none of our sources pending. */
	status = CSR_READ_4(sc, ET_INTR_STATUS);
	if ((status & ET_INTRS) == 0)
		goto done;

	/* Disable further interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
		device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
		    status);
		/* Clear RUNNING so et_init_locked() reinitializes. */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		et_init_locked(sc);
		ET_UNLOCK(sc);
		return;
	}
	if (status & ET_INTR_RXDMA)
		et_rxeof(sc);
	if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
		et_txeof(sc);
	if (status & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Re-enable interrupts and restart transmission. */
		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			et_start_locked(ifp);
	}
done:
	ET_UNLOCK(sc);
}
1218
1219 static void
1220 et_init_locked(struct et_softc *sc)
1221 {
1222 struct ifnet *ifp;
1223 int error;
1224
1225 ET_LOCK_ASSERT(sc);
1226
1227 ifp = sc->ifp;
1228 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1229 return;
1230
1231 et_stop(sc);
1232 et_reset(sc);
1233
1234 et_init_tx_ring(sc);
1235 error = et_init_rx_ring(sc);
1236 if (error)
1237 return;
1238
1239 error = et_chip_init(sc);
1240 if (error)
1241 goto fail;
1242
1243 /*
1244 * Start TX/RX DMA engine
1245 */
1246 error = et_start_rxdma(sc);
1247 if (error)
1248 return;
1249
1250 error = et_start_txdma(sc);
1251 if (error)
1252 return;
1253
1254 /* Enable interrupts. */
1255 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1256
1257 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1258
1259 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1260 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1261
1262 sc->sc_flags &= ~ET_FLAG_LINK;
1263 et_ifmedia_upd_locked(ifp);
1264
1265 callout_reset(&sc->sc_tick, hz, et_tick, sc);
1266
1267 fail:
1268 if (error)
1269 et_stop(sc);
1270 }
1271
/*
 * ifnet if_init entry point: acquire the softc lock and defer to
 * et_init_locked().
 */
static void
et_init(void *xsc)
{
	struct et_softc *sc;

	sc = xsc;
	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}
1281
/*
 * ifnet ioctl handler.  Takes the softc lock per-command as needed;
 * media ioctls are forwarded to the MII layer and anything unknown
 * falls through to ether_ioctl().
 */
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int error, mask, max_framelen;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	/* XXX LOCKSUSED */
	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the
				 * packet filter if a filtering-related
				 * flag changed since the last call.
				 */
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		/* Remember flags to detect changes next time. */
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
		}
		break;

	case SIOCSIFMTU:
		ET_LOCK(sc);
#if 0
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
#endif
		/* Jumbo support disabled: RX buffers are single clusters. */
		max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			ET_UNLOCK(sc);
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Reinitialize with the new frame length. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				et_init_locked(sc);
			}
		}
		ET_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ET_LOCK(sc);
		/* Only TX checksum offload can be toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= ET_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1374
/*
 * Dequeue frames from the interface send queue and load them into
 * the TX ring, then nudge the hardware's TX ready position once for
 * the whole batch.  Must be called with the softc lock held; does
 * nothing unless the interface is running and the link is up.
 */
static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc;
	struct mbuf *m_head = NULL;
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	uint32_t tx_ready_pos;
	int enq;

	sc = ifp->if_softc;
	ET_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING ||
	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
		return;

	/*
	 * Driver does not request TX completion interrupt for every
	 * queued frames to prevent generating excessive interrupts.
	 * This means driver may wait for TX completion interrupt even
	 * though some frames were successfully transmitted. Reclaiming
	 * transmitted frames will ensure driver see all available
	 * descriptors.
	 */
	tbd = &sc->sc_tx_data;
	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
		et_txeof(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Keep ET_NSEG_SPARE descriptors free for the next frame. */
		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (et_encap(sc, &m_head)) {
			/*
			 * NULL mbuf means et_encap() freed it (fatal);
			 * otherwise requeue and retry later.
			 */
			if (m_head == NULL) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				break;
			}
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			if (tbd->tbd_used > 0)
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Publish descriptors, then tell hardware the new position. */
		tx_ring = &sc->sc_tx_ring;
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
		    BUS_DMASYNC_PREWRITE);
		tx_ready_pos = tx_ring->tr_ready_index &
		    ET_TX_READY_POS_INDEX_MASK;
		if (tx_ring->tr_ready_wrap)
			tx_ready_pos |= ET_TX_READY_POS_WRAP;
		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
		/* Arm the watchdog; cleared by et_txeof() when ring drains. */
		sc->watchdog_timer = 5;
	}
}
1442
1443 static void
1444 et_start(struct ifnet *ifp)
1445 {
1446 struct et_softc *sc;
1447
1448 sc = ifp->if_softc;
1449 ET_LOCK(sc);
1450 et_start_locked(ifp);
1451 ET_UNLOCK(sc);
1452 }
1453
/*
 * Per-second TX watchdog, driven from et_tick().  Returns 0 while the
 * timer is idle or still counting down; on expiry logs the TX status
 * word, reinitializes the chip and returns EJUSTRETURN so the caller
 * stops rescheduling the tick (et_init_locked restarts it).
 */
static int
et_watchdog(struct et_softc *sc)
{
	uint32_t status;

	ET_LOCK_ASSERT(sc);

	/* Not armed, or armed but not yet expired. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return (0);

	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	status = le32toh(*(sc->sc_tx_status.txsd_status));
	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
	    status);

	if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
	/* Clear RUNNING so et_init_locked() performs a full reinit. */
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	et_init_locked(sc);
	return (EJUSTRETURN);
}
1475
/*
 * Halt the RX DMA engine and poll once (after a 5us settle) for the
 * HALTED acknowledgment.  Returns ETIMEDOUT if the engine did not
 * stop.
 */
static int
et_stop_rxdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}
1490
/*
 * Halt the TX DMA engine.  Unlike the RX side there is no completion
 * handshake to poll, so this always succeeds.
 */
static int
et_stop_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}
1499
1500 static void
1501 et_free_tx_ring(struct et_softc *sc)
1502 {
1503 struct et_txbuf_data *tbd;
1504 struct et_txbuf *tb;
1505 int i;
1506
1507 tbd = &sc->sc_tx_data;
1508 for (i = 0; i < ET_TX_NDESC; ++i) {
1509 tb = &tbd->tbd_buf[i];
1510 if (tb->tb_mbuf != NULL) {
1511 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1512 BUS_DMASYNC_POSTWRITE);
1513 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
1514 m_freem(tb->tb_mbuf);
1515 tb->tb_mbuf = NULL;
1516 }
1517 }
1518 }
1519
/*
 * Release any mbufs still attached to the two RX rings (mini ring 0
 * and standard ring 1), unloading the per-buffer DMA maps first.
 */
static void
et_free_rx_ring(struct et_softc *sc)
{
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxbuf *rb;
	int i;

	/* Ring 0 */
	rx_ring = &sc->sc_rx_ring[0];
	rbd = &sc->sc_rx_data[0];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			/*
			 * NOTE(review): the sync is issued on the ring
			 * map (rr_dmap) rather than the buffer map
			 * (rb_dmap) being unloaded below — looks
			 * suspicious; confirm against bus_dma(9) usage.
			 */
			bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}

	/* Ring 1 */
	rx_ring = &sc->sc_rx_ring[1];
	rbd = &sc->sc_rx_data[1];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			/* Same rr_dmap-vs-rb_dmap question as ring 0. */
			bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}
}
1556
1557 static u_int
1558 et_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1559 {
1560 uint32_t h, *hp, *hash = arg;
1561
1562 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
1563 h = (h & 0x3f800000) >> 23;
1564
1565 hp = &hash[0];
1566 if (h >= 32 && h < 64) {
1567 h -= 32;
1568 hp = &hash[1];
1569 } else if (h >= 64 && h < 96) {
1570 h -= 64;
1571 hp = &hash[2];
1572 } else if (h >= 96) {
1573 h -= 96;
1574 hp = &hash[3];
1575 }
1576 *hp |= (1 << h);
1577
1578 return (1);
1579 }
1580
/*
 * Program the RX packet filter: in promiscuous/allmulti mode filtering
 * is bypassed entirely; otherwise the 128-bit multicast hash table is
 * rebuilt from the interface's current multicast list.  Called with
 * the softc lock held.
 */
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Bypass the packet filter entirely. */
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	/* Fold each multicast address into hash[]; returns the count. */
	count = if_foreach_llmaddr(ifp, et_hash_maddr, &hash);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
1613
/*
 * Full chip initialization for et_init_locked(): partition the
 * internal packet memory between RX and TX queues based on the MTU,
 * then initialize the MAC, RX/TX MACs and RX/TX DMA engines.
 * Returns 0 on success or an errno from the DMA engine setup.
 */
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	ifp = sc->ifp;
	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		/* Even split for mid-size frames. */
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		/* Large frames: give TX one frame plus slack, RX the rest. */
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	/* RX queue occupies [START, rxq_end], TX queue the remainder. */
	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
		CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}
1676
/*
 * Reset the TX descriptor ring, the software TX bookkeeping and the
 * DMA'd TX status word to their pristine (empty) state.
 */
static void
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txstatus_data *txsd;

	tx_ring = &sc->sc_tx_ring;
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	/* Software state: no descriptors in use, start at index 0. */
	tbd = &sc->sc_tx_data;
	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	/* Clear the status word the chip DMAs completions into. */
	txsd = &sc->sc_tx_status;
	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1699
/*
 * Populate both RX rings with fresh mbufs (via each ring's
 * rbd_newbuf callback) and clear the RX status block and stat ring.
 * Returns 0 on success or the errno from a failed buffer allocation.
 */
static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	int i, error, n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		rbd = &sc->sc_rx_data[n];
		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i);
			if (error) {
				if_printf(sc->ifp, "%d ring %d buf, "
				    "newbuf failed: %d\n", n, i, error);
				return (error);
			}
		}
	}

	rxsd = &sc->sc_rx_status;
	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	rxst_ring = &sc->sc_rxstat_ring;
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1732
/*
 * Program the RX DMA engine: install the RX status block, the RX stat
 * ring and both RX descriptor rings, and configure RX interrupt
 * moderation.  The software ring indices are reset to match the
 * hardware positions written here.  Returns ETIMEDOUT if the engine
 * could not first be halted.
 */
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	rxsd = &sc->sc_rx_status;
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	rxst_ring = &sc->sc_rxstat_ring;
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	/* Refill watermark: 15% of the stat ring. */
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return (0);
}
1804
/*
 * Program the TX DMA engine: install the TX descriptor ring and the
 * TX status block, and reset the ready position.  The software ready
 * index is reset to match the hardware position written here.
 */
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txstatus_data *txsd;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	tx_ring = &sc->sc_tx_ring;
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	txsd = &sc->sc_tx_status;
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}
1841
/*
 * Initialize the MAC core: reset it, program inter-packet gap and
 * half-duplex parameters, set the station address and maximum frame
 * length, then release the reset.
 */
static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp;
	const uint8_t *eaddr;
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
	    (88 << ET_IPG_NONB2B_2_SHIFT) |
	    (80 << ET_IPG_MINIFG_SHIFT) |
	    (96 << ET_IPG_B2B_SHIFT);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	ifp = sc->ifp;
	eaddr = IF_LLADDR(ifp);
	/* ADDR1 holds bytes 2-5 (little-endian), ADDR2 bytes 0-1. */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
1895
/*
 * Initialize the RX MAC: clear WOL state, program the WOL source
 * address, configure cut-through segmentation for frames larger than
 * the RX FIFO allows, set up runt filtering and finally enable the
 * RX MAC (with WOL left disabled) and the multicast filter.
 */
static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp;
	const uint8_t *eaddr;
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address. XXX is this necessary?
	 */
	ifp = sc->ifp;
	eaddr = IF_LLADDR(ifp);
	/* Note: byte order here is reversed relative to ET_MAC_ADDR1/2. */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments. In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
	    ET_PKTFILT_MINLEN_MASK;
	val |= ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}
1982
/*
 * Initialize the TX MAC: program the flow-control pause time while
 * the MAC is disabled, then enable it with flow control left off.
 */
static void
et_init_txmac(struct et_softc *sc)
{

	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/*
	 * Initialize pause time.
	 * This register should be set before XON/XOFF frame is
	 * sent by driver.
	 */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}
2001
/*
 * Enable the RX DMA engine for both rings with their configured
 * buffer-size classes, then verify (after a 5us settle) that the
 * engine left the halted state.  Returns ETIMEDOUT on failure.
 */
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val;

	val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}
2022
/*
 * Enable the TX DMA engine in single-packet mode.  There is no
 * status handshake on the TX side, so this always succeeds.
 */
static int
et_start_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}
2030
/*
 * RX completion handler.  Walks the RX stat ring from the software
 * index up to the hardware's index (taken from the DMA'd RX status
 * block), passes good frames to the stack, recycles buffers for bad
 * frames, and advances both the stat-ring and descriptor-ring
 * positions in hardware.  Called with the softc lock held; the lock
 * is dropped around if_input().
 */
static void
et_rxeof(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat *st;
	struct ifnet *ifp;
	struct mbuf *m;
	uint32_t rxstat_pos, rxring_pos;
	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
	int buflen, buf_idx, npost[2], ring_idx;
	int rxst_index, rxst_wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	/* Hardware's current stat-ring position (index + wrap bit). */
	npost[0] = npost[1] = 0;
	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info1 = le32toh(st->rxst_info1);
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		/* Advance our stat-ring index and report it to hardware. */
		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		/* Sanity-check indices taken from DMA'd data. */
		if (ring_idx >= ET_RX_NRING) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;
		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
			/* Discard errored frame. */
			rbd->rbd_discard(rbd, buf_idx);
		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
			/* No available mbufs, discard it. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rbd->rbd_discard(rbd, buf_idx);
		} else {
			buflen -= ETHER_CRC_LEN;
			if (buflen < ETHER_HDR_LEN) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			} else {
				/* Hand the frame up; drop the lock around it. */
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp,
			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		/* Return the descriptor to hardware. */
		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD);
}
2148
/*
 * Map *m0 for DMA and build its TX descriptors.  On EFBIG the chain
 * is collapsed once and retried.  Returns 0 on success.  On failure,
 * *m0 is set to NULL only when the mbuf was freed (fatal); otherwise
 * the caller may requeue it.  The number of used descriptors is
 * accounted in sc_tx_data.
 */
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txdesc *td;
	struct mbuf *m;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	bus_dmamap_t map;
	uint32_t csum_flags, last_td_ctrl2;
	int error, i, idx, first_idx, last_idx, nsegs;

	tx_ring = &sc->sc_tx_ring;
	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	tbd = &sc->sc_tx_data;
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
	    0);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
		    &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
		bus_dmamap_unload(sc->sc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Request a completion interrupt only every sc_tx_intr_nsegs
	 * segments to limit the interrupt rate.
	 */
	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	m = *m0;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == nsegs - 1) {
			/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	/* First frag */
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);

	/*
	 * Swap the loaded map to the last descriptor's slot so that
	 * et_txeof() unloads it when the final fragment completes;
	 * the mbuf is likewise tracked on the last descriptor.
	 */
	MPASS(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	return (0);
}
2245
static void
et_txeof(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	struct ifnet *ifp;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	/* Nothing to reap if TX/RX are not up or no descriptors are queued. */
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * The hardware reports its consumer position as an index plus a
	 * wrap bit; the wrap bit disambiguates a full ring from an empty
	 * one when the indices are equal.
	 */
	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	/* Walk from our start position up to the hardware's position. */
	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];
		/*
		 * Only the descriptor holding the last fragment of a frame
		 * carries the mbuf/dmamap (see the encap path), so most
		 * slots have nothing to release.
		 */
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		/* Advance start index, toggling the wrap bit at ring end. */
		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	/* Ring drained: cancel the TX watchdog. */
	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	/* Re-enable transmit attempts once enough slots are free. */
	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
2300
2301 static void
2302 et_tick(void *xsc)
2303 {
2304 struct et_softc *sc;
2305 struct mii_data *mii;
2306
2307 sc = xsc;
2308 ET_LOCK_ASSERT(sc);
2309 mii = device_get_softc(sc->sc_miibus);
2310
2311 mii_tick(mii);
2312 et_stats_update(sc);
2313 if (et_watchdog(sc) == EJUSTRETURN)
2314 return;
2315 callout_reset(&sc->sc_tick, hz, et_tick, sc);
2316 }
2317
/*
 * Allocate and map a 2KB cluster mbuf for RX descriptor slot 'buf_idx',
 * then rewrite the descriptor to point at it.  Returns 0 on success or
 * ENOBUFS if allocation or DMA load fails (the old buffer, if any, is
 * left in place on failure).
 */
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Shift payload so the IP header lands on a natural boundary. */
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	/* Load into the spare map first so failure leaves 'rb' intact. */
	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the mapping of the buffer being replaced. */
	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
	}
	/* Swap the now-loaded spare map with the slot's map. */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	/* Publish the new buffer address in the RX descriptor. */
	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
2365
/*
 * Requeue RX descriptor 'buf_idx' with its existing buffer untouched;
 * used to recycle a slot when the received frame is being dropped.
 */
static void
et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_rxdesc *desc;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	/* Address fields still point at the old buffer; only rewrite ctrl. */
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
}
2376
/*
 * Like et_newbuf_cluster(), but allocates a plain header mbuf (MHLEN
 * bytes) for the mini RX ring used for small frames.  Returns 0 on
 * success or ENOBUFS; the old buffer is left in place on failure.
 */
static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	/* Shift payload so the IP header lands on a natural boundary. */
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	/* Load into the spare map first so failure leaves 'rb' intact. */
	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the mapping of the buffer being replaced. */
	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
	}
	/* Swap the now-loaded spare map with the slot's map. */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_mini_sparemap;
	sc->sc_rx_mini_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	/* Publish the new buffer address in the RX descriptor. */
	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
2424
/* Shorthand for registering read-only 32-/64-bit statistics counters. */
#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc * sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *parent;
	struct sysctl_oid *tree;
	struct et_hw_stats *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* Interrupt moderation tunables. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    et_sysctl_rx_intr_npkts, "I", "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");

	/* "stats" node holds the size-bucket, RX and TX subtrees below. */
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ET statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* TX/RX statistics. */
	stats = &sc->sc_stats;
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
	    "0 to 64 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
	    "65 to 127 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
	    "128 to 255 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
	    "256 to 511 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
	    "512 to 1023 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
	    "1024 to 1518 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
	    "1519 to 1522 bytes frames");

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->rx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->rx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->rx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->rx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->rx_control, "Control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->rx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
	    &stats->rx_unknown_control, "Unknown control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
	    &stats->rx_codeerrs, "Frames with code error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
	    &stats->rx_cserrs, "Frames with carrier sense error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
	    &stats->rx_runts, "Too short frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->rx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->rx_jabbers, "Frames with jabber error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->rx_drop, "Dropped frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->tx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->tx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->tx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->tx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->tx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
	    &stats->tx_excess_deferred, "Excessively deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
	    &stats->tx_excess_colls, "Excess collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
	    &stats->tx_total_colls, "Total collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
	    &stats->tx_pause_honored, "Honored pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->tx_drop, "Dropped frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->tx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->tx_crcerrs, "Frames with CRC errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->tx_control, "Control frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->tx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
	    &stats->tx_undersize, "Undersized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->tx_fragments, "Fragmented frames");
}

#undef	ET_SYSCTL_STAT_ADD32
#undef	ET_SYSCTL_STAT_ADD64
2565
2566 static int
2567 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2568 {
2569 struct et_softc *sc;
2570 struct ifnet *ifp;
2571 int error, v;
2572
2573 sc = arg1;
2574 ifp = sc->ifp;
2575 v = sc->sc_rx_intr_npkts;
2576 error = sysctl_handle_int(oidp, &v, 0, req);
2577 if (error || req->newptr == NULL)
2578 goto back;
2579 if (v <= 0) {
2580 error = EINVAL;
2581 goto back;
2582 }
2583
2584 if (sc->sc_rx_intr_npkts != v) {
2585 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2586 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2587 sc->sc_rx_intr_npkts = v;
2588 }
2589 back:
2590 return (error);
2591 }
2592
2593 static int
2594 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2595 {
2596 struct et_softc *sc;
2597 struct ifnet *ifp;
2598 int error, v;
2599
2600 sc = arg1;
2601 ifp = sc->ifp;
2602 v = sc->sc_rx_intr_delay;
2603 error = sysctl_handle_int(oidp, &v, 0, req);
2604 if (error || req->newptr == NULL)
2605 goto back;
2606 if (v <= 0) {
2607 error = EINVAL;
2608 goto back;
2609 }
2610
2611 if (sc->sc_rx_intr_delay != v) {
2612 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2613 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2614 sc->sc_rx_intr_delay = v;
2615 }
2616 back:
2617 return (error);
2618 }
2619
/*
 * Accumulate the chip's MAC statistics registers into the softc's
 * 32-/64-bit software counters.  Called periodically from et_tick();
 * the registers are assumed clear-on-read style accumulation sources
 * (NOTE(review): clear-on-read not verifiable from this file — confirm
 * against the ET1310 register documentation).
 */
static void
et_stats_update(struct et_softc *sc)
{
	struct et_hw_stats *stats;

	stats = &sc->sc_stats;
	/* Frame-size histogram buckets. */
	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

	/* RX MAC counters. */
	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

	/* TX MAC counters. */
	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
}
2673
2674 static uint64_t
2675 et_get_counter(struct ifnet *ifp, ift_counter cnt)
2676 {
2677 struct et_softc *sc;
2678 struct et_hw_stats *stats;
2679
2680 sc = if_getsoftc(ifp);
2681 stats = &sc->sc_stats;
2682
2683 switch (cnt) {
2684 case IFCOUNTER_OPACKETS:
2685 return (stats->tx_frames);
2686 case IFCOUNTER_COLLISIONS:
2687 return (stats->tx_total_colls);
2688 case IFCOUNTER_OERRORS:
2689 return (stats->tx_drop + stats->tx_jabbers +
2690 stats->tx_crcerrs + stats->tx_excess_deferred +
2691 stats->tx_late_colls);
2692 case IFCOUNTER_IPACKETS:
2693 return (stats->rx_frames);
2694 case IFCOUNTER_IERRORS:
2695 return (stats->rx_crcerrs + stats->rx_alignerrs +
2696 stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
2697 stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
2698 default:
2699 return (if_get_counter_default(ifp, cnt));
2700 }
2701 }
2702
/*
 * Device suspend method: stop the interface if it is running, then gate
 * all clocks and put the PHY into its low-power COMA state.
 */
static int
et_suspend(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	/* Disable all clocks and put PHY into COMA. */
	pmcfg = CSR_READ_4(sc, ET_PM);
	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
	    ET_PM_RXCLK_GATE);
	pmcfg |= ET_PM_PHY_SW_COMA;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	ET_UNLOCK(sc);
	return (0);
}
2722
/*
 * Device resume method: re-enable clocks (inverse of et_suspend(), which
 * also cleared COMA bit coverage) and reinitialize the interface if it
 * was administratively up.
 */
static int
et_resume(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	/* Gigabit PHY clock only applies to non-fast-ethernet parts. */
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);
	return (0);
}
Cache object: e8cf000fa3e919a79c8b16c7711ec7bc
|