/* FreeBSD/Linux Kernel Cross Reference: sys/pci/if_wx.c */
1 /* $FreeBSD$ */
2 /*
3 * Principal Author: Matthew Jacob <mjacob@feral.com>
4 * Copyright (c) 1999, 2001 by Traakan Software
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Additional Copyright (c) 2001 by Parag Patel
30 * under same licence for MII PHY code.
31 */
32
33 /*
34 * Intel Gigabit Ethernet (82452/82453) Driver.
35 * Inspired by fxp driver by David Greenman for FreeBSD, and by
36 * Bill Paul's work in other FreeBSD network drivers.
37 */
38
39 /*
40 * Many bug fixes gratefully acknowledged from:
41 *
42 * The folks at Sitara Networks
43 */
44
45 /*
46 * Options
47 */
48
49 /*
50 * Use only every other 16 byte receive descriptor, leaving the ones
51 * in between empty. This card is most efficient at reading/writing
52 * 32 byte cache lines, so avoid all the (not working for early rev
53 * cards) MWI and/or READ/MODIFY/WRITE cycles updating one descriptor
54 * would have you do.
55 *
56 * This isn't debugged yet.
57 */
58 /* #define PADDED_CELL 1 */
59
60 /*
61 * Since the includes are a mess, they'll all be in if_wxvar.h
62 */
63
64 #include <pci/if_wxvar.h>
65
66 #ifdef __alpha__
67 #undef vtophys
68 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va))
69 #endif /* __alpha__ */
70
/*
 * Function prototypes.
 */
74
75 static int wx_intr(void *);
76 static void wx_handle_link_intr(wx_softc_t *);
77 static void wx_check_link(wx_softc_t *);
78 static void wx_handle_rxint(wx_softc_t *);
79 static void wx_gc(wx_softc_t *);
80 static void wx_start(struct ifnet *);
81 static int wx_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
82 static int wx_ifmedia_upd(struct ifnet *);
83 static void wx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
84 static int wx_init(void *);
85 static void wx_hw_stop(wx_softc_t *);
86 static void wx_set_addr(wx_softc_t *, int, u_int8_t *);
87 static int wx_hw_initialize(wx_softc_t *);
88 static void wx_stop(wx_softc_t *);
89 static void wx_txwatchdog(struct ifnet *);
90 static int wx_get_rbuf(wx_softc_t *, rxpkt_t *);
91 static void wx_rxdma_map(wx_softc_t *, rxpkt_t *, struct mbuf *);
92
93 static INLINE void wx_eeprom_raise_clk(wx_softc_t *, u_int32_t);
94 static INLINE void wx_eeprom_lower_clk(wx_softc_t *, u_int32_t);
95 static INLINE void wx_eeprom_sobits(wx_softc_t *, u_int16_t, u_int16_t);
96 static INLINE u_int16_t wx_eeprom_sibits(wx_softc_t *);
97 static INLINE void wx_eeprom_cleanup(wx_softc_t *);
98 static INLINE u_int16_t wx_read_eeprom_word(wx_softc_t *, int);
99 static void wx_read_eeprom(wx_softc_t *, u_int16_t *, int, int);
100
101 static int wx_attach_common(wx_softc_t *);
102 static void wx_watchdog(void *);
103
104 static INLINE void wx_mwi_whackon(wx_softc_t *);
105 static INLINE void wx_mwi_unwhack(wx_softc_t *);
106 static int wx_dring_setup(wx_softc_t *);
107 static void wx_dring_teardown(wx_softc_t *);
108
109 static int wx_attach_phy(wx_softc_t *);
110 static int wx_miibus_readreg(void *, int, int);
111 static int wx_miibus_writereg(void *, int, int, int);
112 static void wx_miibus_statchg(void *);
113 static void wx_miibus_mediainit(void *);
114
115 static u_int32_t wx_mii_shift_in(wx_softc_t *);
116 static void wx_mii_shift_out(wx_softc_t *, u_int32_t, u_int32_t);
117
118 #define WX_DISABLE_INT(sc) WRITE_CSR(sc, WXREG_IMCLR, WXDISABLE)
119 #define WX_ENABLE_INT(sc) WRITE_CSR(sc, WXREG_IMASK, sc->wx_ienable)
120
121 /*
122 * Until we do a bit more work, we can get no bigger than MCLBYTES
123 */
124 #if 0
125 #define WX_MAXMTU (WX_MAX_PKT_SIZE_JUMBO - sizeof (struct ether_header))
126 #else
127 #define WX_MAXMTU (MCLBYTES - sizeof (struct ether_header))
128 #endif
129
130 #define DPRINTF(sc, x) if (sc->wx_debug) printf x
131 #define IPRINTF(sc, x) if (sc->wx_verbose) printf x
132
133 static const char ldn[] = "%s: link down\n";
134 static const char lup[] = "%s: link up\n";
135 static const char sqe[] = "%s: receive sequence error\n";
136 static const char ane[] = "%s: /C/ ordered sets seen- enabling ANE\n";
137 static const char inane[] = "%s: no /C/ ordered sets seen- disabling ANE\n";
138
139 static int wx_txint_delay = 5000; /* ~5ms */
140 TUNABLE_INT("hw.wx.txint_delay", &wx_txint_delay);
141
142 SYSCTL_NODE(_hw, OID_AUTO, wx, CTLFLAG_RD, 0, "WX driver parameters");
143 SYSCTL_INT(_hw_wx, OID_AUTO, txint_delay, CTLFLAG_RW,
144 &wx_txint_delay, 0, "");
145 static int wx_dump_stats = -1;
146 SYSCTL_INT(_hw_wx, OID_AUTO, dump_stats, CTLFLAG_RW,
147 &wx_dump_stats, 0, "");
148 static int wx_clr_stats = -1;
149 SYSCTL_INT(_hw_wx, OID_AUTO, clear_stats, CTLFLAG_RW,
150 &wx_clr_stats, 0, "");
151
152
/*
 * Program multicast addresses.
 *
 * This function must be called at splimp, but it may sleep.
 *
 * Refuses to run (EBUSY) while transmit descriptors are outstanding.
 * Collects up to WX_RAL_TAB_SIZE-1 link-level multicast addresses into
 * sc->wx_mcaddr; on overflow, or when ALLMULTI/PROMISC is set, falls
 * back to sc->all_mcasts.  Always finishes by reinitializing the chip
 * via wx_init() so the new filter takes effect.
 */
static int
wx_mc_setup(wx_softc_t *sc)
{
	struct ifnet *ifp = &sc->wx_if;
	struct ifmultiaddr *ifma;

	/*
	 * XXX: drain TX queue
	 */
	if (sc->tactive) {
		return (EBUSY);
	}

	wx_stop(sc);

	/* all-multicast or promiscuous: no per-address table needed */
	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		sc->all_mcasts = 1;
		return (wx_init(sc));
	}

	sc->wx_nmca = 0;
	for (ifma = ifp->if_multiaddrs.lh_first, sc->wx_nmca = 0;
	    ifma != NULL; ifma = ifma->ifma_link.le_next) {

		if (ifma->ifma_addr->sa_family != AF_LINK) {
			continue;
		}
		/* table full: punt to receiving all multicast */
		if (sc->wx_nmca >= WX_RAL_TAB_SIZE-1) {
			sc->wx_nmca = 0;
			sc->all_mcasts = 1;
			break;
		}
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (void *) &sc->wx_mcaddr[sc->wx_nmca++][0], 6);
	}
	return (wx_init(sc));
}
195
196 /*
197 * Return identification string if this is device is ours.
198 */
199 static int
200 wx_probe(device_t dev)
201 {
202 if (pci_get_vendor(dev) != WX_VENDOR_INTEL) {
203 return (ENXIO);
204 }
205 switch (pci_get_device(dev)) {
206 case WX_PRODUCT_82452:
207 device_set_desc(dev, "Intel PRO/1000 Gigabit (WISEMAN)");
208 break;
209 case WX_PRODUCT_LIVENGOOD:
210 device_set_desc(dev, "Intel PRO/1000 (LIVENGOOD)");
211 break;
212 case WX_PRODUCT_82452_SC:
213 device_set_desc(dev, "Intel PRO/1000 F Gigabit Ethernet");
214 break;
215 case WX_PRODUCT_82543:
216 device_set_desc(dev, "Intel PRO/1000 T Gigabit Ethernet");
217 break;
218 default:
219 return (ENXIO);
220 }
221 return (0);
222 }
223
224 static int
225 wx_attach(device_t dev)
226 {
227 int error = 0;
228 wx_softc_t *sc = device_get_softc(dev);
229 struct ifnet *ifp;
230 u_int32_t val;
231 int rid;
232
233 bzero(sc, sizeof (wx_softc_t));
234
235 callout_handle_init(&sc->w.sch);
236 sc->w.dev = dev;
237
238 if (bootverbose)
239 sc->wx_verbose = 1;
240
241 if (getenv_int ("wx_debug", &rid)) {
242 if (rid & (1 << device_get_unit(dev))) {
243 sc->wx_debug = 1;
244 }
245 }
246
247 if (getenv_int("wx_no_ilos", &rid)) {
248 if (rid & (1 << device_get_unit(dev))) {
249 sc->wx_no_ilos = 1;
250 }
251 }
252
253 if (getenv_int("wx_ilos", &rid)) {
254 if (rid & (1 << device_get_unit(dev))) {
255 sc->wx_ilos = 1;
256 }
257 }
258
259 if (getenv_int("wx_no_flow", &rid)) {
260 if (rid & (1 << device_get_unit(dev))) {
261 sc->wx_no_flow = 1;
262 }
263 }
264
265 #ifdef SMPNG
266 mtx_init(&sc->wx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
267 #endif
268 WX_LOCK(sc);
269 /*
270 * get revision && id...
271 */
272 sc->wx_idnrev = (pci_get_device(dev) << 16) | (pci_get_revid(dev));
273
274 /*
275 * Enable bus mastering, make sure that the cache line size is right.
276 */
277 pci_enable_busmaster(dev);
278 pci_enable_io(dev, SYS_RES_MEMORY);
279 val = pci_read_config(dev, PCIR_COMMAND, 4);
280 if ((val & PCIM_CMD_MEMEN) == 0) {
281 device_printf(dev, "failed to enable memory mapping\n");
282 error = ENXIO;
283 goto out;
284 }
285
286 /*
287 * Let the BIOS do it's job- but check for sanity.
288 */
289 val = pci_read_config(dev, PCIR_CACHELNSZ, 1);
290 if (val < 4 || val > 32) {
291 pci_write_config(dev, PCIR_CACHELNSZ, 8, 1);
292 }
293
294 /*
295 * Map control/status registers.
296 */
297 rid = WX_MMBA;
298 sc->w.mem = bus_alloc_resource(dev, SYS_RES_MEMORY,
299 &rid, 0, ~0, 1, RF_ACTIVE);
300 if (!sc->w.mem) {
301 device_printf(dev, "could not map memory\n");
302 error = ENXIO;
303 goto out;
304 }
305 sc->w.st = rman_get_bustag(sc->w.mem);
306 sc->w.sh = rman_get_bushandle(sc->w.mem);
307
308 rid = 0;
309 sc->w.irq = bus_alloc_resource(dev, SYS_RES_IRQ,
310 &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
311 if (sc->w.irq == NULL) {
312 device_printf(dev, "could not map interrupt\n");
313 error = ENXIO;
314 goto out;
315 }
316 error = bus_setup_intr(dev, sc->w.irq, INTR_TYPE_NET,
317 (void (*)(void *))wx_intr, sc, &sc->w.ih);
318 if (error) {
319 device_printf(dev, "could not setup irq\n");
320 goto out;
321 }
322 (void) snprintf(sc->wx_name, sizeof (sc->wx_name) - 1, "wx%d",
323 device_get_unit(dev));
324 if (wx_attach_common(sc)) {
325 bus_teardown_intr(dev, sc->w.irq, sc->w.ih);
326 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->w.irq);
327 bus_release_resource(dev, SYS_RES_MEMORY, WX_MMBA, sc->w.mem);
328 error = ENXIO;
329 goto out;
330 }
331 device_printf(dev, "Ethernet address %02x:%02x:%02x:%02x:%02x:%02x\n",
332 sc->w.arpcom.ac_enaddr[0], sc->w.arpcom.ac_enaddr[1],
333 sc->w.arpcom.ac_enaddr[2], sc->w.arpcom.ac_enaddr[3],
334 sc->w.arpcom.ac_enaddr[4], sc->w.arpcom.ac_enaddr[5]);
335
336 ifp = &sc->w.arpcom.ac_if;
337 ifp->if_unit = device_get_unit(dev);
338 ifp->if_name = "wx";
339 ifp->if_mtu = ETHERMTU; /* we always start at ETHERMTU size */
340 ifp->if_output = ether_output;
341 ifp->if_baudrate = 1000000000;
342 ifp->if_init = (void (*)(void *))wx_init;
343 ifp->if_softc = sc;
344 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
345 ifp->if_ioctl = wx_ioctl;
346 ifp->if_start = wx_start;
347 ifp->if_watchdog = wx_txwatchdog;
348 ifp->if_snd.ifq_maxlen = WX_MAX_TDESC - 1;
349 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
350 out:
351 WX_UNLOCK(sc);
352 return (error);
353 }
354
355 static int
356 wx_attach_phy(wx_softc_t *sc)
357 {
358 if (mii_phy_probe(sc->w.dev, &sc->w.miibus, wx_ifmedia_upd,
359 wx_ifmedia_sts)) {
360 printf("%s: no PHY probed!\n", sc->wx_name);
361 return (-1);
362 }
363 sc->wx_mii = 1;
364 return 0;
365 }
366
/*
 * Detach routine: quiesce the chip, unhook the interface, release bus
 * resources and free all software state.  Mirror image of wx_attach().
 */
static int
wx_detach(device_t dev)
{
	wx_softc_t *sc = device_get_softc(dev);

	WX_LOCK(sc);
	/* stop the chip before tearing anything down */
	wx_stop(sc);

	ether_ifdetach(&sc->w.arpcom.ac_if, ETHER_BPF_SUPPORTED);
	if (sc->w.miibus) {
		/* copper: detach the miibus child added by wx_attach_phy */
		bus_generic_detach(dev);
		device_delete_child(dev, sc->w.miibus);
	} else {
		/* fiber: we own the ifmedia list directly */
		ifmedia_removeall(&sc->wx_media);
	}
	bus_teardown_intr(dev, sc->w.irq, sc->w.ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->w.irq);
	bus_release_resource(dev, SYS_RES_MEMORY, WX_MMBA, sc->w.mem);

	/* free descriptor rings and the shadow packet arrays */
	wx_dring_teardown(sc);
	if (sc->rbase) {
		WXFREE(sc->rbase);
		sc->rbase = NULL;
	}
	if (sc->tbase) {
		WXFREE(sc->tbase);
		sc->tbase = NULL;
	}
	WX_UNLOCK(sc);
#ifdef SMPNG
	mtx_destroy(&sc->wx_mtx);
#endif
	return (0);
}
401
402 static int
403 wx_shutdown(device_t dev)
404 {
405 wx_hw_stop((wx_softc_t *) device_get_softc(dev));
406 return (0);
407 }
408
/*
 * Save the PCI command word and turn off Memory Write and Invalidate,
 * which per the note at the top of this file does not work on early
 * revision cards.  Paired with wx_mwi_unwhack().
 */
static INLINE void
wx_mwi_whackon(wx_softc_t *sc)
{
	sc->wx_cmdw = pci_read_config(sc->w.dev, PCIR_COMMAND, 2);
	pci_write_config(sc->w.dev, PCIR_COMMAND, sc->wx_cmdw & ~MWI, 2);
}
415
/*
 * Restore the saved PCI command word, but only if MWI was actually
 * enabled before wx_mwi_whackon() cleared it.
 */
static INLINE void
wx_mwi_unwhack(wx_softc_t *sc)
{
	if (sc->wx_cmdw & MWI) {
		pci_write_config(sc->w.dev, PCIR_COMMAND, sc->wx_cmdw, 2);
	}
}
423
424 static int
425 wx_dring_setup(wx_softc_t *sc)
426 {
427 size_t len;
428
429 len = sizeof (wxrd_t) * WX_MAX_RDESC;
430 sc->rdescriptors = (wxrd_t *)
431 contigmalloc(len, M_DEVBUF, M_NOWAIT, 0, ~0, 4096, 0);
432 if (sc->rdescriptors == NULL) {
433 printf("%s: could not allocate rcv descriptors\n", sc->wx_name);
434 return (-1);
435 }
436 if (((intptr_t)sc->rdescriptors) & 0xfff) {
437 contigfree(sc->rdescriptors, len, M_DEVBUF);
438 sc->rdescriptors = NULL;
439 printf("%s: rcv descriptors not 4KB aligned\n", sc->wx_name);
440 return (-1);
441 }
442 bzero(sc->rdescriptors, len);
443
444 len = sizeof (wxtd_t) * WX_MAX_TDESC;
445 sc->tdescriptors = (wxtd_t *)
446 contigmalloc(len, M_DEVBUF, M_NOWAIT, 0, ~0, 4096, 0);
447 if (sc->tdescriptors == NULL) {
448 contigfree(sc->rdescriptors,
449 sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
450 sc->rdescriptors = NULL;
451 printf("%s: could not allocate xmt descriptors\n", sc->wx_name);
452 return (-1);
453 }
454 if (((intptr_t)sc->tdescriptors) & 0xfff) {
455 contigfree(sc->rdescriptors,
456 sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
457 contigfree(sc->tdescriptors, len, M_DEVBUF);
458 sc->rdescriptors = NULL;
459 sc->tdescriptors = NULL;
460 printf("%s: xmt descriptors not 4KB aligned\n", sc->wx_name);
461 return (-1);
462 }
463 bzero(sc->tdescriptors, len);
464 return (0);
465 }
466
/*
 * Free the receive and transmit descriptor rings, if allocated.
 * Safe to call repeatedly; pointers are NULLed after freeing.
 */
static void
wx_dring_teardown(wx_softc_t *sc)
{
	if (sc->rdescriptors) {
		contigfree(sc->rdescriptors,
		    sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
		sc->rdescriptors = NULL;
	}
	if (sc->tdescriptors) {
		contigfree(sc->tdescriptors,
		    sizeof (wxtd_t) * WX_MAX_TDESC, M_DEVBUF);
		sc->tdescriptors = NULL;
	}
}
481
/*
 * newbus glue: method dispatch table, driver descriptor and module
 * registration.  The second DRIVER_MODULE hangs miibus off this
 * driver for copper (PHY-equipped) parts.
 */
static device_method_t wx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, wx_probe),
	DEVMETHOD(device_attach, wx_attach),
	DEVMETHOD(device_detach, wx_detach),
	DEVMETHOD(device_shutdown, wx_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, wx_miibus_readreg),
	DEVMETHOD(miibus_writereg, wx_miibus_writereg),
	DEVMETHOD(miibus_statchg, wx_miibus_statchg),
	DEVMETHOD(miibus_mediainit, wx_miibus_mediainit),

	{ 0, 0 }	/* terminator */
};

static driver_t wx_driver = {
	"wx", wx_methods, sizeof(wx_softc_t),
};
static devclass_t wx_devclass;
DRIVER_MODULE(if_wx, pci, wx_driver, wx_devclass, 0, 0);
DRIVER_MODULE(miibus, wx, miibus_driver, miibus_devclass, 0, 0);
508
/*
 * Do generic parts of attach. Our registers have been mapped
 * and our interrupt registered.
 *
 * Returns 0 on success, ENXIO for unsupported silicon, ENOMEM on any
 * later failure.  NOTE(review): a PHY probe failure also returns
 * ENOMEM via the common 'fail' path - arguably it should be ENXIO;
 * confirm before changing, callers only test for non-zero.
 */
static int
wx_attach_common(wx_softc_t *sc)
{
	size_t len;
	u_int32_t tmp;
	int ll = 0;	/* progress marker, printed in the failure message */

	/*
	 * First, check for revision support.
	 */
	if (sc->wx_idnrev < WX_WISEMAN_2_0) {
		printf("%s: cannot support ID 0x%x, revision %d chips\n",
		    sc->wx_name, sc->wx_idnrev >> 16, sc->wx_idnrev & 0xffff);
		return (ENXIO);
	}

	/*
	 * Second, reset the chip.
	 */
	wx_hw_stop(sc);

	/*
	 * Third, validate our EEPROM.
	 */

	/* TBD */

	/*
	 * Fourth, read eeprom for our MAC address and other things.
	 */
	wx_read_eeprom(sc, (u_int16_t *)sc->wx_enaddr, WX_EEPROM_MAC_OFF, 3);

	/*
	 * Fifth, establish some adapter parameters.
	 */
	sc->wx_dcr = 0;

	if (IS_LIVENGOOD_CU(sc)) {

		/* settings to talk to PHY */
		sc->wx_dcr |= WXDCR_FRCSPD | WXDCR_FRCDPX | WXDCR_SLU;
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);

		/*
		 * Raise the PHY's reset line to make it operational.
		 * Drive the pin as an output, pulse it low, then high,
		 * with 20ms settle delays between each step.
		 */
		tmp = READ_CSR(sc, WXREG_EXCT);
		tmp |= WXPHY_RESET_DIR4;
		WRITE_CSR(sc, WXREG_EXCT, tmp);
		DELAY(20*1000);

		tmp = READ_CSR(sc, WXREG_EXCT);
		tmp &= ~WXPHY_RESET4;
		WRITE_CSR(sc, WXREG_EXCT, tmp);
		DELAY(20*1000);

		tmp = READ_CSR(sc, WXREG_EXCT);
		tmp |= WXPHY_RESET4;
		WRITE_CSR(sc, WXREG_EXCT, tmp);
		DELAY(20*1000);

		if (wx_attach_phy(sc)) {
			goto fail;
		}
	} else {
		/* fiber: fixed 1000SX media, default full duplex */
		ifmedia_init(&sc->wx_media, IFM_IMASK,
		    wx_ifmedia_upd, wx_ifmedia_sts);

		ifmedia_add(&sc->wx_media, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->wx_media,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_set(&sc->wx_media, IFM_ETHER|IFM_1000_SX|IFM_FDX);

		sc->wx_media.ifm_media = sc->wx_media.ifm_cur->ifm_media;
	}

	/*
	 * Sixth, establish a default device control register word
	 * from the EEPROM control word 1 read by wx_read_eeprom().
	 */
	ll += 1;
	if (sc->wx_cfg1 & WX_EEPROM_CTLR1_FD)
		sc->wx_dcr |= WXDCR_FD;
	if (sc->wx_cfg1 & WX_EEPROM_CTLR1_ILOS)
		sc->wx_dcr |= WXDCR_ILOS;

	tmp = (sc->wx_cfg1 >> WX_EEPROM_CTLR1_SWDPIO_SHIFT) & WXDCR_SWDPIO_MASK;
	sc->wx_dcr |= (tmp << WXDCR_SWDPIO_SHIFT);

	/* kernel-environment overrides (set in wx_attach) beat EEPROM bits */
	if (sc->wx_no_ilos)
		sc->wx_dcr &= ~WXDCR_ILOS;
	if (sc->wx_ilos)
		sc->wx_dcr |= WXDCR_ILOS;
	if (sc->wx_no_flow == 0)
		sc->wx_dcr |= WXDCR_RFCE | WXDCR_TFCE;

	/*
	 * Seventh, allocate various sw structures...
	 */
	len = sizeof (rxpkt_t) * WX_MAX_RDESC;
	sc->rbase = (rxpkt_t *) WXMALLOC(len);
	if (sc->rbase == NULL) {
		goto fail;
	}
	bzero(sc->rbase, len);
	ll += 1;

	len = sizeof (txpkt_t) * WX_MAX_TDESC;
	sc->tbase = (txpkt_t *) WXMALLOC(len);
	if (sc->tbase == NULL) {
		goto fail;
	}
	bzero(sc->tbase, len);
	ll += 1;

	/*
	 * Eighth, allocate and dma map (platform dependent) descriptor rings.
	 * They have to be aligned on a 4KB boundary.
	 */
	if (wx_dring_setup(sc) == 0) {
		return (0);
	}

fail:
	printf("%s: failed to do common attach (%d)\n", sc->wx_name, ll);
	wx_dring_teardown(sc);
	if (sc->rbase) {
		WXFREE(sc->rbase);
		sc->rbase = NULL;
	}
	if (sc->tbase) {
		WXFREE(sc->tbase);
		sc->tbase = NULL;
	}
	return (ENOMEM);
}
648
649 /*
650 * EEPROM functions.
651 */
652
/*
 * Raise the EEPROM serial clock (SK) while preserving the other bits
 * in 'regval', then wait for the part to see the edge.
 */
static INLINE void
wx_eeprom_raise_clk(wx_softc_t *sc, u_int32_t regval)
{
	WRITE_CSR(sc, WXREG_EECDR, regval | WXEECD_SK);
	DELAY(50);
}
659
/*
 * Lower the EEPROM serial clock (SK) while preserving the other bits
 * in 'regval', then wait for the part to see the edge.
 */
static INLINE void
wx_eeprom_lower_clk(wx_softc_t *sc, u_int32_t regval)
{
	WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_SK);
	DELAY(50);
}
666
667 static INLINE void
668 wx_eeprom_sobits(wx_softc_t *sc, u_int16_t data, u_int16_t count)
669 {
670 u_int32_t regval, mask;
671
672 mask = 1 << (count - 1);
673 regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
674
675 do {
676 if (data & mask)
677 regval |= WXEECD_DI;
678 else
679 regval &= ~WXEECD_DI;
680 WRITE_CSR(sc, WXREG_EECDR, regval); DELAY(50);
681 wx_eeprom_raise_clk(sc, regval);
682 wx_eeprom_lower_clk(sc, regval);
683 mask >>= 1;
684 } while (mask != 0);
685 WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_DI);
686 }
687
688 static INLINE u_int16_t
689 wx_eeprom_sibits(wx_softc_t *sc)
690 {
691 unsigned int regval, i;
692 u_int16_t data;
693
694 data = 0;
695 regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
696 for (i = 0; i != 16; i++) {
697 data <<= 1;
698 wx_eeprom_raise_clk(sc, regval);
699 regval = READ_CSR(sc, WXREG_EECDR) & ~WXEECD_DI;
700 if (regval & WXEECD_DO) {
701 data |= 1;
702 }
703 wx_eeprom_lower_clk(sc, regval);
704 }
705 return (data);
706 }
707
/*
 * Finish an EEPROM transaction: drop chip select and data-in, then
 * issue one final clock pulse to return the part to idle.
 */
static INLINE void
wx_eeprom_cleanup(wx_softc_t *sc)
{
	u_int32_t regval;
	regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_CS);
	WRITE_CSR(sc, WXREG_EECDR, regval); DELAY(50);
	wx_eeprom_raise_clk(sc, regval);
	wx_eeprom_lower_clk(sc, regval);
}
717
718 static u_int16_t INLINE
719 wx_read_eeprom_word(wx_softc_t *sc, int offset)
720 {
721 u_int16_t data;
722 WRITE_CSR(sc, WXREG_EECDR, WXEECD_CS);
723 wx_eeprom_sobits(sc, EEPROM_READ_OPCODE, 3);
724 wx_eeprom_sobits(sc, offset, 6);
725 data = wx_eeprom_sibits(sc);
726 wx_eeprom_cleanup(sc);
727 return (data);
728 }
729
/*
 * Read 'words' consecutive 16-bit words from the EEPROM, starting at
 * word 'offset', into 'data'.  Side effect: also (re)reads EEPROM
 * control word 1 into sc->wx_cfg1 on every call.
 */
static void
wx_read_eeprom(wx_softc_t *sc, u_int16_t *data, int offset, int words)
{
	int i;
	for (i = 0; i < words; i++) {
		*data++ = wx_read_eeprom_word(sc, offset++);
	}
	sc->wx_cfg1 = wx_read_eeprom_word(sc, WX_EEPROM_CTLR1_OFF);
}
739
/*
 * Start packet transmission on the interface.
 *
 * Dequeues packets from ifp->if_snd and loads them into transmit
 * descriptors, one descriptor per non-empty mbuf.  Packets that
 * cannot be described directly (too many fragments, or no free
 * descriptors after a garbage-collect attempt) are coalesced into a
 * single cluster and retried once.  The tail pointer write to the
 * chip is deferred until all queued packets have been laid out.
 * Runs under WX_LOCK; also called from the interrupt path.
 */
static void
wx_start(struct ifnet *ifp)
{
	wx_softc_t *sc = SOFTC_IFP(ifp);
	/* widx stays == WX_MAX_TDESC unless at least one packet was queued */
	u_int16_t widx = WX_MAX_TDESC, cidx, nactv;

	WX_LOCK(sc);
	DPRINTF(sc, ("%s: wx_start\n", sc->wx_name));
	nactv = sc->tactive;
	while (nactv < WX_MAX_TDESC - 1) {
		int ndesc, plen;
		int gctried = 0;
		struct mbuf *m, *mb_head;

		IF_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL) {
			break;
		}
		sc->wx_xmitwanted++;

		/*
		 * If we have a packet less than ethermin, pad it out.
		 */
		if (mb_head->m_pkthdr.len < WX_MIN_RPKT_SIZE) {
			if (mb_head->m_next == NULL) {
				/*
				 * NOTE(review): this grows m_len without
				 * zeroing the pad bytes (and leaves
				 * m_pkthdr.len short), unlike the
				 * multi-mbuf branch below which bzero()s
				 * its padding - confirm whether stale
				 * mbuf contents can leak onto the wire.
				 */
				mb_head->m_len = WX_MIN_RPKT_SIZE;
			} else {
				/* coalesce the chain into one padded mbuf */
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					m_freem(mb_head);
					break;
				}
				m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
				    mtod(m, caddr_t));
				m->m_pkthdr.len = m->m_len = WX_MIN_RPKT_SIZE;
				bzero(mtod(m, char *) + mb_head->m_pkthdr.len,
				    WX_MIN_RPKT_SIZE - mb_head->m_pkthdr.len);
				sc->wx_xmitpullup++;
				m_freem(mb_head);
				mb_head = m;
			}
		}
	again:
		cidx = sc->tnxtfree;
		nactv = sc->tactive;


		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of that mbuf. If we have a length less than our
		 * minimum transmit size, we bail (to do a pullup). If we run
		 * out of descriptors, we also bail and try and do a pullup.
		 */
		for (plen = ndesc = 0, m = mb_head; m != NULL; m = m->m_next) {
			vm_offset_t vptr;
			wxtd_t *td;

			/*
			 * If this mbuf has no data, skip it.
			 */
			if (m->m_len == 0) {
				continue;
			}

			/*
			 * This appears to be a bogus check for the PRO1000T.
			 * I think they meant that the minimum packet size
			 * is in fact WX_MIN_XPKT_SIZE (all data loaded)
			 */
#if 0
			/*
			 * If this mbuf is too small for the chip's minimum,
			 * break out to cluster it.
			 */
			if (m->m_len < WX_MIN_XPKT_SIZE) {
				sc->wx_xmitrunt++;
				break;
			}
#endif

			/*
			 * Do we have a descriptor available for this mbuf?
			 * If not, try one garbage collection, then give up.
			 */
			if (++nactv == WX_MAX_TDESC) {
				if (gctried++ == 0) {
					sc->wx_xmitgc++;
					wx_gc(sc);
					goto again;
				}
				break;
			}
			sc->tbase[cidx].dptr = m;
			td = &sc->tdescriptors[cidx];
			td->length = m->m_len;
			plen += m->m_len;

			vptr = mtod(m, vm_offset_t);
			td->address.highpart = 0;
			td->address.lowpart = vtophys(vptr);

			td->cso = 0;
			td->status = 0;
			td->special = 0;
			td->cmd = 0;
			td->css = 0;

			if (sc->wx_debug) {
				printf("%s: XMIT[%d] %p vptr %lx (length %d "
				    "DMA addr %x) idx %d\n", sc->wx_name,
				    ndesc, m, (long) vptr, td->length,
				    td->address.lowpart, cidx);
			}
			ndesc++;
			cidx = T_NXT_IDX(cidx);
		}

		/*
		 * If we get here and m is NULL, we can send
		 * the packet chain described by mb_head.
		 */
		if (m == NULL) {
			/*
			 * Mark the last descriptor with EOP and tell the
			 * chip to insert a final checksum.
			 */
			wxtd_t *td = &sc->tdescriptors[T_PREV_IDX(cidx)];
			td->cmd = TXCMD_EOP|TXCMD_IFCS;
			/*
			 * Set up a delayed interrupt when this packet
			 * is sent and the descriptor written back.
			 * Additional packets completing will cause
			 * interrupt to be delayed further. Therefore,
			 * after the *last* packet is sent, after the delay
			 * period in TIDV, an interrupt will be generated
			 * which will cause us to garbage collect.
			 */
			td->cmd |= TXCMD_IDE|TXCMD_RPS;

			/*
			 * Don't xmit odd length packets.
			 * We're okay with bumping things
			 * up as long as our mbuf allocation
			 * is always larger than our MTU
			 * by a comfortable amount.
			 *
			 * Yes, it's a hole to run past the end
			 * of a packet.
			 */
			if (plen & 0x1) {
				sc->wx_oddpkt++;
				td->length++;
			}

			/* link this packet onto the busy list for wx_gc() */
			sc->tbase[sc->tnxtfree].sidx = sc->tnxtfree;
			sc->tbase[sc->tnxtfree].eidx = cidx;
			sc->tbase[sc->tnxtfree].next = NULL;
			if (sc->tbsyf) {
				sc->tbsyl->next = &sc->tbase[sc->tnxtfree];
			} else {
				sc->tbsyf = &sc->tbase[sc->tnxtfree];
			}
			sc->tbsyl = &sc->tbase[sc->tnxtfree];
			sc->tnxtfree = cidx;
			sc->tactive = nactv;
			ifp->if_timer = 10;
			if (ifp->if_bpf)
				bpf_mtap(WX_BPFTAP_ARG(ifp), mb_head);
			/* defer xmit until we've got them all */
			widx = cidx;
			continue;
		}

		/*
		 * Otherwise, we couldn't send this packet for some reason.
		 *
		 * If don't have a descriptor available, and this is a
		 * single mbuf packet, freeze output so that later we
		 * can restart when we have more room. Otherwise, we'll
		 * try and cluster the request. We've already tried to
		 * garbage collect completed descriptors.
		 */
		if (nactv == WX_MAX_TDESC && mb_head->m_next == NULL) {
			sc->wx_xmitputback++;
			ifp->if_flags |= IFF_OACTIVE;
			IF_PREPEND(&ifp->if_snd, mb_head);
			break;
		}

		/*
		 * Otherwise, it's either a fragment length somewhere in the
		 * chain that isn't at least WX_MIN_XPKT_SIZE in length or
		 * the number of fragments exceeds the number of descriptors
		 * available.
		 *
		 * We could try a variety of strategies here- if this is
		 * a length problem for single mbuf packet or a length problem
		 * for the last mbuf in a chain (we could just try and adjust
		 * it), but it's just simpler to try and cluster it.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(mb_head);
			break;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m_freem(mb_head);
			break;
		}
		m_copydata(mb_head, 0, mb_head->m_pkthdr.len, mtod(m, caddr_t));
		m->m_pkthdr.len = m->m_len = mb_head->m_pkthdr.len;
		m_freem(mb_head);
		mb_head = m;
		sc->wx_xmitcluster++;
		goto again;
	}

	/* kick the chip's tail pointer once for everything queued above */
	if (widx < WX_MAX_TDESC) {
		if (IS_WISEMAN(sc)) {
			WRITE_CSR(sc, WXREG_TDT, widx);
		} else {
			WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, widx);
		}
	}

	if (sc->tactive == WX_MAX_TDESC - 1) {
		sc->wx_xmitgc++;
		wx_gc(sc);
		if (sc->tactive >= WX_MAX_TDESC - 1) {
			sc->wx_xmitblocked++;
			ifp->if_flags |= IFF_OACTIVE;
		}
	}

	/* use SW LED to indicate transmission active */
	if (sc->tactive > 0 && sc->wx_mii) {
		WRITE_CSR(sc, WXREG_DCR,
		    READ_CSR(sc, WXREG_DCR) | (WXDCR_SWDPIO0|WXDCR_SWDPIN0));
	}
	WX_UNLOCK(sc);
}
987
/*
 * Process interface interrupts.
 *
 * Returns non-zero if the interrupt was ours.  Reading WXREG_ICR
 * clears the cause bits, so the snapshot is kept in sc->wx_icr for
 * the helper routines called below.
 */
static int
wx_intr(void *arg)
{
	wx_softc_t *sc = arg;
	int claimed = 0;

	WX_ILOCK(sc);
	/*
	 * Read interrupt cause register. Reading it clears bits.
	 */
	sc->wx_icr = READ_CSR(sc, WXREG_ICR);
	if (sc->wx_icr) {
		claimed++;
		/* mask further interrupts while we service this one */
		WX_DISABLE_INT(sc);
		sc->wx_intr++;
		if (sc->wx_icr & (WXISR_LSC|WXISR_RXSEQ|WXISR_GPI_EN1)) {
			sc->wx_linkintr++;
			wx_handle_link_intr(sc);
		}
		/* always sweep the receive ring, even without an RX cause */
		wx_handle_rxint(sc);
		if (sc->wx_icr & WXISR_TXDW) {
			sc->wx_txqe++;
			wx_gc(sc);
		}
#if 0
		if (sc->wx_icr & WXISR_TXQE) {
			sc->wx_txqe++;
			wx_gc(sc);
		}
#endif
		/* restart output if packets are waiting */
		if (sc->wx_if.if_snd.ifq_head != NULL) {
			wx_start(&sc->wx_if);
		}
		WX_ENABLE_INT(sc);
	}
	WX_IUNLK(sc);
	return (claimed);
}
1029
/*
 * Service a link-status change interrupt.
 *
 * For copper (MII) parts, polls the PHY and records link state.  For
 * fiber parts, manages the autonegotiation state machine: restores
 * ANE when /C/ ordered sets reappear, and tracks link up/down from
 * the device status register, mirroring link state onto the software
 * LED pins in wx_dcr.
 */
static void
wx_handle_link_intr(wx_softc_t *sc)
{
	u_int32_t txcw, rxcw, dcr, dsr;


	dcr = READ_CSR(sc, WXREG_DCR);
	DPRINTF(sc, ("%s: handle_link_intr: icr=%#x dcr=%#x\n",
	    sc->wx_name, sc->wx_icr, dcr));
	if (sc->wx_mii) {
		mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE) {
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
				IPRINTF(sc, (ldn, sc->wx_name));
				sc->linkup = 0;
			} else {
				IPRINTF(sc, (lup, sc->wx_name));
				sc->linkup = 1;
			}
			WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
		} else if (sc->wx_icr & WXISR_RXSEQ) {
			DPRINTF(sc, (sqe, sc->wx_name));
		}
		return;
	}

	txcw = READ_CSR(sc, WXREG_XMIT_CFGW);
	rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
	dsr = READ_CSR(sc, WXREG_DSR);

	/*
	 * If we have LOS or are now receiving Ordered Sets and are not
	 * doing auto-negotiation, restore autonegotiation.
	 */

	if (((dcr & WXDCR_SWDPIN1) || (rxcw & WXRXCW_C)) &&
	    ((txcw & WXTXCW_ANE) == 0)) {
		DPRINTF(sc, (ane, sc->wx_name));
		WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
		sc->wx_dcr &= ~WXDCR_SLU;
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
		sc->ane_failed = 0;
	}

	if (sc->wx_icr & WXISR_LSC) {
		/* link state change: record it and drive the SW LED pins */
		if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
			IPRINTF(sc, (lup, sc->wx_name));
			sc->linkup = 1;
			sc->wx_dcr |= (WXDCR_SWDPIO0|WXDCR_SWDPIN0);
		} else {
			IPRINTF(sc, (ldn, sc->wx_name));
			sc->linkup = 0;
			sc->wx_dcr &= ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0);
		}
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
	} else {
		DPRINTF(sc, (sqe, sc->wx_name));
	}
}
1090
/*
 * Periodic link check (called from the watchdog path).
 *
 * Copper: just poll the PHY.  Fiber: if autonegotiation has produced
 * no link, no signal and no /C/ ordered sets for two consecutive
 * checks, force the link up with ANE disabled; conversely, when /C/
 * ordered sets reappear while forced up, re-enable autonegotiation.
 */
static void
wx_check_link(wx_softc_t *sc)
{
	u_int32_t rxcw, dcr, dsr;

	if (sc->wx_mii) {
		mii_pollstat(WX_MII_FROM_SOFTC(sc));
		return;
	}

	rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
	dcr = READ_CSR(sc, WXREG_DCR);
	dsr = READ_CSR(sc, WXREG_DSR);

	if ((dsr & WXDSR_LU) == 0 && (dcr & WXDCR_SWDPIN1) == 0 &&
	    (rxcw & WXRXCW_C) == 0) {
		/* give autonegotiation one more check interval first */
		if (sc->ane_failed == 0) {
			sc->ane_failed = 1;
			return;
		}
		DPRINTF(sc, (inane, sc->wx_name));
		WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT & ~WXTXCW_ANE);
		if (sc->wx_idnrev < WX_WISEMAN_2_1)
			sc->wx_dcr &= ~WXDCR_TFCE;
		sc->wx_dcr |= WXDCR_SLU;
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
	} else if ((rxcw & WXRXCW_C) != 0 && (dcr & WXDCR_SLU) != 0) {
		DPRINTF(sc, (ane, sc->wx_name));
		WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
		sc->wx_dcr &= ~WXDCR_SLU;
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
	}
}
1124
/*
 * Service a receive interrupt.
 *
 * Walks the receive descriptor ring from sc->rnxt, assembling frames
 * (which may span several descriptors) into mbuf chains.  A frame only
 * partially present in the ring is parked in sc->rpending and resumed
 * on the next call.  Completed frames are staged in pending[] and only
 * handed to ether_input() after the ring bookkeeping (receive tail
 * register, sc->rnxt) has been updated.
 */
static void
wx_handle_rxint(wx_softc_t *sc)
{
	struct ether_header *eh;
	struct mbuf *m0, *mb, *pending[WX_MAX_RDESC];
	struct ifnet *ifp = &sc->wx_if;
	int npkts, ndesc, lidx, idx, tlen;

	DPRINTF(sc, ("%s: wx_handle_rxint\n", sc->wx_name));

	for (m0 = sc->rpending, tlen = ndesc = npkts = 0, idx = sc->rnxt,
	    lidx = R_PREV_IDX(idx); ndesc < WX_MAX_RDESC;
	    ndesc++, lidx = idx, idx = R_NXT_IDX(idx)) {
		wxrd_t *rd;
		rxpkt_t *rxpkt;
		int length, offset, lastframe;

		rd = &sc->rdescriptors[idx];
		/*
		 * XXX: DMA Flush descriptor
		 */
		if ((rd->status & RDSTAT_DD) == 0) {
			/*
			 * Descriptor not done yet: park any partially
			 * assembled frame in rpending for the next pass
			 * (unless one is already parked there).
			 */
			if (m0) {
				if (sc->rpending == NULL) {
					m0->m_pkthdr.len = tlen;
					sc->rpending = m0;
				} else {
					m_freem(m0);
				}
				m0 = NULL;
			}
			DPRINTF(sc, ("%s: WXRX: ndesc %d idx %d lidx %d\n",
			    sc->wx_name, ndesc, idx, lidx));
			break;
		}

		if (rd->errors != 0) {
			/* drop the errored frame and any partial assembly */
			printf("%s: packet with errors (%x)\n",
			    sc->wx_name, rd->errors);
			rd->status = 0;
			ifp->if_ierrors++;
			if (m0) {
				m_freem(m0);
				m0 = NULL;
				if (sc->rpending) {
					m_freem(sc->rpending);
					sc->rpending = NULL;
				}
			}
			continue;
		}


		rxpkt = &sc->rbase[idx];
		mb = rxpkt->dptr;
		if (mb == NULL) {
			/* should not happen; try to refill and move on */
			printf("%s: receive descriptor with no mbuf\n",
			    sc->wx_name);
			(void) wx_get_rbuf(sc, rxpkt);
			rd->status = 0;
			ifp->if_ierrors++;
			if (m0) {
				m_freem(m0);
				m0 = NULL;
				if (sc->rpending) {
					m_freem(sc->rpending);
					sc->rpending = NULL;
				}
			}
			continue;
		}

		/* XXX: Flush DMA for rxpkt */

		if (wx_get_rbuf(sc, rxpkt)) {
			/*
			 * No replacement cluster available: recycle the
			 * current mbuf back into the descriptor and drop
			 * the frame being assembled.
			 */
			sc->wx_rxnobuf++;
			wx_rxdma_map(sc, rxpkt, mb);
			ifp->if_ierrors++;
			rd->status = 0;
			if (m0) {
				m_freem(m0);
				m0 = NULL;
				if (sc->rpending) {
					m_freem(sc->rpending);
					sc->rpending = NULL;
				}
			}
			continue;
		}

		/*
		 * Save the completing packet's offset value and length
		 * and install the new one into the descriptor.
		 */
		lastframe = (rd->status & RDSTAT_EOP) != 0;
		length = rd->length;
		offset = rd->address.lowpart & 0xff;
		bzero (rd, sizeof (*rd));
		rd->address.lowpart = rxpkt->dma_addr + WX_RX_OFFSET_VALUE;

		mb->m_len = length;
		mb->m_data += offset;
		mb->m_next = NULL;
		if (m0 == NULL) {
			m0 = mb;
			tlen = length;
		} else if (m0 == sc->rpending) {
			/*
			 * Pick up where we left off before. If
			 * we have an offset (we're assuming the
			 * first frame has an offset), then we've
			 * lost sync somewhere along the line.
			 */
			if (offset) {
				printf("%s: lost sync with partial packet\n",
				    sc->wx_name);
				m_freem(sc->rpending);
				sc->rpending = NULL;
				m0 = mb;
				tlen = length;
			} else {
				sc->rpending = NULL;
				tlen = m0->m_pkthdr.len;
			}
		} else {
			tlen += length;
		}

		DPRINTF(sc, ("%s: RDESC[%d] len %d off %d lastframe %d\n",
		    sc->wx_name, idx, mb->m_len, offset, lastframe));
		if (m0 != mb)
			m_cat(m0, mb);
		if (lastframe == 0) {
			continue;
		}
		/* complete frame: strip trailing CRC and queue it */
		m0->m_pkthdr.rcvif = ifp;
		m0->m_pkthdr.len = tlen - WX_CRC_LENGTH;
		mb->m_len -= WX_CRC_LENGTH;

		eh = mtod(m0, struct ether_header *);
		/*
		 * No need to check for promiscous mode since
		 * the decision to keep or drop the packet is
		 * handled by ether_input()
		 */
		pending[npkts++] = m0;
		m0 = NULL;
		tlen = 0;
	}

	if (ndesc) {
		/* return the processed descriptors to the chip */
		if (IS_WISEMAN(sc)) {
			WRITE_CSR(sc, WXREG_RDT0, lidx);
		} else {
			WRITE_CSR(sc, WXREG_RDT0_LIVENGOOD, lidx);
		}
		sc->rnxt = idx;
	}

	if (npkts) {
		sc->wx_rxintr++;
	}

	/* now pass the completed frames up the stack */
	for (idx = 0; idx < npkts; idx++) {
		mb = pending[idx];
		if (ifp->if_bpf) {
			bpf_mtap(WX_BPFTAP_ARG(ifp), mb);
		}
		ifp->if_ipackets++;
		DPRINTF(sc, ("%s: RECV packet length %d\n",
		    sc->wx_name, mb->m_pkthdr.len));
		eh = mtod(mb, struct ether_header *);
		m_adj(mb, sizeof (struct ether_header));
		ether_input(ifp, eh, mb);
	}
}
1301
1302 static void
1303 wx_gc(wx_softc_t *sc)
1304 {
1305 struct ifnet *ifp = &sc->wx_if;
1306 txpkt_t *txpkt;
1307 u_int32_t tdh;
1308
1309 WX_LOCK(sc);
1310 txpkt = sc->tbsyf;
1311 if (IS_WISEMAN(sc)) {
1312 tdh = READ_CSR(sc, WXREG_TDH);
1313 } else {
1314 tdh = READ_CSR(sc, WXREG_TDH_LIVENGOOD);
1315 }
1316 while (txpkt != NULL) {
1317 u_int32_t end = txpkt->eidx, cidx = tdh;
1318
1319 /*
1320 * Normalize start..end indices to 2 *
1321 * WX_MAX_TDESC range to eliminate wrap.
1322 */
1323 if (txpkt->eidx < txpkt->sidx) {
1324 end += WX_MAX_TDESC;
1325 }
1326
1327 /*
1328 * Normalize current chip index to 2 *
1329 * WX_MAX_TDESC range to eliminate wrap.
1330 */
1331 if (cidx < txpkt->sidx) {
1332 cidx += WX_MAX_TDESC;
1333 }
1334
1335 /*
1336 * If the current chip index is between low and
1337 * high indices for this packet, it's not finished
1338 * transmitting yet. Because transmits are done FIFO,
1339 * this means we're done garbage collecting too.
1340 */
1341
1342 if (txpkt->sidx <= cidx && cidx < txpkt->eidx) {
1343 DPRINTF(sc, ("%s: TXGC %d..%d TDH %d\n", sc->wx_name,
1344 txpkt->sidx, txpkt->eidx, tdh));
1345 break;
1346 }
1347 ifp->if_opackets++;
1348
1349 if (txpkt->dptr) {
1350 (void) m_freem(txpkt->dptr);
1351 } else {
1352 printf("%s: null mbuf in gc\n", sc->wx_name);
1353 }
1354
1355 for (cidx = txpkt->sidx; cidx != txpkt->eidx;
1356 cidx = T_NXT_IDX(cidx)) {
1357 txpkt_t *tmp;
1358 wxtd_t *td;
1359
1360 td = &sc->tdescriptors[cidx];
1361 if (td->status & TXSTS_EC) {
1362 IPRINTF(sc, ("%s: excess collisions\n",
1363 sc->wx_name));
1364 ifp->if_collisions++;
1365 ifp->if_oerrors++;
1366 }
1367 if (td->status & TXSTS_LC) {
1368 IPRINTF(sc,
1369 ("%s: lost carrier\n", sc->wx_name));
1370 ifp->if_oerrors++;
1371 }
1372 tmp = &sc->tbase[cidx];
1373 DPRINTF(sc, ("%s: TXGC[%d] %p %d..%d done nact %d "
1374 "TDH %d\n", sc->wx_name, cidx, tmp->dptr,
1375 txpkt->sidx, txpkt->eidx, sc->tactive, tdh));
1376 tmp->dptr = NULL;
1377 if (sc->tactive == 0) {
1378 printf("%s: nactive < 0?\n", sc->wx_name);
1379 } else {
1380 sc->tactive -= 1;
1381 }
1382 bzero(td, sizeof (*td));
1383 }
1384 sc->tbsyf = txpkt->next;
1385 txpkt = sc->tbsyf;
1386 }
1387 if (sc->tactive < WX_MAX_TDESC - 1) {
1388 ifp->if_timer = 0;
1389 ifp->if_flags &= ~IFF_OACTIVE;
1390 }
1391
1392 /* used SW LED to indicate transmission not active */
1393 if (sc->tactive == 0 && sc->wx_mii) {
1394 WRITE_CSR(sc, WXREG_DCR,
1395 READ_CSR(sc, WXREG_DCR) & ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0));
1396 }
1397 WX_UNLOCK(sc);
1398 }
1399
1400 /*
1401 * Periodic timer to update packet in/out/collision statistics,
1402 * and, more importantly, garbage collect completed transmissions
1403 * and to handle link status changes.
1404 */
/*
 * Print / clear one softc statistics counter by field name.
 * The stringize operator (#) supplies the label; plain member access
 * reaches the field.  The original used "(sc)-> ## y", but '##' may
 * not paste '->' with an identifier (the result is not a valid
 * preprocessing token), which conforming compilers reject.
 */
#define	WX_PRT_STATS(sc, y)	printf("\t" # y " = %u\n", (sc)->y)
#define	WX_CLR_STATS(sc, y)	(sc)->y = 0
1407
/*
 * Once-a-second housekeeping timer.
 *
 * If a reinit was requested (sc->wx_needreinit, set e.g. by the
 * transmit watchdog), retry wx_init() here; otherwise garbage
 * collect finished transmits and service the link.  Also honors the
 * wx_dump_stats / wx_clr_stats debug variables, which name a unit
 * whose counters should be printed or cleared; each is reset to -1
 * once acted upon.  Reschedules itself every hz ticks.
 */
static void
wx_watchdog(void *arg)
{
	wx_softc_t *sc = arg;

	WX_LOCK(sc);
	if (sc->wx_needreinit) {
		/* wx_init() takes the lock itself, so drop ours first */
		WX_UNLOCK(sc);
		if (wx_init(sc) == 0) {
			WX_LOCK(sc);
			sc->wx_needreinit = 0;
		} else {
			WX_LOCK(sc);
		}
	} else {
		wx_gc(sc);
		wx_check_link(sc);
	}
	if (wx_dump_stats == device_get_unit(sc->w.dev)) {
		printf("%s: current statistics\n", sc->wx_name);
		WX_PRT_STATS(sc, wx_intr);
		WX_PRT_STATS(sc, wx_linkintr);
		WX_PRT_STATS(sc, wx_rxintr);
		WX_PRT_STATS(sc, wx_txqe);
		WX_PRT_STATS(sc, wx_xmitgc);
		WX_PRT_STATS(sc, wx_xmitpullup);
		WX_PRT_STATS(sc, wx_xmitcluster);
		WX_PRT_STATS(sc, wx_xmitputback);
		WX_PRT_STATS(sc, wx_xmitwanted);
		WX_PRT_STATS(sc, wx_xmitblocked);
		WX_PRT_STATS(sc, wx_xmitrunt);
		WX_PRT_STATS(sc, wx_rxnobuf);
		WX_PRT_STATS(sc, wx_oddpkt);
		wx_dump_stats = -1;
	}
	if (wx_clr_stats == device_get_unit(sc->w.dev)) {
		printf("%s: statistics cleared\n", sc->wx_name);
		WX_CLR_STATS(sc, wx_intr);
		WX_CLR_STATS(sc, wx_linkintr);
		WX_CLR_STATS(sc, wx_rxintr);
		WX_CLR_STATS(sc, wx_txqe);
		WX_CLR_STATS(sc, wx_xmitgc);
		WX_CLR_STATS(sc, wx_xmitpullup);
		WX_CLR_STATS(sc, wx_xmitcluster);
		WX_CLR_STATS(sc, wx_xmitputback);
		WX_CLR_STATS(sc, wx_xmitwanted);
		WX_CLR_STATS(sc, wx_xmitblocked);
		WX_CLR_STATS(sc, wx_xmitrunt);
		WX_CLR_STATS(sc, wx_rxnobuf);
		WX_CLR_STATS(sc, wx_oddpkt);
		wx_clr_stats = -1;
	}
	WX_UNLOCK(sc);

	/*
	 * Schedule another timeout one second from now.
	 */
	TIMEOUT(sc, wx_watchdog, sc, hz);
}
1467
1468 /*
1469 * Stop and reinitialize the hardware
1470 */
1471 static void
1472 wx_hw_stop(wx_softc_t *sc)
1473 {
1474 u_int32_t icr;
1475 DPRINTF(sc, ("%s: wx_hw_stop\n", sc->wx_name));
1476 WX_DISABLE_INT(sc);
1477 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1478 wx_mwi_whackon(sc);
1479 }
1480 WRITE_CSR(sc, WXREG_DCR, WXDCR_RST);
1481 DELAY(20 * 1000);
1482 icr = READ_CSR(sc, WXREG_ICR);
1483 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1484 wx_mwi_unwhack(sc);
1485 }
1486 }
1487
1488 static void
1489 wx_set_addr(wx_softc_t *sc, int idx, u_int8_t *mac)
1490 {
1491 u_int32_t t0, t1;
1492 DPRINTF(sc, ("%s: wx_set_addr\n", sc->wx_name));
1493 t0 = (mac[0]) | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
1494 t1 = (mac[4] << 0) | (mac[5] << 8);
1495 t1 |= WX_RAL_AV;
1496 WRITE_CSR(sc, WXREG_RAL_LO(idx), t0);
1497 WRITE_CSR(sc, WXREG_RAL_HI(idx), t1);
1498 }
1499
1500 static int
1501 wx_hw_initialize(wx_softc_t *sc)
1502 {
1503 int i;
1504
1505 DPRINTF(sc, ("%s: wx_hw_initialize\n", sc->wx_name));
1506
1507 WRITE_CSR(sc, WXREG_VET, 0);
1508 for (i = 0; i < (WX_VLAN_TAB_SIZE << 2); i += 4) {
1509 WRITE_CSR(sc, (WXREG_VFTA + i), 0);
1510 }
1511 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1512 wx_mwi_whackon(sc);
1513 WRITE_CSR(sc, WXREG_RCTL, WXRCTL_RST);
1514 DELAY(5 * 1000);
1515 }
1516 /*
1517 * Load the first receiver address with our MAC address,
1518 * and load as many multicast addresses as can fit into
1519 * the receive address array.
1520 */
1521 wx_set_addr(sc, 0, sc->wx_enaddr);
1522 for (i = 1; i <= sc->wx_nmca; i++) {
1523 if (i >= WX_RAL_TAB_SIZE) {
1524 break;
1525 } else {
1526 wx_set_addr(sc, i, sc->wx_mcaddr[i-1]);
1527 }
1528 }
1529
1530 while (i < WX_RAL_TAB_SIZE) {
1531 WRITE_CSR(sc, WXREG_RAL_LO(i), 0);
1532 WRITE_CSR(sc, WXREG_RAL_HI(i), 0);
1533 i++;
1534 }
1535
1536 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1537 WRITE_CSR(sc, WXREG_RCTL, 0);
1538 DELAY(1 * 1000);
1539 wx_mwi_unwhack(sc);
1540 }
1541
1542 /*
1543 * Clear out the hashed multicast table array.
1544 */
1545 for (i = 0; i < WX_MC_TAB_SIZE; i++) {
1546 WRITE_CSR(sc, WXREG_MTA + (sizeof (u_int32_t) * 4), 0);
1547 }
1548
1549 if (IS_LIVENGOOD_CU(sc)) {
1550 /*
1551 * has a PHY - raise its reset line to make it operational
1552 */
1553 u_int32_t tmp = READ_CSR(sc, WXREG_EXCT);
1554 tmp |= WXPHY_RESET_DIR4;
1555 WRITE_CSR(sc, WXREG_EXCT, tmp);
1556 DELAY(20*1000);
1557
1558 tmp = READ_CSR(sc, WXREG_EXCT);
1559 tmp &= ~WXPHY_RESET4;
1560 WRITE_CSR(sc, WXREG_EXCT, tmp);
1561 DELAY(20*1000);
1562
1563 tmp = READ_CSR(sc, WXREG_EXCT);
1564 tmp |= WXPHY_RESET4;
1565 WRITE_CSR(sc, WXREG_EXCT, tmp);
1566 DELAY(20*1000);
1567 } else if (IS_LIVENGOOD(sc)) {
1568 u_int16_t tew;
1569
1570 /*
1571 * Handle link control
1572 */
1573 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr | WXDCR_LRST);
1574 DELAY(50 * 1000);
1575
1576 wx_read_eeprom(sc, &tew, WX_EEPROM_CTLR2_OFF, 1);
1577 tew = (tew & WX_EEPROM_CTLR2_SWDPIO) << WX_EEPROM_EXT_SHIFT;
1578 WRITE_CSR(sc, WXREG_EXCT, (u_int32_t)tew);
1579 }
1580
1581 if (sc->wx_dcr & (WXDCR_RFCE|WXDCR_TFCE)) {
1582 WRITE_CSR(sc, WXREG_FCAL, FC_FRM_CONST_LO);
1583 WRITE_CSR(sc, WXREG_FCAH, FC_FRM_CONST_HI);
1584 WRITE_CSR(sc, WXREG_FCT, FC_TYP_CONST);
1585 } else {
1586 WRITE_CSR(sc, WXREG_FCAL, 0);
1587 WRITE_CSR(sc, WXREG_FCAH, 0);
1588 WRITE_CSR(sc, WXREG_FCT, 0);
1589 }
1590 WRITE_CSR(sc, WXREG_FLOW_XTIMER, WX_XTIMER_DFLT);
1591
1592 if (IS_WISEMAN(sc)) {
1593 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1594 WRITE_CSR(sc, WXREG_FLOW_RCV_HI, 0);
1595 WRITE_CSR(sc, WXREG_FLOW_RCV_LO, 0);
1596 sc->wx_dcr &= ~(WXDCR_RFCE|WXDCR_TFCE);
1597 } else {
1598 WRITE_CSR(sc, WXREG_FLOW_RCV_HI, WX_RCV_FLOW_HI_DFLT);
1599 WRITE_CSR(sc, WXREG_FLOW_RCV_LO, WX_RCV_FLOW_LO_DFLT);
1600 }
1601 } else {
1602 WRITE_CSR(sc, WXREG_FLOW_RCV_HI_LIVENGOOD, WX_RCV_FLOW_HI_DFLT);
1603 WRITE_CSR(sc, WXREG_FLOW_RCV_LO_LIVENGOOD, WX_RCV_FLOW_LO_DFLT);
1604 }
1605
1606 if (!IS_LIVENGOOD_CU(sc))
1607 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1608
1609 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1610 DELAY(50 * 1000);
1611
1612 if (!IS_LIVENGOOD_CU(sc)) {
1613 /*
1614 * The pin stuff is all FM from the Linux driver.
1615 */
1616 if ((READ_CSR(sc, WXREG_DCR) & WXDCR_SWDPIN1) == 0) {
1617 for (i = 0; i < (WX_LINK_UP_TIMEOUT/10); i++) {
1618 DELAY(10 * 1000);
1619 if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
1620 sc->linkup = 1;
1621 break;
1622 }
1623 }
1624 if (sc->linkup == 0) {
1625 sc->ane_failed = 1;
1626 wx_check_link(sc);
1627 }
1628 sc->ane_failed = 0;
1629 } else {
1630 printf("%s: SWDPIO1 did not clear- check for reversed "
1631 "or disconnected cable\n", sc->wx_name);
1632 /* but return okay anyway */
1633 }
1634 }
1635
1636 sc->wx_ienable = WXIENABLE_DEFAULT;
1637 return (0);
1638 }
1639
1640 /*
1641 * Stop the interface. Cancels the statistics updater and resets the interface.
1642 */
1643 static void
1644 wx_stop(wx_softc_t *sc)
1645 {
1646 txpkt_t *txp;
1647 rxpkt_t *rxp;
1648 struct ifnet *ifp = &sc->wx_if;
1649
1650 DPRINTF(sc, ("%s: wx_stop\n", sc->wx_name));
1651 /*
1652 * Cancel stats updater.
1653 */
1654 UNTIMEOUT(wx_watchdog, sc, sc);
1655
1656 /*
1657 * Reset the chip
1658 */
1659 wx_hw_stop(sc);
1660
1661 /*
1662 * Release any xmit buffers.
1663 */
1664 for (txp = sc->tbase; txp && txp < &sc->tbase[WX_MAX_TDESC]; txp++) {
1665 if (txp->dptr) {
1666 m_free(txp->dptr);
1667 txp->dptr = NULL;
1668 }
1669 }
1670
1671 /*
1672 * Free all the receive buffers.
1673 */
1674 for (rxp = sc->rbase; rxp && rxp < &sc->rbase[WX_MAX_RDESC]; rxp++) {
1675 if (rxp->dptr) {
1676 m_free(rxp->dptr);
1677 rxp->dptr = NULL;
1678 }
1679 }
1680
1681 if (sc->rpending) {
1682 m_freem(sc->rpending);
1683 sc->rpending = NULL;
1684 }
1685
1686 /*
1687 * And we're outta here...
1688 */
1689
1690 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1691 ifp->if_timer = 0;
1692 }
1693
1694 /*
1695 * Transmit Watchdog
1696 */
1697 static void
1698 wx_txwatchdog(struct ifnet *ifp)
1699 {
1700 wx_softc_t *sc = SOFTC_IFP(ifp);
1701 printf("%s: device timeout\n", sc->wx_name);
1702 ifp->if_oerrors++;
1703 if (wx_init(sc)) {
1704 printf("%s: could not re-init device\n", sc->wx_name);
1705 sc->wx_needreinit = 1;
1706 }
1707 }
1708
/*
 * (Re)initialize the interface: stop any activity, reset and
 * reprogram the hardware, build the receive and transmit rings,
 * program receive filtering, enable interrupts, set the media and
 * start the periodic watchdog.  Returns 0 on success or an errno
 * (EIO, ENOMEM) on failure.  Takes and releases the softc lock.
 */
static int
wx_init(void *xsc)
{
	struct ifmedia *ifm;
	wx_softc_t *sc = xsc;
	struct ifnet *ifp = &sc->wx_if;
	rxpkt_t *rxpkt;
	wxrd_t *rd;
	size_t len;
	int i, bflags;

	DPRINTF(sc, ("%s: wx_init\n", sc->wx_name));
	WX_LOCK(sc);

	/*
	 * Cancel any pending I/O by resetting things.
	 * wx_stop will free any allocated mbufs.
	 */
	wx_stop(sc);

	/*
	 * Reset the hardware. All network addresses loaded here, but
	 * neither the receiver nor the transmitter are enabled.
	 */

	if (wx_hw_initialize(sc)) {
		DPRINTF(sc, ("%s: wx_hw_initialize failed\n", sc->wx_name));
		WX_UNLOCK(sc);
		return (EIO);
	}

	/*
	 * Set up the receive ring stuff.
	 *
	 * Note that only every RXINCR'th descriptor is populated with
	 * a buffer (see the "every other descriptor" option discussed
	 * at the top of the file).
	 */
	len = sizeof (wxrd_t) * WX_MAX_RDESC;
	bzero(sc->rdescriptors, len);
	for (rxpkt = sc->rbase, i = 0; rxpkt != NULL && i < WX_MAX_RDESC;
	    i += RXINCR, rxpkt++) {
		rd = &sc->rdescriptors[i];
		if (wx_get_rbuf(sc, rxpkt)) {
			break;
		}
		rd->address.lowpart = rxpkt->dma_addr + WX_RX_OFFSET_VALUE;
	}
	if (i != WX_MAX_RDESC) {
		printf("%s: could not set up rbufs\n", sc->wx_name);
		wx_stop(sc);
		WX_UNLOCK(sc);
		return (ENOMEM);
	}

	/*
	 * Set up transmit parameters and enable the transmitter.
	 */
	sc->tnxtfree = sc->tactive = 0;
	sc->tbsyf = sc->tbsyl = NULL;
	WRITE_CSR(sc, WXREG_TCTL, 0);
	DELAY(5 * 1000);
	if (IS_WISEMAN(sc)) {
		WRITE_CSR(sc, WXREG_TDBA_LO,
		    vtophys((vm_offset_t)&sc->tdescriptors[0]));
		WRITE_CSR(sc, WXREG_TDBA_HI, 0);
		WRITE_CSR(sc, WXREG_TDLEN, WX_MAX_TDESC * sizeof (wxtd_t));
		WRITE_CSR(sc, WXREG_TDH, 0);
		WRITE_CSR(sc, WXREG_TDT, 0);
		WRITE_CSR(sc, WXREG_TQSA_HI, 0);
		WRITE_CSR(sc, WXREG_TQSA_LO, 0);
		WRITE_CSR(sc, WXREG_TIPG, WX_WISEMAN_TIPG_DFLT);
		WRITE_CSR(sc, WXREG_TIDV, wx_txint_delay);
	} else {
		WRITE_CSR(sc, WXREG_TDBA_LO_LIVENGOOD,
		    vtophys((vm_offset_t)&sc->tdescriptors[0]));
		WRITE_CSR(sc, WXREG_TDBA_HI_LIVENGOOD, 0);
		WRITE_CSR(sc, WXREG_TDLEN_LIVENGOOD,
		    WX_MAX_TDESC * sizeof (wxtd_t));
		WRITE_CSR(sc, WXREG_TDH_LIVENGOOD, 0);
		WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, 0);
		WRITE_CSR(sc, WXREG_TQSA_HI, 0);
		WRITE_CSR(sc, WXREG_TQSA_LO, 0);
		WRITE_CSR(sc, WXREG_TIPG, WX_LIVENGOOD_TIPG_DFLT);
		WRITE_CSR(sc, WXREG_TIDV_LIVENGOOD, wx_txint_delay);
	}
	WRITE_CSR(sc, WXREG_TCTL, (WXTCTL_CT(WX_COLLISION_THRESHOLD) |
	    WXTCTL_COLD(WX_FDX_COLLISION_DX) | WXTCTL_EN));
	/*
	 * Set up receive parameters and enable the receiver.
	 */

	sc->rnxt = 0;
	WRITE_CSR(sc, WXREG_RCTL, 0);
	DELAY(5 * 1000);
	if (IS_WISEMAN(sc)) {
		WRITE_CSR(sc, WXREG_RDTR0, WXRDTR_FPD);
		WRITE_CSR(sc, WXREG_RDBA0_LO,
		    vtophys((vm_offset_t)&sc->rdescriptors[0]));
		WRITE_CSR(sc, WXREG_RDBA0_HI, 0);
		WRITE_CSR(sc, WXREG_RDLEN0, WX_MAX_RDESC * sizeof (wxrd_t));
		WRITE_CSR(sc, WXREG_RDH0, 0);
		WRITE_CSR(sc, WXREG_RDT0, (WX_MAX_RDESC - RXINCR));
	} else {
		/*
		 * The delay should yield ~10us receive interrupt delay
		 */
		WRITE_CSR(sc, WXREG_RDTR0_LIVENGOOD, WXRDTR_FPD | 0x40);
		WRITE_CSR(sc, WXREG_RDBA0_LO_LIVENGOOD,
		    vtophys((vm_offset_t)&sc->rdescriptors[0]));
		WRITE_CSR(sc, WXREG_RDBA0_HI_LIVENGOOD, 0);
		WRITE_CSR(sc, WXREG_RDLEN0_LIVENGOOD,
		    WX_MAX_RDESC * sizeof (wxrd_t));
		WRITE_CSR(sc, WXREG_RDH0_LIVENGOOD, 0);
		WRITE_CSR(sc, WXREG_RDT0_LIVENGOOD, (WX_MAX_RDESC - RXINCR));
	}
	/* second receive ring is unused; zero its registers */
	WRITE_CSR(sc, WXREG_RDTR1, 0);
	WRITE_CSR(sc, WXREG_RDBA1_LO, 0);
	WRITE_CSR(sc, WXREG_RDBA1_HI, 0);
	WRITE_CSR(sc, WXREG_RDLEN1, 0);
	WRITE_CSR(sc, WXREG_RDH1, 0);
	WRITE_CSR(sc, WXREG_RDT1, 0);

	/* enable long-packet reception when the MTU requires it */
	if (ifp->if_mtu > ETHERMTU) {
		bflags = WXRCTL_EN | WXRCTL_LPE | WXRCTL_2KRBUF;
	} else {
		bflags = WXRCTL_EN | WXRCTL_2KRBUF;
	}

	WRITE_CSR(sc, WXREG_RCTL, bflags |
	    ((ifp->if_flags & IFF_BROADCAST) ? WXRCTL_BAM : 0) |
	    ((ifp->if_flags & IFF_PROMISC) ? WXRCTL_UPE : 0) |
	    ((sc->all_mcasts) ? WXRCTL_MPE : 0));

	/*
	 * Enable Interrupts
	 */
	WX_ENABLE_INT(sc);

	if (sc->wx_mii) {
		mii_mediachg(WX_MII_FROM_SOFTC(sc));
	} else {
		/*
		 * Temporarily set the requested media to the current
		 * selection so wx_ifmedia_upd() acts on it, then restore.
		 */
		ifm = &sc->wx_media;
		i = ifm->ifm_media;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		wx_ifmedia_upd(ifp);
		ifm->ifm_media = i;
	}

	/*
	 * Mark that we're up and running...
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;


	/*
	 * Start stats updater.
	 */
	TIMEOUT(sc, wx_watchdog, sc, hz);

	WX_UNLOCK(sc);
	/*
	 * And we're outta here...
	 */
	return (0);
}
1872
1873 /*
1874 * Get a receive buffer for our use (and dma map the data area).
1875 *
1876 * The Wiseman chip can have buffers be 256, 512, 1024 or 2048 bytes in size.
1877 * The LIVENGOOD chip can go higher (up to 16K), but what's the point as
1878 * we aren't doing non-MCLGET memory management.
1879 *
1880 * It wants them aligned on 256 byte boundaries, but can actually cope
1881 * with an offset in the first 255 bytes of the head of a receive frame.
1882 *
1883 * We'll allocate a MCLBYTE sized cluster but *not* adjust the data pointer
1884 * by any alignment value. Instead, we'll tell the chip to offset by any
1885 * alignment and we'll catch the alignment on the backend at interrupt time.
1886 */
1887 static void
1888 wx_rxdma_map(wx_softc_t *sc, rxpkt_t *rxpkt, struct mbuf *mb)
1889 {
1890 rxpkt->dptr = mb;
1891 rxpkt->dma_addr = vtophys(mtod(mb, vm_offset_t));
1892 }
1893
1894 static int
1895 wx_get_rbuf(wx_softc_t *sc, rxpkt_t *rxpkt)
1896 {
1897 struct mbuf *mb;
1898 MGETHDR(mb, M_DONTWAIT, MT_DATA);
1899 if (mb == NULL) {
1900 rxpkt->dptr = NULL;
1901 return (-1);
1902 }
1903 MCLGET(mb, M_DONTWAIT);
1904 if ((mb->m_flags & M_EXT) == 0) {
1905 m_freem(mb);
1906 rxpkt->dptr = NULL;
1907 return (-1);
1908 }
1909 wx_rxdma_map(sc, rxpkt, mb);
1910 return (0);
1911 }
1912
/*
 * Handle interface ioctls.  Address ioctls go to ether_ioctl(); MTU
 * changes and flag changes reinitialize or stop the interface as
 * appropriate; multicast changes reprogram the filter; media ioctls
 * are forwarded to the MII layer when a PHY is present, otherwise to
 * our private ifmedia instance.  Returns 0 or an errno value.
 */
static int
wx_ioctl(struct ifnet *ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	wx_softc_t *sc = SOFTC_IFP(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	WX_LOCK(sc);
	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > WX_MAXMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			/* reinit so the receive path picks up the new MTU */
			ifp->if_mtu = ifr->ifr_mtu;
			error = wx_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				error = wx_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				wx_stop(sc);
			}
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
		error = wx_mc_setup(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTF(sc, ("%s: ioctl SIOC[GS]IFMEDIA: command=%#lx\n",
		    sc->wx_name, command));
		if (sc->wx_mii) {
			mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &sc->wx_media, command);
		}

		break;
	default:
		error = EINVAL;
	}

	WX_UNLOCK(sc);
	return (error);
}
1979
1980 static int
1981 wx_ifmedia_upd(struct ifnet *ifp)
1982 {
1983 struct wx_softc *sc = SOFTC_IFP(ifp);
1984 struct ifmedia *ifm;
1985
1986 DPRINTF(sc, ("%s: ifmedia_upd\n", sc->wx_name));
1987
1988 if (sc->wx_mii) {
1989 mii_mediachg(WX_MII_FROM_SOFTC(sc));
1990 return 0;
1991 }
1992
1993 ifm = &sc->wx_media;
1994
1995 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1996 return (EINVAL);
1997 }
1998
1999 return (0);
2000 }
2001
2002 static void
2003 wx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2004 {
2005 u_int32_t dsr;
2006 struct wx_softc *sc = SOFTC_IFP(ifp);
2007
2008 DPRINTF(sc, ("%s: ifmedia_sts: ", sc->wx_name));
2009
2010 if (sc->wx_mii) {
2011 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
2012 mii_pollstat(mii);
2013 ifmr->ifm_active = mii->mii_media_active;
2014 ifmr->ifm_status = mii->mii_media_status;
2015 DPRINTF(sc, ("active=%#x status=%#x\n",
2016 ifmr->ifm_active, ifmr->ifm_status));
2017 return;
2018 }
2019
2020 DPRINTF(sc, ("\n"));
2021 ifmr->ifm_status = IFM_AVALID;
2022 ifmr->ifm_active = IFM_ETHER;
2023
2024 if (sc->linkup == 0)
2025 return;
2026
2027 ifmr->ifm_status |= IFM_ACTIVE;
2028 dsr = READ_CSR(sc, WXREG_DSR);
2029 if (IS_LIVENGOOD(sc)) {
2030 if (dsr & WXDSR_1000BT) {
2031 if (IS_LIVENGOOD_CU(sc)) {
2032 ifmr->ifm_status |= IFM_1000_TX;
2033 }
2034 else {
2035 ifmr->ifm_status |= IFM_1000_SX;
2036 }
2037 } else if (dsr & WXDSR_100BT) {
2038 ifmr->ifm_status |= IFM_100_FX; /* ?? */
2039 } else {
2040 ifmr->ifm_status |= IFM_10_T; /* ?? */
2041 }
2042 } else {
2043 ifmr->ifm_status |= IFM_1000_SX;
2044 }
2045 if (dsr & WXDSR_FD) {
2046 ifmr->ifm_active |= IFM_FDX;
2047 }
2048 }
2049
2050
/*
 * Drive the MII management clock (MDC) bit in the device control
 * register high/low; DELAY(2) paces each edge.  Used by the
 * bit-banged MII access routines below.
 */
#define RAISE_CLOCK(sc, dcr)	\
	WRITE_CSR(sc, WXREG_DCR, (dcr) | WXPHY_MDC),	DELAY(2)

#define LOWER_CLOCK(sc, dcr)	\
	WRITE_CSR(sc, WXREG_DCR, (dcr) & ~WXPHY_MDC),	DELAY(2)
2056
2057 static u_int32_t
2058 wx_mii_shift_in(wx_softc_t *sc)
2059 {
2060 u_int32_t dcr, i;
2061 u_int32_t data = 0;
2062
2063 dcr = READ_CSR(sc, WXREG_DCR);
2064 dcr &= ~(WXPHY_MDIO_DIR | WXPHY_MDIO);
2065 WRITE_CSR(sc, WXREG_DCR, dcr);
2066 RAISE_CLOCK(sc, dcr);
2067 LOWER_CLOCK(sc, dcr);
2068
2069 for (i = 0; i < 16; i++) {
2070 data <<= 1;
2071 RAISE_CLOCK(sc, dcr);
2072 dcr = READ_CSR(sc, WXREG_DCR);
2073
2074 if (dcr & WXPHY_MDIO)
2075 data |= 1;
2076
2077 LOWER_CLOCK(sc, dcr);
2078 }
2079
2080 RAISE_CLOCK(sc, dcr);
2081 LOWER_CLOCK(sc, dcr);
2082 return (data);
2083 }
2084
2085 static void
2086 wx_mii_shift_out(wx_softc_t *sc, u_int32_t data, u_int32_t count)
2087 {
2088 u_int32_t dcr, mask;
2089
2090 dcr = READ_CSR(sc, WXREG_DCR);
2091 dcr |= WXPHY_MDIO_DIR | WXPHY_MDC_DIR;
2092
2093 for (mask = (1 << (count - 1)); mask; mask >>= 1) {
2094 if (data & mask)
2095 dcr |= WXPHY_MDIO;
2096 else
2097 dcr &= ~WXPHY_MDIO;
2098
2099 WRITE_CSR(sc, WXREG_DCR, dcr);
2100 DELAY(2);
2101 RAISE_CLOCK(sc, dcr);
2102 LOWER_CLOCK(sc, dcr);
2103 }
2104 }
2105
2106 static int
2107 wx_miibus_readreg(void *arg, int phy, int reg)
2108 {
2109 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2110 unsigned int data = 0;
2111
2112 if (!IS_LIVENGOOD_CU(sc)) {
2113 return 0;
2114 }
2115 wx_mii_shift_out(sc, WXPHYC_PREAMBLE, WXPHYC_PREAMBLE_LEN);
2116 wx_mii_shift_out(sc, reg | (phy << 5) | (WXPHYC_READ << 10) |
2117 (WXPHYC_SOF << 12), 14);
2118 data = wx_mii_shift_in(sc);
2119 return (data & WXMDIC_DATA_MASK);
2120 }
2121
2122 static int
2123 wx_miibus_writereg(void *arg, int phy, int reg, int data)
2124 {
2125 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2126 if (!IS_LIVENGOOD_CU(sc)) {
2127 return 0;
2128 }
2129 wx_mii_shift_out(sc, WXPHYC_PREAMBLE, WXPHYC_PREAMBLE_LEN);
2130 wx_mii_shift_out(sc, (u_int32_t)data | (WXPHYC_TURNAROUND << 16) |
2131 (reg << 18) | (phy << 23) | (WXPHYC_WRITE << 28) |
2132 (WXPHYC_SOF << 30), 32);
2133 return (0);
2134 }
2135
/*
 * MII bus status-change callback: translate the PHY's resolved media
 * (speed, duplex, flow-control flags) into device control (DCR) and
 * transmit control (TCTL) register settings, and track link state in
 * sc->linkup.
 */
static void
wx_miibus_statchg(void *arg)
{
	wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
	mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
	u_int32_t dcr, tctl;

	if (mii == NULL)
		return;

	dcr = sc->wx_dcr;
	tctl = READ_CSR(sc, WXREG_TCTL);
	DPRINTF(sc, ("%s: statchg dcr=%#x tctl=%#x", sc->wx_name, dcr, tctl));

	/* force speed/duplex from software; no auto speed detection */
	dcr |= WXDCR_FRCSPD | WXDCR_FRCDPX | WXDCR_SLU;
	dcr &= ~(WXDCR_SPEED_MASK | WXDCR_ASDE /* | WXDCR_ILOS */);

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
			/* active but no media resolved: treat as link down */
			DPRINTF(sc, (" link-down\n"));
			sc->linkup = 0;
			return;
		}

		sc->linkup = 1;
	}

	/* select the speed bits from the resolved media subtype */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
		DPRINTF(sc, (" 1000TX"));
		dcr |= WXDCR_1000BT;
	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		DPRINTF(sc, (" 100TX"));
		dcr |= WXDCR_100BT;
	} else /* assume IFM_10_TX */ {
		DPRINTF(sc, (" 10TX"));
		dcr |= WXDCR_10BT;
	}

	/* duplex also selects the collision-distance value for TCTL */
	if (mii->mii_media_active & IFM_FDX) {
		DPRINTF(sc, ("-FD"));
		tctl = WXTCTL_CT(WX_COLLISION_THRESHOLD) |
		    WXTCTL_COLD(WX_FDX_COLLISION_DX) | WXTCTL_EN;
		dcr |= WXDCR_FD;
	} else {
		DPRINTF(sc, ("-HD"));
		tctl = WXTCTL_CT(WX_COLLISION_THRESHOLD) |
		    WXTCTL_COLD(WX_HDX_COLLISION_DX) | WXTCTL_EN;
		dcr &= ~WXDCR_FD;
	}

	/* FLAG0==rx-flow-control FLAG1==tx-flow-control */
	if (mii->mii_media_active & IFM_FLAG0) {
		dcr |= WXDCR_RFCE;
	} else {
		dcr &= ~WXDCR_RFCE;
	}

	if (mii->mii_media_active & IFM_FLAG1) {
		dcr |= WXDCR_TFCE;
	} else {
		dcr &= ~WXDCR_TFCE;
	}

	/* program flow-control frame constants only when enabled */
	if (dcr & (WXDCR_RFCE|WXDCR_TFCE)) {
		WRITE_CSR(sc, WXREG_FCAL, FC_FRM_CONST_LO);
		WRITE_CSR(sc, WXREG_FCAH, FC_FRM_CONST_HI);
		WRITE_CSR(sc, WXREG_FCT, FC_TYP_CONST);
	} else {
		WRITE_CSR(sc, WXREG_FCAL, 0);
		WRITE_CSR(sc, WXREG_FCAH, 0);
		WRITE_CSR(sc, WXREG_FCT, 0);
	}

	DPRINTF(sc, (" dcr=%#x tctl=%#x\n", dcr, tctl));
	WRITE_CSR(sc, WXREG_TCTL, tctl);
	sc->wx_dcr = dcr;
	WRITE_CSR(sc, WXREG_DCR, dcr);
}
2214
/*
 * MII bus media-initialization callback: nothing to do for this
 * driver (media lists are managed elsewhere), but the bus interface
 * requires the hook to exist.
 */
static void
wx_miibus_mediainit(void *arg)
{
}
Cache object: 129a91599f465badc61285a901a4e1fe
|