FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/elinkxl.c
1 /* $NetBSD: elinkxl.c,v 1.95 2006/11/12 07:16:14 itohy Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.95 2006/11/12 07:16:14 itohy Exp $");
41
42 #include "bpfilter.h"
43 #include "rnd.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/callout.h>
48 #include <sys/kernel.h>
49 #include <sys/mbuf.h>
50 #include <sys/socket.h>
51 #include <sys/ioctl.h>
52 #include <sys/errno.h>
53 #include <sys/syslog.h>
54 #include <sys/select.h>
55 #include <sys/device.h>
56 #if NRND > 0
57 #include <sys/rnd.h>
58 #endif
59
60 #include <uvm/uvm_extern.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_ether.h>
65 #include <net/if_media.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #include <net/bpfdesc.h>
70 #endif
71
72 #include <machine/cpu.h>
73 #include <machine/bus.h>
74 #include <machine/intr.h>
75 #include <machine/endian.h>
76
77 #include <dev/mii/miivar.h>
78 #include <dev/mii/mii.h>
79 #include <dev/mii/mii_bitbang.h>
80
81 #include <dev/ic/elink3reg.h>
82 /* #include <dev/ic/elink3var.h> */
83 #include <dev/ic/elinkxlreg.h>
84 #include <dev/ic/elinkxlvar.h>
85
86 #ifdef DEBUG
87 int exdebug = 0;
88 #endif
89
90 /* ifmedia callbacks */
91 int ex_media_chg(struct ifnet *ifp);
92 void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);
93
94 void ex_probe_media(struct ex_softc *);
95 void ex_set_filter(struct ex_softc *);
96 void ex_set_media(struct ex_softc *);
97 void ex_set_xcvr(struct ex_softc *, u_int16_t);
98 struct mbuf *ex_get(struct ex_softc *, int);
99 u_int16_t ex_read_eeprom(struct ex_softc *, int);
100 int ex_init(struct ifnet *);
101 void ex_read(struct ex_softc *);
102 void ex_reset(struct ex_softc *);
103 void ex_set_mc(struct ex_softc *);
104 void ex_getstats(struct ex_softc *);
105 void ex_printstats(struct ex_softc *);
106 void ex_tick(void *);
107
108 void ex_power(int, void *);
109
110 static int ex_eeprom_busy(struct ex_softc *);
111 static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
112 static void ex_init_txdescs(struct ex_softc *);
113
114 static void ex_setup_tx(struct ex_softc *);
115 static void ex_shutdown(void *);
116 static void ex_start(struct ifnet *);
117 static void ex_txstat(struct ex_softc *);
118
119 int ex_mii_readreg(struct device *, int, int);
120 void ex_mii_writereg(struct device *, int, int, int);
121 void ex_mii_statchg(struct device *);
122
123 void ex_probemedia(struct ex_softc *);
124
125 /*
126 * Structure to map media-present bits in boards to ifmedia codes and
127 * printable media names. Used for table-driven ifmedia initialization.
128 */
129 struct ex_media {
130 int exm_mpbit; /* media present bit */
131 const char *exm_name; /* name of medium */
132 int exm_ifmedia; /* ifmedia word for medium */
133 int exm_epmedia; /* ELINKMEDIA_* constant */
134 };
135
136 /*
137 * Media table for 3c90x chips. Note that chips with MII have no
138 * `native' media.
139 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,   "10baseT",   IFM_ETHER|IFM_10_T,
						ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,   "10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX,
						ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,        "10base5",   IFM_ETHER|IFM_10_5,
						ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,        "10base2",   IFM_ETHER|IFM_10_2,
						ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX, "100baseTX", IFM_ETHER|IFM_100_TX,
						ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX, "100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
						ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX, "100baseFX", IFM_ETHER|IFM_100_FX,
						ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",    IFM_ETHER|IFM_MANUAL,
						ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4, "100baseT4", IFM_ETHER|IFM_100_T4,
						ELINKMEDIA_100BASE_T4 },
	/* List terminator: ex_probemedia() stops at exm_name == NULL. */
	{ 0,                    NULL,        0,
						0 },
};
162
/*
 * MII bit-bang glue.
 */
u_int32_t ex_mii_bitbang_read(struct device *);
void ex_mii_bitbang_write(struct device *, u_int32_t);

/*
 * Ops vector handed to the generic MII bit-bang code: the two register
 * accessors plus the mapping from mii_bitbang's abstract bit roles to
 * the bits of the elink PHY management register.
 */
const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
180
181 /*
182 * Back-end attach and configure.
183 */
184 void
185 ex_config(sc)
186 struct ex_softc *sc;
187 {
188 struct ifnet *ifp;
189 u_int16_t val;
190 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
191 bus_space_tag_t iot = sc->sc_iot;
192 bus_space_handle_t ioh = sc->sc_ioh;
193 int i, error, attach_stage;
194
195 callout_init(&sc->ex_mii_callout);
196
197 ex_reset(sc);
198
199 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
200 macaddr[0] = val >> 8;
201 macaddr[1] = val & 0xff;
202 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
203 macaddr[2] = val >> 8;
204 macaddr[3] = val & 0xff;
205 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
206 macaddr[4] = val >> 8;
207 macaddr[5] = val & 0xff;
208
209 aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
210 ether_sprintf(macaddr));
211
212 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
213 GO_WINDOW(2);
214 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
215 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
216 val |= ELINK_RESET_OPT_LEDPOLAR;
217 if (sc->ex_conf & EX_CONF_PHY_POWER)
218 val |= ELINK_RESET_OPT_PHYPOWER;
219 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
220 }
221 if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
222 GO_WINDOW(0);
223 bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
224 EX_XCVR_PWR_MAGICBITS);
225 }
226
227 attach_stage = 0;
228
229 /*
230 * Allocate the upload descriptors, and create and load the DMA
231 * map for them.
232 */
233 if ((error = bus_dmamem_alloc(sc->sc_dmat,
234 EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
235 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
236 aprint_error(
237 "%s: can't allocate upload descriptors, error = %d\n",
238 sc->sc_dev.dv_xname, error);
239 goto fail;
240 }
241
242 attach_stage = 1;
243
244 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
245 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
246 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
247 aprint_error("%s: can't map upload descriptors, error = %d\n",
248 sc->sc_dev.dv_xname, error);
249 goto fail;
250 }
251
252 attach_stage = 2;
253
254 if ((error = bus_dmamap_create(sc->sc_dmat,
255 EX_NUPD * sizeof (struct ex_upd), 1,
256 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
257 &sc->sc_upd_dmamap)) != 0) {
258 aprint_error(
259 "%s: can't create upload desc. DMA map, error = %d\n",
260 sc->sc_dev.dv_xname, error);
261 goto fail;
262 }
263
264 attach_stage = 3;
265
266 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
267 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
268 BUS_DMA_NOWAIT)) != 0) {
269 aprint_error(
270 "%s: can't load upload desc. DMA map, error = %d\n",
271 sc->sc_dev.dv_xname, error);
272 goto fail;
273 }
274
275 attach_stage = 4;
276
277 /*
278 * Allocate the download descriptors, and create and load the DMA
279 * map for them.
280 */
281 if ((error = bus_dmamem_alloc(sc->sc_dmat,
282 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
283 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
284 aprint_error(
285 "%s: can't allocate download descriptors, error = %d\n",
286 sc->sc_dev.dv_xname, error);
287 goto fail;
288 }
289
290 attach_stage = 5;
291
292 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
293 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (caddr_t *)&sc->sc_dpd,
294 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
295 aprint_error("%s: can't map download descriptors, error = %d\n",
296 sc->sc_dev.dv_xname, error);
297 goto fail;
298 }
299 memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);
300
301 attach_stage = 6;
302
303 if ((error = bus_dmamap_create(sc->sc_dmat,
304 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
305 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
306 &sc->sc_dpd_dmamap)) != 0) {
307 aprint_error(
308 "%s: can't create download desc. DMA map, error = %d\n",
309 sc->sc_dev.dv_xname, error);
310 goto fail;
311 }
312
313 attach_stage = 7;
314
315 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
316 sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
317 BUS_DMA_NOWAIT)) != 0) {
318 aprint_error(
319 "%s: can't load download desc. DMA map, error = %d\n",
320 sc->sc_dev.dv_xname, error);
321 goto fail;
322 }
323 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
324 DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);
325
326 attach_stage = 8;
327
328
329 /*
330 * Create the transmit buffer DMA maps.
331 */
332 for (i = 0; i < EX_NDPD; i++) {
333 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
334 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
335 &sc->sc_tx_dmamaps[i])) != 0) {
336 aprint_error(
337 "%s: can't create tx DMA map %d, error = %d\n",
338 sc->sc_dev.dv_xname, i, error);
339 goto fail;
340 }
341 }
342
343 attach_stage = 9;
344
345 /*
346 * Create the receive buffer DMA maps.
347 */
348 for (i = 0; i < EX_NUPD; i++) {
349 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
350 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
351 &sc->sc_rx_dmamaps[i])) != 0) {
352 aprint_error(
353 "%s: can't create rx DMA map %d, error = %d\n",
354 sc->sc_dev.dv_xname, i, error);
355 goto fail;
356 }
357 }
358
359 attach_stage = 10;
360
361 /*
362 * Create ring of upload descriptors, only once. The DMA engine
363 * will loop over this when receiving packets, stalling if it
364 * hits an UPD with a finished receive.
365 */
366 for (i = 0; i < EX_NUPD; i++) {
367 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
368 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
369 sc->sc_upd[i].upd_frags[0].fr_len =
370 htole32((MCLBYTES - 2) | EX_FR_LAST);
371 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
372 aprint_error("%s: can't allocate or map rx buffers\n",
373 sc->sc_dev.dv_xname);
374 goto fail;
375 }
376 }
377
378 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
379 EX_NUPD * sizeof (struct ex_upd),
380 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
381
382 ex_init_txdescs(sc);
383
384 attach_stage = 11;
385
386
387 GO_WINDOW(3);
388 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
389 if (val & ELINK_MEDIACAP_MII)
390 sc->ex_conf |= EX_CONF_MII;
391
392 ifp = &sc->sc_ethercom.ec_if;
393
394 /*
395 * Initialize our media structures and MII info. We'll
396 * probe the MII if we discover that we have one.
397 */
398 sc->ex_mii.mii_ifp = ifp;
399 sc->ex_mii.mii_readreg = ex_mii_readreg;
400 sc->ex_mii.mii_writereg = ex_mii_writereg;
401 sc->ex_mii.mii_statchg = ex_mii_statchg;
402 ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
403 ex_media_stat);
404
405 if (sc->ex_conf & EX_CONF_MII) {
406 /*
407 * Find PHY, extract media information from it.
408 * First, select the right transceiver.
409 */
410 ex_set_xcvr(sc, val);
411
412 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
413 MII_PHY_ANY, MII_OFFSET_ANY, 0);
414 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
415 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
416 0, NULL);
417 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
418 } else {
419 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
420 }
421 } else
422 ex_probemedia(sc);
423
424 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
425 ifp->if_softc = sc;
426 ifp->if_start = ex_start;
427 ifp->if_ioctl = ex_ioctl;
428 ifp->if_watchdog = ex_watchdog;
429 ifp->if_init = ex_init;
430 ifp->if_stop = ex_stop;
431 ifp->if_flags =
432 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
433 sc->sc_if_flags = ifp->if_flags;
434 IFQ_SET_READY(&ifp->if_snd);
435
436 /*
437 * We can support 802.1Q VLAN-sized frames.
438 */
439 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
440
441 /*
442 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
443 */
444 if (sc->ex_conf & EX_CONF_90XB)
445 sc->sc_ethercom.ec_if.if_capabilities |=
446 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
447 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
448 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
449
450 if_attach(ifp);
451 ether_ifattach(ifp, macaddr);
452
453 GO_WINDOW(1);
454
455 sc->tx_start_thresh = 20;
456 sc->tx_succ_ok = 0;
457
458 /* TODO: set queues to 0 */
459
460 #if NRND > 0
461 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
462 RND_TYPE_NET, 0);
463 #endif
464
465 /* Establish callback to reset card when we reboot. */
466 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
467 if (sc->sc_sdhook == NULL)
468 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
469 sc->sc_dev.dv_xname);
470
471 /* Add a suspend hook to make sure we come back up after a resume. */
472 sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
473 ex_power, sc);
474 if (sc->sc_powerhook == NULL)
475 aprint_error("%s: WARNING: unable to establish power hook\n",
476 sc->sc_dev.dv_xname);
477
478 /* The attach is successful. */
479 sc->ex_flags |= EX_FLAGS_ATTACHED;
480 return;
481
482 fail:
483 /*
484 * Free any resources we've allocated during the failed attach
485 * attempt. Do this in reverse order and fall though.
486 */
487 switch (attach_stage) {
488 case 11:
489 {
490 struct ex_rxdesc *rxd;
491
492 for (i = 0; i < EX_NUPD; i++) {
493 rxd = &sc->sc_rxdescs[i];
494 if (rxd->rx_mbhead != NULL) {
495 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
496 m_freem(rxd->rx_mbhead);
497 }
498 }
499 }
500 /* FALLTHROUGH */
501
502 case 10:
503 for (i = 0; i < EX_NUPD; i++)
504 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
505 /* FALLTHROUGH */
506
507 case 9:
508 for (i = 0; i < EX_NDPD; i++)
509 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
510 /* FALLTHROUGH */
511 case 8:
512 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
513 /* FALLTHROUGH */
514
515 case 7:
516 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
517 /* FALLTHROUGH */
518
519 case 6:
520 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
521 EX_NDPD * sizeof (struct ex_dpd));
522 /* FALLTHROUGH */
523
524 case 5:
525 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
526 break;
527
528 case 4:
529 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
530 /* FALLTHROUGH */
531
532 case 3:
533 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
534 /* FALLTHROUGH */
535
536 case 2:
537 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
538 EX_NUPD * sizeof (struct ex_upd));
539 /* FALLTHROUGH */
540
541 case 1:
542 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
543 break;
544 }
545
546 }
547
/*
 * Find the media present on non-MII chips.
 *
 * Reads the media-present bits from the window-3 reset-options register
 * and the default medium from the internal-config register, adds every
 * present medium from ex_native_media[] to the ifmedia list, prints the
 * list, and selects a default (preferring the EEPROM-configured medium,
 * half-duplex variant).
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/* Both registers live in window 3; restore window 0 afterwards. */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	aprint_normal("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	/* Print one medium name, comma-separated after the first. */
#define PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated. We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	/* The mediamask check above guarantees at least one medium. */
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}
616
617 /*
618 * Setup transmitter parameters.
619 */
620 static void
621 ex_setup_tx(sc)
622 struct ex_softc *sc;
623 {
624 bus_space_tag_t iot = sc->sc_iot;
625 bus_space_handle_t ioh = sc->sc_ioh;
626
627 /*
628 * Disable reclaim threshold for 90xB, set free threshold to
629 * 6 * 256 = 1536 for 90x.
630 */
631 if (sc->ex_conf & EX_CONF_90XB)
632 bus_space_write_2(iot, ioh, ELINK_COMMAND,
633 ELINK_TXRECLTHRESH | 255);
634 else
635 bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);
636
637 /* Setup early transmission start threshold. */
638 bus_space_write_2(iot, ioh, ELINK_COMMAND,
639 ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
640 }
641
/*
 * Bring device up.
 *
 * if_init handler: enables the device, resets rx/tx engines, programs
 * the station address, interrupt masks, media and multicast filter,
 * hands the upload ring to the DMA engine and (re)starts transmission.
 * Returns 0 on success or the error from ex_enable().
 */
int
ex_init(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	u_int16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	/* Wait for any in-flight command, then quiesce the interface. */
	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    LLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	/* Reset both DMA engines; each reset must complete before use. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask the interrupts we care about, both read-zero and mask. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	/* Ack any stale interrupts, plus a bus-specific ack if present. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);


	/* Enable stats/tx/rx and point the upload engine at our ring. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	/* Kick off the once-a-second MII tick. */
	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}
735
/* 256-bin multicast hash: big-endian CRC32 of the address, low 8 bits. */
#define MCHASHSIZE 256
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & \
    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 *
 * 90xB chips get per-address hash filtering; 90x chips (no hash
 * hardware) and address ranges fall back to receiving all multicast.
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	/* Clear all hash bins before setting the wanted ones. */
	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		/*
		 * An address range (lo != hi) can't be expressed in the
		 * hash filter; fall back to all-multicast.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}
796
797
/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 *
 * Drains the TX_STATUS FIFO, resets and reprograms the transmitter on
 * serious errors (underrun/jabber/reclaim), adapts the early-transmit
 * threshold after underruns, and restarts any pending download.
 */
static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		/* Writing pops this status entry off the FIFO. */
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	/* Drop the timer byte accumulated alongside the status bits. */
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		/* Bounded wait for the download DMA to go idle. */
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		printf("%s:%s%s%s", sc->sc_dev.dv_xname,
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			printf(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			/*
			 * Raise the start threshold in 20-byte steps
			 * (capped at a full frame) until 256 packets
			 * in a row succeed at the current setting.
			 */
			printf(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			     > sc->tx_start_thresh) {
				printf(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		printf("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		/* Minor error: count collisions, clear OACTIVE. */
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}
892
893 int
894 ex_media_chg(ifp)
895 struct ifnet *ifp;
896 {
897
898 if (ifp->if_flags & IFF_UP)
899 ex_init(ifp);
900 return 0;
901 }
902
903 void
904 ex_set_xcvr(sc, media)
905 struct ex_softc *sc;
906 const u_int16_t media;
907 {
908 bus_space_tag_t iot = sc->sc_iot;
909 bus_space_handle_t ioh = sc->sc_ioh;
910 u_int32_t icfg;
911
912 /*
913 * We're already in Window 3
914 */
915 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
916 icfg &= ~(CONFIG_XCVR_SEL << 16);
917 if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
918 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
919 if (media & ELINK_MEDIACAP_100BASETX)
920 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
921 if (media & ELINK_MEDIACAP_100BASEFX)
922 icfg |= ELINKMEDIA_100BASE_FX
923 << (CONFIG_XCVR_SEL_SHIFT + 16);
924 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
925 }
926
/*
 * Program the currently selected media into the hardware: set the
 * MAC full-duplex bit, then either hand off to the PHY (MII chips)
 * or select and start the appropriate native transceiver and write
 * the media bits into the internal-config register.
 */
void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t configreg;

	/*
	 * Full duplex comes from the PHY state for MII chips and from
	 * the selected ifmedia word otherwise.
	 */
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		u_int16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	/* Non-MII: stop the transceiver before switching media. */
	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Record the medium in the internal-config register (window 3). */
	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}
1010
1011 /*
1012 * Get currently-selected media from card.
1013 * (if_media callback, may be called before interface is brought up).
1014 */
1015 void
1016 ex_media_stat(ifp, req)
1017 struct ifnet *ifp;
1018 struct ifmediareq *req;
1019 {
1020 struct ex_softc *sc = ifp->if_softc;
1021 u_int16_t help;
1022
1023 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
1024 if (sc->ex_conf & EX_CONF_MII) {
1025 mii_pollstat(&sc->ex_mii);
1026 req->ifm_status = sc->ex_mii.mii_media_status;
1027 req->ifm_active = sc->ex_mii.mii_media_active;
1028 } else {
1029 GO_WINDOW(4);
1030 req->ifm_status = IFM_AVALID;
1031 req->ifm_active =
1032 sc->ex_mii.mii_media.ifm_cur->ifm_media;
1033 help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
1034 ELINK_W4_MEDIA_TYPE);
1035 if (help & LINKBEAT_DETECT)
1036 req->ifm_status |= IFM_ACTIVE;
1037 GO_WINDOW(1);
1038 }
1039 }
1040 }
1041
1042
1043
/*
 * Start outputting on the interface.
 *
 * Called via ifp->if_start and from ex_intr() when the transmit
 * queue drains. Dequeues packets from ifp->if_snd, loads each into
 * a DMA map, fills in the download descriptor (DPD) fragment list,
 * chains the DPDs together and finally hands the list to the chip's
 * download engine. NOTE(review): assumes splnet() protection from
 * the caller, as is conventional for if_start — confirm.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	u_int32_t csum_flags;

	/*
	 * Only build a fresh download list when the previous one has
	 * fully completed (tx_head == NULL) and descriptors are free.
	 */
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			/* Flatten the whole chain into the single new mbuf. */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		/* One fragment descriptor per DMA segment. */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAG (== 32) fragements here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			/* fr points one past the last fragment written. */
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			/* 90xB parts support tx checksum offload. */
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		/* Flush the completed DPD before the chip can see it. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd - (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		/* Ask for an interrupt when the last DPD has downloaded. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}
1261
1262
/*
 * Interrupt service routine.
 *
 * Returns non-zero when an interrupt from this device was handled,
 * zero otherwise (shared-interrupt convention). Loops reading the
 * status register, acknowledging and dispatching each watched event:
 * adapter failure, statistics overflow, transmit completion/error,
 * download (tx DMA) completion and upload (rx DMA) completion.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore interrupts if we are not running or were deactivated. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		/* Done once no watched source (and no latch) is pending. */
		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);	/* bus-specific extra ack hook */

		if (stat & HOST_ERROR) {
			/* Fatal adapter failure: full reset and reinit. */
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			/* Statistics counters near overflow; harvest them. */
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				printf("%s: Ignoring Dn interrupt (%x)\n",
				    sc->sc_dev.dv_xname, stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set. However, the packet
			 * has been reloaded in ex_txstat() and should not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/* Unload and free every completed tx packet. */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;	/* remember list tail */
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			/*
			 * Saturating success count; presumably consulted
			 * by ex_txstat() (not visible here) — confirm.
			 */
			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

			/* Drain every completed UPD at the ring head. */
 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					u_int16_t total_len;

					/* Drop errored frames. */
					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					/* Drop runts shorter than a header. */
					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * Set the incoming checksum information for the packet.
		 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		/* Feed interrupt status into the entropy pool. */
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}
1477
/*
 * Handle interface ioctls.
 *
 * Media ioctls are forwarded to ifmedia_ioctl(). For SIOCSIFFLAGS,
 * when the interface is already up and running and only bits in
 * RESETIGN changed, just reprogram the receive filter instead of
 * resetting the chip. Everything else goes through ether_ioctl();
 * an ENETRESET result means the multicast list changed and the
 * hardware filter must be updated.
 */
int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		/* If the interface is up and running, only modify the receive
		 * filter when setting promiscuous or debug mode. Otherwise
		 * fall through to ether_ioctl, which will reset the chip.
		 */
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))) {
			ex_set_mc(sc);
			error = 0;
			break;
#undef RESETIGN
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ex_set_mc(sc);
			error = 0;
		}
		break;
	}

	/* Remember flags so the next SIOCSIFFLAGS can see what changed. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}
1529
/*
 * Harvest the chip's statistics counters into the interface counters.
 *
 * The on-chip counters are small and clear on read; they must be read
 * periodically to prevent statistics-overflow interrupts. Leaves the
 * chip back in register window 1 (the operating window).
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/* Upper bits of the rx (0x03) and tx (0x30) frame counters. */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occurred at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here. Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}
1574
1575 void
1576 ex_printstats(sc)
1577 struct ex_softc *sc;
1578 {
1579 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1580
1581 ex_getstats(sc);
1582 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1583 "%llu\n", (unsigned long long)ifp->if_ipackets,
1584 (unsigned long long)ifp->if_opackets,
1585 (unsigned long long)ifp->if_ierrors,
1586 (unsigned long long)ifp->if_oerrors,
1587 (unsigned long long)ifp->if_ibytes,
1588 (unsigned long long)ifp->if_obytes);
1589 }
1590
1591 void
1592 ex_tick(arg)
1593 void *arg;
1594 {
1595 struct ex_softc *sc = arg;
1596 int s;
1597
1598 if (!device_is_active(&sc->sc_dev))
1599 return;
1600
1601 s = splnet();
1602
1603 if (sc->ex_conf & EX_CONF_MII)
1604 mii_tick(&sc->ex_mii);
1605
1606 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1607 & COMMAND_IN_PROGRESS))
1608 ex_getstats(sc);
1609
1610 splx(s);
1611
1612 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1613 }
1614
1615 void
1616 ex_reset(sc)
1617 struct ex_softc *sc;
1618 {
1619 u_int16_t val = GLOBAL_RESET;
1620
1621 if (sc->ex_conf & EX_CONF_RESETHACK)
1622 val |= 0x10;
1623 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
1624 /*
1625 * XXX apparently the command in progress bit can't be trusted
1626 * during a reset, so we just always wait this long. Fortunately
1627 * we normally only reset the chip during autoconfig.
1628 */
1629 delay(100000);
1630 ex_waitcmd(sc);
1631 }
1632
1633 void
1634 ex_watchdog(ifp)
1635 struct ifnet *ifp;
1636 {
1637 struct ex_softc *sc = ifp->if_softc;
1638
1639 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1640 ++sc->sc_ethercom.ec_if.if_oerrors;
1641
1642 ex_reset(sc);
1643 ex_init(ifp);
1644 }
1645
/*
 * Stop the interface.
 *
 * Disables the receiver, transmitter and transceiver, releases all
 * pending transmit mbufs, rebuilds the tx descriptor free list and
 * the rx buffer chain, and acknowledges any latched interrupt.
 * When 'disable' is non-zero the card is also powered down via
 * ex_disable().
 */
void
ex_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	/* Quiesce the chip before tearing down the descriptor lists. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Free every mbuf still queued for transmission. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	/* Rebuild the receive chain, recycling existing buffers. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	/* Clear any pending latched interrupt. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}
1701
1702 static void
1703 ex_init_txdescs(sc)
1704 struct ex_softc *sc;
1705 {
1706 int i;
1707
1708 for (i = 0; i < EX_NDPD; i++) {
1709 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1710 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1711 if (i < EX_NDPD - 1)
1712 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1713 else
1714 sc->sc_txdescs[i].tx_next = NULL;
1715 }
1716 sc->tx_free = &sc->sc_txdescs[0];
1717 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1718 }
1719
1720
1721 int
1722 ex_activate(self, act)
1723 struct device *self;
1724 enum devact act;
1725 {
1726 struct ex_softc *sc = (void *) self;
1727 int s, error = 0;
1728
1729 s = splnet();
1730 switch (act) {
1731 case DVACT_ACTIVATE:
1732 error = EOPNOTSUPP;
1733 break;
1734
1735 case DVACT_DEACTIVATE:
1736 if (sc->ex_conf & EX_CONF_MII)
1737 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1738 MII_OFFSET_ANY);
1739 if_deactivate(&sc->sc_ethercom.ec_if);
1740 break;
1741 }
1742 splx(s);
1743
1744 return (error);
1745 }
1746
/*
 * Detach the device, undoing attach-time setup in reverse order:
 * tick callout, PHYs, media instances, entropy source, network
 * interface, receive buffers, DMA maps/memory for both descriptor
 * areas, and finally the shutdown and power hooks. Returns 0.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Release receive buffers before destroying their DMA maps. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	/* Release the download (tx) descriptor area... */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	/* ...and the upload (rx) descriptor area. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	return (0);
}
1804
1805 /*
1806 * Before reboots, reset card completely.
1807 */
1808 static void
1809 ex_shutdown(arg)
1810 void *arg;
1811 {
1812 struct ex_softc *sc = arg;
1813
1814 ex_stop(&sc->sc_ethercom.ec_if, 1);
1815 /*
1816 * Make sure the interface is powered up when we reboot,
1817 * otherwise firmware on some systems gets really confused.
1818 */
1819 (void) ex_enable(sc);
1820 }
1821
1822 /*
1823 * Read EEPROM data.
1824 * XXX what to do if EEPROM doesn't unbusy?
1825 */
1826 u_int16_t
1827 ex_read_eeprom(sc, offset)
1828 struct ex_softc *sc;
1829 int offset;
1830 {
1831 bus_space_tag_t iot = sc->sc_iot;
1832 bus_space_handle_t ioh = sc->sc_ioh;
1833 u_int16_t data = 0, cmd = READ_EEPROM;
1834 int off;
1835
1836 off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
1837 cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;
1838
1839 GO_WINDOW(0);
1840 if (ex_eeprom_busy(sc))
1841 goto out;
1842 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1843 cmd | (off + (offset & 0x3f)));
1844 if (ex_eeprom_busy(sc))
1845 goto out;
1846 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1847 out:
1848 return data;
1849 }
1850
1851 static int
1852 ex_eeprom_busy(sc)
1853 struct ex_softc *sc;
1854 {
1855 bus_space_tag_t iot = sc->sc_iot;
1856 bus_space_handle_t ioh = sc->sc_ioh;
1857 int i = 100;
1858
1859 while (i--) {
1860 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1861 EEPROM_BUSY))
1862 return 0;
1863 delay(100);
1864 }
1865 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1866 return (1);
1867 }
1868
/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 *
 * Allocates a fresh mbuf cluster for descriptor 'rxd'. If allocation
 * fails, the descriptor's previous buffer (if any) is recycled
 * instead. Returns 0 when a brand-new buffer was installed, 1 when
 * the old buffer was reused or nothing could be installed — in the
 * latter case the caller must not pass the old mbuf up the stack.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster: drop the new mbuf, reuse the old. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		/* No mbuf at all: reuse the old buffer if there is one. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		/* Only reload the map when the buffer actually changed. */
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	/* Available buffer length (cluster minus the 2 alignment bytes). */
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		/* Link the previous tail's UPD to this one and flush it. */
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Make the buffer and its UPD visible to the chip. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1953
1954 u_int32_t
1955 ex_mii_bitbang_read(self)
1956 struct device *self;
1957 {
1958 struct ex_softc *sc = (void *) self;
1959
1960 /* We're already in Window 4. */
1961 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
1962 }
1963
1964 void
1965 ex_mii_bitbang_write(self, val)
1966 struct device *self;
1967 u_int32_t val;
1968 {
1969 struct ex_softc *sc = (void *) self;
1970
1971 /* We're already in Window 4. */
1972 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1973 }
1974
1975 int
1976 ex_mii_readreg(v, phy, reg)
1977 struct device *v;
1978 int phy, reg;
1979 {
1980 struct ex_softc *sc = (struct ex_softc *)v;
1981 int val;
1982
1983 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1984 return 0;
1985
1986 GO_WINDOW(4);
1987
1988 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1989
1990 GO_WINDOW(1);
1991
1992 return (val);
1993 }
1994
1995 void
1996 ex_mii_writereg(v, phy, reg, data)
1997 struct device *v;
1998 int phy;
1999 int reg;
2000 int data;
2001 {
2002 struct ex_softc *sc = (struct ex_softc *)v;
2003
2004 GO_WINDOW(4);
2005
2006 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);
2007
2008 GO_WINDOW(1);
2009 }
2010
/*
 * MII status change callback: propagate the negotiated duplex
 * setting into the chip's MAC control register.
 */
void
ex_mii_statchg(v)
	struct device *v;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	/* Read-modify-write the full-duplex bit in window 3. */
	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}
2029
2030 int
2031 ex_enable(sc)
2032 struct ex_softc *sc;
2033 {
2034 if (sc->enabled == 0 && sc->enable != NULL) {
2035 if ((*sc->enable)(sc) != 0) {
2036 printf("%s: de/vice enable failed\n",
2037 sc->sc_dev.dv_xname);
2038 return (EIO);
2039 }
2040 sc->enabled = 1;
2041 }
2042 return (0);
2043 }
2044
2045 void
2046 ex_disable(sc)
2047 struct ex_softc *sc;
2048 {
2049 if (sc->enabled == 1 && sc->disable != NULL) {
2050 (*sc->disable)(sc);
2051 sc->enabled = 0;
2052 }
2053 }
2054
2055 void
2056 ex_power(why, arg)
2057 int why;
2058 void *arg;
2059 {
2060 struct ex_softc *sc = (void *)arg;
2061 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2062 int s;
2063
2064 s = splnet();
2065 switch (why) {
2066 case PWR_SUSPEND:
2067 case PWR_STANDBY:
2068 ex_stop(ifp, 0);
2069 if (sc->power != NULL)
2070 (*sc->power)(sc, why);
2071 break;
2072 case PWR_RESUME:
2073 if (ifp->if_flags & IFF_UP) {
2074 if (sc->power != NULL)
2075 (*sc->power)(sc, why);
2076 ex_init(ifp);
2077 }
2078 break;
2079 case PWR_SOFTSUSPEND:
2080 case PWR_SOFTSTANDBY:
2081 case PWR_SOFTRESUME:
2082 break;
2083 }
2084 splx(s);
2085 }
Cache object: 4cd9f1051d800a65c75f9c7d00c59f34
|