FreeBSD/Linux Kernel Cross Reference
sys/pci/if_xl.c
1 /*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * 3Com 3c90x Etherlink XL PCI NIC driver
35 *
36 * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
37 * bus-master chips (3c90x cards and embedded controllers) including
38 * the following:
39 *
40 * 3Com 3c900-TPO 10Mbps/RJ-45
41 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
42 * 3Com 3c905-TX 10/100Mbps/RJ-45
43 * 3Com 3c905-T4 10/100Mbps/RJ-45
44 * 3Com 3c900B-TPO 10Mbps/RJ-45
45 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
46 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
47 * 3Com 3c900B-FL 10Mbps/Fiber-optic
48 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
49 * 3Com 3c905B-TX 10/100Mbps/RJ-45
50 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
51 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
52 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
53 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
54 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
55 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
56 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
57 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
58 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
59 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
60 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
61 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
62 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
65 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
66 * Dell on-board 3c920 10/100Mbps/RJ-45
67 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
68 * Dell Latitude laptop docking station embedded 3c905-TX
69 *
70 * Written by Bill Paul <wpaul@ctr.columbia.edu>
71 * Electrical Engineering Department
72 * Columbia University, New York City
73 */
74
75 /*
76 * The 3c90x series chips use a bus-master DMA interface for transfering
77 * packets to and from the controller chip. Some of the "vortex" cards
78 * (3c59x) also supported a bus master mode, however for those chips
79 * you could only DMA packets to/from a contiguous memory buffer. For
80 * transmission this would mean copying the contents of the queued mbuf
81 * chain into an mbuf cluster and then DMAing the cluster. This extra
82 * copy would sort of defeat the purpose of the bus master support for
83 * any packet that doesn't fit into a single mbuf.
84 *
85 * By contrast, the 3c90x cards support a fragment-based bus master
86 * mode where mbuf chains can be encapsulated using TX descriptors.
87 * This is similar to other PCI chips such as the Texas Instruments
88 * ThunderLAN and the Intel 82557/82558.
89 *
90 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
91 * bus master chips because they maintain the old PIO interface for
92 * backwards compatibility, but starting with the 3c905B and the
93 * "cyclone" chips, the compatibility interface has been dropped.
94 * Since using bus master DMA is a big win, we use this driver to
95 * support the PCI "boomerang" chips even though they work with the
96 * "vortex" driver in order to obtain better performance.
97 *
98 * This driver is in the /sys/pci directory because it only supports
99 * PCI-based NICs.
100 */
101
102 #include <sys/cdefs.h>
103 __FBSDID("$FreeBSD$");
104
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/sockio.h>
108 #include <sys/endian.h>
109 #include <sys/mbuf.h>
110 #include <sys/kernel.h>
111 #include <sys/socket.h>
112
113 #include <net/if.h>
114 #include <net/if_arp.h>
115 #include <net/ethernet.h>
116 #include <net/if_dl.h>
117 #include <net/if_media.h>
118 #include <net/if_vlan_var.h>
119
120 #include <net/bpf.h>
121
122 #include <machine/bus_memio.h>
123 #include <machine/bus_pio.h>
124 #include <machine/bus.h>
125 #include <machine/clock.h> /* for DELAY */
126 #include <machine/resource.h>
127 #include <sys/bus.h>
128 #include <sys/rman.h>
129
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132
133 #include <pci/pcireg.h>
134 #include <pci/pcivar.h>
135
136 MODULE_DEPEND(xl, miibus, 1, 1, 1);
137
138 /* "controller miibus0" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
140
141 #include <pci/if_xlreg.h>
142
143 /*
144 * TX Checksumming is disabled by default for two reasons:
145 * - TX Checksumming will occasionally produce corrupt packets
146 * - TX Checksumming seems to reduce performance
147 *
148 * Only 905B/C cards were reported to have this problem, it is possible
149 * that later chips _may_ be immune.
150 */
151 #define XL905B_TXCSUM_BROKEN 1
152
153 #ifdef XL905B_TXCSUM_BROKEN
154 #define XL905B_CSUM_FEATURES 0
155 #else
156 #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
157 #endif
158
/*
 * Table of all PCI vendor/device ID pairs this driver claims, along
 * with the probe description string printed for each.  Scanned
 * linearly by xl_probe(); the all-zero entry terminates the table.
 */
static struct xl_type xl_devs[] = {
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
		"3Com 3c900-TPO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
		"3Com 3c900-COMBO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
		"3Com 3c905-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
		"3Com 3c905-T4 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
		"3Com 3c900B-TPO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
		"3Com 3c900B-COMBO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
		"3Com 3c900B-TPC Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
		"3Com 3c900B-FL Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
		"3Com 3c905B-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
		"3Com 3c905B-T4 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
		"3Com 3c905B-FX/SC Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
		"3Com 3c905B-COMBO Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
		"3Com 3c905C-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
		"3Com 3c920B-EMB Integrated Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
		"3Com 3c980 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
		"3Com 3c980C Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
		"3Com 3cSOHO100-TX OfficeConnect" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
		"3Com 3c450-TX HomeConnect" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_555,
		"3Com 3c555 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556,
		"3Com 3c556 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
		"3Com 3c556B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
		"3Com 3c575TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
		"3Com 3c575B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
		"3Com 3c575C Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656,
		"3Com 3c656 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
		"3Com 3c656B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_656C,
		"3Com 3c656C Fast Etherlink XL" },
	{ 0, 0, NULL }		/* end-of-table sentinel */
};
219
220 static int xl_probe (device_t);
221 static int xl_attach (device_t);
222 static int xl_detach (device_t);
223
224 static int xl_newbuf (struct xl_softc *, struct xl_chain_onefrag *);
225 static void xl_stats_update (void *);
226 static int xl_encap (struct xl_softc *, struct xl_chain *,
227 struct mbuf *);
228 static void xl_rxeof (struct xl_softc *);
229 static int xl_rx_resync (struct xl_softc *);
230 static void xl_txeof (struct xl_softc *);
231 static void xl_txeof_90xB (struct xl_softc *);
232 static void xl_txeoc (struct xl_softc *);
233 static void xl_intr (void *);
234 static void xl_start (struct ifnet *);
235 static void xl_start_90xB (struct ifnet *);
236 static int xl_ioctl (struct ifnet *, u_long, caddr_t);
237 static void xl_init (void *);
238 static void xl_stop (struct xl_softc *);
239 static void xl_watchdog (struct ifnet *);
240 static void xl_shutdown (device_t);
241 static int xl_suspend (device_t);
242 static int xl_resume (device_t);
243
244 static int xl_ifmedia_upd (struct ifnet *);
245 static void xl_ifmedia_sts (struct ifnet *, struct ifmediareq *);
246
247 static int xl_eeprom_wait (struct xl_softc *);
248 static int xl_read_eeprom (struct xl_softc *, caddr_t, int, int, int);
249 static void xl_mii_sync (struct xl_softc *);
250 static void xl_mii_send (struct xl_softc *, u_int32_t, int);
251 static int xl_mii_readreg (struct xl_softc *, struct xl_mii_frame *);
252 static int xl_mii_writereg (struct xl_softc *, struct xl_mii_frame *);
253
254 static void xl_setcfg (struct xl_softc *);
255 static void xl_setmode (struct xl_softc *, int);
256 static u_int8_t xl_calchash (caddr_t);
257 static void xl_setmulti (struct xl_softc *);
258 static void xl_setmulti_hash (struct xl_softc *);
259 static void xl_reset (struct xl_softc *);
260 static int xl_list_rx_init (struct xl_softc *);
261 static int xl_list_tx_init (struct xl_softc *);
262 static int xl_list_tx_init_90xB (struct xl_softc *);
263 static void xl_wait (struct xl_softc *);
264 static void xl_mediacheck (struct xl_softc *);
265 static void xl_choose_xcvr (struct xl_softc *, int);
266 static void xl_dma_map_addr (void *, bus_dma_segment_t *, int, int);
267 static void xl_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t,
268 int);
269 static void xl_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t,
270 int);
271 #ifdef notdef
272 static void xl_testpacket (struct xl_softc *);
273 #endif
274
275 static int xl_miibus_readreg (device_t, int, int);
276 static int xl_miibus_writereg (device_t, int, int, int);
277 static void xl_miibus_statchg (device_t);
278 static void xl_miibus_mediainit (device_t);
279
/*
 * newbus/miibus dispatch table: hooks this driver's entry points into
 * the device, bus and MII method interfaces.
 */
static device_method_t xl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, xl_probe),
	DEVMETHOD(device_attach, xl_attach),
	DEVMETHOD(device_detach, xl_detach),
	DEVMETHOD(device_shutdown, xl_shutdown),
	DEVMETHOD(device_suspend, xl_suspend),
	DEVMETHOD(device_resume, xl_resume),

	/* bus interface (pass-throughs so the miibus child can attach) */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface (PHY register access + media callbacks) */
	DEVMETHOD(miibus_readreg, xl_miibus_readreg),
	DEVMETHOD(miibus_writereg, xl_miibus_writereg),
	DEVMETHOD(miibus_statchg, xl_miibus_statchg),
	DEVMETHOD(miibus_mediainit, xl_miibus_mediainit),

	{ 0, 0 }		/* terminator */
};

static driver_t xl_driver = {
	"xl",			/* device name prefix */
	xl_methods,
	sizeof(struct xl_softc)	/* per-instance softc size */
};

static devclass_t xl_devclass;

/* Attach "xl" instances under pci, and a miibus child under each xl. */
DRIVER_MODULE(if_xl, pci, xl_driver, xl_devclass, 0, 0);
DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
312
313 static void
314 xl_dma_map_addr(arg, segs, nseg, error)
315 void *arg;
316 bus_dma_segment_t *segs;
317 int nseg, error;
318 {
319 u_int32_t *paddr;
320
321 paddr = arg;
322 *paddr = segs->ds_addr;
323 }
324
325 static void
326 xl_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
327 void *arg;
328 bus_dma_segment_t *segs;
329 int nseg;
330 bus_size_t mapsize;
331 int error;
332 {
333 u_int32_t *paddr;
334
335 if (error)
336 return;
337 KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments"));
338 paddr = arg;
339 *paddr = segs->ds_addr;
340 }
341
/*
 * Bus DMA callback for TX mbuf mappings: fill in the fragment list of
 * the TX descriptor pointed to by 'arg'.  Each DMA segment becomes one
 * fragment (address + length, little-endian as the chip expects); the
 * last fragment is tagged with XL_LAST_FRAG, the descriptor status is
 * set to the total byte count, and the next-descriptor link is cleared
 * (the caller chains descriptors afterwards).
 */
static void
xl_dma_map_txbuf(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct xl_list *l;
	int i, total_len;

	/* Nothing to do if the mapping failed; 'segs' is invalid then. */
	if (error)
		return;

	KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments"));

	total_len = 0;
	l = arg;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
		l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
		total_len += segs[i].ds_len;
	}
	/* Re-write the final fragment's length with the end-of-chain flag. */
	l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len |
	    XL_LAST_FRAG);
	l->xl_status = htole32(total_len);
	l->xl_next = 0;
}
371
372 /*
373 * Murphy's law says that it's possible the chip can wedge and
374 * the 'command in progress' bit may never clear. Hence, we wait
375 * only a finite amount of time to avoid getting caught in an
376 * infinite loop. Normally this delay routine would be a macro,
377 * but it isn't called during normal operation so we can afford
378 * to make it a function.
379 */
380 static void
381 xl_wait(sc)
382 struct xl_softc *sc;
383 {
384 register int i;
385
386 for (i = 0; i < XL_TIMEOUT; i++) {
387 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
388 break;
389 }
390
391 if (i == XL_TIMEOUT)
392 printf("xl%d: command never completed!\n", sc->xl_unit);
393
394 return;
395 }
396
397 /*
398 * MII access routines are provided for adapters with external
399 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
400 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
401 * Note: if you don't perform the MDIO operations just right,
402 * it's possible to end up with code that works correctly with
403 * some chips/CPUs/processor speeds/bus speeds/etc but not
404 * with others.
405 */
406 #define MII_SET(x) \
407 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
408 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
409
410 #define MII_CLR(x) \
411 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
412 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
413
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 * This puts any attached PHY management interface into a known state
 * before a frame is shifted out.
 */
static void
xl_mii_sync(sc)
	struct xl_softc *sc;
{
	register int i;

	XL_SEL_WIN(4);
	/* Drive the MDIO pin as an output with the data line high. */
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		/*
		 * One clock pulse per iteration.  The repeated writes of
		 * the (already-set) data bit are presumably deliberate
		 * padding to stretch the clock pulse width -- this is
		 * timing-sensitive bit-banging, do not "simplify" it.
		 */
		MII_SET(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}

	return;
}
437
/*
 * Clock a series of bits through the MII, most-significant bit first.
 * 'bits' holds the value, 'cnt' the number of bits to shift out.
 */
static void
xl_mii_send(sc, bits, cnt)
	struct xl_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	/* Walk a one-bit mask down from bit (cnt - 1) to bit 0. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		/* Pulse the clock low then high with the data bit stable. */
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}
462
/*
 * Read a PHY register through the MII by bit-banging a Clause 22
 * management read frame.  Returns 0 and fills frame->mii_data on
 * success; returns 1 if the PHY did not ack the address phase.
 * Runs at splimp() because the register-window and MDIO state must
 * not be disturbed mid-frame.
 */
static int
xl_mii_readreg(sc, frame)
	struct xl_softc *sc;
	struct xl_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4, where the PHY management
	 * register lives.
	 */

	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
	 * Turn on data xmit (drive MDIO as an output).
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info: start delimiter, read opcode,
	 * 5-bit PHY address, 5-bit register address.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit: the PHY drives MDIO from here on. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: the PHY pulls the data line low to ack. */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:

	/* One final clock pulse to leave the bus idle. */
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
550
/*
 * Write to a PHY register through the MII by bit-banging a Clause 22
 * management write frame.  Always returns 0 (writes are not acked in
 * a way we check).  Runs at splimp() for the same reason as
 * xl_mii_readreg().
 */
static int
xl_mii_writereg(sc, frame)
	struct xl_softc *sc;
	struct xl_mii_frame *frame;

{
	int s;

	s = splimp();

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select the window 4 (PHY management register).
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output (drive MDIO for the whole frame).
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Start delimiter, write opcode, PHY address, register
	 * address, turnaround, then the 16 data bits.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return(0);
}
604
605 static int
606 xl_miibus_readreg(dev, phy, reg)
607 device_t dev;
608 int phy, reg;
609 {
610 struct xl_softc *sc;
611 struct xl_mii_frame frame;
612
613 sc = device_get_softc(dev);
614
615 /*
616 * Pretend that PHYs are only available at MII address 24.
617 * This is to guard against problems with certain 3Com ASIC
618 * revisions that incorrectly map the internal transceiver
619 * control registers at all MII addresses. This can cause
620 * the miibus code to attach the same PHY several times over.
621 */
622 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
623 return(0);
624
625 bzero((char *)&frame, sizeof(frame));
626
627 frame.mii_phyaddr = phy;
628 frame.mii_regaddr = reg;
629 xl_mii_readreg(sc, &frame);
630
631 return(frame.mii_data);
632 }
633
634 static int
635 xl_miibus_writereg(dev, phy, reg, data)
636 device_t dev;
637 int phy, reg, data;
638 {
639 struct xl_softc *sc;
640 struct xl_mii_frame frame;
641
642 sc = device_get_softc(dev);
643
644 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
645 return(0);
646
647 bzero((char *)&frame, sizeof(frame));
648
649 frame.mii_phyaddr = phy;
650 frame.mii_regaddr = reg;
651 frame.mii_data = data;
652
653 xl_mii_writereg(sc, &frame);
654
655 return(0);
656 }
657
658 static void
659 xl_miibus_statchg(dev)
660 device_t dev;
661 {
662 struct xl_softc *sc;
663 struct mii_data *mii;
664
665
666 sc = device_get_softc(dev);
667 mii = device_get_softc(sc->xl_miibus);
668
669 xl_setcfg(sc);
670
671 /* Set ASIC's duplex mode to match the PHY. */
672 XL_SEL_WIN(3);
673 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
674 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
675 else
676 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
677 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
678
679 return;
680 }
681
682 /*
683 * Special support for the 3c905B-COMBO. This card has 10/100 support
684 * plus BNC and AUI ports. This means we will have both an miibus attached
685 * plus some non-MII media settings. In order to allow this, we have to
686 * add the extra media to the miibus's ifmedia struct, but we can't do
687 * that during xl_attach() because the miibus hasn't been attached yet.
688 * So instead, we wait until the miibus probe/attach is done, at which
689 * point we will get a callback telling is that it's safe to add our
690 * extra media.
691 */
692 static void
693 xl_miibus_mediainit(dev)
694 device_t dev;
695 {
696 struct xl_softc *sc;
697 struct mii_data *mii;
698 struct ifmedia *ifm;
699
700 sc = device_get_softc(dev);
701 mii = device_get_softc(sc->xl_miibus);
702 ifm = &mii->mii_media;
703
704 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
705 /*
706 * Check for a 10baseFL board in disguise.
707 */
708 if (sc->xl_type == XL_TYPE_905B &&
709 sc->xl_media == XL_MEDIAOPT_10FL) {
710 if (bootverbose)
711 printf("xl%d: found 10baseFL\n", sc->xl_unit);
712 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
713 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL);
714 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
715 ifmedia_add(ifm,
716 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
717 } else {
718 if (bootverbose)
719 printf("xl%d: found AUI\n", sc->xl_unit);
720 ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
721 }
722 }
723
724 if (sc->xl_media & XL_MEDIAOPT_BNC) {
725 if (bootverbose)
726 printf("xl%d: found BNC\n", sc->xl_unit);
727 ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
728 }
729
730 return;
731 }
732
733 /*
734 * The EEPROM is slow: give it time to come ready after issuing
735 * it a command.
736 */
737 static int
738 xl_eeprom_wait(sc)
739 struct xl_softc *sc;
740 {
741 int i;
742
743 for (i = 0; i < 100; i++) {
744 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
745 DELAY(162);
746 else
747 break;
748 }
749
750 if (i == 100) {
751 printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
752 return(1);
753 }
754
755 return(0);
756 }
757
/*
 * Read a sequence of words from the EEPROM.  Note that ethernet address
 * data is stored in the EEPROM in network byte order; pass swap != 0
 * to byte-swap each word into host order as it is copied into 'dest'.
 * Returns 0 on success, 1 if the EEPROM never came ready.
 */
static int
xl_read_eeprom(sc, dest, off, cnt, swap)
	struct xl_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return(1);

	/* Some devices keep their station address at offset 0x30. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* Issue a read command with the correctly-encoded offset. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}
806
/*
 * Multicast filter hash, taken from the 3Com Etherlink XL manual,
 * page 10-7: compute a CRC over the supplied multicast group address
 * and return the low 8 bits, which index the receive hash filter.
 * Note: the 3c905B manual originally described a 64-bit hash table
 * (6 bits of position), but said future revisions would grow to 256
 * bits -- and current 3c905B parts do have a 256-bit table, so all
 * 8 bits are used.  Older cards simply ignore the upper 2 bits.
 */
static u_int8_t xl_calchash(addr)
	caddr_t addr;
{
	u_int32_t crc, feedback;
	u_int8_t byte;
	int octet, bit;

	crc = 0xFFFFFFFF;	/* initial CRC value */

	/* Fold in each of the 6 address octets, LSB first. */
	for (octet = 0; octet < 6; octet++) {
		byte = addr[octet];
		for (bit = 0; bit < 8; bit++) {
			feedback = ((crc & 0x80000000) ? 1 : 0) ^
			    (byte & 0x01);
			crc <<= 1;
			byte >>= 1;
			if (feedback)
				crc = (crc ^ 0x04c11db6) | feedback;
		}
	}

	/* return the filter bit position */
	return(crc & 0x000000FF);
}
845
846 /*
847 * NICs older than the 3c905B have only one multicast option, which
848 * is to enable reception of all multicast frames.
849 */
850 static void
851 xl_setmulti(sc)
852 struct xl_softc *sc;
853 {
854 struct ifnet *ifp;
855 struct ifmultiaddr *ifma;
856 u_int8_t rxfilt;
857 int mcnt = 0;
858
859 ifp = &sc->arpcom.ac_if;
860
861 XL_SEL_WIN(5);
862 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
863
864 if (ifp->if_flags & IFF_ALLMULTI) {
865 rxfilt |= XL_RXFILTER_ALLMULTI;
866 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
867 return;
868 }
869
870 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
871 ifma = ifma->ifma_link.le_next)
872 mcnt++;
873
874 if (mcnt)
875 rxfilt |= XL_RXFILTER_ALLMULTI;
876 else
877 rxfilt &= ~XL_RXFILTER_ALLMULTI;
878
879 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
880
881 return;
882 }
883
/*
 * Program multicast reception on 3c905B and later adapters, which have
 * a real hash filter: clear every hash bit, then set one bit per
 * joined link-layer multicast address (position from xl_calchash()),
 * and finally enable/disable hash filtering in the RX filter register
 * depending on whether any addresses were programmed.
 */
static void
xl_setmulti_hash(sc)
	struct xl_softc *sc;
{
	struct ifnet *ifp;
	int h = 0, i;
	struct ifmultiaddr *ifma;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* ALLMULTI overrides the hash filter entirely. */
	if (ifp->if_flags & IFF_ALLMULTI) {
		rxfilt |= XL_RXFILTER_ALLMULTI;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
		return;
	} else
		rxfilt &= ~XL_RXFILTER_ALLMULTI;


	/* first, zot all the existing hash bits */
	for (i = 0; i < XL_HASHFILT_SIZE; i++)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

	/* now program new ones */
	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
	    ifma = ifma->ifma_link.le_next) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = xl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
		mcnt++;
	}

	if (mcnt)
		rxfilt |= XL_RXFILTER_MULTIHASH;
	else
		rxfilt &= ~XL_RXFILTER_MULTIHASH;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);

	return;
}
933
#ifdef notdef
/*
 * Debug helper (normally compiled out): build a tiny self-addressed
 * test frame (our own station address as both source and destination,
 * a 3-byte payload) and queue it for transmission via xl_start().
 */
static void
xl_testpacket(sc)
	struct xl_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	bcopy(&sc->arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	/* 3-byte payload following the ethernet header. */
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IF_ENQUEUE(&ifp->if_snd, m);
	xl_start(ifp);

	return;
}
#endif
964
965 static void
966 xl_setcfg(sc)
967 struct xl_softc *sc;
968 {
969 u_int32_t icfg;
970
971 XL_SEL_WIN(3);
972 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
973 icfg &= ~XL_ICFG_CONNECTOR_MASK;
974 if (sc->xl_media & XL_MEDIAOPT_MII ||
975 sc->xl_media & XL_MEDIAOPT_BT4)
976 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
977 if (sc->xl_media & XL_MEDIAOPT_BTX)
978 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
979
980 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
981 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
982
983 return;
984 }
985
986 static void
987 xl_setmode(sc, media)
988 struct xl_softc *sc;
989 int media;
990 {
991 u_int32_t icfg;
992 u_int16_t mediastat;
993
994 printf("xl%d: selecting ", sc->xl_unit);
995
996 XL_SEL_WIN(4);
997 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
998 XL_SEL_WIN(3);
999 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
1000
1001 if (sc->xl_media & XL_MEDIAOPT_BT) {
1002 if (IFM_SUBTYPE(media) == IFM_10_T) {
1003 printf("10baseT transceiver, ");
1004 sc->xl_xcvr = XL_XCVR_10BT;
1005 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1006 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
1007 mediastat |= XL_MEDIASTAT_LINKBEAT|
1008 XL_MEDIASTAT_JABGUARD;
1009 mediastat &= ~XL_MEDIASTAT_SQEENB;
1010 }
1011 }
1012
1013 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1014 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1015 printf("100baseFX port, ");
1016 sc->xl_xcvr = XL_XCVR_100BFX;
1017 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1018 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1019 mediastat |= XL_MEDIASTAT_LINKBEAT;
1020 mediastat &= ~XL_MEDIASTAT_SQEENB;
1021 }
1022 }
1023
1024 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1025 if (IFM_SUBTYPE(media) == IFM_10_5) {
1026 printf("AUI port, ");
1027 sc->xl_xcvr = XL_XCVR_AUI;
1028 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1029 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1030 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1031 XL_MEDIASTAT_JABGUARD);
1032 mediastat |= ~XL_MEDIASTAT_SQEENB;
1033 }
1034 if (IFM_SUBTYPE(media) == IFM_10_FL) {
1035 printf("10baseFL transceiver, ");
1036 sc->xl_xcvr = XL_XCVR_AUI;
1037 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1038 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1039 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1040 XL_MEDIASTAT_JABGUARD);
1041 mediastat |= ~XL_MEDIASTAT_SQEENB;
1042 }
1043 }
1044
1045 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1046 if (IFM_SUBTYPE(media) == IFM_10_2) {
1047 printf("BNC port, ");
1048 sc->xl_xcvr = XL_XCVR_COAX;
1049 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1050 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
1051 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1052 XL_MEDIASTAT_JABGUARD|
1053 XL_MEDIASTAT_SQEENB);
1054 }
1055 }
1056
1057 if ((media & IFM_GMASK) == IFM_FDX ||
1058 IFM_SUBTYPE(media) == IFM_100_FX) {
1059 printf("full duplex\n");
1060 XL_SEL_WIN(3);
1061 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1062 } else {
1063 printf("half duplex\n");
1064 XL_SEL_WIN(3);
1065 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1066 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1067 }
1068
1069 if (IFM_SUBTYPE(media) == IFM_10_2)
1070 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1071 else
1072 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1073 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1074 XL_SEL_WIN(4);
1075 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
1076 DELAY(800);
1077 XL_SEL_WIN(7);
1078
1079 return;
1080 }
1081
/*
 * Issue a full chip reset and wait for it to complete, then reset the
 * TX and RX engines and re-apply any board-specific LED/MII power
 * inversion options.  The various DELAY()s below are deliberate
 * hardware settle times -- do not remove them.
 */
static void
xl_reset(sc)
	struct xl_softc *sc;
{
	register int i;

	XL_SEL_WIN(0);
	/* Some boards need full-duplex advertisement disabled at reset. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
		     XL_RESETOPT_DISADVFD:0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C cardbus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	/* Poll (bounded) for the command-busy bit to clear. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("xl%d: reset didn't complete\n", sc->xl_unit);

	/* Reset TX and RX. */
	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Some boards have their LED and/or MII power lines inverted. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
	return;
}
1139
1140 /*
1141 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1142 * IDs against our list and return a device name if we find a match.
1143 */
1144 static int
1145 xl_probe(dev)
1146 device_t dev;
1147 {
1148 struct xl_type *t;
1149
1150 t = xl_devs;
1151
1152 while(t->xl_name != NULL) {
1153 if ((pci_get_vendor(dev) == t->xl_vid) &&
1154 (pci_get_device(dev) == t->xl_did)) {
1155 device_set_desc(dev, t->xl_name);
1156 return(0);
1157 }
1158 t++;
1159 }
1160
1161 return(ENXIO);
1162 }
1163
1164 /*
1165 * This routine is a kludge to work around possible hardware faults
1166 * or manufacturing defects that can cause the media options register
1167 * (or reset options register, as it's called for the first generation
1168 * 3c90x adapters) to return an incorrect result. I have encountered
1169 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1170 * which doesn't have any of the 'mediaopt' bits set. This screws up
1171 * the attach routine pretty badly because it doesn't know what media
1172 * to look for. If we find ourselves in this predicament, this routine
1173 * will try to guess the media options values and warn the user of a
1174 * possible manufacturing defect with his adapter/system/whatever.
1175 */
1176 static void
1177 xl_mediacheck(sc)
1178 struct xl_softc *sc;
1179 {
1180
1181 /*
1182 * If some of the media options bits are set, assume they are
1183 * correct. If not, try to figure it out down below.
1184 * XXX I should check for 10baseFL, but I don't have an adapter
1185 * to test with.
1186 */
1187 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1188 /*
1189 * Check the XCVR value. If it's not in the normal range
1190 * of values, we need to fake it up here.
1191 */
1192 if (sc->xl_xcvr <= XL_XCVR_AUTO)
1193 return;
1194 else {
1195 printf("xl%d: bogus xcvr value "
1196 "in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
1197 printf("xl%d: choosing new default based "
1198 "on card type\n", sc->xl_unit);
1199 }
1200 } else {
1201 if (sc->xl_type == XL_TYPE_905B &&
1202 sc->xl_media & XL_MEDIAOPT_10FL)
1203 return;
1204 printf("xl%d: WARNING: no media options bits set in "
1205 "the media options register!!\n", sc->xl_unit);
1206 printf("xl%d: this could be a manufacturing defect in "
1207 "your adapter or system\n", sc->xl_unit);
1208 printf("xl%d: attempting to guess media type; you "
1209 "should probably consult your vendor\n", sc->xl_unit);
1210 }
1211
1212 xl_choose_xcvr(sc, 1);
1213
1214 return;
1215 }
1216
1217 static void
1218 xl_choose_xcvr(sc, verbose)
1219 struct xl_softc *sc;
1220 int verbose;
1221 {
1222 u_int16_t devid;
1223
1224 /*
1225 * Read the device ID from the EEPROM.
1226 * This is what's loaded into the PCI device ID register, so it has
1227 * to be correct otherwise we wouldn't have gotten this far.
1228 */
1229 xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
1230
1231 switch(devid) {
1232 case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */
1233 case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */
1234 sc->xl_media = XL_MEDIAOPT_BT;
1235 sc->xl_xcvr = XL_XCVR_10BT;
1236 if (verbose)
1237 printf("xl%d: guessing 10BaseT "
1238 "transceiver\n", sc->xl_unit);
1239 break;
1240 case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */
1241 case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */
1242 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1243 sc->xl_xcvr = XL_XCVR_10BT;
1244 if (verbose)
1245 printf("xl%d: guessing COMBO "
1246 "(AUI/BNC/TP)\n", sc->xl_unit);
1247 break;
1248 case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */
1249 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
1250 sc->xl_xcvr = XL_XCVR_10BT;
1251 if (verbose)
1252 printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
1253 break;
1254 case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */
1255 sc->xl_media = XL_MEDIAOPT_10FL;
1256 sc->xl_xcvr = XL_XCVR_AUI;
1257 if (verbose)
1258 printf("xl%d: guessing 10baseFL\n", sc->xl_unit);
1259 break;
1260 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */
1261 case TC_DEVICEID_HURRICANE_555: /* 3c555 */
1262 case TC_DEVICEID_HURRICANE_556: /* 3c556 */
1263 case TC_DEVICEID_HURRICANE_556B: /* 3c556B */
1264 case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */
1265 case TC_DEVICEID_HURRICANE_575B: /* 3c575B */
1266 case TC_DEVICEID_HURRICANE_575C: /* 3c575C */
1267 case TC_DEVICEID_HURRICANE_656: /* 3c656 */
1268 case TC_DEVICEID_HURRICANE_656B: /* 3c656B */
1269 case TC_DEVICEID_TORNADO_656C: /* 3c656C */
1270 case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
1271 sc->xl_media = XL_MEDIAOPT_MII;
1272 sc->xl_xcvr = XL_XCVR_MII;
1273 if (verbose)
1274 printf("xl%d: guessing MII\n", sc->xl_unit);
1275 break;
1276 case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */
1277 case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */
1278 sc->xl_media = XL_MEDIAOPT_BT4;
1279 sc->xl_xcvr = XL_XCVR_MII;
1280 if (verbose)
1281 printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
1282 break;
1283 case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */
1284 case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
1285 case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
1286 case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */
1287 case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */
1288 case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */
1289 sc->xl_media = XL_MEDIAOPT_BTX;
1290 sc->xl_xcvr = XL_XCVR_AUTO;
1291 if (verbose)
1292 printf("xl%d: guessing 10/100 internal\n", sc->xl_unit);
1293 break;
1294 case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */
1295 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1296 sc->xl_xcvr = XL_XCVR_AUTO;
1297 if (verbose)
1298 printf("xl%d: guessing 10/100 "
1299 "plus BNC/AUI\n", sc->xl_unit);
1300 break;
1301 default:
1302 printf("xl%d: unknown device ID: %x -- "
1303 "defaulting to 10baseT\n", sc->xl_unit, devid);
1304 sc->xl_media = XL_MEDIAOPT_BT;
1305 break;
1306 }
1307
1308 return;
1309 }
1310
1311 /*
1312 * Attach the interface. Allocate softc structures, do ifmedia
1313 * setup and ethernet/BPF attach.
1314 */
static int
xl_attach(dev)
	device_t dev;
{
	int s;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t xcvr[2];
	u_int32_t command;
	struct xl_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	int unit, error = 0, rid, res;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);

	/*
	 * Set up per-chip quirk flags keyed on the PCI device ID:
	 * EEPROM layout offsets, function-register windows, weird
	 * reset requirements and inverted LED/MII power bits.
	 */
	sc->xl_flags = 0;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_555)
		sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_556 ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_555 ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_556)
		sc->xl_flags |= XL_FLAG_8BITROM;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;

	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_575A ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_575B ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_575C ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_656B ||
	    pci_get_device(dev) == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_656)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_575B)
		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_575C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_656 ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_656B)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
		    XL_FLAG_INVERT_LED_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_TORNADO_10_100BT_920B)
		sc->xl_flags |= XL_FLAG_PHYOK;
#ifndef BURN_BRIDGES
	/*
	 * If this is a 3c905B, we have to check one extra thing.
	 * The 905B supports power management and may be placed in
	 * a low-power mode (D3 mode), typically by certain operating
	 * systems which shall not be named. The PCI BIOS is supposed
	 * to reset the NIC and bring it out of low-power mode, but
	 * some do not. Consequently, we have to see if this chip
	 * supports power management, and if so, make sure it's not
	 * in low-power mode. If power management is available, the
	 * capid byte will be 0x01.
	 *
	 * I _think_ that what actually happens is that the chip
	 * loses its PCI configuration during the transition from
	 * D3 back to D0; this means that it should be possible for
	 * us to save the PCI iobase, membase and IRQ, put the chip
	 * back in the D0 state, then restore the PCI config ourselves.
	 */

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, XL_PCI_LOIO, 4);
		membase = pci_read_config(dev, XL_PCI_LOMEM, 4);
		irq = pci_read_config(dev, XL_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("xl%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, XL_PCI_LOIO, iobase, 4);
		pci_write_config(dev, XL_PCI_LOMEM, membase, 4);
		pci_write_config(dev, XL_PCI_INTLINE, irq, 4);
	}
#endif
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_IOPORT);
	pci_enable_io(dev, SYS_RES_MEMORY);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(command & PCIM_CMD_PORTEN) && !(command & PCIM_CMD_MEMEN)) {
		printf("xl%d: failed to enable I/O ports and memory mappings!\n", unit);
		error = ENXIO;
		goto fail;
	}

	rid = XL_PCI_LOMEM;
	res = SYS_RES_MEMORY;

	/*
	 * The MMIO allocation is deliberately disabled below; with it
	 * #if 0'd out, xl_res is still NULL (zeroed softc) here, so the
	 * port-I/O path in the else branch is always taken.
	 */
#if 0
	sc->xl_res = bus_alloc_resource(dev, res, &rid,
	    0, ~0, 1, RF_ACTIVE);
#endif

	if (sc->xl_res != NULL) {
		sc->xl_flags |= XL_FLAG_USE_MMIO;
		if (bootverbose)
			printf("xl%d: using memory mapped I/O\n", unit);
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
		sc->xl_res = bus_alloc_resource(dev, res, &rid,
		    0, ~0, 1, RF_ACTIVE);
		if (sc->xl_res == NULL) {
			printf ("xl%d: couldn't map ports/memory\n", unit);
			error = ENXIO;
			goto fail;
		}
		if (bootverbose)
			printf("xl%d: using port I/O\n", unit);
	}

	sc->xl_btag = rman_get_bustag(sc->xl_res);
	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);

	/* Some boards expose a separate function-register BAR. */
	if (sc->xl_flags & XL_FLAG_FUNCREG) {
		rid = XL_PCI_FUNCMEM;
		sc->xl_fres = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		    0, ~0, 1, RF_ACTIVE);

		if (sc->xl_fres == NULL) {
			printf ("xl%d: couldn't map ports/memory\n", unit);
			error = ENXIO;
			goto fail;
		}

		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
	}

	/* Allocate interrupt */
	rid = 0;
	sc->xl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->xl_irq == NULL) {
		printf("xl%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	xl_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("xl%d: failed to read station address\n", sc->xl_unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A 3Com chip was detected. Inform the world.
	 */
	printf("xl%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->xl_unit = unit;
	callout_handle_init(&sc->xl_stat_ch);
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the DMA
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(NULL, 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0,
	    &sc->xl_ldata.xl_rx_tag);
	if (error) {
		printf("xl%d: failed to allocate rx dma tag\n", unit);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
	    (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT,
	    &sc->xl_ldata.xl_rx_dmamap);
	if (error) {
		printf("xl%d: no memory for rx list buffers!\n", unit);
		/* Null the tag so xl_detach() won't double-free it. */
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
		sc->xl_ldata.xl_rx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
	    sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
	    XL_RX_LIST_SZ, xl_dma_map_addr,
	    &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("xl%d: cannot get dma address of the rx ring!\n", unit);
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
		sc->xl_ldata.xl_rx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0,
	    &sc->xl_ldata.xl_tx_tag);
	if (error) {
		printf("xl%d: failed to allocate tx dma tag\n", unit);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
	    (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT,
	    &sc->xl_ldata.xl_tx_dmamap);
	if (error) {
		printf("xl%d: no memory for list buffers!\n", unit);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
		sc->xl_ldata.xl_tx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
	    sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
	    XL_TX_LIST_SZ, xl_dma_map_addr,
	    &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("xl%d: cannot get dma address of the tx ring!\n", unit);
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
		sc->xl_ldata.xl_tx_tag = NULL;
		goto fail;
	}

	/*
	 * Allocate a DMA tag for the mapping of mbufs.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0,
	    &sc->xl_mtag);
	if (error) {
		printf("xl%d: failed to allocate mbuf dma tag\n", unit);
		goto fail;
	}

	bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
	bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);

	/* We need a spare DMA map for the RX ring. */
	error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
	if (error)
		goto fail;

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	/* Initialize the ifnet structure. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "xl";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	ifp->if_output = ether_output;
	ifp->if_capabilities = 0;
	if (sc->xl_type == XL_TYPE_905B) {
		/* 90xB/C chips get the fast start path and HW checksum. */
		ifp->if_start = xl_start_90xB;
		ifp->if_hwassist = XL905B_CSUM_FEATURES;
#ifdef XL905B_TXCSUM_BROKEN
		ifp->if_capabilities |= IFCAP_RXCSUM;
#else
		ifp->if_capabilities |= IFCAP_HWCSUM;
#endif
	} else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_init = xl_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = XL_TX_LIST_CNT - 1;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Now we have to see what sort of media we have.
	 * This includes probing for an MII interace and a
	 * possible PHY.
	 */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
	if (bootverbose)
		printf("xl%d: media options word: %x\n", sc->xl_unit,
		    sc->xl_media);

	/* Extract the transceiver type from the internal config word. */
	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
			|| sc->xl_media & XL_MEDIAOPT_BT4) {
		if (bootverbose)
			printf("xl%d: found MII/AUTO\n", sc->xl_unit);
		xl_setcfg(sc);
		if (mii_phy_probe(dev, &sc->xl_miibus,
		    xl_ifmedia_upd, xl_ifmedia_sts)) {
			printf("xl%d: no PHY found!\n", sc->xl_unit);
			error = ENXIO;
			goto fail;
		}

		/* MII media handled by miibus; skip manual ifmedia setup. */
		goto done;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO)
		xl_choose_xcvr(sc, bootverbose);

	/*
	 * Do ifmedia setup.
	 */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (bootverbose)
			printf("xl%d: found 10baseT\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(&sc->ifmedia,
			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				printf("xl%d: found 10baseFL\n", sc->xl_unit);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(&sc->ifmedia,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				printf("xl%d: found AUI\n", sc->xl_unit);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			printf("xl%d: found BNC\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (bootverbose)
			printf("xl%d: found 100baseFX\n", sc->xl_unit);
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
		    sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER|IFM_10_T;
		break;
	}

	if (sc->xl_miibus == NULL)
		ifmedia_set(&sc->ifmedia, media);

done:

	/* 3c556B needs the transceiver power workaround magic. */
	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET,
	    xl_intr, sc, &sc->xl_intrhand);
	if (error) {
		printf("xl%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
		goto fail;
	}

fail:
	/* Success also falls through here with error == 0. */
	if (error)
		xl_detach(dev);

	splx(s);

	return(error);
}
1796
1797 /*
1798 * Shutdown hardware and free up resources. This can be called any
1799 * time after the mutex has been initialized. It is called in both
1800 * the error case in attach and the normal detach case so it needs
1801 * to be careful about only freeing resources that have actually been
1802 * allocated.
1803 */
static int
xl_detach(dev)
	device_t dev;
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	int rid, res;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Recompute which resource/rid the CSR window was allocated
	 * with in xl_attach(), so we release the matching one below.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO) {
		rid = XL_PCI_LOMEM;
		res = SYS_RES_MEMORY;
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
	}

	/*
	 * Quiesce the hardware before tearing anything down.
	 * NOTE(review): this runs even on the attach-failure path, where
	 * the CSR mapping may not exist yet — verify against callers.
	 */
	xl_reset(sc);
	xl_stop(sc);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	if (sc->xl_miibus)
		device_delete_child(dev, sc->xl_miibus);
	bus_generic_detach(dev);
	ifmedia_removeall(&sc->ifmedia);

	/*
	 * Release bus resources; each is guarded so this routine is
	 * safe to call from the partially-initialized error path in
	 * xl_attach().
	 */
	if (sc->xl_intrhand)
		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
	if (sc->xl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
	if (sc->xl_fres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    XL_PCI_FUNCMEM, sc->xl_fres);
	if (sc->xl_res)
		bus_release_resource(dev, res, rid, sc->xl_res);

	/* Tear down DMA state: unload, free memory, then destroy tags. */
	if (sc->xl_mtag) {
		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
		bus_dma_tag_destroy(sc->xl_mtag);
	}
	if (sc->xl_ldata.xl_rx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
	}
	if (sc->xl_ldata.xl_tx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
	}

	splx(s);

	return(0);
}
1868
1869 /*
1870 * Initialize the transmit descriptors.
1871 */
1872 static int
1873 xl_list_tx_init(sc)
1874 struct xl_softc *sc;
1875 {
1876 struct xl_chain_data *cd;
1877 struct xl_list_data *ld;
1878 int error, i;
1879
1880 cd = &sc->xl_cdata;
1881 ld = &sc->xl_ldata;
1882 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1883 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1884 error = bus_dmamap_create(sc->xl_mtag, 0,
1885 &cd->xl_tx_chain[i].xl_map);
1886 if (error)
1887 return(error);
1888 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1889 i * sizeof(struct xl_list);
1890 if (i == (XL_TX_LIST_CNT - 1))
1891 cd->xl_tx_chain[i].xl_next = NULL;
1892 else
1893 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1894 }
1895
1896 cd->xl_tx_free = &cd->xl_tx_chain[0];
1897 cd->xl_tx_tail = cd->xl_tx_head = NULL;
1898
1899 bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1900 return(0);
1901 }
1902
1903 /*
1904 * Initialize the transmit descriptors.
1905 */
1906 static int
1907 xl_list_tx_init_90xB(sc)
1908 struct xl_softc *sc;
1909 {
1910 struct xl_chain_data *cd;
1911 struct xl_list_data *ld;
1912 int error, i;
1913
1914 cd = &sc->xl_cdata;
1915 ld = &sc->xl_ldata;
1916 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1917 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1918 error = bus_dmamap_create(sc->xl_mtag, 0,
1919 &cd->xl_tx_chain[i].xl_map);
1920 if (error)
1921 return(error);
1922 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1923 i * sizeof(struct xl_list);
1924 if (i == (XL_TX_LIST_CNT - 1))
1925 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1926 else
1927 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1928 if (i == 0)
1929 cd->xl_tx_chain[i].xl_prev =
1930 &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1931 else
1932 cd->xl_tx_chain[i].xl_prev =
1933 &cd->xl_tx_chain[i - 1];
1934 }
1935
1936 bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
1937 ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1938
1939 cd->xl_tx_prod = 1;
1940 cd->xl_tx_cons = 1;
1941 cd->xl_tx_cnt = 0;
1942
1943 bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1944 return(0);
1945 }
1946
1947 /*
1948 * Initialize the RX descriptors and allocate mbufs for them. Note that
1949 * we arrange the descriptors in a closed ring, so that the last descriptor
1950 * points back to the first.
1951 */
1952 static int
1953 xl_list_rx_init(sc)
1954 struct xl_softc *sc;
1955 {
1956 struct xl_chain_data *cd;
1957 struct xl_list_data *ld;
1958 int error, i, next;
1959 u_int32_t nextptr;
1960
1961 cd = &sc->xl_cdata;
1962 ld = &sc->xl_ldata;
1963
1964 for (i = 0; i < XL_RX_LIST_CNT; i++) {
1965 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
1966 error = bus_dmamap_create(sc->xl_mtag, 0,
1967 &cd->xl_rx_chain[i].xl_map);
1968 if (error)
1969 return(error);
1970 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
1971 if (error)
1972 return(error);
1973 if (i == (XL_RX_LIST_CNT - 1))
1974 next = 0;
1975 else
1976 next = i + 1;
1977 nextptr = ld->xl_rx_dmaaddr +
1978 next * sizeof(struct xl_list_onefrag);
1979 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
1980 ld->xl_rx_list[i].xl_next = htole32(nextptr);
1981 }
1982
1983 bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1984 cd->xl_rx_head = &cd->xl_rx_chain[0];
1985
1986 return(0);
1987 }
1988
1989 /*
1990 * Initialize an RX descriptor and attach an MBUF cluster.
1991 * If we fail to do so, we need to leave the old mbuf and
1992 * the old DMA map untouched so that it can be reused.
1993 */
1994 static int
1995 xl_newbuf(sc, c)
1996 struct xl_softc *sc;
1997 struct xl_chain_onefrag *c;
1998 {
1999 struct mbuf *m_new = NULL;
2000 bus_dmamap_t map;
2001 int error;
2002 u_int32_t baddr;
2003
2004 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2005 if (m_new == NULL)
2006 return(ENOBUFS);
2007
2008 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
2009
2010 /* Force longword alignment for packet payload. */
2011 m_adj(m_new, ETHER_ALIGN);
2012
2013 error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
2014 xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
2015 if (error) {
2016 m_freem(m_new);
2017 printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
2018 return(error);
2019 }
2020
2021 bus_dmamap_unload(sc->xl_mtag, c->xl_map);
2022 map = c->xl_map;
2023 c->xl_map = sc->xl_tmpmap;
2024 sc->xl_tmpmap = map;
2025 c->xl_mbuf = m_new;
2026 c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
2027 c->xl_ptr->xl_status = 0;
2028 c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
2029 bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
2030 return(0);
2031 }
2032
2033 static int
2034 xl_rx_resync(sc)
2035 struct xl_softc *sc;
2036 {
2037 struct xl_chain_onefrag *pos;
2038 int i;
2039
2040 pos = sc->xl_cdata.xl_rx_head;
2041
2042 for (i = 0; i < XL_RX_LIST_CNT; i++) {
2043 if (pos->xl_ptr->xl_status)
2044 break;
2045 pos = pos->xl_next;
2046 }
2047
2048 if (i == XL_RX_LIST_CNT)
2049 return(0);
2050
2051 sc->xl_cdata.xl_rx_head = pos;
2052
2053 return(EAGAIN);
2054 }
2055
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Walks the RX descriptor ring starting at xl_rx_head, handing each
 * completed frame to ether_input().  On error (or allocation failure
 * for a replacement cluster) the descriptor's status word is cleared
 * so the chip can re-use the buffer in place.  If the upload engine
 * has stalled at the end of the ring, the uplist pointer is reloaded
 * and we loop back via the 'again' label to drain any frames that
 * arrived in the meantime.
 */
static void
xl_rxeof(sc)
	struct xl_softc		*sc;
{
        struct ether_header	*eh;
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct xl_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

again:

	/* Make the chip's descriptor writes visible to the CPU. */
	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	/* A non-zero status word means the descriptor has been uploaded. */
	while((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
		cur_rx = sc->xl_cdata.xl_rx_head;
		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
		total_len = rxstat & XL_RXSTAT_LENMASK;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software. We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			printf("xl%d: bad receive status -- "
			    "packet dropped\n", sc->xl_unit);
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
		    BUS_DMASYNC_POSTREAD);
		m = cur_rx->xl_mbuf;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (xl_newbuf(sc, cur_rx)) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}
		/* Flush the refreshed descriptor back to the chip. */
		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Do IP checksum checking. */
			if (rxstat & XL_RXSTAT_IPCKOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & XL_RXSTAT_IPCKERR))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			/* TCP or UDP checksum verified by the chip. */
			if ((rxstat & XL_RXSTAT_TCPCOK &&
			    !(rxstat & XL_RXSTAT_TCPCKERR)) ||
			    (rxstat & XL_RXSTAT_UDPCKOK &&
			    !(rxstat & XL_RXSTAT_UDPCKERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ether_input(ifp, eh, m);
	}

	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * you need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
		goto again;
	}

	return;
}
2193
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Used for the original 3c900/3c905 ("boomerang") chips; the 3c905B
 * variant uses xl_txeof_90xB() instead.  Completed descriptors are
 * unmapped, their mbufs freed, and the descriptors returned to the
 * software free list.
 */
static void
xl_txeof(sc)
	struct xl_softc		*sc;
{
	struct xl_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while(sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/*
		 * A non-zero downlist pointer means the download engine
		 * is still working on the list; stop reclaiming here.
		 */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		ifp->if_opackets++;

		/* Return the descriptor to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Ring fully drained; allow new transmissions. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames are still pending; if the engine stalled or lost
		 * the list pointer, restart it at the current head.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
				sc->xl_cdata.xl_tx_head->xl_phys);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}

	return;
}
2251
/*
 * TX completion handler for the 3c905B.  Unlike the older chips, the
 * 905B sets XL_TXSTAT_DL_COMPLETE in the descriptor status word when a
 * frame has been downloaded, so we can reclaim by walking the ring from
 * the consumer index up to the producer index.
 */
static void
xl_txeof_90xB(sc)
	struct xl_softc		*sc;
{
	struct xl_chain		*cur_tx = NULL;
	struct ifnet		*ifp;
	int			idx;

	ifp = &sc->arpcom.ac_if;

	/* Pick up the chip's status-word writes before inspecting them. */
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	idx = sc->xl_cdata.xl_tx_cons;
	while(idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
		    XL_TXSTAT_DL_COMPLETE))
			break;

		if (cur_tx->xl_mbuf != NULL) {
			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* If we reclaimed anything, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
2295
/*
 * TX 'end of channel' interrupt handler. Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really TX error handler.
 *
 * Drains the TX_STATUS FIFO one entry at a time.  On a fatal error
 * (underrun/jabber/reclaim) the transmitter is reset, the download
 * list pointer is restored so pending frames aren't lost, and the
 * start threshold is bumped on underruns to reduce recurrence.
 */
static void
xl_txeoc(sc)
	struct xl_softc		*sc;
{
	u_int8_t		txstat;

	/* Each read pops one entry off the TX status stack. */
	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
			txstat & XL_TXSTATUS_JABBER ||
			txstat & XL_TXSTATUS_RECLAIM) {
			printf("xl%d: transmission error: %x\n",
				sc->xl_unit, txstat);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			/*
			 * The TX reset clears the downlist pointer, so
			 * repoint the chip at the first unreclaimed
			 * descriptor (per-chip-type bookkeeping differs).
			 */
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int			i;
					struct xl_chain		*c;
					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->xl_cdata.xl_tx_head->xl_phys);
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
				printf("xl%d: tx underrun, increasing tx start"
				    " threshold to %d bytes\n", sc->xl_unit,
				    sc->xl_tx_thresh);
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Non-fatal status; just re-enable and continue. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}

	return;
}
2363
/*
 * Main interrupt service routine.  Loops reading the status register,
 * acknowledging and dispatching each pending interrupt source until no
 * interesting bits remain.  A status of 0xFFFF is treated as "give up"
 * (NOTE(review): presumably indicates the hardware is absent/ejected —
 * confirm against the bus attachment code).
 */
static void
xl_intr(arg)
	void			*arg;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	while((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		/* Ack only the sources we're about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (status & XL_STAT_UP_COMPLETE) {
			int			curpkts;

			/*
			 * If xl_rxeof() didn't deliver anything, the chip
			 * and driver may disagree about the ring position;
			 * resynchronize and retry until drained.
			 */
			curpkts = ifp->if_ipackets;
			xl_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete is only raised on transmit errors. */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: full reset and reinitialization. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		if (status & XL_STAT_STATSOFLOW) {
			/* Drain stats counters without rearming the timer. */
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	/* Kick the transmitter if packets queued up while we were busy. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	return;
}
2420
/*
 * Periodic statistics harvester.  Reading the window-6 counters also
 * clears them, which prevents statistics-overflow interrupts.  Runs
 * either from the timeout() callout (rearming itself) or synchronously
 * from xl_intr()/xl_init() with xl_stats_no_timeout set.
 */
static void
xl_stats_update(xsc)
	void			*xsc;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	struct xl_stats		xl_stats;
	u_int8_t		*p;
	int			i;
	struct mii_data		*mii = NULL;

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/*
	 * Byte-copy 16 consecutive window-6 registers into xl_stats.
	 * NOTE(review): assumes struct xl_stats exactly mirrors the
	 * register layout starting at XL_W6_CARRIER_LOST — layout is
	 * defined in the header, verify if changing either side.
	 */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	/* Drive the PHY state machine once per tick, but not from xl_intr(). */
	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	/* Restore the normal operating window. */
	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);

	return;
}
2472
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * On EFBIG (too many fragments) the chain is defragmented into fewer
 * clusters and the mapping retried once.  Returns 0 on success, 1 on
 * failure; on failure the mbuf chain has been freed.  On success the
 * caller owns cleanup via the descriptor's xl_mbuf/xl_map fields.
 */
static int
xl_encap(sc, c, m_head)
	struct xl_softc		*sc;
	struct xl_chain		*c;
	struct mbuf		*m_head;
{
	int			error;
	u_int32_t		status;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
	    xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		m_freem(m_head);
		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
		return(1);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf		*m_new;

		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL) {
			m_freem(m_head);
			return(1);
		} else {
			m_head = m_new;
		}

		error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
			m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_head);
			printf("xl%d: can't map mbuf (error %d)\n",
			    sc->xl_unit, error);
			return(1);
		}
	}

	/*
	 * On the 905B, request hardware checksum insertion as indicated
	 * by the per-packet csum_flags (unless the workaround for broken
	 * TX checksumming is compiled in).
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & CSUM_IP)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
		c->xl_ptr->xl_status = htole32(status);
	}

	c->xl_mbuf = m_head;
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
	return(0);
}
2552
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 *
 * This is the if_start handler for the pre-905B chips, which manage the
 * TX descriptors as a linked free list (xl_tx_free/head/tail) rather
 * than the producer/consumer ring used by xl_start_90xB().
 */
static void
xl_start(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain		*prev_tx;
	u_int32_t		status;
	int			error;

	sc = ifp->if_softc;
	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		/* Try to reclaim completed descriptors before giving up. */
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	while(sc->xl_cdata.xl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap freed the mbuf; reuse this descriptor. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->xl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		return;
	}

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append the new chain to the existing one and move the
		 * interrupt request from the old tail to the new one.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    htole32(start_tx->xl_phys);
		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);

	return;
}
2689
/*
 * Transmit routine for the 3c905B.  Uses a fixed producer/consumer ring
 * (xl_tx_prod/xl_tx_cons/xl_tx_cnt) with hardware download polling, so
 * unlike xl_start() no explicit stall/unstall dance is needed — linking
 * the new chain onto the previous descriptor is enough to start it.
 */
static void
xl_start_90xB(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain		*prev_tx;
	int			error, idx;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_OACTIVE) {
		return;
	}

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a few descriptors in reserve; stop when nearly full. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap freed the mbuf; reuse this slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->xl_mbuf);

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		return;
	}

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Linking onto the previous descriptor makes the chain live. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
2776
2777 static void
2778 xl_init(xsc)
2779 void *xsc;
2780 {
2781 struct xl_softc *sc = xsc;
2782 struct ifnet *ifp = &sc->arpcom.ac_if;
2783 int error, i;
2784 u_int16_t rxfilt = 0;
2785 struct mii_data *mii = NULL;
2786 int s;
2787
2788 s = splimp();
2789
2790 /*
2791 * Cancel pending I/O and free all RX/TX buffers.
2792 */
2793 xl_stop(sc);
2794
2795 if (sc->xl_miibus == NULL) {
2796 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2797 xl_wait(sc);
2798 }
2799 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2800 xl_wait(sc);
2801 DELAY(10000);
2802
2803 if (sc->xl_miibus != NULL)
2804 mii = device_get_softc(sc->xl_miibus);
2805
2806 /* Init our MAC address */
2807 XL_SEL_WIN(2);
2808 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2809 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2810 sc->arpcom.ac_enaddr[i]);
2811 }
2812
2813 /* Clear the station mask. */
2814 for (i = 0; i < 3; i++)
2815 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2816 #ifdef notdef
2817 /* Reset TX and RX. */
2818 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2819 xl_wait(sc);
2820 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2821 xl_wait(sc);
2822 #endif
2823 /* Init circular RX list. */
2824 error = xl_list_rx_init(sc);
2825 if (error) {
2826 printf("xl%d: initialization of the rx ring failed (%d)\n",
2827 sc->xl_unit, error);
2828 xl_stop(sc);
2829 splx(s);
2830 return;
2831 }
2832
2833 /* Init TX descriptors. */
2834 if (sc->xl_type == XL_TYPE_905B)
2835 error = xl_list_tx_init_90xB(sc);
2836 else
2837 error = xl_list_tx_init(sc);
2838 if (error) {
2839 printf("xl%d: initialization of the tx ring failed (%d)\n",
2840 sc->xl_unit, error);
2841 xl_stop(sc);
2842 splx(s);
2843 }
2844
2845 /*
2846 * Set the TX freethresh value.
2847 * Note that this has no effect on 3c905B "cyclone"
2848 * cards but is required for 3c900/3c905 "boomerang"
2849 * cards in order to enable the download engine.
2850 */
2851 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2852
2853 /* Set the TX start threshold for best performance. */
2854 sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2855 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2856
2857 /*
2858 * If this is a 3c905B, also set the tx reclaim threshold.
2859 * This helps cut down on the number of tx reclaim errors
2860 * that could happen on a busy network. The chip multiplies
2861 * the register value by 16 to obtain the actual threshold
2862 * in bytes, so we divide by 16 when setting the value here.
2863 * The existing threshold value can be examined by reading
2864 * the register at offset 9 in window 5.
2865 */
2866 if (sc->xl_type == XL_TYPE_905B) {
2867 CSR_WRITE_2(sc, XL_COMMAND,
2868 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2869 }
2870
2871 /* Set RX filter bits. */
2872 XL_SEL_WIN(5);
2873 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2874
2875 /* Set the individual bit to receive frames for this host only. */
2876 rxfilt |= XL_RXFILTER_INDIVIDUAL;
2877
2878 /* If we want promiscuous mode, set the allframes bit. */
2879 if (ifp->if_flags & IFF_PROMISC) {
2880 rxfilt |= XL_RXFILTER_ALLFRAMES;
2881 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2882 } else {
2883 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2884 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2885 }
2886
2887 /*
2888 * Set capture broadcast bit to capture broadcast frames.
2889 */
2890 if (ifp->if_flags & IFF_BROADCAST) {
2891 rxfilt |= XL_RXFILTER_BROADCAST;
2892 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2893 } else {
2894 rxfilt &= ~XL_RXFILTER_BROADCAST;
2895 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2896 }
2897
2898 /*
2899 * Program the multicast filter, if necessary.
2900 */
2901 if (sc->xl_type == XL_TYPE_905B)
2902 xl_setmulti_hash(sc);
2903 else
2904 xl_setmulti(sc);
2905
2906 /*
2907 * Load the address of the RX list. We have to
2908 * stall the upload engine before we can manipulate
2909 * the uplist pointer register, then unstall it when
2910 * we're finished. We also have to wait for the
2911 * stall command to complete before proceeding.
2912 * Note that we have to do this after any RX resets
2913 * have completed since the uplist register is cleared
2914 * by a reset.
2915 */
2916 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2917 xl_wait(sc);
2918 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2919 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2920 xl_wait(sc);
2921
2922
2923 if (sc->xl_type == XL_TYPE_905B) {
2924 /* Set polling interval */
2925 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2926 /* Load the address of the TX list */
2927 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2928 xl_wait(sc);
2929 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2930 sc->xl_cdata.xl_tx_chain[0].xl_phys);
2931 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2932 xl_wait(sc);
2933 }
2934
2935 /*
2936 * If the coax transceiver is on, make sure to enable
2937 * the DC-DC converter.
2938 */
2939 XL_SEL_WIN(3);
2940 if (sc->xl_xcvr == XL_XCVR_COAX)
2941 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2942 else
2943 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2944
2945 /*
2946 * increase packet size to allow reception of 802.1q or ISL packets.
2947 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
2948 * control register. For 3c90xB/C chips, use the RX packet size
2949 * register.
2950 */
2951
2952 if (sc->xl_type == XL_TYPE_905B)
2953 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
2954 else {
2955 u_int8_t macctl;
2956 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
2957 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
2958 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
2959 }
2960
2961 /* Clear out the stats counters. */
2962 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2963 sc->xl_stats_no_timeout = 1;
2964 xl_stats_update(sc);
2965 sc->xl_stats_no_timeout = 0;
2966 XL_SEL_WIN(4);
2967 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2968 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2969
2970 /*
2971 * Enable interrupts.
2972 */
2973 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2974 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2975 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2976 if (sc->xl_flags & XL_FLAG_FUNCREG)
2977 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
2978
2979 /* Set the RX early threshold */
2980 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2981 CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2982
2983 /* Enable receiver and transmitter. */
2984 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2985 xl_wait(sc);
2986 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2987 xl_wait(sc);
2988
2989 if (mii != NULL)
2990 mii_mediachg(mii);
2991
2992 /* Select window 7 for normal operations. */
2993 XL_SEL_WIN(7);
2994
2995 ifp->if_flags |= IFF_RUNNING;
2996 ifp->if_flags &= ~IFF_OACTIVE;
2997
2998 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2999
3000 splx(s);
3001
3002 return;
3003 }
3004
3005 /*
3006 * Set media options.
3007 */
3008 static int
3009 xl_ifmedia_upd(ifp)
3010 struct ifnet *ifp;
3011 {
3012 struct xl_softc *sc;
3013 struct ifmedia *ifm = NULL;
3014 struct mii_data *mii = NULL;
3015
3016 sc = ifp->if_softc;
3017 if (sc->xl_miibus != NULL)
3018 mii = device_get_softc(sc->xl_miibus);
3019 if (mii == NULL)
3020 ifm = &sc->ifmedia;
3021 else
3022 ifm = &mii->mii_media;
3023
3024 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3025 case IFM_100_FX:
3026 case IFM_10_FL:
3027 case IFM_10_2:
3028 case IFM_10_5:
3029 xl_setmode(sc, ifm->ifm_media);
3030 return(0);
3031 break;
3032 default:
3033 break;
3034 }
3035
3036 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
3037 || sc->xl_media & XL_MEDIAOPT_BT4) {
3038 xl_init(sc);
3039 } else {
3040 xl_setmode(sc, ifm->ifm_media);
3041 }
3042
3043 return(0);
3044 }
3045
/*
 * Report current media status.
 *
 * Determines the active transceiver by reading the connector field of
 * the internal-config register (window 3), then fills in ifmr.  When
 * a PHY manages the link (MII/BTX/AUTO), its state is polled instead.
 */
static void
xl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct xl_softc		*sc;
	u_int32_t		icfg;
	struct mii_data		*mii = NULL;

	sc = ifp->if_softc;
	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	/* Extract the active connector from the internal config register. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;

	switch(icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		/* A 905B with only 10FL reports AUI; distinguish it here. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */

	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		/* PHY-managed media: let the MII layer report status. */
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
		break;
	}

	return;
}
3113
/*
 * Handle socket ioctls directed at the interface.  Runs at splimp to
 * exclude the interrupt path (legacy spl synchronization).  Notable
 * behavior: toggling only IFF_PROMISC while running updates the RX
 * filter in place instead of doing a full reinit.
 */
static int
xl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct xl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii = NULL;
	u_int8_t		rxfilt;
	int			s;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		/* Generic address/MTU handling. */
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		XL_SEL_WIN(5);
		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the promiscuous bit changed while the
			 * interface is running, adjust the RX filter
			 * directly; otherwise do a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xl_if_flags & IFF_PROMISC)) {
				rxfilt |= XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xl_if_flags & IFF_PROMISC) {
				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember flags so the next call can detect changes. */
		sc->xl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the multicast filter (chip-type specific). */
		if (sc->xl_type == XL_TYPE_905B)
			xl_setmulti_hash(sc);
		else
			xl_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->xl_miibus != NULL)
			mii = device_get_softc(sc->xl_miibus);
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;
        case SIOCSIFCAP:
		/* Enable/disable TX checksum offload per user request. */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = XL905B_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return(error);
}
3196
/*
 * Watchdog timer handler.  Fires when a transmission set in xl_start*()
 * did not complete within the if_timer window.  Reports a likely cable
 * problem if carrier is missing, reclaims whatever completed, then
 * resets and reinitializes the chip and restarts transmission.
 */
static void
xl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	u_int16_t		status = 0;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	printf("xl%d: watchdog timeout\n", sc->xl_unit);

	if (status & XL_MEDIASTAT_CARRIER)
		printf("xl%d: no carrier - transceiver cable problem?\n",
		    sc->xl_unit);
	/* Drain any completions before the full reset/reinit. */
	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_reset(sc);
	xl_init(sc);

	/* Resubmit anything still sitting in the send queue. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	return;
}
3225
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Disables the receiver, transmitter, statistics and interrupts, stops
 * the periodic callout, then unloads/destroys the per-buffer DMA maps
 * and zeroes both descriptor rings.  Leaves the interface marked not
 * RUNNING/OACTIVE.
 */
static void
xl_stop(sc)
	struct xl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Ack anything pending and mask all interrupt sources. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
	if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4 (sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Stop the stats updater. */
	untimeout(xl_stats_update, sc, sc->xl_stat_ch);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_rx_list != NULL)
		bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_tx_list != NULL)
		bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
3299
3300 /*
3301 * Stop all chip I/O so that the kernel's probe routines don't
3302 * get confused by errant DMAs when rebooting.
3303 */
3304 static void
3305 xl_shutdown(dev)
3306 device_t dev;
3307 {
3308 struct xl_softc *sc;
3309
3310 sc = device_get_softc(dev);
3311
3312 xl_reset(sc);
3313 xl_stop(sc);
3314
3315 return;
3316 }
3317
3318 static int
3319 xl_suspend(dev)
3320 device_t dev;
3321 {
3322 struct xl_softc *sc;
3323 int s;
3324
3325 s = splimp();
3326
3327 sc = device_get_softc(dev);
3328
3329 xl_stop(sc);
3330
3331 splx(s);
3332
3333 return(0);
3334 }
3335
3336 static int
3337 xl_resume(dev)
3338 device_t dev;
3339 {
3340 struct xl_softc *sc;
3341 struct ifnet *ifp;
3342 int s;
3343
3344 s = splimp();
3345
3346 sc = device_get_softc(dev);
3347 ifp = &sc->arpcom.ac_if;
3348
3349 xl_reset(sc);
3350 if (ifp->if_flags & IFF_UP)
3351 xl_init(sc);
3352
3353 splx(s);
3354 return(0);
3355 }
Cache object: db6dfea10f513abc9a34acc8990b1157
|