/* FreeBSD/Linux Kernel Cross Reference: sys/pci/if_xl.c */
1 /*-
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/5.4/sys/pci/if_xl.c 142884 2005-03-01 08:11:52Z imp $");
35
36 /*
37 * 3Com 3c90x Etherlink XL PCI NIC driver
38 *
39 * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
40 * bus-master chips (3c90x cards and embedded controllers) including
41 * the following:
42 *
43 * 3Com 3c900-TPO 10Mbps/RJ-45
44 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
45 * 3Com 3c905-TX 10/100Mbps/RJ-45
46 * 3Com 3c905-T4 10/100Mbps/RJ-45
47 * 3Com 3c900B-TPO 10Mbps/RJ-45
48 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
49 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
50 * 3Com 3c900B-FL 10Mbps/Fiber-optic
51 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
52 * 3Com 3c905B-TX 10/100Mbps/RJ-45
53 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
54 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
55 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
56 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
57 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
58 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
59 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
60 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
61 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
68 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
69 * Dell on-board 3c920 10/100Mbps/RJ-45
70 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
71 * Dell Latitude laptop docking station embedded 3c905-TX
72 *
73 * Written by Bill Paul <wpaul@ctr.columbia.edu>
74 * Electrical Engineering Department
75 * Columbia University, New York City
76 */
77 /*
78 * The 3c90x series chips use a bus-master DMA interface for transfering
79 * packets to and from the controller chip. Some of the "vortex" cards
80 * (3c59x) also supported a bus master mode, however for those chips
81 * you could only DMA packets to/from a contiguous memory buffer. For
82 * transmission this would mean copying the contents of the queued mbuf
83 * chain into an mbuf cluster and then DMAing the cluster. This extra
84 * copy would sort of defeat the purpose of the bus master support for
85 * any packet that doesn't fit into a single mbuf.
86 *
87 * By contrast, the 3c90x cards support a fragment-based bus master
88 * mode where mbuf chains can be encapsulated using TX descriptors.
89 * This is similar to other PCI chips such as the Texas Instruments
90 * ThunderLAN and the Intel 82557/82558.
91 *
92 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
93 * bus master chips because they maintain the old PIO interface for
94 * backwards compatibility, but starting with the 3c905B and the
95 * "cyclone" chips, the compatibility interface has been dropped.
96 * Since using bus master DMA is a big win, we use this driver to
97 * support the PCI "boomerang" chips even though they work with the
98 * "vortex" driver in order to obtain better performance.
99 *
100 * This driver is in the /sys/pci directory because it only supports
101 * PCI-based NICs.
102 */
103
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/sockio.h>
107 #include <sys/endian.h>
108 #include <sys/mbuf.h>
109 #include <sys/kernel.h>
110 #include <sys/module.h>
111 #include <sys/socket.h>
112
113 #include <net/if.h>
114 #include <net/if_arp.h>
115 #include <net/ethernet.h>
116 #include <net/if_dl.h>
117 #include <net/if_media.h>
118
119 #include <net/bpf.h>
120
121 #include <machine/bus_memio.h>
122 #include <machine/bus_pio.h>
123 #include <machine/bus.h>
124 #include <machine/resource.h>
125 #include <sys/bus.h>
126 #include <sys/rman.h>
127
128 #include <dev/mii/mii.h>
129 #include <dev/mii/miivar.h>
130
131 #include <dev/pci/pcireg.h>
132 #include <dev/pci/pcivar.h>
133
134 MODULE_DEPEND(xl, pci, 1, 1, 1);
135 MODULE_DEPEND(xl, ether, 1, 1, 1);
136 MODULE_DEPEND(xl, miibus, 1, 1, 1);
137
138 /* "device miibus" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
140
141 #include <pci/if_xlreg.h>
142
143 /*
144 * TX Checksumming is disabled by default for two reasons:
145 * - TX Checksumming will occasionally produce corrupt packets
146 * - TX Checksumming seems to reduce performance
147 *
148 * Only 905B/C cards were reported to have this problem, it is possible
149 * that later chips _may_ be immune.
150 */
151 #define XL905B_TXCSUM_BROKEN 1
152
153 #ifdef XL905B_TXCSUM_BROKEN
154 #define XL905B_CSUM_FEATURES 0
155 #else
156 #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
157 #endif
158
/*
 * Various supported device vendors/types and their names.
 * xl_probe() walks this table matching PCI vendor/device ID pairs;
 * the matching name becomes the device description.
 */
static struct xl_type xl_devs[] = {
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
		"3Com 3c900-TPO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
		"3Com 3c900-COMBO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
		"3Com 3c905-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
		"3Com 3c905-T4 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
		"3Com 3c900B-TPO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
		"3Com 3c900B-COMBO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
		"3Com 3c900B-TPC Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
		"3Com 3c900B-FL Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
		"3Com 3c905B-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
		"3Com 3c905B-T4 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
		"3Com 3c905B-FX/SC Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
		"3Com 3c905B-COMBO Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
		"3Com 3c905C-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
		"3Com 3c920B-EMB Integrated Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
		"3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
		"3Com 3c980 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
		"3Com 3c980C Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
		"3Com 3cSOHO100-TX OfficeConnect" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
		"3Com 3c450-TX HomeConnect" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_555,
		"3Com 3c555 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556,
		"3Com 3c556 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
		"3Com 3c556B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
		"3Com 3c575TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
		"3Com 3c575B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
		"3Com 3c575C Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656,
		"3Com 3c656 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
		"3Com 3c656B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_656C,
		"3Com 3c656C Fast Etherlink XL" },
	{ 0, 0, NULL }		/* sentinel: terminates the probe loop */
};
221
222 static int xl_probe(device_t);
223 static int xl_attach(device_t);
224 static int xl_detach(device_t);
225
226 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
227 static void xl_stats_update(void *);
228 static void xl_stats_update_locked(struct xl_softc *);
229 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf *);
230 static void xl_rxeof(struct xl_softc *);
231 static int xl_rx_resync(struct xl_softc *);
232 static void xl_txeof(struct xl_softc *);
233 static void xl_txeof_90xB(struct xl_softc *);
234 static void xl_txeoc(struct xl_softc *);
235 static void xl_intr(void *);
236 static void xl_start(struct ifnet *);
237 static void xl_start_locked(struct ifnet *);
238 static void xl_start_90xB_locked(struct ifnet *);
239 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
240 static void xl_init(void *);
241 static void xl_init_locked(struct xl_softc *);
242 static void xl_stop(struct xl_softc *);
243 static void xl_watchdog(struct ifnet *);
244 static void xl_shutdown(device_t);
245 static int xl_suspend(device_t);
246 static int xl_resume(device_t);
247
248 static int xl_ifmedia_upd(struct ifnet *);
249 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
250
251 static int xl_eeprom_wait(struct xl_softc *);
252 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
253 static void xl_mii_sync(struct xl_softc *);
254 static void xl_mii_send(struct xl_softc *, u_int32_t, int);
255 static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
256 static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
257
258 static void xl_setcfg(struct xl_softc *);
259 static void xl_setmode(struct xl_softc *, int);
260 static void xl_setmulti(struct xl_softc *);
261 static void xl_setmulti_hash(struct xl_softc *);
262 static void xl_reset(struct xl_softc *);
263 static int xl_list_rx_init(struct xl_softc *);
264 static int xl_list_tx_init(struct xl_softc *);
265 static int xl_list_tx_init_90xB(struct xl_softc *);
266 static void xl_wait(struct xl_softc *);
267 static void xl_mediacheck(struct xl_softc *);
268 static void xl_choose_media(struct xl_softc *sc, int *media);
269 static void xl_choose_xcvr(struct xl_softc *, int);
270 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
271 static void xl_dma_map_rxbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);
272 static void xl_dma_map_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);
273 #ifdef notdef
274 static void xl_testpacket(struct xl_softc *);
275 #endif
276
277 static int xl_miibus_readreg(device_t, int, int);
278 static int xl_miibus_writereg(device_t, int, int, int);
279 static void xl_miibus_statchg(device_t);
280 static void xl_miibus_mediainit(device_t);
281
/*
 * newbus method dispatch table: the device, bus, and MII interfaces
 * implemented by this driver.
 */
static device_method_t xl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xl_probe),
	DEVMETHOD(device_attach,	xl_attach),
	DEVMETHOD(device_detach,	xl_detach),
	DEVMETHOD(device_shutdown,	xl_shutdown),
	DEVMETHOD(device_suspend,	xl_suspend),
	DEVMETHOD(device_resume,	xl_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (bit-banged MDIO, see xl_mii_* below) */
	DEVMETHOD(miibus_readreg,	xl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	xl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	xl_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	xl_miibus_mediainit),

	{ 0, 0 }	/* terminator */
};
303
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t xl_driver = {
	"xl",
	xl_methods,
	sizeof(struct xl_softc)
};

static devclass_t xl_devclass;

/* Register on both cardbus and pci buses; miibus hangs off xl. */
DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0);
DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
315
316 static void
317 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
318 {
319 u_int32_t *paddr;
320
321 paddr = arg;
322 *paddr = segs->ds_addr;
323 }
324
325 static void
326 xl_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg,
327 bus_size_t mapsize, int error)
328 {
329 u_int32_t *paddr;
330
331 if (error)
332 return;
333
334 KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments"));
335 paddr = arg;
336 *paddr = segs->ds_addr;
337 }
338
/*
 * bus_dmamap_load() callback for TX mbuf chains: translate the DMA
 * segment list into the fragment array of an xl_list TX descriptor.
 * The caller passes the descriptor itself as 'arg'.
 */
static void
xl_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct xl_list *l;
	int i, total_len;

	if (error)
		return;

	KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments"));

	total_len = 0;
	l = arg;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		/* Descriptor fields are little-endian on the wire. */
		l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
		l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
		total_len += segs[i].ds_len;
	}
	/* Flag the final fragment so the chip knows where the packet ends. */
	l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len |
	    XL_LAST_FRAG);
	/* Status carries the total frame length; next pointer is set later. */
	l->xl_status = htole32(total_len);
	l->xl_next = 0;
}
364
365 /*
366 * Murphy's law says that it's possible the chip can wedge and
367 * the 'command in progress' bit may never clear. Hence, we wait
368 * only a finite amount of time to avoid getting caught in an
369 * infinite loop. Normally this delay routine would be a macro,
370 * but it isn't called during normal operation so we can afford
371 * to make it a function.
372 */
373 static void
374 xl_wait(struct xl_softc *sc)
375 {
376 register int i;
377
378 for (i = 0; i < XL_TIMEOUT; i++) {
379 if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
380 break;
381 }
382
383 if (i == XL_TIMEOUT)
384 if_printf(&sc->arpcom.ac_if, "command never completed!\n");
385 }
386
/*
 * MII access routines are provided for adapters with external
 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
 * Note: if you don't perform the MDIO operations just right,
 * it's possible to end up with code that works correctly with
 * some chips/CPUs/processor speeds/bus speeds/etc but not
 * with others.
 *
 * MII_SET/MII_CLR read-modify-write the window 4 PHY management
 * register.  NOTE: both macros expand using the caller's local
 * 'sc' variable, so they are only usable inside this driver's
 * functions.
 */
#define MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
403
/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
 * This puts any attached PHY management logic into a known state before
 * a command frame is shifted out.
 */
static void
xl_mii_sync(struct xl_softc *sc)
{
	register int i;

	XL_SEL_WIN(4);
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		/*
		 * NOTE(review): the data bit is re-asserted twice around
		 * each clock edge.  This looks redundant, but the repeated
		 * register writes presumably also pad the bit-bang timing,
		 * so they are deliberately left untouched.
		 */
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}
424
/*
 * Clock a series of bits through the MII, most-significant bit first.
 * 'cnt' is the number of low-order bits of 'bits' to shift out.
 */
static void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	/* Walk a one-bit mask down from the MSB of the value. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		/* Present the bit, then strobe the clock low->high. */
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}
446
/*
 * Read a PHY register through the bit-banged MII interface.
 * Returns 0 on success (result in frame->mii_data), 1 if the PHY
 * did not acknowledge the request.
 */
static int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int i, ack;

	/*XL_LOCK_ASSERT(sc);*/

	/* Set up frame for RX. */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Select register window 4 (PHY management lives there). */
	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/* Turn on data xmit. */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Send command/address info: start delimiter, opcode, addresses. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit so the PHY can drive the data line. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: nonzero here means the PHY did NOT respond. */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first, sampling on clock low. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:
	/* Trailing clock cycle to return the bus to idle. */
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	return (ack ? 1 : 0);
}
517
/*
 * Write to a PHY register through the bit-banged MII interface.
 * Always returns 0; MII write frames carry no acknowledgement.
 */
static int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{

	/*XL_LOCK_ASSERT(sc);*/

	/* Set up frame for TX. */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/* Select the window 4. */
	XL_SEL_WIN(4);

	/* Turn on data output. */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Shift out the start delimiter, opcode, PHY and register
	 * addresses, turnaround bits, and finally the 16 data bits.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	return (0);
}
556
557 static int
558 xl_miibus_readreg(device_t dev, int phy, int reg)
559 {
560 struct xl_softc *sc;
561 struct xl_mii_frame frame;
562
563 sc = device_get_softc(dev);
564
565 /*
566 * Pretend that PHYs are only available at MII address 24.
567 * This is to guard against problems with certain 3Com ASIC
568 * revisions that incorrectly map the internal transceiver
569 * control registers at all MII addresses. This can cause
570 * the miibus code to attach the same PHY several times over.
571 */
572 if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
573 return (0);
574
575 bzero((char *)&frame, sizeof(frame));
576 frame.mii_phyaddr = phy;
577 frame.mii_regaddr = reg;
578
579 xl_mii_readreg(sc, &frame);
580
581 return (frame.mii_data);
582 }
583
584 static int
585 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
586 {
587 struct xl_softc *sc;
588 struct xl_mii_frame frame;
589
590 sc = device_get_softc(dev);
591
592 if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
593 return (0);
594
595 bzero((char *)&frame, sizeof(frame));
596 frame.mii_phyaddr = phy;
597 frame.mii_regaddr = reg;
598 frame.mii_data = data;
599
600 xl_mii_writereg(sc, &frame);
601
602 return (0);
603 }
604
605 static void
606 xl_miibus_statchg(device_t dev)
607 {
608 struct xl_softc *sc;
609 struct mii_data *mii;
610
611 sc = device_get_softc(dev);
612 mii = device_get_softc(sc->xl_miibus);
613
614 /*XL_LOCK_ASSERT(sc);*/
615
616 xl_setcfg(sc);
617
618 /* Set ASIC's duplex mode to match the PHY. */
619 XL_SEL_WIN(3);
620 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
621 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
622 else
623 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
624 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
625 }
626
627 /*
628 * Special support for the 3c905B-COMBO. This card has 10/100 support
629 * plus BNC and AUI ports. This means we will have both an miibus attached
630 * plus some non-MII media settings. In order to allow this, we have to
631 * add the extra media to the miibus's ifmedia struct, but we can't do
632 * that during xl_attach() because the miibus hasn't been attached yet.
633 * So instead, we wait until the miibus probe/attach is done, at which
634 * point we will get a callback telling is that it's safe to add our
635 * extra media.
636 */
637 static void
638 xl_miibus_mediainit(device_t dev)
639 {
640 struct xl_softc *sc;
641 struct mii_data *mii;
642 struct ifmedia *ifm;
643
644 sc = device_get_softc(dev);
645 mii = device_get_softc(sc->xl_miibus);
646 ifm = &mii->mii_media;
647
648 /*XL_LOCK_ASSERT(sc);*/
649
650 if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
651 /*
652 * Check for a 10baseFL board in disguise.
653 */
654 if (sc->xl_type == XL_TYPE_905B &&
655 sc->xl_media == XL_MEDIAOPT_10FL) {
656 if (bootverbose)
657 if_printf(&sc->arpcom.ac_if,
658 "found 10baseFL\n");
659 ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
660 ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
661 NULL);
662 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
663 ifmedia_add(ifm,
664 IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
665 } else {
666 if (bootverbose)
667 if_printf(&sc->arpcom.ac_if, "found AUI\n");
668 ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
669 }
670 }
671
672 if (sc->xl_media & XL_MEDIAOPT_BNC) {
673 if (bootverbose)
674 if_printf(&sc->arpcom.ac_if, "found BNC\n");
675 ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
676 }
677 }
678
679 /*
680 * The EEPROM is slow: give it time to come ready after issuing
681 * it a command.
682 */
683 static int
684 xl_eeprom_wait(struct xl_softc *sc)
685 {
686 int i;
687
688 for (i = 0; i < 100; i++) {
689 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
690 DELAY(162);
691 else
692 break;
693 }
694
695 if (i == 100) {
696 if_printf(&sc->arpcom.ac_if, "eeprom failed to come ready\n");
697 return (1);
698 }
699
700 return (0);
701 }
702
/*
 * Read a sequence of 'cnt' 16-bit words from the EEPROM starting at
 * word offset 'off' into 'dest'.  Note that ethernet address data is
 * stored in the EEPROM in network byte order; pass swap != 0 to
 * convert each word with ntohs().  Returns 0 on success, 1 if the
 * EEPROM never came ready.
 */
static int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;

	XL_LOCK_ASSERT(sc);

#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/*
	 * XXX: WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	/* Some adapters keep their station data at a 0x30 word offset. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* Issue a read command with the properly-encoded offset. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}
750
751 /*
752 * NICs older than the 3c905B have only one multicast option, which
753 * is to enable reception of all multicast frames.
754 */
755 static void
756 xl_setmulti(struct xl_softc *sc)
757 {
758 struct ifnet *ifp = &sc->arpcom.ac_if;
759 struct ifmultiaddr *ifma;
760 u_int8_t rxfilt;
761 int mcnt = 0;
762
763 XL_LOCK_ASSERT(sc);
764
765 XL_SEL_WIN(5);
766 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
767
768 if (ifp->if_flags & IFF_ALLMULTI) {
769 rxfilt |= XL_RXFILTER_ALLMULTI;
770 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
771 return;
772 }
773
774 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
775 mcnt++;
776
777 if (mcnt)
778 rxfilt |= XL_RXFILTER_ALLMULTI;
779 else
780 rxfilt &= ~XL_RXFILTER_ALLMULTI;
781
782 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
783 }
784
/*
 * 3c905B adapters have a multicast hash filter that we can program:
 * clear all hash bits, then set one bit per group address.  Falls back
 * to receive-all-multicast when IFF_ALLMULTI is requested.
 */
static void
xl_setmulti_hash(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int h = 0, i;
	struct ifmultiaddr *ifma;
	u_int8_t rxfilt;
	int mcnt = 0;		/* number of hash bits actually programmed */

	XL_LOCK_ASSERT(sc);

	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	if (ifp->if_flags & IFF_ALLMULTI) {
		rxfilt |= XL_RXFILTER_ALLMULTI;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
		return;
	} else
		rxfilt &= ~XL_RXFILTER_ALLMULTI;

	/* first, zot all the existing hash bits */
	for (i = 0; i < XL_HASHFILT_SIZE; i++)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Note: the 3c905B currently only supports a 64-bit hash
		 * table, which means we really only need 6 bits, but the
		 * manual indicates that future chip revisions will have a
		 * 256-bit hash table, hence the routine is set up to
		 * calculate 8 bits of position info in case we need it some
		 * day.
		 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have
		 * a 256 bit hash table. This means we have to use all 8 bits
		 * regardless. On older cards, the upper 2 bits will be
		 * ignored. Grrrr....
		 */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
		CSR_WRITE_2(sc, XL_COMMAND,
		    h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
		mcnt++;
	}

	/* Enable hash filtering only if at least one bit was programmed. */
	if (mcnt)
		rxfilt |= XL_RXFILTER_MULTIHASH;
	else
		rxfilt &= ~XL_RXFILTER_MULTIHASH;

	CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
}
843
#ifdef notdef
/*
 * Debug helper (compiled out via 'notdef'): build a tiny frame
 * addressed from and to our own station address and push it through
 * the transmit path.  Handy when bringing up TX on new hardware.
 */
static void
xl_testpacket(struct xl_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own MAC address. */
	bcopy(&sc->arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	/* Three bytes of recognizable payload follow the header. */
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IFQ_ENQUEUE(&ifp->if_snd, m);
	xl_start(ifp);
}
#endif
869
870 static void
871 xl_setcfg(struct xl_softc *sc)
872 {
873 u_int32_t icfg;
874
875 /*XL_LOCK_ASSERT(sc);*/
876
877 XL_SEL_WIN(3);
878 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
879 icfg &= ~XL_ICFG_CONNECTOR_MASK;
880 if (sc->xl_media & XL_MEDIAOPT_MII ||
881 sc->xl_media & XL_MEDIAOPT_BT4)
882 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
883 if (sc->xl_media & XL_MEDIAOPT_BTX)
884 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
885
886 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
887 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
888 }
889
890 static void
891 xl_setmode(struct xl_softc *sc, int media)
892 {
893 u_int32_t icfg;
894 u_int16_t mediastat;
895 char *pmsg = "", *dmsg = "";
896
897 /*XL_LOCK_ASSERT(sc);*/
898
899 XL_SEL_WIN(4);
900 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
901 XL_SEL_WIN(3);
902 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
903
904 if (sc->xl_media & XL_MEDIAOPT_BT) {
905 if (IFM_SUBTYPE(media) == IFM_10_T) {
906 pmsg = "10baseT transceiver";
907 sc->xl_xcvr = XL_XCVR_10BT;
908 icfg &= ~XL_ICFG_CONNECTOR_MASK;
909 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
910 mediastat |= XL_MEDIASTAT_LINKBEAT |
911 XL_MEDIASTAT_JABGUARD;
912 mediastat &= ~XL_MEDIASTAT_SQEENB;
913 }
914 }
915
916 if (sc->xl_media & XL_MEDIAOPT_BFX) {
917 if (IFM_SUBTYPE(media) == IFM_100_FX) {
918 pmsg = "100baseFX port";
919 sc->xl_xcvr = XL_XCVR_100BFX;
920 icfg &= ~XL_ICFG_CONNECTOR_MASK;
921 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
922 mediastat |= XL_MEDIASTAT_LINKBEAT;
923 mediastat &= ~XL_MEDIASTAT_SQEENB;
924 }
925 }
926
927 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
928 if (IFM_SUBTYPE(media) == IFM_10_5) {
929 pmsg = "AUI port";
930 sc->xl_xcvr = XL_XCVR_AUI;
931 icfg &= ~XL_ICFG_CONNECTOR_MASK;
932 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
933 mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
934 XL_MEDIASTAT_JABGUARD);
935 mediastat |= ~XL_MEDIASTAT_SQEENB;
936 }
937 if (IFM_SUBTYPE(media) == IFM_10_FL) {
938 pmsg = "10baseFL transceiver";
939 sc->xl_xcvr = XL_XCVR_AUI;
940 icfg &= ~XL_ICFG_CONNECTOR_MASK;
941 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
942 mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
943 XL_MEDIASTAT_JABGUARD);
944 mediastat |= ~XL_MEDIASTAT_SQEENB;
945 }
946 }
947
948 if (sc->xl_media & XL_MEDIAOPT_BNC) {
949 if (IFM_SUBTYPE(media) == IFM_10_2) {
950 pmsg = "AUI port";
951 sc->xl_xcvr = XL_XCVR_COAX;
952 icfg &= ~XL_ICFG_CONNECTOR_MASK;
953 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
954 mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
955 XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
956 }
957 }
958
959 if ((media & IFM_GMASK) == IFM_FDX ||
960 IFM_SUBTYPE(media) == IFM_100_FX) {
961 dmsg = "full";
962 XL_SEL_WIN(3);
963 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
964 } else {
965 dmsg = "half";
966 XL_SEL_WIN(3);
967 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
968 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
969 }
970
971 if (IFM_SUBTYPE(media) == IFM_10_2)
972 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
973 else
974 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
975
976 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
977 XL_SEL_WIN(4);
978 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
979
980 DELAY(800);
981 XL_SEL_WIN(7);
982
983 if_printf(&sc->arpcom.ac_if, "selecting %s, %s duplex\n", pmsg, dmsg);
984 }
985
/*
 * Issue a global reset to the chip, wait (bounded) for it to complete,
 * then reset the TX and RX engines and re-apply the inverted LED/MII
 * power options for adapters that need them.
 */
static void
xl_reset(struct xl_softc *sc)
{
	register int i;

	XL_LOCK_ASSERT(sc);

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD:0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C cardbus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	/* Bounded poll for the command-busy bit to clear. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "reset didn't complete\n");

	/* Reset TX and RX. */
	/*
	 * Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/*
	 * Re-apply the LED/MII power inversion reset options on adapters
	 * flagged for them (presumably cleared by the global reset).
	 */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
		    CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
		    ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
		     XL_RESETOPT_INVERT_LED : 0) |
		    ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
		     XL_RESETOPT_INVERT_MII : 0));
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}
1044
1045 /*
1046 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1047 * IDs against our list and return a device name if we find a match.
1048 */
1049 static int
1050 xl_probe(device_t dev)
1051 {
1052 struct xl_type *t;
1053
1054 t = xl_devs;
1055
1056 while (t->xl_name != NULL) {
1057 if ((pci_get_vendor(dev) == t->xl_vid) &&
1058 (pci_get_device(dev) == t->xl_did)) {
1059 device_set_desc(dev, t->xl_name);
1060 return (BUS_PROBE_DEFAULT);
1061 }
1062 t++;
1063 }
1064
1065 return (ENXIO);
1066 }
1067
1068 /*
1069 * This routine is a kludge to work around possible hardware faults
1070 * or manufacturing defects that can cause the media options register
1071 * (or reset options register, as it's called for the first generation
1072 * 3c90x adapters) to return an incorrect result. I have encountered
1073 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1074 * which doesn't have any of the 'mediaopt' bits set. This screws up
1075 * the attach routine pretty badly because it doesn't know what media
1076 * to look for. If we find ourselves in this predicament, this routine
1077 * will try to guess the media options values and warn the user of a
1078 * possible manufacturing defect with his adapter/system/whatever.
1079 */
/*
 * xl_mediacheck: validate the media-options and transceiver values
 * read from the hardware/EEPROM; if they look bogus, fall through to
 * xl_choose_xcvr() to pick replacements based on the card's product
 * ID.  Caller must hold the driver lock.
 */
1080 static void
1081 xl_mediacheck(struct xl_softc *sc)
1082 {
1083
1084 XL_LOCK_ASSERT(sc);
1085
1086 /*
1087 * If some of the media options bits are set, assume they are
1088 * correct. If not, try to figure it out down below.
1089 * XXX I should check for 10baseFL, but I don't have an adapter
1090 * to test with.
1091 */
1092 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1093 /*
1094 * Check the XCVR value. If it's not in the normal range
1095 * of values, we need to fake it up here.
1096 */
1097 if (sc->xl_xcvr <= XL_XCVR_AUTO)
1098 return;
1099 else {
1100 if_printf(&sc->arpcom.ac_if,
1101 "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
1102 if_printf(&sc->arpcom.ac_if,
1103 "choosing new default based on card type\n");
1104 }
1105 } else {
/* A 905B-class card reporting only the 10baseFL bit is accepted as-is. */
1106 if (sc->xl_type == XL_TYPE_905B &&
1107 sc->xl_media & XL_MEDIAOPT_10FL)
1108 return;
1109 if_printf(&sc->arpcom.ac_if,
1110 "WARNING: no media options bits set in the media options register!!\n");
1111 if_printf(&sc->arpcom.ac_if,
1112 "this could be a manufacturing defect in your adapter or system\n");
1113 if_printf(&sc->arpcom.ac_if,
1114 "attempting to guess media type; you should probably consult your vendor\n");
1115 }
1116
/* Either no media bits or a bogus xcvr value: guess from the product ID (verbose). */
1117 xl_choose_xcvr(sc, 1);
1118 }
1119
/*
 * xl_choose_xcvr: derive the media options word and default
 * transceiver type from the product ID stored in the EEPROM.  Used
 * when the media options register cannot be trusted (see
 * xl_mediacheck()) or when the EEPROM selects "auto" on a card that
 * cannot do autoselection.  If 'verbose' is non-zero, the guess is
 * reported on the console.
 */
1120 static void
1121 xl_choose_xcvr(struct xl_softc *sc, int verbose)
1122 {
1123 u_int16_t devid;
1124
1125 /*
1126 * Read the device ID from the EEPROM.
1127 * This is what's loaded into the PCI device ID register, so it has
1128 * to be correct otherwise we wouldn't have gotten this far.
1129 */
1130 xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
1131
1132 switch (devid) {
1133 case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */
1134 case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */
1135 sc->xl_media = XL_MEDIAOPT_BT;
1136 sc->xl_xcvr = XL_XCVR_10BT;
1137 if (verbose)
1138 if_printf(&sc->arpcom.ac_if,
1139 "guessing 10BaseT transceiver\n");
1140 break;
1141 case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */
1142 case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */
1143 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1144 sc->xl_xcvr = XL_XCVR_10BT;
1145 if (verbose)
1146 if_printf(&sc->arpcom.ac_if,
1147 "guessing COMBO (AUI/BNC/TP)\n");
1148 break;
1149 case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */
1150 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
1151 sc->xl_xcvr = XL_XCVR_10BT;
1152 if (verbose)
1153 if_printf(&sc->arpcom.ac_if, "guessing TPC (BNC/TP)\n");
1154 break;
1155 case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */
1156 sc->xl_media = XL_MEDIAOPT_10FL;
1157 sc->xl_xcvr = XL_XCVR_AUI;
1158 if (verbose)
1159 if_printf(&sc->arpcom.ac_if, "guessing 10baseFL\n");
1160 break;
1161 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */
1162 case TC_DEVICEID_HURRICANE_555: /* 3c555 */
1163 case TC_DEVICEID_HURRICANE_556: /* 3c556 */
1164 case TC_DEVICEID_HURRICANE_556B: /* 3c556B */
1165 case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */
1166 case TC_DEVICEID_HURRICANE_575B: /* 3c575B */
1167 case TC_DEVICEID_HURRICANE_575C: /* 3c575C */
1168 case TC_DEVICEID_HURRICANE_656: /* 3c656 */
1169 case TC_DEVICEID_HURRICANE_656B: /* 3c656B */
1170 case TC_DEVICEID_TORNADO_656C: /* 3c656C */
1171 case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
1172 case TC_DEVICEID_TORNADO_10_100BT_920B_WNM: /* 3c920B-EMB-WNM */
1173 sc->xl_media = XL_MEDIAOPT_MII;
1174 sc->xl_xcvr = XL_XCVR_MII;
1175 if (verbose)
1176 if_printf(&sc->arpcom.ac_if, "guessing MII\n");
1177 break;
1178 case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */
1179 case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */
1180 sc->xl_media = XL_MEDIAOPT_BT4;
1181 sc->xl_xcvr = XL_XCVR_MII;
1182 if (verbose)
1183 if_printf(&sc->arpcom.ac_if,
1184 "guessing 100baseT4/MII\n");
1185 break;
1186 case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */
1187 case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
1188 case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
1189 case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */
1190 case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */
1191 case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */
1192 sc->xl_media = XL_MEDIAOPT_BTX;
1193 sc->xl_xcvr = XL_XCVR_AUTO;
1194 if (verbose)
1195 if_printf(&sc->arpcom.ac_if,
1196 "guessing 10/100 internal\n");
1197 break;
1198 case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */
1199 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1200 sc->xl_xcvr = XL_XCVR_AUTO;
1201 if (verbose)
1202 if_printf(&sc->arpcom.ac_if,
1203 "guessing 10/100 plus BNC/AUI\n");
1204 break;
1205 default:
/* Unrecognized product ID: fall back to plain 10baseT (xcvr left unchanged). */
1206 if_printf(&sc->arpcom.ac_if,
1207 "unknown device ID: %x -- defaulting to 10baseT\n", devid);
1208 sc->xl_media = XL_MEDIAOPT_BT;
1209 break;
1210 }
1211 }
1212
1213 /*
1214 * Attach the interface. Allocate softc structures, do ifmedia
1215 * setup and ethernet/BPF attach.
1216 */
/*
 * xl_attach: set up per-device quirk flags (keyed off the PCI device
 * ID), map the register BAR (memory-mapped if the chip allows it,
 * port I/O otherwise), allocate the interrupt and the RX/TX/mbuf DMA
 * resources, determine the chip generation and supported media, and
 * register with the network stack.  Returns 0 on success or an errno;
 * on any failure the fail: path calls xl_detach() to release whatever
 * was allocated up to that point.
 */
1217 static int
1218 xl_attach(device_t dev)
1219 {
1220 u_char eaddr[ETHER_ADDR_LEN];
1221 u_int16_t xcvr[2];
1222 struct xl_softc *sc;
1223 struct ifnet *ifp;
1224 int media;
1225 int unit, error = 0, rid, res;
1226 uint16_t did;
1227
1228 sc = device_get_softc(dev);
1229 unit = device_get_unit(dev);
1230
1231 mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1232 MTX_DEF);
1233 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
1234
1235 did = pci_get_device(dev);
1236
/* Per-device quirk flags, keyed off the PCI device ID. */
1237 sc->xl_flags = 0;
1238 if (did == TC_DEVICEID_HURRICANE_555)
1239 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
1240 if (did == TC_DEVICEID_HURRICANE_556 ||
1241 did == TC_DEVICEID_HURRICANE_556B)
1242 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1243 XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
1244 XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
1245 if (did == TC_DEVICEID_HURRICANE_555 ||
1246 did == TC_DEVICEID_HURRICANE_556)
1247 sc->xl_flags |= XL_FLAG_8BITROM;
1248 if (did == TC_DEVICEID_HURRICANE_556B)
1249 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
1250
1251 if (did == TC_DEVICEID_HURRICANE_575A ||
1252 did == TC_DEVICEID_HURRICANE_575B ||
1253 did == TC_DEVICEID_HURRICANE_575C ||
1254 did == TC_DEVICEID_HURRICANE_656B ||
1255 did == TC_DEVICEID_TORNADO_656C)
1256 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1257 XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
1258 if (did == TC_DEVICEID_HURRICANE_656)
1259 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
1260 if (did == TC_DEVICEID_HURRICANE_575B)
1261 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
1262 if (did == TC_DEVICEID_HURRICANE_575C)
1263 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1264 if (did == TC_DEVICEID_TORNADO_656C)
1265 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1266 if (did == TC_DEVICEID_HURRICANE_656 ||
1267 did == TC_DEVICEID_HURRICANE_656B)
1268 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
1269 XL_FLAG_INVERT_LED_PWR;
1270 if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
1271 did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
1272 sc->xl_flags |= XL_FLAG_PHYOK;
1273
/* These IDs must not use memory-mapped register access. */
1274 switch (did) {
1275 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */
1276 case TC_DEVICEID_HURRICANE_575A:
1277 case TC_DEVICEID_HURRICANE_575B:
1278 case TC_DEVICEID_HURRICANE_575C:
1279 sc->xl_flags |= XL_FLAG_NO_MMIO;
1280 break;
1281 default:
1282 break;
1283 }
1284
1285 /*
1286 * Map control/status registers.
1287 */
1288 pci_enable_busmaster(dev);
1289
1290 if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
1291 rid = XL_PCI_LOMEM;
1292 res = SYS_RES_MEMORY;
1293
1294 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
1295 }
1296
/* Fall back to port I/O if the memory BAR was skipped or failed. */
1297 if (sc->xl_res != NULL) {
1298 sc->xl_flags |= XL_FLAG_USE_MMIO;
1299 if (bootverbose)
1300 device_printf(dev, "using memory mapped I/O\n");
1301 } else {
1302 rid = XL_PCI_LOIO;
1303 res = SYS_RES_IOPORT;
1304 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
1305 if (sc->xl_res == NULL) {
1306 device_printf(dev, "couldn't map ports/memory\n");
1307 error = ENXIO;
1308 goto fail;
1309 }
1310 if (bootverbose)
1311 device_printf(dev, "using port I/O\n");
1312 }
1313
1314 sc->xl_btag = rman_get_bustag(sc->xl_res);
1315 sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
1316
/* Boards with a function register window need a second memory BAR. */
1317 if (sc->xl_flags & XL_FLAG_FUNCREG) {
1318 rid = XL_PCI_FUNCMEM;
1319 sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1320 RF_ACTIVE);
1321
1322 if (sc->xl_fres == NULL) {
1323 device_printf(dev, "couldn't map ports/memory\n");
1324 error = ENXIO;
1325 goto fail;
1326 }
1327
1328 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
1329 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
1330 }
1331
1332 /* Allocate interrupt */
1333 rid = 0;
1334 sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1335 RF_SHAREABLE | RF_ACTIVE);
1336 if (sc->xl_irq == NULL) {
1337 device_printf(dev, "couldn't map interrupt\n");
1338 error = ENXIO;
1339 goto fail;
1340 }
1341
1342 /* Initialize interface name. */
1343 ifp = &sc->arpcom.ac_if;
1344 ifp->if_softc = sc;
1345 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1346
1347 XL_LOCK(sc);
1348
1349 /* Reset the adapter. */
1350 xl_reset(sc);
1351
1352 /*
1353 * Get station address from the EEPROM.
1354 */
1355 if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
1356 device_printf(dev, "failed to read station address\n");
1357 error = ENXIO;
1358 XL_UNLOCK(sc);
1359 goto fail;
1360 }
1361
1362 XL_UNLOCK(sc);
1363
1364 sc->xl_unit = unit;
1365 callout_handle_init(&sc->xl_stat_ch);
1366 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1367
1368 /*
1369 * Now allocate a tag for the DMA descriptor lists and a chunk
1370 * of DMA-able memory based on the tag. Also obtain the DMA
1371 * addresses of the RX and TX ring, which we'll need later.
1372 * All of our lists are allocated as a contiguous block
1373 * of memory.
1374 */
1375 error = bus_dma_tag_create(NULL, 8, 0,
1376 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1377 XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
1378 &sc->xl_ldata.xl_rx_tag);
1379 if (error) {
1380 device_printf(dev, "failed to allocate rx dma tag\n");
1381 goto fail;
1382 }
1383
1384 error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
1385 (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1386 &sc->xl_ldata.xl_rx_dmamap);
1387 if (error) {
1388 device_printf(dev, "no memory for rx list buffers!\n");
1389 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1390 sc->xl_ldata.xl_rx_tag = NULL;
1391 goto fail;
1392 }
1393
1394 error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
1395 sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
1396 XL_RX_LIST_SZ, xl_dma_map_addr,
1397 &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
1398 if (error) {
1399 device_printf(dev, "cannot get dma address of the rx ring!\n");
1400 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1401 sc->xl_ldata.xl_rx_dmamap);
1402 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1403 sc->xl_ldata.xl_rx_tag = NULL;
1404 goto fail;
1405 }
1406
/* Same tag/alloc/load sequence for the TX ring. */
1407 error = bus_dma_tag_create(NULL, 8, 0,
1408 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1409 XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
1410 &sc->xl_ldata.xl_tx_tag);
1411 if (error) {
1412 device_printf(dev, "failed to allocate tx dma tag\n");
1413 goto fail;
1414 }
1415
1416 error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
1417 (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1418 &sc->xl_ldata.xl_tx_dmamap);
1419 if (error) {
1420 device_printf(dev, "no memory for list buffers!\n");
1421 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1422 sc->xl_ldata.xl_tx_tag = NULL;
1423 goto fail;
1424 }
1425
1426 error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
1427 sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
1428 XL_TX_LIST_SZ, xl_dma_map_addr,
1429 &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
1430 if (error) {
1431 device_printf(dev, "cannot get dma address of the tx ring!\n");
1432 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1433 sc->xl_ldata.xl_tx_dmamap);
1434 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1435 sc->xl_ldata.xl_tx_tag = NULL;
1436 goto fail;
1437 }
1438
1439 /*
1440 * Allocate a DMA tag for the mapping of mbufs.
1441 */
1442 error = bus_dma_tag_create(NULL, 1, 0,
1443 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1444 MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
1445 NULL, &sc->xl_mtag);
1446 if (error) {
1447 device_printf(dev, "failed to allocate mbuf dma tag\n");
1448 goto fail;
1449 }
1450
1451 /* We need a spare DMA map for the RX ring. */
1452 error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
1453 if (error)
1454 goto fail;
1455
1456 XL_LOCK(sc);
1457
1458 /*
1459 * Figure out the card type. 3c905B adapters have the
1460 * 'supportsNoTxLength' bit set in the capabilities
1461 * word in the EEPROM.
1462 * Note: my 3c575C cardbus card lies. It returns a value
1463 * of 0x1578 for its capabilities word, which is somewhat
1464 * nonsensical. Another way to distinguish a 3c90x chip
1465 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
1466 * bit. This will only be set for 3c90x boomerage chips.
1467 */
1468 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
1469 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
1470 !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
1471 sc->xl_type = XL_TYPE_905B;
1472 else
1473 sc->xl_type = XL_TYPE_90X;
1474
/* Fill in the ifnet: methods, capabilities, and queue sizing. */
1475 ifp->if_mtu = ETHERMTU;
1476 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1477 ifp->if_ioctl = xl_ioctl;
1478 ifp->if_capabilities = IFCAP_VLAN_MTU;
1479 if (sc->xl_type == XL_TYPE_905B) {
1480 ifp->if_hwassist = XL905B_CSUM_FEATURES;
1481 #ifdef XL905B_TXCSUM_BROKEN
1482 ifp->if_capabilities |= IFCAP_RXCSUM;
1483 #else
1484 ifp->if_capabilities |= IFCAP_HWCSUM;
1485 #endif
1486 }
1487 ifp->if_start = xl_start;
1488 ifp->if_watchdog = xl_watchdog;
1489 ifp->if_init = xl_init;
1490 ifp->if_baudrate = 10000000;
1491 IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
1492 ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
1493 IFQ_SET_READY(&ifp->if_snd);
1494 ifp->if_capenable = ifp->if_capabilities;
1495
1496 /*
1497 * Now we have to see what sort of media we have.
1498 * This includes probing for an MII interace and a
1499 * possible PHY.
1500 */
1501 XL_SEL_WIN(3);
1502 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
1503 if (bootverbose)
1504 device_printf(dev, "media options word: %x\n", sc->xl_media);
1505
/* The transceiver selection lives in the connector field of the EEPROM's internal-config doubleword. */
1506 xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
1507 sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
1508 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
1509 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
1510
1511 xl_mediacheck(sc);
1512
1513 /* XXX Downcalls to ifmedia, miibus about to happen. */
1514 XL_UNLOCK(sc);
1515
/* MII-class media: let miibus drive media selection and skip the manual ifmedia setup below. */
1516 if (sc->xl_media & XL_MEDIAOPT_MII ||
1517 sc->xl_media & XL_MEDIAOPT_BTX ||
1518 sc->xl_media & XL_MEDIAOPT_BT4) {
1519 if (bootverbose)
1520 device_printf(dev, "found MII/AUTO\n");
1521 xl_setcfg(sc);
1522 if (mii_phy_probe(dev, &sc->xl_miibus,
1523 xl_ifmedia_upd, xl_ifmedia_sts)) {
1524 device_printf(dev, "no PHY found!\n");
1525 error = ENXIO;
1526 goto fail;
1527 }
1528 goto done;
1529 }
1530
1531 /*
1532 * Sanity check. If the user has selected "auto" and this isn't
1533 * a 10/100 card of some kind, we need to force the transceiver
1534 * type to something sane.
1535 */
1536 if (sc->xl_xcvr == XL_XCVR_AUTO) {
1537 /* XXX Direct hardware access needs lock coverage. */
1538 XL_LOCK(sc);
1539 xl_choose_xcvr(sc, bootverbose);
1540 XL_UNLOCK(sc);
1541 }
1542
1543 /*
1544 * Do ifmedia setup.
1545 */
1546 if (sc->xl_media & XL_MEDIAOPT_BT) {
1547 if (bootverbose)
1548 device_printf(dev, "found 10baseT\n");
1549 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1550 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1551 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1552 ifmedia_add(&sc->ifmedia,
1553 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1554 }
1555
1556 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1557 /*
1558 * Check for a 10baseFL board in disguise.
1559 */
1560 if (sc->xl_type == XL_TYPE_905B &&
1561 sc->xl_media == XL_MEDIAOPT_10FL) {
1562 if (bootverbose)
1563 device_printf(dev, "found 10baseFL\n");
1564 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
1565 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
1566 0, NULL);
1567 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1568 ifmedia_add(&sc->ifmedia,
1569 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
1570 } else {
1571 if (bootverbose)
1572 device_printf(dev, "found AUI\n");
1573 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1574 }
1575 }
1576
1577 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1578 if (bootverbose)
1579 device_printf(dev, "found BNC\n");
1580 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
1581 }
1582
1583 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1584 if (bootverbose)
1585 device_printf(dev, "found 100baseFX\n");
1586 ifp->if_baudrate = 100000000;
1587 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
1588 }
1589
1590 /* XXX: Unlocked, leaf will take lock. */
1591 media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1592 xl_choose_media(sc, &media);
1593
1594 if (sc->xl_miibus == NULL)
1595 ifmedia_set(&sc->ifmedia, media);
1596
1597 done:
1598 /* XXX: Unlocked hardware access, narrow race. */
1599 if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
1600 XL_SEL_WIN(0);
1601 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
1602 }
1603
1604 /*
1605 * Call MI attach routine.
1606 */
1607 ether_ifattach(ifp, eaddr);
1608
1609 error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1610 xl_intr, sc, &sc->xl_intrhand);
1611 if (error) {
1612 device_printf(dev, "couldn't set up irq\n");
1613 ether_ifdetach(ifp);
1614 goto fail;
1615 }
1616
/* On any failure above, xl_detach() releases everything acquired so far. */
1617 fail:
1618 if (error)
1619 xl_detach(dev);
1620
1621 return (error);
1622 }
1623
1624 /*
1625 * Choose a default media.
1626 * XXX This is a leaf function only called by xl_attach() and
1627 * acquires/releases the non-recursible driver mutex.
1628 */
1629 static void
1630 xl_choose_media(struct xl_softc *sc, int *media)
1631 {
1632
1633 XL_LOCK(sc);
1634
1635 switch (sc->xl_xcvr) {
1636 case XL_XCVR_10BT:
1637 *media = IFM_ETHER|IFM_10_T;
1638 xl_setmode(sc, *media);
1639 break;
1640 case XL_XCVR_AUI:
1641 if (sc->xl_type == XL_TYPE_905B &&
1642 sc->xl_media == XL_MEDIAOPT_10FL) {
1643 *media = IFM_ETHER|IFM_10_FL;
1644 xl_setmode(sc, *media);
1645 } else {
1646 *media = IFM_ETHER|IFM_10_5;
1647 xl_setmode(sc, *media);
1648 }
1649 break;
1650 case XL_XCVR_COAX:
1651 *media = IFM_ETHER|IFM_10_2;
1652 xl_setmode(sc, *media);
1653 break;
1654 case XL_XCVR_AUTO:
1655 case XL_XCVR_100BTX:
1656 case XL_XCVR_MII:
1657 /* Chosen by miibus */
1658 break;
1659 case XL_XCVR_100BFX:
1660 *media = IFM_ETHER|IFM_100_FX;
1661 break;
1662 default:
1663 if_printf(&sc->arpcom.ac_if, "unknown XCVR type: %d\n",
1664 sc->xl_xcvr);
1665 /*
1666 * This will probably be wrong, but it prevents
1667 * the ifmedia code from panicking.
1668 */
1669 *media = IFM_ETHER|IFM_10_T;
1670 break;
1671 }
1672
1673 XL_UNLOCK(sc);
1674 }
1675
1676 /*
1677 * Shutdown hardware and free up resources. This can be called any
1678 * time after the mutex has been initialized. It is called in both
1679 * the error case in attach and the normal detach case so it needs
1680 * to be careful about only freeing resources that have actually been
1681 * allocated.
1682 */
/*
 * xl_detach: release all resources.  Usable both as the normal
 * detach method and from the xl_attach() failure path, so each
 * release below is guarded by a check that the resource was
 * actually allocated.  Always returns 0.
 */
1683 static int
1684 xl_detach(device_t dev)
1685 {
1686 struct xl_softc *sc;
1687 struct ifnet *ifp;
1688 int rid, res;
1689
1690 sc = device_get_softc(dev);
1691 ifp = &sc->arpcom.ac_if;
1692
1693 KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
1694 XL_LOCK(sc);
1695
/* Pick the BAR that attach actually mapped (MMIO vs port I/O). */
1696 if (sc->xl_flags & XL_FLAG_USE_MMIO) {
1697 rid = XL_PCI_LOMEM;
1698 res = SYS_RES_MEMORY;
1699 } else {
1700 rid = XL_PCI_LOIO;
1701 res = SYS_RES_IOPORT;
1702 }
1703
1704 /* These should only be active if attach succeeded */
1705 if (device_is_attached(dev)) {
1706 xl_reset(sc);
1707 xl_stop(sc);
1708 ether_ifdetach(ifp);
1709 }
1710 if (sc->xl_miibus)
1711 device_delete_child(dev, sc->xl_miibus);
1712 bus_generic_detach(dev);
1713 ifmedia_removeall(&sc->ifmedia);
1714
/* Tear down the interrupt before releasing the bus resources. */
1715 if (sc->xl_intrhand)
1716 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
1717 if (sc->xl_irq)
1718 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
1719 if (sc->xl_fres != NULL)
1720 bus_release_resource(dev, SYS_RES_MEMORY,
1721 XL_PCI_FUNCMEM, sc->xl_fres);
1722 if (sc->xl_res)
1723 bus_release_resource(dev, res, rid, sc->xl_res);
1724
/* Free DMA resources: mbuf tag/spare map, then RX and TX rings (unload, free, destroy tag). */
1725 if (sc->xl_mtag) {
1726 bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
1727 bus_dma_tag_destroy(sc->xl_mtag);
1728 }
1729 if (sc->xl_ldata.xl_rx_tag) {
1730 bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
1731 sc->xl_ldata.xl_rx_dmamap);
1732 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1733 sc->xl_ldata.xl_rx_dmamap);
1734 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1735 }
1736 if (sc->xl_ldata.xl_tx_tag) {
1737 bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
1738 sc->xl_ldata.xl_tx_dmamap);
1739 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1740 sc->xl_ldata.xl_tx_dmamap);
1741 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1742 }
1743
1744 XL_UNLOCK(sc);
1745 mtx_destroy(&sc->xl_mtx);
1746
1747 return (0);
1748 }
1749
1750 /*
1751 * Initialize the transmit descriptors.
1752 */
/*
 * xl_list_tx_init: build the TX descriptor chain as a simple
 * NULL-terminated forward list and create a DMA map per descriptor
 * (the 90xB variant below builds a circular ring instead).
 * Returns 0 or the bus_dmamap_create() error.  Lock must be held.
 */
1753 static int
1754 xl_list_tx_init(struct xl_softc *sc)
1755 {
1756 struct xl_chain_data *cd;
1757 struct xl_list_data *ld;
1758 int error, i;
1759
1760 XL_LOCK_ASSERT(sc);
1761
1762 cd = &sc->xl_cdata;
1763 ld = &sc->xl_ldata;
1764 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1765 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1766 error = bus_dmamap_create(sc->xl_mtag, 0,
1767 &cd->xl_tx_chain[i].xl_map);
1768 if (error)
1769 return (error);
/* Bus address of descriptor i within the contiguous ring allocation. */
1770 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1771 i * sizeof(struct xl_list);
1772 if (i == (XL_TX_LIST_CNT - 1))
1773 cd->xl_tx_chain[i].xl_next = NULL;
1774 else
1775 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1776 }
1777
/* All descriptors start on the free list; nothing queued yet. */
1778 cd->xl_tx_free = &cd->xl_tx_chain[0];
1779 cd->xl_tx_tail = cd->xl_tx_head = NULL;
1780
1781 bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1782 return (0);
1783 }
1784
1785 /*
1786 * Initialize the transmit descriptors (90xB variant: the chain is
1787 * arranged as a circular ring with both next and prev links, and is
1788 * tracked with producer/consumer indices rather than a free list).
1789 */
/*
 * xl_list_tx_init_90xB: build the TX descriptor chain as a circular
 * doubly-linked ring (next and prev pointers wrap around) and create
 * a DMA map per descriptor.  Returns 0 or the bus_dmamap_create()
 * error.  Lock must be held.
 */
1788 static int
1789 xl_list_tx_init_90xB(struct xl_softc *sc)
1790 {
1791 struct xl_chain_data *cd;
1792 struct xl_list_data *ld;
1793 int error, i;
1794
1795 XL_LOCK_ASSERT(sc);
1796
1797 cd = &sc->xl_cdata;
1798 ld = &sc->xl_ldata;
1799 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1800 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1801 error = bus_dmamap_create(sc->xl_mtag, 0,
1802 &cd->xl_tx_chain[i].xl_map);
1803 if (error)
1804 return (error);
1805 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1806 i * sizeof(struct xl_list);
/* Wrap the next link of the last entry, and the prev link of the first, to close the ring. */
1807 if (i == (XL_TX_LIST_CNT - 1))
1808 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1809 else
1810 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1811 if (i == 0)
1812 cd->xl_tx_chain[i].xl_prev =
1813 &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1814 else
1815 cd->xl_tx_chain[i].xl_prev =
1816 &cd->xl_tx_chain[i - 1];
1817 }
1818
/* Clear the ring and mark descriptor 0 empty; producer and consumer start at index 1 with zero in flight. */
1819 bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
1820 ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1821
1822 cd->xl_tx_prod = 1;
1823 cd->xl_tx_cons = 1;
1824 cd->xl_tx_cnt = 0;
1825
1826 bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1827 return (0);
1828 }
1829
1830 /*
1831 * Initialize the RX descriptors and allocate mbufs for them. Note that
1832 * we arrange the descriptors in a closed ring, so that the last descriptor
1833 * points back to the first.
1834 */
/*
 * xl_list_rx_init: create a DMA map and attach an mbuf cluster to
 * each RX descriptor (via xl_newbuf()), then link both the software
 * chain and the hardware next pointers into a closed ring.  Returns
 * 0 or the first error encountered.  Lock must be held.
 */
1835 static int
1836 xl_list_rx_init(struct xl_softc *sc)
1837 {
1838 struct xl_chain_data *cd;
1839 struct xl_list_data *ld;
1840 int error, i, next;
1841 u_int32_t nextptr;
1842
1843 XL_LOCK_ASSERT(sc);
1844
1845 cd = &sc->xl_cdata;
1846 ld = &sc->xl_ldata;
1847
1848 for (i = 0; i < XL_RX_LIST_CNT; i++) {
1849 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
1850 error = bus_dmamap_create(sc->xl_mtag, 0,
1851 &cd->xl_rx_chain[i].xl_map);
1852 if (error)
1853 return (error);
1854 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
1855 if (error)
1856 return (error);
/* The last descriptor points back at the first, closing the ring. */
1857 if (i == (XL_RX_LIST_CNT - 1))
1858 next = 0;
1859 else
1860 next = i + 1;
/* Hardware next pointer is the bus address of the next descriptor. */
1861 nextptr = ld->xl_rx_dmaaddr +
1862 next * sizeof(struct xl_list_onefrag);
1863 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
1864 ld->xl_rx_list[i].xl_next = htole32(nextptr);
1865 }
1866
1867 bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1868 cd->xl_rx_head = &cd->xl_rx_chain[0];
1869
1870 return (0);
1871 }
1872
1873 /*
1874 * Initialize an RX descriptor and attach an MBUF cluster.
1875 * If we fail to do so, we need to leave the old mbuf and
1876 * the old DMA map untouched so that it can be reused.
1877 */
/*
 * xl_newbuf: attach a fresh mbuf cluster to RX descriptor 'c'.
 * Returns 0 on success, ENOBUFS or a DMA error otherwise; on failure
 * the descriptor's old mbuf and map are left untouched so they can
 * be reused.  Lock must be held.
 */
1878 static int
1879 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1880 {
1881 struct mbuf *m_new = NULL;
1882 bus_dmamap_t map;
1883 int error;
1884 u_int32_t baddr;
1885
1886 XL_LOCK_ASSERT(sc);
1887
1888 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1889 if (m_new == NULL)
1890 return (ENOBUFS);
1891
1892 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1893
1894 /* Force longword alignment for packet payload. */
1895 m_adj(m_new, ETHER_ALIGN);
1896
/* Load the new mbuf into the spare map first; the descriptor's own map is not touched unless this succeeds. */
1897 error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
1898 xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
1899 if (error) {
1900 m_freem(m_new);
1901 if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
1902 error);
1903 return (error);
1904 }
1905
/* Load succeeded: swap the loaded spare map into the descriptor and keep the old map as the new spare. */
1906 bus_dmamap_unload(sc->xl_mtag, c->xl_map);
1907 map = c->xl_map;
1908 c->xl_map = sc->xl_tmpmap;
1909 sc->xl_tmpmap = map;
1910 c->xl_mbuf = m_new;
/* Single-fragment descriptor: length with LAST_FRAG set, status cleared, buffer bus address filled in. */
1911 c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
1912 c->xl_ptr->xl_status = 0;
1913 c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
1914 bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
1915 return (0);
1916 }
1917
1918 static int
1919 xl_rx_resync(struct xl_softc *sc)
1920 {
1921 struct xl_chain_onefrag *pos;
1922 int i;
1923
1924 XL_LOCK_ASSERT(sc);
1925
1926 pos = sc->xl_cdata.xl_rx_head;
1927
1928 for (i = 0; i < XL_RX_LIST_CNT; i++) {
1929 if (pos->xl_ptr->xl_status)
1930 break;
1931 pos = pos->xl_next;
1932 }
1933
1934 if (i == XL_RX_LIST_CNT)
1935 return (0);
1936
1937 sc->xl_cdata.xl_rx_head = pos;
1938
1939 return (EAGAIN);
1940 }
1941
1942 /*
1943 * A frame has been uploaded: pass the resulting mbuf chain up to
1944 * the higher level protocols.
1945 */
/*
 * xl_rxeof: drain completed RX descriptors.  For each descriptor
 * with a non-zero status word: validate it, replace its mbuf via
 * xl_newbuf(), and hand the received packet to the stack with the
 * driver lock dropped.  Errors leave the old mbuf in place for
 * reuse.  Finally, if the upload engine has stalled at the end of
 * the ring, reload the list pointer and restart it.  Called with
 * the driver lock held.
 */
1946 static void
1947 xl_rxeof(struct xl_softc *sc)
1948 {
1949 struct mbuf *m;
1950 struct ifnet *ifp = &sc->arpcom.ac_if;
1951 struct xl_chain_onefrag *cur_rx;
1952 int total_len = 0;
1953 u_int32_t rxstat;
1954
1955 XL_LOCK_ASSERT(sc);
1956 again:
1957 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
1958 BUS_DMASYNC_POSTREAD);
/* A zero status word means the descriptor has not completed yet. */
1959 while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
1960 cur_rx = sc->xl_cdata.xl_rx_head;
1961 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1962 total_len = rxstat & XL_RXSTAT_LENMASK;
1963
1964 /*
1965 * Since we have told the chip to allow large frames,
1966 * we need to trap giant frame errors in software. We allow
1967 * a little more than the normal frame size to account for
1968 * frames with VLAN tags.
1969 */
1970 if (total_len > XL_MAX_FRAMELEN)
1971 rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1972
1973 /*
1974 * If an error occurs, update stats, clear the
1975 * status word and leave the mbuf cluster in place:
1976 * it should simply get re-used next time this descriptor
1977 * comes up in the ring.
1978 */
1979 if (rxstat & XL_RXSTAT_UP_ERROR) {
1980 ifp->if_ierrors++;
1981 cur_rx->xl_ptr->xl_status = 0;
1982 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
1983 sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1984 continue;
1985 }
1986
1987 /*
1988 * If the error bit was not set, the upload complete
1989 * bit should be set which means we have a valid packet.
1990 * If not, something truly strange has happened.
1991 */
1992 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1993 if_printf(ifp,
1994 "bad receive status -- packet dropped\n");
1995 ifp->if_ierrors++;
1996 cur_rx->xl_ptr->xl_status = 0;
1997 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
1998 sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1999 continue;
2000 }
2001
2002 /* No errors; receive the packet. */
2003 bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
2004 BUS_DMASYNC_POSTREAD);
2005 m = cur_rx->xl_mbuf;
2006
2007 /*
2008 * Try to conjure up a new mbuf cluster. If that
2009 * fails, it means we have an out of memory condition and
2010 * should leave the buffer in place and continue. This will
2011 * result in a lost packet, but there's little else we
2012 * can do in this situation.
2013 */
2014 if (xl_newbuf(sc, cur_rx)) {
2015 ifp->if_ierrors++;
2016 cur_rx->xl_ptr->xl_status = 0;
2017 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
2018 sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
2019 continue;
2020 }
2021 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
2022 sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
2023
2024 ifp->if_ipackets++;
2025 m->m_pkthdr.rcvif = ifp;
2026 m->m_pkthdr.len = m->m_len = total_len;
2027
/* Translate the chip's RX checksum status bits into mbuf csum flags. */
2028 if (ifp->if_capenable & IFCAP_RXCSUM) {
2029 /* Do IP checksum checking. */
2030 if (rxstat & XL_RXSTAT_IPCKOK)
2031 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2032 if (!(rxstat & XL_RXSTAT_IPCKERR))
2033 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2034 if ((rxstat & XL_RXSTAT_TCPCOK &&
2035 !(rxstat & XL_RXSTAT_TCPCKERR)) ||
2036 (rxstat & XL_RXSTAT_UDPCKOK &&
2037 !(rxstat & XL_RXSTAT_UDPCKERR))) {
2038 m->m_pkthdr.csum_flags |=
2039 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2040 m->m_pkthdr.csum_data = 0xffff;
2041 }
2042 }
2043
/* Drop the driver lock while handing the packet up the stack. */
2044 XL_UNLOCK(sc);
2045 (*ifp->if_input)(ifp, m);
2046 XL_LOCK(sc);
2047 }
2048
2049 /*
2050 * Handle the 'end of channel' condition. When the upload
2051 * engine hits the end of the RX ring, it will stall. This
2052 * is our cue to flush the RX ring, reload the uplist pointer
2053 * register and unstall the engine.
2054 * XXX This is actually a little goofy. With the ThunderLAN
2055 * chip, you get an interrupt when the receiver hits the end
2056 * of the receive ring, which tells you exactly when you
2057 * you need to reload the ring pointer. Here we have to
2058 * fake it. I'm mad at myself for not being clever enough
2059 * to avoid the use of a goto here.
2060 */
2061 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
2062 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
2063 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2064 xl_wait(sc);
2065 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2066 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
2067 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2068 goto again;
2069 }
2070 }
2071
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Transmit-completion handler for the original 3c900/3c905
 * ("boomerang") adapters, which keep a software-linked TX list.
 * Completed frames have their mbufs unloaded and freed and their
 * descriptors returned to the free list.  Called with the driver
 * lock held.
 */
static void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain *cur_tx;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	XL_LOCK_ASSERT(sc);

	/* Clear the (watchdog) timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while (sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/*
		 * A non-zero DOWNLIST_PTR means the chip is still
		 * working on this chain; stop reclaiming here.
		 */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		ifp->if_opackets++;

		/* Return the descriptor to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Everything reclaimed: allow new transmissions. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames still pending: if the download engine stalled
		 * or lost its list pointer, restart it at the head of
		 * the remaining chain.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->xl_cdata.xl_tx_head->xl_phys);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}
}
2126
/*
 * Transmit-completion handler for 3c90xB adapters, which use a
 * fixed descriptor ring and set XL_TXSTAT_DL_COMPLETE in each
 * descriptor's status word when the frame has been downloaded.
 * Reclaims every completed descriptor between the consumer and
 * producer indices.  Called with the driver lock held.
 */
static void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int idx;

	XL_LOCK_ASSERT(sc);

	/* Pick up the status words the chip wrote back via DMA. */
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
		    XL_TXSTAT_DL_COMPLETE))
			break;

		if (cur_tx->xl_mbuf != NULL) {
			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
		/* Progress was made; reset the watchdog timer. */
		ifp->if_timer = 0;
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* If anything was reclaimed, there is room to transmit again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}
2167
/*
 * TX 'end of channel' interrupt handler. Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really TX error handler.
 *
 * Drains the TX_STATUS FIFO one entry at a time; each entry is
 * acknowledged (and the FIFO advanced) by writing an arbitrary byte
 * back to XL_TX_STATUS.  On a fatal status the transmitter is reset
 * and restarted from the first unfinished descriptor.  Called with
 * the driver lock held.
 */
static void
xl_txeoc(struct xl_softc *sc)
{
	u_int8_t txstat;

	XL_LOCK_ASSERT(sc);

	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			if_printf(&sc->arpcom.ac_if,
			    "transmission error: %x\n", txstat);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;

					/*
					 * Restart the download engine at the
					 * oldest unreclaimed descriptor and
					 * re-enable download polling.
					 */
					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				/* Boomerang: restart at the list head. */
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->xl_cdata.xl_tx_head->xl_phys);
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				/*
				 * Underrun: raise the TX start threshold so
				 * more of each frame is buffered before
				 * transmission begins.
				 */
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
				if_printf(&sc->arpcom.ac_if,
    "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Benign status: just re-enable and unstall. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}
}
2234
/*
 * Interrupt handler.  Loops acknowledging and dispatching events
 * until no interesting status bits remain set (a reading of 0xFFFF
 * means the card is no longer responding, e.g. it was ejected).
 * Kicks the transmit path afterwards if packets are still queued.
 */
static void
xl_intr(void *arg)
{
	struct xl_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	u_int16_t status;

	XL_LOCK(sc);

	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
	    status != 0xFFFF) {
		/* Ack exactly the bits we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (status & XL_STAT_UP_COMPLETE) {
			int curpkts;

			/*
			 * If xl_rxeof() made no progress, the RX list may
			 * be out of sync with the chip; resync and retry.
			 */
			curpkts = ifp->if_ipackets;
			xl_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		if (status & XL_STAT_TX_COMPLETE) {
			/* TX complete actually signals a transmit error. */
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		if (status & XL_STAT_ADFAIL) {
			/* Host/adapter failure: full reset and reinit. */
			xl_reset(sc);
			xl_init_locked(sc);
		}

		if (status & XL_STAT_STATSOFLOW) {
			/* Drain the stats counters before they wrap. */
			sc->xl_stats_no_timeout = 1;
			xl_stats_update_locked(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	/* Restart transmission if the send queue is non-empty. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (sc->xl_type == XL_TYPE_905B)
			xl_start_90xB_locked(ifp);
		else
			xl_start_locked(ifp);
	}

	XL_UNLOCK(sc);
}
2293
/*
 * XXX: This is an entry point for callout which needs to take the lock.
 * Simply wraps xl_stats_update_locked() with the driver lock.
 */
static void
xl_stats_update(void *xsc)
{
	struct xl_softc *sc = xsc;

	XL_LOCK(sc);
	xl_stats_update_locked(sc);
	XL_UNLOCK(sc);
}
2306
/*
 * Harvest the chip's statistics counters.  Reading the window-6
 * counters (and the window-4 BadSSD register) clears them; they
 * must be drained periodically or the chip raises a statsoflow
 * interrupt.  Also drives the MII tick and re-arms the stats
 * callout unless invoked from the interrupt path
 * (sc->xl_stats_no_timeout set).  Leaves register window 7
 * selected.  Called with the driver lock held.
 */
static void
xl_stats_update_locked(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	XL_LOCK_ASSERT(sc);

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/* The 16 byte-wide counters map directly onto struct xl_stats. */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	XL_SEL_WIN(7);

	/* Re-arm ourselves unless running from the interrupt path. */
	if (!sc->xl_stats_no_timeout)
		sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
}
2353
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * On success the descriptor takes ownership of the mbuf (c->xl_mbuf)
 * and 0 is returned; on failure the mbuf chain is freed and 1 is
 * returned.  Called with the driver lock held.
 */
static int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
	int error;
	u_int32_t status;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	XL_LOCK_ASSERT(sc);

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
	    xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);

	/* EFBIG (too many fragments) is retried below after a defrag. */
	if (error && error != EFBIG) {
		m_freem(m_head);
		if_printf(ifp, "can't map mbuf (error %d)\n", error);
		return (1);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL) {
			m_freem(m_head);
			return (1);
		} else {
			m_head = m_new;
		}

		error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
		    m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_head);
			if_printf(ifp, "can't map mbuf (error %d)\n", error);
			return (1);
		}
	}

	if (sc->xl_type == XL_TYPE_905B) {
		/* Defeat the chip's random-backoff behavior. */
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		/* Request hardware checksum insertion as flagged. */
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & CSUM_IP)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
		c->xl_ptr->xl_status = htole32(status);
	}

	c->xl_mbuf = m_head;
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
	return (0);
}
2429
2430 /*
2431 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2432 * to the mbuf data regions directly in the transmit lists. We also save a
2433 * copy of the pointers since the transmit list fragment pointers are
2434 * physical addresses.
2435 */
2436
2437 static void
2438 xl_start(struct ifnet *ifp)
2439 {
2440 struct xl_softc *sc = ifp->if_softc;
2441
2442 XL_LOCK(sc);
2443
2444 if (sc->xl_type == XL_TYPE_905B)
2445 xl_start_90xB_locked(ifp);
2446 else
2447 xl_start_locked(ifp);
2448
2449 XL_UNLOCK(sc);
2450 }
2451
/*
 * Locked transmit path for the original 3c900/3c905 ("boomerang")
 * adapters.  Dequeues frames from the interface send queue, maps
 * each into a free descriptor, links the descriptors together and
 * hands the chain to the download engine.  Called with the driver
 * lock held.
 */
static void
xl_start_locked(struct ifnet *ifp)
{
	struct xl_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	u_int32_t status;
	int error;

	XL_LOCK_ASSERT(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		/* Try to reclaim completed frames first. */
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	while (sc->xl_cdata.xl_tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap freed the mbuf; reuse this descriptor. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append to the existing chain and clear the interrupt
		 * request on the old tail so only the new tail interrupts.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    htole32(start_tx->xl_phys);
		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);
}
2578
/*
 * Locked transmit path for 3c90xB adapters, which use a fixed
 * descriptor ring with a producer/consumer index pair instead of
 * a software-linked list.  Called with the driver lock held.
 */
static void
xl_start_90xB_locked(struct ifnet *ifp)
{
	struct xl_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	XL_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a few descriptors in reserve. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap freed the mbuf; reuse this descriptor. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Start transmission by linking the new chain into the ring. */
	sc->xl_cdata.xl_tx_prod = idx;
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2659
/*
 * Entry point for (re)initialization: takes the driver lock and
 * defers to xl_init_locked().
 */
static void
xl_init(void *xsc)
{
	struct xl_softc *sc = xsc;

	XL_LOCK(sc);
	xl_init_locked(sc);
	XL_UNLOCK(sc);
}
2669
/*
 * Initialize the adapter: stop any pending I/O, reset the RX/TX
 * engines, program the station address and RX filter, build the
 * descriptor rings, load their physical addresses into the chip,
 * enable interrupts and bring the receiver/transmitter online.
 * Leaves register window 7 selected and the stats callout armed.
 * Called with the driver lock held.
 */
static void
xl_init_locked(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, i;
	u_int16_t rxfilt = 0;
	struct mii_data *mii = NULL;

	XL_LOCK_ASSERT(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	/* Only reset RX here when no PHY is present to be disturbed. */
	if (sc->xl_miibus == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
		    sc->arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	error = xl_list_rx_init(sc);
	if (error) {
		if_printf(ifp, "initialization of the rx ring failed (%d)\n",
		    error);
		xl_stop(sc);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		error = xl_list_tx_init_90xB(sc);
	else
		error = xl_list_tx_init(sc);
	if (error) {
		if_printf(ifp, "initialization of the tx ring failed (%d)\n",
		    error);
		xl_stop(sc);
		return;
	}

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Set RX filter bits. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* Set the individual bit to receive frames for this host only. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxfilt |= XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxfilt |= XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	if (sc->xl_type == XL_TYPE_905B)
		xl_setmulti_hash(sc);
	else
		xl_setmulti(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->xl_cdata.xl_tx_chain[0].xl_phys);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update_locked(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
	if (sc->xl_flags & XL_FLAG_FUNCREG)
		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* XXX Downcall to miibus. */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Arm the periodic stats/MII callout. */
	sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
}
2889
2890 /*
2891 * Set media options.
2892 */
2893 static int
2894 xl_ifmedia_upd(struct ifnet *ifp)
2895 {
2896 struct xl_softc *sc = ifp->if_softc;
2897 struct ifmedia *ifm = NULL;
2898 struct mii_data *mii = NULL;
2899
2900 /*XL_LOCK_ASSERT(sc);*/
2901
2902 if (sc->xl_miibus != NULL)
2903 mii = device_get_softc(sc->xl_miibus);
2904 if (mii == NULL)
2905 ifm = &sc->ifmedia;
2906 else
2907 ifm = &mii->mii_media;
2908
2909 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2910 case IFM_100_FX:
2911 case IFM_10_FL:
2912 case IFM_10_2:
2913 case IFM_10_5:
2914 xl_setmode(sc, ifm->ifm_media);
2915 return (0);
2916 break;
2917 default:
2918 break;
2919 }
2920
2921 if (sc->xl_media & XL_MEDIAOPT_MII ||
2922 sc->xl_media & XL_MEDIAOPT_BTX ||
2923 sc->xl_media & XL_MEDIAOPT_BT4) {
2924 xl_init(sc); /* XXX */
2925 } else {
2926 xl_setmode(sc, ifm->ifm_media);
2927 }
2928
2929 return (0);
2930 }
2931
/*
 * Report current media status.
 *
 * Derives the active media from the connector field of the internal
 * config register; for MII/autoneg connectors the MII layer is
 * polled instead.
 */
static void
xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xl_softc *sc = ifp->if_softc;
	u_int32_t icfg;
	u_int16_t status = 0;
	struct mii_data *mii = NULL;

	/*XL_LOCK_ASSERT(sc);*/

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	/* Extract the currently selected connector type. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	/*
	 * The media-status carrier bit is set when carrier is ABSENT
	 * (see the matching test in xl_watchdog()), so a clear bit
	 * means the link is active.
	 */
	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch (icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */

	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		if_printf(ifp, "unknown XCVR type: %d\n", icfg);
		break;
	}
}
3004
/*
 * Handle socket ioctls: interface flag changes (promiscuous mode,
 * up/down), multicast list updates, media selection and capability
 * toggles.  Everything else is passed to ether_ioctl().  Returns 0
 * or an errno value.
 */
static int
xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;
	struct mii_data *mii = NULL;
	u_int8_t rxfilt;

	switch (command) {
	case SIOCSIFFLAGS:
		XL_LOCK(sc);

		XL_SEL_WIN(5);
		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Toggle only the promiscuous filter bit when
			 * already running and just IFF_PROMISC changed,
			 * rather than doing a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xl_if_flags & IFF_PROMISC)) {
				rxfilt |= XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xl_if_flags & IFF_PROMISC) {
				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else {
				if ((ifp->if_flags & IFF_RUNNING) == 0)
					xl_init_locked(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember the flags so the next call can detect changes. */
		sc->xl_if_flags = ifp->if_flags;
		XL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX Downcall from if_addmulti() possibly with locks held. */
		XL_LOCK(sc);
		if (sc->xl_type == XL_TYPE_905B)
			xl_setmulti_hash(sc);
		else
			xl_setmulti(sc);
		XL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* XXX Downcall from ifmedia possibly with locks held. */
		/*XL_LOCK(sc);*/
		if (sc->xl_miibus != NULL)
			mii = device_get_softc(sc->xl_miibus);
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		/*XL_UNLOCK(sc);*/
		break;
	case SIOCSIFCAP:
		XL_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		/* TX checksum offload tracks the capability bit. */
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = XL905B_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		XL_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
3088
/*
 * XXX: Invoked from ifnet slow timer. Lock coverage needed.
 *
 * Watchdog: a queued transmission did not complete within the
 * timeout set by the start routines.  Reclaim what we can, then
 * reset and reinitialize the chip and restart transmission.
 */
static void
xl_watchdog(struct ifnet *ifp)
{
	struct xl_softc *sc = ifp->if_softc;
	u_int16_t status = 0;

	XL_LOCK(sc);

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	if_printf(ifp, "watchdog timeout\n");

	/* Media-status carrier bit set means carrier is absent. */
	if (status & XL_MEDIASTAT_CARRIER)
		if_printf(ifp, "no carrier - transceiver cable problem?\n");

	/* Drain completions, then fully reset and reinitialize. */
	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_reset(sc);
	xl_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (sc->xl_type == XL_TYPE_905B)
			xl_start_90xB_locked(ifp);
		else
			xl_start_locked(ifp);
	}

	XL_UNLOCK(sc);
}
3123
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Disables the receiver/transmitter, masks and acks interrupts,
 * cancels the stats callout, unloads and frees every RX/TX mbuf
 * and zeroes both descriptor rings.  Called with the driver lock
 * held.
 */
static void
xl_stop(struct xl_softc *sc)
{
	register int i;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	XL_LOCK_ASSERT(sc);

	ifp->if_timer = 0;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Ack any latched interrupt and mask everything off. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
	if (sc->xl_flags & XL_FLAG_FUNCREG)
		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Stop the stats updater. */
	untimeout(xl_stats_update, sc, sc->xl_stat_ch);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_rx_list != NULL)
		bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_tx_list != NULL)
		bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
3196
3197 /*
3198 * Stop all chip I/O so that the kernel's probe routines don't
3199 * get confused by errant DMAs when rebooting.
3200 */
3201 static void
3202 xl_shutdown(device_t dev)
3203 {
3204 struct xl_softc *sc;
3205
3206 sc = device_get_softc(dev);
3207
3208 XL_LOCK(sc);
3209 xl_reset(sc);
3210 xl_stop(sc);
3211 XL_UNLOCK(sc);
3212 }
3213
3214 static int
3215 xl_suspend(device_t dev)
3216 {
3217 struct xl_softc *sc;
3218
3219 sc = device_get_softc(dev);
3220
3221 XL_LOCK(sc);
3222 xl_stop(sc);
3223 XL_UNLOCK(sc);
3224
3225 return (0);
3226 }
3227
3228 static int
3229 xl_resume(device_t dev)
3230 {
3231 struct xl_softc *sc;
3232 struct ifnet *ifp;
3233
3234 sc = device_get_softc(dev);
3235 ifp = &sc->arpcom.ac_if;
3236
3237 XL_LOCK(sc);
3238
3239 xl_reset(sc);
3240 if (ifp->if_flags & IFF_UP)
3241 xl_init_locked(sc);
3242
3243 XL_UNLOCK(sc);
3244
3245 return (0);
3246 }
Cache object: 76d26ec56e8cd92ac8544acd2f392659
|