FreeBSD/Linux Kernel Cross Reference
sys/pci/if_tl.c
1 /*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/5.4/sys/pci/if_tl.c 142884 2005-03-01 08:11:52Z imp $");
35
36 /*
37 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
38 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
39 * the National Semiconductor DP83840A physical interface and the
40 * Microchip Technology 24Cxx series serial EEPROM.
41 *
42 * Written using the following four documents:
43 *
44 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
45 * National Semiconductor DP83840A data sheet (www.national.com)
46 * Microchip Technology 24C02C data sheet (www.microchip.com)
47 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
48 *
49 * Written by Bill Paul <wpaul@ctr.columbia.edu>
50 * Electrical Engineering Department
51 * Columbia University, New York City
52 */
53 /*
54 * Some notes about the ThunderLAN:
55 *
56 * The ThunderLAN controller is a single chip containing PCI controller
57 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
58 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
59 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
60 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
61 * to act as a complete ethernet interface.
62 *
63 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
64 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
65 * in full or half duplex. Some of the Compaq Deskpro machines use a
66 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
67 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
68 * concert with the ThunderLAN's internal PHY to provide full 10/100
69 * support. This is cheaper than using a standalone external PHY for both
70 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
71 * A serial EEPROM is also attached to the ThunderLAN chip to provide
72 * power-up default register settings and for storing the adapter's
73 * station address. Although not supported by this driver, the ThunderLAN
74 * chip can also be connected to token ring PHYs.
75 *
76 * The ThunderLAN has a set of registers which can be used to issue
77 * commands, acknowledge interrupts, and to manipulate other internal
78 * registers on its DIO bus. The primary registers can be accessed
79 * using either programmed I/O (inb/outb) or via PCI memory mapping,
80 * depending on how the card is configured during the PCI probing
81 * phase. It is even possible to have both PIO and memory mapped
82 * access turned on at the same time.
83 *
84 * Frame reception and transmission with the ThunderLAN chip is done
85 * using frame 'lists.' A list structure looks more or less like this:
86 *
87 * struct tl_frag {
88 * u_int32_t fragment_address;
89 * u_int32_t fragment_size;
90 * };
91 * struct tl_list {
92 * u_int32_t forward_pointer;
93 * u_int16_t cstat;
94 * u_int16_t frame_size;
95 * struct tl_frag fragments[10];
96 * };
97 *
98 * The forward pointer in the list header can be either a 0 or the address
99 * of another list, which allows several lists to be linked together. Each
100 * list contains up to 10 fragment descriptors. This means the chip allows
101 * ethernet frames to be broken up into up to 10 chunks for transfer to
102 * and from the SRAM. Note that the forward pointer and fragment buffer
103 * addresses are physical memory addresses, not virtual. Note also that
104 * a single ethernet frame can not span lists: if the host wants to
105 * transmit a frame and the frame data is split up over more than 10
106 * buffers, the frame has to be collapsed before it can be transmitted.
107 *
108 * To receive frames, the driver sets up a number of lists and populates
109 * the fragment descriptors, then it sends an RX GO command to the chip.
110 * When a frame is received, the chip will DMA it into the memory regions
111 * specified by the fragment descriptors and then trigger an RX 'end of
112 * frame interrupt' when done. The driver may choose to use only one
113 * fragment per list; this may result in slightly less efficient use
114 * of memory in exchange for improving performance.
115 *
116 * To transmit frames, the driver again sets up lists and fragment
117 * descriptors, only this time the buffers contain frame data that
118 * is to be DMA'ed into the chip instead of out of it. Once the chip
119 * has transferred the data into its on-board SRAM, it will trigger a
120 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
121 * interrupt when it reaches the end of the list.
122 */
123 /*
124 * Some notes about this driver:
125 *
126 * The ThunderLAN chip provides a couple of different ways to organize
127 * reception, transmission and interrupt handling. The simplest approach
128 * is to use one list each for transmission and reception. In this mode,
129 * the ThunderLAN will generate two interrupts for every received frame
130 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
131 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
132 * performance to have to handle so many interrupts.
133 *
134 * Initially I wanted to create a circular list of receive buffers so
135 * that the ThunderLAN chip would think there was an infinitely long
136 * receive channel and never deliver an RXEOC interrupt. However this
137 * doesn't work correctly under heavy load: while the manual says the
138 * chip will trigger an RXEOF interrupt each time a frame is copied into
139 * memory, you can't count on the chip waiting around for you to acknowledge
140 * the interrupt before it starts trying to DMA the next frame. The result
141 * is that the chip might traverse the entire circular list and then wrap
142 * around before you have a chance to do anything about it. Consequently,
143 * the receive list is terminated (with a 0 in the forward pointer in the
144 * last element). Each time an RXEOF interrupt arrives, the used list
145 * is shifted to the end of the list. This gives the appearance of an
146 * infinitely large RX chain so long as the driver doesn't fall behind
147 * the chip and allow all of the lists to be filled up.
148 *
149 * If all the lists are filled, the adapter will deliver an RX 'end of
150 * channel' interrupt when it hits the 0 forward pointer at the end of
151 * the chain. The RXEOC handler then cleans out the RX chain and resets
152 * the list head pointer in the ch_parm register and restarts the receiver.
153 *
154 * For frame transmission, it is possible to program the ThunderLAN's
155 * transmit interrupt threshold so that the chip can acknowledge multiple
156 * lists with only a single TX EOF interrupt. This allows the driver to
157 * queue several frames in one shot, and only have to handle a total
158 * two interrupts (one TX EOF and one TX EOC) no matter how many frames
159 * are transmitted. Frame transmission is done directly out of the
160 * mbufs passed to the tl_start() routine via the interface send queue.
161 * The driver simply sets up the fragment descriptors in the transmit
162 * lists to point to the mbuf data regions and sends a TX GO command.
163 *
164 * Note that since the RX and TX lists themselves are always used
165 * only by the driver, the are malloc()ed once at driver initialization
166 * time and never free()ed.
167 *
168 * Also, in order to remain as platform independent as possible, this
169 * driver uses memory mapped register access to manipulate the card
170 * as opposed to programmed I/O. This avoids the use of the inb/outb
171 * (and related) instructions which are specific to the i386 platform.
172 *
173 * Using these techniques, this driver achieves very high performance
174 * by minimizing the amount of interrupts generated during large
175 * transfers and by completely avoiding buffer copies. Frame transfer
176 * to and from the ThunderLAN chip is performed entirely by the chip
177 * itself thereby reducing the load on the host CPU.
178 */
179
180 #include <sys/param.h>
181 #include <sys/systm.h>
182 #include <sys/sockio.h>
183 #include <sys/mbuf.h>
184 #include <sys/malloc.h>
185 #include <sys/kernel.h>
186 #include <sys/module.h>
187 #include <sys/socket.h>
188
189 #include <net/if.h>
190 #include <net/if_arp.h>
191 #include <net/ethernet.h>
192 #include <net/if_dl.h>
193 #include <net/if_media.h>
194
195 #include <net/bpf.h>
196
197 #include <vm/vm.h> /* for vtophys */
198 #include <vm/pmap.h> /* for vtophys */
199 #include <machine/bus_memio.h>
200 #include <machine/bus_pio.h>
201 #include <machine/bus.h>
202 #include <machine/resource.h>
203 #include <sys/bus.h>
204 #include <sys/rman.h>
205
206 #include <dev/mii/mii.h>
207 #include <dev/mii/miivar.h>
208
209 #include <dev/pci/pcireg.h>
210 #include <dev/pci/pcivar.h>
211
212 /*
213 * Default to using PIO register access mode to pacify certain
214 * laptop docking stations with built-in ThunderLAN chips that
215 * don't seem to handle memory mapped mode properly.
216 */
217 #define TL_USEIOSPACE
218
219 #include <pci/if_tlreg.h>
220
221 MODULE_DEPEND(tl, pci, 1, 1, 1);
222 MODULE_DEPEND(tl, ether, 1, 1, 1);
223 MODULE_DEPEND(tl, miibus, 1, 1, 1);
224
225 /* "controller miibus0" required. See GENERIC if you get errors here. */
226 #include "miibus_if.h"
227
/*
 * Various supported device vendors/types and their names.
 * tl_probe() walks this table matching on PCI vendor/device ID;
 * the string is the human-readable device description. The
 * all-zero entry terminates the list.
 */

static struct tl_type tl_devs[] = {
	{ TI_VENDORID, TI_DEVICEID_THUNDERLAN,
		"Texas Instruments ThunderLAN" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
		"Compaq Netelligent 10" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
		"Compaq Netelligent 10/100" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
		"Compaq Netelligent 10/100 Proliant" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
		"Compaq Netelligent 10/100 Dual Port" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
		"Compaq NetFlex-3/P Integrated" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
		"Compaq NetFlex-3/P" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
		"Compaq NetFlex 3/P w/ BNC" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
		"Compaq Netelligent 10/100 TX Embedded UTP" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
		"Compaq Netelligent 10/100 TX UTP" },
	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
		"Olicom OC-2183/2185" },
	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
		"Olicom OC-2325" },
	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
		"Olicom OC-2326 10/100 TX UTP" },
	{ 0, 0, NULL }
};
263
264 static int tl_probe(device_t);
265 static int tl_attach(device_t);
266 static int tl_detach(device_t);
267 static int tl_intvec_rxeoc(void *, u_int32_t);
268 static int tl_intvec_txeoc(void *, u_int32_t);
269 static int tl_intvec_txeof(void *, u_int32_t);
270 static int tl_intvec_rxeof(void *, u_int32_t);
271 static int tl_intvec_adchk(void *, u_int32_t);
272 static int tl_intvec_netsts(void *, u_int32_t);
273
274 static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
275 static void tl_stats_update(void *);
276 static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
277
278 static void tl_intr(void *);
279 static void tl_start(struct ifnet *);
280 static int tl_ioctl(struct ifnet *, u_long, caddr_t);
281 static void tl_init(void *);
282 static void tl_stop(struct tl_softc *);
283 static void tl_watchdog(struct ifnet *);
284 static void tl_shutdown(device_t);
285 static int tl_ifmedia_upd(struct ifnet *);
286 static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
287
288 static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
289 static u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
290 static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
291
292 static void tl_mii_sync(struct tl_softc *);
293 static void tl_mii_send(struct tl_softc *, u_int32_t, int);
294 static int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
295 static int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
296 static int tl_miibus_readreg(device_t, int, int);
297 static int tl_miibus_writereg(device_t, int, int, int);
298 static void tl_miibus_statchg(device_t);
299
300 static void tl_setmode(struct tl_softc *, int);
301 static uint32_t tl_mchash(const uint8_t *);
302 static void tl_setmulti(struct tl_softc *);
303 static void tl_setfilt(struct tl_softc *, caddr_t, int);
304 static void tl_softreset(struct tl_softc *, int);
305 static void tl_hardreset(device_t);
306 static int tl_list_rx_init(struct tl_softc *);
307 static int tl_list_tx_init(struct tl_softc *);
308
309 static u_int8_t tl_dio_read8(struct tl_softc *, int);
310 static u_int16_t tl_dio_read16(struct tl_softc *, int);
311 static u_int32_t tl_dio_read32(struct tl_softc *, int);
312 static void tl_dio_write8(struct tl_softc *, int, int);
313 static void tl_dio_write16(struct tl_softc *, int, int);
314 static void tl_dio_write32(struct tl_softc *, int, int);
315 static void tl_dio_setbit(struct tl_softc *, int, int);
316 static void tl_dio_clrbit(struct tl_softc *, int, int);
317 static void tl_dio_setbit16(struct tl_softc *, int, int);
318 static void tl_dio_clrbit16(struct tl_softc *, int, int);
319
320 #ifdef TL_USEIOSPACE
321 #define TL_RES SYS_RES_IOPORT
322 #define TL_RID TL_PCI_LOIO
323 #else
324 #define TL_RES SYS_RES_MEMORY
325 #define TL_RID TL_PCI_LOMEM
326 #endif
327
/* Newbus method dispatch table for the tl(4) driver. */
static device_method_t tl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		tl_probe),
	DEVMETHOD(device_attach,	tl_attach),
	DEVMETHOD(device_detach,	tl_detach),
	DEVMETHOD(device_shutdown,	tl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),

	{ 0, 0 }
};
346
/* Driver declaration: name, method table, and softc size for newbus. */
static driver_t tl_driver = {
	"tl",
	tl_methods,
	sizeof(struct tl_softc)
};
352
353 static devclass_t tl_devclass;
354
355 DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0);
356 DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
357
358 static u_int8_t tl_dio_read8(sc, reg)
359 struct tl_softc *sc;
360 int reg;
361 {
362 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
363 return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
364 }
365
366 static u_int16_t tl_dio_read16(sc, reg)
367 struct tl_softc *sc;
368 int reg;
369 {
370 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
371 return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
372 }
373
374 static u_int32_t tl_dio_read32(sc, reg)
375 struct tl_softc *sc;
376 int reg;
377 {
378 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
379 return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
380 }
381
382 static void tl_dio_write8(sc, reg, val)
383 struct tl_softc *sc;
384 int reg;
385 int val;
386 {
387 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
388 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
389 return;
390 }
391
392 static void tl_dio_write16(sc, reg, val)
393 struct tl_softc *sc;
394 int reg;
395 int val;
396 {
397 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
398 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
399 return;
400 }
401
402 static void tl_dio_write32(sc, reg, val)
403 struct tl_softc *sc;
404 int reg;
405 int val;
406 {
407 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
408 CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
409 return;
410 }
411
412 static void
413 tl_dio_setbit(sc, reg, bit)
414 struct tl_softc *sc;
415 int reg;
416 int bit;
417 {
418 u_int8_t f;
419
420 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
421 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
422 f |= bit;
423 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
424
425 return;
426 }
427
428 static void
429 tl_dio_clrbit(sc, reg, bit)
430 struct tl_softc *sc;
431 int reg;
432 int bit;
433 {
434 u_int8_t f;
435
436 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
437 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
438 f &= ~bit;
439 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
440
441 return;
442 }
443
444 static void tl_dio_setbit16(sc, reg, bit)
445 struct tl_softc *sc;
446 int reg;
447 int bit;
448 {
449 u_int16_t f;
450
451 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
452 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
453 f |= bit;
454 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
455
456 return;
457 }
458
459 static void tl_dio_clrbit16(sc, reg, bit)
460 struct tl_softc *sc;
461 int reg;
462 int bit;
463 {
464 u_int16_t f;
465
466 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
467 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
468 f &= ~bit;
469 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
470
471 return;
472 }
473
/*
 * Send an instruction or address to the EEPROM, check for ACK.
 * The byte is clocked out MSB first on the bit-banged serial I/O
 * lines in the TL_NETSIO register. Returns the state of the EEPROM
 * data line sampled during the ACK clock: 0 means the EEPROM pulled
 * the line low (acknowledged), non-zero means no acknowledgement.
 */
static u_int8_t tl_eeprom_putbyte(sc, byte)
	struct tl_softc *sc;
	int byte;
{
	register int i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i) {
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
		} else {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
		}
		DELAY(1);
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
		DELAY(1);
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
	}

	/*
	 * Turn off TX mode so the EEPROM can drive the data line.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);

	/*
	 * Check for ack.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);

	return(ack);
}
517
518 /*
519 * Read a byte of data stored in the EEPROM at address 'addr.'
520 */
521 static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
522 struct tl_softc *sc;
523 int addr;
524 u_int8_t *dest;
525 {
526 register int i;
527 u_int8_t byte = 0;
528 struct ifnet *ifp = &sc->arpcom.ac_if;
529
530 tl_dio_write8(sc, TL_NETSIO, 0);
531
532 EEPROM_START;
533
534 /*
535 * Send write control code to EEPROM.
536 */
537 if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
538 if_printf(ifp, "failed to send write command, status: %x\n",
539 tl_dio_read8(sc, TL_NETSIO));
540 return(1);
541 }
542
543 /*
544 * Send address of byte we want to read.
545 */
546 if (tl_eeprom_putbyte(sc, addr)) {
547 if_printf(ifp, "failed to send address, status: %x\n",
548 tl_dio_read8(sc, TL_NETSIO));
549 return(1);
550 }
551
552 EEPROM_STOP;
553 EEPROM_START;
554 /*
555 * Send read control code to EEPROM.
556 */
557 if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
558 if_printf(ifp, "failed to send write command, status: %x\n",
559 tl_dio_read8(sc, TL_NETSIO));
560 return(1);
561 }
562
563 /*
564 * Start reading bits from EEPROM.
565 */
566 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
567 for (i = 0x80; i; i >>= 1) {
568 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
569 DELAY(1);
570 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
571 byte |= i;
572 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
573 DELAY(1);
574 }
575
576 EEPROM_STOP;
577
578 /*
579 * No ACK generated for read, so just return byte.
580 */
581
582 *dest = byte;
583
584 return(0);
585 }
586
587 /*
588 * Read a sequence of bytes from the EEPROM.
589 */
590 static int
591 tl_read_eeprom(sc, dest, off, cnt)
592 struct tl_softc *sc;
593 caddr_t dest;
594 int off;
595 int cnt;
596 {
597 int err = 0, i;
598 u_int8_t byte = 0;
599
600 for (i = 0; i < cnt; i++) {
601 err = tl_eeprom_getbyte(sc, off + i, &byte);
602 if (err)
603 break;
604 *(dest + i) = byte;
605 }
606
607 return(err ? 1 : 0);
608 }
609
610 static void
611 tl_mii_sync(sc)
612 struct tl_softc *sc;
613 {
614 register int i;
615
616 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
617
618 for (i = 0; i < 32; i++) {
619 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
620 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
621 }
622
623 return;
624 }
625
626 static void
627 tl_mii_send(sc, bits, cnt)
628 struct tl_softc *sc;
629 u_int32_t bits;
630 int cnt;
631 {
632 int i;
633
634 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
635 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
636 if (bits & i) {
637 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
638 } else {
639 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
640 }
641 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
642 }
643 }
644
/*
 * Read a PHY register over the bit-banged MII management interface.
 * frame->mii_phyaddr and frame->mii_regaddr select the PHY/register;
 * on success the 16-bit result is left in frame->mii_data. Returns
 * 0 on success, 1 if the PHY did not drive the turnaround ack bit low.
 * Takes TL_LOCK for the duration and temporarily masks the MII
 * interrupt (MINTEN) while bit-banging.
 */
static int
tl_mii_readreg(sc, frame)
	struct tl_softc *sc;
	struct tl_mii_frame *frame;

{
	int i, ack;
	int minten = 0;

	TL_LOCK(sc);

	tl_mii_sync(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn off MII interrupt by forcing MINTEN low.
	 * Remember whether it was on so we can restore it below.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	/*
	 * Turn on data xmit.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/*
	 * Send command/address info.
	 */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);

	/*
	 * Turn off xmit so the PHY can drive the data line.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Idle bit */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Check for ack: the PHY pulls MDATA low to acknowledge. */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;

	/* Complete the cycle */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHYs in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
		if (!ack) {
			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
				frame->mii_data |= i;
		}
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}

fail:

	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Reenable interrupts if they were on before we started. */
	if (minten) {
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	TL_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}
740
/*
 * Write a PHY register over the bit-banged MII management interface.
 * frame->mii_phyaddr/mii_regaddr select the target and frame->mii_data
 * holds the 16-bit value to write. Always returns 0 (no ack check is
 * performed for writes). Takes TL_LOCK for the duration and masks the
 * MII interrupt (MINTEN) while bit-banging.
 */
static int
tl_mii_writereg(sc, frame)
	struct tl_softc *sc;
	struct tl_mii_frame *frame;

{
	int minten;

	TL_LOCK(sc);

	tl_mii_sync(sc);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_WRITEOP;
	frame->mii_turnaround = TL_MII_TURNAROUND;

	/*
	 * Turn off MII interrupt by forcing MINTEN low.
	 * Remember whether it was on so we can restore it below.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	/*
	 * Turn on data output.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Clock out the complete management frame. */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);
	tl_mii_send(sc, frame->mii_turnaround, 2);
	tl_mii_send(sc, frame->mii_data, 16);

	/* One extra clock cycle to finish the cycle. */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Turn off xmit.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Reenable interrupts if they were on before we started. */
	if (minten)
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	TL_UNLOCK(sc);

	return(0);
}
797
798 static int
799 tl_miibus_readreg(dev, phy, reg)
800 device_t dev;
801 int phy, reg;
802 {
803 struct tl_softc *sc;
804 struct tl_mii_frame frame;
805
806 sc = device_get_softc(dev);
807 bzero((char *)&frame, sizeof(frame));
808
809 frame.mii_phyaddr = phy;
810 frame.mii_regaddr = reg;
811 tl_mii_readreg(sc, &frame);
812
813 return(frame.mii_data);
814 }
815
816 static int
817 tl_miibus_writereg(dev, phy, reg, data)
818 device_t dev;
819 int phy, reg, data;
820 {
821 struct tl_softc *sc;
822 struct tl_mii_frame frame;
823
824 sc = device_get_softc(dev);
825 bzero((char *)&frame, sizeof(frame));
826
827 frame.mii_phyaddr = phy;
828 frame.mii_regaddr = reg;
829 frame.mii_data = data;
830
831 tl_mii_writereg(sc, &frame);
832
833 return(0);
834 }
835
836 static void
837 tl_miibus_statchg(dev)
838 device_t dev;
839 {
840 struct tl_softc *sc;
841 struct mii_data *mii;
842
843 sc = device_get_softc(dev);
844 TL_LOCK(sc);
845 mii = device_get_softc(sc->tl_miibus);
846
847 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
848 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
849 } else {
850 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
851 }
852 TL_UNLOCK(sc);
853
854 return;
855 }
856
857 /*
858 * Set modes for bitrate devices.
859 */
860 static void
861 tl_setmode(sc, media)
862 struct tl_softc *sc;
863 int media;
864 {
865 if (IFM_SUBTYPE(media) == IFM_10_5)
866 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
867 if (IFM_SUBTYPE(media) == IFM_10_T) {
868 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
869 if ((media & IFM_GMASK) == IFM_FDX) {
870 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
871 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
872 } else {
873 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
874 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
875 }
876 }
877
878 return;
879 }
880
/*
 * Calculate the hash of a MAC address for programming the multicast hash
 * table. The address is folded byte-wise (bytes 0-2 XOR bytes 3-5) into
 * a 24-bit value, which is then split into four 6-bit chunks that are
 * XOR'd together, yielding a bucket index in [0, 63].
 */
static uint32_t
tl_mchash(const uint8_t *addr)
{
	int folded, hash, shift;

	/* Fold the two halves of the address into one 24-bit value. */
	folded = (addr[0] ^ addr[3]) << 16 |
	    (addr[1] ^ addr[4]) << 8 |
	    (addr[2] ^ addr[5]);

	/* XOR the four 6-bit chunks of the folded value together. */
	hash = 0;
	for (shift = 18; shift >= 0; shift -= 6)
		hash ^= folded >> shift;

	return (hash & 0x3f);
}
900
901 /*
902 * The ThunderLAN has a perfect MAC address filter in addition to
903 * the multicast hash filter. The perfect filter can be programmed
904 * with up to four MAC addresses. The first one is always used to
905 * hold the station address, which leaves us free to use the other
906 * three for multicast addresses.
907 */
908 static void
909 tl_setfilt(sc, addr, slot)
910 struct tl_softc *sc;
911 caddr_t addr;
912 int slot;
913 {
914 int i;
915 u_int16_t regaddr;
916
917 regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
918
919 for (i = 0; i < ETHER_ADDR_LEN; i++)
920 tl_dio_write8(sc, regaddr + i, *(addr + i));
921
922 return;
923 }
924
/*
 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
 * linked list. This is fine, except addresses are added from the head
 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
 * group to always be in the perfect filter, but as more groups are added,
 * the 224.0.0.1 entry (which is always added first) gets pushed down
 * the list and ends up at the tail. So after 3 or 4 multicast groups
 * are added, the all-hosts entry gets pushed out of the perfect filter
 * and into the hash table.
 *
 * Because the multicast list is a doubly-linked list as opposed to a
 * circular queue, we don't have the ability to just grab the tail of
 * the list and traverse it backwards. Instead, we have to traverse
 * the list once to find the tail, then traverse it again backwards to
 * update the multicast filter.
 */
/*
 * Reprogram the multicast filters from ifp->if_multiaddrs: the first
 * three link-layer groups go into perfect-filter slots 1-3 (slot 0 is
 * the station address), the rest into the 64-bit hash table. With
 * IFF_ALLMULTI, the hash table is simply set to all-ones.
 */
static void
tl_setmulti(sc)
	struct tl_softc *sc;
{
	struct ifnet *ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct ifmultiaddr *ifma;
	/* All-zero MAC used to clear out unused perfect-filter slots. */
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
	ifp = &sc->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < 4; i++)
		tl_setfilt(sc, (caddr_t)&dummy, i);
	tl_dio_write32(sc, TL_HASH1, 0);
	tl_dio_write32(sc, TL_HASH2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* Reverse traversal: see the XXX comment above. */
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first three multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < 4) {
				tl_setfilt(sc,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = tl_mchash(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	tl_dio_write32(sc, TL_HASH1, hashes[0]);
	tl_dio_write32(sc, TL_HASH2, hashes[1]);

	return;
}
993
/*
 * This routine is recommended by the ThunderLAN manual to insure that
 * the internal PHY is powered up correctly. It also recommends a one
 * second pause at the end to 'wait for the clocks to start' but in my
 * experience this isn't necessary.
 */
static void
tl_hardreset(dev)
	device_t dev;
{
	struct tl_softc *sc;
	int i;
	u_int16_t flags;

	sc = device_get_softc(dev);

	tl_mii_sync(sc);

	/* Isolate and power down every possible PHY address. */
	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;

	for (i = 0; i < MII_NPHY; i++)
		tl_miibus_writereg(dev, i, MII_BMCR, flags);

	/* PHY 31 is presumably the built-in 10baseT PHY -- TODO confirm. */
	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
	DELAY(50000);
	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
	tl_mii_sync(sc);
	/* NOTE(review): unbounded busy-wait; hangs if RESET never clears. */
	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);

	DELAY(50000);
	return;
}
1026
/*
 * Soft-reset the adapter: clear statistics, clear the address/hash
 * filters, and program the netconfig/threshold registers. If
 * 'internal' is set (and the card is not a bitrate device), the
 * on-chip PHY is enabled.
 */
static void
tl_softreset(sc, internal)
	struct tl_softc		*sc;
	int			internal;
{
	u_int32_t		cmd, dummy, i;

	/* Assert the adapter reset bit. */
	CMD_SET(sc, TL_CMD_ADRST);

	/* Turn off interrupts */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/* First, clear the stats registers (they are clear-on-read;
	 * the value read is deliberately discarded). */
	for (i = 0; i < 5; i++)
		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);

	/* Clear Areg and Hash registers */
	for (i = 0; i < 8; i++)
		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);

	/*
	 * Set up Netconfig register. Enable one channel and
	 * one fragment mode.
	 */
	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
	if (internal && !sc->tl_bitrate) {
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	} else {
		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	}

	/* Handle cards with bitrate devices. */
	if (sc->tl_bitrate)
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);

	/*
	 * Load adapter irq pacing timer and tx threshold.
	 * We make the transmit threshold 1 initially but we may
	 * change that later.
	 */
	cmd = CSR_READ_4(sc, TL_HOSTCMD);
	cmd |= TL_CMD_NES;
	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));

	/* Unreset the MII */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);

	/* Take the adapter out of reset */
	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);

	/* Wait for things to settle down a little. */
	DELAY(500);

	return;
}
1085
1086 /*
1087 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1088 * against our list and return its name if we find a match.
1089 */
1090 static int
1091 tl_probe(dev)
1092 device_t dev;
1093 {
1094 struct tl_type *t;
1095
1096 t = tl_devs;
1097
1098 while(t->tl_name != NULL) {
1099 if ((pci_get_vendor(dev) == t->tl_vid) &&
1100 (pci_get_device(dev) == t->tl_did)) {
1101 device_set_desc(dev, t->tl_name);
1102 return (BUS_PROBE_DEFAULT);
1103 }
1104 t++;
1105 }
1106
1107 return(ENXIO);
1108 }
1109
/*
 * Attach routine: map registers, allocate the IRQ and DMA-visible
 * list memory, read the station address, probe for PHYs and register
 * the interface with the network stack. On any failure, tl_detach()
 * releases whatever was successfully allocated.
 */
static int
tl_attach(dev)
	device_t		dev;
{
	int			i;
	u_int16_t		did, vid;
	struct tl_type		*t;
	struct ifnet		*ifp;
	struct tl_softc		*sc;
	int			unit, error = 0, rid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* Find this device's entry in the supported-hardware table. */
	t = tl_devs;
	while(t->tl_name != NULL) {
		if (vid == t->tl_vid && did == t->tl_did)
			break;
		t++;
	}

	if (t->tl_name == NULL) {
		device_printf(dev, "unknown device!?\n");
		return (ENXIO);
	}

	mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

#ifdef TL_USEIOSPACE

	rid = TL_PCI_LOIO;
	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);

	/*
	 * Some cards have the I/O and memory mapped address registers
	 * reversed. Try both combinations before giving up.
	 */
	if (sc->tl_res == NULL) {
		rid = TL_PCI_LOMEM;
		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
		    RF_ACTIVE);
	}
#else
	rid = TL_PCI_LOMEM;
	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->tl_res == NULL) {
		rid = TL_PCI_LOIO;
		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
	}
#endif

	if (sc->tl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->tl_btag = rman_get_bustag(sc->tl_res);
	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);

#ifdef notdef
	/*
	 * The ThunderLAN manual suggests jacking the PCI latency
	 * timer all the way up to its maximum value. I'm not sure
	 * if this is really necessary, but what the manual wants,
	 * the manual gets.
	 */
	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
	command |= 0x0000FF00;
	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
#endif

	/* Allocate interrupt */
	rid = 0;
	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->tl_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Now allocate memory for the TX and RX lists.
	 * The chip reads the lists via DMA, so they must be physically
	 * contiguous, page aligned, and within the 32-bit address space.
	 */
	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->tl_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	bzero(sc->tl_ldata, sizeof(struct tl_list_data));

	/* Select the EEPROM offset of the station address per vendor. */
	sc->tl_dinfo = t;
	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
		sc->tl_eeaddr = TL_EEPROM_EADDR;
	if (t->tl_vid == OLICOM_VENDORID)
		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;

	/* Reset the adapter. */
	tl_softreset(sc, 1);
	tl_hardreset(dev);
	tl_softreset(sc, 1);

	/*
	 * Get station address from the EEPROM.
	 */
	if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    sc->tl_eeaddr, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * XXX Olicom, in its desire to be different from the
	 * rest of the world, has done strange things with the
	 * encoding of the station address in the EEPROM. First
	 * of all, they store the address at offset 0xF8 rather
	 * than at 0x83 like the ThunderLAN manual suggests.
	 * Second, they store the address in three 16-bit words in
	 * network byte order, as opposed to storing it sequentially
	 * like all the other ThunderLAN cards. In order to get
	 * the station address in a form that matches what the Olicom
	 * diagnostic utility specifies, we have to byte-swap each
	 * word. To make things even more confusing, neither 00:00:28
	 * nor 00:00:24 appear in the IEEE OUI database.
	 */
	if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
		for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
			u_int16_t		*p;
			p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
			*p = ntohs(*p);
		}
	}

	/* Fill in the ifnet structure and driver entry points. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT;
	ifp->if_ioctl = tl_ioctl;
	ifp->if_start = tl_start;
	ifp->if_watchdog = tl_watchdog;
	ifp->if_init = tl_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
	callout_handle_init(&sc->tl_stat_ch);

	/* Reset the adapter again. */
	tl_softreset(sc, 1);
	tl_hardreset(dev);
	tl_softreset(sc, 1);

	/*
	 * Do MII setup. If no PHYs are found, then this is a
	 * bitrate ThunderLAN chip that only supports 10baseT
	 * and AUI/BNC.
	 */
	if (mii_phy_probe(dev, &sc->tl_miibus,
	    tl_ifmedia_upd, tl_ifmedia_sts)) {
		struct ifmedia		*ifm;
		sc->tl_bitrate = 1;
		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
		/* Reset again, this time setting bitrate mode. */
		tl_softreset(sc, 1);
		ifm = &sc->ifmedia;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		tl_ifmedia_upd(ifp);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, sc->arpcom.ac_enaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET,
	    tl_intr, sc, &sc->tl_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* tl_detach() only frees resources that were actually allocated. */
	if (error)
		tl_detach(dev);

	return(error);
}
1322
1323 /*
1324 * Shutdown hardware and free up resources. This can be called any
1325 * time after the mutex has been initialized. It is called in both
1326 * the error case in attach and the normal detach case so it needs
1327 * to be careful about only freeing resources that have actually been
1328 * allocated.
1329 */
static int
tl_detach(dev)
	device_t		dev;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
	TL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		tl_stop(sc);
		ether_ifdetach(ifp);
	}
	if (sc->tl_miibus)
		device_delete_child(dev, sc->tl_miibus);
	bus_generic_detach(dev);

	/* Every release below is guarded so this routine is safe to
	 * call from the partial-failure path in tl_attach(). */
	if (sc->tl_ldata)
		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
	if (sc->tl_bitrate)
		ifmedia_removeall(&sc->ifmedia);

	if (sc->tl_intrhand)
		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
	if (sc->tl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
	if (sc->tl_res)
		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);

	TL_UNLOCK(sc);
	mtx_destroy(&sc->tl_mtx);

	return(0);
}
1368
1369 /*
1370 * Initialize the transmit lists.
1371 */
1372 static int
1373 tl_list_tx_init(sc)
1374 struct tl_softc *sc;
1375 {
1376 struct tl_chain_data *cd;
1377 struct tl_list_data *ld;
1378 int i;
1379
1380 cd = &sc->tl_cdata;
1381 ld = sc->tl_ldata;
1382 for (i = 0; i < TL_TX_LIST_CNT; i++) {
1383 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1384 if (i == (TL_TX_LIST_CNT - 1))
1385 cd->tl_tx_chain[i].tl_next = NULL;
1386 else
1387 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1388 }
1389
1390 cd->tl_tx_free = &cd->tl_tx_chain[0];
1391 cd->tl_tx_tail = cd->tl_tx_head = NULL;
1392 sc->tl_txeoc = 1;
1393
1394 return(0);
1395 }
1396
1397 /*
1398 * Initialize the RX lists and allocate mbufs for them.
1399 */
static int
tl_list_rx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;

	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		cd->tl_rx_chain[i].tl_ptr =
			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
		/* Attach a fresh mbuf cluster to this descriptor. */
		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
			return(ENOBUFS);
		/*
		 * Link hardware descriptors by physical address; a zero
		 * forward pointer marks the end of the chain for the chip.
		 */
		if (i == (TL_RX_LIST_CNT - 1)) {
			cd->tl_rx_chain[i].tl_next = NULL;
			ld->tl_rx_list[i].tlist_fptr = 0;
		} else {
			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
			ld->tl_rx_list[i].tlist_fptr =
					vtophys(&ld->tl_rx_list[i + 1]);
		}
	}

	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];

	return(0);
}
1431
/*
 * Allocate an mbuf cluster and attach it to the given RX descriptor,
 * marking the descriptor ready for the chip. Returns ENOBUFS (and
 * leaves the descriptor untouched) if no mbuf or cluster is available.
 */
static int
tl_newbuf(sc, c)
	struct tl_softc		*sc;
	struct tl_chain_onefrag	*c;
{
	struct mbuf		*m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return(ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		m_freem(m_new);
		return(ENOBUFS);
	}

#ifdef __alpha__
	/* NOTE(review): presumably offsets the payload so the IP header
	 * is longword-aligned on alignment-strict alpha — confirm. */
	m_new->m_data += 2;
#endif

	c->tl_mbuf = m_new;
	c->tl_next = NULL;
	c->tl_ptr->tlist_frsize = MCLBYTES;
	c->tl_ptr->tlist_fptr = 0;
	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;

	return(0);
}
1463 /*
1464 * Interrupt handler for RX 'end of frame' condition (EOF). This
1465 * tells us that a full ethernet frame has been captured and we need
1466 * to handle it.
1467 *
1468 * Reception is done using 'lists' which consist of a header and a
1469 * series of 10 data count/data address pairs that point to buffers.
1470 * Initially you're supposed to create a list, populate it with pointers
1471 * to buffers, then load the physical address of the list into the
1472 * ch_parm register. The adapter is then supposed to DMA the received
1473 * frame into the buffers for you.
1474 *
1475 * To make things as fast as possible, we have the chip DMA directly
1476 * into mbufs. This saves us from having to do a buffer copy: we can
1477 * just hand the mbufs directly to ether_input(). Once the frame has
1478 * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain. As long as we stay ahead of
1480 * the chip, it will always think it has an endless receive channel.
1481 *
1482 * If we happen to fall behind and the chip manages to fill up all of
1483 * the buffers, it will generate an end of channel interrupt and wait
1484 * for us to empty the chain and restart the receiver.
1485 */
static int
tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	TL_LOCK_ASSERT(sc);

	/* Walk the RX chain, handing each completed frame up the stack. */
	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;	/* count of completed frames, folded into the ACK */
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/*
			 * No replacement mbuf: re-ready the descriptor
			 * with its existing buffer and drop this frame.
			 */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Move the refreshed descriptor to the tail of the chain. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
		    vtophys(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		eh = mtod(m, struct ether_header *);
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Drop the driver lock around the call up the stack. */
		TL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TL_LOCK(sc);
	}

	return(r);
}
1549
1550 /*
1551 * The RX-EOC condition hits when the ch_parm address hasn't been
1552 * initialized or the adapter reached a list with a forward pointer
1553 * of 0 (which indicates the end of the chain). In our case, this means
1554 * the card has hit the end of the receive buffer chain and we need to
1555 * empty out the buffers and shift the pointer back to the beginning again.
1556 */
static int
tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;


	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind the software chain pointers to the start of the ring. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	/* Reload the channel parameter register with the head of the
	 * chain and tell the caller to restart the RX channel. */
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}
1580
static int
tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		/* Stop at the first frame the chip hasn't completed. */
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;	/* number of completed frames, folded into the ACK */
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the descriptor to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this TX list. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}
1614
1615 /*
1616 * The transmit end of channel interrupt. The adapter triggers this
1617 * interrupt to tell us it hit the end of the current transmit list.
1618 *
1619 * A note about this: it's possible for a condition to arise where
1620 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1621 * You have to avoid this since the chip expects things to go in a
1622 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1623 * When the TXEOF handler is called, it will free all of the transmitted
1624 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1625 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
1627 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1628 * it could attempt to issue a transmit command prematurely.
1629 *
1630 * To guard against this, tl_start() will only issue transmit commands
1631 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1632 * can set this flag once tl_start() has cleared it.
1633 */
static int
tl_intvec_txeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* All queued frames are done: allow tl_start() to issue
		 * transmit commands again. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		/* More frames are queued: restart the TX channel here. */
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		return(0);	/* already acked above; caller must not ack */
	}

	return(1);
}
1670
static int
tl_intvec_adchk(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	if (type)
		if_printf(&sc->arpcom.ac_if, "adapter check: %x\n",
			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	/* The adapter signalled an internal fault; reset it and
	 * reinitialize the interface from scratch. */
	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	return(0);
}
1691
1692 static int
1693 tl_intvec_netsts(xsc, type)
1694 void *xsc;
1695 u_int32_t type;
1696 {
1697 struct tl_softc *sc;
1698 u_int16_t netsts;
1699
1700 sc = xsc;
1701
1702 netsts = tl_dio_read16(sc, TL_NETSTS);
1703 tl_dio_write16(sc, TL_NETSTS, netsts);
1704
1705 if_printf(&sc->arpcom.ac_if, "network status: %x\n", netsts);
1706
1707 return(1);
1708 }
1709
/*
 * Main interrupt handler: decode the host interrupt register into an
 * interrupt type and vector, dispatch to the per-type handler, then
 * acknowledge the interrupt with whatever value the handler returned
 * (a return of 0 means the handler acked it itself or no ack is due).
 */
static void
tl_intr(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	sc = xsc;
	TL_LOCK(sc);

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/* Split the register into ACK payload, vector and type fields. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch(ints) {
	case (TL_INTR_INVALID):
#ifdef DIAGNOSTIC
		if_printf(ifp, "got an invalid interrupt!\n");
#endif
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		if_printf(ifp, "got a dummy interrupt\n");
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* This type is shared: the vector distinguishes adapter
		 * check interrupts from network status interrupts. */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		if_printf(ifp, "bogus interrupt type\n");
		break;
	}

	/* Re-enable interrupts */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	/* Kick the transmitter for anything queued while we were busy. */
	if (ifp->if_snd.ifq_head != NULL)
		tl_start(ifp);

	TL_UNLOCK(sc);

	return;
}
1785
/*
 * Periodic (once per second) statistics harvester. Reads the chip's
 * five statistics registers into the interface counters, bumps the TX
 * threshold on underruns, drives the MII tick, and reschedules itself.
 * Also called directly from tl_intr() on a stats-overflow interrupt.
 */
static void
tl_stats_update(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	struct mii_data		*mii;
	u_int32_t		*p;

	bzero((char *)&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	TL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/* Read all five stats registers; the DIO address auto-increments. */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
	    tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
	    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	/* On a TX underrun, raise the TX FIFO threshold one notch
	 * (unless it is already at whole-packet mode). */
	if (tl_tx_underrun(tl_stats)) {
		u_int8_t		tx_thresh;
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			if_printf(ifp, "tx underrun -- increasing "
			    "tx threshold to %d bytes\n",
			    (64 * (tx_thresh * 4)));
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	/* Reschedule ourselves to run again in one second. */
	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);

	if (!sc->tl_bitrate) {
		mii = device_get_softc(sc->tl_miibus);
		mii_tick(mii);
	}

	TL_UNLOCK(sc);

	return;
}
1844
1845 /*
1846 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1847 * pointers to the fragment pointers.
1848 */
1849 static int
1850 tl_encap(sc, c, m_head)
1851 struct tl_softc *sc;
1852 struct tl_chain *c;
1853 struct mbuf *m_head;
1854 {
1855 int frag = 0;
1856 struct tl_frag *f = NULL;
1857 int total_len;
1858 struct mbuf *m;
1859 struct ifnet *ifp = &sc->arpcom.ac_if;
1860
1861 /*
1862 * Start packing the mbufs in this chain into
1863 * the fragment pointers. Stop when we run out
1864 * of fragments or hit the end of the mbuf chain.
1865 */
1866 m = m_head;
1867 total_len = 0;
1868
1869 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1870 if (m->m_len != 0) {
1871 if (frag == TL_MAXFRAGS)
1872 break;
1873 total_len+= m->m_len;
1874 c->tl_ptr->tl_frag[frag].tlist_dadr =
1875 vtophys(mtod(m, vm_offset_t));
1876 c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1877 frag++;
1878 }
1879 }
1880
1881 /*
1882 * Handle special cases.
1883 * Special case #1: we used up all 10 fragments, but
1884 * we have more mbufs left in the chain. Copy the
1885 * data into an mbuf cluster. Note that we don't
1886 * bother clearing the values in the other fragment
1887 * pointers/counters; it wouldn't gain us anything,
1888 * and would waste cycles.
1889 */
1890 if (m != NULL) {
1891 struct mbuf *m_new = NULL;
1892
1893 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1894 if (m_new == NULL) {
1895 if_printf(ifp, "no memory for tx list\n");
1896 return(1);
1897 }
1898 if (m_head->m_pkthdr.len > MHLEN) {
1899 MCLGET(m_new, M_DONTWAIT);
1900 if (!(m_new->m_flags & M_EXT)) {
1901 m_freem(m_new);
1902 if_printf(ifp, "no memory for tx list\n");
1903 return(1);
1904 }
1905 }
1906 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1907 mtod(m_new, caddr_t));
1908 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1909 m_freem(m_head);
1910 m_head = m_new;
1911 f = &c->tl_ptr->tl_frag[0];
1912 f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1913 f->tlist_dcnt = total_len = m_new->m_len;
1914 frag = 1;
1915 }
1916
1917 /*
1918 * Special case #2: the frame is smaller than the minimum
1919 * frame size. We have to pad it to make the chip happy.
1920 */
1921 if (total_len < TL_MIN_FRAMELEN) {
1922 if (frag == TL_MAXFRAGS)
1923 if_printf(ifp,
1924 "all frags filled but frame still to small!\n");
1925 f = &c->tl_ptr->tl_frag[frag];
1926 f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1927 f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1928 total_len += f->tlist_dcnt;
1929 frag++;
1930 }
1931
1932 c->tl_mbuf = m_head;
1933 c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1934 c->tl_ptr->tlist_frsize = total_len;
1935 c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1936 c->tl_ptr->tlist_fptr = 0;
1937
1938 return(0);
1939 }
1940
1941 /*
1942 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1943 * to the mbuf data regions directly in the transmit lists. We also save a
1944 * copy of the pointers since the transmit list fragment pointers are
1945 * physical addresses.
1946 */
static void
tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	TL_LOCK(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		TL_UNLOCK(sc);
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	/* Dequeue and encapsulate packets until we run out of either
	 * packets or free TX descriptors. */
	while(sc->tl_cdata.tl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->tl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		TL_UNLOCK(sc);
		return;
	}

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		/* Only kick the transmitter if the previous TXEOC has
		 * been acknowledged (see tl_intvec_txeoc()). */
		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		/* A transmission is in progress; append to the tail. */
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	TL_UNLOCK(sc);

	return;
}
2039
2040 static void
2041 tl_init(xsc)
2042 void *xsc;
2043 {
2044 struct tl_softc *sc = xsc;
2045 struct ifnet *ifp = &sc->arpcom.ac_if;
2046 struct mii_data *mii;
2047
2048 TL_LOCK(sc);
2049
2050 ifp = &sc->arpcom.ac_if;
2051
2052 /*
2053 * Cancel pending I/O.
2054 */
2055 tl_stop(sc);
2056
2057 /* Initialize TX FIFO threshold */
2058 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
2059 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
2060
2061 /* Set PCI burst size */
2062 tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
2063
2064 /*
2065 * Set 'capture all frames' bit for promiscuous mode.
2066 */
2067 if (ifp->if_flags & IFF_PROMISC)
2068 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2069 else
2070 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2071
2072 /*
2073 * Set capture broadcast bit to capture broadcast frames.
2074 */
2075 if (ifp->if_flags & IFF_BROADCAST)
2076 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2077 else
2078 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2079
2080 tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2081
2082 /* Init our MAC address */
2083 tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);
2084
2085 /* Init multicast filter, if needed. */
2086 tl_setmulti(sc);
2087
2088 /* Init circular RX list. */
2089 if (tl_list_rx_init(sc) == ENOBUFS) {
2090 if_printf(ifp,
2091 "initialization failed: no memory for rx buffers\n");
2092 tl_stop(sc);
2093 TL_UNLOCK(sc);
2094 return;
2095 }
2096
2097 /* Init TX pointers. */
2098 tl_list_tx_init(sc);
2099
2100 /* Enable PCI interrupts. */
2101 CMD_SET(sc, TL_CMD_INTSON);
2102
2103 /* Load the address of the rx list */
2104 CMD_SET(sc, TL_CMD_RT);
2105 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2106
2107 if (!sc->tl_bitrate) {
2108 if (sc->tl_miibus != NULL) {
2109 mii = device_get_softc(sc->tl_miibus);
2110 mii_mediachg(mii);
2111 }
2112 } else {
2113 tl_ifmedia_upd(ifp);
2114 }
2115
2116 /* Send the RX go command */
2117 CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2118
2119 ifp->if_flags |= IFF_RUNNING;
2120 ifp->if_flags &= ~IFF_OACTIVE;
2121
2122 /* Start the stats update counter */
2123 sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
2124 TL_UNLOCK(sc);
2125
2126 return;
2127 }
2128
2129 /*
2130 * Set media options.
2131 */
2132 static int
2133 tl_ifmedia_upd(ifp)
2134 struct ifnet *ifp;
2135 {
2136 struct tl_softc *sc;
2137 struct mii_data *mii = NULL;
2138
2139 sc = ifp->if_softc;
2140
2141 if (sc->tl_bitrate)
2142 tl_setmode(sc, sc->ifmedia.ifm_media);
2143 else {
2144 mii = device_get_softc(sc->tl_miibus);
2145 mii_mediachg(mii);
2146 }
2147
2148 return(0);
2149 }
2150
2151 /*
2152 * Report current media status.
2153 */
2154 static void
2155 tl_ifmedia_sts(ifp, ifmr)
2156 struct ifnet *ifp;
2157 struct ifmediareq *ifmr;
2158 {
2159 struct tl_softc *sc;
2160 struct mii_data *mii;
2161
2162 sc = ifp->if_softc;
2163
2164 ifmr->ifm_active = IFM_ETHER;
2165
2166 if (sc->tl_bitrate) {
2167 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2168 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2169 else
2170 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2171 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2172 ifmr->ifm_active |= IFM_HDX;
2173 else
2174 ifmr->ifm_active |= IFM_FDX;
2175 return;
2176 } else {
2177 mii = device_get_softc(sc->tl_miibus);
2178 mii_pollstat(mii);
2179 ifmr->ifm_active = mii->mii_media_active;
2180 ifmr->ifm_status = mii->mii_media_status;
2181 }
2182
2183 return;
2184 }
2185
static int
tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the promiscuous bit changed while the
			 * interface is running, toggle the CAF bit and
			 * refresh the filters instead of doing a full
			 * reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				tl_stop(sc);
			}
		}
		/* Remember the flags so the next call can detect changes. */
		sc->tl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast membership changed; reprogram the filters. */
		tl_setmulti(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else {
			struct mii_data		*mii;
			mii = device_get_softc(sc->tl_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	default:
		/* Let the common ethernet code handle everything else. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	(void)splx(s);

	return(error);
}
2246
2247 static void
2248 tl_watchdog(ifp)
2249 struct ifnet *ifp;
2250 {
2251 struct tl_softc *sc;
2252
2253 sc = ifp->if_softc;
2254
2255 if_printf(ifp, "device timeout\n");
2256
2257 ifp->if_oerrors++;
2258
2259 tl_softreset(sc, 1);
2260 tl_init(sc);
2261
2262 return;
2263 }
2264
2265 /*
2266 * Stop the adapter and free any mbufs allocated to the
2267 * RX and TX lists.
2268 */
static void
tl_stop(sc)
	struct tl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	TL_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	untimeout(tl_stats_update, sc, sc->tl_stat_ch);

	/*
	 * Stop the transmitter: clearing TL_CMD_RT aims the STOP
	 * command at the TX channel; then clear its list pointer.
	 */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Stop the receiver: setting TL_CMD_RT aims the STOP
	 * command at the RX channel; then clear its list pointer.
	 */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists: release any mbufs still attached to
	 * receive descriptors, then zero the descriptor ring.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_rx_list,
		sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers the same way.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_tx_list,
		sizeof(sc->tl_ldata->tl_tx_list));

	/* Mark the interface as down and no longer transmitting. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	TL_UNLOCK(sc);

	return;
}
2332
2333 /*
2334 * Stop all chip I/O so that the kernel's probe routines don't
2335 * get confused by errant DMAs when rebooting.
2336 */
2337 static void
2338 tl_shutdown(dev)
2339 device_t dev;
2340 {
2341 struct tl_softc *sc;
2342
2343 sc = device_get_softc(dev);
2344
2345 tl_stop(sc);
2346
2347 return;
2348 }
Cache object: 4b080d6d18cc358b96a87f44d304a54b
|