FreeBSD/Linux Kernel Cross Reference
sys/dev/my/if_my.c
1 /*-
2 * Written by: yen_cw@myson.com.tw
3 * Copyright (c) 2002 Myson Technology Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification, immediately at the beginning of the file.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41 #include <sys/types.h>
42 #include <sys/bus.h>
43 #include <sys/module.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46
47 #define NBPFILTER 1
48
49 #include <net/if.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_dl.h>
55 #include <net/bpf.h>
56
57 #include <vm/vm.h> /* for vtophys */
58 #include <vm/pmap.h> /* for vtophys */
59 #include <machine/clock.h> /* for DELAY */
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 #include <sys/bus.h>
63 #include <sys/rman.h>
64
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67
68 /*
69 * #define MY_USEIOSPACE
70 */
71
/*
 * Selects I/O-port vs. memory-mapped register access.
 *
 * NOTE(review): MY_USEIOSPACE is a runtime variable, not a macro, so the
 * "#if (MY_USEIOSPACE)" below sees an undefined identifier, which the
 * preprocessor evaluates as 0 -- the SYS_RES_MEMORY branch is therefore
 * always taken, regardless of this variable's value (my_attach() even
 * toggles it at runtime to no effect).  Confirm whether I/O-space support
 * was actually intended before "fixing" either side.
 */
static int MY_USEIOSPACE = 1;

#if (MY_USEIOSPACE)
#define MY_RES SYS_RES_IOPORT
#define MY_RID MY_PCI_LOIO
#else
#define MY_RES SYS_RES_MEMORY
#define MY_RID MY_PCI_LOMEM
#endif
81
82
83 #include <dev/my/if_myreg.h>
84
85 #ifndef lint
86 static const char rcsid[] =
87 "$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $";
88 #endif
89
90 /*
91 * Various supported device vendors/types and their names.
92 */
/*
 * my_info_tmp carries the matched device-table entry from my_probe() to
 * my_attach() (probe and attach run back to back on the same device).
 */
struct my_type *my_info_tmp;

/* PCI vendor/device IDs this driver claims. */
static struct my_type my_devs[] = {
	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
	{0, 0, NULL}
};
100
101 /*
102 * Various supported PHY vendors/types and their names. Note that this driver
103 * will work with pretty much any MII-compliant PHY, so failure to positively
104 * identify the chip is not a fatal error.
105 */
static struct my_type my_phys[] = {
	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
	/* Catch-all: any MII-compliant PHY we do not recognize by ID. */
	{0, 0, "<MII-compliant physical interface>"}
};
114
115 static int my_probe(device_t);
116 static int my_attach(device_t);
117 static int my_detach(device_t);
118 static int my_newbuf(struct my_softc *, struct my_chain_onefrag *);
119 static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
120 static void my_rxeof(struct my_softc *);
121 static void my_txeof(struct my_softc *);
122 static void my_txeoc(struct my_softc *);
123 static void my_intr(void *);
124 static void my_start(struct ifnet *);
125 static void my_start_locked(struct ifnet *);
126 static int my_ioctl(struct ifnet *, u_long, caddr_t);
127 static void my_init(void *);
128 static void my_init_locked(struct my_softc *);
129 static void my_stop(struct my_softc *);
130 static void my_watchdog(struct ifnet *);
131 static void my_shutdown(device_t);
132 static int my_ifmedia_upd(struct ifnet *);
133 static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
134 static u_int16_t my_phy_readreg(struct my_softc *, int);
135 static void my_phy_writereg(struct my_softc *, int, int);
136 static void my_autoneg_xmit(struct my_softc *);
137 static void my_autoneg_mii(struct my_softc *, int, int);
138 static void my_setmode_mii(struct my_softc *, int);
139 static void my_getmode_mii(struct my_softc *);
140 static void my_setcfg(struct my_softc *, int);
141 static void my_setmulti(struct my_softc *);
142 static void my_reset(struct my_softc *);
143 static int my_list_rx_init(struct my_softc *);
144 static int my_list_tx_init(struct my_softc *);
145 static long my_send_cmd_to_phy(struct my_softc *, int, int);
146
/* Read-modify-write helpers for setting/clearing bits in a CSR. */
#define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
#define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
149
/* newbus glue: device method table, driver descriptor and registration. */
static device_method_t my_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, my_probe),
	DEVMETHOD(device_attach, my_attach),
	DEVMETHOD(device_detach, my_detach),
	DEVMETHOD(device_shutdown, my_shutdown),

	{0, 0}
};

static driver_t my_driver = {
	"my",
	my_methods,
	sizeof(struct my_softc)
};

static devclass_t my_devclass;

DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
MODULE_DEPEND(my, pci, 1, 1, 1);
MODULE_DEPEND(my, ether, 1, 1, 1);
171
/*
 * Bit-bang the start of an MII management frame (preamble plus
 * ST/OP/PHYAD/REGAD/TA fields) onto the MDIO pins through the
 * MY_MANAGEMENT register, and return the working register image so the
 * caller can continue clocking data bits in (read) or out (write).
 */
static long
my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
{
	long miir;
	int i;
	int mask, data;

	MY_LOCK_ASSERT(sc);

	/* enable MII output */
	miir = CSR_READ_4(sc, MY_MANAGEMENT);
	miir &= 0xfffffff0;

	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);

		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);

	/* sent out: clock the 16 header bits MSB-first */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MY_MASK_MIIR_MII_MDO;

		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		DELAY(30);

		/* next */
		mask >>= 1;
		/*
		 * For a read, release the MDO drive before the turnaround
		 * bits so the PHY can take over the data line.
		 */
		if (mask == 0x2 && opcode == MY_OP_READ)
			miir &= ~MY_MASK_MIIR_MII_WRITE;
	}

	return miir;
}
223
224
/*
 * Read a 16-bit PHY register.  The MTD803 exposes PHY registers directly
 * in its CSR space; other chips require bit-banged MII management frames
 * (header via my_send_cmd_to_phy(), then 16 data bits clocked in here).
 */
static u_int16_t
my_phy_readreg(struct my_softc * sc, int reg)
{
	long miir;
	int mask, data;

	MY_LOCK_ASSERT(sc);

	if (sc->my_info->my_did == MTD803ID)
		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
	else {
		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);

		/* read data: 16 bits, MSB first */
		mask = 0x8000;
		data = 0;
		while (mask) {
			/* low MDC */
			miir &= ~MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);

			/* read MDI */
			miir = CSR_READ_4(sc, MY_MANAGEMENT);
			if (miir & MY_MASK_MIIR_MII_MDI)
				data |= mask;

			/* high MDC, and wait */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(30);

			/* next */
			mask >>= 1;
		}

		/* low MDC: leave the clock line idle */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}

	return (u_int16_t) data;
}
267
268
/*
 * Write a 16-bit PHY register.  Direct CSR access on the MTD803,
 * bit-banged MII management frame on everything else (header via
 * my_send_cmd_to_phy(), then 16 data bits clocked out here).
 */
static void
my_phy_writereg(struct my_softc * sc, int reg, int data)
{
	long miir;
	int mask;

	MY_LOCK_ASSERT(sc);

	if (sc->my_info->my_did == MTD803ID)
		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
	else {
		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);

		/* write data: 16 bits, MSB first */
		mask = 0x8000;
		while (mask) {
			/* low MDC, prepare MDO */
			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
			if (mask & data)
				miir |= MY_MASK_MIIR_MII_MDO;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);

			/* high MDC */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);

			/* next */
			mask >>= 1;
		}

		/* low MDC: leave the clock line idle */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}
	return;
}
307
308
309 /*
310 * Program the 64-bit multicast hash filter.
311 */
/*
 * Program the 64-bit multicast hash filter (MAR0/MAR1).
 *
 * ALLMULTI/PROMISC short-circuits to accept-all; otherwise each multicast
 * address is hashed into one of 64 bits via the top 6 bits of the
 * inverted big-endian CRC32 of the link-level address.
 */
static void
my_setmulti(struct my_softc * sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = {0, 0};
	struct ifmultiaddr *ifma;
	u_int32_t rxfilt;
	int mcnt = 0;	/* number of multicast addresses programmed */

	MY_LOCK_ASSERT(sc);

	ifp = sc->my_ifp;

	rxfilt = CSR_READ_4(sc, MY_TCRRCR);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= MY_AM;
		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);

		return;
	}
	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, MY_MAR0, 0);
	CSR_WRITE_4(sc, MY_MAR1, 0);

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* top 6 bits of ~CRC32 select one of the 64 filter bits */
		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* only enable multicast acceptance if something was programmed */
	if (mcnt)
		rxfilt |= MY_AM;
	else
		rxfilt &= ~MY_AM;
	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
	return;
}
364
365 /*
366 * Initiate an autonegotiation session.
367 */
368 static void
369 my_autoneg_xmit(struct my_softc * sc)
370 {
371 u_int16_t phy_sts = 0;
372
373 MY_LOCK_ASSERT(sc);
374
375 my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
376 DELAY(500);
377 while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
378
379 phy_sts = my_phy_readreg(sc, PHY_BMCR);
380 phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
381 my_phy_writereg(sc, PHY_BMCR, phy_sts);
382
383 return;
384 }
385
386
387 /*
388 * Invoke autonegotiation on a PHY.
389 */
390 static void
391 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
392 {
393 u_int16_t phy_sts = 0, media, advert, ability;
394 u_int16_t ability2 = 0;
395 struct ifnet *ifp;
396 struct ifmedia *ifm;
397
398 MY_LOCK_ASSERT(sc);
399
400 ifm = &sc->ifmedia;
401 ifp = sc->my_ifp;
402
403 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
404
405 #ifndef FORCE_AUTONEG_TFOUR
406 /*
407 * First, see if autoneg is supported. If not, there's no point in
408 * continuing.
409 */
410 phy_sts = my_phy_readreg(sc, PHY_BMSR);
411 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
412 if (verbose)
413 if_printf(ifp, "autonegotiation not supported\n");
414 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
415 return;
416 }
417 #endif
418 switch (flag) {
419 case MY_FLAG_FORCEDELAY:
420 /*
421 * XXX Never use this option anywhere but in the probe
422 * routine: making the kernel stop dead in its tracks for
423 * three whole seconds after we've gone multi-user is really
424 * bad manners.
425 */
426 my_autoneg_xmit(sc);
427 DELAY(5000000);
428 break;
429 case MY_FLAG_SCHEDDELAY:
430 /*
431 * Wait for the transmitter to go idle before starting an
432 * autoneg session, otherwise my_start() may clobber our
433 * timeout, and we don't want to allow transmission during an
434 * autoneg session since that can screw it up.
435 */
436 if (sc->my_cdata.my_tx_head != NULL) {
437 sc->my_want_auto = 1;
438 MY_UNLOCK(sc);
439 return;
440 }
441 my_autoneg_xmit(sc);
442 ifp->if_timer = 5;
443 sc->my_autoneg = 1;
444 sc->my_want_auto = 0;
445 return;
446 case MY_FLAG_DELAYTIMEO:
447 ifp->if_timer = 0;
448 sc->my_autoneg = 0;
449 break;
450 default:
451 if_printf(ifp, "invalid autoneg flag: %d\n", flag);
452 return;
453 }
454
455 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
456 if (verbose)
457 if_printf(ifp, "autoneg complete, ");
458 phy_sts = my_phy_readreg(sc, PHY_BMSR);
459 } else {
460 if (verbose)
461 if_printf(ifp, "autoneg not complete, ");
462 }
463
464 media = my_phy_readreg(sc, PHY_BMCR);
465
466 /* Link is good. Report modes and set duplex mode. */
467 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
468 if (verbose)
469 if_printf(ifp, "link status good. ");
470 advert = my_phy_readreg(sc, PHY_ANAR);
471 ability = my_phy_readreg(sc, PHY_LPAR);
472 if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
473 (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
474 ability2 = my_phy_readreg(sc, PHY_1000SR);
475 if (ability2 & PHY_1000SR_1000BTXFULL) {
476 advert = 0;
477 ability = 0;
478 /*
479 * this version did not support 1000M,
480 * ifm->ifm_media =
481 * IFM_ETHER|IFM_1000_T|IFM_FDX;
482 */
483 ifm->ifm_media =
484 IFM_ETHER | IFM_100_TX | IFM_FDX;
485 media &= ~PHY_BMCR_SPEEDSEL;
486 media |= PHY_BMCR_1000;
487 media |= PHY_BMCR_DUPLEX;
488 printf("(full-duplex, 1000Mbps)\n");
489 } else if (ability2 & PHY_1000SR_1000BTXHALF) {
490 advert = 0;
491 ability = 0;
492 /*
493 * this version did not support 1000M,
494 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
495 */
496 ifm->ifm_media = IFM_ETHER | IFM_100_TX;
497 media &= ~PHY_BMCR_SPEEDSEL;
498 media &= ~PHY_BMCR_DUPLEX;
499 media |= PHY_BMCR_1000;
500 printf("(half-duplex, 1000Mbps)\n");
501 }
502 }
503 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
504 ifm->ifm_media = IFM_ETHER | IFM_100_T4;
505 media |= PHY_BMCR_SPEEDSEL;
506 media &= ~PHY_BMCR_DUPLEX;
507 printf("(100baseT4)\n");
508 } else if (advert & PHY_ANAR_100BTXFULL &&
509 ability & PHY_ANAR_100BTXFULL) {
510 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
511 media |= PHY_BMCR_SPEEDSEL;
512 media |= PHY_BMCR_DUPLEX;
513 printf("(full-duplex, 100Mbps)\n");
514 } else if (advert & PHY_ANAR_100BTXHALF &&
515 ability & PHY_ANAR_100BTXHALF) {
516 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
517 media |= PHY_BMCR_SPEEDSEL;
518 media &= ~PHY_BMCR_DUPLEX;
519 printf("(half-duplex, 100Mbps)\n");
520 } else if (advert & PHY_ANAR_10BTFULL &&
521 ability & PHY_ANAR_10BTFULL) {
522 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
523 media &= ~PHY_BMCR_SPEEDSEL;
524 media |= PHY_BMCR_DUPLEX;
525 printf("(full-duplex, 10Mbps)\n");
526 } else if (advert) {
527 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
528 media &= ~PHY_BMCR_SPEEDSEL;
529 media &= ~PHY_BMCR_DUPLEX;
530 printf("(half-duplex, 10Mbps)\n");
531 }
532 media &= ~PHY_BMCR_AUTONEGENBL;
533
534 /* Set ASIC's duplex mode to match the PHY. */
535 my_phy_writereg(sc, PHY_BMCR, media);
536 my_setcfg(sc, media);
537 } else {
538 if (verbose)
539 if_printf(ifp, "no carrier\n");
540 }
541
542 my_init_locked(sc);
543 if (sc->my_tx_pend) {
544 sc->my_autoneg = 0;
545 sc->my_tx_pend = 0;
546 my_start_locked(ifp);
547 }
548 return;
549 }
550
551 /*
552 * To get PHY ability.
553 */
554 static void
555 my_getmode_mii(struct my_softc * sc)
556 {
557 u_int16_t bmsr;
558 struct ifnet *ifp;
559
560 MY_LOCK_ASSERT(sc);
561 ifp = sc->my_ifp;
562 bmsr = my_phy_readreg(sc, PHY_BMSR);
563 if (bootverbose)
564 if_printf(ifp, "PHY status word: %x\n", bmsr);
565
566 /* fallback */
567 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
568
569 if (bmsr & PHY_BMSR_10BTHALF) {
570 if (bootverbose)
571 if_printf(ifp, "10Mbps half-duplex mode supported\n");
572 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
573 0, NULL);
574 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
575 }
576 if (bmsr & PHY_BMSR_10BTFULL) {
577 if (bootverbose)
578 if_printf(ifp, "10Mbps full-duplex mode supported\n");
579
580 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
581 0, NULL);
582 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
583 }
584 if (bmsr & PHY_BMSR_100BTXHALF) {
585 if (bootverbose)
586 if_printf(ifp, "100Mbps half-duplex mode supported\n");
587 ifp->if_baudrate = 100000000;
588 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
589 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
590 0, NULL);
591 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
592 }
593 if (bmsr & PHY_BMSR_100BTXFULL) {
594 if (bootverbose)
595 if_printf(ifp, "100Mbps full-duplex mode supported\n");
596 ifp->if_baudrate = 100000000;
597 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
598 0, NULL);
599 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
600 }
601 /* Some also support 100BaseT4. */
602 if (bmsr & PHY_BMSR_100BT4) {
603 if (bootverbose)
604 if_printf(ifp, "100baseT4 mode supported\n");
605 ifp->if_baudrate = 100000000;
606 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
607 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
608 #ifdef FORCE_AUTONEG_TFOUR
609 if (bootverbose)
610 if_printf(ifp, "forcing on autoneg support for BT4\n");
611 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0 NULL):
612 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
613 #endif
614 }
615 #if 0 /* this version did not support 1000M, */
616 if (sc->my_pinfo->my_vid == MarvellPHYID0) {
617 if (bootverbose)
618 if_printf(ifp, "1000Mbps half-duplex mode supported\n");
619
620 ifp->if_baudrate = 1000000000;
621 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
622 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
623 0, NULL);
624 if (bootverbose)
625 if_printf(ifp, "1000Mbps full-duplex mode supported\n");
626 ifp->if_baudrate = 1000000000;
627 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
628 0, NULL);
629 sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
630 }
631 #endif
632 if (bmsr & PHY_BMSR_CANAUTONEG) {
633 if (bootverbose)
634 if_printf(ifp, "autoneg supported\n");
635 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
636 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
637 }
638 return;
639 }
640
641 /*
642 * Set speed and duplex mode.
643 */
/*
 * Set speed and duplex mode: force the PHY's BMCR to the requested
 * ifmedia word (canceling any autoneg session in progress) and then
 * bring the ASIC into agreement via my_setcfg().
 */
static void
my_setmode_mii(struct my_softc * sc, int media)
{
	u_int16_t bmcr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/*
	 * If an autoneg session is in progress, stop it.
	 */
	if (sc->my_autoneg) {
		if_printf(ifp, "canceling autoneg session\n");
		ifp->if_timer = sc->my_autoneg = sc->my_want_auto = 0;
		bmcr = my_phy_readreg(sc, PHY_BMCR);
		bmcr &= ~PHY_BMCR_AUTONEGENBL;
		my_phy_writereg(sc, PHY_BMCR, bmcr);
	}
	if_printf(ifp, "selecting MII, ");
	bmcr = my_phy_readreg(sc, PHY_BMCR);
	/* start from a clean slate: clear all mode-selection bits */
	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
	    PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);

#if 0				/* this version did not support 1000M, */
	if (IFM_SUBTYPE(media) == IFM_1000_T) {
		printf("1000Mbps/T4, half-duplex\n");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
		bmcr |= PHY_BMCR_1000;
	}
#endif
	if (IFM_SUBTYPE(media) == IFM_100_T4) {
		printf("100Mbps/T4, half-duplex\n");
		bmcr |= PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		printf("100Mbps, ");
		bmcr |= PHY_BMCR_SPEEDSEL;
	}
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		printf("10Mbps, ");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
	}
	if ((media & IFM_GMASK) == IFM_FDX) {
		printf("full duplex\n");
		bmcr |= PHY_BMCR_DUPLEX;
	} else {
		printf("half duplex\n");
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	my_phy_writereg(sc, PHY_BMCR, bmcr);
	my_setcfg(sc, bmcr);
	return;
}
699
700 /*
701 * The Myson manual states that in order to fiddle with the 'full-duplex' and
702 * '100Mbps' bits in the netconfig register, we first have to put the
703 * transmit and/or receive logic in the idle state.
704 */
/*
 * The Myson manual states that in order to fiddle with the 'full-duplex' and
 * '100Mbps' bits in the netconfig register, we first have to put the
 * transmit and/or receive logic in the idle state.
 *
 * Mirrors the PHY's BMCR speed/duplex selection into the chip's TCRRCR,
 * idling (and afterwards restarting) tx/rx if they were running.
 */
static void
my_setcfg(struct my_softc * sc, int bmcr)
{
	int i, restart = 0;

	MY_LOCK_ASSERT(sc);
	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
		/* engines are running: stop them and wait until idle */
		restart = 1;
		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
		for (i = 0; i < MY_TIMEOUT; i++) {
			DELAY(10);
			if (!(CSR_READ_4(sc, MY_TCRRCR) &
			    (MY_TXRUN | MY_RXRUN)))
				break;
		}
		if (i == MY_TIMEOUT)
			if_printf(sc->my_ifp,
			    "failed to force tx and rx to idle \n");
	}
	/* translate BMCR speed selection into the chip's PS bits */
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_1000)
		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_DUPLEX)
		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
	if (restart)
		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
	return;
}
738
739 static void
740 my_reset(struct my_softc * sc)
741 {
742 register int i;
743
744 MY_LOCK_ASSERT(sc);
745 MY_SETBIT(sc, MY_BCR, MY_SWR);
746 for (i = 0; i < MY_TIMEOUT; i++) {
747 DELAY(10);
748 if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
749 break;
750 }
751 if (i == MY_TIMEOUT)
752 if_printf(sc->my_ifp, "reset never completed!\n");
753
754 /* Wait a little while for the chip to get its brains in order. */
755 DELAY(1000);
756 return;
757 }
758
759 /*
760 * Probe for a Myson chip. Check the PCI vendor and device IDs against our
761 * list and return a device name if we find a match.
762 */
763 static int
764 my_probe(device_t dev)
765 {
766 struct my_type *t;
767
768 t = my_devs;
769 while (t->my_name != NULL) {
770 if ((pci_get_vendor(dev) == t->my_vid) &&
771 (pci_get_device(dev) == t->my_did)) {
772 device_set_desc(dev, t->my_name);
773 my_info_tmp = t;
774 return (BUS_PROBE_DEFAULT);
775 }
776 t++;
777 }
778 return (ENXIO);
779 }
780
781 /*
782 * Attach the interface. Allocate softc structures, do ifmedia setup and
783 * ethernet/BPF attach.
784 */
/*
 * Attach the interface.  Allocate softc structures, do ifmedia setup and
 * ethernet/BPF attach.  On failure, unwinds via the labels at the bottom
 * in reverse order of acquisition.
 */
static int
my_attach(device_t dev)
{
	int i;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t iobase;
	struct my_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	unsigned int round;
	caddr_t roundptr;
	struct my_type *p;
	u_int16_t phy_vid, phy_did, phy_sts = 0;
	int rid, error = 0;

	sc = device_get_softc(dev);
	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * NOTE(review): toggling MY_USEIOSPACE here has no effect on
	 * MY_RES/MY_RID -- those were fixed by the preprocessor, which
	 * evaluates the undefined macro as 0.  Confirm intent.
	 */
	if (my_info_tmp->my_did == MTD800ID) {
		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
		if (iobase & 0x300)
			MY_USEIOSPACE = 0;
	}

	rid = MY_RID;
	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);

	if (sc->my_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto destroy_mutex;
	}
	sc->my_btag = rman_get_bustag(sc->my_res);
	sc->my_bhandle = rman_get_bushandle(sc->my_res);

	rid = 0;
	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->my_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto release_io;
	}

	sc->my_info = my_info_tmp;

	/* Reset the adapter. */
	MY_LOCK(sc);
	my_reset(sc);
	MY_UNLOCK(sc);

	/*
	 * Get station address: read the MAC out of the PAR0..PAR5 CSRs.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);

	/*
	 * Allocate the descriptor lists with 8 bytes of slack so the
	 * rounding loop below can align the start to an 8-byte boundary.
	 */
	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
	    M_DEVBUF, M_NOWAIT);
	if (sc->my_ldata_ptr == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto release_irq;
	}
	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
	roundptr = sc->my_ldata_ptr;
	/* advance roundptr until it is 8-byte aligned */
	for (i = 0; i < 8; i++) {
		if (round % 8) {
			round++;
			roundptr++;
		} else
			break;
	}
	sc->my_ldata = (struct my_list_data *) roundptr;
	bzero(sc->my_ldata, sizeof(struct my_list_data));

	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto free_ldata;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = my_ioctl;
	ifp->if_start = my_start;
	ifp->if_watchdog = my_watchdog;
	ifp->if_init = my_init;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * The MTD803 exposes its PHY directly; other chips need an MII
	 * bus scan to find one.
	 */
	if (sc->my_info->my_did == MTD803ID)
		sc->my_pinfo = my_phys;
	else {
		if (bootverbose)
			device_printf(dev, "probing for a PHY\n");
		MY_LOCK(sc);
		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
			if (bootverbose)
				device_printf(dev, "checking address: %d\n", i);
			sc->my_phy_addr = i;
			phy_sts = my_phy_readreg(sc, PHY_BMSR);
			/* all-zeros / all-ones means no PHY at this address */
			if ((phy_sts != 0) && (phy_sts != 0xffff))
				break;
			else
				phy_sts = 0;
		}
		if (phy_sts) {
			phy_vid = my_phy_readreg(sc, PHY_VENID);
			phy_did = my_phy_readreg(sc, PHY_DEVID);
			if (bootverbose) {
				device_printf(dev, "found PHY at address %d, ",
				    sc->my_phy_addr);
				printf("vendor id: %x device id: %x\n",
				    phy_vid, phy_did);
			}
			/* match the vendor ID against our PHY table */
			p = my_phys;
			while (p->my_vid) {
				if (phy_vid == p->my_vid) {
					sc->my_pinfo = p;
					break;
				}
				p++;
			}
			if (sc->my_pinfo == NULL)
				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
			if (bootverbose)
				device_printf(dev, "PHY type: %s\n",
				    sc->my_pinfo->my_name);
		} else {
			MY_UNLOCK(sc);
			device_printf(dev, "MII without any phy!\n");
			error = ENXIO;
			goto free_if;
		}
		MY_UNLOCK(sc);
	}

	/* Do ifmedia setup. */
	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
	MY_LOCK(sc);
	my_getmode_mii(sc);
	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
	media = sc->ifmedia.ifm_media;
	my_stop(sc);
	MY_UNLOCK(sc);
	ifmedia_set(&sc->ifmedia, media);

	ether_ifattach(ifp, eaddr);

	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
			       my_intr, sc, &sc->my_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto detach_if;
	}

	return (0);

	/* error unwind: release resources in reverse order */
detach_if:
	ether_ifdetach(ifp);
free_if:
	if_free(ifp);
free_ldata:
	free(sc->my_ldata_ptr, M_DEVBUF);
release_irq:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
release_io:
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
destroy_mutex:
	mtx_destroy(&sc->my_mtx);
	return (error);
}
971
/*
 * Detach the device: stop the chip, quiesce the interrupt handler,
 * detach the ethernet interface and release all bus resources -- the
 * reverse of my_attach().
 */
static int
my_detach(device_t dev)
{
	struct my_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	/* tear down the interrupt before pulling the ifnet away */
	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);

	ifp = sc->my_ifp;
	ether_ifdetach(ifp);
	if_free(ifp);
	/* descriptor list memory allocated in my_attach() */
	free(sc->my_ldata_ptr, M_DEVBUF);

	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
	mtx_destroy(&sc->my_mtx);
	return (0);
}
994
995
996 /*
997 * Initialize the transmit descriptors.
998 */
999 static int
1000 my_list_tx_init(struct my_softc * sc)
1001 {
1002 struct my_chain_data *cd;
1003 struct my_list_data *ld;
1004 int i;
1005
1006 MY_LOCK_ASSERT(sc);
1007 cd = &sc->my_cdata;
1008 ld = sc->my_ldata;
1009 for (i = 0; i < MY_TX_LIST_CNT; i++) {
1010 cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1011 if (i == (MY_TX_LIST_CNT - 1))
1012 cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1013 else
1014 cd->my_tx_chain[i].my_nextdesc =
1015 &cd->my_tx_chain[i + 1];
1016 }
1017 cd->my_tx_free = &cd->my_tx_chain[0];
1018 cd->my_tx_tail = cd->my_tx_head = NULL;
1019 return (0);
1020 }
1021
1022 /*
1023 * Initialize the RX descriptors and allocate mbufs for them. Note that we
1024 * arrange the descriptors in a closed ring, so that the last descriptor
1025 * points back to the first.
1026 */
1027 static int
1028 my_list_rx_init(struct my_softc * sc)
1029 {
1030 struct my_chain_data *cd;
1031 struct my_list_data *ld;
1032 int i;
1033
1034 MY_LOCK_ASSERT(sc);
1035 cd = &sc->my_cdata;
1036 ld = sc->my_ldata;
1037 for (i = 0; i < MY_RX_LIST_CNT; i++) {
1038 cd->my_rx_chain[i].my_ptr =
1039 (struct my_desc *) & ld->my_rx_list[i];
1040 if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1041 MY_UNLOCK(sc);
1042 return (ENOBUFS);
1043 }
1044 if (i == (MY_RX_LIST_CNT - 1)) {
1045 cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1046 ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1047 } else {
1048 cd->my_rx_chain[i].my_nextdesc =
1049 &cd->my_rx_chain[i + 1];
1050 ld->my_rx_list[i].my_next =
1051 vtophys(&ld->my_rx_list[i + 1]);
1052 }
1053 }
1054 cd->my_rx_head = &cd->my_rx_chain[0];
1055 return (0);
1056 }
1057
1058 /*
1059 * Initialize an RX descriptor and attach an MBUF cluster.
1060 */
1061 static int
1062 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1063 {
1064 struct mbuf *m_new = NULL;
1065
1066 MY_LOCK_ASSERT(sc);
1067 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1068 if (m_new == NULL) {
1069 if_printf(sc->my_ifp,
1070 "no memory for rx list -- packet dropped!\n");
1071 return (ENOBUFS);
1072 }
1073 MCLGET(m_new, M_DONTWAIT);
1074 if (!(m_new->m_flags & M_EXT)) {
1075 if_printf(sc->my_ifp,
1076 "no memory for rx list -- packet dropped!\n");
1077 m_freem(m_new);
1078 return (ENOBUFS);
1079 }
1080 c->my_mbuf = m_new;
1081 c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1082 c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1083 c->my_ptr->my_status = MY_OWNByNIC;
1084 return (0);
1085 }
1086
1087 /*
1088 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1089 * level protocols.
1090 */
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
 * level protocols.  Walks the RX ring from my_rx_head until it reaches a
 * descriptor still owned by the NIC, recycling descriptors as it goes.
 */
static void
my_rxeof(struct my_softc * sc)
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct my_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/* stop at the first descriptor the NIC still owns */
	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
	    & MY_OWNByNIC)) {
		cur_rx = sc->my_cdata.my_rx_head;
		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;

		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
			ifp->if_ierrors++;
			/* return the descriptor to the NIC and move on */
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			continue;
		}
		/* No errors; receive the packet. */
		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
		total_len -= ETHER_CRC_LEN;

		if (total_len < MINCLSIZE) {
			/* small frame: copy it out so the cluster can be reused */
			m = m_devget(mtod(cur_rx->my_mbuf, char *),
			    total_len, 0, ifp, NULL);
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			if (m == NULL) {
				ifp->if_ierrors++;
				continue;
			}
		} else {
			m = cur_rx->my_mbuf;
			/*
			 * Try to conjure up a new mbuf cluster. If that
			 * fails, it means we have an out of memory condition
			 * and should leave the buffer in place and continue.
			 * This will result in a lost packet, but there's
			 * little else we can do in this situation.
			 */
			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
				ifp->if_ierrors++;
				cur_rx->my_ptr->my_status = MY_OWNByNIC;
				continue;
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's a
		 * broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (bpf_peers_present(ifp->if_bpf)) {
			bpf_mtap(ifp->if_bpf, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, IFP2ENADDR(sc->my_ifp),
				ETHER_ADDR_LEN) &&
			     (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* drop the lock across the call up the stack */
		MY_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		MY_LOCK(sc);
	}
	return;
}
1168
1169
/*
 * A frame was downloaded to the chip. It's safe for us to clean up the list
 * buffers.
 */
static void
my_txeof(struct my_softc * sc)
{
	struct my_chain *cur_tx;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/* Clear the timeout timer. */
	ifp->if_timer = 0;
	if (sc->my_cdata.my_tx_head == NULL) {
		return;
	}
	/*
	 * Go through our tx list and free mbufs for those frames that have
	 * been transmitted.
	 */
	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->my_cdata.my_tx_head;
		txstat = MY_TXSTATUS(cur_tx);
		/*
		 * Stop at the first descriptor the NIC still owns or that
		 * was never handed to it.
		 */
		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
			break;
		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
			/* Non-enhanced mode: stats come from the descriptor. */
			if (txstat & MY_TXERR) {
				ifp->if_oerrors++;
				if (txstat & MY_EC) /* excessive collision */
					ifp->if_collisions++;
				if (txstat & MY_LC)	/* late collision */
					ifp->if_collisions++;
			}
			ifp->if_collisions += (txstat & MY_NCRMASK) >>
			    MY_NCRShift;
		}
		ifp->if_opackets++;
		m_freem(cur_tx->my_mbuf);
		cur_tx->my_mbuf = NULL;
		/* Ring fully drained: reset head/tail and stop. */
		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
			sc->my_cdata.my_tx_head = NULL;
			sc->my_cdata.my_tx_tail = NULL;
			break;
		}
		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
	}
	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
		/* Enhanced mode: collision count is read from the TSR. */
		ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask);
	}
	return;
}
1224
1225 /*
1226 * TX 'end of channel' interrupt handler.
1227 */
1228 static void
1229 my_txeoc(struct my_softc * sc)
1230 {
1231 struct ifnet *ifp;
1232
1233 MY_LOCK_ASSERT(sc);
1234 ifp = sc->my_ifp;
1235 ifp->if_timer = 0;
1236 if (sc->my_cdata.my_tx_head == NULL) {
1237 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1238 sc->my_cdata.my_tx_tail = NULL;
1239 if (sc->my_want_auto)
1240 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1241 } else {
1242 if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1243 MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1244 ifp->if_timer = 5;
1245 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1246 }
1247 }
1248 return;
1249 }
1250
/*
 * Interrupt service routine: ack and dispatch all pending causes, then
 * restart output if the send queue is non-empty.
 */
static void
my_intr(void *arg)
{
	struct my_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	MY_LOCK(sc);
	ifp = sc->my_ifp;
	if (!(ifp->if_flags & IFF_UP)) {
		/* Interface is down: ignore stray/shared interrupts. */
		MY_UNLOCK(sc);
		return;
	}
	/* Disable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);

	/* Loop until the chip reports no further pending causes. */
	for (;;) {
		status = CSR_READ_4(sc, MY_ISR);
		status &= MY_INTRS;
		if (status)
			CSR_WRITE_4(sc, MY_ISR, status);	/* ack causes */
		else
			break;

		if (status & MY_RI)	/* receive interrupt */
			my_rxeof(sc);

		if ((status & MY_RBU) || (status & MY_RxErr)) {
			/* rx buffer unavailable or rx error */
			ifp->if_ierrors++;
#ifdef foo
			my_stop(sc);
			my_reset(sc);
			my_init_locked(sc);
#endif
		}
		if (status & MY_TI)	/* tx interrupt */
			my_txeof(sc);
		if (status & MY_ETI)	/* tx early interrupt */
			my_txeof(sc);
		if (status & MY_TBU)	/* tx buffer unavailable */
			my_txeoc(sc);

#if 0				/* 90/1/18 delete */
		if (status & MY_FBE) {
			my_reset(sc);
			my_init_locked(sc);
		}
#endif

	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}
1311
1312 /*
1313 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1314 * pointers to the fragment pointers.
1315 */
1316 static int
1317 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1318 {
1319 struct my_desc *f = NULL;
1320 int total_len;
1321 struct mbuf *m, *m_new = NULL;
1322
1323 MY_LOCK_ASSERT(sc);
1324 /* calculate the total tx pkt length */
1325 total_len = 0;
1326 for (m = m_head; m != NULL; m = m->m_next)
1327 total_len += m->m_len;
1328 /*
1329 * Start packing the mbufs in this chain into the fragment pointers.
1330 * Stop when we run out of fragments or hit the end of the mbuf
1331 * chain.
1332 */
1333 m = m_head;
1334 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1335 if (m_new == NULL) {
1336 if_printf(sc->my_ifp, "no memory for tx list");
1337 return (1);
1338 }
1339 if (m_head->m_pkthdr.len > MHLEN) {
1340 MCLGET(m_new, M_DONTWAIT);
1341 if (!(m_new->m_flags & M_EXT)) {
1342 m_freem(m_new);
1343 if_printf(sc->my_ifp, "no memory for tx list");
1344 return (1);
1345 }
1346 }
1347 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1348 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1349 m_freem(m_head);
1350 m_head = m_new;
1351 f = &c->my_ptr->my_frag[0];
1352 f->my_status = 0;
1353 f->my_data = vtophys(mtod(m_new, caddr_t));
1354 total_len = m_new->m_len;
1355 f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1356 f->my_ctl |= total_len << MY_PKTShift; /* pkt size */
1357 f->my_ctl |= total_len; /* buffer size */
1358 /* 89/12/29 add, for mtd891 *//* [ 89? ] */
1359 if (sc->my_info->my_did == MTD891ID)
1360 f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1361 c->my_mbuf = m_head;
1362 c->my_lastdesc = 0;
1363 MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1364 return (0);
1365 }
1366
1367 /*
1368 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1369 * to the mbuf data regions directly in the transmit lists. We also save a
1370 * copy of the pointers since the transmit list fragment pointers are
1371 * physical addresses.
1372 */
1373 static void
1374 my_start(struct ifnet * ifp)
1375 {
1376 struct my_softc *sc;
1377
1378 sc = ifp->if_softc;
1379 MY_LOCK(sc);
1380 my_start_locked(ifp);
1381 MY_UNLOCK(sc);
1382 }
1383
1384 static void
1385 my_start_locked(struct ifnet * ifp)
1386 {
1387 struct my_softc *sc;
1388 struct mbuf *m_head = NULL;
1389 struct my_chain *cur_tx = NULL, *start_tx;
1390
1391 sc = ifp->if_softc;
1392 MY_LOCK_ASSERT(sc);
1393 if (sc->my_autoneg) {
1394 sc->my_tx_pend = 1;
1395 return;
1396 }
1397 /*
1398 * Check for an available queue slot. If there are none, punt.
1399 */
1400 if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1401 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1402 return;
1403 }
1404 start_tx = sc->my_cdata.my_tx_free;
1405 while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1406 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1407 if (m_head == NULL)
1408 break;
1409
1410 /* Pick a descriptor off the free list. */
1411 cur_tx = sc->my_cdata.my_tx_free;
1412 sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1413
1414 /* Pack the data into the descriptor. */
1415 my_encap(sc, cur_tx, m_head);
1416
1417 if (cur_tx != start_tx)
1418 MY_TXOWN(cur_tx) = MY_OWNByNIC;
1419 #if NBPFILTER > 0
1420 /*
1421 * If there's a BPF listener, bounce a copy of this frame to
1422 * him.
1423 */
1424 BPF_MTAP(ifp, cur_tx->my_mbuf);
1425 #endif
1426 }
1427 /*
1428 * If there are no packets queued, bail.
1429 */
1430 if (cur_tx == NULL) {
1431 return;
1432 }
1433 /*
1434 * Place the request for the upload interrupt in the last descriptor
1435 * in the chain. This way, if we're chaining several packets at once,
1436 * we'll only get an interupt once for the whole chain rather than
1437 * once for each packet.
1438 */
1439 MY_TXCTL(cur_tx) |= MY_TXIC;
1440 cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1441 sc->my_cdata.my_tx_tail = cur_tx;
1442 if (sc->my_cdata.my_tx_head == NULL)
1443 sc->my_cdata.my_tx_head = start_tx;
1444 MY_TXOWN(start_tx) = MY_OWNByNIC;
1445 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */
1446
1447 /*
1448 * Set a timeout in case the chip goes out to lunch.
1449 */
1450 ifp->if_timer = 5;
1451 return;
1452 }
1453
static void
my_init(void *xsc)
{
	struct my_softc *sc;

	/* Serialized wrapper around my_init_locked(). */
	sc = xsc;
	MY_LOCK(sc);
	my_init_locked(sc);
	MY_UNLOCK(sc);
}
1463
/*
 * Bring the hardware to a fully initialized, running state: reset the
 * chip, program DMA/burst parameters, rebuild the RX/TX rings and enable
 * the receiver and transmitter.  Caller holds the driver lock.
 */
static void
my_init_locked(struct my_softc *sc)
{
	struct ifnet *ifp = sc->my_ifp;
	u_int16_t phy_bmcr = 0;

	MY_LOCK_ASSERT(sc);
	/* Don't reinitialize underneath a running autonegotiation. */
	if (sc->my_autoneg) {
		return;
	}
	/* Save the PHY control register; it is restored after the reset. */
	if (sc->my_pinfo != NULL)
		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	my_stop(sc);
	my_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
#if 0 /* 89/9/1 modify, */
	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
#endif
	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
	/*
	 * 89/12/29 add, for mtd891,
	 */
	if (sc->my_info->my_did == MTD891ID) {
		MY_SETBIT(sc, MY_BCR, MY_PROG);
		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
	}
	my_setcfg(sc, phy_bmcr);
	/* Init circular RX list. */
	if (my_list_rx_init(sc) == ENOBUFS) {
		if_printf(ifp, "init failed: no memory for rx buffers\n");
		my_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	my_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);

	/*
	 * Program the multicast filter, if necessary.
	 */
	my_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
	/* TX base address must be loaded with the transmitter disabled. */
	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
	MY_SETBIT(sc, MY_TCRRCR, MY_TE);

	/* Restore state of BMCR */
	if (sc->my_pinfo != NULL)
		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	return;
}
1552
1553 /*
1554 * Set media options.
1555 */
1556
1557 static int
1558 my_ifmedia_upd(struct ifnet * ifp)
1559 {
1560 struct my_softc *sc;
1561 struct ifmedia *ifm;
1562
1563 sc = ifp->if_softc;
1564 MY_LOCK(sc);
1565 ifm = &sc->ifmedia;
1566 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1567 MY_UNLOCK(sc);
1568 return (EINVAL);
1569 }
1570 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1571 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1572 else
1573 my_setmode_mii(sc, ifm->ifm_media);
1574 MY_UNLOCK(sc);
1575 return (0);
1576 }
1577
/*
 * Report current media status.
 */
static void
my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct my_softc *sc;
	u_int16_t advert = 0, ability = 0;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifmr->ifm_active = IFM_ETHER;
	/* Forced mode: read speed/duplex straight from the PHY BMCR. */
	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
#if 0				/* this version did not support 1000M, */
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
#endif
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		MY_UNLOCK(sc);
		return;
	}
	/*
	 * Autonegotiated mode: intersect link-partner ability with our
	 * advertisement, best media first.
	 */
	ability = my_phy_readreg(sc, PHY_LPAR);
	advert = my_phy_readreg(sc, PHY_ANAR);

	/*
	 * NOTE(review): the disabled block below uses '=' where '=='
	 * was almost certainly intended; left untouched since it is
	 * compiled out.
	 */
#if 0				/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid = MarvellPHYID0) {
		ability2 = my_phy_readreg(sc, PHY_1000SR);
		if (ability2 & PHY_1000SR_1000BTXFULL) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
		} else if (ability & PHY_1000SR_1000BTXHALF) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
		}
	}
#endif
	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
	MY_UNLOCK(sc);
	return;
}
1638
1639 static int
1640 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1641 {
1642 struct my_softc *sc = ifp->if_softc;
1643 struct ifreq *ifr = (struct ifreq *) data;
1644 int error;
1645
1646 switch (command) {
1647 case SIOCSIFFLAGS:
1648 MY_LOCK(sc);
1649 if (ifp->if_flags & IFF_UP)
1650 my_init_locked(sc);
1651 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1652 my_stop(sc);
1653 MY_UNLOCK(sc);
1654 error = 0;
1655 break;
1656 case SIOCADDMULTI:
1657 case SIOCDELMULTI:
1658 MY_LOCK(sc);
1659 my_setmulti(sc);
1660 MY_UNLOCK(sc);
1661 error = 0;
1662 break;
1663 case SIOCGIFMEDIA:
1664 case SIOCSIFMEDIA:
1665 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1666 break;
1667 default:
1668 error = ether_ioctl(ifp, command, data);
1669 break;
1670 }
1671 return (error);
1672 }
1673
1674 static void
1675 my_watchdog(struct ifnet * ifp)
1676 {
1677 struct my_softc *sc;
1678
1679 sc = ifp->if_softc;
1680 MY_LOCK(sc);
1681 if (sc->my_autoneg) {
1682 my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
1683 MY_UNLOCK(sc);
1684 return;
1685 }
1686 ifp->if_oerrors++;
1687 if_printf(ifp, "watchdog timeout\n");
1688 if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1689 if_printf(ifp, "no carrier - transceiver cable problem?\n");
1690 my_stop(sc);
1691 my_reset(sc);
1692 my_init_locked(sc);
1693 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1694 my_start_locked(ifp);
1695 MY_LOCK(sc);
1696 return;
1697 }
1698
1699
1700 /*
1701 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1702 */
1703 static void
1704 my_stop(struct my_softc * sc)
1705 {
1706 register int i;
1707 struct ifnet *ifp;
1708
1709 MY_LOCK_ASSERT(sc);
1710 ifp = sc->my_ifp;
1711 ifp->if_timer = 0;
1712
1713 MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1714 CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1715 CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1716 CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1717
1718 /*
1719 * Free data in the RX lists.
1720 */
1721 for (i = 0; i < MY_RX_LIST_CNT; i++) {
1722 if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1723 m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1724 sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1725 }
1726 }
1727 bzero((char *)&sc->my_ldata->my_rx_list,
1728 sizeof(sc->my_ldata->my_rx_list));
1729 /*
1730 * Free the TX list buffers.
1731 */
1732 for (i = 0; i < MY_TX_LIST_CNT; i++) {
1733 if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1734 m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1735 sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1736 }
1737 }
1738 bzero((char *)&sc->my_ldata->my_tx_list,
1739 sizeof(sc->my_ldata->my_tx_list));
1740 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1741 return;
1742 }
1743
1744 /*
1745 * Stop all chip I/O so that the kernel's probe routines don't get confused
1746 * by errant DMAs when rebooting.
1747 */
1748 static void
1749 my_shutdown(device_t dev)
1750 {
1751 struct my_softc *sc;
1752
1753 sc = device_get_softc(dev);
1754 MY_LOCK(sc);
1755 my_stop(sc);
1756 MY_UNLOCK(sc);
1757 return;
1758 }
Cache object: b7b5e371fee8e56821cf972446673deb
|