FreeBSD/Linux Kernel Cross Reference
sys/pci/if_pcn.c
1 /*-
2 * Copyright (c) 2000 Berkeley Software Design, Inc.
3 * Copyright (c) 1997, 1998, 1999, 2000
4 * Bill Paul <wpaul@osd.bsdi.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/5.4/sys/pci/if_pcn.c 141090 2005-01-31 23:27:04Z imp $");
36
37 /*
38 * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available
39 * from http://www.amd.com.
40 *
41 * The AMD PCnet/PCI controllers are more advanced and functional
42 * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain
43 * backwards compatibility with the LANCE and thus can be made
44 * to work with older LANCE drivers. This is in fact how the
45 * PCnet/PCI chips were supported in FreeBSD originally. The trouble
46 * is that the PCnet/PCI devices offer several performance enhancements
47 * which can't be exploited in LANCE compatibility mode. Chief among
48 * these enhancements is the ability to perform PCI DMA operations
49 * using 32-bit addressing (which eliminates the need for ISA
50 * bounce-buffering), and special receive buffer alignment (which
51 * allows the receive handler to pass packets to the upper protocol
52 * layers without copying on both the x86 and alpha platforms).
53 */
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/sockio.h>
58 #include <sys/mbuf.h>
59 #include <sys/malloc.h>
60 #include <sys/kernel.h>
61 #include <sys/module.h>
62 #include <sys/socket.h>
63
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/ethernet.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
69
70 #include <net/bpf.h>
71
72 #include <vm/vm.h> /* for vtophys */
73 #include <vm/pmap.h> /* for vtophys */
74 #include <machine/bus_pio.h>
75 #include <machine/bus_memio.h>
76 #include <machine/bus.h>
77 #include <machine/resource.h>
78 #include <sys/bus.h>
79 #include <sys/rman.h>
80
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
83
84 #include <dev/pci/pcireg.h>
85 #include <dev/pci/pcivar.h>
86
87 #define PCN_USEIOSPACE
88
89 #include <pci/if_pcnreg.h>
90
91 MODULE_DEPEND(pcn, pci, 1, 1, 1);
92 MODULE_DEPEND(pcn, ether, 1, 1, 1);
93 MODULE_DEPEND(pcn, miibus, 1, 1, 1);
94
95 /* "controller miibus0" required. See GENERIC if you get errors here. */
96 #include "miibus_if.h"
97
98 /*
99 * Various supported device vendors/types and their names.
100 */
101 static struct pcn_type pcn_devs[] = {
102 { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
103 { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
104 { 0, 0, NULL }
105 };
106
107 static u_int32_t pcn_csr_read (struct pcn_softc *, int);
108 static u_int16_t pcn_csr_read16 (struct pcn_softc *, int);
109 static u_int16_t pcn_bcr_read16 (struct pcn_softc *, int);
110 static void pcn_csr_write (struct pcn_softc *, int, int);
111 static u_int32_t pcn_bcr_read (struct pcn_softc *, int);
112 static void pcn_bcr_write (struct pcn_softc *, int, int);
113
114 static int pcn_probe (device_t);
115 static int pcn_attach (device_t);
116 static int pcn_detach (device_t);
117
118 static int pcn_newbuf (struct pcn_softc *, int, struct mbuf *);
119 static int pcn_encap (struct pcn_softc *,
120 struct mbuf *, u_int32_t *);
121 static void pcn_rxeof (struct pcn_softc *);
122 static void pcn_txeof (struct pcn_softc *);
123 static void pcn_intr (void *);
124 static void pcn_tick (void *);
125 static void pcn_start (struct ifnet *);
126 static int pcn_ioctl (struct ifnet *, u_long, caddr_t);
127 static void pcn_init (void *);
128 static void pcn_stop (struct pcn_softc *);
129 static void pcn_watchdog (struct ifnet *);
130 static void pcn_shutdown (device_t);
131 static int pcn_ifmedia_upd (struct ifnet *);
132 static void pcn_ifmedia_sts (struct ifnet *, struct ifmediareq *);
133
134 static int pcn_miibus_readreg (device_t, int, int);
135 static int pcn_miibus_writereg (device_t, int, int, int);
136 static void pcn_miibus_statchg (device_t);
137
138 static void pcn_setfilt (struct ifnet *);
139 static void pcn_setmulti (struct pcn_softc *);
140 static void pcn_reset (struct pcn_softc *);
141 static int pcn_list_rx_init (struct pcn_softc *);
142 static int pcn_list_tx_init (struct pcn_softc *);
143
144 #ifdef PCN_USEIOSPACE
145 #define PCN_RES SYS_RES_IOPORT
146 #define PCN_RID PCN_PCI_LOIO
147 #else
148 #define PCN_RES SYS_RES_MEMORY
149 #define PCN_RID PCN_PCI_LOMEM
150 #endif
151
152 static device_method_t pcn_methods[] = {
153 /* Device interface */
154 DEVMETHOD(device_probe, pcn_probe),
155 DEVMETHOD(device_attach, pcn_attach),
156 DEVMETHOD(device_detach, pcn_detach),
157 DEVMETHOD(device_shutdown, pcn_shutdown),
158
159 /* bus interface */
160 DEVMETHOD(bus_print_child, bus_generic_print_child),
161 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
162
163 /* MII interface */
164 DEVMETHOD(miibus_readreg, pcn_miibus_readreg),
165 DEVMETHOD(miibus_writereg, pcn_miibus_writereg),
166 DEVMETHOD(miibus_statchg, pcn_miibus_statchg),
167
168 { 0, 0 }
169 };
170
171 static driver_t pcn_driver = {
172 "pcn",
173 pcn_methods,
174 sizeof(struct pcn_softc)
175 };
176
177 static devclass_t pcn_devclass;
178
179 DRIVER_MODULE(pcn, pci, pcn_driver, pcn_devclass, 0, 0);
180 DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0);
181
182 #define PCN_CSR_SETBIT(sc, reg, x) \
183 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x))
184
185 #define PCN_CSR_CLRBIT(sc, reg, x) \
186 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x))
187
188 #define PCN_BCR_SETBIT(sc, reg, x) \
189 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x))
190
191 #define PCN_BCR_CLRBIT(sc, reg, x) \
192 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x))
193
194 static u_int32_t
195 pcn_csr_read(sc, reg)
196 struct pcn_softc *sc;
197 int reg;
198 {
199 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
200 return(CSR_READ_4(sc, PCN_IO32_RDP));
201 }
202
203 static u_int16_t
204 pcn_csr_read16(sc, reg)
205 struct pcn_softc *sc;
206 int reg;
207 {
208 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
209 return(CSR_READ_2(sc, PCN_IO16_RDP));
210 }
211
212 static void
213 pcn_csr_write(sc, reg, val)
214 struct pcn_softc *sc;
215 int reg;
216 int val;
217 {
218 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
219 CSR_WRITE_4(sc, PCN_IO32_RDP, val);
220 return;
221 }
222
223 static u_int32_t
224 pcn_bcr_read(sc, reg)
225 struct pcn_softc *sc;
226 int reg;
227 {
228 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
229 return(CSR_READ_4(sc, PCN_IO32_BDP));
230 }
231
232 static u_int16_t
233 pcn_bcr_read16(sc, reg)
234 struct pcn_softc *sc;
235 int reg;
236 {
237 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
238 return(CSR_READ_2(sc, PCN_IO16_BDP));
239 }
240
241 static void
242 pcn_bcr_write(sc, reg, val)
243 struct pcn_softc *sc;
244 int reg;
245 int val;
246 {
247 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
248 CSR_WRITE_4(sc, PCN_IO32_BDP, val);
249 return;
250 }
251
252 static int
253 pcn_miibus_readreg(dev, phy, reg)
254 device_t dev;
255 int phy, reg;
256 {
257 struct pcn_softc *sc;
258 int val;
259
260 sc = device_get_softc(dev);
261
262 if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
263 return(0);
264
265 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
266 val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
267 if (val == 0xFFFF)
268 return(0);
269
270 sc->pcn_phyaddr = phy;
271
272 return(val);
273 }
274
275 static int
276 pcn_miibus_writereg(dev, phy, reg, data)
277 device_t dev;
278 int phy, reg, data;
279 {
280 struct pcn_softc *sc;
281
282 sc = device_get_softc(dev);
283
284 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
285 pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);
286
287 return(0);
288 }
289
290 static void
291 pcn_miibus_statchg(dev)
292 device_t dev;
293 {
294 struct pcn_softc *sc;
295 struct mii_data *mii;
296
297 sc = device_get_softc(dev);
298 mii = device_get_softc(sc->pcn_miibus);
299
300 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
301 PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
302 } else {
303 PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
304 }
305
306 return;
307 }
308
309 static void
310 pcn_setmulti(sc)
311 struct pcn_softc *sc;
312 {
313 struct ifnet *ifp;
314 struct ifmultiaddr *ifma;
315 u_int32_t h, i;
316 u_int16_t hashes[4] = { 0, 0, 0, 0 };
317
318 ifp = &sc->arpcom.ac_if;
319
320 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
321
322 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
323 for (i = 0; i < 4; i++)
324 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
325 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
326 return;
327 }
328
329 /* first, zot all the existing hash bits */
330 for (i = 0; i < 4; i++)
331 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);
332
333 /* now program new ones */
334 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
335 if (ifma->ifma_addr->sa_family != AF_LINK)
336 continue;
337 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
338 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
339 hashes[h >> 4] |= 1 << (h & 0xF);
340 }
341
342 for (i = 0; i < 4; i++)
343 pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);
344
345 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
346
347 return;
348 }
349
350 static void
351 pcn_reset(sc)
352 struct pcn_softc *sc;
353 {
354 /*
355 * Issue a reset by reading from the RESET register.
356 * Note that we don't know if the chip is operating in
357 * 16-bit or 32-bit mode at this point, so we attempt
358 * to reset the chip both ways. If one fails, the other
359 * will succeed.
360 */
361 CSR_READ_2(sc, PCN_IO16_RESET);
362 CSR_READ_4(sc, PCN_IO32_RESET);
363
364 /* Wait a little while for the chip to get its brains in order. */
365 DELAY(1000);
366
367 /* Select 32-bit (DWIO) mode */
368 CSR_WRITE_4(sc, PCN_IO32_RDP, 0);
369
370 /* Select software style 3. */
371 pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);
372
373 return;
374 }
375
376 /*
377 * Probe for an AMD chip. Check the PCI vendor and device
378 * IDs against our list and return a device name if we find a match.
379 */
380 static int
381 pcn_probe(dev)
382 device_t dev;
383 {
384 struct pcn_type *t;
385 struct pcn_softc *sc;
386 int rid;
387 u_int32_t chip_id;
388
389 t = pcn_devs;
390 sc = device_get_softc(dev);
391
392 while(t->pcn_name != NULL) {
393 if ((pci_get_vendor(dev) == t->pcn_vid) &&
394 (pci_get_device(dev) == t->pcn_did)) {
395 /*
396 * Temporarily map the I/O space
397 * so we can read the chip ID register.
398 */
399 rid = PCN_RID;
400 sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid,
401 RF_ACTIVE);
402 if (sc->pcn_res == NULL) {
403 device_printf(dev,
404 "couldn't map ports/memory\n");
405 return(ENXIO);
406 }
407 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
408 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
409 mtx_init(&sc->pcn_mtx,
410 device_get_nameunit(dev), MTX_NETWORK_LOCK,
411 MTX_DEF);
412 PCN_LOCK(sc);
413 /*
414 * Note: we can *NOT* put the chip into
415 * 32-bit mode yet. The lnc driver will only
416 * work in 16-bit mode, and once the chip
417 * goes into 32-bit mode, the only way to
418 * get it out again is with a hardware reset.
419 * So if pcn_probe() is called before the
420 * lnc driver's probe routine, the chip will
421 * be locked into 32-bit operation and the lnc
422 * driver will be unable to attach to it.
423 * Note II: if the chip happens to already
424 * be in 32-bit mode, we still need to check
425 * the chip ID, but first we have to detect
426 * 32-bit mode using only 16-bit operations.
427 * The safest way to do this is to read the
428 * PCI subsystem ID from BCR23/24 and compare
429 * that with the value read from PCI config
430 * space.
431 */
432 chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
433 chip_id <<= 16;
434 chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);
435 /*
436 * Note III: the test for 0x10001000 is a hack to
437 * pacify VMware, who's pseudo-PCnet interface is
438 * broken. Reading the subsystem register from PCI
439 * config space yields 0x00000000 while reading the
440 * same value from I/O space yields 0x10001000. It's
441 * not supposed to be that way.
442 */
443 if (chip_id == pci_read_config(dev,
444 PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) {
445 /* We're in 16-bit mode. */
446 chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1);
447 chip_id <<= 16;
448 chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0);
449 } else {
450 /* We're in 32-bit mode. */
451 chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1);
452 chip_id <<= 16;
453 chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0);
454 }
455 bus_release_resource(dev, PCN_RES,
456 PCN_RID, sc->pcn_res);
457 PCN_UNLOCK(sc);
458 mtx_destroy(&sc->pcn_mtx);
459 chip_id >>= 12;
460 sc->pcn_type = chip_id & PART_MASK;
461 switch(sc->pcn_type) {
462 case Am79C971:
463 case Am79C972:
464 case Am79C973:
465 case Am79C975:
466 case Am79C976:
467 case Am79C978:
468 break;
469 default:
470 return(ENXIO);
471 }
472 device_set_desc(dev, t->pcn_name);
473 return(0);
474 }
475 t++;
476 }
477
478 return(ENXIO);
479 }
480
481 /*
482 * Attach the interface. Allocate softc structures, do ifmedia
483 * setup and ethernet/BPF attach.
484 */
485 static int
486 pcn_attach(dev)
487 device_t dev;
488 {
489 u_int32_t eaddr[2];
490 struct pcn_softc *sc;
491 struct ifnet *ifp;
492 int unit, error = 0, rid;
493
494 sc = device_get_softc(dev);
495 unit = device_get_unit(dev);
496
497 /* Initialize our mutex. */
498 mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
499 MTX_DEF | MTX_RECURSE);
500 /*
501 * Map control/status registers.
502 */
503 pci_enable_busmaster(dev);
504
505 rid = PCN_RID;
506 sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE);
507
508 if (sc->pcn_res == NULL) {
509 printf("pcn%d: couldn't map ports/memory\n", unit);
510 error = ENXIO;
511 goto fail;
512 }
513
514 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
515 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
516
517 /* Allocate interrupt */
518 rid = 0;
519 sc->pcn_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
520 RF_SHAREABLE | RF_ACTIVE);
521
522 if (sc->pcn_irq == NULL) {
523 printf("pcn%d: couldn't map interrupt\n", unit);
524 error = ENXIO;
525 goto fail;
526 }
527
528 /* Reset the adapter. */
529 pcn_reset(sc);
530
531 /*
532 * Get station address from the EEPROM.
533 */
534 eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00);
535 eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01);
536 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
537
538 sc->pcn_unit = unit;
539 callout_handle_init(&sc->pcn_stat_ch);
540
541 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
542 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
543
544 if (sc->pcn_ldata == NULL) {
545 printf("pcn%d: no memory for list buffers!\n", unit);
546 error = ENXIO;
547 goto fail;
548 }
549 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));
550
551 ifp = &sc->arpcom.ac_if;
552 ifp->if_softc = sc;
553 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
554 ifp->if_mtu = ETHERMTU;
555 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
556 IFF_NEEDSGIANT;
557 ifp->if_ioctl = pcn_ioctl;
558 ifp->if_start = pcn_start;
559 ifp->if_watchdog = pcn_watchdog;
560 ifp->if_init = pcn_init;
561 ifp->if_baudrate = 10000000;
562 ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1;
563
564 /*
565 * Do MII setup.
566 */
567 if (mii_phy_probe(dev, &sc->pcn_miibus,
568 pcn_ifmedia_upd, pcn_ifmedia_sts)) {
569 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
570 error = ENXIO;
571 goto fail;
572 }
573
574 /*
575 * Call MI attach routine.
576 */
577 ether_ifattach(ifp, (u_int8_t *) eaddr);
578
579 /* Hook interrupt last to avoid having to lock softc */
580 error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET,
581 pcn_intr, sc, &sc->pcn_intrhand);
582
583 if (error) {
584 printf("pcn%d: couldn't set up irq\n", unit);
585 ether_ifdetach(ifp);
586 goto fail;
587 }
588
589 fail:
590 if (error)
591 pcn_detach(dev);
592
593 return(error);
594 }
595
596 /*
597 * Shutdown hardware and free up resources. This can be called any
598 * time after the mutex has been initialized. It is called in both
599 * the error case in attach and the normal detach case so it needs
600 * to be careful about only freeing resources that have actually been
601 * allocated.
602 */
603 static int
604 pcn_detach(dev)
605 device_t dev;
606 {
607 struct pcn_softc *sc;
608 struct ifnet *ifp;
609
610 sc = device_get_softc(dev);
611 ifp = &sc->arpcom.ac_if;
612
613 KASSERT(mtx_initialized(&sc->pcn_mtx), ("pcn mutex not initialized"));
614 PCN_LOCK(sc);
615
616 /* These should only be active if attach succeeded */
617 if (device_is_attached(dev)) {
618 pcn_reset(sc);
619 pcn_stop(sc);
620 ether_ifdetach(ifp);
621 }
622 if (sc->pcn_miibus)
623 device_delete_child(dev, sc->pcn_miibus);
624 bus_generic_detach(dev);
625
626 if (sc->pcn_intrhand)
627 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
628 if (sc->pcn_irq)
629 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
630 if (sc->pcn_res)
631 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
632
633 if (sc->pcn_ldata) {
634 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data),
635 M_DEVBUF);
636 }
637 PCN_UNLOCK(sc);
638
639 mtx_destroy(&sc->pcn_mtx);
640
641 return(0);
642 }
643
644 /*
645 * Initialize the transmit descriptors.
646 */
647 static int
648 pcn_list_tx_init(sc)
649 struct pcn_softc *sc;
650 {
651 struct pcn_list_data *ld;
652 struct pcn_ring_data *cd;
653 int i;
654
655 cd = &sc->pcn_cdata;
656 ld = sc->pcn_ldata;
657
658 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
659 cd->pcn_tx_chain[i] = NULL;
660 ld->pcn_tx_list[i].pcn_tbaddr = 0;
661 ld->pcn_tx_list[i].pcn_txctl = 0;
662 ld->pcn_tx_list[i].pcn_txstat = 0;
663 }
664
665 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;
666
667 return(0);
668 }
669
670
671 /*
672 * Initialize the RX descriptors and allocate mbufs for them.
673 */
674 static int
675 pcn_list_rx_init(sc)
676 struct pcn_softc *sc;
677 {
678 struct pcn_ring_data *cd;
679 int i;
680
681 cd = &sc->pcn_cdata;
682
683 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
684 if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
685 return(ENOBUFS);
686 }
687
688 cd->pcn_rx_prod = 0;
689
690 return(0);
691 }
692
693 /*
694 * Initialize an RX descriptor and attach an MBUF cluster.
695 */
696 static int
697 pcn_newbuf(sc, idx, m)
698 struct pcn_softc *sc;
699 int idx;
700 struct mbuf *m;
701 {
702 struct mbuf *m_new = NULL;
703 struct pcn_rx_desc *c;
704
705 c = &sc->pcn_ldata->pcn_rx_list[idx];
706
707 if (m == NULL) {
708 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
709 if (m_new == NULL)
710 return(ENOBUFS);
711
712 MCLGET(m_new, M_DONTWAIT);
713 if (!(m_new->m_flags & M_EXT)) {
714 m_freem(m_new);
715 return(ENOBUFS);
716 }
717 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
718 } else {
719 m_new = m;
720 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
721 m_new->m_data = m_new->m_ext.ext_buf;
722 }
723
724 m_adj(m_new, ETHER_ALIGN);
725
726 sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
727 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
728 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
729 c->pcn_bufsz |= PCN_RXLEN_MBO;
730 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;
731
732 return(0);
733 }
734
735 /*
736 * A frame has been uploaded: pass the resulting mbuf chain up to
737 * the higher level protocols.
738 */
739 static void
740 pcn_rxeof(sc)
741 struct pcn_softc *sc;
742 {
743 struct mbuf *m;
744 struct ifnet *ifp;
745 struct pcn_rx_desc *cur_rx;
746 int i;
747
748 PCN_LOCK_ASSERT(sc);
749
750 ifp = &sc->arpcom.ac_if;
751 i = sc->pcn_cdata.pcn_rx_prod;
752
753 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
754 cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
755 m = sc->pcn_cdata.pcn_rx_chain[i];
756 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
757
758 /*
759 * If an error occurs, update stats, clear the
760 * status word and leave the mbuf cluster in place:
761 * it should simply get re-used next time this descriptor
762 * comes up in the ring.
763 */
764 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
765 ifp->if_ierrors++;
766 pcn_newbuf(sc, i, m);
767 PCN_INC(i, PCN_RX_LIST_CNT);
768 continue;
769 }
770
771 if (pcn_newbuf(sc, i, NULL)) {
772 /* Ran out of mbufs; recycle this one. */
773 pcn_newbuf(sc, i, m);
774 ifp->if_ierrors++;
775 PCN_INC(i, PCN_RX_LIST_CNT);
776 continue;
777 }
778
779 PCN_INC(i, PCN_RX_LIST_CNT);
780
781 /* No errors; receive the packet. */
782 ifp->if_ipackets++;
783 m->m_len = m->m_pkthdr.len =
784 cur_rx->pcn_rxlen - ETHER_CRC_LEN;
785 m->m_pkthdr.rcvif = ifp;
786
787 PCN_UNLOCK(sc);
788 (*ifp->if_input)(ifp, m);
789 PCN_LOCK(sc);
790 }
791
792 sc->pcn_cdata.pcn_rx_prod = i;
793
794 return;
795 }
796
797 /*
798 * A frame was downloaded to the chip. It's safe for us to clean up
799 * the list buffers.
800 */
801
802 static void
803 pcn_txeof(sc)
804 struct pcn_softc *sc;
805 {
806 struct pcn_tx_desc *cur_tx = NULL;
807 struct ifnet *ifp;
808 u_int32_t idx;
809
810 ifp = &sc->arpcom.ac_if;
811
812 /*
813 * Go through our tx list and free mbufs for those
814 * frames that have been transmitted.
815 */
816 idx = sc->pcn_cdata.pcn_tx_cons;
817 while (idx != sc->pcn_cdata.pcn_tx_prod) {
818 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];
819
820 if (!PCN_OWN_TXDESC(cur_tx))
821 break;
822
823 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
824 sc->pcn_cdata.pcn_tx_cnt--;
825 PCN_INC(idx, PCN_TX_LIST_CNT);
826 continue;
827 }
828
829 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
830 ifp->if_oerrors++;
831 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
832 ifp->if_collisions++;
833 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
834 ifp->if_collisions++;
835 }
836
837 ifp->if_collisions +=
838 cur_tx->pcn_txstat & PCN_TXSTAT_TRC;
839
840 ifp->if_opackets++;
841 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
842 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
843 sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
844 }
845
846 sc->pcn_cdata.pcn_tx_cnt--;
847 PCN_INC(idx, PCN_TX_LIST_CNT);
848 }
849
850 if (idx != sc->pcn_cdata.pcn_tx_cons) {
851 /* Some buffers have been freed. */
852 sc->pcn_cdata.pcn_tx_cons = idx;
853 ifp->if_flags &= ~IFF_OACTIVE;
854 }
855 ifp->if_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 0 : 5;
856
857 return;
858 }
859
860 static void
861 pcn_tick(xsc)
862 void *xsc;
863 {
864 struct pcn_softc *sc;
865 struct mii_data *mii;
866 struct ifnet *ifp;
867
868 sc = xsc;
869 ifp = &sc->arpcom.ac_if;
870 PCN_LOCK(sc);
871
872 mii = device_get_softc(sc->pcn_miibus);
873 mii_tick(mii);
874
875 /* link just died */
876 if (sc->pcn_link & !(mii->mii_media_status & IFM_ACTIVE))
877 sc->pcn_link = 0;
878
879 /* link just came up, restart */
880 if (!sc->pcn_link && mii->mii_media_status & IFM_ACTIVE &&
881 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
882 sc->pcn_link++;
883 if (ifp->if_snd.ifq_head != NULL)
884 pcn_start(ifp);
885 }
886
887 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
888
889 PCN_UNLOCK(sc);
890
891 return;
892 }
893
894 static void
895 pcn_intr(arg)
896 void *arg;
897 {
898 struct pcn_softc *sc;
899 struct ifnet *ifp;
900 u_int32_t status;
901
902 sc = arg;
903 ifp = &sc->arpcom.ac_if;
904
905 /* Suppress unwanted interrupts */
906 if (!(ifp->if_flags & IFF_UP)) {
907 pcn_stop(sc);
908 return;
909 }
910
911 PCN_LOCK(sc);
912
913 CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);
914
915 while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
916 CSR_WRITE_4(sc, PCN_IO32_RDP, status);
917
918 if (status & PCN_CSR_RINT)
919 pcn_rxeof(sc);
920
921 if (status & PCN_CSR_TINT)
922 pcn_txeof(sc);
923
924 if (status & PCN_CSR_ERR) {
925 pcn_init(sc);
926 break;
927 }
928 }
929
930 if (ifp->if_snd.ifq_head != NULL)
931 pcn_start(ifp);
932
933 PCN_UNLOCK(sc);
934 return;
935 }
936
937 /*
938 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
939 * pointers to the fragment pointers.
940 */
941 static int
942 pcn_encap(sc, m_head, txidx)
943 struct pcn_softc *sc;
944 struct mbuf *m_head;
945 u_int32_t *txidx;
946 {
947 struct pcn_tx_desc *f = NULL;
948 struct mbuf *m;
949 int frag, cur, cnt = 0;
950
951 /*
952 * Start packing the mbufs in this chain into
953 * the fragment pointers. Stop when we run out
954 * of fragments or hit the end of the mbuf chain.
955 */
956 m = m_head;
957 cur = frag = *txidx;
958
959 for (m = m_head; m != NULL; m = m->m_next) {
960 if (m->m_len != 0) {
961 if ((PCN_TX_LIST_CNT -
962 (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
963 return(ENOBUFS);
964 f = &sc->pcn_ldata->pcn_tx_list[frag];
965 f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
966 f->pcn_txctl |= PCN_TXCTL_MBO;
967 f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
968 if (cnt == 0)
969 f->pcn_txctl |= PCN_TXCTL_STP;
970 else
971 f->pcn_txctl |= PCN_TXCTL_OWN;
972 cur = frag;
973 PCN_INC(frag, PCN_TX_LIST_CNT);
974 cnt++;
975 }
976 }
977
978 if (m != NULL)
979 return(ENOBUFS);
980
981 sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
982 sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
983 PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
984 sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
985 sc->pcn_cdata.pcn_tx_cnt += cnt;
986 *txidx = frag;
987
988 return(0);
989 }
990
991 /*
992 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
993 * to the mbuf data regions directly in the transmit lists. We also save a
994 * copy of the pointers since the transmit list fragment pointers are
995 * physical addresses.
996 */
997 static void
998 pcn_start(ifp)
999 struct ifnet *ifp;
1000 {
1001 struct pcn_softc *sc;
1002 struct mbuf *m_head = NULL;
1003 u_int32_t idx;
1004
1005 sc = ifp->if_softc;
1006
1007 PCN_LOCK(sc);
1008
1009 if (!sc->pcn_link) {
1010 PCN_UNLOCK(sc);
1011 return;
1012 }
1013
1014 idx = sc->pcn_cdata.pcn_tx_prod;
1015
1016 if (ifp->if_flags & IFF_OACTIVE) {
1017 PCN_UNLOCK(sc);
1018 return;
1019 }
1020
1021 while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
1022 IF_DEQUEUE(&ifp->if_snd, m_head);
1023 if (m_head == NULL)
1024 break;
1025
1026 if (pcn_encap(sc, m_head, &idx)) {
1027 IF_PREPEND(&ifp->if_snd, m_head);
1028 ifp->if_flags |= IFF_OACTIVE;
1029 break;
1030 }
1031
1032 /*
1033 * If there's a BPF listener, bounce a copy of this frame
1034 * to him.
1035 */
1036 BPF_MTAP(ifp, m_head);
1037
1038 }
1039
1040 /* Transmit */
1041 sc->pcn_cdata.pcn_tx_prod = idx;
1042 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
1043
1044 /*
1045 * Set a timeout in case the chip goes out to lunch.
1046 */
1047 ifp->if_timer = 5;
1048
1049 PCN_UNLOCK(sc);
1050
1051 return;
1052 }
1053
1054 static void
1055 pcn_setfilt(ifp)
1056 struct ifnet *ifp;
1057 {
1058 struct pcn_softc *sc;
1059
1060 sc = ifp->if_softc;
1061
1062 /* If we want promiscuous mode, set the allframes bit. */
1063 if (ifp->if_flags & IFF_PROMISC) {
1064 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1065 } else {
1066 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1067 }
1068
1069 /* Set the capture broadcast bit to capture broadcast frames. */
1070 if (ifp->if_flags & IFF_BROADCAST) {
1071 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1072 } else {
1073 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1074 }
1075
1076 return;
1077 }
1078
1079 static void
1080 pcn_init(xsc)
1081 void *xsc;
1082 {
1083 struct pcn_softc *sc = xsc;
1084 struct ifnet *ifp = &sc->arpcom.ac_if;
1085 struct mii_data *mii = NULL;
1086
1087 PCN_LOCK(sc);
1088
1089 /*
1090 * Cancel pending I/O and free all RX/TX buffers.
1091 */
1092 pcn_stop(sc);
1093 pcn_reset(sc);
1094
1095 mii = device_get_softc(sc->pcn_miibus);
1096
1097 /* Set MAC address */
1098 pcn_csr_write(sc, PCN_CSR_PAR0,
1099 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1100 pcn_csr_write(sc, PCN_CSR_PAR1,
1101 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1102 pcn_csr_write(sc, PCN_CSR_PAR2,
1103 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1104
1105 /* Init circular RX list. */
1106 if (pcn_list_rx_init(sc) == ENOBUFS) {
1107 printf("pcn%d: initialization failed: no "
1108 "memory for rx buffers\n", sc->pcn_unit);
1109 pcn_stop(sc);
1110 PCN_UNLOCK(sc);
1111 return;
1112 }
1113
1114 /*
1115 * Init tx descriptors.
1116 */
1117 pcn_list_tx_init(sc);
1118
1119 /* Set up the mode register. */
1120 pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);
1121
1122 /* Set up RX filter. */
1123 pcn_setfilt(ifp);
1124
1125 /*
1126 * Load the multicast filter.
1127 */
1128 pcn_setmulti(sc);
1129
1130 /*
1131 * Load the addresses of the RX and TX lists.
1132 */
1133 pcn_csr_write(sc, PCN_CSR_RXADDR0,
1134 vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
1135 pcn_csr_write(sc, PCN_CSR_RXADDR1,
1136 (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
1137 pcn_csr_write(sc, PCN_CSR_TXADDR0,
1138 vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
1139 pcn_csr_write(sc, PCN_CSR_TXADDR1,
1140 (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);
1141
1142 /* Set the RX and TX ring sizes. */
1143 pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
1144 pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);
1145
1146 /* We're not using the initialization block. */
1147 pcn_csr_write(sc, PCN_CSR_IAB1, 0);
1148
1149 /* Enable fast suspend mode. */
1150 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);
1151
1152 /*
1153 * Enable burst read and write. Also set the no underflow
1154 * bit. This will avoid transmit underruns in certain
1155 * conditions while still providing decent performance.
1156 */
1157 PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
1158 PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);
1159
1160 /* Enable graceful recovery from underflow. */
1161 PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);
1162
1163 /* Enable auto-padding of short TX frames. */
1164 PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);
1165
1166 /* Disable MII autoneg (we handle this ourselves). */
1167 PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);
1168
1169 if (sc->pcn_type == Am79C978)
1170 pcn_bcr_write(sc, PCN_BCR_PHYSEL,
1171 PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);
1172
1173 /* Enable interrupts and start the controller running. */
1174 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);
1175
1176 mii_mediachg(mii);
1177
1178 ifp->if_flags |= IFF_RUNNING;
1179 ifp->if_flags &= ~IFF_OACTIVE;
1180
1181 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
1182 PCN_UNLOCK(sc);
1183
1184 return;
1185 }
1186
1187 /*
1188 * Set media options.
1189 */
1190 static int
1191 pcn_ifmedia_upd(ifp)
1192 struct ifnet *ifp;
1193 {
1194 struct pcn_softc *sc;
1195 struct mii_data *mii;
1196
1197 sc = ifp->if_softc;
1198 mii = device_get_softc(sc->pcn_miibus);
1199
1200 sc->pcn_link = 0;
1201 if (mii->mii_instance) {
1202 struct mii_softc *miisc;
1203 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1204 mii_phy_reset(miisc);
1205 }
1206 mii_mediachg(mii);
1207
1208 return(0);
1209 }
1210
1211 /*
1212 * Report current media status.
1213 */
1214 static void
1215 pcn_ifmedia_sts(ifp, ifmr)
1216 struct ifnet *ifp;
1217 struct ifmediareq *ifmr;
1218 {
1219 struct pcn_softc *sc;
1220 struct mii_data *mii;
1221
1222 sc = ifp->if_softc;
1223
1224 mii = device_get_softc(sc->pcn_miibus);
1225 mii_pollstat(mii);
1226 ifmr->ifm_active = mii->mii_media_active;
1227 ifmr->ifm_status = mii->mii_media_status;
1228
1229 return;
1230 }
1231
1232 static int
1233 pcn_ioctl(ifp, command, data)
1234 struct ifnet *ifp;
1235 u_long command;
1236 caddr_t data;
1237 {
1238 struct pcn_softc *sc = ifp->if_softc;
1239 struct ifreq *ifr = (struct ifreq *) data;
1240 struct mii_data *mii = NULL;
1241 int error = 0;
1242
1243 PCN_LOCK(sc);
1244
1245 switch(command) {
1246 case SIOCSIFFLAGS:
1247 if (ifp->if_flags & IFF_UP) {
1248 if (ifp->if_flags & IFF_RUNNING &&
1249 ifp->if_flags & IFF_PROMISC &&
1250 !(sc->pcn_if_flags & IFF_PROMISC)) {
1251 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1252 PCN_EXTCTL1_SPND);
1253 pcn_setfilt(ifp);
1254 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1255 PCN_EXTCTL1_SPND);
1256 pcn_csr_write(sc, PCN_CSR_CSR,
1257 PCN_CSR_INTEN|PCN_CSR_START);
1258 } else if (ifp->if_flags & IFF_RUNNING &&
1259 !(ifp->if_flags & IFF_PROMISC) &&
1260 sc->pcn_if_flags & IFF_PROMISC) {
1261 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1262 PCN_EXTCTL1_SPND);
1263 pcn_setfilt(ifp);
1264 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1265 PCN_EXTCTL1_SPND);
1266 pcn_csr_write(sc, PCN_CSR_CSR,
1267 PCN_CSR_INTEN|PCN_CSR_START);
1268 } else if (!(ifp->if_flags & IFF_RUNNING))
1269 pcn_init(sc);
1270 } else {
1271 if (ifp->if_flags & IFF_RUNNING)
1272 pcn_stop(sc);
1273 }
1274 sc->pcn_if_flags = ifp->if_flags;
1275 error = 0;
1276 break;
1277 case SIOCADDMULTI:
1278 case SIOCDELMULTI:
1279 pcn_setmulti(sc);
1280 error = 0;
1281 break;
1282 case SIOCGIFMEDIA:
1283 case SIOCSIFMEDIA:
1284 mii = device_get_softc(sc->pcn_miibus);
1285 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1286 break;
1287 default:
1288 error = ether_ioctl(ifp, command, data);
1289 break;
1290 }
1291
1292 PCN_UNLOCK(sc);
1293
1294 return(error);
1295 }
1296
1297 static void
1298 pcn_watchdog(ifp)
1299 struct ifnet *ifp;
1300 {
1301 struct pcn_softc *sc;
1302
1303 sc = ifp->if_softc;
1304
1305 PCN_LOCK(sc);
1306
1307 ifp->if_oerrors++;
1308 printf("pcn%d: watchdog timeout\n", sc->pcn_unit);
1309
1310 pcn_stop(sc);
1311 pcn_reset(sc);
1312 pcn_init(sc);
1313
1314 if (ifp->if_snd.ifq_head != NULL)
1315 pcn_start(ifp);
1316
1317 PCN_UNLOCK(sc);
1318
1319 return;
1320 }
1321
1322 /*
1323 * Stop the adapter and free any mbufs allocated to the
1324 * RX and TX lists.
1325 */
1326 static void
1327 pcn_stop(sc)
1328 struct pcn_softc *sc;
1329 {
1330 register int i;
1331 struct ifnet *ifp;
1332
1333 ifp = &sc->arpcom.ac_if;
1334 PCN_LOCK(sc);
1335 ifp->if_timer = 0;
1336
1337 untimeout(pcn_tick, sc, sc->pcn_stat_ch);
1338
1339 /* Turn off interrupts */
1340 PCN_CSR_CLRBIT(sc, PCN_CSR_CSR, PCN_CSR_INTEN);
1341 /* Stop adapter */
1342 PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
1343 sc->pcn_link = 0;
1344
1345 /*
1346 * Free data in the RX lists.
1347 */
1348 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
1349 if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
1350 m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
1351 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
1352 }
1353 }
1354 bzero((char *)&sc->pcn_ldata->pcn_rx_list,
1355 sizeof(sc->pcn_ldata->pcn_rx_list));
1356
1357 /*
1358 * Free the TX list buffers.
1359 */
1360 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
1361 if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
1362 m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
1363 sc->pcn_cdata.pcn_tx_chain[i] = NULL;
1364 }
1365 }
1366
1367 bzero((char *)&sc->pcn_ldata->pcn_tx_list,
1368 sizeof(sc->pcn_ldata->pcn_tx_list));
1369
1370 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1371 PCN_UNLOCK(sc);
1372
1373 return;
1374 }
1375
1376 /*
1377 * Stop all chip I/O so that the kernel's probe routines don't
1378 * get confused by errant DMAs when rebooting.
1379 */
1380 static void
1381 pcn_shutdown(dev)
1382 device_t dev;
1383 {
1384 struct pcn_softc *sc;
1385
1386 sc = device_get_softc(dev);
1387
1388 PCN_LOCK(sc);
1389 pcn_reset(sc);
1390 pcn_stop(sc);
1391 PCN_UNLOCK(sc);
1392
1393 return;
1394 }
Cache object: c1a797e987dcf935613c693c40d46a65
|