/* FreeBSD/Linux Kernel Cross Reference: sys/pci/if_pcn.c */
1 /*
2 * Copyright (c) 2000 Berkeley Software Design, Inc.
3 * Copyright (c) 1997, 1998, 1999, 2000
4 * Bill Paul <wpaul@osd.bsdi.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
 * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available
36 * from http://www.amd.com.
37 *
38 * The AMD PCnet/PCI controllers are more advanced and functional
39 * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain
40 * backwards compatibility with the LANCE and thus can be made
41 * to work with older LANCE drivers. This is in fact how the
42 * PCnet/PCI chips were supported in FreeBSD originally. The trouble
43 * is that the PCnet/PCI devices offer several performance enhancements
44 * which can't be exploited in LANCE compatibility mode. Chief among
45 * these enhancements is the ability to perform PCI DMA operations
46 * using 32-bit addressing (which eliminates the need for ISA
47 * bounce-buffering), and special receive buffer alignment (which
48 * allows the receive handler to pass packets to the upper protocol
49 * layers without copying on both the x86 and alpha platforms).
50 */
51
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD: releng/5.1/sys/pci/if_pcn.c 113812 2003-04-21 18:34:04Z imp $");
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/sockio.h>
58 #include <sys/mbuf.h>
59 #include <sys/malloc.h>
60 #include <sys/kernel.h>
61 #include <sys/socket.h>
62
63 #include <net/if.h>
64 #include <net/if_arp.h>
65 #include <net/ethernet.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68
69 #include <net/bpf.h>
70
71 #include <vm/vm.h> /* for vtophys */
72 #include <vm/pmap.h> /* for vtophys */
73 #include <machine/bus_pio.h>
74 #include <machine/bus_memio.h>
75 #include <machine/bus.h>
76 #include <machine/resource.h>
77 #include <sys/bus.h>
78 #include <sys/rman.h>
79
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82
83 #include <pci/pcireg.h>
84 #include <pci/pcivar.h>
85
86 #define PCN_USEIOSPACE
87
88 #include <pci/if_pcnreg.h>
89
90 MODULE_DEPEND(pcn, pci, 1, 1, 1);
91 MODULE_DEPEND(pcn, ether, 1, 1, 1);
92 MODULE_DEPEND(pcn, miibus, 1, 1, 1);
93
94 /* "controller miibus0" required. See GENERIC if you get errors here. */
95 #include "miibus_if.h"
96
97 /*
98 * Various supported device vendors/types and their names.
99 */
100 static struct pcn_type pcn_devs[] = {
101 { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
102 { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
103 { 0, 0, NULL }
104 };
105
106 static u_int32_t pcn_csr_read (struct pcn_softc *, int);
107 static u_int16_t pcn_csr_read16 (struct pcn_softc *, int);
108 static u_int16_t pcn_bcr_read16 (struct pcn_softc *, int);
109 static void pcn_csr_write (struct pcn_softc *, int, int);
110 static u_int32_t pcn_bcr_read (struct pcn_softc *, int);
111 static void pcn_bcr_write (struct pcn_softc *, int, int);
112
113 static int pcn_probe (device_t);
114 static int pcn_attach (device_t);
115 static int pcn_detach (device_t);
116
117 static int pcn_newbuf (struct pcn_softc *, int, struct mbuf *);
118 static int pcn_encap (struct pcn_softc *,
119 struct mbuf *, u_int32_t *);
120 static void pcn_rxeof (struct pcn_softc *);
121 static void pcn_txeof (struct pcn_softc *);
122 static void pcn_intr (void *);
123 static void pcn_tick (void *);
124 static void pcn_start (struct ifnet *);
125 static int pcn_ioctl (struct ifnet *, u_long, caddr_t);
126 static void pcn_init (void *);
127 static void pcn_stop (struct pcn_softc *);
128 static void pcn_watchdog (struct ifnet *);
129 static void pcn_shutdown (device_t);
130 static int pcn_ifmedia_upd (struct ifnet *);
131 static void pcn_ifmedia_sts (struct ifnet *, struct ifmediareq *);
132
133 static int pcn_miibus_readreg (device_t, int, int);
134 static int pcn_miibus_writereg (device_t, int, int, int);
135 static void pcn_miibus_statchg (device_t);
136
137 static void pcn_setfilt (struct ifnet *);
138 static void pcn_setmulti (struct pcn_softc *);
139 static u_int32_t pcn_crc (caddr_t);
140 static void pcn_reset (struct pcn_softc *);
141 static int pcn_list_rx_init (struct pcn_softc *);
142 static int pcn_list_tx_init (struct pcn_softc *);
143
144 #ifdef PCN_USEIOSPACE
145 #define PCN_RES SYS_RES_IOPORT
146 #define PCN_RID PCN_PCI_LOIO
147 #else
148 #define PCN_RES SYS_RES_MEMORY
149 #define PCN_RID PCN_PCI_LOMEM
150 #endif
151
152 static device_method_t pcn_methods[] = {
153 /* Device interface */
154 DEVMETHOD(device_probe, pcn_probe),
155 DEVMETHOD(device_attach, pcn_attach),
156 DEVMETHOD(device_detach, pcn_detach),
157 DEVMETHOD(device_shutdown, pcn_shutdown),
158
159 /* bus interface */
160 DEVMETHOD(bus_print_child, bus_generic_print_child),
161 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
162
163 /* MII interface */
164 DEVMETHOD(miibus_readreg, pcn_miibus_readreg),
165 DEVMETHOD(miibus_writereg, pcn_miibus_writereg),
166 DEVMETHOD(miibus_statchg, pcn_miibus_statchg),
167
168 { 0, 0 }
169 };
170
171 static driver_t pcn_driver = {
172 "pcn",
173 pcn_methods,
174 sizeof(struct pcn_softc)
175 };
176
177 static devclass_t pcn_devclass;
178
179 DRIVER_MODULE(pcn, pci, pcn_driver, pcn_devclass, 0, 0);
180 DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0);
181
182 #define PCN_CSR_SETBIT(sc, reg, x) \
183 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x))
184
185 #define PCN_CSR_CLRBIT(sc, reg, x) \
186 pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x))
187
188 #define PCN_BCR_SETBIT(sc, reg, x) \
189 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x))
190
191 #define PCN_BCR_CLRBIT(sc, reg, x) \
192 pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x))
193
194 static u_int32_t
195 pcn_csr_read(sc, reg)
196 struct pcn_softc *sc;
197 int reg;
198 {
199 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
200 return(CSR_READ_4(sc, PCN_IO32_RDP));
201 }
202
203 static u_int16_t
204 pcn_csr_read16(sc, reg)
205 struct pcn_softc *sc;
206 int reg;
207 {
208 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
209 return(CSR_READ_2(sc, PCN_IO16_RDP));
210 }
211
212 static void
213 pcn_csr_write(sc, reg, val)
214 struct pcn_softc *sc;
215 int reg;
216 int val;
217 {
218 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
219 CSR_WRITE_4(sc, PCN_IO32_RDP, val);
220 return;
221 }
222
223 static u_int32_t
224 pcn_bcr_read(sc, reg)
225 struct pcn_softc *sc;
226 int reg;
227 {
228 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
229 return(CSR_READ_4(sc, PCN_IO32_BDP));
230 }
231
232 static u_int16_t
233 pcn_bcr_read16(sc, reg)
234 struct pcn_softc *sc;
235 int reg;
236 {
237 CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
238 return(CSR_READ_2(sc, PCN_IO16_BDP));
239 }
240
241 static void
242 pcn_bcr_write(sc, reg, val)
243 struct pcn_softc *sc;
244 int reg;
245 int val;
246 {
247 CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
248 CSR_WRITE_4(sc, PCN_IO32_BDP, val);
249 return;
250 }
251
252 static int
253 pcn_miibus_readreg(dev, phy, reg)
254 device_t dev;
255 int phy, reg;
256 {
257 struct pcn_softc *sc;
258 int val;
259
260 sc = device_get_softc(dev);
261
262 if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
263 return(0);
264
265 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
266 val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
267 if (val == 0xFFFF)
268 return(0);
269
270 sc->pcn_phyaddr = phy;
271
272 return(val);
273 }
274
275 static int
276 pcn_miibus_writereg(dev, phy, reg, data)
277 device_t dev;
278 int phy, reg, data;
279 {
280 struct pcn_softc *sc;
281
282 sc = device_get_softc(dev);
283
284 pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
285 pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);
286
287 return(0);
288 }
289
290 static void
291 pcn_miibus_statchg(dev)
292 device_t dev;
293 {
294 struct pcn_softc *sc;
295 struct mii_data *mii;
296
297 sc = device_get_softc(dev);
298 mii = device_get_softc(sc->pcn_miibus);
299
300 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
301 PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
302 } else {
303 PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
304 }
305
306 return;
307 }
308
309 #define DC_POLY 0xEDB88320
310
311 static u_int32_t
312 pcn_crc(addr)
313 caddr_t addr;
314 {
315 u_int32_t idx, bit, data, crc;
316
317 /* Compute CRC for the address value. */
318 crc = 0xFFFFFFFF; /* initial value */
319
320 for (idx = 0; idx < 6; idx++) {
321 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
322 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
323 }
324
325 return ((crc >> 26) & 0x3F);
326 }
327
328 static void
329 pcn_setmulti(sc)
330 struct pcn_softc *sc;
331 {
332 struct ifnet *ifp;
333 struct ifmultiaddr *ifma;
334 u_int32_t h, i;
335 u_int16_t hashes[4] = { 0, 0, 0, 0 };
336
337 ifp = &sc->arpcom.ac_if;
338
339 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
340
341 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
342 for (i = 0; i < 4; i++)
343 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
344 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
345 return;
346 }
347
348 /* first, zot all the existing hash bits */
349 for (i = 0; i < 4; i++)
350 pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);
351
352 /* now program new ones */
353 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
354 if (ifma->ifma_addr->sa_family != AF_LINK)
355 continue;
356 h = pcn_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
357 hashes[h >> 4] |= 1 << (h & 0xF);
358 }
359
360 for (i = 0; i < 4; i++)
361 pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);
362
363 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
364
365 return;
366 }
367
368 static void
369 pcn_reset(sc)
370 struct pcn_softc *sc;
371 {
372 /*
373 * Issue a reset by reading from the RESET register.
374 * Note that we don't know if the chip is operating in
375 * 16-bit or 32-bit mode at this point, so we attempt
376 * to reset the chip both ways. If one fails, the other
377 * will succeed.
378 */
379 CSR_READ_2(sc, PCN_IO16_RESET);
380 CSR_READ_4(sc, PCN_IO32_RESET);
381
382 /* Wait a little while for the chip to get its brains in order. */
383 DELAY(1000);
384
385 /* Select 32-bit (DWIO) mode */
386 CSR_WRITE_4(sc, PCN_IO32_RDP, 0);
387
388 /* Select software style 3. */
389 pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);
390
391 return;
392 }
393
394 /*
395 * Probe for an AMD chip. Check the PCI vendor and device
396 * IDs against our list and return a device name if we find a match.
397 */
398 static int
399 pcn_probe(dev)
400 device_t dev;
401 {
402 struct pcn_type *t;
403 struct pcn_softc *sc;
404 int rid;
405 u_int32_t chip_id;
406
407 t = pcn_devs;
408 sc = device_get_softc(dev);
409
410 while(t->pcn_name != NULL) {
411 if ((pci_get_vendor(dev) == t->pcn_vid) &&
412 (pci_get_device(dev) == t->pcn_did)) {
413 /*
414 * Temporarily map the I/O space
415 * so we can read the chip ID register.
416 */
417 rid = PCN_RID;
418 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
419 0, ~0, 1, RF_ACTIVE);
420 if (sc->pcn_res == NULL) {
421 device_printf(dev,
422 "couldn't map ports/memory\n");
423 return(ENXIO);
424 }
425 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
426 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
427 mtx_init(&sc->pcn_mtx,
428 device_get_nameunit(dev), MTX_NETWORK_LOCK,
429 MTX_DEF);
430 PCN_LOCK(sc);
431 /*
432 * Note: we can *NOT* put the chip into
433 * 32-bit mode yet. The lnc driver will only
434 * work in 16-bit mode, and once the chip
435 * goes into 32-bit mode, the only way to
436 * get it out again is with a hardware reset.
437 * So if pcn_probe() is called before the
438 * lnc driver's probe routine, the chip will
439 * be locked into 32-bit operation and the lnc
440 * driver will be unable to attach to it.
441 * Note II: if the chip happens to already
442 * be in 32-bit mode, we still need to check
443 * the chip ID, but first we have to detect
444 * 32-bit mode using only 16-bit operations.
445 * The safest way to do this is to read the
446 * PCI subsystem ID from BCR23/24 and compare
447 * that with the value read from PCI config
448 * space.
449 */
450 chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
451 chip_id <<= 16;
452 chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);
453 /*
454 * Note III: the test for 0x10001000 is a hack to
455 * pacify VMware, who's pseudo-PCnet interface is
456 * broken. Reading the subsystem register from PCI
457 * config space yeilds 0x00000000 while reading the
458 * same value from I/O space yeilds 0x10001000. It's
459 * not supposed to be that way.
460 */
461 if (chip_id == pci_read_config(dev,
462 PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) {
463 /* We're in 16-bit mode. */
464 chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1);
465 chip_id <<= 16;
466 chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0);
467 } else {
468 /* We're in 32-bit mode. */
469 chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1);
470 chip_id <<= 16;
471 chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0);
472 }
473 bus_release_resource(dev, PCN_RES,
474 PCN_RID, sc->pcn_res);
475 PCN_UNLOCK(sc);
476 mtx_destroy(&sc->pcn_mtx);
477 chip_id >>= 12;
478 sc->pcn_type = chip_id & PART_MASK;
479 switch(sc->pcn_type) {
480 case Am79C971:
481 case Am79C972:
482 case Am79C973:
483 case Am79C975:
484 case Am79C976:
485 case Am79C978:
486 break;
487 default:
488 return(ENXIO);
489 break;
490 }
491 device_set_desc(dev, t->pcn_name);
492 return(0);
493 }
494 t++;
495 }
496
497 return(ENXIO);
498 }
499
500 /*
501 * Attach the interface. Allocate softc structures, do ifmedia
502 * setup and ethernet/BPF attach.
503 */
504 static int
505 pcn_attach(dev)
506 device_t dev;
507 {
508 u_int32_t eaddr[2];
509 struct pcn_softc *sc;
510 struct ifnet *ifp;
511 int unit, error = 0, rid;
512
513 sc = device_get_softc(dev);
514 unit = device_get_unit(dev);
515
516 /* Initialize our mutex. */
517 mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
518 MTX_DEF | MTX_RECURSE);
519
520 /*
521 * Handle power management nonsense.
522 */
523 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
524 u_int32_t iobase, membase, irq;
525
526 /* Save important PCI config data. */
527 iobase = pci_read_config(dev, PCN_PCI_LOIO, 4);
528 membase = pci_read_config(dev, PCN_PCI_LOMEM, 4);
529 irq = pci_read_config(dev, PCN_PCI_INTLINE, 4);
530
531 /* Reset the power state. */
532 printf("pcn%d: chip is in D%d power mode "
533 "-- setting to D0\n", unit,
534 pci_get_powerstate(dev));
535 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
536
537 /* Restore PCI config data. */
538 pci_write_config(dev, PCN_PCI_LOIO, iobase, 4);
539 pci_write_config(dev, PCN_PCI_LOMEM, membase, 4);
540 pci_write_config(dev, PCN_PCI_INTLINE, irq, 4);
541 }
542
543 /*
544 * Map control/status registers.
545 */
546 pci_enable_busmaster(dev);
547
548 rid = PCN_RID;
549 sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid,
550 0, ~0, 1, RF_ACTIVE);
551
552 if (sc->pcn_res == NULL) {
553 printf("pcn%d: couldn't map ports/memory\n", unit);
554 error = ENXIO;
555 goto fail;
556 }
557
558 sc->pcn_btag = rman_get_bustag(sc->pcn_res);
559 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
560
561 /* Allocate interrupt */
562 rid = 0;
563 sc->pcn_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
564 RF_SHAREABLE | RF_ACTIVE);
565
566 if (sc->pcn_irq == NULL) {
567 printf("pcn%d: couldn't map interrupt\n", unit);
568 error = ENXIO;
569 goto fail;
570 }
571
572 /* Reset the adapter. */
573 pcn_reset(sc);
574
575 /*
576 * Get station address from the EEPROM.
577 */
578 eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00);
579 eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01);
580 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
581
582 /*
583 * An AMD chip was detected. Inform the world.
584 */
585 printf("pcn%d: Ethernet address: %6D\n", unit,
586 sc->arpcom.ac_enaddr, ":");
587
588 sc->pcn_unit = unit;
589 callout_handle_init(&sc->pcn_stat_ch);
590
591 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
592 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
593
594 if (sc->pcn_ldata == NULL) {
595 printf("pcn%d: no memory for list buffers!\n", unit);
596 error = ENXIO;
597 goto fail;
598 }
599 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));
600
601 ifp = &sc->arpcom.ac_if;
602 ifp->if_softc = sc;
603 ifp->if_unit = unit;
604 ifp->if_name = "pcn";
605 ifp->if_mtu = ETHERMTU;
606 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
607 ifp->if_ioctl = pcn_ioctl;
608 ifp->if_output = ether_output;
609 ifp->if_start = pcn_start;
610 ifp->if_watchdog = pcn_watchdog;
611 ifp->if_init = pcn_init;
612 ifp->if_baudrate = 10000000;
613 ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1;
614
615 /*
616 * Do MII setup.
617 */
618 if (mii_phy_probe(dev, &sc->pcn_miibus,
619 pcn_ifmedia_upd, pcn_ifmedia_sts)) {
620 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
621 error = ENXIO;
622 goto fail;
623 }
624
625 /*
626 * Call MI attach routine.
627 */
628 ether_ifattach(ifp, (u_int8_t *) eaddr);
629
630 /* Hook interrupt last to avoid having to lock softc */
631 error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET,
632 pcn_intr, sc, &sc->pcn_intrhand);
633
634 if (error) {
635 printf("pcn%d: couldn't set up irq\n", unit);
636 ether_ifdetach(ifp);
637 goto fail;
638 }
639
640 fail:
641 if (error)
642 pcn_detach(dev);
643
644 return(error);
645 }
646
647 /*
648 * Shutdown hardware and free up resources. This can be called any
649 * time after the mutex has been initialized. It is called in both
650 * the error case in attach and the normal detach case so it needs
651 * to be careful about only freeing resources that have actually been
652 * allocated.
653 */
654 static int
655 pcn_detach(dev)
656 device_t dev;
657 {
658 struct pcn_softc *sc;
659 struct ifnet *ifp;
660
661 sc = device_get_softc(dev);
662 ifp = &sc->arpcom.ac_if;
663
664 KASSERT(mtx_initialized(&sc->pcn_mtx), ("pcn mutex not initialized"));
665 PCN_LOCK(sc);
666
667 /* These should only be active if attach succeeded */
668 if (device_is_attached(dev)) {
669 pcn_reset(sc);
670 pcn_stop(sc);
671 ether_ifdetach(ifp);
672 }
673 if (sc->pcn_miibus)
674 device_delete_child(dev, sc->pcn_miibus);
675 bus_generic_detach(dev);
676
677 if (sc->pcn_intrhand)
678 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
679 if (sc->pcn_irq)
680 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
681 if (sc->pcn_res)
682 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);
683
684 if (sc->pcn_ldata) {
685 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data),
686 M_DEVBUF);
687 }
688 PCN_UNLOCK(sc);
689
690 mtx_destroy(&sc->pcn_mtx);
691
692 return(0);
693 }
694
695 /*
696 * Initialize the transmit descriptors.
697 */
698 static int
699 pcn_list_tx_init(sc)
700 struct pcn_softc *sc;
701 {
702 struct pcn_list_data *ld;
703 struct pcn_ring_data *cd;
704 int i;
705
706 cd = &sc->pcn_cdata;
707 ld = sc->pcn_ldata;
708
709 for (i = 0; i < PCN_TX_LIST_CNT; i++) {
710 cd->pcn_tx_chain[i] = NULL;
711 ld->pcn_tx_list[i].pcn_tbaddr = 0;
712 ld->pcn_tx_list[i].pcn_txctl = 0;
713 ld->pcn_tx_list[i].pcn_txstat = 0;
714 }
715
716 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;
717
718 return(0);
719 }
720
721
722 /*
723 * Initialize the RX descriptors and allocate mbufs for them.
724 */
725 static int
726 pcn_list_rx_init(sc)
727 struct pcn_softc *sc;
728 {
729 struct pcn_list_data *ld;
730 struct pcn_ring_data *cd;
731 int i;
732
733 ld = sc->pcn_ldata;
734 cd = &sc->pcn_cdata;
735
736 for (i = 0; i < PCN_RX_LIST_CNT; i++) {
737 if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
738 return(ENOBUFS);
739 }
740
741 cd->pcn_rx_prod = 0;
742
743 return(0);
744 }
745
746 /*
747 * Initialize an RX descriptor and attach an MBUF cluster.
748 */
749 static int
750 pcn_newbuf(sc, idx, m)
751 struct pcn_softc *sc;
752 int idx;
753 struct mbuf *m;
754 {
755 struct mbuf *m_new = NULL;
756 struct pcn_rx_desc *c;
757
758 c = &sc->pcn_ldata->pcn_rx_list[idx];
759
760 if (m == NULL) {
761 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
762 if (m_new == NULL)
763 return(ENOBUFS);
764
765 MCLGET(m_new, M_DONTWAIT);
766 if (!(m_new->m_flags & M_EXT)) {
767 m_freem(m_new);
768 return(ENOBUFS);
769 }
770 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
771 } else {
772 m_new = m;
773 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
774 m_new->m_data = m_new->m_ext.ext_buf;
775 }
776
777 m_adj(m_new, ETHER_ALIGN);
778
779 sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
780 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
781 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
782 c->pcn_bufsz |= PCN_RXLEN_MBO;
783 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;
784
785 return(0);
786 }
787
788 /*
789 * A frame has been uploaded: pass the resulting mbuf chain up to
790 * the higher level protocols.
791 */
792 static void
793 pcn_rxeof(sc)
794 struct pcn_softc *sc;
795 {
796 struct ether_header *eh;
797 struct mbuf *m;
798 struct ifnet *ifp;
799 struct pcn_rx_desc *cur_rx;
800 int i;
801
802 ifp = &sc->arpcom.ac_if;
803 i = sc->pcn_cdata.pcn_rx_prod;
804
805 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
806 cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
807 m = sc->pcn_cdata.pcn_rx_chain[i];
808 sc->pcn_cdata.pcn_rx_chain[i] = NULL;
809
810 /*
811 * If an error occurs, update stats, clear the
812 * status word and leave the mbuf cluster in place:
813 * it should simply get re-used next time this descriptor
814 * comes up in the ring.
815 */
816 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
817 ifp->if_ierrors++;
818 pcn_newbuf(sc, i, m);
819 PCN_INC(i, PCN_RX_LIST_CNT);
820 continue;
821 }
822
823 if (pcn_newbuf(sc, i, NULL)) {
824 /* Ran out of mbufs; recycle this one. */
825 pcn_newbuf(sc, i, m);
826 ifp->if_ierrors++;
827 PCN_INC(i, PCN_RX_LIST_CNT);
828 continue;
829 }
830
831 PCN_INC(i, PCN_RX_LIST_CNT);
832
833 /* No errors; receive the packet. */
834 ifp->if_ipackets++;
835 eh = mtod(m, struct ether_header *);
836 m->m_len = m->m_pkthdr.len =
837 cur_rx->pcn_rxlen - ETHER_CRC_LEN;
838 m->m_pkthdr.rcvif = ifp;
839
840 (*ifp->if_input)(ifp, m);
841 }
842
843 sc->pcn_cdata.pcn_rx_prod = i;
844
845 return;
846 }
847
848 /*
849 * A frame was downloaded to the chip. It's safe for us to clean up
850 * the list buffers.
851 */
852
853 static void
854 pcn_txeof(sc)
855 struct pcn_softc *sc;
856 {
857 struct pcn_tx_desc *cur_tx = NULL;
858 struct ifnet *ifp;
859 u_int32_t idx;
860
861 ifp = &sc->arpcom.ac_if;
862
863 /*
864 * Go through our tx list and free mbufs for those
865 * frames that have been transmitted.
866 */
867 idx = sc->pcn_cdata.pcn_tx_cons;
868 while (idx != sc->pcn_cdata.pcn_tx_prod) {
869 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];
870
871 if (!PCN_OWN_TXDESC(cur_tx))
872 break;
873
874 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
875 sc->pcn_cdata.pcn_tx_cnt--;
876 PCN_INC(idx, PCN_TX_LIST_CNT);
877 continue;
878 }
879
880 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
881 ifp->if_oerrors++;
882 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
883 ifp->if_collisions++;
884 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
885 ifp->if_collisions++;
886 }
887
888 ifp->if_collisions +=
889 cur_tx->pcn_txstat & PCN_TXSTAT_TRC;
890
891 ifp->if_opackets++;
892 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
893 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
894 sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
895 }
896
897 sc->pcn_cdata.pcn_tx_cnt--;
898 PCN_INC(idx, PCN_TX_LIST_CNT);
899 }
900
901 if (idx != sc->pcn_cdata.pcn_tx_cons) {
902 /* Some buffers have been freed. */
903 sc->pcn_cdata.pcn_tx_cons = idx;
904 ifp->if_flags &= ~IFF_OACTIVE;
905 }
906 ifp->if_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 0 : 5;
907
908 return;
909 }
910
911 static void
912 pcn_tick(xsc)
913 void *xsc;
914 {
915 struct pcn_softc *sc;
916 struct mii_data *mii;
917 struct ifnet *ifp;
918
919 sc = xsc;
920 ifp = &sc->arpcom.ac_if;
921 PCN_LOCK(sc);
922
923 mii = device_get_softc(sc->pcn_miibus);
924 mii_tick(mii);
925
926 /* link just died */
927 if (sc->pcn_link & !(mii->mii_media_status & IFM_ACTIVE))
928 sc->pcn_link = 0;
929
930 /* link just came up, restart */
931 if (!sc->pcn_link && mii->mii_media_status & IFM_ACTIVE &&
932 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
933 sc->pcn_link++;
934 if (ifp->if_snd.ifq_head != NULL)
935 pcn_start(ifp);
936 }
937
938 sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
939
940 PCN_UNLOCK(sc);
941
942 return;
943 }
944
945 static void
946 pcn_intr(arg)
947 void *arg;
948 {
949 struct pcn_softc *sc;
950 struct ifnet *ifp;
951 u_int32_t status;
952
953 sc = arg;
954 ifp = &sc->arpcom.ac_if;
955
956 /* Supress unwanted interrupts */
957 if (!(ifp->if_flags & IFF_UP)) {
958 pcn_stop(sc);
959 return;
960 }
961
962 PCN_LOCK(sc);
963
964 CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);
965
966 while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
967 CSR_WRITE_4(sc, PCN_IO32_RDP, status);
968
969 if (status & PCN_CSR_RINT)
970 pcn_rxeof(sc);
971
972 if (status & PCN_CSR_TINT)
973 pcn_txeof(sc);
974
975 if (status & PCN_CSR_ERR) {
976 pcn_init(sc);
977 break;
978 }
979 }
980
981 if (ifp->if_snd.ifq_head != NULL)
982 pcn_start(ifp);
983
984 PCN_UNLOCK(sc);
985 return;
986 }
987
988 /*
989 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
990 * pointers to the fragment pointers.
991 */
992 static int
993 pcn_encap(sc, m_head, txidx)
994 struct pcn_softc *sc;
995 struct mbuf *m_head;
996 u_int32_t *txidx;
997 {
998 struct pcn_tx_desc *f = NULL;
999 struct mbuf *m;
1000 int frag, cur, cnt = 0;
1001
1002 /*
1003 * Start packing the mbufs in this chain into
1004 * the fragment pointers. Stop when we run out
1005 * of fragments or hit the end of the mbuf chain.
1006 */
1007 m = m_head;
1008 cur = frag = *txidx;
1009
1010 for (m = m_head; m != NULL; m = m->m_next) {
1011 if (m->m_len != 0) {
1012 if ((PCN_TX_LIST_CNT -
1013 (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
1014 return(ENOBUFS);
1015 f = &sc->pcn_ldata->pcn_tx_list[frag];
1016 f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
1017 f->pcn_txctl |= PCN_TXCTL_MBO;
1018 f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
1019 if (cnt == 0)
1020 f->pcn_txctl |= PCN_TXCTL_STP;
1021 else
1022 f->pcn_txctl |= PCN_TXCTL_OWN;
1023 cur = frag;
1024 PCN_INC(frag, PCN_TX_LIST_CNT);
1025 cnt++;
1026 }
1027 }
1028
1029 if (m != NULL)
1030 return(ENOBUFS);
1031
1032 sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
1033 sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
1034 PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
1035 sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
1036 sc->pcn_cdata.pcn_tx_cnt += cnt;
1037 *txidx = frag;
1038
1039 return(0);
1040 }
1041
1042 /*
1043 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1044 * to the mbuf data regions directly in the transmit lists. We also save a
1045 * copy of the pointers since the transmit list fragment pointers are
1046 * physical addresses.
1047 */
1048 static void
1049 pcn_start(ifp)
1050 struct ifnet *ifp;
1051 {
1052 struct pcn_softc *sc;
1053 struct mbuf *m_head = NULL;
1054 u_int32_t idx;
1055
1056 sc = ifp->if_softc;
1057
1058 PCN_LOCK(sc);
1059
1060 if (!sc->pcn_link) {
1061 PCN_UNLOCK(sc);
1062 return;
1063 }
1064
1065 idx = sc->pcn_cdata.pcn_tx_prod;
1066
1067 if (ifp->if_flags & IFF_OACTIVE) {
1068 PCN_UNLOCK(sc);
1069 return;
1070 }
1071
1072 while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
1073 IF_DEQUEUE(&ifp->if_snd, m_head);
1074 if (m_head == NULL)
1075 break;
1076
1077 if (pcn_encap(sc, m_head, &idx)) {
1078 IF_PREPEND(&ifp->if_snd, m_head);
1079 ifp->if_flags |= IFF_OACTIVE;
1080 break;
1081 }
1082
1083 /*
1084 * If there's a BPF listener, bounce a copy of this frame
1085 * to him.
1086 */
1087 BPF_MTAP(ifp, m_head);
1088
1089 }
1090
1091 /* Transmit */
1092 sc->pcn_cdata.pcn_tx_prod = idx;
1093 pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
1094
1095 /*
1096 * Set a timeout in case the chip goes out to lunch.
1097 */
1098 ifp->if_timer = 5;
1099
1100 PCN_UNLOCK(sc);
1101
1102 return;
1103 }
1104
1105 static void
1106 pcn_setfilt(ifp)
1107 struct ifnet *ifp;
1108 {
1109 struct pcn_softc *sc;
1110
1111 sc = ifp->if_softc;
1112
1113 /* If we want promiscuous mode, set the allframes bit. */
1114 if (ifp->if_flags & IFF_PROMISC) {
1115 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1116 } else {
1117 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
1118 }
1119
1120 /* Set the capture broadcast bit to capture broadcast frames. */
1121 if (ifp->if_flags & IFF_BROADCAST) {
1122 PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1123 } else {
1124 PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
1125 }
1126
1127 return;
1128 }
1129
/*
 * Initialize the adapter: stop/reset the chip, program the station
 * address, build the RX/TX descriptor rings, configure the RX filter
 * and DMA burst modes, then start the controller and (re)select media.
 * Takes PCN_LOCK for the duration; marks the interface IFF_RUNNING on
 * success and arms the one-second pcn_tick timeout.
 *
 * NOTE(review): pcn_stop() below also acquires PCN_LOCK while we hold
 * it here — this assumes the softc mutex is recursive; confirm.
 */
static void
pcn_init(xsc)
	void			*xsc;
{
	struct pcn_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = NULL;

	PCN_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	pcn_stop(sc);
	pcn_reset(sc);

	mii = device_get_softc(sc->pcn_miibus);

	/* Set MAC address: the chip takes it as three 16-bit CSR words. */
	pcn_csr_write(sc, PCN_CSR_PAR0,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	pcn_csr_write(sc, PCN_CSR_PAR1,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	pcn_csr_write(sc, PCN_CSR_PAR2,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list; bail out cleanly if mbufs are exhausted. */
	if (pcn_list_rx_init(sc) == ENOBUFS) {
		printf("pcn%d: initialization failed: no "
		    "memory for rx buffers\n", sc->pcn_unit);
		pcn_stop(sc);
		PCN_UNLOCK(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	pcn_list_tx_init(sc);

	/* Set up the mode register: select the MII port. */
	pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);

	/* Set up RX filter (promiscuous/broadcast bits). */
	pcn_setfilt(ifp);

	/*
	 * Load the multicast filter.
	 */
	pcn_setmulti(sc);

	/*
	 * Load the physical addresses of the RX and TX descriptor
	 * lists, split into low/high 16-bit halves.
	 */
	pcn_csr_write(sc, PCN_CSR_RXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_RXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);

	/*
	 * Set the RX and TX ring sizes.  The chip expects the ring
	 * length as a two's complement negative value, hence (~n)+1.
	 */
	pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
	pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);

	/* We're not using the initialization block. */
	pcn_csr_write(sc, PCN_CSR_IAB1, 0);

	/* Enable fast suspend mode. */
	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);

	/*
	 * Enable burst read and write. Also set the no underflow
	 * bit. This will avoid transmit underruns in certain
	 * conditions while still providing decent performance.
	 */
	PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
	    PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);

	/* Enable graceful recovery from underflow. */
	PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);

	/* Enable auto-padding of short TX frames. */
	PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);

	/* Disable MII autoneg (we handle this ourselves). */
	PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);

	/* The Am79C978 is a HomePNA part; select its PHY explicitly. */
	if (sc->pcn_type == Am79C978)
		pcn_bcr_write(sc, PCN_BCR_PHYSEL,
		    PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);

	/* Enable interrupts and start the controller running. */
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Re-arm the once-per-second stats/link tick. */
	sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);
	PCN_UNLOCK(sc);

	return;
}
1237
1238 /*
1239 * Set media options.
1240 */
1241 static int
1242 pcn_ifmedia_upd(ifp)
1243 struct ifnet *ifp;
1244 {
1245 struct pcn_softc *sc;
1246 struct mii_data *mii;
1247
1248 sc = ifp->if_softc;
1249 mii = device_get_softc(sc->pcn_miibus);
1250
1251 sc->pcn_link = 0;
1252 if (mii->mii_instance) {
1253 struct mii_softc *miisc;
1254 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1255 mii_phy_reset(miisc);
1256 }
1257 mii_mediachg(mii);
1258
1259 return(0);
1260 }
1261
1262 /*
1263 * Report current media status.
1264 */
1265 static void
1266 pcn_ifmedia_sts(ifp, ifmr)
1267 struct ifnet *ifp;
1268 struct ifmediareq *ifmr;
1269 {
1270 struct pcn_softc *sc;
1271 struct mii_data *mii;
1272
1273 sc = ifp->if_softc;
1274
1275 mii = device_get_softc(sc->pcn_miibus);
1276 mii_pollstat(mii);
1277 ifmr->ifm_active = mii->mii_media_active;
1278 ifmr->ifm_status = mii->mii_media_status;
1279
1280 return;
1281 }
1282
1283 static int
1284 pcn_ioctl(ifp, command, data)
1285 struct ifnet *ifp;
1286 u_long command;
1287 caddr_t data;
1288 {
1289 struct pcn_softc *sc = ifp->if_softc;
1290 struct ifreq *ifr = (struct ifreq *) data;
1291 struct mii_data *mii = NULL;
1292 int error = 0;
1293
1294 PCN_LOCK(sc);
1295
1296 switch(command) {
1297 case SIOCSIFFLAGS:
1298 if (ifp->if_flags & IFF_UP) {
1299 if (ifp->if_flags & IFF_RUNNING &&
1300 ifp->if_flags & IFF_PROMISC &&
1301 !(sc->pcn_if_flags & IFF_PROMISC)) {
1302 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1303 PCN_EXTCTL1_SPND);
1304 pcn_setfilt(ifp);
1305 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1306 PCN_EXTCTL1_SPND);
1307 pcn_csr_write(sc, PCN_CSR_CSR,
1308 PCN_CSR_INTEN|PCN_CSR_START);
1309 } else if (ifp->if_flags & IFF_RUNNING &&
1310 !(ifp->if_flags & IFF_PROMISC) &&
1311 sc->pcn_if_flags & IFF_PROMISC) {
1312 PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
1313 PCN_EXTCTL1_SPND);
1314 pcn_setfilt(ifp);
1315 PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
1316 PCN_EXTCTL1_SPND);
1317 pcn_csr_write(sc, PCN_CSR_CSR,
1318 PCN_CSR_INTEN|PCN_CSR_START);
1319 } else if (!(ifp->if_flags & IFF_RUNNING))
1320 pcn_init(sc);
1321 } else {
1322 if (ifp->if_flags & IFF_RUNNING)
1323 pcn_stop(sc);
1324 }
1325 sc->pcn_if_flags = ifp->if_flags;
1326 error = 0;
1327 break;
1328 case SIOCADDMULTI:
1329 case SIOCDELMULTI:
1330 pcn_setmulti(sc);
1331 error = 0;
1332 break;
1333 case SIOCGIFMEDIA:
1334 case SIOCSIFMEDIA:
1335 mii = device_get_softc(sc->pcn_miibus);
1336 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1337 break;
1338 default:
1339 error = ether_ioctl(ifp, command, data);
1340 break;
1341 }
1342
1343 PCN_UNLOCK(sc);
1344
1345 return(error);
1346 }
1347
1348 static void
1349 pcn_watchdog(ifp)
1350 struct ifnet *ifp;
1351 {
1352 struct pcn_softc *sc;
1353
1354 sc = ifp->if_softc;
1355
1356 PCN_LOCK(sc);
1357
1358 ifp->if_oerrors++;
1359 printf("pcn%d: watchdog timeout\n", sc->pcn_unit);
1360
1361 pcn_stop(sc);
1362 pcn_reset(sc);
1363 pcn_init(sc);
1364
1365 if (ifp->if_snd.ifq_head != NULL)
1366 pcn_start(ifp);
1367
1368 PCN_UNLOCK(sc);
1369
1370 return;
1371 }
1372
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  The order matters: interrupts are masked and
 * the chip's DMA engine is stopped BEFORE the ring mbufs are freed,
 * so the hardware cannot touch memory we are releasing.
 */
static void
pcn_stop(sc)
	struct pcn_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	PCN_LOCK(sc);
	/* Disarm the transmit watchdog. */
	ifp->if_timer = 0;

	/* Cancel the periodic stats/link tick. */
	untimeout(pcn_tick, sc, sc->pcn_stat_ch);

	/* Turn off interrupts */
	PCN_CSR_CLRBIT(sc, PCN_CSR_CSR, PCN_CSR_INTEN);
	/* Stop adapter */
	PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
	sc->pcn_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < PCN_RX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
			sc->pcn_cdata.pcn_rx_chain[i] = NULL;
		}
	}
	/* Clear the RX descriptor ring itself. */
	bzero((char *)&sc->pcn_ldata->pcn_rx_list,
	    sizeof(sc->pcn_ldata->pcn_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < PCN_TX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
			sc->pcn_cdata.pcn_tx_chain[i] = NULL;
		}
	}

	/* Clear the TX descriptor ring itself. */
	bzero((char *)&sc->pcn_ldata->pcn_tx_list,
	    sizeof(sc->pcn_ldata->pcn_tx_list));

	/* Mark the interface down and ready to accept a new start. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	PCN_UNLOCK(sc);

	return;
}
1426
1427 /*
1428 * Stop all chip I/O so that the kernel's probe routines don't
1429 * get confused by errant DMAs when rebooting.
1430 */
1431 static void
1432 pcn_shutdown(dev)
1433 device_t dev;
1434 {
1435 struct pcn_softc *sc;
1436
1437 sc = device_get_softc(dev);
1438
1439 PCN_LOCK(sc);
1440 pcn_reset(sc);
1441 pcn_stop(sc);
1442 PCN_UNLOCK(sc);
1443
1444 return;
1445 }
Cache object: 0dd2e862c205f3950b11d87f7e271a4b
|