sys/dev/ic/aic6915.c
/* $NetBSD: aic6915.c,v 1.10 2003/10/25 18:35:42 christos Exp $ */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.10 2003/10/25 18:35:42 christos Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

void    sf_start(struct ifnet *);
void    sf_watchdog(struct ifnet *);
int     sf_ioctl(struct ifnet *, u_long, caddr_t);
int     sf_init(struct ifnet *);
void    sf_stop(struct ifnet *, int);

void    sf_shutdown(void *);

void    sf_txintr(struct sf_softc *);
void    sf_rxintr(struct sf_softc *);
void    sf_stats_update(struct sf_softc *);

void    sf_reset(struct sf_softc *);
void    sf_macreset(struct sf_softc *);
void    sf_rxdrain(struct sf_softc *);
int     sf_add_rxbuf(struct sf_softc *, int);
uint8_t sf_read_eeprom(struct sf_softc *, int);
void    sf_set_filter(struct sf_softc *);

int     sf_mii_read(struct device *, int, int);
void    sf_mii_write(struct device *, int, int, int);
void    sf_mii_statchg(struct device *);

void    sf_tick(void *);

int     sf_mediachange(struct ifnet *);
void    sf_mediastatus(struct ifnet *, struct ifmediareq *);

#define sf_funcreg_read(sc, reg) \
        bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define sf_funcreg_write(sc, reg, val) \
        bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

static __inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

        if (__predict_false(sc->sc_iomapped)) {
                bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
                    reg);
                return (bus_space_read_4(sc->sc_st, sc->sc_sh,
                    SF_IndirectIoDataPort));
        }

        return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static __inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

        if (__predict_false(sc->sc_iomapped)) {
                bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
                    reg);
                bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
                    val);
                return;
        }

        bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define sf_genreg_read(sc, reg) \
        sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define sf_genreg_write(sc, reg, val) \
        sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
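
/*
 * Illustrative sketch (not part of the original driver): the cost of a
 * general register access in each mapping mode. The same sf_genreg_read()
 * call is a single bus_space_read_4() when the chip is memory mapped, but
 * expands to an address write plus a data read through the indirect I/O
 * window when sc_iomapped is set. `sc' here stands for any valid softc
 * pointer.
 */
#if 0   /* example only */
        uint32_t v;

        /* Memory mapped: one 32-bit read at SF_GENREG_OFFSET + reg. */
        v = sf_genreg_read(sc, SF_MacConfig1);

        /*
         * I/O mapped: the same call performs, via sf_reg_read():
         *
         *      bus_space_write_4(st, sh, SF_IndirectIoAccess, off);
         *      v = bus_space_read_4(st, sh, SF_IndirectIoDataPort);
         */
        v = sf_genreg_read(sc, SF_MacConfig1);
#endif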

/*
 * sf_attach:
 *
 * Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int i, rseg, error;
        bus_dma_segment_t seg;
        u_int8_t enaddr[ETHER_ADDR_LEN];

        callout_init(&sc->sc_tick_callout);

        /*
         * If we're I/O mapped, the functional register handle is
         * the same as the base handle. If we're memory mapped,
         * carve off a chunk of the register space for the functional
         * registers, to save on arithmetic later.
         */
        if (sc->sc_iomapped)
                sc->sc_sh_func = sc->sc_sh;
        else {
                if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
                    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
                        printf("%s: unable to sub-region functional "
                            "registers, error = %d\n", sc->sc_dev.dv_xname,
                            error);
                        return;
                }
        }

        /*
         * Initialize the transmit threshold for this interface. The
         * manual describes the default as 4 * 16 bytes. We start out
         * at 10 * 16 bytes, to avoid a bunch of initial underruns on
         * several platforms.
         */
        sc->sc_txthresh = 10;
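        /*
         * Note: sc_txthresh is kept in units of 16 bytes, so 10 means the
         * chip buffers 10 * 16 = 160 bytes of a frame in the transmit
         * FIFO before starting transmission. On each transmit FIFO
         * underrun, sf_intr() below bumps this by one unit and
         * reprograms the chip, so the threshold adapts upward over time.
         */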

        /*
         * Allocate the control data structures, and create and load the
         * DMA map for it.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmat,
            sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate control data, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_0;
        }

        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map control data, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_1;
        }

        if ((error = bus_dmamap_create(sc->sc_dmat,
            sizeof(struct sf_control_data), 1,
            sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
            &sc->sc_cddmamap)) != 0) {
                printf("%s: unable to create control data DMA map, "
                    "error = %d\n", sc->sc_dev.dv_xname, error);
                goto fail_2;
        }

        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
            sc->sc_control_data, sizeof(struct sf_control_data), NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load control data DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                goto fail_3;
        }

        /*
         * Create the transmit buffer DMA maps.
         */
        for (i = 0; i < SF_NTXDESC; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
                        printf("%s: unable to create tx DMA map %d, "
                            "error = %d\n", sc->sc_dev.dv_xname, i, error);
                        goto fail_4;
                }
        }

        /*
         * Create the receive buffer DMA maps.
         */
        for (i = 0; i < SF_NRXDESC; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
                    MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
                        printf("%s: unable to create rx DMA map %d, "
                            "error = %d\n", sc->sc_dev.dv_xname, i, error);
                        goto fail_5;
                }
        }

        /*
         * Reset the chip to a known state.
         */
        sf_reset(sc);

        /*
         * Read the Ethernet address from the EEPROM.
         */
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

        printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
            ether_sprintf(enaddr));
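        /*
         * Note on the loop above: the station address lives at EEPROM
         * offsets 15 through 20, stored in reverse order. With
         * ETHER_ADDR_LEN == 6 the expression reads offset 20 - i, so
         * enaddr[0] comes from offset 20 and enaddr[5] from offset 15.
         */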

        if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
                printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);

        /*
         * Initialize our media structures and probe the MII.
         */
        sc->sc_mii.mii_ifp = ifp;
        sc->sc_mii.mii_readreg = sf_mii_read;
        sc->sc_mii.mii_writereg = sf_mii_write;
        sc->sc_mii.mii_statchg = sf_mii_statchg;
        ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
            sf_mediastatus);
        mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
            MII_OFFSET_ANY, 0);
        if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
                ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
        } else
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

        strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = sf_ioctl;
        ifp->if_start = sf_start;
        ifp->if_watchdog = sf_watchdog;
        ifp->if_init = sf_init;
        ifp->if_stop = sf_stop;
        IFQ_SET_READY(&ifp->if_snd);

        /*
         * Attach the interface.
         */
        if_attach(ifp);
        ether_ifattach(ifp, enaddr);

        /*
         * Make sure the interface is shutdown during reboot.
         */
        sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
        if (sc->sc_sdhook == NULL)
                printf("%s: WARNING: unable to establish shutdown hook\n",
                    sc->sc_dev.dv_xname);
        return;

        /*
         * Free any resources we've allocated during the failed attach
         * attempt. Do this in reverse order and fall through.
         */
fail_5:
        for (i = 0; i < SF_NRXDESC; i++) {
                if (sc->sc_rxsoft[i].ds_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmat,
                            sc->sc_rxsoft[i].ds_dmamap);
        }
fail_4:
        for (i = 0; i < SF_NTXDESC; i++) {
                if (sc->sc_txsoft[i].ds_dmamap != NULL)
                        bus_dmamap_destroy(sc->sc_dmat,
                            sc->sc_txsoft[i].ds_dmamap);
        }
        bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
        bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
        bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
            sizeof(struct sf_control_data));
fail_1:
        bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_0:
        return;
}

/*
 * sf_shutdown:
 *
 * Shutdown hook -- make sure the interface is stopped at reboot.
 */
void
sf_shutdown(void *arg)
{
        struct sf_softc *sc = arg;

        sf_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sf_start: [ifnet interface function]
 *
 * Start packet transmission on the interface.
 */
void
sf_start(struct ifnet *ifp)
{
        struct sf_softc *sc = ifp->if_softc;
        struct mbuf *m0, *m;
        struct sf_txdesc0 *txd;
        struct sf_descsoft *ds;
        bus_dmamap_t dmamap;
        int error, producer, last = -1, opending, seg;

        /*
         * Remember the previous number of pending transmits.
         */
        opending = sc->sc_txpending;

        /*
         * Find out where we're sitting.
         */
        producer = SF_TXDINDEX_TO_HOST(
            TDQPI_HiPrTxProducerIndex_get(
            sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

        /*
         * Loop through the send queue, setting up transmit descriptors
         * until we drain the queue, or use up all available transmit
         * descriptors. Leave a blank one at the end for sanity's sake.
         */
        while (sc->sc_txpending < (SF_NTXDESC - 1)) {
                /*
                 * Grab a packet off the queue.
                 */
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;
                m = NULL;

                /*
                 * Get the transmit descriptor.
                 */
                txd = &sc->sc_txdescs[producer];
                ds = &sc->sc_txsoft[producer];
                dmamap = ds->ds_dmamap;

                /*
                 * Load the DMA map. If this fails, the packet either
                 * didn't fit in the allotted number of frags, or we were
                 * short on resources. In this case, we'll copy and try
                 * again.
                 */
                if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
                    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
                        MGETHDR(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                printf("%s: unable to allocate Tx mbuf\n",
                                    sc->sc_dev.dv_xname);
                                break;
                        }
                        if (m0->m_pkthdr.len > MHLEN) {
                                MCLGET(m, M_DONTWAIT);
                                if ((m->m_flags & M_EXT) == 0) {
                                        printf("%s: unable to allocate Tx "
                                            "cluster\n", sc->sc_dev.dv_xname);
                                        m_freem(m);
                                        break;
                                }
                        }
                        m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
                        m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
                        error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
                            m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
                        if (error) {
                                printf("%s: unable to load Tx buffer, "
                                    "error = %d\n", sc->sc_dev.dv_xname, error);
                                break;
                        }
                }

                /*
                 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
                 */
                IFQ_DEQUEUE(&ifp->if_snd, m0);
                if (m != NULL) {
                        m_freem(m0);
                        m0 = m;
                }

                /* Initialize the descriptor. */
                txd->td_word0 =
                    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
                if (producer == (SF_NTXDESC - 1))
                        txd->td_word0 |= htole32(TD_W0_END);
                txd->td_word1 = htole32(dmamap->dm_nsegs);
                for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
                        txd->td_frags[seg].fr_addr =
                            htole32(dmamap->dm_segs[seg].ds_addr);
                        txd->td_frags[seg].fr_len =
                            htole32(dmamap->dm_segs[seg].ds_len);
                }

                /* Sync the descriptor and the DMA map. */
                SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
                bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
                    BUS_DMASYNC_PREWRITE);

                /*
                 * Store a pointer to the packet so we can free it later.
                 */
                ds->ds_mbuf = m0;

                /* Advance the Tx pointer. */
                sc->sc_txpending++;
                last = producer;
                producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
                /*
                 * Pass the packet to any BPF listeners.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m0);
#endif
        }

        if (sc->sc_txpending == (SF_NTXDESC - 1)) {
                /* No more slots left; notify upper layer. */
                ifp->if_flags |= IFF_OACTIVE;
        }

        if (sc->sc_txpending != opending) {
                KASSERT(last != -1);
                /*
                 * We enqueued packets. Cause a transmit interrupt to
                 * happen on the last packet we enqueued, and give the
                 * new descriptors to the chip by writing the new
                 * producer index.
                 */
                sc->sc_txdescs[last].td_word0 |= htole32(TD_W0_INTR);
                SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

                sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
                    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

                /* Set a watchdog timer in case the chip flakes out. */
                ifp->if_timer = 5;
        }
}
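
/*
 * Illustrative layout (not in the original driver): a type-0 transmit
 * descriptor as sf_start() fills it for a packet that mapped to two DMA
 * segments. td_word0 carries the descriptor ID, the CRC-enable bit, and
 * the frame length; td_word1 carries the fragment count; each fragment
 * entry holds a bus address/length pair. All fields are stored
 * little-endian:
 *
 *      td_word0    = htole32(TD_W0_ID | TD_W0_CRCEN | pktlen);
 *      td_word1    = htole32(2);
 *      td_frags[0] = { htole32(seg0_addr), htole32(seg0_len) }
 *      td_frags[1] = { htole32(seg1_addr), htole32(seg1_len) }
 */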

/*
 * sf_watchdog: [ifnet interface function]
 *
 * Watchdog timer handler.
 */
void
sf_watchdog(struct ifnet *ifp)
{
        struct sf_softc *sc = ifp->if_softc;

        printf("%s: device timeout\n", sc->sc_dev.dv_xname);
        ifp->if_oerrors++;

        (void) sf_init(ifp);

        /* Try to get more packets going. */
        sf_start(ifp);
}

/*
 * sf_ioctl: [ifnet interface function]
 *
 * Handle control requests from the operator.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct sf_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
        int s, error;

        s = splnet();

        switch (cmd) {
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
                if (error == ENETRESET) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        sf_set_filter(sc);
                        error = 0;
                }
                break;
        }

        /* Try to get more packets going. */
        sf_start(ifp);

        splx(s);
        return (error);
}

/*
 * sf_intr:
 *
 * Interrupt service routine.
 */
int
sf_intr(void *arg)
{
        struct sf_softc *sc = arg;
        uint32_t isr;
        int handled = 0, wantinit = 0;

        for (;;) {
                /* Reading clears all interrupts we're interested in. */
                isr = sf_funcreg_read(sc, SF_InterruptStatus);
                if ((isr & IS_PCIPadInt) == 0)
                        break;

                handled = 1;

                /* Handle receive interrupts. */
                if (isr & IS_RxQ1DoneInt)
                        sf_rxintr(sc);

                /* Handle transmit completion interrupts. */
                if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
                        sf_txintr(sc);

                /* Handle abnormal interrupts. */
                if (isr & IS_AbnormalInterrupt) {
                        /* Statistics. */
                        if (isr & IS_StatisticWrapInt)
                                sf_stats_update(sc);

                        /* DMA errors. */
                        if (isr & IS_DmaErrInt) {
                                wantinit = 1;
                                printf("%s: WARNING: DMA error\n",
                                    sc->sc_dev.dv_xname);
                        }

                        /* Transmit FIFO underruns. */
                        if (isr & IS_TxDataLowInt) {
                                if (sc->sc_txthresh < 0xff)
                                        sc->sc_txthresh++;
                                printf("%s: transmit FIFO underrun, new "
                                    "threshold: %d bytes\n",
                                    sc->sc_dev.dv_xname,
                                    sc->sc_txthresh * 16);
                                sf_funcreg_write(sc, SF_TransmitFrameCSR,
                                    sc->sc_TransmitFrameCSR |
                                    TFCSR_TransmitThreshold(sc->sc_txthresh));
                                sf_funcreg_write(sc, SF_TxDescQueueCtrl,
                                    sc->sc_TxDescQueueCtrl |
                                    TDQC_TxHighPriorityFifoThreshold(
                                    sc->sc_txthresh));
                        }
                }
        }

        if (handled) {
                /* Reset the interface, if necessary. */
                if (wantinit)
                        sf_init(&sc->sc_ethercom.ec_if);

                /* Try and get more packets going. */
                sf_start(&sc->sc_ethercom.ec_if);
        }

        return (handled);
}

/*
 * sf_txintr:
 *
 * Helper -- handle transmit completion interrupts.
 */
void
sf_txintr(struct sf_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct sf_descsoft *ds;
        uint32_t cqci, tcd;
        int consumer, producer, txidx;

try_again:
        cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

        consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
        producer = CQPI_TxCompletionProducerIndex_get(
            sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

        if (consumer == producer)
                return;

        ifp->if_flags &= ~IFF_OACTIVE;

        while (consumer != producer) {
                SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
                tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

                txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
                if ((tcd & TCD_PR) == 0)
                        printf("%s: Tx queue mismatch, index %d\n",
                            sc->sc_dev.dv_xname, txidx);
#endif
                /*
                 * NOTE: stats are updated later. We're just
                 * releasing packets that have been DMA'd to
                 * the chip.
                 */
                ds = &sc->sc_txsoft[txidx];
                SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
                    0, ds->ds_dmamap->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);
                m_freem(ds->ds_mbuf);
                ds->ds_mbuf = NULL;

                consumer = SF_NEXTTCD(consumer);
                sc->sc_txpending--;
        }

        /* XXXJRT -- should be KDASSERT() */
        KASSERT(sc->sc_txpending >= 0);

        /* If all packets are done, cancel the watchdog timer. */
        if (sc->sc_txpending == 0)
                ifp->if_timer = 0;

        /* Update the consumer index. */
        sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
            (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
            CQCI_TxCompletionConsumerIndex(consumer));

        /* Double check for new completions. */
        goto try_again;
}

/*
 * sf_rxintr:
 *
 * Helper -- handle receive interrupts.
 */
void
sf_rxintr(struct sf_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct sf_descsoft *ds;
        struct sf_rcd_full *rcd;
        struct mbuf *m;
        uint32_t cqci, word0;
        int consumer, producer, bufproducer, rxidx, len;

try_again:
        cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

        consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
        producer = CQPI_RxCompletionQ1ProducerIndex_get(
            sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
        bufproducer = RXQ1P_RxDescQ1Producer_get(
            sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

        if (consumer == producer)
                return;

        while (consumer != producer) {
                rcd = &sc->sc_rxcomp[consumer];
                SF_CDRXCSYNC(sc, consumer,
                    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
                SF_CDRXCSYNC(sc, consumer,
                    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

                word0 = le32toh(rcd->rcd_word0);
                rxidx = RCD_W0_EndIndex(word0);

                ds = &sc->sc_rxsoft[rxidx];

                consumer = SF_NEXTRCD(consumer);
                bufproducer = SF_NEXTRX(bufproducer);

                if ((word0 & RCD_W0_OK) == 0) {
                        SF_INIT_RXDESC(sc, rxidx);
                        continue;
                }

                bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
                    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

                /*
                 * No errors; receive the packet. Note that we have
                 * configured the Starfire to NOT transfer the CRC
                 * with the packet.
                 */
                len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
                /*
                 * Allocate a new mbuf cluster. If that fails, we are
                 * out of memory, and must drop the packet and recycle
                 * the buffer that's already attached to this descriptor.
                 */
                m = ds->ds_mbuf;
                if (sf_add_rxbuf(sc, rxidx) != 0) {
                        ifp->if_ierrors++;
                        SF_INIT_RXDESC(sc, rxidx);
                        bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
                            ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
                        continue;
                }
#else
                /*
                 * The Starfire's receive buffer must be 4-byte aligned.
                 * But this means that the data after the Ethernet header
                 * is misaligned. We must allocate a new buffer and
                 * copy the data, shifted forward 2 bytes.
                 */
                MGETHDR(m, M_DONTWAIT, MT_DATA);
                if (m == NULL) {
dropit:
                        ifp->if_ierrors++;
                        SF_INIT_RXDESC(sc, rxidx);
                        bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
                            ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
                        continue;
                }
                if (len > (MHLEN - 2)) {
                        MCLGET(m, M_DONTWAIT);
                        if ((m->m_flags & M_EXT) == 0) {
                                m_freem(m);
                                goto dropit;
                        }
                }
                m->m_data += 2;
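                /*
                 * Worked example of the alignment fix-up: the Ethernet
                 * header is 14 bytes, so if the frame starts on a 4-byte
                 * boundary the IP header begins at offset 14, which is
                 * only 2-byte aligned. Advancing m_data by 2 before the
                 * copy puts the IP header on a 4-byte boundary, as
                 * strict-alignment machines require.
                 */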

                /*
                 * Note that we use clusters for incoming frames, so the
                 * buffer is virtually contiguous.
                 */
                memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

                /* Allow the receive descriptor to continue using its mbuf. */
                SF_INIT_RXDESC(sc, rxidx);
                bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
                    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
                /*
                 * Pass this up to any BPF listeners.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

                /* Pass it on. */
                (*ifp->if_input)(ifp, m);
        }

        /* Update the chip's pointers. */
        sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
            (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
            CQCI_RxCompletionQ1ConsumerIndex(consumer));
        sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
            RXQ1P_RxDescQ1Producer(bufproducer));

        /* Double-check for any new completions. */
        goto try_again;
}

/*
 * sf_tick:
 *
 * One second timer, used to tick the MII and update stats.
 */
void
sf_tick(void *arg)
{
        struct sf_softc *sc = arg;
        int s;

        s = splnet();
        mii_tick(&sc->sc_mii);
        sf_stats_update(sc);
        splx(s);

        callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 * Read the statistics counters.
 */
void
sf_stats_update(struct sf_softc *sc)
{
        struct sf_stats stats;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        uint32_t *p;
        u_int i;

        p = &stats.TransmitOKFrames;
        for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
                *p++ = sf_genreg_read(sc,
                    SF_STATS_BASE + (i * sizeof(uint32_t)));
                sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
        }
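        /*
         * The walk above assumes struct sf_stats is nothing but uint32_t
         * counters laid out in the same order as the chip's statistics
         * registers starting at SF_STATS_BASE. Each counter is zeroed as
         * it is read, so the sums below are deltas since the last harvest
         * and can simply be added to the interface totals.
         */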

        ifp->if_opackets += stats.TransmitOKFrames;

        ifp->if_collisions += stats.SingleCollisionFrames +
            stats.MultipleCollisionFrames;

        ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
            stats.TransmitAbortDueToExcessingDeferral +
            stats.FramesLostDueToInternalTransmitErrors;

        ifp->if_ipackets += stats.ReceiveOKFrames;

        ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
            stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
            stats.ReceiveFramesJabbersError +
            stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 * Perform a soft reset on the Starfire.
 */
void
sf_reset(struct sf_softc *sc)
{
        int i;

        sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

        sf_macreset(sc);

        sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
        for (i = 0; i < 1000; i++) {
                delay(10);
                if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
                    PDC_SoftReset) == 0)
                        break;
        }

        if (i == 1000) {
                printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
                sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
        }

        delay(1000);
}

/*
 * sf_macreset:
 *
 * Reset the MAC portion of the Starfire.
 */
void
sf_macreset(struct sf_softc *sc)
{

        sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
        delay(1000);
        sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init: [ifnet interface function]
 *
 * Initialize the interface. Must be called at splnet().
 */
int
sf_init(struct ifnet *ifp)
{
        struct sf_softc *sc = ifp->if_softc;
        struct sf_descsoft *ds;
        int error = 0;
        u_int i;

        /*
         * Cancel any pending I/O.
         */
        sf_stop(ifp, 0);

        /*
         * Reset the Starfire to a known state.
         */
        sf_reset(sc);

        /* Clear the stat counters. */
        for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
                sf_genreg_write(sc, SF_STATS_BASE + i, 0);

        /*
         * Initialize the transmit descriptor ring.
         */
        memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
        sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
        sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
        sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

        /*
         * Initialize the transmit completion ring.
         */
        for (i = 0; i < SF_NTCD; i++) {
                sc->sc_txcomp[i].tcd_word0 = htole32(TCD_DMA_ID);
                SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        }
        sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
        sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

        /*
         * Initialize the receive descriptor ring.
         */
        for (i = 0; i < SF_NRXDESC; i++) {
                ds = &sc->sc_rxsoft[i];
                if (ds->ds_mbuf == NULL) {
                        if ((error = sf_add_rxbuf(sc, i)) != 0) {
                                printf("%s: unable to allocate or map rx "
                                    "buffer %d, error = %d\n",
                                    sc->sc_dev.dv_xname, i, error);
                                /*
                                 * XXX Should attempt to run with fewer receive
                                 * XXX buffers instead of just failing.
                                 */
                                sf_rxdrain(sc);
                                goto out;
                        }
                } else
                        SF_INIT_RXDESC(sc, i);
        }
        sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
        sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
        sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

        /*
         * Initialize the receive completion ring.
         */
        for (i = 0; i < SF_NRCD; i++) {
                sc->sc_rxcomp[i].rcd_word0 = htole32(RCD_W0_ID);
                sc->sc_rxcomp[i].rcd_word1 = 0;
                sc->sc_rxcomp[i].rcd_word2 = 0;
                sc->sc_rxcomp[i].rcd_timestamp = 0;
                SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        }
        sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
            RCQ1C_RxCompletionQ1Type(3));
        sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

        /*
         * Initialize the Tx CSR.
         */
        sc->sc_TransmitFrameCSR = 0;
        sf_funcreg_write(sc, SF_TransmitFrameCSR,
            sc->sc_TransmitFrameCSR |
            TFCSR_TransmitThreshold(sc->sc_txthresh));

        /*
         * Initialize the Tx descriptor control register.
         */
        sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
            TDQC_TxDmaBurstSize(4) |    /* default */
            TDQC_MinFrameSpacing(3) |   /* 128 bytes */
            TDQC_TxDescType(0);
        sf_funcreg_write(sc, SF_TxDescQueueCtrl,
            sc->sc_TxDescQueueCtrl |
            TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

        /*
         * Initialize the Rx descriptor control registers.
         */
        sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
            RDQ1C_RxQ1BufferLength(MCLBYTES) |
            RDQ1C_RxDescSpacing(0));
        sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

        /*
         * Initialize the Tx descriptor producer indices.
         */
        sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
            TDQPI_HiPrTxProducerIndex(0) |
            TDQPI_LoPrTxProducerIndex(0));

        /*
         * Initialize the Rx descriptor producer indices.
         */
        sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
            RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
        sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
            RXQ2P_RxDescQ2Producer(0));

        /*
         * Initialize the Tx and Rx completion queue consumer indices.
         */
        sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
            CQCI_TxCompletionConsumerIndex(0) |
            CQCI_RxCompletionQ1ConsumerIndex(0));
        sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

        /*
         * Initialize the Rx DMA control register.
         */
        sf_funcreg_write(sc, SF_RxDmaCtrl,
            RDC_RxHighPriorityThreshold(6) |    /* default */
            RDC_RxBurstSize(4));                /* default */

        /*
         * Set the receive filter.
         */
        sc->sc_RxAddressFilteringCtl = 0;
        sf_set_filter(sc);

        /*
         * Set MacConfig1. When we set the media, MacConfig1 will
         * actually be written and the MAC part reset.
         */
        sc->sc_MacConfig1 = MC1_PadEn;

        /*
         * Set the media.
         */
        mii_mediachg(&sc->sc_mii);

        /*
         * Initialize the interrupt register.
         */
        sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
            IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
            IS_StatisticWrapInt;
        sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

        sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
            PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

        /*
         * Start the transmit and receive processes.
         */
        sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
            GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

        /* Start the one second clock. */
        callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

        /*
         * Note that the interface is now running.
         */
        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

out:
        if (error) {
                ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
                ifp->if_timer = 0;
                printf("%s: interface not running\n", sc->sc_dev.dv_xname);
        }
        return (error);
}

/*
 * sf_rxdrain:
 *
 * Drain the receive queue.
 */
void
sf_rxdrain(struct sf_softc *sc)
{
        struct sf_descsoft *ds;
        int i;

        for (i = 0; i < SF_NRXDESC; i++) {
                ds = &sc->sc_rxsoft[i];
                if (ds->ds_mbuf != NULL) {
                        bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
                        m_freem(ds->ds_mbuf);
                        ds->ds_mbuf = NULL;
                }
        }
}

/*
 * sf_stop: [ifnet interface function]
 *
 * Stop transmission on the interface.
 */
void
sf_stop(struct ifnet *ifp, int disable)
{
        struct sf_softc *sc = ifp->if_softc;
        struct sf_descsoft *ds;
        int i;

        /* Stop the one second clock. */
        callout_stop(&sc->sc_tick_callout);

        /* Down the MII. */
        mii_down(&sc->sc_mii);

        /* Disable interrupts. */
        sf_funcreg_write(sc, SF_InterruptEn, 0);

        /* Stop the transmit and receive processes. */
        sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

        /*
         * Release any queued transmit buffers.
         */
        for (i = 0; i < SF_NTXDESC; i++) {
                ds = &sc->sc_txsoft[i];
                if (ds->ds_mbuf != NULL) {
                        bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
                        m_freem(ds->ds_mbuf);
                        ds->ds_mbuf = NULL;
                }
        }

        if (disable)
                sf_rxdrain(sc);

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 * Read from the Starfire EEPROM.
 */
uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
        uint32_t reg;

        reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

        return ((reg >> (8 * (offset & 3))) & 0xff);
}
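
/*
 * Worked example for sf_read_eeprom(): the EEPROM is exposed as 32-bit
 * words starting at SF_EEPROM_BASE, four bytes per word. For offset 18,
 * (offset & ~3) == 16 selects the fifth word and (offset & 3) == 2
 * selects byte lane 2, so the routine returns bits 23:16 of that word.
 */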

/*
 * sf_add_rxbuf:
 *
 * Add a receive buffer to the indicated descriptor.
 */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
        struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
        struct mbuf *m;
        int error;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (ENOBUFS);

        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                return (ENOBUFS);
        }

        if (ds->ds_mbuf != NULL)
                bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

        ds->ds_mbuf = m;

        error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
            m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
            BUS_DMA_READ|BUS_DMA_NOWAIT);
        if (error) {
                printf("%s: can't load rx DMA map %d, error = %d\n",
                    sc->sc_dev.dv_xname, idx, error);
                panic("sf_add_rxbuf"); /* XXX */
        }

        bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
            ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

        SF_INIT_RXDESC(sc, idx);

        return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
        uint32_t reg0, reg1, reg2;

        reg0 = enaddr[5] | (enaddr[4] << 8);
        reg1 = enaddr[3] | (enaddr[2] << 8);
        reg2 = enaddr[1] | (enaddr[0] << 8);

        sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
        sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
        sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}
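
/*
 * Worked example for sf_set_filter_perfect(): each perfect-filter slot
 * is 0x10 bytes of register space holding the address as three 16-bit
 * words, last bytes first. For 00:02:b3:12:34:56 in slot 0:
 *
 *      reg0 = 0x3456 (enaddr[5] | enaddr[4] << 8)
 *      reg1 = 0xb312 (enaddr[3] | enaddr[2] << 8)
 *      reg2 = 0x0002 (enaddr[1] | enaddr[0] << 8)
 */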

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
        uint32_t hash, slot, reg;

        hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
        slot = hash >> 4;

        reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
        reg |= 1 << (hash & 0xf);
        sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}
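
/*
 * Worked example for sf_set_filter_hash(): the top 9 bits of the
 * big-endian CRC-32 of the address form a hash in the range 0-511.
 * The 512-bit hash table is spread across 32 words spaced 0x10 apart,
 * 16 hash bits per word. For hash 0x1a5, slot = 0x1a5 >> 4 = 0x1a
 * selects the word at SF_HASH_BASE + 0x1a0, and bit 0x1a5 & 0xf = 5
 * is set in it.
 */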

/*
 * sf_set_filter:
 *
 * Set the Starfire receive filter.
 */
void
sf_set_filter(struct sf_softc *sc)
{
        struct ethercom *ec = &sc->sc_ethercom;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct ether_multi *enm;
        struct ether_multistep step;
        int i;

        /* Start by clearing the perfect and hash tables. */
        for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
                sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

        for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
                sf_genreg_write(sc, SF_HASH_BASE + i, 0);

        /*
         * Clear the perfect and hash mode bits.
         */
        sc->sc_RxAddressFilteringCtl &=
            ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

        if (ifp->if_flags & IFF_BROADCAST)
                sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
        else
                sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

        if (ifp->if_flags & IFF_PROMISC) {
                sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
                goto allmulti;
        } else
                sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

        /*
         * Set normal perfect filtering mode.
         */
        sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

        /*
         * First, write the station address to the perfect filter
         * table.
         */
        sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

        /*
         * Now set the hash bits for each multicast address in our
         * list.
         */
        ETHER_FIRST_MULTI(step, ec, enm);
        if (enm == NULL)
                goto done;
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
                        /*
                         * We must listen to a range of multicast addresses.
                         * For now, just accept all multicasts, rather than
                         * trying to set only those filter bits needed to match
                         * the range. (At this time, the only use of address
                         * ranges is for IP multicast routing, for which the
                         * range is big enough to require all bits set.)
                         */
                        goto allmulti;
                }
                sf_set_filter_hash(sc, enm->enm_addrlo);
                ETHER_NEXT_MULTI(step, enm);
        }

        /*
         * Set "hash only multicast dest, match regardless of VLAN ID".
         */
        sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
        goto done;

allmulti:
        /*
         * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
         */
        sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
        ifp->if_flags |= IFF_ALLMULTI;

done:
        sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
            sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read: [mii interface function]
 *
 * Read from the MII.
 */
int
sf_mii_read(struct device *self, int phy, int reg)
{
        struct sf_softc *sc = (void *) self;
        uint32_t v;
        int i;

        for (i = 0; i < 1000; i++) {
                v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
                if (v & MiiDataValid)
                        break;
                delay(1);
        }

        if ((v & MiiDataValid) == 0)
                return (0);

        if (MiiRegDataPort(v) == 0xffff)
                return (0);

        return (MiiRegDataPort(v));
}

/*
 * sf_mii_write: [mii interface function]
 *
 * Write to the MII.
 */
void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
        struct sf_softc *sc = (void *) self;
        int i;

        sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

        for (i = 0; i < 1000; i++) {
                if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
                    MiiBusy) == 0)
                        return;
                delay(1);
        }

        printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg: [mii interface function]
 *
 * Callback from the PHY when the media changes.
 */
void
sf_mii_statchg(struct device *self)
{
        struct sf_softc *sc = (void *) self;
        uint32_t ipg;

        if (sc->sc_mii.mii_media_active & IFM_FDX) {
                sc->sc_MacConfig1 |= MC1_FullDuplex;
                ipg = 0x15;
        } else {
                sc->sc_MacConfig1 &= ~MC1_FullDuplex;
                ipg = 0x11;
        }

        sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
        sf_macreset(sc);

        sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}
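
/*
 * Note on sf_mii_statchg(): changing the duplex bit in MacConfig1 only
 * takes effect through sf_macreset(), which pulses MC1_SoftRst around
 * the new value. The back-to-back inter-packet gap register is then
 * loaded with 0x15 for full duplex or 0x11 for half duplex, the values
 * this driver uses for the two modes.
 */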

/*
 * sf_mediastatus: [ifmedia interface function]
 *
 * Callback from ifmedia to request current media status.
 */
void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct sf_softc *sc = ifp->if_softc;

        mii_pollstat(&sc->sc_mii);
        ifmr->ifm_status = sc->sc_mii.mii_media_status;
        ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange: [ifmedia interface function]
 *
 * Callback from ifmedia to request new media setting.
 */
int
sf_mediachange(struct ifnet *ifp)
{
        struct sf_softc *sc = ifp->if_softc;

        if (ifp->if_flags & IFF_UP)
                mii_mediachg(&sc->sc_mii);
        return (0);
}