FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/aic6915.c
/*	$NetBSD: aic6915.c,v 1.23 2008/04/28 20:23:49 martin Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.23 2008/04/28 20:23:49 martin Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, void *);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static void	sf_shutdown(void *);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(struct device *, int, int);
static void	sf_mii_write(struct device *, int, int, int);
static void	sf_mii_statchg(struct device *);

static void	sf_tick(void *);

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
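
/*
 * Example (illustrative, following directly from sf_reg_read() and
 * sf_reg_write() above): when the chip is memory mapped, a call such
 * as sf_genreg_read(sc, SF_MacConfig1) is a single 32-bit read at
 * SF_GENREG_OFFSET + SF_MacConfig1; when it is I/O mapped, the same
 * call becomes a write of that offset to SF_IndirectIoAccess followed
 * by a read of SF_IndirectIoDataPort.
 */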

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout, 0);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to sub-region functional "
			    "registers, error = %d\n", error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
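	/*
	 * (The station address lives at EEPROM offsets 15 through 20 in
	 * reverse byte order, so the loop below reads offset 20 into
	 * enaddr[0] and counts down to offset 15 for enaddr[5].)
	 */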
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", device_xname(&sc->sc_dev));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static void
sf_shutdown(void *arg)
{
	struct sf_softc *sc = arg;

	sf_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
					    "cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
				    "error = %d\n", error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(&sc->sc_dev));
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct sf_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				aprint_error_dev(&sc->sc_dev, "WARNING: DMA error\n");
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    device_xname(&sc->sc_dev),
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			aprint_error_dev(&sc->sc_dev, "Tx queue mismatch, index %d\n",
			    txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
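		/*
		 * (The 2-byte shift re-aligns the payload: it makes the
		 * 14-byte Ethernet header end on a 4-byte boundary, so
		 * the IP header that follows is naturally aligned on
		 * strict-alignment machines.)
		 */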
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

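	/*
	 * The loop below walks the on-chip statistics block as an array
	 * of 32-bit words, which assumes struct sf_stats is nothing but
	 * consecutive uint32_t counters beginning with TransmitOKFrames.
	 * Each counter is zeroed after it is read, so the block behaves
	 * as read-and-clear and the sums below are deltas.
	 */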
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
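	/*
	 * Wait for the soft reset bit to self-clear: up to 1000
	 * iterations at 10 microseconds each, i.e. about 10ms total.
	 */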
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		aprint_error_dev(&sc->sc_dev, "reset failed to complete\n");
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx "
				    "buffer %d, error = %d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sf_rxdrain(sc);
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

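	/*
	 * The EEPROM is accessed as 32-bit words: mask the offset down
	 * to a word boundary and shift out the addressed byte lane.
	 * For example, offset 6 reads the word at SF_EEPROM_BASE + 4
	 * and extracts bits 23..16.
	 */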
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, const uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}
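
/*
 * (Each perfect-filter slot occupies a 0x10-byte stride of register
 * space and holds the address as three 16-bit words, least significant
 * bytes first: enaddr[5]/enaddr[4] land in the first word and
 * enaddr[1]/enaddr[0] in the third.)
 */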

static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}
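
/*
 * A sketch of the hash decomposition above, spelled out step by step
 * (illustrative only, kept out of the build with #if 0; the function
 * name is ours, not the driver's): the big-endian CRC32 of the address
 * is shifted right by 23, leaving a 9-bit hash.  The upper 5 bits
 * select one of 32 hash registers, spaced 0x10 apart, and the low
 * 4 bits select a bit within that register.  E.g. a hash of 0x1a5
 * sets bit 5 of hash register 26.
 */
#if 0
static void
sf_hash_example(const uint8_t *enaddr, u_int *slotp, u_int *bitp)
{
	uint32_t hash;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23; /* 9 bits */
	*slotp = hash >> 4;	/* register index: 0..31 */
	*bitp = hash & 0xf;	/* bit within the register: 0..15 */
}
#endif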

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", device_xname(&sc->sc_dev));
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}