/*	$NetBSD: aic6915.c,v 1.15 2005/12/24 20:27:29 perry Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic6915.c,v 1.15 2005/12/24 20:27:29 perry Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915reg.h>
#include <dev/ic/aic6915var.h>

static void	sf_start(struct ifnet *);
static void	sf_watchdog(struct ifnet *);
static int	sf_ioctl(struct ifnet *, u_long, caddr_t);
static int	sf_init(struct ifnet *);
static void	sf_stop(struct ifnet *, int);

static void	sf_shutdown(void *);

static void	sf_txintr(struct sf_softc *);
static void	sf_rxintr(struct sf_softc *);
static void	sf_stats_update(struct sf_softc *);

static void	sf_reset(struct sf_softc *);
static void	sf_macreset(struct sf_softc *);
static void	sf_rxdrain(struct sf_softc *);
static int	sf_add_rxbuf(struct sf_softc *, int);
static uint8_t	sf_read_eeprom(struct sf_softc *, int);
static void	sf_set_filter(struct sf_softc *);

static int	sf_mii_read(struct device *, int, int);
static void	sf_mii_write(struct device *, int, int, int);
static void	sf_mii_statchg(struct device *);

static void	sf_tick(void *);

static int	sf_mediachange(struct ifnet *);
static void	sf_mediastatus(struct ifnet *, struct ifmediareq *);

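/*
 * The chip's ``functional'' registers are accessed through a dedicated
 * handle, sc_sh_func, which sf_attach() sets up below.  Everything else
 * goes through sf_reg_read()/sf_reg_write(), which handle the indirect
 * access required when the chip is I/O mapped.
 */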
#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

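/*
 * sf_reg_read:
 *
 *	Read an arbitrary chip register.  If the chip is I/O mapped, the
 *	access is made indirectly: the register offset is first written
 *	to SF_IndirectIoAccess, and the value is then read back from
 *	SF_IndirectIoDataPort.
 */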
static inline uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

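/*
 * sf_reg_write:
 *
 *	Write an arbitrary chip register, using the same indirect access
 *	dance as sf_reg_read() when the chip is I/O mapped.
 */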
static inline void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

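/*
 * The ``general'' registers (MAC, MII, statistics, EEPROM shadow, and
 * filter tables) all live above SF_GENREG_OFFSET; these macros apply
 * that offset and route the access through sf_reg_read()/sf_reg_write().
 */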
#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	callout_init(&sc->sc_tick_callout);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  It is stored
	 * there in reverse byte order, at offsets 15 through 20.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifp->if_init = sf_init;
	ifp->if_stop = sf_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_shutdown:
 *
 *	Shutdown hook -- make sure the interface is stopped at reboot.
 */
static void
sf_shutdown(void *arg)
{
	struct sf_softc *sc = arg;

	sf_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Packets are pulled
 *	off the send queue, mapped for DMA, and handed to the chip by
 *	advancing the high-priority Tx descriptor producer index.
 */
static void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * Initialize the descriptor: word 0 carries the
		 * descriptor ID, the CRC-enable bit, and the frame
		 * length (plus the END flag on the last ring entry);
		 * word 1 carries the fragment count; the fragment
		 * list holds the DMA address and length of each
		 * segment.
		 */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				sf_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/*
			 * Transmit FIFO underruns -- bump the transmit
			 * threshold (counted in units of 16 bytes) and
			 * reprogram both registers that carry it.
			 */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_ethercom.ec_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_ethercom.ec_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.  The chip posts
 *	one completion descriptor per transmitted frame; we walk the
 *	completion ring from our consumer index to the chip's producer
 *	index, freeing the mbufs of frames the chip is done with.
 */
static void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifp->if_flags &= ~IFF_OACTIVE;

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = le32toh(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
static void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = le32toh(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));

	/* Double-check for any new completions. */
	goto try_again;
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
static void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.  Each counter register is zeroed
 *	after it is read, so every call picks up the delta since the
 *	previous update.
 */
static void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t *p;
	u_int i;

	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ipackets += stats.ReceiveOKFrames;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
static void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		    PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
static void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	callout_reset(&sc->sc_tick_callout, hz, sf_tick, sc);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.  The EEPROM contents are shadowed
 *	into the general register space; we read the 32-bit word holding
 *	the requested offset and extract the right byte from it.
 */
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

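/*
 * sf_set_filter_perfect:
 *
 *	Program one entry of the perfect (exact-match) filter table.
 *	Each 16-byte slot takes the station address 16 bits at a time,
 *	in reverse byte order.
 */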
static void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

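/*
 * sf_set_filter_hash:
 *
 *	Set the multicast hash table bit for the given address.  The
 *	upper 9 bits of the big-endian CRC-32 of the address form the
 *	hash: the top 5 bits select one of 32 table words (spaced 0x10
 *	apart) and the low 4 bits select a bit within that word.
 */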
static void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
static void
sf_set_filter(struct sf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
static int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

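	/* A read-back of all ones generally means no PHY is present here. */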
	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
static void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		    MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
static void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}

/*
 * sf_mediastatus:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request current media status.
 */
static void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request new media setting.
 */
static int
sf_mediachange(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}