FreeBSD/Linux Kernel Cross Reference
sys/pci/if_sk.c
1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34 /*
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/5.3/sys/pci/if_sk.c 139775 2005-01-06 17:54:47Z nectar $");
52
53 /*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71 /*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/module.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <vm/vm.h> /* for vtophys */
107 #include <vm/pmap.h> /* for vtophys */
108 #include <machine/bus_pio.h>
109 #include <machine/bus_memio.h>
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/brgphyreg.h>
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121
122 #if 0
123 #define SK_USEIOSPACE
124 #endif
125
126 #include <pci/if_skreg.h>
127 #include <pci/xmaciireg.h>
128 #include <pci/yukonreg.h>
129
130 MODULE_DEPEND(sk, pci, 1, 1, 1);
131 MODULE_DEPEND(sk, ether, 1, 1, 1);
132 MODULE_DEPEND(sk, miibus, 1, 1, 1);
133
134 /* "controller miibus0" required. See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136
137 #ifndef lint
138 static const char rcsid[] =
139 "$FreeBSD: releng/5.3/sys/pci/if_sk.c 139775 2005-01-06 17:54:47Z nectar $";
140 #endif
141
/*
 * Table of supported adapters: PCI vendor/device ID pairs and the
 * description string printed at probe time.  Terminated by an
 * all-zero sentinel entry.
 */
static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
180
181 static int skc_probe (device_t);
182 static int skc_attach (device_t);
183 static int skc_detach (device_t);
184 static void skc_shutdown (device_t);
185 static int sk_detach (device_t);
186 static int sk_probe (device_t);
187 static int sk_attach (device_t);
188 static void sk_tick (void *);
189 static void sk_intr (void *);
190 static void sk_intr_xmac (struct sk_if_softc *);
191 static void sk_intr_bcom (struct sk_if_softc *);
192 static void sk_intr_yukon (struct sk_if_softc *);
193 static void sk_rxeof (struct sk_if_softc *);
194 static void sk_txeof (struct sk_if_softc *);
195 static int sk_encap (struct sk_if_softc *, struct mbuf *,
196 u_int32_t *);
197 static void sk_start (struct ifnet *);
198 static int sk_ioctl (struct ifnet *, u_long, caddr_t);
199 static void sk_init (void *);
200 static void sk_init_xmac (struct sk_if_softc *);
201 static void sk_init_yukon (struct sk_if_softc *);
202 static void sk_stop (struct sk_if_softc *);
203 static void sk_watchdog (struct ifnet *);
204 static int sk_ifmedia_upd (struct ifnet *);
205 static void sk_ifmedia_sts (struct ifnet *, struct ifmediareq *);
206 static void sk_reset (struct sk_softc *);
207 static int sk_newbuf (struct sk_if_softc *,
208 struct sk_chain *, struct mbuf *);
209 static int sk_alloc_jumbo_mem (struct sk_if_softc *);
210 static void *sk_jalloc (struct sk_if_softc *);
211 static void sk_jfree (void *, void *);
212 static int sk_init_rx_ring (struct sk_if_softc *);
213 static void sk_init_tx_ring (struct sk_if_softc *);
214 static u_int32_t sk_win_read_4 (struct sk_softc *, int);
215 static u_int16_t sk_win_read_2 (struct sk_softc *, int);
216 static u_int8_t sk_win_read_1 (struct sk_softc *, int);
217 static void sk_win_write_4 (struct sk_softc *, int, u_int32_t);
218 static void sk_win_write_2 (struct sk_softc *, int, u_int32_t);
219 static void sk_win_write_1 (struct sk_softc *, int, u_int32_t);
220 static u_int8_t sk_vpd_readbyte (struct sk_softc *, int);
221 static void sk_vpd_read_res (struct sk_softc *, struct vpd_res *, int);
222 static void sk_vpd_read (struct sk_softc *);
223
224 static int sk_miibus_readreg (device_t, int, int);
225 static int sk_miibus_writereg (device_t, int, int, int);
226 static void sk_miibus_statchg (device_t);
227
228 static int sk_xmac_miibus_readreg (struct sk_if_softc *, int, int);
229 static int sk_xmac_miibus_writereg (struct sk_if_softc *, int, int,
230 int);
231 static void sk_xmac_miibus_statchg (struct sk_if_softc *);
232
233 static int sk_marv_miibus_readreg (struct sk_if_softc *, int, int);
234 static int sk_marv_miibus_writereg (struct sk_if_softc *, int, int,
235 int);
236 static void sk_marv_miibus_statchg (struct sk_if_softc *);
237
238 static uint32_t sk_xmchash (const uint8_t *);
239 static uint32_t sk_gmchash (const uint8_t *);
240 static void sk_setfilt (struct sk_if_softc *, caddr_t, int);
241 static void sk_setmulti (struct sk_if_softc *);
242 static void sk_setpromisc (struct sk_if_softc *);
243
244 #ifdef SK_USEIOSPACE
245 #define SK_RES SYS_RES_IOPORT
246 #define SK_RID SK_PCI_LOIO
247 #else
248 #define SK_RES SYS_RES_MEMORY
249 #define SK_RID SK_PCI_LOMEM
250 #endif
251
252 /*
253 * Note that we have newbus methods for both the GEnesis controller
254 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
255 * the miibus code is a child of the XMACs. We need to do it this way
256 * so that the miibus drivers can access the PHY registers on the
257 * right PHY. It's not quite what I had in mind, but it's the only
258 * design that achieves the desired effect.
259 */
/* Newbus methods for the GEnesis/Yukon controller device ("skc"). */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/* Newbus methods for each network-interface child ("sk"). */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Attachment chain: skc on pci, sk on skc, miibus on sk. */
DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
312
/*
 * Read-modify-write helpers: set or clear bits in a directly-mapped
 * CSR register (SK_SETBIT/SK_CLRBIT) or in a windowed register via
 * sk_win_{read,write}_* (32- and 16-bit variants).
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
330
331 static u_int32_t
332 sk_win_read_4(sc, reg)
333 struct sk_softc *sc;
334 int reg;
335 {
336 #ifdef SK_USEIOSPACE
337 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
338 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
339 #else
340 return(CSR_READ_4(sc, reg));
341 #endif
342 }
343
344 static u_int16_t
345 sk_win_read_2(sc, reg)
346 struct sk_softc *sc;
347 int reg;
348 {
349 #ifdef SK_USEIOSPACE
350 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
351 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
352 #else
353 return(CSR_READ_2(sc, reg));
354 #endif
355 }
356
357 static u_int8_t
358 sk_win_read_1(sc, reg)
359 struct sk_softc *sc;
360 int reg;
361 {
362 #ifdef SK_USEIOSPACE
363 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
364 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
365 #else
366 return(CSR_READ_1(sc, reg));
367 #endif
368 }
369
370 static void
371 sk_win_write_4(sc, reg, val)
372 struct sk_softc *sc;
373 int reg;
374 u_int32_t val;
375 {
376 #ifdef SK_USEIOSPACE
377 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
378 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
379 #else
380 CSR_WRITE_4(sc, reg, val);
381 #endif
382 return;
383 }
384
385 static void
386 sk_win_write_2(sc, reg, val)
387 struct sk_softc *sc;
388 int reg;
389 u_int32_t val;
390 {
391 #ifdef SK_USEIOSPACE
392 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
393 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
394 #else
395 CSR_WRITE_2(sc, reg, val);
396 #endif
397 return;
398 }
399
400 static void
401 sk_win_write_1(sc, reg, val)
402 struct sk_softc *sc;
403 int reg;
404 u_int32_t val;
405 {
406 #ifdef SK_USEIOSPACE
407 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
408 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
409 #else
410 CSR_WRITE_1(sc, reg, val);
411 #endif
412 return;
413 }
414
415 /*
416 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
418 * denoted by resource IDs. The SysKonnect VPD contains an ID string
419 * resource (the name of the adapter), a read-only area resource
420 * containing various key/data fields and a read/write area which
421 * can be used to store asset management information or log messages.
422 * We read the ID string and read-only into buffers attached to
423 * the controller softc structure for later use. At the moment,
424 * we only use the ID string during skc_attach().
425 */
426 static u_int8_t
427 sk_vpd_readbyte(sc, addr)
428 struct sk_softc *sc;
429 int addr;
430 {
431 int i;
432
433 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
434 for (i = 0; i < SK_TIMEOUT; i++) {
435 DELAY(1);
436 if (sk_win_read_2(sc,
437 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
438 break;
439 }
440
441 if (i == SK_TIMEOUT)
442 return(0);
443
444 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
445 }
446
447 static void
448 sk_vpd_read_res(sc, res, addr)
449 struct sk_softc *sc;
450 struct vpd_res *res;
451 int addr;
452 {
453 int i;
454 u_int8_t *ptr;
455
456 ptr = (u_int8_t *)res;
457 for (i = 0; i < sizeof(struct vpd_res); i++)
458 ptr[i] = sk_vpd_readbyte(sc, i + addr);
459
460 return;
461 }
462
463 static void
464 sk_vpd_read(sc)
465 struct sk_softc *sc;
466 {
467 int pos = 0, i;
468 struct vpd_res res;
469
470 if (sc->sk_vpd_prodname != NULL)
471 free(sc->sk_vpd_prodname, M_DEVBUF);
472 if (sc->sk_vpd_readonly != NULL)
473 free(sc->sk_vpd_readonly, M_DEVBUF);
474 sc->sk_vpd_prodname = NULL;
475 sc->sk_vpd_readonly = NULL;
476
477 sk_vpd_read_res(sc, &res, pos);
478
479 /*
480 * Bail out quietly if the eeprom appears to be missing or empty.
481 */
482 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
483 return;
484
485 if (res.vr_id != VPD_RES_ID) {
486 printf("skc%d: bad VPD resource id: expected %x got %x\n",
487 sc->sk_unit, VPD_RES_ID, res.vr_id);
488 return;
489 }
490
491 pos += sizeof(res);
492 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
493 for (i = 0; i < res.vr_len; i++)
494 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
495 sc->sk_vpd_prodname[i] = '\0';
496 pos += i;
497
498 sk_vpd_read_res(sc, &res, pos);
499
500 if (res.vr_id != VPD_RES_READ) {
501 printf("skc%d: bad VPD resource id: expected %x got %x\n",
502 sc->sk_unit, VPD_RES_READ, res.vr_id);
503 return;
504 }
505
506 pos += sizeof(res);
507 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
508 for (i = 0; i < res.vr_len; i++)
509 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
510
511 return;
512 }
513
514 static int
515 sk_miibus_readreg(dev, phy, reg)
516 device_t dev;
517 int phy, reg;
518 {
519 struct sk_if_softc *sc_if;
520
521 sc_if = device_get_softc(dev);
522
523 switch(sc_if->sk_softc->sk_type) {
524 case SK_GENESIS:
525 return(sk_xmac_miibus_readreg(sc_if, phy, reg));
526 case SK_YUKON:
527 return(sk_marv_miibus_readreg(sc_if, phy, reg));
528 }
529
530 return(0);
531 }
532
533 static int
534 sk_miibus_writereg(dev, phy, reg, val)
535 device_t dev;
536 int phy, reg, val;
537 {
538 struct sk_if_softc *sc_if;
539
540 sc_if = device_get_softc(dev);
541
542 switch(sc_if->sk_softc->sk_type) {
543 case SK_GENESIS:
544 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
545 case SK_YUKON:
546 return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
547 }
548
549 return(0);
550 }
551
552 static void
553 sk_miibus_statchg(dev)
554 device_t dev;
555 {
556 struct sk_if_softc *sc_if;
557
558 sc_if = device_get_softc(dev);
559
560 switch(sc_if->sk_softc->sk_type) {
561 case SK_GENESIS:
562 sk_xmac_miibus_statchg(sc_if);
563 break;
564 case SK_YUKON:
565 sk_marv_miibus_statchg(sc_if);
566 break;
567 }
568
569 return;
570 }
571
/*
 * Read a PHY register through the XMAC's MII interface.
 *
 * The internal XMAC PHY only answers at address 0, so other addresses
 * are rejected up front.  For external (non-XMAC) PHYs we must poll
 * XM_MMUCMD for PHYDATARDY before XM_PHY_DATA is valid.  Returns the
 * register value, or 0 on a bad PHY address or timeout.
 */
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/*
	 * Discarded read -- presumably kicks off the register fetch;
	 * TODO confirm against the XMAC II datasheet.
	 */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: wait for the data to become ready. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	SK_IF_UNLOCK(sc_if);
	return(i);
}
605
/*
 * Write a PHY register through the XMAC's MII interface.
 *
 * Waits for XM_MMUCMD_PHYBUSY to clear both before and after the
 * data write.  Returns ETIMEDOUT if the PHY never becomes ready to
 * accept the write; a timeout on the post-write poll is only logged
 * and the function still returns 0.
 */
static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int i;

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Wait until the PHY interface is free to accept the write. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	/* Wait for the write to complete. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	SK_IF_UNLOCK(sc_if);
	if (i == SK_TIMEOUT)
		printf("sk%d: phy write timed out\n", sc_if->sk_unit);

	return(0);
}
638
639 static void
640 sk_xmac_miibus_statchg(sc_if)
641 struct sk_if_softc *sc_if;
642 {
643 struct mii_data *mii;
644
645 mii = device_get_softc(sc_if->sk_miibus);
646
647 SK_IF_LOCK(sc_if);
648 /*
649 * If this is a GMII PHY, manually set the XMAC's
650 * duplex mode accordingly.
651 */
652 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
653 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
654 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
655 } else {
656 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
657 }
658 }
659 SK_IF_UNLOCK(sc_if);
660
661 return;
662 }
663
/*
 * Read a PHY register through the Yukon's SMI interface.
 *
 * Only PHY address 0 with a Marvell copper or fiber PHY is handled.
 * A read op is posted via the SMI control register; we then poll for
 * YU_SMICR_READ_VALID before fetching the data.  Returns 0 on an
 * unsupported PHY or on timeout.
 */
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	u_int16_t val;
	int i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_IF_LOCK(sc_if);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the read data is flagged valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n",
		    sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
	SK_IF_UNLOCK(sc_if);

	return(val);
}
701
702 static int
703 sk_marv_miibus_writereg(sc_if, phy, reg, val)
704 struct sk_if_softc *sc_if;
705 int phy, reg, val;
706 {
707 int i;
708
709 SK_IF_LOCK(sc_if);
710 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
711 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
712 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
713
714 for (i = 0; i < SK_TIMEOUT; i++) {
715 DELAY(1);
716 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
717 break;
718 }
719 SK_IF_UNLOCK(sc_if);
720
721 return(0);
722 }
723
/*
 * Link-state change on a Yukon port: nothing to do here -- the
 * required miibus hook is a no-op for this MAC.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
}
730
731 #define HASH_BITS 6
732
733 static u_int32_t
734 sk_xmchash(addr)
735 const uint8_t *addr;
736 {
737 uint32_t crc;
738
739 /* Compute CRC for the address value. */
740 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
741
742 return (~crc & ((1 << HASH_BITS) - 1));
743 }
744
745 /* gmchash is just a big endian crc */
746 static u_int32_t
747 sk_gmchash(addr)
748 const uint8_t *addr;
749 {
750 uint32_t crc;
751
752 /* Compute CRC for the address value. */
753 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
754
755 return (crc & ((1 << HASH_BITS) - 1));
756 }
757
758 static void
759 sk_setfilt(sc_if, addr, slot)
760 struct sk_if_softc *sc_if;
761 caddr_t addr;
762 int slot;
763 {
764 int base;
765
766 base = XM_RXFILT_ENTRY(slot);
767
768 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
769 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
770 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
771
772 return;
773 }
774
/*
 * Program the multicast filter from the interface's address list.
 *
 * GEnesis: the first XM_RXFILT_MAX-1 groups go into the XMAC's
 * perfect-filter slots (slot 0 is skipped), the rest into the
 * 64-bit hash table (XM_MAR0/XM_MAR2).  Yukon: all groups go
 * through the four 16-bit GMAC hash registers.  With IFF_ALLMULTI
 * or IFF_PROMISC the hash filter is opened completely.
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h = 0, i;
	struct ifmultiaddr *ifma;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };


	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Clear every perfect-filter slot except slot 0. */
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: set all hash bits. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
				    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			/* Hash the address with the MAC-specific CRC. */
			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmchash(
				    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			case SK_YUKON:
				h = sk_gmchash(
				    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			}
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Write the hash table back and enable the filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}
859
860 static void
861 sk_setpromisc(sc_if)
862 struct sk_if_softc *sc_if;
863 {
864 struct sk_softc *sc = sc_if->sk_softc;
865 struct ifnet *ifp = &sc_if->arpcom.ac_if;
866
867 switch(sc->sk_type) {
868 case SK_GENESIS:
869 if (ifp->if_flags & IFF_PROMISC) {
870 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
871 } else {
872 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
873 }
874 break;
875 case SK_YUKON:
876 if (ifp->if_flags & IFF_PROMISC) {
877 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
878 YU_RCR_UFLEN | YU_RCR_MUFLEN);
879 } else {
880 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
881 YU_RCR_UFLEN | YU_RCR_MUFLEN);
882 }
883 break;
884 }
885
886 return;
887 }
888
889 static int
890 sk_init_rx_ring(sc_if)
891 struct sk_if_softc *sc_if;
892 {
893 struct sk_chain_data *cd = &sc_if->sk_cdata;
894 struct sk_ring_data *rd = sc_if->sk_rdata;
895 int i;
896
897 bzero((char *)rd->sk_rx_ring,
898 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
899
900 for (i = 0; i < SK_RX_RING_CNT; i++) {
901 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
902 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
903 return(ENOBUFS);
904 if (i == (SK_RX_RING_CNT - 1)) {
905 cd->sk_rx_chain[i].sk_next =
906 &cd->sk_rx_chain[0];
907 rd->sk_rx_ring[i].sk_next =
908 vtophys(&rd->sk_rx_ring[0]);
909 } else {
910 cd->sk_rx_chain[i].sk_next =
911 &cd->sk_rx_chain[i + 1];
912 rd->sk_rx_ring[i].sk_next =
913 vtophys(&rd->sk_rx_ring[i + 1]);
914 }
915 }
916
917 sc_if->sk_cdata.sk_rx_prod = 0;
918 sc_if->sk_cdata.sk_rx_cons = 0;
919
920 return(0);
921 }
922
923 static void
924 sk_init_tx_ring(sc_if)
925 struct sk_if_softc *sc_if;
926 {
927 struct sk_chain_data *cd = &sc_if->sk_cdata;
928 struct sk_ring_data *rd = sc_if->sk_rdata;
929 int i;
930
931 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
932 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
933
934 for (i = 0; i < SK_TX_RING_CNT; i++) {
935 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
936 if (i == (SK_TX_RING_CNT - 1)) {
937 cd->sk_tx_chain[i].sk_next =
938 &cd->sk_tx_chain[0];
939 rd->sk_tx_ring[i].sk_next =
940 vtophys(&rd->sk_tx_ring[0]);
941 } else {
942 cd->sk_tx_chain[i].sk_next =
943 &cd->sk_tx_chain[i + 1];
944 rd->sk_tx_ring[i].sk_next =
945 vtophys(&rd->sk_tx_ring[i + 1]);
946 }
947 }
948
949 sc_if->sk_cdata.sk_tx_prod = 0;
950 sc_if->sk_cdata.sk_tx_cons = 0;
951 sc_if->sk_cdata.sk_tx_cnt = 0;
952
953 return;
954 }
955
/*
 * Initialize an RX descriptor with a receive buffer.
 *
 * With m == NULL a new mbuf header is allocated and given external
 * storage from the private jumbo pool (every RX buffer is SK_JLEN
 * bytes so jumbo frames always fit); sk_jfree() is registered as the
 * external-storage free routine.  With m != NULL the caller is
 * recycling an mbuf that already owns a jumbo buffer, and only its
 * data pointer and lengths are reset.  Returns ENOBUFS on allocation
 * failure, 0 on success.
 */
static int
sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf *m_new = NULL;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Point the descriptor at the buffer and mark it ready. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
1013
1014 /*
1015 * Allocate jumbo buffer storage. The SysKonnect adapters support
1016 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1017 * use them in their drivers. In order for us to use them, we need
1018 * large 9K receive buffers, however standard mbuf clusters are only
1019 * 2048 bytes in size. Consequently, we need to allocate and manage
1020 * our own jumbo buffer pool. Fortunately, this does not require an
1021 * excessive amount of additional code.
1022 */
1023 static int
1024 sk_alloc_jumbo_mem(sc_if)
1025 struct sk_if_softc *sc_if;
1026 {
1027 caddr_t ptr;
1028 register int i;
1029 struct sk_jpool_entry *entry;
1030
1031 /* Grab a big chunk o' storage. */
1032 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1033 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1034
1035 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1036 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1037 return(ENOBUFS);
1038 }
1039
1040 SLIST_INIT(&sc_if->sk_jfree_listhead);
1041 SLIST_INIT(&sc_if->sk_jinuse_listhead);
1042
1043 /*
1044 * Now divide it up into 9K pieces and save the addresses
1045 * in an array.
1046 */
1047 ptr = sc_if->sk_cdata.sk_jumbo_buf;
1048 for (i = 0; i < SK_JSLOTS; i++) {
1049 sc_if->sk_cdata.sk_jslots[i] = ptr;
1050 ptr += SK_JLEN;
1051 entry = malloc(sizeof(struct sk_jpool_entry),
1052 M_DEVBUF, M_NOWAIT);
1053 if (entry == NULL) {
1054 free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
1055 sc_if->sk_cdata.sk_jumbo_buf = NULL;
1056 printf("sk%d: no memory for jumbo "
1057 "buffer queue!\n", sc_if->sk_unit);
1058 return(ENOBUFS);
1059 }
1060 entry->slot = i;
1061 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1062 entry, jpool_entries);
1063 }
1064
1065 return(0);
1066 }
1067
1068 /*
1069 * Allocate a jumbo buffer.
1070 */
1071 static void *
1072 sk_jalloc(sc_if)
1073 struct sk_if_softc *sc_if;
1074 {
1075 struct sk_jpool_entry *entry;
1076
1077 SK_IF_LOCK_ASSERT(sc_if);
1078
1079 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1080
1081 if (entry == NULL) {
1082 #ifdef SK_VERBOSE
1083 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1084 #endif
1085 return(NULL);
1086 }
1087
1088 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1089 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1090 return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1091 }
1092
1093 /*
1094 * Release a jumbo buffer.
1095 */
/*
 * Release a jumbo buffer.  Installed as the mbuf external-storage
 * free routine (see MEXTADD in sk_newbuf); `args' carries the owning
 * softc.  The slot index is recovered from the buffer's offset
 * within the contiguous jumbo region, and a bookkeeping entry is
 * moved from the in-use list back to the free list under that slot
 * number.
 */
static void
sk_jfree(buf, args)
	void *buf;
	void *args;
{
	struct sk_if_softc *sc_if;
	int i;
	struct sk_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;
	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	SK_IF_LOCK(sc_if);

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	/*
	 * Pool entries are interchangeable, so any in-use entry can
	 * simply be relabeled with this slot and put back on the
	 * free list.
	 */
	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);

	SK_IF_UNLOCK(sc_if);
	return;
}
1129
1130 /*
1131 * Set media options.
1132 */
1133 static int
1134 sk_ifmedia_upd(ifp)
1135 struct ifnet *ifp;
1136 {
1137 struct sk_if_softc *sc_if = ifp->if_softc;
1138 struct mii_data *mii;
1139
1140 mii = device_get_softc(sc_if->sk_miibus);
1141 sk_init(sc_if);
1142 mii_mediachg(mii);
1143
1144 return(0);
1145 }
1146
1147 /*
1148 * Report current media status.
1149 */
1150 static void
1151 sk_ifmedia_sts(ifp, ifmr)
1152 struct ifnet *ifp;
1153 struct ifmediareq *ifmr;
1154 {
1155 struct sk_if_softc *sc_if;
1156 struct mii_data *mii;
1157
1158 sc_if = ifp->if_softc;
1159 mii = device_get_softc(sc_if->sk_miibus);
1160
1161 mii_pollstat(mii);
1162 ifmr->ifm_active = mii->mii_media_active;
1163 ifmr->ifm_status = mii->mii_media_status;
1164
1165 return;
1166 }
1167
1168 static int
1169 sk_ioctl(ifp, command, data)
1170 struct ifnet *ifp;
1171 u_long command;
1172 caddr_t data;
1173 {
1174 struct sk_if_softc *sc_if = ifp->if_softc;
1175 struct ifreq *ifr = (struct ifreq *) data;
1176 int error = 0;
1177 struct mii_data *mii;
1178
1179 SK_IF_LOCK(sc_if);
1180
1181 switch(command) {
1182 case SIOCSIFMTU:
1183 if (ifr->ifr_mtu > SK_JUMBO_MTU)
1184 error = EINVAL;
1185 else {
1186 ifp->if_mtu = ifr->ifr_mtu;
1187 sk_init(sc_if);
1188 }
1189 break;
1190 case SIOCSIFFLAGS:
1191 if (ifp->if_flags & IFF_UP) {
1192 if (ifp->if_flags & IFF_RUNNING) {
1193 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1194 & IFF_PROMISC) {
1195 sk_setpromisc(sc_if);
1196 sk_setmulti(sc_if);
1197 }
1198 } else
1199 sk_init(sc_if);
1200 } else {
1201 if (ifp->if_flags & IFF_RUNNING)
1202 sk_stop(sc_if);
1203 }
1204 sc_if->sk_if_flags = ifp->if_flags;
1205 error = 0;
1206 break;
1207 case SIOCADDMULTI:
1208 case SIOCDELMULTI:
1209 sk_setmulti(sc_if);
1210 error = 0;
1211 break;
1212 case SIOCGIFMEDIA:
1213 case SIOCSIFMEDIA:
1214 mii = device_get_softc(sc_if->sk_miibus);
1215 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1216 break;
1217 default:
1218 error = ether_ioctl(ifp, command, data);
1219 break;
1220 }
1221
1222 SK_IF_UNLOCK(sc_if);
1223
1224 return(error);
1225 }
1226
1227 /*
1228 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1229 * IDs against our list and return a device name if we find a match.
1230 */
1231 static int
1232 skc_probe(dev)
1233 device_t dev;
1234 {
1235 struct sk_softc *sc;
1236 struct sk_type *t = sk_devs;
1237
1238 sc = device_get_softc(dev);
1239
1240 while(t->sk_name != NULL) {
1241 if ((pci_get_vendor(dev) == t->sk_vid) &&
1242 (pci_get_device(dev) == t->sk_did)) {
1243 device_set_desc(dev, t->sk_name);
1244 return(0);
1245 }
1246 t++;
1247 }
1248
1249 return(ENXIO);
1250 }
1251
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * The write/delay ordering below follows the hardware's required
 * reset sequence; do not reorder.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{
	/* Assert software and master resets (plus link reset on Yukon). */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (sc->sk_type == SK_YUKON)
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Deassert in the reverse order, with settle delays. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (sc->sk_type == SK_YUKON)
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1298
1299 static int
1300 sk_probe(dev)
1301 device_t dev;
1302 {
1303 struct sk_softc *sc;
1304
1305 sc = device_get_softc(device_get_parent(dev));
1306
1307 /*
1308 * Not much to do here. We always know there will be
1309 * at least one XMAC present, and if there are two,
1310 * skc_attach() will create a second device instance
1311 * for us.
1312 */
1313 switch (sc->sk_type) {
1314 case SK_GENESIS:
1315 device_set_desc(dev, "XaQti Corp. XMAC II");
1316 break;
1317 case SK_YUKON:
1318 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1319 break;
1320 }
1321
1322 return(0);
1323 }
1324
1325 /*
1326 * Each XMAC chip is attached as a separate logical IP interface.
1327 * Single port cards will have only one logical interface of course.
1328 */
1329 static int
1330 sk_attach(dev)
1331 device_t dev;
1332 {
1333 struct sk_softc *sc;
1334 struct sk_if_softc *sc_if;
1335 struct ifnet *ifp;
1336 int i, port, error;
1337
1338 if (dev == NULL)
1339 return(EINVAL);
1340
1341 error = 0;
1342 sc_if = device_get_softc(dev);
1343 sc = device_get_softc(device_get_parent(dev));
1344 port = *(int *)device_get_ivars(dev);
1345 free(device_get_ivars(dev), M_DEVBUF);
1346 device_set_ivars(dev, NULL);
1347
1348 sc_if->sk_dev = dev;
1349 sc_if->sk_unit = device_get_unit(dev);
1350 sc_if->sk_port = port;
1351 sc_if->sk_softc = sc;
1352 sc->sk_if[port] = sc_if;
1353 if (port == SK_PORT_A)
1354 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1355 if (port == SK_PORT_B)
1356 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1357
1358 /* Allocate the descriptor queues. */
1359 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1360 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1361
1362 if (sc_if->sk_rdata == NULL) {
1363 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1364 error = ENOMEM;
1365 goto fail;
1366 }
1367
1368 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1369
1370 /* Try to allocate memory for jumbo buffers. */
1371 if (sk_alloc_jumbo_mem(sc_if)) {
1372 printf("sk%d: jumbo buffer allocation failed\n",
1373 sc_if->sk_unit);
1374 error = ENOMEM;
1375 goto fail;
1376 }
1377
1378 ifp = &sc_if->arpcom.ac_if;
1379 ifp->if_softc = sc_if;
1380 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1381 ifp->if_mtu = ETHERMTU;
1382 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1383 ifp->if_ioctl = sk_ioctl;
1384 ifp->if_start = sk_start;
1385 ifp->if_watchdog = sk_watchdog;
1386 ifp->if_init = sk_init;
1387 ifp->if_baudrate = 1000000000;
1388 ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1389
1390 callout_handle_init(&sc_if->sk_tick_ch);
1391
1392 /*
1393 * Get station address for this interface. Note that
1394 * dual port cards actually come with three station
1395 * addresses: one for each port, plus an extra. The
1396 * extra one is used by the SysKonnect driver software
1397 * as a 'virtual' station address for when both ports
1398 * are operating in failover mode. Currently we don't
1399 * use this extra address.
1400 */
1401 SK_LOCK(sc);
1402 for (i = 0; i < ETHER_ADDR_LEN; i++)
1403 sc_if->arpcom.ac_enaddr[i] =
1404 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1405
1406 /*
1407 * Set up RAM buffer addresses. The NIC will have a certain
1408 * amount of SRAM on it, somewhere between 512K and 2MB. We
1409 * need to divide this up a) between the transmitter and
1410 * receiver and b) between the two XMACs, if this is a
1411 * dual port NIC. Our algotithm is to divide up the memory
1412 * evenly so that everyone gets a fair share.
1413 */
1414 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1415 u_int32_t chunk, val;
1416
1417 chunk = sc->sk_ramsize / 2;
1418 val = sc->sk_rboff / sizeof(u_int64_t);
1419 sc_if->sk_rx_ramstart = val;
1420 val += (chunk / sizeof(u_int64_t));
1421 sc_if->sk_rx_ramend = val - 1;
1422 sc_if->sk_tx_ramstart = val;
1423 val += (chunk / sizeof(u_int64_t));
1424 sc_if->sk_tx_ramend = val - 1;
1425 } else {
1426 u_int32_t chunk, val;
1427
1428 chunk = sc->sk_ramsize / 4;
1429 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1430 sizeof(u_int64_t);
1431 sc_if->sk_rx_ramstart = val;
1432 val += (chunk / sizeof(u_int64_t));
1433 sc_if->sk_rx_ramend = val - 1;
1434 sc_if->sk_tx_ramstart = val;
1435 val += (chunk / sizeof(u_int64_t));
1436 sc_if->sk_tx_ramend = val - 1;
1437 }
1438
1439 /* Read and save PHY type and set PHY address */
1440 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1441 switch(sc_if->sk_phytype) {
1442 case SK_PHYTYPE_XMAC:
1443 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1444 break;
1445 case SK_PHYTYPE_BCOM:
1446 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1447 break;
1448 case SK_PHYTYPE_MARV_COPPER:
1449 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1450 break;
1451 default:
1452 printf("skc%d: unsupported PHY type: %d\n",
1453 sc->sk_unit, sc_if->sk_phytype);
1454 error = ENODEV;
1455 SK_UNLOCK(sc);
1456 goto fail;
1457 }
1458
1459
1460 /*
1461 * Call MI attach routine. Can't hold locks when calling into ether_*.
1462 */
1463 SK_UNLOCK(sc);
1464 ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1465 SK_LOCK(sc);
1466
1467 /*
1468 * Do miibus setup.
1469 */
1470 switch (sc->sk_type) {
1471 case SK_GENESIS:
1472 sk_init_xmac(sc_if);
1473 break;
1474 case SK_YUKON:
1475 sk_init_yukon(sc_if);
1476 break;
1477 }
1478
1479 SK_UNLOCK(sc);
1480 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1481 sk_ifmedia_upd, sk_ifmedia_sts)) {
1482 printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1483 ether_ifdetach(ifp);
1484 error = ENXIO;
1485 goto fail;
1486 }
1487
1488 fail:
1489 if (error) {
1490 /* Access should be ok even though lock has been dropped */
1491 sc->sk_if[port] = NULL;
1492 sk_detach(dev);
1493 }
1494
1495 return(error);
1496 }
1497
1498 /*
1499 * Attach the interface. Allocate softc structures, do ifmedia
1500 * setup and ethernet/BPF attach.
1501 */
1502 static int
1503 skc_attach(dev)
1504 device_t dev;
1505 {
1506 struct sk_softc *sc;
1507 int unit, error = 0, rid, *port;
1508 uint8_t skrs;
1509
1510 sc = device_get_softc(dev);
1511 unit = device_get_unit(dev);
1512
1513 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1514 MTX_DEF | MTX_RECURSE);
1515 /*
1516 * Map control/status registers.
1517 */
1518 pci_enable_busmaster(dev);
1519
1520 rid = SK_RID;
1521 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1522
1523 if (sc->sk_res == NULL) {
1524 printf("sk%d: couldn't map ports/memory\n", unit);
1525 error = ENXIO;
1526 goto fail;
1527 }
1528
1529 sc->sk_btag = rman_get_bustag(sc->sk_res);
1530 sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1531
1532 /* Allocate interrupt */
1533 rid = 0;
1534 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1535 RF_SHAREABLE | RF_ACTIVE);
1536
1537 if (sc->sk_irq == NULL) {
1538 printf("skc%d: couldn't map interrupt\n", unit);
1539 error = ENXIO;
1540 goto fail;
1541 }
1542
1543 /* Set adapter type */
1544 switch (pci_get_device(dev)) {
1545 case DEVICEID_SK_V1:
1546 sc->sk_type = SK_GENESIS;
1547 break;
1548 case DEVICEID_SK_V2:
1549 case DEVICEID_BELKIN_5005:
1550 case DEVICEID_3COM_3C940:
1551 case DEVICEID_LINKSYS_EG1032:
1552 case DEVICEID_DLINK_DGE530T:
1553 sc->sk_type = SK_YUKON;
1554 break;
1555 default:
1556 printf("skc%d: unknown device!\n", unit);
1557 error = ENXIO;
1558 goto fail;
1559 }
1560
1561 /* Reset the adapter. */
1562 sk_reset(sc);
1563
1564 sc->sk_unit = unit;
1565
1566 /* Read and save vital product data from EEPROM. */
1567 sk_vpd_read(sc);
1568
1569 skrs = sk_win_read_1(sc, SK_EPROM0);
1570 if (sc->sk_type == SK_GENESIS) {
1571 /* Read and save RAM size and RAMbuffer offset */
1572 switch(skrs) {
1573 case SK_RAMSIZE_512K_64:
1574 sc->sk_ramsize = 0x80000;
1575 sc->sk_rboff = SK_RBOFF_0;
1576 break;
1577 case SK_RAMSIZE_1024K_64:
1578 sc->sk_ramsize = 0x100000;
1579 sc->sk_rboff = SK_RBOFF_80000;
1580 break;
1581 case SK_RAMSIZE_1024K_128:
1582 sc->sk_ramsize = 0x100000;
1583 sc->sk_rboff = SK_RBOFF_0;
1584 break;
1585 case SK_RAMSIZE_2048K_128:
1586 sc->sk_ramsize = 0x200000;
1587 sc->sk_rboff = SK_RBOFF_0;
1588 break;
1589 default:
1590 printf("skc%d: unknown ram size: %d\n",
1591 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1592 error = ENXIO;
1593 goto fail;
1594 }
1595 } else { /* SK_YUKON */
1596 if (skrs == 0x00)
1597 sc->sk_ramsize = 0x20000;
1598 else
1599 sc->sk_ramsize = skrs * (1<<12);
1600 sc->sk_rboff = SK_RBOFF_0;
1601 }
1602
1603 /* Read and save physical media type */
1604 switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1605 case SK_PMD_1000BASESX:
1606 sc->sk_pmd = IFM_1000_SX;
1607 break;
1608 case SK_PMD_1000BASELX:
1609 sc->sk_pmd = IFM_1000_LX;
1610 break;
1611 case SK_PMD_1000BASECX:
1612 sc->sk_pmd = IFM_1000_CX;
1613 break;
1614 case SK_PMD_1000BASETX:
1615 sc->sk_pmd = IFM_1000_T;
1616 break;
1617 default:
1618 printf("skc%d: unknown media type: 0x%x\n",
1619 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1620 error = ENXIO;
1621 goto fail;
1622 }
1623
1624 /* Announce the product name. */
1625 if (sc->sk_vpd_prodname != NULL)
1626 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1627 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1628 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1629 *port = SK_PORT_A;
1630 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1631
1632 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1633 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1634 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1635 *port = SK_PORT_B;
1636 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1637 }
1638
1639 /* Turn on the 'driver is loaded' LED. */
1640 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1641
1642 bus_generic_attach(dev);
1643
1644 /* Hook interrupt last to avoid having to lock softc */
1645 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1646 sk_intr, sc, &sc->sk_intrhand);
1647
1648 if (error) {
1649 printf("skc%d: couldn't set up irq\n", unit);
1650 goto fail;
1651 }
1652
1653 fail:
1654 if (error)
1655 skc_detach(dev);
1656
1657 return(error);
1658 }
1659
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	/* Free the jumbo arena and descriptor rings if they exist. */
	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
		contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
	if (sc_if->sk_rdata != NULL) {
		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
		    M_DEVBUF);
	}
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1708
/*
 * Detach the parent controller: tear down the per-port children
 * first (which runs sk_detach() on each), then release the IRQ,
 * the register BAR, and the softc mutex.  Safe to call from the
 * skc_attach() failure path, so every release is guarded.
 */
static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL)
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		if (sc->sk_devs[SK_PORT_B] != NULL)
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		bus_generic_detach(dev);
	}

	/* Tear down in the reverse order of acquisition in skc_attach(). */
	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	if (sc->sk_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	if (sc->sk_res)
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1737
/*
 * Encapsulate an mbuf chain into the transmit descriptor ring,
 * one fragment descriptor per non-empty mbuf.  On success *txidx
 * is advanced past the descriptors consumed.  Returns ENOBUFS
 * (without consuming the chain) when the ring is too full.
 */
static int
sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	SK_IF_LOCK_ASSERT(sc_if);

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Keep at least 2 descriptors free at all times. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * Don't set OWN on the first descriptor yet:
			 * the chip must not see the frame until the
			 * whole chain is in place (done below).
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	/* Mark the last fragment and request an interrupt on completion. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Now hand the whole chain to the chip by setting OWN on the head. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1789
/*
 * if_start handler: drain the interface send queue into the TX
 * descriptor ring and kick the transmit BMU if anything was queued.
 */
static void
sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop when the producer slot still has an un-reaped mbuf. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			/* Put the packet back; it was not consumed. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
	SK_IF_UNLOCK(sc_if);

	return;
}
1841
1842
1843 static void
1844 sk_watchdog(ifp)
1845 struct ifnet *ifp;
1846 {
1847 struct sk_if_softc *sc_if;
1848
1849 sc_if = ifp->if_softc;
1850
1851 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1852 sk_init(sc_if);
1853
1854 return;
1855 }
1856
1857 static void
1858 skc_shutdown(dev)
1859 device_t dev;
1860 {
1861 struct sk_softc *sc;
1862
1863 sc = device_get_softc(dev);
1864 SK_LOCK(sc);
1865
1866 /* Turn off the 'driver is loaded' LED. */
1867 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1868
1869 /*
1870 * Reset the GEnesis controller. Doing this should also
1871 * assert the resets on the attached XMAC(s).
1872 */
1873 sk_reset(sc);
1874 SK_UNLOCK(sc);
1875
1876 return;
1877 }
1878
/*
 * Receive-completion handler: walk the RX ring from the current
 * producer index, harvesting every descriptor the chip has handed
 * back (OWN bit clear), and pass good frames up the stack.
 * Called with the controller lock held.
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	SK_LOCK_ASSERT(sc);

	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Recycle the old mbuf into the slot on error frames. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		/* Drop the lock: if_input may re-enter the driver. */
		SK_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SK_LOCK(sc);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1948
/*
 * Transmit-completion handler: reclaim descriptors the chip has
 * finished with (OWN bit clear), free their mbufs, and clear
 * OACTIVE once enough ring space is available again.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Chip still owns this descriptor; stop reaping. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Count one packet per chain, not per fragment. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}

	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
		ifp->if_timer = 0;
	} else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
1990
/*
 * Periodic link-poll timer, armed by the XMAC interrupt handler
 * when the link goes down.  Re-arms itself until the link_sync
 * GPIO reads steady, then re-enables the GP0 interrupt and stops.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Interface went down; nothing to poll. */
	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* BCOM PHYs report link state through their own interrupt path. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: re-arm and try again in a second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
2043
/*
 * Service an interrupt from a Broadcom (BCOM) PHY: acknowledge
 * the PHY interrupt, track link up/down transitions, and drive
 * the link LED accordingly.  The MAC RX/TX paths are disabled
 * around the PHY access and re-enabled at the end.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link went down: restart media selection. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: mask further PHY interrupts. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
	    		    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* No definitive transition: keep polling. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
2097
/*
 * Service an XMAC interrupt: on link-state events (internal PHY
 * only) arm the sk_tick link poller, and flush the FIFOs on
 * TX underrun / RX overrun.  The final ISR read clears any
 * remaining latched status.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick sees a stable link. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Read again to clear any status latched by the flushes above. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
2133
/*
 * Service a Yukon GMAC interrupt.  Reading SK_GMAC_ISR
 * acknowledges the interrupt; no further handling is done here.
 */
static void
sk_intr_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	int status;

	/* Read-to-acknowledge; the value itself is unused. */
	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	return;
}
2144
2145 static void
2146 sk_intr(xsc)
2147 void *xsc;
2148 {
2149 struct sk_softc *sc = xsc;
2150 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
2151 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2152 u_int32_t status;
2153
2154 SK_LOCK(sc);
2155
2156 sc_if0 = sc->sk_if[SK_PORT_A];
2157 sc_if1 = sc->sk_if[SK_PORT_B];
2158
2159 if (sc_if0 != NULL)
2160 ifp0 = &sc_if0->arpcom.ac_if;
2161 if (sc_if1 != NULL)
2162 ifp1 = &sc_if1->arpcom.ac_if;
2163
2164 for (;;) {
2165 status = CSR_READ_4(sc, SK_ISSR);
2166 if (!(status & sc->sk_intrmask))
2167 break;
2168
2169 /* Handle receive interrupts first. */
2170 if (status & SK_ISR_RX1_EOF) {
2171 sk_rxeof(sc_if0);
2172 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2173 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2174 }
2175 if (status & SK_ISR_RX2_EOF) {
2176 sk_rxeof(sc_if1);
2177 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2178 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2179 }
2180
2181 /* Then transmit interrupts. */
2182 if (status & SK_ISR_TX1_S_EOF) {
2183 sk_txeof(sc_if0);
2184 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2185 SK_TXBMU_CLR_IRQ_EOF);
2186 }
2187 if (status & SK_ISR_TX2_S_EOF) {
2188 sk_txeof(sc_if1);
2189 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2190 SK_TXBMU_CLR_IRQ_EOF);
2191 }
2192
2193 /* Then MAC interrupts. */
2194 if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2195 if (sc->sk_type == SK_GENESIS)
2196 sk_intr_xmac(sc_if0);
2197 else
2198 sk_intr_yukon(sc_if0);
2199 }
2200
2201 if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2202 if (sc->sk_type == SK_GENESIS)
2203 sk_intr_xmac(sc_if1);
2204 else
2205 sk_intr_yukon(sc_if1);
2206 }
2207
2208 if (status & SK_ISR_EXTERNAL_REG) {
2209 if (ifp0 != NULL &&
2210 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2211 sk_intr_bcom(sc_if0);
2212 if (ifp1 != NULL &&
2213 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2214 sk_intr_bcom(sc_if1);
2215 }
2216 }
2217
2218 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2219
2220 if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
2221 sk_start(ifp0);
2222 if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
2223 sk_start(ifp1);
2224
2225 SK_UNLOCK(sc);
2226
2227 return;
2228 }
2229
2230 static void
2231 sk_init_xmac(sc_if)
2232 struct sk_if_softc *sc_if;
2233 {
2234 struct sk_softc *sc;
2235 struct ifnet *ifp;
2236 struct sk_bcom_hack bhack[] = {
2237 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2238 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2239 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2240 { 0, 0 } };
2241
2242 sc = sc_if->sk_softc;
2243 ifp = &sc_if->arpcom.ac_if;
2244
2245 /* Unreset the XMAC. */
2246 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2247 DELAY(1000);
2248
2249 /* Reset the XMAC's internal state. */
2250 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2251
2252 /* Save the XMAC II revision */
2253 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2254
2255 /*
2256 * Perform additional initialization for external PHYs,
2257 * namely for the 1000baseTX cards that use the XMAC's
2258 * GMII mode.
2259 */
2260 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2261 int i = 0;
2262 u_int32_t val;
2263
2264 /* Take PHY out of reset. */
2265 val = sk_win_read_4(sc, SK_GPIO);
2266 if (sc_if->sk_port == SK_PORT_A)
2267 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2268 else
2269 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2270 sk_win_write_4(sc, SK_GPIO, val);
2271
2272 /* Enable GMII mode on the XMAC. */
2273 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2274
2275 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2276 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2277 DELAY(10000);
2278 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2279 BRGPHY_MII_IMR, 0xFFF0);
2280
2281 /*
2282 * Early versions of the BCM5400 apparently have
2283 * a bug that requires them to have their reserved
2284 * registers initialized to some magic values. I don't
2285 * know what the numbers do, I'm just the messenger.
2286 */
2287 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2288 == 0x6041) {
2289 while(bhack[i].reg) {
2290 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2291 bhack[i].reg, bhack[i].val);
2292 i++;
2293 }
2294 }
2295 }
2296
2297 /* Set station address */
2298 SK_XM_WRITE_2(sc_if, XM_PAR0,
2299 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2300 SK_XM_WRITE_2(sc_if, XM_PAR1,
2301 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2302 SK_XM_WRITE_2(sc_if, XM_PAR2,
2303 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2304 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2305
2306 if (ifp->if_flags & IFF_BROADCAST) {
2307 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2308 } else {
2309 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2310 }
2311
2312 /* We don't need the FCS appended to the packet. */
2313 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2314
2315 /* We want short frames padded to 60 bytes. */
2316 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2317
2318 /*
2319 * Enable the reception of all error frames. This is is
2320 * a necessary evil due to the design of the XMAC. The
2321 * XMAC's receive FIFO is only 8K in size, however jumbo
2322 * frames can be up to 9000 bytes in length. When bad
2323 * frame filtering is enabled, the XMAC's RX FIFO operates
2324 * in 'store and forward' mode. For this to work, the
2325 * entire frame has to fit into the FIFO, but that means
2326 * that jumbo frames larger than 8192 bytes will be
2327 * truncated. Disabling all bad frame filtering causes
2328 * the RX FIFO to operate in streaming mode, in which
2329 	 * case the XMAC will start transferring frames out of the
2330 * RX FIFO as soon as the FIFO threshold is reached.
2331 */
2332 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2333 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2334 XM_MODE_RX_INRANGELEN);
2335
2336 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2337 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2338 else
2339 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2340
2341 /*
2342 * Bump up the transmit threshold. This helps hold off transmit
2343 * underruns when we're blasting traffic from both ports at once.
2344 */
2345 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2346
2347 /* Set promiscuous mode */
2348 sk_setpromisc(sc_if);
2349
2350 /* Set multicast filter */
2351 sk_setmulti(sc_if);
2352
2353 /* Clear and enable interrupts */
2354 SK_XM_READ_2(sc_if, XM_ISR);
2355 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2356 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2357 else
2358 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2359
2360 /* Configure MAC arbiter */
2361 switch(sc_if->sk_xmac_rev) {
2362 case XM_XMAC_REV_B2:
2363 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2364 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2365 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2366 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2367 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2368 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2369 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2370 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2371 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2372 break;
2373 case XM_XMAC_REV_C1:
2374 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2375 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2376 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2377 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2378 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2379 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2380 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2381 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2382 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2383 break;
2384 default:
2385 break;
2386 }
2387 sk_win_write_2(sc, SK_MACARB_CTL,
2388 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2389
2390 sc_if->sk_link = 1;
2391
2392 return;
2393 }
2394
2395 static void
2396 sk_init_yukon(sc_if)
2397 struct sk_if_softc *sc_if;
2398 {
2399 u_int32_t phy;
2400 u_int16_t reg;
2401 int i;
2402
2403 /* GMAC and GPHY Reset */
2404 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2405 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2406 DELAY(1000);
2407 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2408 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2409 DELAY(1000);
2410
2411 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2412 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2413
2414 switch(sc_if->sk_softc->sk_pmd) {
2415 case IFM_1000_SX:
2416 case IFM_1000_LX:
2417 phy |= SK_GPHY_FIBER;
2418 break;
2419
2420 case IFM_1000_CX:
2421 case IFM_1000_T:
2422 phy |= SK_GPHY_COPPER;
2423 break;
2424 }
2425
2426 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2427 DELAY(1000);
2428 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2429 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2430 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2431
2432 /* unused read of the interrupt source register */
2433 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2434
2435 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2436
2437 /* MIB Counter Clear Mode set */
2438 reg |= YU_PAR_MIB_CLR;
2439 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2440
2441 /* MIB Counter Clear Mode clear */
2442 reg &= ~YU_PAR_MIB_CLR;
2443 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2444
2445 /* receive control reg */
2446 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2447
2448 /* transmit parameter register */
2449 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2450 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2451
2452 /* serial mode register */
2453 SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
2454 YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e));
2455
2456 /* Setup Yukon's address */
2457 for (i = 0; i < 3; i++) {
2458 /* Write Source Address 1 (unicast filter) */
2459 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2460 sc_if->arpcom.ac_enaddr[i * 2] |
2461 sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2462 }
2463
2464 for (i = 0; i < 3; i++) {
2465 reg = sk_win_read_2(sc_if->sk_softc,
2466 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2467 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2468 }
2469
2470 /* Set promiscuous mode */
2471 sk_setpromisc(sc_if);
2472
2473 /* Set multicast filter */
2474 sk_setmulti(sc_if);
2475
2476 /* enable interrupt mask for counter overflows */
2477 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2478 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2479 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2480
2481 /* Configure RX MAC FIFO */
2482 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2483 SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2484
2485 /* Configure TX MAC FIFO */
2486 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2487 SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2488 }
2489
2490 /*
2491 * Note that to properly initialize any part of the GEnesis chip,
2492 * you first have to take it out of reset mode.
2493 */
2494 static void
2495 sk_init(xsc)
2496 void *xsc;
2497 {
2498 struct sk_if_softc *sc_if = xsc;
2499 struct sk_softc *sc;
2500 struct ifnet *ifp;
2501 struct mii_data *mii;
2502 u_int16_t reg;
2503
2504 SK_IF_LOCK(sc_if);
2505
2506 ifp = &sc_if->arpcom.ac_if;
2507 sc = sc_if->sk_softc;
2508 mii = device_get_softc(sc_if->sk_miibus);
2509
2510 /* Cancel pending I/O and free all RX/TX buffers. */
2511 sk_stop(sc_if);
2512
2513 if (sc->sk_type == SK_GENESIS) {
2514 /* Configure LINK_SYNC LED */
2515 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2516 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2517 SK_LINKLED_LINKSYNC_ON);
2518
2519 /* Configure RX LED */
2520 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2521 SK_RXLEDCTL_COUNTER_START);
2522
2523 /* Configure TX LED */
2524 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2525 SK_TXLEDCTL_COUNTER_START);
2526 }
2527
2528 /* Configure I2C registers */
2529
2530 /* Configure XMAC(s) */
2531 switch (sc->sk_type) {
2532 case SK_GENESIS:
2533 sk_init_xmac(sc_if);
2534 break;
2535 case SK_YUKON:
2536 sk_init_yukon(sc_if);
2537 break;
2538 }
2539 mii_mediachg(mii);
2540
2541 if (sc->sk_type == SK_GENESIS) {
2542 /* Configure MAC FIFOs */
2543 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2544 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2545 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2546
2547 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2548 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2549 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2550 }
2551
2552 /* Configure transmit arbiter(s) */
2553 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2554 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2555
2556 /* Configure RAMbuffers */
2557 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2558 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2559 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2560 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2561 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2562 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2563
2564 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2565 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2566 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2567 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2568 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2569 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2570 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2571
2572 /* Configure BMUs */
2573 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2574 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2575 vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2576 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2577
2578 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2579 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2580 vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2581 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2582
2583 /* Init descriptors */
2584 if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2585 printf("sk%d: initialization failed: no "
2586 "memory for rx buffers\n", sc_if->sk_unit);
2587 sk_stop(sc_if);
2588 SK_IF_UNLOCK(sc_if);
2589 return;
2590 }
2591 sk_init_tx_ring(sc_if);
2592
2593 /* Configure interrupt handling */
2594 CSR_READ_4(sc, SK_ISSR);
2595 if (sc_if->sk_port == SK_PORT_A)
2596 sc->sk_intrmask |= SK_INTRS1;
2597 else
2598 sc->sk_intrmask |= SK_INTRS2;
2599
2600 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2601
2602 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2603
2604 /* Start BMUs. */
2605 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2606
2607 switch(sc->sk_type) {
2608 case SK_GENESIS:
2609 /* Enable XMACs TX and RX state machines */
2610 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2611 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2612 break;
2613 case SK_YUKON:
2614 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2615 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2616 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2617 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2618 }
2619
2620 ifp->if_flags |= IFF_RUNNING;
2621 ifp->if_flags &= ~IFF_OACTIVE;
2622
2623 SK_IF_UNLOCK(sc_if);
2624
2625 return;
2626 }
2627
2628 static void
2629 sk_stop(sc_if)
2630 struct sk_if_softc *sc_if;
2631 {
2632 int i;
2633 struct sk_softc *sc;
2634 struct ifnet *ifp;
2635
2636 SK_IF_LOCK(sc_if);
2637 sc = sc_if->sk_softc;
2638 ifp = &sc_if->arpcom.ac_if;
2639
2640 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2641
2642 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2643 u_int32_t val;
2644
2645 /* Put PHY back into reset. */
2646 val = sk_win_read_4(sc, SK_GPIO);
2647 if (sc_if->sk_port == SK_PORT_A) {
2648 val |= SK_GPIO_DIR0;
2649 val &= ~SK_GPIO_DAT0;
2650 } else {
2651 val |= SK_GPIO_DIR2;
2652 val &= ~SK_GPIO_DAT2;
2653 }
2654 sk_win_write_4(sc, SK_GPIO, val);
2655 }
2656
2657 /* Turn off various components of this interface. */
2658 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2659 switch (sc->sk_type) {
2660 case SK_GENESIS:
2661 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2662 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2663 break;
2664 case SK_YUKON:
2665 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2666 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2667 break;
2668 }
2669 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2670 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2671 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2672 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2673 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2674 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2675 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2676 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2677 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2678
2679 /* Disable interrupts */
2680 if (sc_if->sk_port == SK_PORT_A)
2681 sc->sk_intrmask &= ~SK_INTRS1;
2682 else
2683 sc->sk_intrmask &= ~SK_INTRS2;
2684 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2685
2686 SK_XM_READ_2(sc_if, XM_ISR);
2687 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2688
2689 /* Free RX and TX mbufs still in the queues. */
2690 for (i = 0; i < SK_RX_RING_CNT; i++) {
2691 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2692 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2693 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2694 }
2695 }
2696
2697 for (i = 0; i < SK_TX_RING_CNT; i++) {
2698 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2699 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2700 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2701 }
2702 }
2703
2704 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2705 SK_IF_UNLOCK(sc_if);
2706 return;
2707 }
Cache object: 7f1368e921c45af03cd6ad8626bac83d
|