FreeBSD/Linux Kernel Cross Reference
sys/pci/if_sk.c
1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34 /*-
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/5.4/sys/pci/if_sk.c 144237 2005-03-28 16:21:16Z bz $");
52
53 /*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
 61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71 /*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/module.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <vm/vm.h> /* for vtophys */
107 #include <vm/pmap.h> /* for vtophys */
108 #include <machine/bus_pio.h>
109 #include <machine/bus_memio.h>
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/brgphyreg.h>
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121
122 #if 0
123 #define SK_USEIOSPACE
124 #endif
125
126 #include <pci/if_skreg.h>
127 #include <pci/xmaciireg.h>
128 #include <pci/yukonreg.h>
129
/* Declare module dependencies so the loader pulls these in first. */
MODULE_DEPEND(sk, pci, 1, 1, 1);
MODULE_DEPEND(sk, ether, 1, 1, 1);
MODULE_DEPEND(sk, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
/* Version identification string embedded in the object file. */
static const char rcsid[] =
  "$FreeBSD: releng/5.4/sys/pci/if_sk.c 144237 2005-03-28 16:21:16Z bz $";
#endif
141
/*
 * Table of supported PCI vendor/device ID pairs with their printable
 * names.  Scanned by skc_probe(); the all-zero entry terminates it.
 */
static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
180
/* Newbus methods for the controller (skc) and per-port (sk) devices. */
static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static void skc_shutdown(device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);

/* Periodic timer, interrupt handlers and RX/TX completion paths. */
static void sk_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
static void sk_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);

/* ifnet entry points and chip bring-up/teardown. */
static int sk_encap(struct sk_if_softc *, struct mbuf *,
					u_int32_t *);
static void sk_start(struct ifnet *);
static int sk_ioctl(struct ifnet *, u_long, caddr_t);
static void sk_init(void *);
static void sk_init_xmac(struct sk_if_softc *);
static void sk_init_yukon(struct sk_if_softc *);
static void sk_stop(struct sk_if_softc *);
static void sk_watchdog(struct ifnet *);
static int sk_ifmedia_upd(struct ifnet *);
static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sk_reset(struct sk_softc *);

/* RX buffer and private jumbo-buffer pool management. */
static int sk_newbuf(struct sk_if_softc *,
					struct sk_chain *, struct mbuf *);
static int sk_alloc_jumbo_mem(struct sk_if_softc *);
static void sk_free_jumbo_mem(struct sk_if_softc *);
static void *sk_jalloc(struct sk_if_softc *);
static void sk_jfree(void *, void *);
static int sk_init_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);

/* Indirect (windowed) register access helpers. */
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
static u_int8_t sk_win_read_1(struct sk_softc *, int);
static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1(struct sk_softc *, int, u_int32_t);

/* VPD EEPROM access. */
static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
static void sk_vpd_read(struct sk_softc *);

/* MII bus glue: dispatchers plus XMAC- and Marvell-specific backends. */
static int sk_miibus_readreg(device_t, int, int);
static int sk_miibus_writereg(device_t, int, int, int);
static void sk_miibus_statchg(device_t);

static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
					int);
static void sk_xmac_miibus_statchg(struct sk_if_softc *);

static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
					int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);

/* Multicast hashing (XMAC and GMAC use different CRC conventions). */
static uint32_t sk_xmchash(const uint8_t *);
static uint32_t sk_gmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
static void sk_setmulti(struct sk_if_softc *);
static void sk_setpromisc(struct sk_if_softc *);
244
/*
 * Select the PCI resource used for register access: I/O-port space
 * when SK_USEIOSPACE is defined (disabled above via "#if 0"),
 * otherwise memory-mapped space.
 */
#ifdef SK_USEIOSPACE
#define SK_RES SYS_RES_IOPORT
#define SK_RID SK_PCI_LOIO
#else
#define SK_RES SYS_RES_MEMORY
#define SK_RID SK_PCI_LOMEM
#endif
252
/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs. We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY. It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

/* Driver for the GEnesis/Yukon controller; softc covers the whole card. */
static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;
282
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

/* Driver for one network port (XMAC or GMAC); child of skc. */
static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Attach skc to pci, sk ports under skc, and miibus under each sk. */
DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
313
/*
 * Read-modify-write helpers for setting/clearing bits in direct CSR
 * and windowed registers.  The bit-mask argument is parenthesized so
 * that compound expressions (e.g. FOO|BAR) are masked and complemented
 * as a whole; the previous definitions expanded "~x" as "~FOO|BAR".
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | (x))

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | (x))

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~(x))
331
332 static u_int32_t
333 sk_win_read_4(sc, reg)
334 struct sk_softc *sc;
335 int reg;
336 {
337 #ifdef SK_USEIOSPACE
338 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
339 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
340 #else
341 return(CSR_READ_4(sc, reg));
342 #endif
343 }
344
345 static u_int16_t
346 sk_win_read_2(sc, reg)
347 struct sk_softc *sc;
348 int reg;
349 {
350 #ifdef SK_USEIOSPACE
351 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
352 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
353 #else
354 return(CSR_READ_2(sc, reg));
355 #endif
356 }
357
358 static u_int8_t
359 sk_win_read_1(sc, reg)
360 struct sk_softc *sc;
361 int reg;
362 {
363 #ifdef SK_USEIOSPACE
364 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
365 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
366 #else
367 return(CSR_READ_1(sc, reg));
368 #endif
369 }
370
371 static void
372 sk_win_write_4(sc, reg, val)
373 struct sk_softc *sc;
374 int reg;
375 u_int32_t val;
376 {
377 #ifdef SK_USEIOSPACE
378 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
379 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
380 #else
381 CSR_WRITE_4(sc, reg, val);
382 #endif
383 return;
384 }
385
386 static void
387 sk_win_write_2(sc, reg, val)
388 struct sk_softc *sc;
389 int reg;
390 u_int32_t val;
391 {
392 #ifdef SK_USEIOSPACE
393 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
394 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
395 #else
396 CSR_WRITE_2(sc, reg, val);
397 #endif
398 return;
399 }
400
401 static void
402 sk_win_write_1(sc, reg, val)
403 struct sk_softc *sc;
404 int reg;
405 u_int32_t val;
406 {
407 #ifdef SK_USEIOSPACE
408 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
409 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
410 #else
411 CSR_WRITE_1(sc, reg, val);
412 #endif
413 return;
414 }
415
/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during skc_attach().
 */
static u_int8_t
sk_vpd_readbyte(sc, addr)
	struct sk_softc		*sc;
	int			addr;
{
	int			i;

	/* Latch the address, then poll for the completion flag. */
	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	/*
	 * NOTE(review): a timeout is indistinguishable from a real 0x00
	 * data byte; callers cannot detect EEPROM read failures.
	 */
	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}
447
448 static void
449 sk_vpd_read_res(sc, res, addr)
450 struct sk_softc *sc;
451 struct vpd_res *res;
452 int addr;
453 {
454 int i;
455 u_int8_t *ptr;
456
457 ptr = (u_int8_t *)res;
458 for (i = 0; i < sizeof(struct vpd_res); i++)
459 ptr[i] = sk_vpd_readbyte(sc, i + addr);
460
461 return;
462 }
463
464 static void
465 sk_vpd_read(sc)
466 struct sk_softc *sc;
467 {
468 int pos = 0, i;
469 struct vpd_res res;
470
471 if (sc->sk_vpd_prodname != NULL)
472 free(sc->sk_vpd_prodname, M_DEVBUF);
473 if (sc->sk_vpd_readonly != NULL)
474 free(sc->sk_vpd_readonly, M_DEVBUF);
475 sc->sk_vpd_prodname = NULL;
476 sc->sk_vpd_readonly = NULL;
477
478 sk_vpd_read_res(sc, &res, pos);
479
480 /*
481 * Bail out quietly if the eeprom appears to be missing or empty.
482 */
483 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
484 return;
485
486 if (res.vr_id != VPD_RES_ID) {
487 printf("skc%d: bad VPD resource id: expected %x got %x\n",
488 sc->sk_unit, VPD_RES_ID, res.vr_id);
489 return;
490 }
491
492 pos += sizeof(res);
493 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
494 for (i = 0; i < res.vr_len; i++)
495 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
496 sc->sk_vpd_prodname[i] = '\0';
497 pos += i;
498
499 sk_vpd_read_res(sc, &res, pos);
500
501 if (res.vr_id != VPD_RES_READ) {
502 printf("skc%d: bad VPD resource id: expected %x got %x\n",
503 sc->sk_unit, VPD_RES_READ, res.vr_id);
504 return;
505 }
506
507 pos += sizeof(res);
508 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
509 for (i = 0; i < res.vr_len; i++)
510 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
511
512 return;
513 }
514
515 static int
516 sk_miibus_readreg(dev, phy, reg)
517 device_t dev;
518 int phy, reg;
519 {
520 struct sk_if_softc *sc_if;
521
522 sc_if = device_get_softc(dev);
523
524 switch(sc_if->sk_softc->sk_type) {
525 case SK_GENESIS:
526 return(sk_xmac_miibus_readreg(sc_if, phy, reg));
527 case SK_YUKON:
528 case SK_YUKON_LITE:
529 case SK_YUKON_LP:
530 return(sk_marv_miibus_readreg(sc_if, phy, reg));
531 }
532
533 return(0);
534 }
535
536 static int
537 sk_miibus_writereg(dev, phy, reg, val)
538 device_t dev;
539 int phy, reg, val;
540 {
541 struct sk_if_softc *sc_if;
542
543 sc_if = device_get_softc(dev);
544
545 switch(sc_if->sk_softc->sk_type) {
546 case SK_GENESIS:
547 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
548 case SK_YUKON:
549 case SK_YUKON_LITE:
550 case SK_YUKON_LP:
551 return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
552 }
553
554 return(0);
555 }
556
557 static void
558 sk_miibus_statchg(dev)
559 device_t dev;
560 {
561 struct sk_if_softc *sc_if;
562
563 sc_if = device_get_softc(dev);
564
565 switch(sc_if->sk_softc->sk_type) {
566 case SK_GENESIS:
567 sk_xmac_miibus_statchg(sc_if);
568 break;
569 case SK_YUKON:
570 case SK_YUKON_LITE:
571 case SK_YUKON_LP:
572 sk_marv_miibus_statchg(sc_if);
573 break;
574 }
575
576 return;
577 }
578
/*
 * Read a PHY register through the XMAC's MII interface.  Returns the
 * register value, or 0 on timeout (callers cannot distinguish a
 * genuine zero from a failure).
 */
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int			i;

	/* The XMAC's internal PHY only responds at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Discarded read; presumably kicks off the MII cycle -- see
	 * the XMAC II datasheet. */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHYs: poll for data-ready before fetching. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	SK_IF_UNLOCK(sc_if);
	return(i);
}
612
/*
 * Write a PHY register through the XMAC's MII interface.  Returns 0 on
 * success or ETIMEDOUT if the PHY never becomes ready to accept the
 * address; a timeout after the data write is only logged.
 */
static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Wait for any previous MII transaction to finish.
	 * NOTE(review): unlike the loop below, this one spins without a
	 * DELAY() between polls. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(ETIMEDOUT);
	}

	/* Issue the data write, then wait for it to complete. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	SK_IF_UNLOCK(sc_if);
	if (i == SK_TIMEOUT)
		printf("sk%d: phy write timed out\n", sc_if->sk_unit);

	return(0);
}
645
/*
 * PHY status-change handler for GEnesis: keep the XMAC's duplex
 * setting in sync with what the MII layer negotiated.
 */
static void
sk_xmac_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);

	SK_IF_LOCK(sc_if);
	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
	SK_IF_UNLOCK(sc_if);

	return;
}
670
/*
 * Read a PHY register through the Yukon GMAC's SMI interface.
 * Returns the register value, or 0 on timeout or when addressing
 * anything other than the single on-chip Marvell PHY at address 0.
 */
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	u_int16_t		val;
	int			i;

	/* Only the integrated Marvell PHY (address 0) is supported. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_IF_LOCK(sc_if);
	/* Start the SMI read cycle, then poll for valid data. */
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n",
		    sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
	SK_IF_UNLOCK(sc_if);

	return(val);
}
708
709 static int
710 sk_marv_miibus_writereg(sc_if, phy, reg, val)
711 struct sk_if_softc *sc_if;
712 int phy, reg, val;
713 {
714 int i;
715
716 SK_IF_LOCK(sc_if);
717 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
718 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
719 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
720
721 for (i = 0; i < SK_TIMEOUT; i++) {
722 DELAY(1);
723 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
724 break;
725 }
726 SK_IF_UNLOCK(sc_if);
727
728 return(0);
729 }
730
/* The Yukon GMAC needs no driver action on PHY status changes. */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
}
737
738 #define HASH_BITS 6
739
740 static u_int32_t
741 sk_xmchash(addr)
742 const uint8_t *addr;
743 {
744 uint32_t crc;
745
746 /* Compute CRC for the address value. */
747 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
748
749 return (~crc & ((1 << HASH_BITS) - 1));
750 }
751
752 /* gmchash is just a big endian crc */
753 static u_int32_t
754 sk_gmchash(addr)
755 const uint8_t *addr;
756 {
757 uint32_t crc;
758
759 /* Compute CRC for the address value. */
760 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
761
762 return (crc & ((1 << HASH_BITS) - 1));
763 }
764
765 static void
766 sk_setfilt(sc_if, addr, slot)
767 struct sk_if_softc *sc_if;
768 caddr_t addr;
769 int slot;
770 {
771 int base;
772
773 base = XM_RXFILT_ENTRY(slot);
774
775 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
776 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
777 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
778
779 return;
780 }
781
/*
 * Reprogram the hardware multicast filters from the interface's
 * multicast address list.  On GEnesis, the first XM_RXFILT_MAX-1
 * groups go into the XMAC perfect filter and the rest into the
 * 64-bit hash; Yukon only has the hash.  Promiscuous/allmulti mode
 * simply opens the hash completely.
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h = 0, i;
	struct ifmultiaddr	*ifma;
	/* All-zero address used to blank unused perfect-filter slots. */
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Slot 0 holds the station address; don't touch it. */
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept every multicast group. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
				h = sk_gmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			}
			/* Set bit h in the 64-bit hash register pair. */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Finally, load the computed hash and enable the filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}
872
873 static void
874 sk_setpromisc(sc_if)
875 struct sk_if_softc *sc_if;
876 {
877 struct sk_softc *sc = sc_if->sk_softc;
878 struct ifnet *ifp = &sc_if->arpcom.ac_if;
879
880 switch(sc->sk_type) {
881 case SK_GENESIS:
882 if (ifp->if_flags & IFF_PROMISC) {
883 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
884 } else {
885 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
886 }
887 break;
888 case SK_YUKON:
889 case SK_YUKON_LITE:
890 case SK_YUKON_LP:
891 if (ifp->if_flags & IFF_PROMISC) {
892 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
893 YU_RCR_UFLEN | YU_RCR_MUFLEN);
894 } else {
895 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
896 YU_RCR_UFLEN | YU_RCR_MUFLEN);
897 }
898 break;
899 }
900
901 return;
902 }
903
/*
 * Build the circular RX descriptor ring: attach a jumbo-buffer-backed
 * mbuf to every slot and chain each descriptor (both the software
 * sk_chain links and the hardware physical-address links) to the
 * next, wrapping the last entry back to the first.  Returns ENOBUFS
 * if any buffer allocation fails.
 */
static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		/*
		 * NOTE(review): on failure, mbufs attached to earlier
		 * slots are left in place; presumably sk_stop() reclaims
		 * them -- confirm against the caller.
		 */
		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1)) {
			/* Last slot wraps back to slot 0. */
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[0];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[0]);
		} else {
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[i + 1];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[i + 1]);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return(0);
}
937
938 static void
939 sk_init_tx_ring(sc_if)
940 struct sk_if_softc *sc_if;
941 {
942 struct sk_chain_data *cd = &sc_if->sk_cdata;
943 struct sk_ring_data *rd = sc_if->sk_rdata;
944 int i;
945
946 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
947 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
948
949 for (i = 0; i < SK_TX_RING_CNT; i++) {
950 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
951 if (i == (SK_TX_RING_CNT - 1)) {
952 cd->sk_tx_chain[i].sk_next =
953 &cd->sk_tx_chain[0];
954 rd->sk_tx_ring[i].sk_next =
955 vtophys(&rd->sk_tx_ring[0]);
956 } else {
957 cd->sk_tx_chain[i].sk_next =
958 &cd->sk_tx_chain[i + 1];
959 rd->sk_tx_ring[i].sk_next =
960 vtophys(&rd->sk_tx_ring[i + 1]);
961 }
962 }
963
964 sc_if->sk_cdata.sk_tx_prod = 0;
965 sc_if->sk_cdata.sk_tx_cons = 0;
966 sc_if->sk_cdata.sk_tx_cnt = 0;
967
968 return;
969 }
970
/*
 * Arm an RX descriptor with an mbuf backed by one of the driver's
 * private jumbo buffers.  If `m' is NULL a fresh mbuf header and
 * jumbo buffer are allocated; otherwise the caller's mbuf is recycled
 * in place.  Returns 0 on success or ENOBUFS.
 */
static int
sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		/* NOTE(review): declared caddr_t* but holds the buffer
		 * address itself (sk_jalloc returns void*) -- the extra
		 * level of indirection in the type looks vestigial. */
		caddr_t			*buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf; sk_jfree() returns it
		 * to the pool when the mbuf chain is freed. */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Hand the buffer's physical address and length to the NIC. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
1028
/*
 * Allocate jumbo buffer storage. The SysKonnect adapters support
 * "jumbograms" (9K frames), although SysKonnect doesn't currently
 * use them in their drivers. In order for us to use them, we need
 * large 9K receive buffers, however standard mbuf clusters are only
 * 2048 bytes in size. Consequently, we need to allocate and manage
 * our own jumbo buffer pool. Fortunately, this does not require an
 * excessive amount of additional code.
 */
static int
sk_alloc_jumbo_mem(sc_if)
	struct sk_if_softc	*sc_if;
{
	caddr_t			ptr;
	register int		i;
	struct sk_jpool_entry	*entry;

	/* Grab a big chunk o' storage. */
	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
		return(ENOBUFS);
	}

	/* Mutex protecting the free/in-use slot lists below. */
	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);

	SLIST_INIT(&sc_if->sk_jfree_listhead);
	SLIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/* Tear down everything built so far. */
			sk_free_jumbo_mem(sc_if);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("sk%d: no memory for jumbo "
			    "buffer queue!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
1084
/*
 * Tear down the jumbo buffer pool: wait (briefly) for outstanding
 * buffers to be returned, free the slot bookkeeping entries, destroy
 * the list mutex and release the contiguous backing storage.
 */
static void
sk_free_jumbo_mem(sc_if)
	struct sk_if_softc	*sc_if;
{
	/* NOTE(review): retval is assigned but never examined; a
	 * 5-second msleep timeout proceeds with the free regardless. */
	int			retval = 0;
	struct sk_jpool_entry	*entry;

	SK_JLIST_LOCK(sc_if);

	/* Wait for the "inuse" list to drain. */
	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
		retval = msleep(sc_if, &sc_if->sk_jlist_mtx, PZERO,
		    "skfjm", 5 * hz);

	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	SK_JLIST_UNLOCK(sc_if);

	mtx_destroy(&sc_if->sk_jlist_mtx);

	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);

	return;
}
1113
1114 /*
1115 * Allocate a jumbo buffer.
1116 */
1117 static void *
1118 sk_jalloc(sc_if)
1119 struct sk_if_softc *sc_if;
1120 {
1121 struct sk_jpool_entry *entry;
1122
1123 SK_JLIST_LOCK(sc_if);
1124
1125 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1126
1127 if (entry == NULL) {
1128 #ifdef SK_VERBOSE
1129 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1130 #endif
1131 SK_JLIST_UNLOCK(sc_if);
1132 return(NULL);
1133 }
1134
1135 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1136 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1137
1138 SK_JLIST_UNLOCK(sc_if);
1139
1140 return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1141 }
1142
/*
 * Release a jumbo buffer.  Installed as the mbuf external-storage free
 * routine: 'buf' is the jumbo buffer address, 'args' is the interface
 * softc supplied at attachment time.  Panics on protocol violations
 * (bad pointer, buffer not from our pool, nothing marked in use).
 */
static void
sk_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct sk_if_softc	*sc_if;
	int			i;
	struct sk_jpool_entry	*entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;
	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	SK_JLIST_LOCK(sc_if);

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	/*
	 * Pool entries are interchangeable: take any entry off the
	 * in-use list and retarget its slot number at the buffer being
	 * freed before putting it back on the free list.
	 */
	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
	/* Last outstanding buffer returned: unblock sk_free_jumbo_mem(). */
	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
		wakeup(sc_if);

	SK_JLIST_UNLOCK(sc_if);
	return;
}
1181
1182 /*
1183 * Set media options.
1184 */
1185 static int
1186 sk_ifmedia_upd(ifp)
1187 struct ifnet *ifp;
1188 {
1189 struct sk_if_softc *sc_if = ifp->if_softc;
1190 struct mii_data *mii;
1191
1192 mii = device_get_softc(sc_if->sk_miibus);
1193 sk_init(sc_if);
1194 mii_mediachg(mii);
1195
1196 return(0);
1197 }
1198
1199 /*
1200 * Report current media status.
1201 */
1202 static void
1203 sk_ifmedia_sts(ifp, ifmr)
1204 struct ifnet *ifp;
1205 struct ifmediareq *ifmr;
1206 {
1207 struct sk_if_softc *sc_if;
1208 struct mii_data *mii;
1209
1210 sc_if = ifp->if_softc;
1211 mii = device_get_softc(sc_if->sk_miibus);
1212
1213 mii_pollstat(mii);
1214 ifmr->ifm_active = mii->mii_media_active;
1215 ifmr->ifm_status = mii->mii_media_status;
1216
1217 return;
1218 }
1219
1220 static int
1221 sk_ioctl(ifp, command, data)
1222 struct ifnet *ifp;
1223 u_long command;
1224 caddr_t data;
1225 {
1226 struct sk_if_softc *sc_if = ifp->if_softc;
1227 struct ifreq *ifr = (struct ifreq *) data;
1228 int error = 0;
1229 struct mii_data *mii;
1230
1231 switch(command) {
1232 case SIOCSIFMTU:
1233 if (ifr->ifr_mtu > SK_JUMBO_MTU)
1234 error = EINVAL;
1235 else {
1236 ifp->if_mtu = ifr->ifr_mtu;
1237 ifp->if_flags &= ~IFF_RUNNING;
1238 sk_init(sc_if);
1239 }
1240 break;
1241 case SIOCSIFFLAGS:
1242 SK_IF_LOCK(sc_if);
1243 if (ifp->if_flags & IFF_UP) {
1244 if (ifp->if_flags & IFF_RUNNING) {
1245 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1246 & IFF_PROMISC) {
1247 sk_setpromisc(sc_if);
1248 sk_setmulti(sc_if);
1249 }
1250 } else
1251 sk_init(sc_if);
1252 } else {
1253 if (ifp->if_flags & IFF_RUNNING)
1254 sk_stop(sc_if);
1255 }
1256 sc_if->sk_if_flags = ifp->if_flags;
1257 SK_IF_UNLOCK(sc_if);
1258 error = 0;
1259 break;
1260 case SIOCADDMULTI:
1261 case SIOCDELMULTI:
1262 if (ifp->if_flags & IFF_RUNNING) {
1263 SK_IF_LOCK(sc_if);
1264 sk_setmulti(sc_if);
1265 SK_IF_UNLOCK(sc_if);
1266 error = 0;
1267 }
1268 break;
1269 case SIOCGIFMEDIA:
1270 case SIOCSIFMEDIA:
1271 mii = device_get_softc(sc_if->sk_miibus);
1272 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1273 break;
1274 default:
1275 error = ether_ioctl(ifp, command, data);
1276 break;
1277 }
1278
1279 return(error);
1280 }
1281
1282 /*
1283 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1284 * IDs against our list and return a device name if we find a match.
1285 */
1286 static int
1287 skc_probe(dev)
1288 device_t dev;
1289 {
1290 struct sk_softc *sc;
1291 struct sk_type *t = sk_devs;
1292
1293 sc = device_get_softc(dev);
1294
1295 while(t->sk_name != NULL) {
1296 if ((pci_get_vendor(dev) == t->sk_vid) &&
1297 (pci_get_device(dev) == t->sk_did)) {
1298 device_set_desc(dev, t->sk_name);
1299 return (BUS_PROBE_DEFAULT);
1300 }
1301 t++;
1302 }
1303
1304 return(ENXIO);
1305 }
1306
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Also performs the post-reset baseline setup: packet arbiter
 * (GEnesis only), RAM interface, and interrupt moderation.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{
	/* Assert software and master reset; Yukon also needs link reset. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Deassert in reverse order, with settle delays between steps. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 * (SK_IM_USECS() performs that conversion; 200us here.)
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1353
1354 static int
1355 sk_probe(dev)
1356 device_t dev;
1357 {
1358 struct sk_softc *sc;
1359
1360 sc = device_get_softc(device_get_parent(dev));
1361
1362 /*
1363 * Not much to do here. We always know there will be
1364 * at least one XMAC present, and if there are two,
1365 * skc_attach() will create a second device instance
1366 * for us.
1367 */
1368 switch (sc->sk_type) {
1369 case SK_GENESIS:
1370 device_set_desc(dev, "XaQti Corp. XMAC II");
1371 break;
1372 case SK_YUKON:
1373 case SK_YUKON_LITE:
1374 case SK_YUKON_LP:
1375 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1376 break;
1377 }
1378
1379 return (BUS_PROBE_DEFAULT);
1380 }
1381
/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 *
 * Allocates the descriptor rings and jumbo pool, reads the station
 * address and carves up the on-board SRAM, then hooks up the PHY.
 * On any failure it clears sc->sk_if[port] and calls sk_detach() to
 * unwind partial state.
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			i, port, error;

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	/* Port number (SK_PORT_A/B) was stashed in ivars by skc_attach(). */
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_dev = dev;
	sc_if->sk_unit = device_get_unit(dev);
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	/* Each port drives its own transmit BMU CSR. */
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/*
	 * Allocate the descriptor queues.
	 * NOTE(review): M_ZERO here occupies contigmalloc()'s 'low'
	 * address-bound argument rather than being OR'd into the flags,
	 * so the ring memory is presumably NOT zeroed by this call —
	 * verify against the contigmalloc(9) signature in this tree.
	 */
	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
	    M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_rdata == NULL) {
		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf("sk%d: jumbo buffer allocation failed\n",
		    sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Fill in the ifnet and register our driver entry points. */
	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	callout_handle_init(&sc_if->sk_tick_ch);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_LOCK(sc);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algotithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split SRAM RX/TX down the middle. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: quarter the SRAM, offset by port. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	switch(sc_if->sk_phytype) {
	case SK_PHYTYPE_XMAC:
		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
		break;
	case SK_PHYTYPE_BCOM:
		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
		break;
	case SK_PHYTYPE_MARV_COPPER:
		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
		break;
	default:
		printf("skc%d: unsupported PHY type: %d\n",
		    sc->sk_unit, sc_if->sk_phytype);
		error = ENODEV;
		SK_UNLOCK(sc);
		goto fail;
	}


	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	SK_UNLOCK(sc);
	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
	SK_LOCK(sc);

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}

	/* Drop the lock: mii_phy_probe() may sleep / call back into us. */
	SK_UNLOCK(sc);
	if (mii_phy_probe(dev, &sc_if->sk_miibus,
	    sk_ifmedia_upd, sk_ifmedia_sts)) {
		/* NOTE(review): prefix says "skc%d" but this is the sk unit. */
		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
1554
1555 /*
1556 * Attach the interface. Allocate softc structures, do ifmedia
1557 * setup and ethernet/BPF attach.
1558 */
1559 static int
1560 skc_attach(dev)
1561 device_t dev;
1562 {
1563 struct sk_softc *sc;
1564 int unit, error = 0, rid, *port;
1565 uint8_t skrs;
1566 char *pname, *revstr;
1567
1568 sc = device_get_softc(dev);
1569 unit = device_get_unit(dev);
1570
1571 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1572 MTX_DEF | MTX_RECURSE);
1573 /*
1574 * Map control/status registers.
1575 */
1576 pci_enable_busmaster(dev);
1577
1578 rid = SK_RID;
1579 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1580
1581 if (sc->sk_res == NULL) {
1582 printf("sk%d: couldn't map ports/memory\n", unit);
1583 error = ENXIO;
1584 goto fail;
1585 }
1586
1587 sc->sk_btag = rman_get_bustag(sc->sk_res);
1588 sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1589
1590 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1591 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1592
1593 /* Bail out if chip is not recognized. */
1594 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1595 printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
1596 unit, sc->sk_type, sc->sk_rev);
1597 error = ENXIO;
1598 goto fail;
1599 }
1600
1601 /* Allocate interrupt */
1602 rid = 0;
1603 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1604 RF_SHAREABLE | RF_ACTIVE);
1605
1606 if (sc->sk_irq == NULL) {
1607 printf("skc%d: couldn't map interrupt\n", unit);
1608 error = ENXIO;
1609 goto fail;
1610 }
1611
1612 /* Reset the adapter. */
1613 sk_reset(sc);
1614
1615 sc->sk_unit = unit;
1616
1617 /* Read and save vital product data from EEPROM. */
1618 sk_vpd_read(sc);
1619
1620 skrs = sk_win_read_1(sc, SK_EPROM0);
1621 if (sc->sk_type == SK_GENESIS) {
1622 /* Read and save RAM size and RAMbuffer offset */
1623 switch(skrs) {
1624 case SK_RAMSIZE_512K_64:
1625 sc->sk_ramsize = 0x80000;
1626 sc->sk_rboff = SK_RBOFF_0;
1627 break;
1628 case SK_RAMSIZE_1024K_64:
1629 sc->sk_ramsize = 0x100000;
1630 sc->sk_rboff = SK_RBOFF_80000;
1631 break;
1632 case SK_RAMSIZE_1024K_128:
1633 sc->sk_ramsize = 0x100000;
1634 sc->sk_rboff = SK_RBOFF_0;
1635 break;
1636 case SK_RAMSIZE_2048K_128:
1637 sc->sk_ramsize = 0x200000;
1638 sc->sk_rboff = SK_RBOFF_0;
1639 break;
1640 default:
1641 printf("skc%d: unknown ram size: %d\n",
1642 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1643 error = ENXIO;
1644 goto fail;
1645 }
1646 } else { /* SK_YUKON_FAMILY */
1647 if (skrs == 0x00)
1648 sc->sk_ramsize = 0x20000;
1649 else
1650 sc->sk_ramsize = skrs * (1<<12);
1651 sc->sk_rboff = SK_RBOFF_0;
1652 }
1653
1654 /* Read and save physical media type */
1655 switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1656 case SK_PMD_1000BASESX:
1657 sc->sk_pmd = IFM_1000_SX;
1658 break;
1659 case SK_PMD_1000BASELX:
1660 sc->sk_pmd = IFM_1000_LX;
1661 break;
1662 case SK_PMD_1000BASECX:
1663 sc->sk_pmd = IFM_1000_CX;
1664 break;
1665 case SK_PMD_1000BASETX:
1666 sc->sk_pmd = IFM_1000_T;
1667 break;
1668 default:
1669 printf("skc%d: unknown media type: 0x%x\n",
1670 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1671 error = ENXIO;
1672 goto fail;
1673 }
1674
1675 /* Determine whether to name it with VPD PN or just make it up.
1676 * Marvell Yukon VPD PN seems to freqently be bogus. */
1677 switch (pci_get_device(dev)) {
1678 case DEVICEID_SK_V1:
1679 case DEVICEID_BELKIN_5005:
1680 case DEVICEID_3COM_3C940:
1681 case DEVICEID_LINKSYS_EG1032:
1682 case DEVICEID_DLINK_DGE530T:
1683 /* Stay with VPD PN. */
1684 pname = sc->sk_vpd_prodname;
1685 break;
1686 case DEVICEID_SK_V2:
1687 /* YUKON VPD PN might bear no resemblance to reality. */
1688 switch (sc->sk_type) {
1689 case SK_GENESIS:
1690 /* Stay with VPD PN. */
1691 pname = sc->sk_vpd_prodname;
1692 break;
1693 case SK_YUKON:
1694 pname = "Marvell Yukon Gigabit Ethernet";
1695 break;
1696 case SK_YUKON_LITE:
1697 pname = "Marvell Yukon Lite Gigabit Ethernet";
1698 break;
1699 case SK_YUKON_LP:
1700 pname = "Marvell Yukon LP Gigabit Ethernet";
1701 break;
1702 default:
1703 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1704 break;
1705 }
1706
1707 /* Yukon Lite Rev. A0 needs special test. */
1708 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1709 u_int32_t far;
1710 u_int8_t testbyte;
1711
1712 /* Save flash address register before testing. */
1713 far = sk_win_read_4(sc, SK_EP_ADDR);
1714
1715 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1716 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1717
1718 if (testbyte != 0x00) {
1719 /* Yukon Lite Rev. A0 detected. */
1720 sc->sk_type = SK_YUKON_LITE;
1721 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1722 /* Restore flash address register. */
1723 sk_win_write_4(sc, SK_EP_ADDR, far);
1724 }
1725 }
1726 break;
1727 default:
1728 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1729 "chipver=%02x, rev=%x\n",
1730 pci_get_vendor(dev), pci_get_device(dev),
1731 sc->sk_type, sc->sk_rev);
1732 error = ENXIO;
1733 goto fail;
1734 }
1735
1736 if (sc->sk_type == SK_YUKON_LITE) {
1737 switch (sc->sk_rev) {
1738 case SK_YUKON_LITE_REV_A0:
1739 revstr = "A0";
1740 break;
1741 case SK_YUKON_LITE_REV_A1:
1742 revstr = "A1";
1743 break;
1744 case SK_YUKON_LITE_REV_A3:
1745 revstr = "A3";
1746 break;
1747 default:
1748 revstr = "";
1749 break;
1750 }
1751 } else {
1752 revstr = "";
1753 }
1754
1755 /* Announce the product name. */
1756 device_printf(dev, "%s rev. %s(0x%x)\n", pname, revstr, sc->sk_rev);
1757 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1758 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1759 *port = SK_PORT_A;
1760 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1761
1762 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1763 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1764 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1765 *port = SK_PORT_B;
1766 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1767 }
1768
1769 /* Turn on the 'driver is loaded' LED. */
1770 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1771
1772 bus_generic_attach(dev);
1773
1774 /* Hook interrupt last to avoid having to lock softc */
1775 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1776 sk_intr, sc, &sc->sk_intrhand);
1777
1778 if (error) {
1779 printf("skc%d: couldn't set up irq\n", unit);
1780 goto fail;
1781 }
1782
1783 fail:
1784 if (error)
1785 skc_detach(dev);
1786
1787 return(error);
1788 }
1789
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	/* Only free what attach actually allocated (may be error path). */
	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
		sk_free_jumbo_mem(sc_if);
	if (sc_if->sk_rdata != NULL) {
		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
		    M_DEVBUF);
	}
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1838
/*
 * Detach the controller: delete the per-port sk(4) children (freeing
 * their port-number ivars), release VPD strings, interrupt and
 * register resources, and destroy the softc mutex.  Safe to call from
 * the skc_attach() error path; each teardown step is guarded by a
 * NULL/alive check.
 */
static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		/* Deleting a child runs sk_detach() on it via newbus. */
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);

	/* Tear down interrupt before releasing the IRQ resource. */
	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	if (sc->sk_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	if (sc->sk_res)
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1876
/*
 * Encapsulate an mbuf chain into the transmit descriptor ring starting
 * at *txidx.  Returns ENOBUFS (ring nearly full) without consuming the
 * chain, or 0 on success with *txidx advanced past the last descriptor
 * used.  The OWN bit on the first descriptor is set last so the chip
 * never sees a partially built chain.
 */
static int
sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	SK_IF_LOCK_ASSERT(sc_if);

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always leave at least 2 descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * OWN is withheld on the first fragment so the
			 * chip does not start before the chain is done.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/*
	 * NOTE(review): defensive only — the loop above can only exit
	 * with m == NULL (the full-ring case returns directly), so this
	 * branch is unreachable as written.
	 */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the tail, then hand the whole chain to the chip. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1928
/*
 * Transmit start routine: drain the interface send queue into the TX
 * descriptor ring, then kick the transmit BMU if anything was queued.
 * Sets IFF_OACTIVE and re-queues the packet when the ring fills.
 */
static void
sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop at the first ring slot that still holds an mbuf. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
	SK_IF_UNLOCK(sc_if);

	return;
}
1980
1981
1982 static void
1983 sk_watchdog(ifp)
1984 struct ifnet *ifp;
1985 {
1986 struct sk_if_softc *sc_if;
1987
1988 sc_if = ifp->if_softc;
1989
1990 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1991 ifp->if_flags &= ~IFF_RUNNING;
1992 sk_init(sc_if);
1993
1994 return;
1995 }
1996
1997 static void
1998 skc_shutdown(dev)
1999 device_t dev;
2000 {
2001 struct sk_softc *sc;
2002
2003 sc = device_get_softc(dev);
2004 SK_LOCK(sc);
2005
2006 /* Turn off the 'driver is loaded' LED. */
2007 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2008
2009 /*
2010 * Reset the GEnesis controller. Doing this should also
2011 * assert the resets on the attached XMAC(s).
2012 */
2013 sk_reset(sc);
2014 SK_UNLOCK(sc);
2015
2016 return;
2017 }
2018
/*
 * Receive completion: walk the RX ring from sk_rx_prod, handing each
 * completed frame up the stack.  The softc lock is dropped around
 * if_input() and reacquired, so ring state must be consistent before
 * each upcall.
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	SK_LOCK_ASSERT(sc);

	/* Process until we hit a descriptor the chip still owns. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: recycle the buffer back into the ring. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		/* Can't hold the softc lock across the stack upcall. */
		SK_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SK_LOCK(sc);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
2088
/*
 * Transmit completion: reclaim mbufs for descriptors the chip has
 * finished with, clear the watchdog when the ring drains, and lift
 * IFF_OACTIVE once enough room is available.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Chip still owns this descriptor: stop reclaiming. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Count one packet per LASTFRAG, not per fragment. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}

	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
		ifp->if_timer = 0;
	} else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
2130
/*
 * Periodic link-recovery timer (XMAC/Broadcom PHYs).  Re-armed while
 * the link is down; once the link_sync GPIO reads stable three times,
 * re-enables the GP0 interrupt and stops rescheduling itself.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Interface went down: let the timer lapse. */
	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Broadcom PHYs have their own interrupt-driven handler. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not yet stable: poll again in one second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Read ISR to clear any pending GP0 interrupt. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
2183
/*
 * Service a Broadcom PHY interrupt: ack the PHY ISR, track link
 * transitions (LED control, sk_link flag) and feed the MII state
 * machine.  TX/RX are paused at the MAC around the PHY access.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	/* Quiesce the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and mark it down. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: unmask PHY interrupts again. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Autoneg still in progress: poll again later. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Resume MAC TX/RX. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
2237
2238 static void
2239 sk_intr_xmac(sc_if)
2240 struct sk_if_softc *sc_if;
2241 {
2242 struct sk_softc *sc;
2243 u_int16_t status;
2244
2245 sc = sc_if->sk_softc;
2246 status = SK_XM_READ_2(sc_if, XM_ISR);
2247
2248 /*
2249 * Link has gone down. Start MII tick timeout to
2250 * watch for link resync.
2251 */
2252 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2253 if (status & XM_ISR_GP0_SET) {
2254 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2255 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2256 }
2257
2258 if (status & XM_ISR_AUTONEG_DONE) {
2259 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2260 }
2261 }
2262
2263 if (status & XM_IMR_TX_UNDERRUN)
2264 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2265
2266 if (status & XM_IMR_RX_OVERRUN)
2267 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2268
2269 status = SK_XM_READ_2(sc_if, XM_ISR);
2270
2271 return;
2272 }
2273
2274 static void
2275 sk_intr_yukon(sc_if)
2276 struct sk_if_softc *sc_if;
2277 {
2278 int status;
2279
2280 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2281
2282 return;
2283 }
2284
2285 static void
2286 sk_intr(xsc)
2287 void *xsc;
2288 {
2289 struct sk_softc *sc = xsc;
2290 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
2291 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2292 u_int32_t status;
2293
2294 SK_LOCK(sc);
2295
2296 sc_if0 = sc->sk_if[SK_PORT_A];
2297 sc_if1 = sc->sk_if[SK_PORT_B];
2298
2299 if (sc_if0 != NULL)
2300 ifp0 = &sc_if0->arpcom.ac_if;
2301 if (sc_if1 != NULL)
2302 ifp1 = &sc_if1->arpcom.ac_if;
2303
2304 for (;;) {
2305 status = CSR_READ_4(sc, SK_ISSR);
2306 if (!(status & sc->sk_intrmask))
2307 break;
2308
2309 /* Handle receive interrupts first. */
2310 if (status & SK_ISR_RX1_EOF) {
2311 sk_rxeof(sc_if0);
2312 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2313 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2314 }
2315 if (status & SK_ISR_RX2_EOF) {
2316 sk_rxeof(sc_if1);
2317 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2318 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2319 }
2320
2321 /* Then transmit interrupts. */
2322 if (status & SK_ISR_TX1_S_EOF) {
2323 sk_txeof(sc_if0);
2324 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2325 SK_TXBMU_CLR_IRQ_EOF);
2326 }
2327 if (status & SK_ISR_TX2_S_EOF) {
2328 sk_txeof(sc_if1);
2329 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2330 SK_TXBMU_CLR_IRQ_EOF);
2331 }
2332
2333 /* Then MAC interrupts. */
2334 if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2335 if (sc->sk_type == SK_GENESIS)
2336 sk_intr_xmac(sc_if0);
2337 else
2338 sk_intr_yukon(sc_if0);
2339 }
2340
2341 if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2342 if (sc->sk_type == SK_GENESIS)
2343 sk_intr_xmac(sc_if1);
2344 else
2345 sk_intr_yukon(sc_if1);
2346 }
2347
2348 if (status & SK_ISR_EXTERNAL_REG) {
2349 if (ifp0 != NULL &&
2350 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2351 sk_intr_bcom(sc_if0);
2352 if (ifp1 != NULL &&
2353 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2354 sk_intr_bcom(sc_if1);
2355 }
2356 }
2357
2358 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2359
2360 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
2361 sk_start(ifp0);
2362 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
2363 sk_start(ifp1);
2364
2365 SK_UNLOCK(sc);
2366
2367 return;
2368 }
2369
/*
 * Bring up the XMAC II on a GEnesis port: unreset the MAC, program
 * the station address and RX/TX modes, apply Broadcom PHY errata,
 * enable interrupts and configure the MAC arbiter for the detected
 * XMAC revision.  Register write order follows the reference driver
 * and should not be rearranged.
 */
static void
sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	/* Magic register/value pairs for early BCM5400 PHYs (see below). */
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then mask off its interrupts. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/*
	 * Set station address.  The MAC address is written 16 bits at a
	 * time into the three physical-address registers.
	 * NOTE(review): the u_int16_t casts assume ac_enaddr is at least
	 * 2-byte aligned and match host endianness expectations of the
	 * XM_PARx registers — presumably fine on the platforms this
	 * driver targets; confirm before reuse elsewhere.
	 */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Honor the interface's broadcast-reception flag. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Allow oversized frames only when a jumbo MTU is configured. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/*
	 * Clear and enable interrupts.  The ISR read discards any stale
	 * pending sources before the mask is programmed.
	 */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		/* External PHY: mask all XMAC interrupts. */
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter timeouts per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
2534
/*
 * Bring up the GMAC on a Yukon-family port: reset the GMAC and GPHY,
 * select copper/fiber PHY mode from the detected media, clear the MIB
 * counters, program address filters and serial/transmit parameters,
 * and enable the RX/TX MAC FIFOs.  Register write order follows the
 * reference driver and should not be rearranged.
 */
static void
sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy;
	u_int16_t		reg;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	int			i;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Yukon Lite rev A3 keeps its PHY reset behind a GPIO bit. */
	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev == SK_YUKON_LITE_REV_A3) {
		/* Take PHY out of reset. */
		sk_win_write_4(sc, SK_GPIO,
			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
	}

	/*
	 * GMAC and GPHY Reset.
	 * NOTE(review): the sequence SET, CLEAR, then SET again follows
	 * the reference driver; presumably the final SET is required
	 * before the combined phy|RESET_SET write below — confirm against
	 * the Yukon programming manual before changing.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	/* Base GPHY configuration: autoneg everything, pause enabled. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Select fiber or copper PHY mode from the board's media type. */
	switch(sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	/* Pulse the GPHY reset with the chosen configuration. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register: enable jumbo framing only for jumbo MTUs */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's address, two bytes per SAL register. */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->arpcom.ac_enaddr[i * 2] |
			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Copy the per-port MAC address from EEPROM shadow into SA2. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
2643
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
/*
 * ifnet if_init entry point: bring the interface up.  Stops any
 * current activity, performs chip-specific MAC init (XMAC for
 * GEnesis, GMAC for Yukon), sets up FIFOs, RAM buffers, BMUs and
 * descriptor rings, enables this port's interrupts and starts the
 * receiver/transmitter.  No-op if the interface is already running.
 */
static void
sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Already up: nothing to do. */
	if (ifp->if_flags & IFF_RUNNING) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	/* Kick off media (re)negotiation via the MII layer. */
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers: RX queue spans this port's RAM window. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* TX RAM buffer runs in store-and-forward mode. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs: point them at the physical addresses of the
	 * descriptor rings (vtophys: rings must be physically contiguous).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; RX ring init can fail for want of mbufs. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling: clear stale sources, then unmask. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		/* Enable GMAC TX/RX; let autoneg control speed/duplex. */
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Mark the interface up and ready to transmit. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}
2790
2791 static void
2792 sk_stop(sc_if)
2793 struct sk_if_softc *sc_if;
2794 {
2795 int i;
2796 struct sk_softc *sc;
2797 struct ifnet *ifp;
2798
2799 SK_IF_LOCK(sc_if);
2800 sc = sc_if->sk_softc;
2801 ifp = &sc_if->arpcom.ac_if;
2802
2803 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2804
2805 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2806 u_int32_t val;
2807
2808 /* Put PHY back into reset. */
2809 val = sk_win_read_4(sc, SK_GPIO);
2810 if (sc_if->sk_port == SK_PORT_A) {
2811 val |= SK_GPIO_DIR0;
2812 val &= ~SK_GPIO_DAT0;
2813 } else {
2814 val |= SK_GPIO_DIR2;
2815 val &= ~SK_GPIO_DAT2;
2816 }
2817 sk_win_write_4(sc, SK_GPIO, val);
2818 }
2819
2820 /* Turn off various components of this interface. */
2821 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2822 switch (sc->sk_type) {
2823 case SK_GENESIS:
2824 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2825 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2826 break;
2827 case SK_YUKON:
2828 case SK_YUKON_LITE:
2829 case SK_YUKON_LP:
2830 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2831 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2832 break;
2833 }
2834 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2835 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2836 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2837 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2838 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2839 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2840 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2841 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2842 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2843
2844 /* Disable interrupts */
2845 if (sc_if->sk_port == SK_PORT_A)
2846 sc->sk_intrmask &= ~SK_INTRS1;
2847 else
2848 sc->sk_intrmask &= ~SK_INTRS2;
2849 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2850
2851 SK_XM_READ_2(sc_if, XM_ISR);
2852 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2853
2854 /* Free RX and TX mbufs still in the queues. */
2855 for (i = 0; i < SK_RX_RING_CNT; i++) {
2856 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2857 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2858 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2859 }
2860 }
2861
2862 for (i = 0; i < SK_TX_RING_CNT; i++) {
2863 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2864 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2865 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2866 }
2867 }
2868
2869 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2870 SK_IF_UNLOCK(sc_if);
2871 return;
2872 }
Cache object: 37b2ecb7cdb01ae84175833449b64559
|