FreeBSD/Linux Kernel Cross Reference
sys/pci/if_sk.c
1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34 /*-
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52
53 /*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71 /*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/module.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <vm/vm.h> /* for vtophys */
107 #include <vm/pmap.h> /* for vtophys */
108 #include <machine/bus_pio.h>
109 #include <machine/bus_memio.h>
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/brgphyreg.h>
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121
122 #if 0
123 #define SK_USEIOSPACE
124 #endif
125
126 #include <pci/if_skreg.h>
127 #include <pci/xmaciireg.h>
128 #include <pci/yukonreg.h>
129
130 MODULE_DEPEND(sk, pci, 1, 1, 1);
131 MODULE_DEPEND(sk, ether, 1, 1, 1);
132 MODULE_DEPEND(sk, miibus, 1, 1, 1);
133
134 /* "controller miibus0" required. See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136
137 #ifndef lint
138 static const char rcsid[] =
139 "$FreeBSD$";
140 #endif
141
/*
 * Table of PCI vendor/device IDs this driver claims.  skc_probe() walks
 * this list and attaches to the first matching entry; the string is the
 * device description reported to the user.  The list is terminated by
 * the all-zero sentinel entry.
 */
static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
180
/*
 * Forward declarations: newbus entry points for the controller (skc_*)
 * and per-port (sk_*) devices, interrupt and data-path handlers, the
 * register-window accessors, VPD EEPROM readers, and the per-MAC-type
 * (XMAC vs. Marvell/Yukon) MII access helpers.
 */
static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static void skc_shutdown(device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);
static void sk_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
static void sk_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);
static int sk_encap(struct sk_if_softc *, struct mbuf *,
					u_int32_t *);
static void sk_start(struct ifnet *);
static int sk_ioctl(struct ifnet *, u_long, caddr_t);
static void sk_init(void *);
static void sk_init_xmac(struct sk_if_softc *);
static void sk_init_yukon(struct sk_if_softc *);
static void sk_stop(struct sk_if_softc *);
static void sk_watchdog(struct ifnet *);
static int sk_ifmedia_upd(struct ifnet *);
static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sk_reset(struct sk_softc *);
static int sk_newbuf(struct sk_if_softc *,
					struct sk_chain *, struct mbuf *);
static int sk_alloc_jumbo_mem(struct sk_if_softc *);
static void sk_free_jumbo_mem(struct sk_if_softc *);
static void *sk_jalloc(struct sk_if_softc *);
static void sk_jfree(void *, void *);
static int sk_init_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
static u_int8_t sk_win_read_1(struct sk_softc *, int);
static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
static void sk_vpd_read(struct sk_softc *);

static int sk_miibus_readreg(device_t, int, int);
static int sk_miibus_writereg(device_t, int, int, int);
static void sk_miibus_statchg(device_t);

static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
					int);
static void sk_xmac_miibus_statchg(struct sk_if_softc *);

static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
					int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);

static uint32_t sk_xmchash(const uint8_t *);
static uint32_t sk_gmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
static void sk_setmulti(struct sk_if_softc *);
static void sk_setpromisc(struct sk_if_softc *);

/*
 * Select I/O-port or memory-mapped register access.  SK_USEIOSPACE is
 * normally not defined (see the "#if 0" above), so the memory-mapped
 * BAR is used.
 */
#ifdef SK_USEIOSPACE
#define SK_RES SYS_RES_IOPORT
#define SK_RID SK_PCI_LOIO
#else
#define SK_RES SYS_RES_MEMORY
#define SK_RID SK_PCI_LOMEM
#endif
252
253 /*
254 * Note that we have newbus methods for both the GEnesis controller
255 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
256 * the miibus code is a child of the XMACs. We need to do it this way
257 * so that the miibus drivers can access the PHY registers on the
258 * right PHY. It's not quite what I had in mind, but it's the only
259 * design that achieves the desired effect.
260 */
/* Newbus method table for the GEnesis controller device ("skc"). */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, skc_probe),
	DEVMETHOD(device_attach, skc_attach),
	DEVMETHOD(device_detach, skc_detach),
	DEVMETHOD(device_shutdown, skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/*
 * Newbus method table for the per-port XMAC device ("sk"), a child of
 * "skc".  It also exports the MII methods so the miibus child can reach
 * the PHY registers through the right MAC.
 */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, sk_probe),
	DEVMETHOD(device_attach, sk_attach),
	DEVMETHOD(device_detach, sk_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, sk_miibus_readreg),
	DEVMETHOD(miibus_writereg, sk_miibus_writereg),
	DEVMETHOD(miibus_statchg, sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Hook the drivers into the tree: pci -> skc -> sk -> miibus. */
DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
313
/*
 * Read-modify-write helpers: set or clear bits in a CSR, and 4/2-byte
 * variants that go through the windowed register accessors below.
 */
#define SK_SETBIT(sc, reg, x) \
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x) \
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x) \
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x) \
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x) \
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x) \
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
331
332 static u_int32_t
333 sk_win_read_4(sc, reg)
334 struct sk_softc *sc;
335 int reg;
336 {
337 #ifdef SK_USEIOSPACE
338 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
339 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
340 #else
341 return(CSR_READ_4(sc, reg));
342 #endif
343 }
344
345 static u_int16_t
346 sk_win_read_2(sc, reg)
347 struct sk_softc *sc;
348 int reg;
349 {
350 #ifdef SK_USEIOSPACE
351 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
352 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
353 #else
354 return(CSR_READ_2(sc, reg));
355 #endif
356 }
357
358 static u_int8_t
359 sk_win_read_1(sc, reg)
360 struct sk_softc *sc;
361 int reg;
362 {
363 #ifdef SK_USEIOSPACE
364 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
365 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
366 #else
367 return(CSR_READ_1(sc, reg));
368 #endif
369 }
370
371 static void
372 sk_win_write_4(sc, reg, val)
373 struct sk_softc *sc;
374 int reg;
375 u_int32_t val;
376 {
377 #ifdef SK_USEIOSPACE
378 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
379 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
380 #else
381 CSR_WRITE_4(sc, reg, val);
382 #endif
383 return;
384 }
385
386 static void
387 sk_win_write_2(sc, reg, val)
388 struct sk_softc *sc;
389 int reg;
390 u_int32_t val;
391 {
392 #ifdef SK_USEIOSPACE
393 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
394 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
395 #else
396 CSR_WRITE_2(sc, reg, val);
397 #endif
398 return;
399 }
400
401 static void
402 sk_win_write_1(sc, reg, val)
403 struct sk_softc *sc;
404 int reg;
405 u_int32_t val;
406 {
407 #ifdef SK_USEIOSPACE
408 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
409 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
410 #else
411 CSR_WRITE_1(sc, reg, val);
412 #endif
413 return;
414 }
415
416 /*
417 * The VPD EEPROM contains Vital Product Data, as suggested in
418 * the PCI 2.1 specification. The VPD data is separated into areas
419 * denoted by resource IDs. The SysKonnect VPD contains an ID string
420 * resource (the name of the adapter), a read-only area resource
421 * containing various key/data fields and a read/write area which
422 * can be used to store asset management information or log messages.
423 * We read the ID string and read-only into buffers attached to
424 * the controller softc structure for later use. At the moment,
425 * we only use the ID string during skc_attach().
426 */
427 static u_int8_t
428 sk_vpd_readbyte(sc, addr)
429 struct sk_softc *sc;
430 int addr;
431 {
432 int i;
433
434 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
435 for (i = 0; i < SK_TIMEOUT; i++) {
436 DELAY(1);
437 if (sk_win_read_2(sc,
438 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
439 break;
440 }
441
442 if (i == SK_TIMEOUT)
443 return(0);
444
445 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
446 }
447
448 static void
449 sk_vpd_read_res(sc, res, addr)
450 struct sk_softc *sc;
451 struct vpd_res *res;
452 int addr;
453 {
454 int i;
455 u_int8_t *ptr;
456
457 ptr = (u_int8_t *)res;
458 for (i = 0; i < sizeof(struct vpd_res); i++)
459 ptr[i] = sk_vpd_readbyte(sc, i + addr);
460
461 return;
462 }
463
464 static void
465 sk_vpd_read(sc)
466 struct sk_softc *sc;
467 {
468 int pos = 0, i;
469 struct vpd_res res;
470
471 if (sc->sk_vpd_prodname != NULL)
472 free(sc->sk_vpd_prodname, M_DEVBUF);
473 if (sc->sk_vpd_readonly != NULL)
474 free(sc->sk_vpd_readonly, M_DEVBUF);
475 sc->sk_vpd_prodname = NULL;
476 sc->sk_vpd_readonly = NULL;
477
478 sk_vpd_read_res(sc, &res, pos);
479
480 /*
481 * Bail out quietly if the eeprom appears to be missing or empty.
482 */
483 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
484 return;
485
486 if (res.vr_id != VPD_RES_ID) {
487 printf("skc%d: bad VPD resource id: expected %x got %x\n",
488 sc->sk_unit, VPD_RES_ID, res.vr_id);
489 return;
490 }
491
492 pos += sizeof(res);
493 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
494 for (i = 0; i < res.vr_len; i++)
495 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
496 sc->sk_vpd_prodname[i] = '\0';
497 pos += i;
498
499 sk_vpd_read_res(sc, &res, pos);
500
501 if (res.vr_id != VPD_RES_READ) {
502 printf("skc%d: bad VPD resource id: expected %x got %x\n",
503 sc->sk_unit, VPD_RES_READ, res.vr_id);
504 return;
505 }
506
507 pos += sizeof(res);
508 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
509 for (i = 0; i < res.vr_len; i++)
510 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
511
512 return;
513 }
514
515 static int
516 sk_miibus_readreg(dev, phy, reg)
517 device_t dev;
518 int phy, reg;
519 {
520 struct sk_if_softc *sc_if;
521
522 sc_if = device_get_softc(dev);
523
524 switch(sc_if->sk_softc->sk_type) {
525 case SK_GENESIS:
526 return(sk_xmac_miibus_readreg(sc_if, phy, reg));
527 case SK_YUKON:
528 case SK_YUKON_LITE:
529 case SK_YUKON_LP:
530 return(sk_marv_miibus_readreg(sc_if, phy, reg));
531 }
532
533 return(0);
534 }
535
536 static int
537 sk_miibus_writereg(dev, phy, reg, val)
538 device_t dev;
539 int phy, reg, val;
540 {
541 struct sk_if_softc *sc_if;
542
543 sc_if = device_get_softc(dev);
544
545 switch(sc_if->sk_softc->sk_type) {
546 case SK_GENESIS:
547 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
548 case SK_YUKON:
549 case SK_YUKON_LITE:
550 case SK_YUKON_LP:
551 return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
552 }
553
554 return(0);
555 }
556
557 static void
558 sk_miibus_statchg(dev)
559 device_t dev;
560 {
561 struct sk_if_softc *sc_if;
562
563 sc_if = device_get_softc(dev);
564
565 switch(sc_if->sk_softc->sk_type) {
566 case SK_GENESIS:
567 sk_xmac_miibus_statchg(sc_if);
568 break;
569 case SK_YUKON:
570 case SK_YUKON_LITE:
571 case SK_YUKON_LP:
572 sk_marv_miibus_statchg(sc_if);
573 break;
574 }
575
576 return;
577 }
578
/*
 * Read a PHY register through the XMAC's MII interface.  For external
 * (non-XMAC) PHYs the hardware needs a poll for PHYDATARDY before the
 * data register is valid.  Returns 0 on timeout or for a disallowed
 * PHY address, which callers cannot distinguish from a real 0 value.
 */
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	int i;

	/* The XMAC's internal PHY only answers at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/*
	 * Dummy read of the data register before polling.
	 * NOTE(review): presumably this latches/starts the MII cycle on
	 * the XMAC -- confirm against the XMAC II datasheet.
	 */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: wait for the read data to become valid. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
			return(0);
		}
	}
	DELAY(1);
	/* Reuse 'i' to hold the 16-bit register value being returned. */
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	SK_IF_UNLOCK(sc_if);
	return(i);
}
612
/*
 * Write a PHY register through the XMAC's MII interface.  Waits for the
 * MII unit to go idle both before and after writing the data register.
 * Returns ETIMEDOUT only if the pre-write wait fails; a post-write
 * timeout is merely logged and 0 is returned.
 */
static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc *sc_if;
	int phy, reg, val;
{
	int i;

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/*
	 * Wait for any previous MII operation to finish.
	 * NOTE(review): unlike the post-write loop below, this loop has
	 * no DELAY() between polls -- confirm that is intentional.
	 */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(ETIMEDOUT);
	}

	/* Writing the data register starts the MII write cycle. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	SK_IF_UNLOCK(sc_if);
	if (i == SK_TIMEOUT)
		printf("sk%d: phy write timed out\n", sc_if->sk_unit);

	return(0);
}
645
646 static void
647 sk_xmac_miibus_statchg(sc_if)
648 struct sk_if_softc *sc_if;
649 {
650 struct mii_data *mii;
651
652 mii = device_get_softc(sc_if->sk_miibus);
653
654 SK_IF_LOCK(sc_if);
655 /*
656 * If this is a GMII PHY, manually set the XMAC's
657 * duplex mode accordingly.
658 */
659 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
660 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
661 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
662 } else {
663 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
664 }
665 }
666 SK_IF_UNLOCK(sc_if);
667
668 return;
669 }
670
/*
 * Read a PHY register through the Yukon GMAC's SMI interface.  Only PHY
 * address 0 with a Marvell copper/fiber PHY type is serviced.  Returns
 * 0 on timeout or for an unsupported PHY, indistinguishable from a
 * genuine 0 register value.
 */
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	u_int16_t val;
	int i;

	/* Only the single on-chip Marvell PHY at address 0 is supported. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_IF_LOCK(sc_if);
	/* Issue the SMI read command for (phy, reg). */
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the controller flags the read data as valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n",
		       sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
	SK_IF_UNLOCK(sc_if);

	return(val);
}
708
709 static int
710 sk_marv_miibus_writereg(sc_if, phy, reg, val)
711 struct sk_if_softc *sc_if;
712 int phy, reg, val;
713 {
714 int i;
715
716 SK_IF_LOCK(sc_if);
717 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
718 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
719 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
720
721 for (i = 0; i < SK_TIMEOUT; i++) {
722 DELAY(1);
723 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
724 break;
725 }
726 SK_IF_UNLOCK(sc_if);
727
728 return(0);
729 }
730
/*
 * Link-status change callback for the Yukon GMAC.  Intentionally empty:
 * no MAC-side adjustment is performed here.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
}
737
738 #define HASH_BITS 6
739
740 static u_int32_t
741 sk_xmchash(addr)
742 const uint8_t *addr;
743 {
744 uint32_t crc;
745
746 /* Compute CRC for the address value. */
747 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
748
749 return (~crc & ((1 << HASH_BITS) - 1));
750 }
751
752 /* gmchash is just a big endian crc */
753 static u_int32_t
754 sk_gmchash(addr)
755 const uint8_t *addr;
756 {
757 uint32_t crc;
758
759 /* Compute CRC for the address value. */
760 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
761
762 return (crc & ((1 << HASH_BITS) - 1));
763 }
764
765 static void
766 sk_setfilt(sc_if, addr, slot)
767 struct sk_if_softc *sc_if;
768 caddr_t addr;
769 int slot;
770 {
771 int base;
772
773 base = XM_RXFILT_ENTRY(slot);
774
775 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
776 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
777 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
778
779 return;
780 }
781
/*
 * Rebuild the multicast filter for one port.  On GEnesis the first
 * XM_RXFILT_MAX-1 groups go into the XMAC perfect filter and the rest
 * into the 64-bit hash table; on Yukon everything goes into the GMAC's
 * four 16-bit hash registers.  ALLMULTI/PROMISC opens the hash fully.
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h = 0, i;
	struct ifmultiaddr *ifma;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };


	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Slot 0 is skipped: it holds the station address. */
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
				h = sk_gmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			}
			/* Set one bit in the 64-bit hash (two 32-bit words). */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		IF_ADDR_UNLOCK(ifp);
	}

	/* Push the computed hash words out to the hardware. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
			       XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}
874
875 static void
876 sk_setpromisc(sc_if)
877 struct sk_if_softc *sc_if;
878 {
879 struct sk_softc *sc = sc_if->sk_softc;
880 struct ifnet *ifp = &sc_if->arpcom.ac_if;
881
882 switch(sc->sk_type) {
883 case SK_GENESIS:
884 if (ifp->if_flags & IFF_PROMISC) {
885 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
886 } else {
887 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
888 }
889 break;
890 case SK_YUKON:
891 case SK_YUKON_LITE:
892 case SK_YUKON_LP:
893 if (ifp->if_flags & IFF_PROMISC) {
894 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
895 YU_RCR_UFLEN | YU_RCR_MUFLEN);
896 } else {
897 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
898 YU_RCR_UFLEN | YU_RCR_MUFLEN);
899 }
900 break;
901 }
902
903 return;
904 }
905
906 static int
907 sk_init_rx_ring(sc_if)
908 struct sk_if_softc *sc_if;
909 {
910 struct sk_chain_data *cd = &sc_if->sk_cdata;
911 struct sk_ring_data *rd = sc_if->sk_rdata;
912 int i;
913
914 bzero((char *)rd->sk_rx_ring,
915 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
916
917 for (i = 0; i < SK_RX_RING_CNT; i++) {
918 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
919 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
920 return(ENOBUFS);
921 if (i == (SK_RX_RING_CNT - 1)) {
922 cd->sk_rx_chain[i].sk_next =
923 &cd->sk_rx_chain[0];
924 rd->sk_rx_ring[i].sk_next =
925 vtophys(&rd->sk_rx_ring[0]);
926 } else {
927 cd->sk_rx_chain[i].sk_next =
928 &cd->sk_rx_chain[i + 1];
929 rd->sk_rx_ring[i].sk_next =
930 vtophys(&rd->sk_rx_ring[i + 1]);
931 }
932 }
933
934 sc_if->sk_cdata.sk_rx_prod = 0;
935 sc_if->sk_cdata.sk_rx_cons = 0;
936
937 return(0);
938 }
939
940 static void
941 sk_init_tx_ring(sc_if)
942 struct sk_if_softc *sc_if;
943 {
944 struct sk_chain_data *cd = &sc_if->sk_cdata;
945 struct sk_ring_data *rd = sc_if->sk_rdata;
946 int i;
947
948 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
949 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
950
951 for (i = 0; i < SK_TX_RING_CNT; i++) {
952 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
953 if (i == (SK_TX_RING_CNT - 1)) {
954 cd->sk_tx_chain[i].sk_next =
955 &cd->sk_tx_chain[0];
956 rd->sk_tx_ring[i].sk_next =
957 vtophys(&rd->sk_tx_ring[0]);
958 } else {
959 cd->sk_tx_chain[i].sk_next =
960 &cd->sk_tx_chain[i + 1];
961 rd->sk_tx_ring[i].sk_next =
962 vtophys(&rd->sk_tx_ring[i + 1]);
963 }
964 }
965
966 sc_if->sk_cdata.sk_tx_prod = 0;
967 sc_if->sk_cdata.sk_tx_cons = 0;
968 sc_if->sk_cdata.sk_tx_cnt = 0;
969
970 return;
971 }
972
973 static int
974 sk_newbuf(sc_if, c, m)
975 struct sk_if_softc *sc_if;
976 struct sk_chain *c;
977 struct mbuf *m;
978 {
979 struct mbuf *m_new = NULL;
980 struct sk_rx_desc *r;
981
982 if (m == NULL) {
983 caddr_t *buf = NULL;
984
985 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
986 if (m_new == NULL)
987 return(ENOBUFS);
988
989 /* Allocate the jumbo buffer */
990 buf = sk_jalloc(sc_if);
991 if (buf == NULL) {
992 m_freem(m_new);
993 #ifdef SK_VERBOSE
994 printf("sk%d: jumbo allocation failed "
995 "-- packet dropped!\n", sc_if->sk_unit);
996 #endif
997 return(ENOBUFS);
998 }
999
1000 /* Attach the buffer to the mbuf */
1001 MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
1002 (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
1003 m_new->m_data = (void *)buf;
1004 m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
1005 } else {
1006 /*
1007 * We're re-using a previously allocated mbuf;
1008 * be sure to re-init pointers and lengths to
1009 * default values.
1010 */
1011 m_new = m;
1012 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
1013 m_new->m_data = m_new->m_ext.ext_buf;
1014 }
1015
1016 /*
1017 * Adjust alignment so packet payload begins on a
1018 * longword boundary. Mandatory for Alpha, useful on
1019 * x86 too.
1020 */
1021 m_adj(m_new, ETHER_ALIGN);
1022
1023 r = c->sk_desc;
1024 c->sk_mbuf = m_new;
1025 r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1026 r->sk_ctl = m_new->m_len | SK_RXSTAT;
1027
1028 return(0);
1029 }
1030
1031 /*
1032 * Allocate jumbo buffer storage. The SysKonnect adapters support
1033 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1034 * use them in their drivers. In order for us to use them, we need
1035 * large 9K receive buffers, however standard mbuf clusters are only
1036 * 2048 bytes in size. Consequently, we need to allocate and manage
1037 * our own jumbo buffer pool. Fortunately, this does not require an
1038 * excessive amount of additional code.
1039 */
/*
 * Allocate the jumbo-buffer pool: one contiguous SK_JMEM chunk, carved
 * into SK_JSLOTS buffers of SK_JLEN bytes, each tracked by a pool entry
 * on the free list.  Also creates the mutex protecting the two lists.
 * Returns 0 on success, ENOBUFS on any allocation failure (in which
 * case everything allocated so far is torn down again).
 */
static int
sk_alloc_jumbo_mem(sc_if)
	struct sk_if_softc *sc_if;
{
	caddr_t ptr;
	register int i;
	struct sk_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
		return(ENOBUFS);
	}

	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);

	SLIST_INIT(&sc_if->sk_jfree_listhead);
	SLIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/* Undo everything, including the contigmalloc. */
			sk_free_jumbo_mem(sc_if);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("sk%d: no memory for jumbo "
			    "buffer queue!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
1086
/*
 * Tear down the jumbo-buffer pool: wait (up to 5 seconds) for any
 * in-flight buffers to be returned, free the pool entries, destroy the
 * list mutex and release the backing contigmalloc'ed memory.
 */
static void
sk_free_jumbo_mem(sc_if)
	struct sk_if_softc *sc_if;
{
	int retval = 0;
	struct sk_jpool_entry *entry;

	SK_JLIST_LOCK(sc_if);

	/*
	 * Wait for the "inuse" list to drain.
	 * NOTE(review): 'retval' is assigned but never examined, and the
	 * memory is freed below even if the wait times out with buffers
	 * still lent out -- confirm callers guarantee the pool is idle.
	 */
	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
		retval = msleep(sc_if, &sc_if->sk_jlist_mtx, PZERO,
		    "skfjm", 5 * hz);

	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	SK_JLIST_UNLOCK(sc_if);

	mtx_destroy(&sc_if->sk_jlist_mtx);

	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);

	return;
}
1115
1116 /*
1117 * Allocate a jumbo buffer.
1118 */
1119 static void *
1120 sk_jalloc(sc_if)
1121 struct sk_if_softc *sc_if;
1122 {
1123 struct sk_jpool_entry *entry;
1124
1125 SK_JLIST_LOCK(sc_if);
1126
1127 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1128
1129 if (entry == NULL) {
1130 #ifdef SK_VERBOSE
1131 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1132 #endif
1133 SK_JLIST_UNLOCK(sc_if);
1134 return(NULL);
1135 }
1136
1137 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1138 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1139
1140 SK_JLIST_UNLOCK(sc_if);
1141
1142 return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1143 }
1144
/*
 * Release a jumbo buffer.  Installed as the external-buffer free
 * routine for jumbo mbufs: buf is the jumbo slot being returned and
 * args is the owning interface softc.
 */
static void
sk_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct sk_if_softc	*sc_if;
	int			i;
	struct sk_jpool_entry	*entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;
	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	SK_JLIST_LOCK(sc_if);

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	/*
	 * Bookkeeping entries are interchangeable tokens, so any entry
	 * from the in-use list may be recycled; its slot number is simply
	 * rewritten to match the buffer being returned.
	 */
	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
	/* Wake sk_free_jumbo_mem() if it is waiting for the pool to drain. */
	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
		wakeup(sc_if);

	SK_JLIST_UNLOCK(sc_if);
	return;
}
1183
1184 /*
1185 * Set media options.
1186 */
1187 static int
1188 sk_ifmedia_upd(ifp)
1189 struct ifnet *ifp;
1190 {
1191 struct sk_if_softc *sc_if = ifp->if_softc;
1192 struct mii_data *mii;
1193
1194 mii = device_get_softc(sc_if->sk_miibus);
1195 sk_init(sc_if);
1196 mii_mediachg(mii);
1197
1198 return(0);
1199 }
1200
1201 /*
1202 * Report current media status.
1203 */
1204 static void
1205 sk_ifmedia_sts(ifp, ifmr)
1206 struct ifnet *ifp;
1207 struct ifmediareq *ifmr;
1208 {
1209 struct sk_if_softc *sc_if;
1210 struct mii_data *mii;
1211
1212 sc_if = ifp->if_softc;
1213 mii = device_get_softc(sc_if->sk_miibus);
1214
1215 mii_pollstat(mii);
1216 ifmr->ifm_active = mii->mii_media_active;
1217 ifmr->ifm_status = mii->mii_media_status;
1218
1219 return;
1220 }
1221
1222 static int
1223 sk_ioctl(ifp, command, data)
1224 struct ifnet *ifp;
1225 u_long command;
1226 caddr_t data;
1227 {
1228 struct sk_if_softc *sc_if = ifp->if_softc;
1229 struct ifreq *ifr = (struct ifreq *) data;
1230 int error = 0;
1231 struct mii_data *mii;
1232
1233 switch(command) {
1234 case SIOCSIFMTU:
1235 if (ifr->ifr_mtu > SK_JUMBO_MTU)
1236 error = EINVAL;
1237 else {
1238 ifp->if_mtu = ifr->ifr_mtu;
1239 ifp->if_flags &= ~IFF_RUNNING;
1240 sk_init(sc_if);
1241 }
1242 break;
1243 case SIOCSIFFLAGS:
1244 SK_IF_LOCK(sc_if);
1245 if (ifp->if_flags & IFF_UP) {
1246 if (ifp->if_flags & IFF_RUNNING) {
1247 if ((ifp->if_flags ^ sc_if->sk_if_flags)
1248 & IFF_PROMISC) {
1249 sk_setpromisc(sc_if);
1250 sk_setmulti(sc_if);
1251 }
1252 } else
1253 sk_init(sc_if);
1254 } else {
1255 if (ifp->if_flags & IFF_RUNNING)
1256 sk_stop(sc_if);
1257 }
1258 sc_if->sk_if_flags = ifp->if_flags;
1259 SK_IF_UNLOCK(sc_if);
1260 error = 0;
1261 break;
1262 case SIOCADDMULTI:
1263 case SIOCDELMULTI:
1264 if (ifp->if_flags & IFF_RUNNING) {
1265 SK_IF_LOCK(sc_if);
1266 sk_setmulti(sc_if);
1267 SK_IF_UNLOCK(sc_if);
1268 error = 0;
1269 }
1270 break;
1271 case SIOCGIFMEDIA:
1272 case SIOCSIFMEDIA:
1273 mii = device_get_softc(sc_if->sk_miibus);
1274 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1275 break;
1276 default:
1277 error = ether_ioctl(ifp, command, data);
1278 break;
1279 }
1280
1281 return(error);
1282 }
1283
1284 /*
1285 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1286 * IDs against our list and return a device name if we find a match.
1287 */
1288 static int
1289 skc_probe(dev)
1290 device_t dev;
1291 {
1292 struct sk_softc *sc;
1293 struct sk_type *t = sk_devs;
1294
1295 sc = device_get_softc(dev);
1296
1297 while(t->sk_name != NULL) {
1298 if ((pci_get_vendor(dev) == t->sk_vid) &&
1299 (pci_get_device(dev) == t->sk_did)) {
1300 device_set_desc(dev, t->sk_name);
1301 return (BUS_PROBE_DEFAULT);
1302 }
1303 t++;
1304 }
1305
1306 return(ENXIO);
1307 }
1308
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Also performs the minimal post-reset setup common to all chip
 * flavors: packet arbiter (GEnesis only), RAM interface and interrupt
 * moderation.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{
	/* Assert software and master reset; Yukons also need link reset. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Deassert in the reverse order, with settle delays between. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1355
1356 static int
1357 sk_probe(dev)
1358 device_t dev;
1359 {
1360 struct sk_softc *sc;
1361
1362 sc = device_get_softc(device_get_parent(dev));
1363
1364 /*
1365 * Not much to do here. We always know there will be
1366 * at least one XMAC present, and if there are two,
1367 * skc_attach() will create a second device instance
1368 * for us.
1369 */
1370 switch (sc->sk_type) {
1371 case SK_GENESIS:
1372 device_set_desc(dev, "XaQti Corp. XMAC II");
1373 break;
1374 case SK_YUKON:
1375 case SK_YUKON_LITE:
1376 case SK_YUKON_LP:
1377 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1378 break;
1379 }
1380
1381 return (BUS_PROBE_DEFAULT);
1382 }
1383
/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 *
 * Allocates descriptor rings and jumbo buffers, reads the station
 * address and RAM buffer split from the shared registers, sets up the
 * ifnet and attaches the PHY.  On any failure the partially attached
 * port is torn down via sk_detach().
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			i, port, error;

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	/* skc_attach() stashed our port number in the ivars. */
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_dev = dev;
	sc_if->sk_unit = device_get_unit(dev);
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	/* Each port drives its own synchronous transmit BMU. */
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/* Allocate the descriptor queues. */
	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
	    M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_rdata == NULL) {
		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf("sk%d: jumbo buffer allocation failed\n",
		    sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Initialize the embedded ifnet. */
	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	callout_handle_init(&sc_if->sk_tick_ch);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_LOCK(sc);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algotithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split the whole RAM between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets half, again split RX/TX. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	switch(sc_if->sk_phytype) {
	case SK_PHYTYPE_XMAC:
		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
		break;
	case SK_PHYTYPE_BCOM:
		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
		break;
	case SK_PHYTYPE_MARV_COPPER:
		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
		break;
	default:
		printf("skc%d: unsupported PHY type: %d\n",
		    sc->sk_unit, sc_if->sk_phytype);
		error = ENODEV;
		SK_UNLOCK(sc);
		goto fail;
	}


	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	SK_UNLOCK(sc);
	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
	SK_LOCK(sc);

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}

	/* Drop the lock before probing; miibus may sleep. */
	SK_UNLOCK(sc);
	if (mii_phy_probe(dev, &sc_if->sk_miibus,
	    sk_ifmedia_upd, sk_ifmedia_sts)) {
		/* NOTE(review): prefix says "skc" but unit is the sk unit. */
		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
1556
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * This is the parent (controller) attach: it maps the registers,
 * identifies the chip, sizes the on-board RAM, determines the media
 * type and product name, creates one "sk" child per MAC port and
 * finally hooks the interrupt.  All error paths funnel through
 * skc_detach(), which tolerates partial initialization.
 */
static int
skc_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	int			unit, error = 0, rid, *port;
	uint8_t			skrs;
	char			*pname, *revstr;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = SK_RID;
	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);

	if (sc->sk_res == NULL) {
		printf("sk%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->sk_btag = rman_get_bustag(sc->sk_res);
	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

	/* Chip type/revision steer most of the setup below. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
		    unit, sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sk_irq == NULL) {
		printf("skc%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	sk_reset(sc);

	sc->sk_unit = unit;

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf("skc%d: unknown ram size: %d\n",
			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		/* Yukon encodes RAM size in 4K units; 0 means 128K. */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_T;
		break;
	default:
		printf("skc%d: unknown media type: 0x%x\n",
		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
		error = ENXIO;
		goto fail;
	}

	/* Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to freqently be bogus. */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T:
		/* Stay with VPD PN. */
		/* NOTE(review): sk_vpd_prodname may be NULL if the VPD
		 * read failed; the %s below would then print "(null)". */
		pname = sc->sk_vpd_prodname;
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			pname = sc->sk_vpd_prodname;
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			/* A0 silently ignores writes to the top byte. */
			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
		    "chipver=%02x, rev=%x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name. */
	device_printf(dev, "%s rev. %s(0x%x)\n", pname, revstr, sc->sk_rev);
	/* Create the per-port child device(s). */
	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	/* Dual-MAC boards get a second child for port B. */
	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	/* Attach the children created above. */
	bus_generic_attach(dev);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		printf("skc%d: couldn't set up irq\n", unit);
		goto fail;
	}

fail:
	if (error)
		skc_detach(dev);

	return(error);
}
1813
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	/* Free memory only if the corresponding allocation succeeded. */
	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
		sk_free_jumbo_mem(sc_if);
	if (sc_if->sk_rdata != NULL) {
		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
		    M_DEVBUF);
	}
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1862
/*
 * Controller detach: delete the per-port children (freeing their port
 * ivars first), release VPD strings, interrupt and register resources,
 * and destroy the softc mutex.  Safe against partial attach -- every
 * resource is checked before release.
 */
static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			/* ivars were malloc'd in skc_attach(). */
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);

	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	if (sc->sk_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	if (sc->sk_res)
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1900
/*
 * Encapsulate an mbuf chain into the transmit descriptor ring starting
 * at *txidx.  On success *txidx is advanced past the descriptors used
 * and the chain's head mbuf is recorded for later release in
 * sk_txeof().  Returns ENOBUFS when the ring lacks room.
 */
static int
sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	SK_IF_LOCK_ASSERT(sc_if);

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * The first descriptor is left un-owned for now;
			 * OWN on the head is flipped last (below) so the
			 * NIC sees a fully built chain.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* The loop above runs to completion, so m is NULL here. */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the tail, remember the mbuf, then hand the chain over. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1952
/*
 * Transmit start routine: drain the interface send queue into the TX
 * descriptor ring, then kick the transmit BMU once if anything was
 * queued.  Stops early (setting IFF_OACTIVE) when the ring fills.
 */
static void
sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop as soon as we hit a slot whose mbuf hasn't been reaped. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
	SK_IF_UNLOCK(sc_if);

	return;
}
2004
2005
2006 static void
2007 sk_watchdog(ifp)
2008 struct ifnet *ifp;
2009 {
2010 struct sk_if_softc *sc_if;
2011
2012 sc_if = ifp->if_softc;
2013
2014 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
2015 ifp->if_flags &= ~IFF_RUNNING;
2016 sk_init(sc_if);
2017
2018 return;
2019 }
2020
2021 static void
2022 skc_shutdown(dev)
2023 device_t dev;
2024 {
2025 struct sk_softc *sc;
2026
2027 sc = device_get_softc(dev);
2028 SK_LOCK(sc);
2029
2030 /* Turn off the 'driver is loaded' LED. */
2031 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2032
2033 /*
2034 * Reset the GEnesis controller. Doing this should also
2035 * assert the resets on the attached XMAC(s).
2036 */
2037 sk_reset(sc);
2038 SK_UNLOCK(sc);
2039
2040 return;
2041 }
2042
/*
 * Receive completion: walk the RX ring from the current producer index,
 * handing each completed frame up the stack after replacing (or, on
 * allocation failure, copying and recycling) its jumbo buffer.
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	SK_LOCK_ASSERT(sc);

	/* Process descriptors until we find one the NIC still owns. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Bad frame: count it and recycle the buffer in place. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		/* Drop the lock across the stack input call. */
		SK_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SK_LOCK(sc);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
2112
/*
 * Transmit completion: reap descriptors the NIC has released, free the
 * associated mbufs, clear the watchdog when the ring is empty, and
 * reopen the send queue once there is room again.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Stop at the first descriptor the NIC still owns. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Only the last fragment of a frame counts as a packet. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}

	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
		/* Ring drained: disarm the watchdog. */
		ifp->if_timer = 0;
	} else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
2154
/*
 * Periodic timer used while the XMAC link is down: polls the link-sync
 * GPIO pin and, once the link is stable, re-enables the GP0 link
 * interrupt and stops rescheduling itself.  Broadcom PHYs are handled
 * by their own interrupt path instead.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Interface went down: nothing to poll. */
	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Broadcom PHYs use the interrupt-driven path instead. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: poll again in one second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Read ISR, presumably to clear any latched GP0 event -- confirm. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
2207
/*
 * Service a Broadcom PHY interrupt: pause the MAC, read (and thereby
 * clear) the PHY interrupt status, track link up/down transitions and
 * drive the link LED accordingly, then re-enable the MAC.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	/* Pause RX/TX while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface not running: just reinitialize the MAC and bail. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and note it. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link (re)acquired: mask further link-change IRQs. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Autoneg still in progress: keep polling. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Re-enable the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
2261
/*
 * Service an XMAC interrupt: start the link-poll timer on GP0 (link
 * down) and autoneg-done events, and flush the FIFOs on TX underrun /
 * RX overrun.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() re-enables it. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read ISR, presumably to ack bits latched meanwhile -- confirm. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
2297
2298 static void
2299 sk_intr_yukon(sc_if)
2300 struct sk_if_softc *sc_if;
2301 {
2302 int status;
2303
2304 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2305
2306 return;
2307 }
2308
2309 static void
2310 sk_intr(xsc)
2311 void *xsc;
2312 {
2313 struct sk_softc *sc = xsc;
2314 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
2315 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2316 u_int32_t status;
2317
2318 SK_LOCK(sc);
2319
2320 sc_if0 = sc->sk_if[SK_PORT_A];
2321 sc_if1 = sc->sk_if[SK_PORT_B];
2322
2323 if (sc_if0 != NULL)
2324 ifp0 = &sc_if0->arpcom.ac_if;
2325 if (sc_if1 != NULL)
2326 ifp1 = &sc_if1->arpcom.ac_if;
2327
2328 for (;;) {
2329 status = CSR_READ_4(sc, SK_ISSR);
2330 if (!(status & sc->sk_intrmask))
2331 break;
2332
2333 /* Handle receive interrupts first. */
2334 if (status & SK_ISR_RX1_EOF) {
2335 sk_rxeof(sc_if0);
2336 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2337 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2338 }
2339 if (status & SK_ISR_RX2_EOF) {
2340 sk_rxeof(sc_if1);
2341 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2342 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2343 }
2344
2345 /* Then transmit interrupts. */
2346 if (status & SK_ISR_TX1_S_EOF) {
2347 sk_txeof(sc_if0);
2348 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2349 SK_TXBMU_CLR_IRQ_EOF);
2350 }
2351 if (status & SK_ISR_TX2_S_EOF) {
2352 sk_txeof(sc_if1);
2353 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2354 SK_TXBMU_CLR_IRQ_EOF);
2355 }
2356
2357 /* Then MAC interrupts. */
2358 if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2359 if (sc->sk_type == SK_GENESIS)
2360 sk_intr_xmac(sc_if0);
2361 else
2362 sk_intr_yukon(sc_if0);
2363 }
2364
2365 if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2366 if (sc->sk_type == SK_GENESIS)
2367 sk_intr_xmac(sc_if1);
2368 else
2369 sk_intr_yukon(sc_if1);
2370 }
2371
2372 if (status & SK_ISR_EXTERNAL_REG) {
2373 if (ifp0 != NULL &&
2374 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2375 sk_intr_bcom(sc_if0);
2376 if (ifp1 != NULL &&
2377 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2378 sk_intr_bcom(sc_if1);
2379 }
2380 }
2381
2382 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2383
2384 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
2385 sk_start(ifp0);
2386 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
2387 sk_start(ifp1);
2388
2389 SK_UNLOCK(sc);
2390
2391 return;
2392 }
2393
/*
 * Initialize the XMAC II MAC of a GEnesis port: unreset the MAC,
 * apply PHY-specific workarounds, program the station address and
 * RX/TX options, set the interrupt mask, and configure the MAC
 * arbiter according to the XMAC revision.
 */
static void
sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	/*
	 * Magic register/value pairs for the early-BCM5400 errata
	 * workaround applied below; terminated by a { 0, 0 } entry.
	 */
	struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then set its interrupt mask register. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/*
	 * Set station address, 16 bits at a time.  Assumes ac_enaddr
	 * is at least 2-byte aligned -- TODO confirm.
	 */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Honor IFF_BROADCAST by (not) filtering broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Accept oversized frames only when a jumbo MTU is configured. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/*
	 * Clear (by reading ISR) and enable interrupts.  With the
	 * XMAC's internal PHY the XM_INTRS set is unmasked; for
	 * external PHYs all XMAC interrupts are masked (0xFFFF) --
	 * presumably because those PHYs report via the external-
	 * register interrupt instead (see sk_intr()).
	 */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/*
		 * NOTE(review): C1 uses the B2 recovery value here;
		 * possibly intentional (no C1-specific value exists?)
		 * -- verify.
		 */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	/* Assume link is up until sk_tick()/interrupts say otherwise. */
	sc_if->sk_link = 1;

	return;
}
2558
/*
 * Initialize the GMAC of a Yukon-family port: reset the GMAC and
 * GPHY, program the PHY control word for the fitted media, load the
 * MAC addresses and serial/receive/transmit parameters, and bring
 * the RX/TX MAC FIFOs online.
 */
static void
sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy;
	u_int16_t		reg;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	int			i;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev == SK_YUKON_LITE_REV_A3) {
		/* Take PHY out of reset. */
		sk_win_write_4(sc, SK_GPIO,
			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
	}

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	/*
	 * NOTE(review): RESET_CLEAR immediately followed by RESET_SET
	 * looks odd but matches the original pulse sequence -- verify
	 * against the GMAC reset procedure before changing.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	/* Base GPHY control word. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Select fiber or copper mode to match the board's PMD type. */
	switch(sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	/* Pulse the GPHY reset with the new control word latched. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register: jumbo frame length only for jumbo MTUs */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's address, two bytes per register, little-endian. */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->arpcom.ac_enaddr[i * 2] |
			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Copy Source Address 2 from this port's window registers. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/*
	 * Interrupt mask registers for counter overflows are written
	 * with 0; presumably this disables those interrupts despite
	 * the original "enable" comment -- TODO confirm against the
	 * GMAC register documentation.
	 */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
2667
/*
 * Bring a port up.  Note that to properly initialize any part of the
 * GEnesis chip, you first have to take it out of reset mode.  This
 * routine stops the port, (re)configures LEDs, MAC, FIFOs, RAM
 * buffers and BMUs, initializes the descriptor rings, and finally
 * unmasks this port's interrupts and enables the RX/TX machinery.
 * xsc points at the per-port softc (if_init / ioctl path).
 */
static void
sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Already running; nothing to do. */
	if (ifp->if_flags & IFF_RUNNING) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 * NOTE(review): sk_stop() itself acquires SK_IF_LOCK, which we
	 * already hold here -- this relies on the lock being recursive
	 * (or the macros being benign); verify the SK_IF_LOCK definition.
	 */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	/* Program the PHY for the currently-selected media. */
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
		SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers: window into this port's slice of RAM. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point them at the start of each ring. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; bail out and re-stop the port on failure. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling: unmask this port's sources. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		/*
		 * Enable the GMAC transmitter and receiver; clear the
		 * manual speed/duplex enables (presumably leaving
		 * autonegotiation in control -- confirm).
		 */
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Mark the interface up and ready for output. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}
2814
/*
 * Stop a port: cancel the tick timeout, put the PHY and MAC back
 * into reset, take the RAM buffers and BMUs offline, mask this
 * port's interrupts, and free any mbufs still owned by the RX/TX
 * rings.  Leaves the interface marked down (!IFF_RUNNING).
 */
static void
sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	SK_IF_LOCK(sc_if);
	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Stop the link-state polling timeout. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the XMAC reset poke below is issued even for
	 * Yukon parts, which take the Yukon branch of the switch --
	 * presumably harmless there, but verify.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	/* Take RAM buffers and BMUs offline, stop the LEDs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/*
	 * NOTE(review): the TX LED is stopped with the RX constant
	 * SK_RXLEDCTL_COUNTER_STOP; likely the same bit value as the
	 * TX variant, but confirm.
	 */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear pending XMAC interrupts and mask them all (0xFFFF). */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
	SK_IF_UNLOCK(sc_if);
	return;
}
Cache object: 342385a669aff9205f9c2ec3f38f3698
|