FreeBSD/Linux Kernel Cross Reference
sys/pci/if_sk.c
1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34 /*-
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/6.1/sys/pci/if_sk.c 156820 2006-03-17 21:30:57Z glebius $");
52
53 /*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71 /*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/module.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97 #include <sys/sysctl.h>
98
99 #include <net/if.h>
100 #include <net/if_arp.h>
101 #include <net/ethernet.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_types.h>
105
106 #include <net/bpf.h>
107
108 #include <vm/vm.h> /* for vtophys */
109 #include <vm/pmap.h> /* for vtophys */
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/brgphyreg.h>
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121
122 #if 0
123 #define SK_USEIOSPACE
124 #endif
125
126 #include <pci/if_skreg.h>
127 #include <pci/xmaciireg.h>
128 #include <pci/yukonreg.h>
129
130 MODULE_DEPEND(sk, pci, 1, 1, 1);
131 MODULE_DEPEND(sk, ether, 1, 1, 1);
132 MODULE_DEPEND(sk, miibus, 1, 1, 1);
133
134 /* "controller miibus0" required. See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136
137 #ifndef lint
138 static const char rcsid[] =
139 "$FreeBSD: releng/6.1/sys/pci/if_sk.c 156820 2006-03-17 21:30:57Z glebius $";
140 #endif
141
/*
 * Table of supported PCI vendor/device ID pairs, terminated by an
 * all-zero sentinel entry.  Presumably scanned by skc_probe() (body
 * not in this chunk) to match the controller during bus enumeration.
 */
static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
180
181 static int skc_probe(device_t);
182 static int skc_attach(device_t);
183 static int skc_detach(device_t);
184 static void skc_shutdown(device_t);
185 static int sk_detach(device_t);
186 static int sk_probe(device_t);
187 static int sk_attach(device_t);
188 static void sk_tick(void *);
189 static void sk_intr(void *);
190 static void sk_intr_xmac(struct sk_if_softc *);
191 static void sk_intr_bcom(struct sk_if_softc *);
192 static void sk_intr_yukon(struct sk_if_softc *);
193 static void sk_rxeof(struct sk_if_softc *);
194 static void sk_txeof(struct sk_if_softc *);
195 static int sk_encap(struct sk_if_softc *, struct mbuf *,
196 u_int32_t *);
197 static void sk_start(struct ifnet *);
198 static void sk_start_locked(struct ifnet *);
199 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
200 static void sk_init(void *);
201 static void sk_init_locked(struct sk_if_softc *);
202 static void sk_init_xmac(struct sk_if_softc *);
203 static void sk_init_yukon(struct sk_if_softc *);
204 static void sk_stop(struct sk_if_softc *);
205 static void sk_watchdog(struct ifnet *);
206 static int sk_ifmedia_upd(struct ifnet *);
207 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
208 static void sk_reset(struct sk_softc *);
209 static int sk_newbuf(struct sk_if_softc *,
210 struct sk_chain *, struct mbuf *);
211 static int sk_alloc_jumbo_mem(struct sk_if_softc *);
212 static void sk_free_jumbo_mem(struct sk_if_softc *);
213 static void *sk_jalloc(struct sk_if_softc *);
214 static void sk_jfree(void *, void *);
215 static int sk_init_rx_ring(struct sk_if_softc *);
216 static void sk_init_tx_ring(struct sk_if_softc *);
217 static u_int32_t sk_win_read_4(struct sk_softc *, int);
218 static u_int16_t sk_win_read_2(struct sk_softc *, int);
219 static u_int8_t sk_win_read_1(struct sk_softc *, int);
220 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
221 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
222 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
223 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
224 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
225 static void sk_vpd_read(struct sk_softc *);
226
227 static int sk_miibus_readreg(device_t, int, int);
228 static int sk_miibus_writereg(device_t, int, int, int);
229 static void sk_miibus_statchg(device_t);
230
231 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
232 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
233 int);
234 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
235
236 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
237 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
238 int);
239 static void sk_marv_miibus_statchg(struct sk_if_softc *);
240
241 static uint32_t sk_xmchash(const uint8_t *);
242 static uint32_t sk_gmchash(const uint8_t *);
243 static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
244 static void sk_setmulti(struct sk_if_softc *);
245 static void sk_setpromisc(struct sk_if_softc *);
246
247 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
248 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
249
250 #ifdef SK_USEIOSPACE
251 #define SK_RES SYS_RES_IOPORT
252 #define SK_RID SK_PCI_LOIO
253 #else
254 #define SK_RES SYS_RES_MEMORY
255 #define SK_RID SK_PCI_LOMEM
256 #endif
257
258 /*
259 * Note that we have newbus methods for both the GEnesis controller
260 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
261 * the miibus code is a child of the XMACs. We need to do it this way
262 * so that the miibus drivers can access the PHY registers on the
263 * right PHY. It's not quite what I had in mind, but it's the only
264 * design that achieves the desired effect.
265 */
/* Newbus method table for the GEnesis/Yukon controller (parent device). */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/* Newbus method table for each MAC/port ("sk"), a child of skc. */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Glue: skc attaches to pci, sk ports attach under skc, miibus under sk. */
DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
318
/*
 * Read-modify-write helpers: set/clear bits in a direct CSR register
 * (SK_SETBIT/SK_CLRBIT) or in a windowed register reached through the
 * sk_win_* accessors (SK_WIN_*BIT_*).
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
336
337 static u_int32_t
338 sk_win_read_4(sc, reg)
339 struct sk_softc *sc;
340 int reg;
341 {
342 #ifdef SK_USEIOSPACE
343 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
344 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
345 #else
346 return(CSR_READ_4(sc, reg));
347 #endif
348 }
349
350 static u_int16_t
351 sk_win_read_2(sc, reg)
352 struct sk_softc *sc;
353 int reg;
354 {
355 #ifdef SK_USEIOSPACE
356 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
357 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
358 #else
359 return(CSR_READ_2(sc, reg));
360 #endif
361 }
362
363 static u_int8_t
364 sk_win_read_1(sc, reg)
365 struct sk_softc *sc;
366 int reg;
367 {
368 #ifdef SK_USEIOSPACE
369 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
370 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
371 #else
372 return(CSR_READ_1(sc, reg));
373 #endif
374 }
375
376 static void
377 sk_win_write_4(sc, reg, val)
378 struct sk_softc *sc;
379 int reg;
380 u_int32_t val;
381 {
382 #ifdef SK_USEIOSPACE
383 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
384 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
385 #else
386 CSR_WRITE_4(sc, reg, val);
387 #endif
388 return;
389 }
390
391 static void
392 sk_win_write_2(sc, reg, val)
393 struct sk_softc *sc;
394 int reg;
395 u_int32_t val;
396 {
397 #ifdef SK_USEIOSPACE
398 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
399 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
400 #else
401 CSR_WRITE_2(sc, reg, val);
402 #endif
403 return;
404 }
405
406 static void
407 sk_win_write_1(sc, reg, val)
408 struct sk_softc *sc;
409 int reg;
410 u_int32_t val;
411 {
412 #ifdef SK_USEIOSPACE
413 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
414 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
415 #else
416 CSR_WRITE_1(sc, reg, val);
417 #endif
418 return;
419 }
420
421 /*
422 * The VPD EEPROM contains Vital Product Data, as suggested in
423 * the PCI 2.1 specification. The VPD data is separared into areas
424 * denoted by resource IDs. The SysKonnect VPD contains an ID string
425 * resource (the name of the adapter), a read-only area resource
426 * containing various key/data fields and a read/write area which
427 * can be used to store asset management information or log messages.
428 * We read the ID string and read-only into buffers attached to
429 * the controller softc structure for later use. At the moment,
430 * we only use the ID string during skc_attach().
431 */
432 static u_int8_t
433 sk_vpd_readbyte(sc, addr)
434 struct sk_softc *sc;
435 int addr;
436 {
437 int i;
438
439 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
440 for (i = 0; i < SK_TIMEOUT; i++) {
441 DELAY(1);
442 if (sk_win_read_2(sc,
443 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
444 break;
445 }
446
447 if (i == SK_TIMEOUT)
448 return(0);
449
450 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
451 }
452
453 static void
454 sk_vpd_read_res(sc, res, addr)
455 struct sk_softc *sc;
456 struct vpd_res *res;
457 int addr;
458 {
459 int i;
460 u_int8_t *ptr;
461
462 ptr = (u_int8_t *)res;
463 for (i = 0; i < sizeof(struct vpd_res); i++)
464 ptr[i] = sk_vpd_readbyte(sc, i + addr);
465
466 return;
467 }
468
469 static void
470 sk_vpd_read(sc)
471 struct sk_softc *sc;
472 {
473 int pos = 0, i;
474 struct vpd_res res;
475
476 if (sc->sk_vpd_prodname != NULL)
477 free(sc->sk_vpd_prodname, M_DEVBUF);
478 if (sc->sk_vpd_readonly != NULL)
479 free(sc->sk_vpd_readonly, M_DEVBUF);
480 sc->sk_vpd_prodname = NULL;
481 sc->sk_vpd_readonly = NULL;
482 sc->sk_vpd_readonly_len = 0;
483
484 sk_vpd_read_res(sc, &res, pos);
485
486 /*
487 * Bail out quietly if the eeprom appears to be missing or empty.
488 */
489 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
490 return;
491
492 if (res.vr_id != VPD_RES_ID) {
493 printf("skc%d: bad VPD resource id: expected %x got %x\n",
494 sc->sk_unit, VPD_RES_ID, res.vr_id);
495 return;
496 }
497
498 pos += sizeof(res);
499 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
500 if (sc->sk_vpd_prodname != NULL) {
501 for (i = 0; i < res.vr_len; i++)
502 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
503 sc->sk_vpd_prodname[i] = '\0';
504 }
505 pos += res.vr_len;
506
507 sk_vpd_read_res(sc, &res, pos);
508
509 if (res.vr_id != VPD_RES_READ) {
510 printf("skc%d: bad VPD resource id: expected %x got %x\n",
511 sc->sk_unit, VPD_RES_READ, res.vr_id);
512 return;
513 }
514
515 pos += sizeof(res);
516 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
517 for (i = 0; i < res.vr_len; i++)
518 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
519 sc->sk_vpd_readonly_len = res.vr_len;
520
521 return;
522 }
523
524 static int
525 sk_miibus_readreg(dev, phy, reg)
526 device_t dev;
527 int phy, reg;
528 {
529 struct sk_if_softc *sc_if;
530
531 sc_if = device_get_softc(dev);
532
533 switch(sc_if->sk_softc->sk_type) {
534 case SK_GENESIS:
535 return(sk_xmac_miibus_readreg(sc_if, phy, reg));
536 case SK_YUKON:
537 case SK_YUKON_LITE:
538 case SK_YUKON_LP:
539 return(sk_marv_miibus_readreg(sc_if, phy, reg));
540 }
541
542 return(0);
543 }
544
545 static int
546 sk_miibus_writereg(dev, phy, reg, val)
547 device_t dev;
548 int phy, reg, val;
549 {
550 struct sk_if_softc *sc_if;
551
552 sc_if = device_get_softc(dev);
553
554 switch(sc_if->sk_softc->sk_type) {
555 case SK_GENESIS:
556 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
557 case SK_YUKON:
558 case SK_YUKON_LITE:
559 case SK_YUKON_LP:
560 return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
561 }
562
563 return(0);
564 }
565
566 static void
567 sk_miibus_statchg(dev)
568 device_t dev;
569 {
570 struct sk_if_softc *sc_if;
571
572 sc_if = device_get_softc(dev);
573
574 switch(sc_if->sk_softc->sk_type) {
575 case SK_GENESIS:
576 sk_xmac_miibus_statchg(sc_if);
577 break;
578 case SK_YUKON:
579 case SK_YUKON_LITE:
580 case SK_YUKON_LP:
581 sk_marv_miibus_statchg(sc_if);
582 break;
583 }
584
585 return;
586 }
587
/*
 * Read a PHY register through the XMAC MII interface (GEnesis).
 * The internal XMAC PHY only answers at address 0, so other addresses
 * are rejected up front.  For external (GMII) PHYs, XM_MMUCMD must be
 * polled until PHYDATARDY indicates the data register is valid.
 * Returns the register value, or 0 on timeout or bad address.
 */
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	int i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/*
	 * Throwaway read -- presumably needed to start/latch the
	 * transaction after an address change; confirm against the
	 * XMAC II datasheet.
	 */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
			return(0);
		}
	}
	DELAY(1);
	/* Reuse i for the result; the loop index is dead here. */
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	SK_IF_UNLOCK(sc_if);
	return(i);
}
621
/*
 * Write a PHY register through the XMAC MII interface (GEnesis).
 * Waits for PHYBUSY to clear before loading the address, writes the
 * data, then waits for the write to complete.  Returns ETIMEDOUT if
 * the PHY never becomes ready for the address phase; a timeout on the
 * completion phase is only logged (the write may still have landed).
 */
static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc *sc_if;
	int phy, reg, val;
{
	int i;

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* NOTE(review): no DELAY() in this first poll loop -- confirm
	 * whether that is intentional (the completion loop below has one). */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	SK_IF_UNLOCK(sc_if);
	if (i == SK_TIMEOUT)
		printf("sk%d: phy write timed out\n", sc_if->sk_unit);

	return(0);
}
654
655 static void
656 sk_xmac_miibus_statchg(sc_if)
657 struct sk_if_softc *sc_if;
658 {
659 struct mii_data *mii;
660
661 mii = device_get_softc(sc_if->sk_miibus);
662
663 SK_IF_LOCK(sc_if);
664 /*
665 * If this is a GMII PHY, manually set the XMAC's
666 * duplex mode accordingly.
667 */
668 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
669 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
670 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
671 } else {
672 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
673 }
674 }
675 SK_IF_UNLOCK(sc_if);
676
677 return;
678 }
679
/*
 * Read a PHY register through the Yukon SMI interface.  Only the
 * on-chip Marvell PHY at address 0 is serviced; anything else reads
 * as 0.  The read is kicked off via YUKON_SMICR, then SMICR is polled
 * for READ_VALID before the result is taken from YUKON_SMIDR.
 * Returns 0 on timeout.
 */
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	u_int16_t val;
	int i;

	/* Only the integrated Marvell PHY (copper or fiber) at addr 0. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_IF_LOCK(sc_if);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n",
		       sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
	SK_IF_UNLOCK(sc_if);

	return(val);
}
717
/*
 * Write a PHY register through the Yukon SMI interface: load the data
 * register, issue the write command, then poll for completion.
 * Always returns 0; a timeout is silently ignored.
 */
static int
sk_marv_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc *sc_if;
	int phy, reg, val;
{
	int i;

	SK_IF_LOCK(sc_if);
	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	/*
	 * NOTE(review): this breaks when YU_SMICR_BUSY is *set*, which
	 * looks inverted -- one would expect to wait until BUSY clears.
	 * Confirm against the Yukon GMAC manual before changing.
	 */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
			break;
	}
	SK_IF_UNLOCK(sc_if);

	return(0);
}
739
/*
 * Media status-change hook for the Marvell PHY.  Intentionally empty:
 * no MAC-side adjustment is made here for the Yukon.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
}
746
747 #define HASH_BITS 6
748
749 static u_int32_t
750 sk_xmchash(addr)
751 const uint8_t *addr;
752 {
753 uint32_t crc;
754
755 /* Compute CRC for the address value. */
756 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
757
758 return (~crc & ((1 << HASH_BITS) - 1));
759 }
760
761 /* gmchash is just a big endian crc */
762 static u_int32_t
763 sk_gmchash(addr)
764 const uint8_t *addr;
765 {
766 uint32_t crc;
767
768 /* Compute CRC for the address value. */
769 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
770
771 return (crc & ((1 << HASH_BITS) - 1));
772 }
773
774 static void
775 sk_setfilt(sc_if, addr, slot)
776 struct sk_if_softc *sc_if;
777 caddr_t addr;
778 int slot;
779 {
780 int base;
781
782 base = XM_RXFILT_ENTRY(slot);
783
784 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
785 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
786 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
787
788 return;
789 }
790
/*
 * Reprogram the multicast filters from the interface address list.
 * Caller must hold the per-interface lock (asserted below).
 *
 * GEnesis: the first groups go into the XMAC perfect-match slots
 * (slot 0 is skipped -- it holds the station address); overflow goes
 * into the 64-bit hash in XM_MAR0/XM_MAR2.  Yukon: all groups go into
 * the GMAC hash registers YUKON_MCAH1..4.  ALLMULTI/PROMISC opens the
 * hash completely.
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = sc_if->sk_ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h = 0, i;
	struct ifmultiaddr *ifma;
	/* All-zero address used to blank unused perfect-filter slots. */
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

	SK_IF_LOCK_ASSERT(sc_if);

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
				h = sk_gmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			}
			/* h is a 6-bit bucket; set the matching hash bit. */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		IF_ADDR_UNLOCK(ifp);
	}

	/* Push the accumulated hash (and enable filtering on GEnesis). */
	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}
884
885 static void
886 sk_setpromisc(sc_if)
887 struct sk_if_softc *sc_if;
888 {
889 struct sk_softc *sc = sc_if->sk_softc;
890 struct ifnet *ifp = sc_if->sk_ifp;
891
892 SK_IF_LOCK_ASSERT(sc_if);
893
894 switch(sc->sk_type) {
895 case SK_GENESIS:
896 if (ifp->if_flags & IFF_PROMISC) {
897 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
898 } else {
899 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
900 }
901 break;
902 case SK_YUKON:
903 case SK_YUKON_LITE:
904 case SK_YUKON_LP:
905 if (ifp->if_flags & IFF_PROMISC) {
906 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
907 YU_RCR_UFLEN | YU_RCR_MUFLEN);
908 } else {
909 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
910 YU_RCR_UFLEN | YU_RCR_MUFLEN);
911 }
912 break;
913 }
914
915 return;
916 }
917
918 static int
919 sk_init_rx_ring(sc_if)
920 struct sk_if_softc *sc_if;
921 {
922 struct sk_chain_data *cd = &sc_if->sk_cdata;
923 struct sk_ring_data *rd = sc_if->sk_rdata;
924 int i;
925
926 bzero((char *)rd->sk_rx_ring,
927 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
928
929 for (i = 0; i < SK_RX_RING_CNT; i++) {
930 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
931 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
932 return(ENOBUFS);
933 if (i == (SK_RX_RING_CNT - 1)) {
934 cd->sk_rx_chain[i].sk_next =
935 &cd->sk_rx_chain[0];
936 rd->sk_rx_ring[i].sk_next =
937 vtophys(&rd->sk_rx_ring[0]);
938 } else {
939 cd->sk_rx_chain[i].sk_next =
940 &cd->sk_rx_chain[i + 1];
941 rd->sk_rx_ring[i].sk_next =
942 vtophys(&rd->sk_rx_ring[i + 1]);
943 }
944 }
945
946 sc_if->sk_cdata.sk_rx_prod = 0;
947 sc_if->sk_cdata.sk_rx_cons = 0;
948
949 return(0);
950 }
951
952 static void
953 sk_init_tx_ring(sc_if)
954 struct sk_if_softc *sc_if;
955 {
956 struct sk_chain_data *cd = &sc_if->sk_cdata;
957 struct sk_ring_data *rd = sc_if->sk_rdata;
958 int i;
959
960 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
961 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
962
963 for (i = 0; i < SK_TX_RING_CNT; i++) {
964 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
965 if (i == (SK_TX_RING_CNT - 1)) {
966 cd->sk_tx_chain[i].sk_next =
967 &cd->sk_tx_chain[0];
968 rd->sk_tx_ring[i].sk_next =
969 vtophys(&rd->sk_tx_ring[0]);
970 } else {
971 cd->sk_tx_chain[i].sk_next =
972 &cd->sk_tx_chain[i + 1];
973 rd->sk_tx_ring[i].sk_next =
974 vtophys(&rd->sk_tx_ring[i + 1]);
975 }
976 }
977
978 sc_if->sk_cdata.sk_tx_prod = 0;
979 sc_if->sk_cdata.sk_tx_cons = 0;
980 sc_if->sk_cdata.sk_tx_cnt = 0;
981
982 return;
983 }
984
/*
 * Attach a receive buffer to ring slot `c` and initialize its
 * hardware descriptor.  If `m` is NULL a new mbuf header is allocated
 * and backed by a 9K buffer from the private jumbo pool (sk_jalloc);
 * otherwise the caller's mbuf is recycled with its pointers/lengths
 * reset.  Returns 0 on success or ENOBUFS if allocation fails.
 */
static int
sk_newbuf(sc_if, c, m)
	struct sk_if_softc *sc_if;
	struct sk_chain *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		/* sk_jfree is the external-storage destructor; sc_if is
		 * passed through as its argument. */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Point the hardware descriptor at the buffer and arm it. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
1042
1043 /*
1044 * Allocate jumbo buffer storage. The SysKonnect adapters support
1045 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1046 * use them in their drivers. In order for us to use them, we need
1047 * large 9K receive buffers, however standard mbuf clusters are only
1048 * 2048 bytes in size. Consequently, we need to allocate and manage
1049 * our own jumbo buffer pool. Fortunately, this does not require an
1050 * excessive amount of additional code.
1051 */
/*
 * Create the private 9K receive-buffer pool: one contiguous,
 * page-aligned allocation carved into SK_JSLOTS pieces of SK_JLEN
 * bytes, each tracked by a sk_jpool_entry on the free list.  Also
 * initializes the pool mutex and the free/in-use lists.  Returns 0 on
 * success or ENOBUFS on allocation failure (partially built state is
 * torn down via sk_free_jumbo_mem()).
 */
static int
sk_alloc_jumbo_mem(sc_if)
	struct sk_if_softc *sc_if;
{
	caddr_t ptr;
	register int i;
	struct sk_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
		return(ENOBUFS);
	}

	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);

	SLIST_INIT(&sc_if->sk_jfree_listhead);
	SLIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/* Unwind everything built so far, including the
			 * contiguous buffer, then report failure. */
			sk_free_jumbo_mem(sc_if);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("sk%d: no memory for jumbo "
			    "buffer queue!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
1098
/*
 * Tear down the jumbo buffer pool: free every bookkeeping entry on
 * the free list, destroy the pool mutex and release the contiguous
 * buffer.  If any buffer is still lent out (in-use list non-empty)
 * the memory is deliberately leaked with a warning, since an mbuf
 * somewhere still points into it.
 */
static void
sk_free_jumbo_mem(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_jpool_entry *entry;

	SK_JLIST_LOCK(sc_if);

	/* We cannot release external mbuf storage while in use. */
	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
		printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit);
		SK_JLIST_UNLOCK(sc_if);
		return;
	}

	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	SK_JLIST_UNLOCK(sc_if);

	mtx_destroy(&sc_if->sk_jlist_mtx);

	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);

	return;
}
1128
1129 /*
1130 * Allocate a jumbo buffer.
1131 */
1132 static void *
1133 sk_jalloc(sc_if)
1134 struct sk_if_softc *sc_if;
1135 {
1136 struct sk_jpool_entry *entry;
1137
1138 SK_JLIST_LOCK(sc_if);
1139
1140 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1141
1142 if (entry == NULL) {
1143 #ifdef SK_VERBOSE
1144 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1145 #endif
1146 SK_JLIST_UNLOCK(sc_if);
1147 return(NULL);
1148 }
1149
1150 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1151 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1152
1153 SK_JLIST_UNLOCK(sc_if);
1154
1155 return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1156 }
1157
1158 /*
1159 * Release a jumbo buffer.
1160 */
1161 static void
1162 sk_jfree(buf, args)
1163 void *buf;
1164 void *args;
1165 {
1166 struct sk_if_softc *sc_if;
1167 int i;
1168 struct sk_jpool_entry *entry;
1169
1170 /* Extract the softc struct pointer. */
1171 sc_if = (struct sk_if_softc *)args;
1172 if (sc_if == NULL)
1173 panic("sk_jfree: didn't get softc pointer!");
1174
1175 SK_JLIST_LOCK(sc_if);
1176
1177 /* calculate the slot this buffer belongs to */
1178 i = ((vm_offset_t)buf
1179 - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1180
1181 if ((i < 0) || (i >= SK_JSLOTS))
1182 panic("sk_jfree: asked to free buffer that we don't manage!");
1183
1184 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1185 if (entry == NULL)
1186 panic("sk_jfree: buffer not in use!");
1187 entry->slot = i;
1188 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
1189 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
1190 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
1191 wakeup(sc_if);
1192
1193 SK_JLIST_UNLOCK(sc_if);
1194 return;
1195 }
1196
1197 /*
1198 * Set media options.
1199 */
1200 static int
1201 sk_ifmedia_upd(ifp)
1202 struct ifnet *ifp;
1203 {
1204 struct sk_if_softc *sc_if = ifp->if_softc;
1205 struct mii_data *mii;
1206
1207 mii = device_get_softc(sc_if->sk_miibus);
1208 sk_init(sc_if);
1209 mii_mediachg(mii);
1210
1211 return(0);
1212 }
1213
1214 /*
1215 * Report current media status.
1216 */
1217 static void
1218 sk_ifmedia_sts(ifp, ifmr)
1219 struct ifnet *ifp;
1220 struct ifmediareq *ifmr;
1221 {
1222 struct sk_if_softc *sc_if;
1223 struct mii_data *mii;
1224
1225 sc_if = ifp->if_softc;
1226 mii = device_get_softc(sc_if->sk_miibus);
1227
1228 mii_pollstat(mii);
1229 ifmr->ifm_active = mii->mii_media_active;
1230 ifmr->ifm_status = mii->mii_media_status;
1231
1232 return;
1233 }
1234
/*
 * Handle interface ioctls: MTU changes, up/down and promiscuous flag
 * transitions, multicast filter updates, and media get/set.  Anything
 * not handled here is punted to ether_ioctl().
 */
static int
sk_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;
	struct mii_data *mii;

	switch(command) {
	case SIOCSIFMTU:
		/* Accept any MTU up to the jumbo maximum; clear RUNNING
		 * so sk_init_locked() does a full reinitialization. */
		SK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			sk_init_locked(sc_if);
		}
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Already running: only reprogram the RX
				 * filters when IFF_PROMISC actually
				 * changed since the last call. */
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & IFF_PROMISC) {
					sk_setpromisc(sc_if);
					sk_setmulti(sc_if);
				}
			} else
				sk_init_locked(sc_if);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember flags so the next call can detect changes. */
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			sk_setmulti(sc_if);
			error = 0;
		}
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media handling is delegated to the MII layer. */
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
1298
1299 /*
1300 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1301 * IDs against our list and return a device name if we find a match.
1302 */
1303 static int
1304 skc_probe(dev)
1305 device_t dev;
1306 {
1307 struct sk_type *t = sk_devs;
1308
1309 while(t->sk_name != NULL) {
1310 if ((pci_get_vendor(dev) == t->sk_vid) &&
1311 (pci_get_device(dev) == t->sk_did)) {
1312 /*
1313 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1314 * Rev. 3 is supported by re(4).
1315 */
1316 if ((t->sk_vid == VENDORID_LINKSYS) &&
1317 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1318 (pci_get_subdevice(dev) !=
1319 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1320 t++;
1321 continue;
1322 }
1323 device_set_desc(dev, t->sk_name);
1324 return (BUS_PROBE_DEFAULT);
1325 }
1326 t++;
1327 }
1328
1329 return(ENXIO);
1330 }
1331
1332 /*
1333 * Force the GEnesis into reset, then bring it out of reset.
1334 */
static void
sk_reset(sc)
	struct sk_softc *sc;
{
	/* Assert software and master resets; Yukon parts also need
	 * the link logic held in reset while we do it. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Let the resets settle, then release them in the same order. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter: take it out of reset and
		 * program the per-queue timeout init registers. */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	if (bootverbose)
		printf("skc%d: interrupt moderation is %d us\n",
		    sc->sk_unit, sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	/* Moderate only the TX/RX end-of-frame interrupts. */
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1381
1382 static int
1383 sk_probe(dev)
1384 device_t dev;
1385 {
1386 struct sk_softc *sc;
1387
1388 sc = device_get_softc(device_get_parent(dev));
1389
1390 /*
1391 * Not much to do here. We always know there will be
1392 * at least one XMAC present, and if there are two,
1393 * skc_attach() will create a second device instance
1394 * for us.
1395 */
1396 switch (sc->sk_type) {
1397 case SK_GENESIS:
1398 device_set_desc(dev, "XaQti Corp. XMAC II");
1399 break;
1400 case SK_YUKON:
1401 case SK_YUKON_LITE:
1402 case SK_YUKON_LP:
1403 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1404 break;
1405 }
1406
1407 return (BUS_PROBE_DEFAULT);
1408 }
1409
1410 /*
1411 * Each XMAC chip is attached as a separate logical IP interface.
1412 * Single port cards will have only one logical interface of course.
1413 */
1414 static int
1415 sk_attach(dev)
1416 device_t dev;
1417 {
1418 struct sk_softc *sc;
1419 struct sk_if_softc *sc_if;
1420 struct ifnet *ifp;
1421 int i, port, error;
1422 u_char eaddr[6];
1423
1424 if (dev == NULL)
1425 return(EINVAL);
1426
1427 error = 0;
1428 sc_if = device_get_softc(dev);
1429 sc = device_get_softc(device_get_parent(dev));
1430 port = *(int *)device_get_ivars(dev);
1431
1432 sc_if->sk_dev = dev;
1433 sc_if->sk_unit = device_get_unit(dev);
1434 sc_if->sk_port = port;
1435 sc_if->sk_softc = sc;
1436 sc->sk_if[port] = sc_if;
1437 if (port == SK_PORT_A)
1438 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1439 if (port == SK_PORT_B)
1440 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1441
1442 /* Allocate the descriptor queues. */
1443 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1444 M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);
1445
1446 if (sc_if->sk_rdata == NULL) {
1447 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1448 error = ENOMEM;
1449 goto fail;
1450 }
1451
1452 /* Try to allocate memory for jumbo buffers. */
1453 if (sk_alloc_jumbo_mem(sc_if)) {
1454 printf("sk%d: jumbo buffer allocation failed\n",
1455 sc_if->sk_unit);
1456 error = ENOMEM;
1457 goto fail;
1458 }
1459
1460 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1461 if (ifp == NULL) {
1462 printf("sk%d: can not if_alloc()\n", sc_if->sk_unit);
1463 error = ENOSPC;
1464 goto fail;
1465 }
1466 ifp->if_softc = sc_if;
1467 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1468 ifp->if_mtu = ETHERMTU;
1469 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1470 /*
1471 * The hardware should be ready for VLAN_MTU by default:
1472 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1473 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1474 */
1475 ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_MTU;
1476 ifp->if_ioctl = sk_ioctl;
1477 ifp->if_start = sk_start;
1478 ifp->if_watchdog = sk_watchdog;
1479 ifp->if_init = sk_init;
1480 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1481 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1482 IFQ_SET_READY(&ifp->if_snd);
1483
1484 callout_handle_init(&sc_if->sk_tick_ch);
1485
1486 /*
1487 * Get station address for this interface. Note that
1488 * dual port cards actually come with three station
1489 * addresses: one for each port, plus an extra. The
1490 * extra one is used by the SysKonnect driver software
1491 * as a 'virtual' station address for when both ports
1492 * are operating in failover mode. Currently we don't
1493 * use this extra address.
1494 */
1495 SK_LOCK(sc);
1496 for (i = 0; i < ETHER_ADDR_LEN; i++)
1497 eaddr[i] =
1498 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1499
1500 /*
1501 * Set up RAM buffer addresses. The NIC will have a certain
1502 * amount of SRAM on it, somewhere between 512K and 2MB. We
1503 * need to divide this up a) between the transmitter and
1504 * receiver and b) between the two XMACs, if this is a
1505 * dual port NIC. Our algotithm is to divide up the memory
1506 * evenly so that everyone gets a fair share.
1507 */
1508 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1509 u_int32_t chunk, val;
1510
1511 chunk = sc->sk_ramsize / 2;
1512 val = sc->sk_rboff / sizeof(u_int64_t);
1513 sc_if->sk_rx_ramstart = val;
1514 val += (chunk / sizeof(u_int64_t));
1515 sc_if->sk_rx_ramend = val - 1;
1516 sc_if->sk_tx_ramstart = val;
1517 val += (chunk / sizeof(u_int64_t));
1518 sc_if->sk_tx_ramend = val - 1;
1519 } else {
1520 u_int32_t chunk, val;
1521
1522 chunk = sc->sk_ramsize / 4;
1523 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1524 sizeof(u_int64_t);
1525 sc_if->sk_rx_ramstart = val;
1526 val += (chunk / sizeof(u_int64_t));
1527 sc_if->sk_rx_ramend = val - 1;
1528 sc_if->sk_tx_ramstart = val;
1529 val += (chunk / sizeof(u_int64_t));
1530 sc_if->sk_tx_ramend = val - 1;
1531 }
1532
1533 /* Read and save PHY type and set PHY address */
1534 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1535 switch(sc_if->sk_phytype) {
1536 case SK_PHYTYPE_XMAC:
1537 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1538 break;
1539 case SK_PHYTYPE_BCOM:
1540 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1541 break;
1542 case SK_PHYTYPE_MARV_COPPER:
1543 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1544 break;
1545 default:
1546 printf("skc%d: unsupported PHY type: %d\n",
1547 sc->sk_unit, sc_if->sk_phytype);
1548 error = ENODEV;
1549 SK_UNLOCK(sc);
1550 goto fail;
1551 }
1552
1553
1554 /*
1555 * Call MI attach routine. Can't hold locks when calling into ether_*.
1556 */
1557 SK_UNLOCK(sc);
1558 ether_ifattach(ifp, eaddr);
1559 SK_LOCK(sc);
1560
1561 /*
1562 * Do miibus setup.
1563 */
1564 switch (sc->sk_type) {
1565 case SK_GENESIS:
1566 sk_init_xmac(sc_if);
1567 break;
1568 case SK_YUKON:
1569 case SK_YUKON_LITE:
1570 case SK_YUKON_LP:
1571 sk_init_yukon(sc_if);
1572 break;
1573 }
1574
1575 SK_UNLOCK(sc);
1576 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1577 sk_ifmedia_upd, sk_ifmedia_sts)) {
1578 printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1579 ether_ifdetach(ifp);
1580 error = ENXIO;
1581 goto fail;
1582 }
1583
1584 fail:
1585 if (error) {
1586 /* Access should be ok even though lock has been dropped */
1587 sc->sk_if[port] = NULL;
1588 sk_detach(dev);
1589 }
1590
1591 return(error);
1592 }
1593
1594 /*
1595 * Attach the interface. Allocate softc structures, do ifmedia
1596 * setup and ethernet/BPF attach.
1597 */
static int
skc_attach(dev)
	device_t dev;
{
	struct sk_softc *sc;
	int unit, error = 0, rid, *port;
	uint8_t skrs;
	char *pname, *revstr;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = SK_RID;
	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);

	if (sc->sk_res == NULL) {
		printf("sk%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->sk_btag = rman_get_bustag(sc->sk_res);
	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

	/* Identify the chip: version and revision registers. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
		    unit, sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sk_irq == NULL) {
		printf("skc%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Export the interrupt moderation knob via sysctl. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
	    &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
	    "SK interrupt moderation");

	/* Pull in device tunables; clamp out-of-range values. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), unit,
	    "int_mod", &sc->sk_int_mod);
	if (error == 0) {
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			printf("skc%d: int_mod value out of range; "
			    "using default: %d\n", unit, SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	sc->sk_unit = unit;

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf("skc%d: unknown ram size: %d\n",
			    sc->sk_unit, skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		/* Yukon encodes SRAM size in 4K units; 0 means 128K. */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_T;
		break;
	default:
		printf("skc%d: unknown media type: 0x%x\n",
		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
		error = ENXIO;
		goto fail;
	}

	/* Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to freqently be bogus. */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T:
		/* Stay with VPD PN. */
		pname = sc->sk_vpd_prodname;
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			pname = sc->sk_vpd_prodname;
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			/* Rev. A0 parts latch this write; later revs don't. */
			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
		    "chipver=%02x, rev=%x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if there. */
	device_printf(dev, "%s rev. %s(0x%x)\n",
	    pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev);

	if (bootverbose) {
		/* Walk the VPD read-only area, printing the PN/EC/MN/SN
		 * keyword fields; each entry is keyword(2) + len(1) + data. */
		if (sc->sk_vpd_readonly != NULL &&
		    sc->sk_vpd_readonly_len != 0) {
			char buf[256];
			char *dp = sc->sk_vpd_readonly;
			uint16_t l, len = sc->sk_vpd_readonly_len;

			while (len >= 3) {
				if ((*dp == 'P' && *(dp+1) == 'N') ||
				    (*dp == 'E' && *(dp+1) == 'C') ||
				    (*dp == 'M' && *(dp+1) == 'N') ||
				    (*dp == 'S' && *(dp+1) == 'N')) {
					l = 0;
					while (l < *(dp+2)) {
						buf[l] = *(dp+3+l);
						++l;
					}
					buf[l] = '\0';
					device_printf(dev, "%c%c: %s\n",
					    *dp, *(dp+1), buf);
					len -= (3 + l);
					dp += (3 + l);
				} else {
					len -= (3 + *(dp+2));
					dp += (3 + *(dp+2));
				}
			}
		}
		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	/* Create a child sk device per MAC port; the port number is
	 * passed to the child through its ivars. */
	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	/* Dual-MAC boards get a second child for port B. */
	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	/* Attach the sk children (runs sk_attach() for each port). */
	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		printf("skc%d: couldn't set up irq\n", unit);
		goto fail;
	}

fail:
	/* skc_detach() is safe to call with partially-initialized state. */
	if (error)
		skc_detach(dev);

	return(error);
}
1909
1910 /*
1911 * Shutdown hardware and free up resources. This can be called any
1912 * time after the mutex has been initialized. It is called in both
1913 * the error case in attach and the normal detach case so it needs
1914 * to be careful about only freeing resources that have actually been
1915 * allocated.
1916 */
static int
sk_detach(dev)
	device_t dev;
{
	struct sk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = sc_if->sk_ifp;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/* ifp may be NULL if attach failed before if_alloc(). */
	if (ifp)
		if_free(ifp);
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	/* Free jumbo pool and descriptor ring only if they were allocated. */
	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
		sk_free_jumbo_mem(sc_if);
	if (sc_if->sk_rdata != NULL) {
		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
		    M_DEVBUF);
	}
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1960
/*
 * Tear down the skc bus device: delete the per-port sk children
 * (freeing their port-number ivars), release VPD buffers, the
 * interrupt, and the register mapping, then destroy the mutex.
 * Safe to call from the attach failure path with partial state.
 */
static int
skc_detach(dev)
	device_t dev;
{
	struct sk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		/* Free each child's ivars (allocated in skc_attach)
		 * before deleting the child device itself. */
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);

	/* Tear down the interrupt before releasing the resources. */
	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	if (sc->sk_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	if (sc->sk_res)
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1998
/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting at
 * *txidx.  On success *txidx is advanced past the last descriptor
 * used and 0 is returned; ENOBUFS if the ring lacks room.
 */
static int
sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc *sc_if;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct sk_tx_desc *f = NULL;
	struct mbuf *m;
	u_int32_t frag, cur, cnt = 0;

	SK_IF_LOCK_ASSERT(sc_if);

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Keep at least 2 descriptors spare. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * OWN is set here on every descriptor except the
			 * first; the first one's OWN bit is set last (below)
			 * so the NIC cannot start on a half-built chain.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* m != NULL means we bailed out of the loop early: no room. */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the last descriptor and request an end-of-frame interrupt. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	/* Remember the mbuf so sk_txeof() can free it after transmit. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Hand the whole chain to the NIC by setting OWN on the first frag. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
2050
2051 static void
2052 sk_start(ifp)
2053 struct ifnet *ifp;
2054 {
2055 struct sk_if_softc *sc_if;
2056
2057 sc_if = ifp->if_softc;
2058
2059 SK_IF_LOCK(sc_if);
2060 sk_start_locked(ifp);
2061 SK_IF_UNLOCK(sc_if);
2062
2063 return;
2064 }
2065
/*
 * Drain the interface send queue into the TX descriptor ring and kick
 * the transmit BMU if anything was queued.  The per-interface lock
 * must be held by the caller.
 */
static void
sk_start_locked(ifp)
	struct ifnet *ifp;
{
	struct sk_softc *sc;
	struct sk_if_softc *sc_if;
	struct mbuf *m_head = NULL;
	u_int32_t idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop when the producer slot still holds an un-reaped mbuf. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		/* Only touch the hardware if we actually queued packets. */
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}

	return;
}
2116
2117
2118 static void
2119 sk_watchdog(ifp)
2120 struct ifnet *ifp;
2121 {
2122 struct sk_if_softc *sc_if;
2123
2124 sc_if = ifp->if_softc;
2125
2126 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
2127 SK_IF_LOCK(sc_if);
2128 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2129 sk_init_locked(sc_if);
2130 SK_IF_UNLOCK(sc_if);
2131
2132 return;
2133 }
2134
2135 static void
2136 skc_shutdown(dev)
2137 device_t dev;
2138 {
2139 struct sk_softc *sc;
2140
2141 sc = device_get_softc(dev);
2142 SK_LOCK(sc);
2143
2144 /* Turn off the 'driver is loaded' LED. */
2145 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2146
2147 /*
2148 * Reset the GEnesis controller. Doing this should also
2149 * assert the resets on the attached XMAC(s).
2150 */
2151 sk_reset(sc);
2152 SK_UNLOCK(sc);
2153
2154 return;
2155 }
2156
/*
 * Receive completion: walk the RX ring from the current producer
 * index, handing completed frames up the stack until we hit a
 * descriptor the NIC still owns.
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct mbuf *m;
	struct ifnet *ifp;
	struct sk_chain *cur_rx;
	int total_len = 0;
	int i;
	u_int32_t rxstat;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	SK_LOCK_ASSERT(sc);

	/* OWN still set means the NIC hasn't filled this slot yet. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: count it and recycle the buffer. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf *m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		/* Drop the lock while handing the packet up the stack. */
		SK_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SK_LOCK(sc);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
2226
/*
 * Transmit completion: reap descriptors the NIC has finished with,
 * freeing their mbufs and making ring slots available again.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct sk_tx_desc *cur_tx;
	struct ifnet *ifp;
	u_int32_t idx;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* OWN still set: the NIC hasn't transmitted this one yet. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* One packet per LASTFRAG descriptor. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}

	/* Clear the watchdog once the ring is empty. */
	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
		ifp->if_timer = 0;
	} else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Allow new transmissions once there is room for a frame again. */
	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
2268
/*
 * Periodic link-state poll, scheduled via timeout() while the link is
 * down.  For XMAC PHYs it samples the link_sync GPIO; once the link
 * is stable it re-enables the GP0 interrupt and stops rescheduling.
 */
static void
sk_tick(xsc_if)
	void *xsc_if;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	int i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = sc_if->sk_ifp;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Interface went down since the timeout was armed: do nothing. */
	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Broadcom PHYs are handled by the dedicated interrupt path. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Not stable yet: re-arm ourselves and try again in a second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	/* Link is back: cancel any outstanding reschedule. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
2321
/*
 * Service an interrupt from a Broadcom PHY attached through the
 * XMAC's GMII interface.  Called from the external-register interrupt
 * path in sk_intr() and from sk_tick().  The MAC's TX/RX state
 * machines are paused for the duration of the PHY access.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc *sc_if;
{
	struct mii_data *mii;
	struct ifnet *ifp;
	int status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = sc_if->sk_ifp;

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface is down: reinitialize the MAC and bail out. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: kick off renegotiation. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link change with link up: adjust PHY IMR. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Autoneg still in progress: poll again via sk_tick. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Resume the MAC's TX/RX state machines. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
2375
/*
 * Service an XMAC (GENESIS) MAC interrupt: arm the link poller on
 * link-sync/autoneg events and flush the FIFOs on TX underrun or
 * RX overrun.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	u_int16_t status;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() re-enables it. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Transmit underrun: flush the TX FIFO. */
	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	/* Receive overrun: flush the RX FIFO. */
	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read the ISR to acknowledge any remaining events. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
2411
2412 static void
2413 sk_intr_yukon(sc_if)
2414 struct sk_if_softc *sc_if;
2415 {
2416 int status;
2417
2418 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2419
2420 return;
2421 }
2422
2423 static void
2424 sk_intr(xsc)
2425 void *xsc;
2426 {
2427 struct sk_softc *sc = xsc;
2428 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
2429 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2430 u_int32_t status;
2431
2432 SK_LOCK(sc);
2433
2434 sc_if0 = sc->sk_if[SK_PORT_A];
2435 sc_if1 = sc->sk_if[SK_PORT_B];
2436
2437 if (sc_if0 != NULL)
2438 ifp0 = sc_if0->sk_ifp;
2439 if (sc_if1 != NULL)
2440 ifp1 = sc_if1->sk_ifp;
2441
2442 for (;;) {
2443 status = CSR_READ_4(sc, SK_ISSR);
2444 if (!(status & sc->sk_intrmask))
2445 break;
2446
2447 /* Handle receive interrupts first. */
2448 if (status & SK_ISR_RX1_EOF) {
2449 sk_rxeof(sc_if0);
2450 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2451 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2452 }
2453 if (status & SK_ISR_RX2_EOF) {
2454 sk_rxeof(sc_if1);
2455 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2456 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2457 }
2458
2459 /* Then transmit interrupts. */
2460 if (status & SK_ISR_TX1_S_EOF) {
2461 sk_txeof(sc_if0);
2462 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2463 SK_TXBMU_CLR_IRQ_EOF);
2464 }
2465 if (status & SK_ISR_TX2_S_EOF) {
2466 sk_txeof(sc_if1);
2467 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2468 SK_TXBMU_CLR_IRQ_EOF);
2469 }
2470
2471 /* Then MAC interrupts. */
2472 if (status & SK_ISR_MAC1 &&
2473 ifp0->if_drv_flags & IFF_DRV_RUNNING) {
2474 if (sc->sk_type == SK_GENESIS)
2475 sk_intr_xmac(sc_if0);
2476 else
2477 sk_intr_yukon(sc_if0);
2478 }
2479
2480 if (status & SK_ISR_MAC2 &&
2481 ifp1->if_drv_flags & IFF_DRV_RUNNING) {
2482 if (sc->sk_type == SK_GENESIS)
2483 sk_intr_xmac(sc_if1);
2484 else
2485 sk_intr_yukon(sc_if1);
2486 }
2487
2488 if (status & SK_ISR_EXTERNAL_REG) {
2489 if (ifp0 != NULL &&
2490 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2491 sk_intr_bcom(sc_if0);
2492 if (ifp1 != NULL &&
2493 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2494 sk_intr_bcom(sc_if1);
2495 }
2496 }
2497
2498 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2499
2500 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
2501 sk_start_locked(ifp0);
2502 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
2503 sk_start_locked(ifp1);
2504
2505 SK_UNLOCK(sc);
2506
2507 return;
2508 }
2509
/*
 * Bring the XMAC (GENESIS) out of reset and program it for operation:
 * station address, broadcast and error-frame policy, FIFO thresholds,
 * promiscuous/multicast filters, interrupt masks, and per-revision
 * MAC arbiter workarounds.  Also handles the external Broadcom PHY
 * bring-up for 1000baseTX boards that use the XMAC's GMII mode.
 */
static void
sk_init_xmac(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct ifnet *ifp;
	/* Magic register writes for buggy early BCM5400 revisions. */
	struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int i = 0;
		u_int32_t val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then set its interrupt mask. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Honor the interface's broadcast-reception flag. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Accept oversized frames only when an MTU > standard is set. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		/* External PHY: mask all XMAC interrupt sources. */
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/*
		 * NOTE(review): C1 path also writes the B2 recovery
		 * value -- matches the OpenBSD origin; confirm intent.
		 */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
2674
/*
 * Reset and initialize the Yukon GMAC and GPHY: PHY configuration
 * for fiber vs. copper media, MIB counter reset, receive/transmit
 * parameters, station address, filters, interrupt masks, and the
 * RX/TX MAC FIFOs.
 */
static void
sk_init_yukon(sc_if)
	struct sk_if_softc *sc_if;
{
	u_int32_t phy;
	u_int16_t reg;
	struct sk_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/* Take PHY out of reset. */
		sk_win_write_4(sc, SK_GPIO,
			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
	}

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	/*
	 * NOTE(review): GMAC reset is released and immediately
	 * re-asserted here before the final release below -- matches
	 * the OpenBSD origin; confirm against the GMAC documentation.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	/* Base GPHY configuration. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Select fiber or copper mode based on the board's PMD type. */
	switch(sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	/* Apply the PHY configuration while releasing the reset. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		/* Allow jumbo frame lengths when a large MTU is set. */
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      IFP2ENADDR(sc_if->sk_ifp)[i * 2] |
			      IFP2ENADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
	}

	/* Write Source Address 2, copied from the EEPROM MAC registers. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/*
	 * Counter-overflow interrupt masks; writing 0 presumably masks
	 * them all off (sk_intr_yukon only acks the ISR) -- confirm
	 * against the GMAC documentation.
	 */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
2783
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 *
 * Public entry point for interface initialization: acquire the
 * per-interface lock and defer to sk_init_locked(), which requires
 * the lock to be held.
 */
static void
sk_init(xsc)
	void *xsc;
{
	struct sk_if_softc *sc_if;

	sc_if = (struct sk_if_softc *)xsc;
	SK_IF_LOCK(sc_if);
	sk_init_locked(sc_if);
	SK_IF_UNLOCK(sc_if);
}
2800
/*
 * Initialize the interface with the per-interface lock held: stop any
 * running state, program the MAC (XMAC or Yukon GMAC), the transmit
 * arbiter, RAM buffers and BMUs, set up the descriptor rings, apply
 * interrupt moderation, unmask this port's interrupts, and finally
 * start the BMUs and MAC state machines.
 */
static void
sk_init_locked(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;
	u_int16_t reg;
	u_int32_t imr;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Already running: nothing to do. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	/* Select the configured media on the PHY. */
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Set interrupt moderation if changed via sysctl. */
	/* SK_LOCK(sc); */
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
		if (bootverbose)
			printf("skc%d: interrupt moderation is %d us\n",
			    sc->sk_unit, sc->sk_int_mod);
	}
	/* SK_UNLOCK(sc); */

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		/* Enable GMAC TX/RX; let autoneg set speed/duplex. */
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Mark the interface up and ready for traffic. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
2949
/*
 * Stop the interface with the per-interface lock held: cancel the
 * link-poll callout, reset the PHY and MAC, take the BMUs and RAM
 * buffers offline, mask this port's interrupts, and release every
 * mbuf still attached to the RX/TX rings.
 */
static void
sk_stop(sc_if)
	struct sk_if_softc *sc_if;
{
	int i;
	struct sk_softc *sc;
	struct ifnet *ifp;

	SK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/* Cancel the link poller. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the XM_* accesses here and below are issued
	 * unconditionally, i.e. also on Yukon boards -- matches the
	 * original code; confirm they are harmless there.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/*
	 * NOTE(review): the TX LED is stopped with the RX LED constant
	 * -- matches the original; presumably the stop values coincide.
	 */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Ack any pending XMAC interrupt and mask all its sources. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	/* Mark the interface down and idle. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	return;
}
3032
3033 static int
3034 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3035 {
3036 int error, value;
3037
3038 if (!arg1)
3039 return (EINVAL);
3040 value = *(int *)arg1;
3041 error = sysctl_handle_int(oidp, &value, 0, req);
3042 if (error || !req->newptr)
3043 return (error);
3044 if (value < low || value > high)
3045 return (EINVAL);
3046 *(int *)arg1 = value;
3047 return (0);
3048 }
3049
/*
 * Sysctl handler for the interrupt moderation timer: restricts new
 * values to the [SK_IM_MIN, SK_IM_MAX] range via sysctl_int_range().
 */
static int
sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
}
Cache object: 52d08cb67c0badb682403662ed9fc637
|