FreeBSD/Linux Kernel Cross Reference
sys/pci/if_sk.c
1 /* $OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD$
35 */
36
37 /*
38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39 *
40 * Permission to use, copy, modify, and distribute this software for any
41 * purpose with or without fee is hereby granted, provided that the above
42 * copyright notice and this permission notice appear in all copies.
43 *
44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51 */
52
53 /*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71
72 /*
73 * The SysKonnect gigabit ethernet adapters consist of two main
74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76 * components and a PHY while the GEnesis controller provides a PCI
77 * interface with DMA support. Each card may have between 512K and
78 * 2MB of SRAM on board depending on the configuration.
79 *
80 * The SysKonnect GEnesis controller can have either one or two XMAC
81 * chips connected to it, allowing single or dual port NIC configurations.
82 * SysKonnect has the distinction of being the only vendor on the market
83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85 * XMAC registers. This driver takes advantage of these features to allow
86 * both XMACs to operate as independent interfaces.
87 */
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/sockio.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <vm/vm.h> /* for vtophys */
107 #include <vm/pmap.h> /* for vtophys */
108 #include <machine/clock.h> /* for DELAY */
109 #include <machine/bus_pio.h>
110 #include <machine/bus_memio.h>
111 #include <machine/bus.h>
112 #include <machine/resource.h>
113 #include <sys/bus.h>
114 #include <sys/rman.h>
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118 #include <dev/mii/brgphyreg.h>
119
120 #include <pci/pcireg.h>
121 #include <pci/pcivar.h>
122
123 #if 0
124 #define SK_USEIOSPACE
125 #endif
126
127 #include <pci/if_skreg.h>
128 #include <pci/xmaciireg.h>
129 #include <pci/yukonreg.h>
130
131 /* "controller miibus0" required. See GENERIC if you get errors here. */
132 #include "miibus_if.h"
133
134 #ifndef lint
135 static const char rcsid[] =
136 "$FreeBSD$";
137 #endif
138
/*
 * Table of PCI vendor/device IDs this driver claims, terminated by an
 * all-zero sentinel entry.  Scanned during probe to match supported
 * SysKonnect GEnesis and Marvell Yukon based gigabit adapters.
 */
static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
172
173 static int skc_probe __P((device_t));
174 static int skc_attach __P((device_t));
175 static int skc_detach __P((device_t));
176 static void skc_shutdown __P((device_t));
177 static int sk_probe __P((device_t));
178 static int sk_attach __P((device_t));
179 static int sk_detach __P((device_t));
180 static void sk_tick __P((void *));
181 static void sk_intr __P((void *));
182 static void sk_intr_bcom __P((struct sk_if_softc *));
183 static void sk_intr_xmac __P((struct sk_if_softc *));
184 static void sk_intr_yukon __P((struct sk_if_softc *));
185 static void sk_rxeof __P((struct sk_if_softc *));
186 static void sk_txeof __P((struct sk_if_softc *));
187 static int sk_encap __P((struct sk_if_softc *, struct mbuf *,
188 u_int32_t *));
189 static void sk_start __P((struct ifnet *));
190 static int sk_ioctl __P((struct ifnet *, u_long, caddr_t));
191 static void sk_init __P((void *));
192 static void sk_init_xmac __P((struct sk_if_softc *));
193 static void sk_init_yukon __P((struct sk_if_softc *));
194 static void sk_stop __P((struct sk_if_softc *));
195 static void sk_watchdog __P((struct ifnet *));
196 static int sk_ifmedia_upd __P((struct ifnet *));
197 static void sk_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
198 static void sk_reset __P((struct sk_softc *));
199 static int sk_newbuf __P((struct sk_if_softc *,
200 struct sk_chain *, struct mbuf *));
201 static int sk_alloc_jumbo_mem __P((struct sk_if_softc *));
202 static void *sk_jalloc __P((struct sk_if_softc *));
203 static void sk_jfree __P((caddr_t, u_int));
204 static void sk_jref __P((caddr_t, u_int));
205 static int sk_init_rx_ring __P((struct sk_if_softc *));
206 static void sk_init_tx_ring __P((struct sk_if_softc *));
207 static u_int32_t sk_win_read_4 __P((struct sk_softc *, int));
208 static u_int16_t sk_win_read_2 __P((struct sk_softc *, int));
209 static u_int8_t sk_win_read_1 __P((struct sk_softc *, int));
210 static void sk_win_write_4 __P((struct sk_softc *, int, u_int32_t));
211 static void sk_win_write_2 __P((struct sk_softc *, int, u_int32_t));
212 static void sk_win_write_1 __P((struct sk_softc *, int, u_int32_t));
213 static u_int8_t sk_vpd_readbyte __P((struct sk_softc *, int));
214 static void sk_vpd_read_res __P((struct sk_softc *,
215 struct vpd_res *, int));
216 static void sk_vpd_read __P((struct sk_softc *));
217
218 static int sk_miibus_readreg __P((device_t, int, int));
219 static int sk_miibus_writereg __P((device_t, int, int, int));
220 static void sk_miibus_statchg __P((device_t));
221
222 static int sk_xmac_miibus_readreg __P((struct sk_if_softc *, int, int));
223 static int sk_xmac_miibus_writereg __P((struct sk_if_softc *, int, int,
224 int));
225 static void sk_xmac_miibus_statchg __P((struct sk_if_softc *));
226
227 static int sk_marv_miibus_readreg __P((struct sk_if_softc *, int, int));
228 static int sk_marv_miibus_writereg __P((struct sk_if_softc *, int, int,
229 int));
230 static void sk_marv_miibus_statchg __P((struct sk_if_softc *));
231
232 static u_int32_t sk_calchash __P((caddr_t));
233 static void sk_setfilt __P((struct sk_if_softc *, caddr_t, int));
234 static void sk_setmulti __P((struct sk_if_softc *));
235
236 #ifdef SK_USEIOSPACE
237 #define SK_RES SYS_RES_IOPORT
238 #define SK_RID SK_PCI_LOIO
239 #else
240 #define SK_RES SYS_RES_MEMORY
241 #define SK_RID SK_PCI_LOMEM
242 #endif
243
244 /*
245 * Note that we have newbus methods for both the GEnesis controller
246 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
247 * the miibus code is a child of the XMACs. We need to do it this way
248 * so that the miibus drivers can access the PHY registers on the
249 * right PHY. It's not quite what I had in mind, but it's the only
250 * design that achieves the desired effect.
251 */
/*
 * Newbus method tables and module registration.  "skc" is the PCI
 * controller device; each MAC port attaches as an "sk" child of it,
 * and miibus attaches below each "sk" so the PHY drivers talk to the
 * correct port's PHY registers (see the design note above).
 */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Register: skc on pci, sk on skc, miibus on sk. */
DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
304
/*
 * Read-modify-write helpers: set or clear the bits in mask 'x' of
 * register 'reg', for plain CSR access and for windowed 4/2-byte
 * register access.  Note 'x' is expanded unparenthesized, so callers
 * must pass a simple mask expression.
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
322
323 static u_int32_t sk_win_read_4(sc, reg)
324 struct sk_softc *sc;
325 int reg;
326 {
327 #ifdef SK_USEIOSPACE
328 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
329 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
330 #else
331 return(CSR_READ_4(sc, reg));
332 #endif
333 }
334
335 static u_int16_t sk_win_read_2(sc, reg)
336 struct sk_softc *sc;
337 int reg;
338 {
339 #ifdef SK_USEIOSPACE
340 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
341 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
342 #else
343 return(CSR_READ_2(sc, reg));
344 #endif
345 }
346
347 static u_int8_t sk_win_read_1(sc, reg)
348 struct sk_softc *sc;
349 int reg;
350 {
351 #ifdef SK_USEIOSPACE
352 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
353 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
354 #else
355 return(CSR_READ_1(sc, reg));
356 #endif
357 }
358
359 static void sk_win_write_4(sc, reg, val)
360 struct sk_softc *sc;
361 int reg;
362 u_int32_t val;
363 {
364 #ifdef SK_USEIOSPACE
365 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
366 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
367 #else
368 CSR_WRITE_4(sc, reg, val);
369 #endif
370 return;
371 }
372
373 static void sk_win_write_2(sc, reg, val)
374 struct sk_softc *sc;
375 int reg;
376 u_int32_t val;
377 {
378 #ifdef SK_USEIOSPACE
379 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
380 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
381 #else
382 CSR_WRITE_2(sc, reg, val);
383 #endif
384 return;
385 }
386
387 static void sk_win_write_1(sc, reg, val)
388 struct sk_softc *sc;
389 int reg;
390 u_int32_t val;
391 {
392 #ifdef SK_USEIOSPACE
393 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
394 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
395 #else
396 CSR_WRITE_1(sc, reg, val);
397 #endif
398 return;
399 }
400
401 /*
402 * The VPD EEPROM contains Vital Product Data, as suggested in
403 * the PCI 2.1 specification. The VPD data is separared into areas
404 * denoted by resource IDs. The SysKonnect VPD contains an ID string
405 * resource (the name of the adapter), a read-only area resource
406 * containing various key/data fields and a read/write area which
407 * can be used to store asset management information or log messages.
408 * We read the ID string and read-only into buffers attached to
409 * the controller softc structure for later use. At the moment,
410 * we only use the ID string during sk_attach().
411 */
/*
 * Read one byte from the VPD EEPROM.  Writing the address register
 * starts the fetch; we then poll (up to SK_TIMEOUT microseconds) for
 * the completion flag before reading the data register.
 *
 * Returns 0 on timeout -- indistinguishable from a legitimate zero
 * data byte, so callers cannot detect a dead EEPROM through this
 * routine alone.
 */
static u_int8_t sk_vpd_readbyte(sc, addr)
	struct sk_softc *sc;
	int addr;
{
	int i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}
431
432 static void sk_vpd_read_res(sc, res, addr)
433 struct sk_softc *sc;
434 struct vpd_res *res;
435 int addr;
436 {
437 int i;
438 u_int8_t *ptr;
439
440 ptr = (u_int8_t *)res;
441 for (i = 0; i < sizeof(struct vpd_res); i++)
442 ptr[i] = sk_vpd_readbyte(sc, i + addr);
443
444 return;
445 }
446
447 static void sk_vpd_read(sc)
448 struct sk_softc *sc;
449 {
450 int pos = 0, i;
451 struct vpd_res res;
452
453 if (sc->sk_vpd_prodname != NULL)
454 free(sc->sk_vpd_prodname, M_DEVBUF);
455 if (sc->sk_vpd_readonly != NULL)
456 free(sc->sk_vpd_readonly, M_DEVBUF);
457 sc->sk_vpd_prodname = NULL;
458 sc->sk_vpd_readonly = NULL;
459
460 sk_vpd_read_res(sc, &res, pos);
461
462 /*
463 * Bail out quietly if the eeprom appears to be missing or empty.
464 */
465 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
466 return;
467
468 if (res.vr_id != VPD_RES_ID) {
469 printf("skc%d: bad VPD resource id: expected %x got %x\n",
470 sc->sk_unit, VPD_RES_ID, res.vr_id);
471 return;
472 }
473
474 pos += sizeof(res);
475 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
476 for (i = 0; i < res.vr_len; i++)
477 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
478 sc->sk_vpd_prodname[i] = '\0';
479 pos += i;
480
481 sk_vpd_read_res(sc, &res, pos);
482
483 if (res.vr_id != VPD_RES_READ) {
484 printf("skc%d: bad VPD resource id: expected %x got %x\n",
485 sc->sk_unit, VPD_RES_READ, res.vr_id);
486 return;
487 }
488
489 pos += sizeof(res);
490 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
491 for (i = 0; i < res.vr_len; i++)
492 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
493
494 return;
495 }
496
497 static int sk_miibus_readreg(dev, phy, reg)
498 device_t dev;
499 int phy, reg;
500 {
501 struct sk_if_softc *sc_if;
502
503 sc_if = device_get_softc(dev);
504
505 switch(sc_if->sk_softc->sk_type) {
506 case SK_GENESIS:
507 return(sk_xmac_miibus_readreg(sc_if, phy, reg));
508 case SK_YUKON:
509 return(sk_marv_miibus_readreg(sc_if, phy, reg));
510 }
511
512 return(0);
513 }
514
515 static int sk_miibus_writereg(dev, phy, reg, val)
516 device_t dev;
517 int phy, reg, val;
518 {
519 struct sk_if_softc *sc_if;
520
521 sc_if = device_get_softc(dev);
522
523 switch(sc_if->sk_softc->sk_type) {
524 case SK_GENESIS:
525 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
526 case SK_YUKON:
527 return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
528 }
529
530 return(0);
531 }
532
533 static void sk_miibus_statchg(dev)
534 device_t dev;
535 {
536 struct sk_if_softc *sc_if;
537
538 sc_if = device_get_softc(dev);
539
540 switch(sc_if->sk_softc->sk_type) {
541 case SK_GENESIS:
542 sk_xmac_miibus_statchg(sc_if);
543 break;
544 case SK_YUKON:
545 sk_marv_miibus_statchg(sc_if);
546 break;
547 }
548
549 return;
550 }
551
/*
 * Read a PHY register through the XMAC's MII machinery.  The XMAC's
 * internal PHY only answers at address 0, so other addresses read as
 * 0 for that PHY type.  External (e.g. GMII) PHYs additionally need
 * a data-ready handshake before the value can be fetched.  Returns
 * the register value, or 0 on timeout.
 */
static int sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	int i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Result discarded; presumably this read kicks off the PHY
	   access cycle -- confirm against the XMAC II datasheet. */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: poll for the data-ready flag. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			return(0);
		}
	}
	DELAY(1);
	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
580
/*
 * Write a PHY register through the XMAC's MII machinery: wait for the
 * MII state machine to go idle, write the value, then wait for the
 * write to complete.  Returns ETIMEDOUT if the interface never goes
 * idle; a timeout on the completion poll is only logged and still
 * returns 0.
 */
static int sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc *sc_if;
	int phy, reg, val;
{
	int i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Wait for the MII machine to be free (no DELAY here; a tight
	   spin on the busy bit, bounded by SK_TIMEOUT iterations). */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		return(ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	/* Now wait for the write itself to finish. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("sk%d: phy write timed out\n", sc_if->sk_unit);

	return(0);
}
610
611 static void sk_xmac_miibus_statchg(sc_if)
612 struct sk_if_softc *sc_if;
613 {
614 struct mii_data *mii;
615
616 mii = device_get_softc(sc_if->sk_miibus);
617
618 /*
619 * If this is a GMII PHY, manually set the XMAC's
620 * duplex mode accordingly.
621 */
622 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
623 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
624 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
625 } else {
626 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
627 }
628 }
629
630 return;
631 }
632
/*
 * Read a PHY register through the Yukon's SMI interface.  Only PHY
 * address 0 with a recognized Marvell PHY type is serviced; anything
 * else reads as 0.  Issues the read command, then polls for the
 * READ_VALID flag before fetching the data register.  Returns 0 on
 * timeout (indistinguishable from a zero register value).
 */
static int sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	u_int16_t val;
	int i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n",
		    sc_if->sk_unit);
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	return(val);
}
666
667 static int sk_marv_miibus_writereg(sc_if, phy, reg, val)
668 struct sk_if_softc *sc_if;
669 int phy, reg, val;
670 {
671 int i;
672
673 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
674 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
675 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
676
677 for (i = 0; i < SK_TIMEOUT; i++) {
678 DELAY(1);
679 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
680 break;
681 }
682
683 return(0);
684 }
685
/*
 * Link-state change handler for Yukon ports.  Intentionally empty:
 * no manual MAC adjustment is performed here (unlike the XMAC case,
 * which must sync duplex by hand).
 */
static void sk_marv_miibus_statchg(sc_if)
	struct sk_if_softc *sc_if;
{
	return;
}
691
692 #define SK_POLY 0xEDB88320
693 #define SK_BITS 6
694
695 static u_int32_t sk_calchash(addr)
696 caddr_t addr;
697 {
698 u_int32_t idx, bit, data, crc;
699
700 /* Compute CRC for the address value. */
701 crc = 0xFFFFFFFF; /* initial value */
702
703 for (idx = 0; idx < 6; idx++) {
704 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
705 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
706 }
707
708 return (~crc & ((1 << SK_BITS) - 1));
709 }
710
/*
 * Program a station address into one of the XMAC's perfect-filter
 * slots, 16 bits at a time.
 *
 * NOTE(review): the u_int16_t loads assume 'addr' is at least 2-byte
 * aligned and that host byte order matches what the XMAC expects --
 * presumably fine on the platforms this driver targets, but verify
 * before reusing on strict-alignment or big-endian hardware.
 */
static void sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc *sc_if;
	caddr_t addr;
	int slot;
{
	int base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}
726
/*
 * Reprogram the multicast filter from the interface's multicast
 * address list.  All existing filter state is cleared first, then:
 * in ALLMULTI/PROMISC mode the hash table is opened wide; otherwise
 * on GEnesis the first XM_RXFILT_MAX-1 groups go into perfect-filter
 * slots (slot 0 is skipped -- it holds the station address) and the
 * rest into the 64-bit hash table, while Yukon uses the hash table
 * only.
 */
static void sk_setmulti(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct ifmultiaddr *ifma;
	/* All-zero address used to blank unused perfect-filter slots. */
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };


	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
		    ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_link.le_next == NULL)
				break;
		}
		/*
		 * Now traverse the list backwards.
		 * NOTE(review): this walks le_prev by casting it to an
		 * ifmultiaddr pointer, which relies on ifma_link being
		 * the first member of struct ifmultiaddr -- confirm
		 * against this kernel's if_var.h before touching.
		 */
		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
		    ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = sk_calchash(
				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Finally, load the accumulated hash bits into the hardware. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
			       XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}
810
811 static int sk_init_rx_ring(sc_if)
812 struct sk_if_softc *sc_if;
813 {
814 struct sk_chain_data *cd = &sc_if->sk_cdata;
815 struct sk_ring_data *rd = sc_if->sk_rdata;
816 int i;
817
818 bzero((char *)rd->sk_rx_ring,
819 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
820
821 for (i = 0; i < SK_RX_RING_CNT; i++) {
822 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
823 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
824 return(ENOBUFS);
825 if (i == (SK_RX_RING_CNT - 1)) {
826 cd->sk_rx_chain[i].sk_next =
827 &cd->sk_rx_chain[0];
828 rd->sk_rx_ring[i].sk_next =
829 vtophys(&rd->sk_rx_ring[0]);
830 } else {
831 cd->sk_rx_chain[i].sk_next =
832 &cd->sk_rx_chain[i + 1];
833 rd->sk_rx_ring[i].sk_next =
834 vtophys(&rd->sk_rx_ring[i + 1]);
835 }
836 }
837
838 sc_if->sk_cdata.sk_rx_prod = 0;
839 sc_if->sk_cdata.sk_rx_cons = 0;
840
841 return(0);
842 }
843
844 static void sk_init_tx_ring(sc_if)
845 struct sk_if_softc *sc_if;
846 {
847 struct sk_chain_data *cd = &sc_if->sk_cdata;
848 struct sk_ring_data *rd = sc_if->sk_rdata;
849 int i;
850
851 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
852 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
853
854 for (i = 0; i < SK_TX_RING_CNT; i++) {
855 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
856 if (i == (SK_TX_RING_CNT - 1)) {
857 cd->sk_tx_chain[i].sk_next =
858 &cd->sk_tx_chain[0];
859 rd->sk_tx_ring[i].sk_next =
860 vtophys(&rd->sk_tx_ring[0]);
861 } else {
862 cd->sk_tx_chain[i].sk_next =
863 &cd->sk_tx_chain[i + 1];
864 rd->sk_tx_ring[i].sk_next =
865 vtophys(&rd->sk_tx_ring[i + 1]);
866 }
867 }
868
869 sc_if->sk_cdata.sk_tx_prod = 0;
870 sc_if->sk_cdata.sk_tx_cons = 0;
871 sc_if->sk_cdata.sk_tx_cnt = 0;
872
873 return;
874 }
875
/*
 * Stock a receive descriptor with an mbuf.  If 'm' is NULL, a fresh
 * mbuf header is allocated and a 9K buffer from our private jumbo
 * pool is attached as external storage; otherwise the caller's mbuf
 * is recycled with its pointers and lengths reset to defaults.
 * Returns ENOBUFS if either allocation fails, 0 on success.
 */
static int sk_newbuf(sc_if, c, m)
	struct sk_if_softc *sc_if;
	struct sk_chain *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf; sk_jfree/sk_jref
		   handle the pool's free/refcount bookkeeping. */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
		    m_new->m_len = SK_MCLBYTES;
		m_new->m_ext.ext_free = sk_jfree;
		m_new->m_ext.ext_ref = sk_jref;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Hook the mbuf to the descriptor and mark it ready for RX. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
934
935 /*
936 * Allocate jumbo buffer storage. The SysKonnect adapters support
937 * "jumbograms" (9K frames), although SysKonnect doesn't currently
938 * use them in their drivers. In order for us to use them, we need
939 * large 9K receive buffers, however standard mbuf clusters are only
940 * 2048 bytes in size. Consequently, we need to allocate and manage
941 * our own jumbo buffer pool. Fortunately, this does not require an
942 * excessive amount of additional code.
943 */
944 static int sk_alloc_jumbo_mem(sc_if)
945 struct sk_if_softc *sc_if;
946 {
947 caddr_t ptr;
948 register int i;
949 struct sk_jpool_entry *entry;
950
951 /* Grab a big chunk o' storage. */
952 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
953 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
954
955 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
956 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
957 return(ENOBUFS);
958 }
959
960 SLIST_INIT(&sc_if->sk_jfree_listhead);
961 SLIST_INIT(&sc_if->sk_jinuse_listhead);
962
963 /*
964 * Now divide it up into 9K pieces and save the addresses
965 * in an array. Note that we play an evil trick here by using
966 * the first few bytes in the buffer to hold the the address
967 * of the softc structure for this interface. This is because
968 * sk_jfree() needs it, but it is called by the mbuf management
969 * code which will not pass it to us explicitly.
970 */
971 ptr = sc_if->sk_cdata.sk_jumbo_buf;
972 for (i = 0; i < SK_JSLOTS; i++) {
973 u_int64_t **aptr;
974 aptr = (u_int64_t **)ptr;
975 aptr[0] = (u_int64_t *)sc_if;
976 ptr += sizeof(u_int64_t);
977 sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
978 sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
979 ptr += SK_MCLBYTES;
980 entry = malloc(sizeof(struct sk_jpool_entry),
981 M_DEVBUF, M_NOWAIT);
982 if (entry == NULL) {
983 free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
984 sc_if->sk_cdata.sk_jumbo_buf = NULL;
985 printf("sk%d: no memory for jumbo "
986 "buffer queue!\n", sc_if->sk_unit);
987 return(ENOBUFS);
988 }
989 entry->slot = i;
990 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
991 entry, jpool_entries);
992 }
993
994 return(0);
995 }
996
997 /*
998 * Allocate a jumbo buffer.
999 */
1000 static void *sk_jalloc(sc_if)
1001 struct sk_if_softc *sc_if;
1002 {
1003 struct sk_jpool_entry *entry;
1004
1005 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1006
1007 if (entry == NULL) {
1008 #ifdef SK_VERBOSE
1009 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1010 #endif
1011 return(NULL);
1012 }
1013
1014 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1015 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1016 sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
1017 return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
1018 }
1019
1020 /*
1021 * Adjust usage count on a jumbo buffer. In general this doesn't
1022 * get used much because our jumbo buffers don't get passed around
1023 * a lot, but it's implemented for correctness.
1024 */
/*
 * Adjust usage count on a jumbo buffer (mbuf ext_ref hook).  The
 * owning softc is recovered from the pointer stashed immediately
 * before the buffer by sk_alloc_jumbo_mem(), and the slot index is
 * derived from the buffer's offset within the pool.  Panics on any
 * inconsistency, since that indicates mbuf-layer corruption.
 */
static void sk_jref(buf, size)
	caddr_t buf;
	u_int size;
{
	struct sk_if_softc *sc_if;
	u_int64_t **aptr;
	register int i;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc_if = (struct sk_if_softc *)(aptr[0]);

	if (sc_if == NULL)
		panic("sk_jref: can't find softc pointer!");

	if (size != SK_MCLBYTES)
		panic("sk_jref: adjusting refcount of buf of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
		panic("sk_jref: buffer already free!");
	else
		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;

	return;
}
1058
1059 /*
1060 * Release a jumbo buffer.
1061 */
/*
 * Release a jumbo buffer (mbuf ext_free hook).  Recovers the softc
 * and slot index the same way sk_jref() does, decrements the slot's
 * use count, and when it reaches zero moves a bookkeeping entry from
 * the in-use list back to the free list so the slot can be handed
 * out again.  Panics on inconsistent pool state.
 */
static void sk_jfree(buf, size)
	caddr_t buf;
	u_int size;
{
	struct sk_if_softc *sc_if;
	u_int64_t **aptr;
	int i;
	struct sk_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc_if = (struct sk_if_softc *)(aptr[0]);

	if (sc_if == NULL)
		panic("sk_jfree: can't find softc pointer!");

	if (size != SK_MCLBYTES)
		panic("sk_jfree: freeing buffer of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");
	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
		panic("sk_jfree: buffer already free!");
	else {
		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
			/* Recycle any in-use entry to carry this slot
			   back to the free list. */
			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
			if (entry == NULL)
				panic("sk_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
			    jpool_entries);
			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
			    entry, jpool_entries);
		}
	}

	return;
}
1106
1107 /*
1108 * Set media options.
1109 */
1110 static int sk_ifmedia_upd(ifp)
1111 struct ifnet *ifp;
1112 {
1113 struct sk_if_softc *sc_if = ifp->if_softc;
1114 struct mii_data *mii;
1115
1116 mii = device_get_softc(sc_if->sk_miibus);
1117 sk_init(sc_if);
1118 mii_mediachg(mii);
1119
1120 return(0);
1121 }
1122
1123 /*
1124 * Report current media status.
1125 */
1126 static void sk_ifmedia_sts(ifp, ifmr)
1127 struct ifnet *ifp;
1128 struct ifmediareq *ifmr;
1129 {
1130 struct sk_if_softc *sc_if;
1131 struct mii_data *mii;
1132
1133 sc_if = ifp->if_softc;
1134 mii = device_get_softc(sc_if->sk_miibus);
1135
1136 mii_pollstat(mii);
1137 ifmr->ifm_active = mii->mii_media_active;
1138 ifmr->ifm_status = mii->mii_media_status;
1139
1140 return;
1141 }
1142
/*
 * Handle socket ioctl requests for the interface: address changes,
 * MTU changes (up to jumbo size), flag changes (including fast-path
 * promiscuous mode toggling), multicast filter updates and media
 * requests.  Returns 0 or an errno value.
 */
static int sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;
	struct mii_data		*mii;

	/* Block network interrupts while we modify interface state. */
	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* Anything up to the jumbo MTU is acceptable. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Reinitialize so buffering matches the new MTU. */
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while the interface
			 * is running, toggle promiscuous reception in
			 * hardware without a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				switch(sc->sk_type) {
				case SK_GENESIS:
					SK_XM_SETBIT_4(sc_if, XM_MODE,
					    XM_MODE_RX_PROMISC);
					break;
				case SK_YUKON:
					/* On Yukon, clearing the unicast/
					 * multicast filter bits enables
					 * promiscuous reception. */
					SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
					    YU_RCR_UFLEN | YU_RCR_MUFLEN);
					break;
				}
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				switch(sc->sk_type) {
				case SK_GENESIS:
					SK_XM_CLRBIT_4(sc_if, XM_MODE,
					    XM_MODE_RX_PROMISC);
					break;
				case SK_YUKON:
					SK_YU_SETBIT_2(sc_if, YUKON_RCR,
					    YU_RCR_UFLEN | YU_RCR_MUFLEN);
					break;
				}
				sk_setmulti(sc_if);
			} else
				/* Interface coming up, or some other
				 * flag change: do a full init. */
				sk_init(sc_if);
		} else {
			/* Interface is being taken down. */
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so we can detect changes next time. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the multicast filter. */
		sk_setmulti(sc_if);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media requests are delegated to the MII layer. */
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
1227
1228 /*
1229 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1230 * IDs against our list and return a device name if we find a match.
1231 */
1232 static int skc_probe(dev)
1233 device_t dev;
1234 {
1235 struct sk_softc *sc;
1236 struct sk_type *t = sk_devs;
1237
1238 sc = device_get_softc(dev);
1239
1240 while(t->sk_name != NULL) {
1241 if ((pci_get_vendor(dev) == t->sk_vid) &&
1242 (pci_get_device(dev) == t->sk_did)) {
1243 device_set_desc(dev, t->sk_name);
1244 return(0);
1245 }
1246 t++;
1247 }
1248
1249 return(ENXIO);
1250 }
1251
1252 /*
1253 * Force the GEnesis into reset, then bring it out of reset.
1254 */
1255 static void sk_reset(sc)
1256 struct sk_softc *sc;
1257 {
1258 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1259 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1260 if (sc->sk_type == SK_YUKON)
1261 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1262
1263 DELAY(1000);
1264 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1265 DELAY(2);
1266 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1267 if (sc->sk_type == SK_YUKON)
1268 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1269
1270 if (sc->sk_type == SK_GENESIS) {
1271 /* Configure packet arbiter */
1272 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1273 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1274 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1275 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1276 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1277 }
1278
1279 /* Enable RAM interface */
1280 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1281
1282 /*
1283 * Configure interrupt moderation. The moderation timer
1284 * defers interrupts specified in the interrupt moderation
1285 * timer mask based on the timeout specified in the interrupt
1286 * moderation timer init register. Each bit in the timer
1287 * register represents 18.825ns, so to specify a timeout in
1288 * microseconds, we have to multiply by 54.
1289 */
1290 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
1291 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1292 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1293 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1294
1295 return;
1296 }
1297
1298 static int sk_probe(dev)
1299 device_t dev;
1300 {
1301 struct sk_softc *sc;
1302
1303 sc = device_get_softc(device_get_parent(dev));
1304
1305 /*
1306 * Not much to do here. We always know there will be
1307 * at least one XMAC present, and if there are two,
1308 * skc_attach() will create a second device instance
1309 * for us.
1310 */
1311 switch (sc->sk_type) {
1312 case SK_GENESIS:
1313 device_set_desc(dev, "XaQti Corp. XMAC II");
1314 break;
1315 case SK_YUKON:
1316 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1317 break;
1318 }
1319
1320 return(0);
1321 }
1322
1323 /*
1324 * Each XMAC chip is attached as a separate logical IP interface.
1325 * Single port cards will have only one logical interface of course.
1326 */
1327 static int sk_attach(dev)
1328 device_t dev;
1329 {
1330 struct sk_softc *sc;
1331 struct sk_if_softc *sc_if;
1332 struct ifnet *ifp;
1333 int i, port;
1334
1335 if (dev == NULL)
1336 return(EINVAL);
1337
1338 sc_if = device_get_softc(dev);
1339 sc = device_get_softc(device_get_parent(dev));
1340 port = *(int *)device_get_ivars(dev);
1341 free(device_get_ivars(dev), M_DEVBUF);
1342 device_set_ivars(dev, NULL);
1343 sc_if->sk_dev = dev;
1344
1345 bzero((char *)sc_if, sizeof(struct sk_if_softc));
1346
1347 sc_if->sk_dev = dev;
1348 sc_if->sk_unit = device_get_unit(dev);
1349 sc_if->sk_port = port;
1350 sc_if->sk_softc = sc;
1351 sc->sk_if[port] = sc_if;
1352 if (port == SK_PORT_A)
1353 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1354 if (port == SK_PORT_B)
1355 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1356
1357 /*
1358 * Get station address for this interface. Note that
1359 * dual port cards actually come with three station
1360 * addresses: one for each port, plus an extra. The
1361 * extra one is used by the SysKonnect driver software
1362 * as a 'virtual' station address for when both ports
1363 * are operating in failover mode. Currently we don't
1364 * use this extra address.
1365 */
1366 for (i = 0; i < ETHER_ADDR_LEN; i++)
1367 sc_if->arpcom.ac_enaddr[i] =
1368 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1369
1370 printf("sk%d: Ethernet address: %6D\n",
1371 sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1372
1373 /*
1374 * Set up RAM buffer addresses. The NIC will have a certain
1375 * amount of SRAM on it, somewhere between 512K and 2MB. We
1376 * need to divide this up a) between the transmitter and
1377 * receiver and b) between the two XMACs, if this is a
1378 * dual port NIC. Our algotithm is to divide up the memory
1379 * evenly so that everyone gets a fair share.
1380 */
1381 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1382 u_int32_t chunk, val;
1383
1384 chunk = sc->sk_ramsize / 2;
1385 val = sc->sk_rboff / sizeof(u_int64_t);
1386 sc_if->sk_rx_ramstart = val;
1387 val += (chunk / sizeof(u_int64_t));
1388 sc_if->sk_rx_ramend = val - 1;
1389 sc_if->sk_tx_ramstart = val;
1390 val += (chunk / sizeof(u_int64_t));
1391 sc_if->sk_tx_ramend = val - 1;
1392 } else {
1393 u_int32_t chunk, val;
1394
1395 chunk = sc->sk_ramsize / 4;
1396 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1397 sizeof(u_int64_t);
1398 sc_if->sk_rx_ramstart = val;
1399 val += (chunk / sizeof(u_int64_t));
1400 sc_if->sk_rx_ramend = val - 1;
1401 sc_if->sk_tx_ramstart = val;
1402 val += (chunk / sizeof(u_int64_t));
1403 sc_if->sk_tx_ramend = val - 1;
1404 }
1405
1406 /* Read and save PHY type and set PHY address */
1407 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1408 switch(sc_if->sk_phytype) {
1409 case SK_PHYTYPE_XMAC:
1410 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1411 break;
1412 case SK_PHYTYPE_BCOM:
1413 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1414 break;
1415 case SK_PHYTYPE_MARV_COPPER:
1416 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1417 break;
1418 default:
1419 printf("skc%d: unsupported PHY type: %d\n",
1420 sc->sk_unit, sc_if->sk_phytype);
1421 return(ENODEV);
1422 }
1423
1424 /* Allocate the descriptor queues. */
1425 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1426 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1427
1428 if (sc_if->sk_rdata == NULL) {
1429 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1430 sc->sk_if[port] = NULL;
1431 return(ENOMEM);
1432 }
1433
1434 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1435
1436 /* Try to allocate memory for jumbo buffers. */
1437 if (sk_alloc_jumbo_mem(sc_if)) {
1438 printf("sk%d: jumbo buffer allocation failed\n",
1439 sc_if->sk_unit);
1440 contigfree(sc_if->sk_rdata,
1441 sizeof(struct sk_ring_data), M_DEVBUF);
1442 sc->sk_if[port] = NULL;
1443 return(ENOMEM);
1444 }
1445
1446 ifp = &sc_if->arpcom.ac_if;
1447 ifp->if_softc = sc_if;
1448 ifp->if_unit = sc_if->sk_unit;
1449 ifp->if_name = "sk";
1450 ifp->if_mtu = ETHERMTU;
1451 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1452 ifp->if_ioctl = sk_ioctl;
1453 ifp->if_output = ether_output;
1454 ifp->if_start = sk_start;
1455 ifp->if_watchdog = sk_watchdog;
1456 ifp->if_init = sk_init;
1457 ifp->if_baudrate = 1000000000;
1458 ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1459
1460 /*
1461 * Do miibus setup.
1462 */
1463 switch (sc->sk_type) {
1464 case SK_GENESIS:
1465 sk_init_xmac(sc_if);
1466 break;
1467 case SK_YUKON:
1468 sk_init_yukon(sc_if);
1469 break;
1470 }
1471
1472 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1473 sk_ifmedia_upd, sk_ifmedia_sts)) {
1474 printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1475 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM,
1476 M_DEVBUF);
1477 contigfree(sc_if->sk_rdata,
1478 sizeof(struct sk_ring_data), M_DEVBUF);
1479 return(ENXIO);
1480 }
1481
1482 /*
1483 * Call MI attach routine.
1484 */
1485 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1486 callout_handle_init(&sc_if->sk_tick_ch);
1487
1488 return(0);
1489 }
1490
1491 /*
1492 * Attach the interface. Allocate softc structures, do ifmedia
1493 * setup and ethernet/BPF attach.
1494 */
1495 static int skc_attach(dev)
1496 device_t dev;
1497 {
1498 int s;
1499 u_int32_t command;
1500 struct sk_softc *sc;
1501 int unit, error = 0, rid, *port;
1502 uint8_t skrs;
1503
1504 s = splimp();
1505
1506 sc = device_get_softc(dev);
1507 unit = device_get_unit(dev);
1508 bzero(sc, sizeof(struct sk_softc));
1509 switch (pci_get_device(dev)) {
1510 case DEVICEID_SK_V1:
1511 sc->sk_type = SK_GENESIS;
1512 break;
1513 case DEVICEID_SK_V2:
1514 case DEVICEID_3COM_3C940:
1515 case DEVICEID_LINKSYS_EG1032:
1516 case DEVICEID_DLINK_DGE530T:
1517 sc->sk_type = SK_YUKON;
1518 break;
1519 default:
1520 printf("skc%d: unknown device!\n", unit);
1521 error = ENXIO;
1522 goto fail;
1523 }
1524
1525 /*
1526 * Handle power management nonsense.
1527 */
1528 command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1529 if (command == 0x01) {
1530 command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1531 if (command & SK_PSTATE_MASK) {
1532 u_int32_t iobase, membase, irq;
1533
1534 /* Save important PCI config data. */
1535 iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1536 membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1537 irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1538
1539 /* Reset the power state. */
1540 printf("skc%d: chip is in D%d power mode "
1541 "-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1542 command &= 0xFFFFFFFC;
1543 pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1544
1545 /* Restore PCI config data. */
1546 pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1547 pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1548 pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1549 }
1550 }
1551
1552 /*
1553 * Map control/status registers.
1554 */
1555 command = pci_read_config(dev, PCIR_COMMAND, 4);
1556 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1557 pci_write_config(dev, PCIR_COMMAND, command, 4);
1558 command = pci_read_config(dev, PCIR_COMMAND, 4);
1559
1560 #ifdef SK_USEIOSPACE
1561 if (!(command & PCIM_CMD_PORTEN)) {
1562 printf("skc%d: failed to enable I/O ports!\n", unit);
1563 error = ENXIO;
1564 goto fail;
1565 }
1566 #else
1567 if (!(command & PCIM_CMD_MEMEN)) {
1568 printf("skc%d: failed to enable memory mapping!\n", unit);
1569 error = ENXIO;
1570 goto fail;
1571 }
1572 #endif
1573
1574 rid = SK_RID;
1575 sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1576 0, ~0, 1, RF_ACTIVE);
1577
1578 if (sc->sk_res == NULL) {
1579 printf("sk%d: couldn't map ports/memory\n", unit);
1580 error = ENXIO;
1581 goto fail;
1582 }
1583
1584 sc->sk_btag = rman_get_bustag(sc->sk_res);
1585 sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1586
1587 /* Allocate interrupt */
1588 rid = 0;
1589 sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1590 RF_SHAREABLE | RF_ACTIVE);
1591
1592 if (sc->sk_irq == NULL) {
1593 printf("skc%d: couldn't map interrupt\n", unit);
1594 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1595 error = ENXIO;
1596 goto fail;
1597 }
1598
1599 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1600 sk_intr, sc, &sc->sk_intrhand);
1601
1602 if (error) {
1603 printf("skc%d: couldn't set up irq\n", unit);
1604 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1605 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1606 goto fail;
1607 }
1608
1609 /* Reset the adapter. */
1610 sk_reset(sc);
1611
1612 sc->sk_unit = unit;
1613
1614 /* Read and save vital product data from EEPROM. */
1615 sk_vpd_read(sc);
1616
1617 skrs = sk_win_read_1(sc, SK_EPROM0);
1618 if (sc->sk_type == SK_GENESIS) {
1619 /* Read and save RAM size and RAMbuffer offset */
1620 switch(skrs) {
1621 case SK_RAMSIZE_512K_64:
1622 sc->sk_ramsize = 0x80000;
1623 sc->sk_rboff = SK_RBOFF_0;
1624 break;
1625 case SK_RAMSIZE_1024K_64:
1626 sc->sk_ramsize = 0x100000;
1627 sc->sk_rboff = SK_RBOFF_80000;
1628 break;
1629 case SK_RAMSIZE_1024K_128:
1630 sc->sk_ramsize = 0x100000;
1631 sc->sk_rboff = SK_RBOFF_0;
1632 break;
1633 case SK_RAMSIZE_2048K_128:
1634 sc->sk_ramsize = 0x200000;
1635 sc->sk_rboff = SK_RBOFF_0;
1636 break;
1637 default:
1638 printf("skc%d: unknown ram size: %d\n",
1639 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1640 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1641 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1642 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1643 error = ENXIO;
1644 goto fail;
1645 break;
1646 }
1647 } else {
1648 if (skrs == 0x00)
1649 sc->sk_ramsize = 0x20000;
1650 else
1651 sc->sk_ramsize = skrs * (1<<12);
1652 sc->sk_rboff = SK_RBOFF_0;
1653 }
1654
1655 /* Read and save physical media type */
1656 switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1657 case SK_PMD_1000BASESX:
1658 sc->sk_pmd = IFM_1000_SX;
1659 break;
1660 case SK_PMD_1000BASELX:
1661 sc->sk_pmd = IFM_1000_LX;
1662 break;
1663 case SK_PMD_1000BASECX:
1664 sc->sk_pmd = IFM_1000_CX;
1665 break;
1666 case SK_PMD_1000BASETX:
1667 sc->sk_pmd = IFM_1000_TX;
1668 break;
1669 default:
1670 printf("skc%d: unknown media type: 0x%x\n",
1671 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1672 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1673 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1674 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1675 error = ENXIO;
1676 goto fail;
1677 }
1678
1679 /* Announce the product name. */
1680 if (sc->sk_vpd_prodname != NULL)
1681 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1682 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1683 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1684 *port = SK_PORT_A;
1685 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1686
1687 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1688 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1689 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1690 *port = SK_PORT_B;
1691 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1692 }
1693
1694 /* Turn on the 'driver is loaded' LED. */
1695 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1696
1697 bus_generic_attach(dev);
1698
1699 fail:
1700 splx(s);
1701 return(error);
1702 }
1703
1704 static int sk_detach(dev)
1705 device_t dev;
1706 {
1707 struct sk_softc *sc;
1708 struct sk_if_softc *sc_if;
1709 struct ifnet *ifp;
1710 int s;
1711
1712 s = splimp();
1713
1714 sc = device_get_softc(device_get_parent(dev));
1715 sc_if = device_get_softc(dev);
1716 ifp = &sc_if->arpcom.ac_if;
1717 sk_stop(sc_if);
1718 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
1719 bus_generic_detach(dev);
1720 if (sc_if->sk_miibus != NULL)
1721 device_delete_child(dev, sc_if->sk_miibus);
1722 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1723 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
1724
1725 return(0);
1726 }
1727
/*
 * Detach the controller: tear down the per-port child devices, then
 * release the interrupt handler and the bus resources acquired in
 * skc_attach().
 */
static int skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	int			s;

	s = splimp();

	sc = device_get_softc(dev);

	/* Detach and delete the per-port children first. */
	bus_generic_detach(dev);
	if (sc->sk_devs[SK_PORT_A] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
	if (sc->sk_devs[SK_PORT_B] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);

	/* Then release interrupt and register resources. */
	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	splx(s);

	return(0);
}
1752
/*
 * Encapsulate an mbuf chain in the TX ring by coupling the mbuf data
 * pointers to descriptor fragments.  On success the frame is queued
 * and *txidx is advanced to the next free descriptor; returns ENOBUFS
 * if the ring does not have enough room.
 */
static int sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * The OWN bit is deliberately withheld from the
			 * FIRST fragment; it is set last (below) so the
			 * chip never sees a partially-built frame.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* NOTE(review): the loop above always runs m to NULL, so this
	 * check looks vestigial -- confirm before removing. */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the final fragment and request a completion interrupt. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Hand the frame to the chip: set OWN on the first descriptor
	 * only after all the others are filled in. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1801
/*
 * Transmit start routine: drain the interface send queue into the TX
 * ring and kick the transmit BMU if anything was queued.
 */
static void sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop when we reach a descriptor that still holds an mbuf. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
	}

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}

	return;
}
1850
1851
1852 static void sk_watchdog(ifp)
1853 struct ifnet *ifp;
1854 {
1855 struct sk_if_softc *sc_if;
1856
1857 sc_if = ifp->if_softc;
1858
1859 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1860 sk_init(sc_if);
1861
1862 return;
1863 }
1864
/*
 * Shutdown hook: quiesce the controller so it stops bus-mastering
 * before the system reboots.
 */
static void skc_shutdown(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);

	return;
}
1883
/*
 * RX completion handler: walk the receive ring, passing completed
 * frames up the stack and replenishing ring buffers as we go.
 */
static void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	/* Process descriptors until we hit one still owned by the NIC. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* On receive errors, recycle the buffer in place. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			/* Strip the alignment slop copied above. */
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Remember where we stopped for the next invocation. */
	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1952
/*
 * TX completion handler: reclaim mbufs for frames the NIC has
 * finished transmitting and advance the consumer index.
 */
static void sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Stop at the first descriptor the NIC still owns. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Count one output packet per frame (last fragment). */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}

	/* Cancel the watchdog once the ring drains completely. */
	if (sc_if->sk_cdata.sk_tx_cnt == 0)
		ifp->if_timer = 0;
	else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Clear OACTIVE once there is room to queue more frames. */
	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
1993
/*
 * Periodic link-state tick (XMAC PHYs): poll the link_sync signal on
 * GPIO bit 0 and, once the link looks stable, re-enable the GP0
 * interrupt and refresh the MII state.
 */
static void sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	if (!(ifp->if_flags & IFF_UP))
		return;

	/* Broadcom PHYs report link changes via their own handler. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not yet stable: poll again in one second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	mii_pollstat(mii);
	/* Link is back: stop the periodic polling. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	return;
}
2040
/*
 * Service an interrupt from a Broadcom PHY: clear the pending PHY
 * interrupt status and track link transitions, driving the link LED
 * accordingly.  The XMAC TX/RX paths are paused around the PHY access.
 */
static void sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;

	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* If the interface isn't running, reinitialize the MAC instead. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: restart media negotiation. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link (re)established: mask further PHY irqs. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
			mii_pollstat(mii);
		} else {
			/* Negotiation still in progress: poll later. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Resume the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
2097
/*
 * Service an interrupt from the XMAC itself (internal PHY case):
 * schedule link-resync polling when the link drops and flush the
 * FIFOs on underrun/overrun conditions.
 */
static void sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;
	struct mii_data		*mii;

	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() sees a stable link. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Flush the FIFOs on TX underrun / RX overrun. */
	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read the ISR to acknowledge any remaining conditions. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
2134
2135 static void sk_intr_yukon(sc_if)
2136 struct sk_if_softc *sc_if;
2137 {
2138 int status;
2139
2140 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2141
2142 return;
2143 }
2144
2145 static void sk_intr(xsc)
2146 void *xsc;
2147 {
2148 struct sk_softc *sc = xsc;
2149 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
2150 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2151 u_int32_t status;
2152
2153 sc_if0 = sc->sk_if[SK_PORT_A];
2154 sc_if1 = sc->sk_if[SK_PORT_B];
2155
2156 if (sc_if0 != NULL)
2157 ifp0 = &sc_if0->arpcom.ac_if;
2158 if (sc_if1 != NULL)
2159 ifp1 = &sc_if1->arpcom.ac_if;
2160
2161 for (;;) {
2162 status = CSR_READ_4(sc, SK_ISSR);
2163 if (!(status & sc->sk_intrmask))
2164 break;
2165
2166 /* Handle receive interrupts first. */
2167 if (status & SK_ISR_RX1_EOF) {
2168 sk_rxeof(sc_if0);
2169 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2170 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2171 }
2172 if (status & SK_ISR_RX2_EOF) {
2173 sk_rxeof(sc_if1);
2174 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2175 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2176 }
2177
2178 /* Then transmit interrupts. */
2179 if (status & SK_ISR_TX1_S_EOF) {
2180 sk_txeof(sc_if0);
2181 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2182 SK_TXBMU_CLR_IRQ_EOF);
2183 }
2184 if (status & SK_ISR_TX2_S_EOF) {
2185 sk_txeof(sc_if1);
2186 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2187 SK_TXBMU_CLR_IRQ_EOF);
2188 }
2189
2190 /* Then MAC interrupts. */
2191 if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2192 if (sc->sk_type == SK_GENESIS)
2193 sk_intr_xmac(sc_if0);
2194 else
2195 sk_intr_yukon(sc_if0);
2196 }
2197
2198 if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2199 if (sc->sk_type == SK_GENESIS)
2200 sk_intr_xmac(sc_if1);
2201 else
2202 sk_intr_yukon(sc_if0);
2203 }
2204
2205 if (status & SK_ISR_EXTERNAL_REG) {
2206 if (ifp0 != NULL &&
2207 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2208 sk_intr_bcom(sc_if0);
2209 if (ifp1 != NULL &&
2210 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2211 sk_intr_bcom(sc_if1);
2212 }
2213 }
2214
2215 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2216
2217 if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
2218 sk_start(ifp0);
2219 if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
2220 sk_start(ifp1);
2221
2222 return;
2223 }
2224
/*
 * sk_init_xmac() - bring the XMAC (GEnesis) MAC for this port out of
 * reset and program it for normal operation: optional Broadcom PHY
 * bring-up, station address, RX filter modes, FCS stripping, TX
 * auto-padding, acceptance of error frames (needed for jumbo frames,
 * see the long comment below), TX FIFO threshold, multicast filter,
 * interrupt mask and per-revision MAC arbiter timing.  Called from
 * sk_init() when the controller is a GEnesis.  Forces sk_link to 1
 * before returning.
 */
2225 static void sk_init_xmac(sc_if)
2226 struct sk_if_softc *sc_if;
2227 {
2228 struct sk_softc *sc;
2229 struct ifnet *ifp;
/*
 * Magic (reg, val) pairs written to buggy early BCM5400 PHYs below;
 * the {0, 0} entry terminates the table.
 */
2230 struct sk_bcom_hack bhack[] = {
2231 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2232 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2233 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2234 { 0, 0 } };
2235
2236 sc = sc_if->sk_softc;
2237 ifp = &sc_if->arpcom.ac_if;
2238
2239 /* Unreset the XMAC. */
2240 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2241 DELAY(1000);
2242
2243 /* Reset the XMAC's internal state. */
2244 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2245
2246 /* Save the XMAC II revision */
/* Revision selects the MAC arbiter timing values at the bottom. */
2247 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2248
2249 /*
2250 * Perform additional initialization for external PHYs,
2251 * namely for the 1000baseTX cards that use the XMAC's
2252 * GMII mode.
2253 */
2254 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2255 int i = 0;
2256 u_int32_t val;
2257
2258 /* Take PHY out of reset. */
/* The GPIO pin pair used depends on which port this is. */
2259 val = sk_win_read_4(sc, SK_GPIO);
2260 if (sc_if->sk_port == SK_PORT_A)
2261 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2262 else
2263 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2264 sk_win_write_4(sc, SK_GPIO, val);
2265
2266 /* Enable GMII mode on the XMAC. */
2267 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2268
/* Reset the PHY, wait, then program its interrupt mask register. */
2269 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2270 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2271 DELAY(10000);
2272 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2273 BRGPHY_MII_IMR, 0xFFF0);
2274
2275 /*
2276 * Early versions of the BCM5400 apparently have
2277 * a bug that requires them to have their reserved
2278 * registers initialized to some magic values. I don't
2279 * know what the numbers do, I'm just the messenger.
2280 */
/* PHY register 0x03 reading 0x6041 identifies the affected rev. */
2281 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2282 == 0x6041) {
2283 while(bhack[i].reg) {
2284 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2285 bhack[i].reg, bhack[i].val);
2286 i++;
2287 }
2288 }
2289 }
2290
2291 /* Set station address */
/* ac_enaddr is loaded 16 bits at a time into PAR0..PAR2. */
2292 SK_XM_WRITE_2(sc_if, XM_PAR0,
2293 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2294 SK_XM_WRITE_2(sc_if, XM_PAR1,
2295 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2296 SK_XM_WRITE_2(sc_if, XM_PAR2,
2297 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2298 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2299
/* Mirror the interface's promiscuous flag into the MAC. */
2300 if (ifp->if_flags & IFF_PROMISC) {
2301 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
2302 } else {
2303 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
2304 }
2305
/* NOBROAD is a reject-broadcast bit, hence the inverted sense. */
2306 if (ifp->if_flags & IFF_BROADCAST) {
2307 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2308 } else {
2309 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2310 }
2311
2312 /* We don't need the FCS appended to the packet. */
2313 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2314
2315 /* We want short frames padded to 60 bytes. */
2316 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2317
2318 /*
2319 * Enable the reception of all error frames. This is is
2320 * a necessary evil due to the design of the XMAC. The
2321 * XMAC's receive FIFO is only 8K in size, however jumbo
2322 * frames can be up to 9000 bytes in length. When bad
2323 * frame filtering is enabled, the XMAC's RX FIFO operates
2324 * in 'store and forward' mode. For this to work, the
2325 * entire frame has to fit into the FIFO, but that means
2326 * that jumbo frames larger than 8192 bytes will be
2327 * truncated. Disabling all bad frame filtering causes
2328 * the RX FIFO to operate in streaming mode, in which
2329 * case the XMAC will start transfering frames out of the
2330 * RX FIFO as soon as the FIFO threshold is reached.
2331 */
2332 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2333 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2334 XM_MODE_RX_INRANGELEN);
2335
/* Allow oversize frames only when the MTU is in jumbo range. */
2336 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2337 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2338 else
2339 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2340
2341 /*
2342 * Bump up the transmit threshold. This helps hold off transmit
2343 * underruns when we're blasting traffic from both ports at once.
2344 */
2345 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2346
2347 /* Set multicast filter */
2348 sk_setmulti(sc_if);
2349
2350 /* Clear and enable interrupts */
/* The discarded ISR read clears any stale interrupt conditions. */
2351 SK_XM_READ_2(sc_if, XM_ISR);
/* With an external PHY, all XMAC interrupt sources stay masked. */
2352 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2353 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2354 else
2355 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2356
2357 /* Configure MAC arbiter */
2358 switch(sc_if->sk_xmac_rev) {
2359 case XM_XMAC_REV_B2:
2360 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2361 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2362 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2363 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2364 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2365 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2366 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2367 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2368 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2369 break;
2370 case XM_XMAC_REV_C1:
2371 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2372 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2373 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2374 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2375 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2376 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2377 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2378 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
/* NOTE(review): C1 uses the B2 recovery value - confirm intended. */
2379 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2380 break;
2381 default:
2382 break;
2383 }
2384 sk_win_write_2(sc, SK_MACARB_CTL,
2385 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2386
/* Assume link is up until the MII/interrupt code says otherwise. */
2387 sc_if->sk_link = 1;
2388
2389 return;
2390 }
2391
/*
 * sk_init_yukon() - reset and initialize the Yukon GMAC/GPHY pair for
 * this port: PHY reset with media-dependent mode bits, MIB counter
 * clear, receive/transmit parameter registers, serial mode (jumbo
 * aware), unicast and physical address registers, multicast hash
 * clear, counter-overflow interrupt masks, and RX/TX MAC FIFO enable.
 * Called from sk_init() when the controller is a Yukon.
 */
2392 static void sk_init_yukon(sc_if)
2393 struct sk_if_softc *sc_if;
2394 {
2395 u_int32_t phy;
2396 u_int16_t reg;
2397 struct sk_softc *sc;
2398 struct ifnet *ifp;
2399 int i;
2400
2401 sc = sc_if->sk_softc;
2402 ifp = &sc_if->arpcom.ac_if;
2403
2404 /* GMAC and GPHY Reset */
2405 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2406 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2407 DELAY(1000);
/*
 * NOTE(review): RESET_CLEAR immediately followed by RESET_SET looks
 * like a deliberate pulse of the GMAC reset line - confirm against
 * the chip documentation.
 */
2408 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2409 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2410 DELAY(1000);
2411
/* Base GPHY control value; the media mode bit is OR'ed in below. */
2412 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2413 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2414
/* Select fiber or copper PHY mode from the probed media (PMD) type. */
2415 switch(sc_if->sk_softc->sk_pmd) {
2416 case IFM_1000_SX:
2417 case IFM_1000_LX:
2418 phy |= SK_GPHY_FIBER;
2419 break;
2420
2421 case IFM_1000_CX:
2422 case IFM_1000_TX:
2423 phy |= SK_GPHY_COPPER;
2424 break;
2425 }
2426
/* Pulse the GPHY reset with the mode bits applied, then enable GMAC. */
2427 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2428 DELAY(1000);
2429 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2430 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2431 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2432
2433 /* unused read of the interrupt source register */
2434 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2435
2436 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2437
/* Toggling YU_PAR_MIB_CLR zeroes the hardware MIB counters. */
2438 /* MIB Counter Clear Mode set */
2439 reg |= YU_PAR_MIB_CLR;
2440 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2441
2442 /* MIB Counter Clear Mode clear */
2443 reg &= ~YU_PAR_MIB_CLR;
2444 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2445
2446 /* receive control reg */
2447 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_UFLEN | YU_RCR_MUFLEN |
2448 YU_RCR_CRCR);
2449
2450 /* transmit parameter register */
2451 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2452 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2453
2454 /* serial mode register */
2455 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
/* Permit jumbo-sized frames when the interface MTU calls for them. */
2456 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2457 reg |= YU_SMR_MFL_JUMBO;
2458 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
2459
2460 /* Setup Yukon's address */
2461 for (i = 0; i < 3; i++) {
2462 /* Write Source Address 1 (unicast filter) */
/* Two MAC address bytes per 16-bit register, low byte first. */
2463 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2464 sc_if->arpcom.ac_enaddr[i * 2] |
2465 sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2466 }
2467
/* Source Address 2 is copied from this port's MAC in the window regs. */
2468 for (i = 0; i < 3; i++) {
2469 reg = sk_win_read_2(sc_if->sk_softc,
2470 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2471 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2472 }
2473
2474 /* clear all Multicast filter hash registers */
2475 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
2476 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
2477 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
2478 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
2479
/*
 * NOTE(review): writing 0 here appears to mask the counter-overflow
 * interrupts, despite what the comment below says - verify polarity.
 */
2480 /* enable interrupt mask for counter overflows */
2481 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2482 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2483 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2484
2485 /* Configure RX MAC FIFO */
2486 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2487 SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2488
2489 /* Configure TX MAC FIFO */
2490 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2491 SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2492 }
2493
2494 /*
2495 * Note that to properly initialize any part of the GEnesis chip,
2496 * you first have to take it out of reset mode.
2497 */
/*
 * sk_init() - ifnet init routine for one port; runs at splimp().
 * Stops any running state, programs the MAC (XMAC or Yukon) and LEDs,
 * MAC FIFOs, transmit arbiter, RAM buffers and BMUs, initializes the
 * RX/TX descriptor rings, unmasks this port's interrupts, and finally
 * enables the MAC's TX/RX state machines and marks the interface
 * IFF_RUNNING.  If RX ring setup fails with ENOBUFS the port is
 * stopped again and the interface is left down.
 */
2498 static void sk_init(xsc)
2499 void *xsc;
2500 {
2501 struct sk_if_softc *sc_if = xsc;
2502 struct sk_softc *sc;
2503 struct ifnet *ifp;
2504 struct mii_data *mii;
2505 int s;
2506 u_int16_t reg;
2507
/* Block network interrupts while reconfiguring the hardware. */
2508 s = splimp();
2509
2510 ifp = &sc_if->arpcom.ac_if;
2511 sc = sc_if->sk_softc;
2512 mii = device_get_softc(sc_if->sk_miibus);
2513
2514 /* Cancel pending I/O and free all RX/TX buffers. */
2515 sk_stop(sc_if);
2516
/* The LED counter registers are only programmed on GEnesis. */
2517 if (sc->sk_type == SK_GENESIS) {
2518 /* Configure LINK_SYNC LED */
2519 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2520 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2521 SK_LINKLED_LINKSYNC_ON);
2522
2523 /* Configure RX LED */
2524 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2525 SK_RXLEDCTL_COUNTER_START);
2526
2527 /* Configure TX LED */
2528 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2529 SK_TXLEDCTL_COUNTER_START);
2530 }
2531
2532 /* Configure I2C registers */
2533
2534 /* Configure XMAC(s) */
2535 switch (sc->sk_type) {
2536 case SK_GENESIS:
2537 sk_init_xmac(sc_if);
2538 break;
2539 case SK_YUKON:
2540 sk_init_yukon(sc_if);
2541 break;
2542 }
/* Kick the PHY onto the currently selected media. */
2543 mii_mediachg(mii);
2544
2545 if (sc->sk_type == SK_GENESIS) {
2546 /* Configure MAC FIFOs */
2547 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2548 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2549 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2550
2551 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2552 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2553 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2554 }
2555
2556 /* Configure transmit arbiter(s) */
2557 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2558 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2559
/* Carve this port's RX and TX RAM buffer regions and enable them. */
2560 /* Configure RAMbuffers */
2561 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2562 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2563 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2564 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2565 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2566 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2567
2568 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2569 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2570 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2571 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2572 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2573 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2574 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2575
/* Point the BMUs at the physical addresses of the descriptor rings. */
2576 /* Configure BMUs */
2577 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2578 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2579 vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2580 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2581
2582 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2583 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2584 vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2585 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2586
2587 /* Init descriptors */
/* RX ring needs mbufs; bail out cleanly if the system has none. */
2588 if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2589 printf("sk%d: initialization failed: no "
2590 "memory for rx buffers\n", sc_if->sk_unit);
2591 sk_stop(sc_if);
2592 (void)splx(s);
2593 return;
2594 }
2595 sk_init_tx_ring(sc_if);
2596
2597 /* Configure interrupt handling */
/* The discarded ISSR read clears any stale interrupt status. */
2598 CSR_READ_4(sc, SK_ISSR);
2599 if (sc_if->sk_port == SK_PORT_A)
2600 sc->sk_intrmask |= SK_INTRS1;
2601 else
2602 sc->sk_intrmask |= SK_INTRS2;
2603
2604 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2605
2606 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2607
2608 /* Start BMUs. */
2609 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2610
/* Finally enable the MAC's transmit and receive state machines. */
2611 switch(sc->sk_type) {
2612 case SK_GENESIS:
2613 /* Enable XMACs TX and RX state machines */
2614 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2615 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2616 break;
2617 case SK_YUKON:
/* Enable TX/RX; let the PHY, not software, set speed and duplex. */
2618 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2619 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2620 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2621 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2622 }
2623
2624 ifp->if_flags |= IFF_RUNNING;
2625 ifp->if_flags &= ~IFF_OACTIVE;
2626
2627 splx(s);
2628
2629 return;
2630 }
2631
/*
 * sk_stop() - quiesce one port: cancel the sk_tick() timeout, put a
 * BCOM PHY back into reset, reset the MAC and FIFOs, take the RAM
 * buffers and BMUs offline, stop the LED counters, mask this port's
 * interrupts, and free any mbufs still attached to the RX/TX rings.
 * Clears IFF_RUNNING and IFF_OACTIVE on exit.
 */
2632 static void sk_stop(sc_if)
2633 struct sk_if_softc *sc_if;
2634 {
2635 int i;
2636 struct sk_softc *sc;
2637 struct ifnet *ifp;
2638
2639 sc = sc_if->sk_softc;
2640 ifp = &sc_if->arpcom.ac_if;
2641
/* Cancel the periodic tick callout so it can't touch dead hardware. */
2642 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2643
2644 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2645 u_int32_t val;
2646
2647 /* Put PHY back into reset. */
/* Drive this port's GPIO reset line low (DIR set, DAT cleared). */
2648 val = sk_win_read_4(sc, SK_GPIO);
2649 if (sc_if->sk_port == SK_PORT_A) {
2650 val |= SK_GPIO_DIR0;
2651 val &= ~SK_GPIO_DAT0;
2652 } else {
2653 val |= SK_GPIO_DIR2;
2654 val &= ~SK_GPIO_DAT2;
2655 }
2656 sk_win_write_4(sc, SK_GPIO, val);
2657 }
2658
2659 /* Turn off various components of this interface. */
/*
 * NOTE(review): this XMAC register access is done unconditionally,
 * i.e. also on Yukon parts - confirm it is harmless there.
 */
2660 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2661 switch (sc->sk_type) {
2662 case SK_GENESIS:
2663 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2664 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2665 break;
2666 case SK_YUKON:
2667 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2668 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2669 break;
2670 }
/* Take BMUs and RAM buffers offline, stop arbiter and LED counters. */
2671 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2672 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2673 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2674 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2675 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2676 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
/*
 * NOTE(review): the TX LED is stopped with the RX constant - verify
 * the two COUNTER_STOP values are in fact identical.
 */
2677 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2678 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2679 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2680
2681 /* Disable interrupts */
2682 if (sc_if->sk_port == SK_PORT_A)
2683 sc->sk_intrmask &= ~SK_INTRS1;
2684 else
2685 sc->sk_intrmask &= ~SK_INTRS2;
2686 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2687
/*
 * NOTE(review): XMAC ISR read / IMR mask also executed on Yukon -
 * presumably benign, confirm.
 */
2688 SK_XM_READ_2(sc_if, XM_ISR);
2689 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2690
2691 /* Free RX and TX mbufs still in the queues. */
2692 for (i = 0; i < SK_RX_RING_CNT; i++) {
2693 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2694 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2695 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2696 }
2697 }
2698
2699 for (i = 0; i < SK_TX_RING_CNT; i++) {
2700 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2701 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2702 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2703 }
2704 }
2705
2706 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2707
2708 return;
2709 }
Cache object: 536ee8643753f3f2cb3a783c4de59de1
|