FreeBSD/Linux Kernel Cross Reference
sys/pci/if_sk.c
1 /*
2 * Copyright (c) 1997, 1998, 1999, 2000
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
35 * the SK-984x series adapters, both single port and dual port.
36 * References:
37 * The XaQti XMAC II datasheet,
38 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
39 * The SysKonnect GEnesis manual, http://www.syskonnect.com
40 *
41 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
42 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
43 * convenience to others until Vitesse corrects this problem:
44 *
45 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
46 *
47 * Written by Bill Paul <wpaul@ee.columbia.edu>
48 * Department of Electrical Engineering
49 * Columbia University, New York City
50 */
51
52 /*
53 * The SysKonnect gigabit ethernet adapters consist of two main
54 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
55 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
56 * components and a PHY while the GEnesis controller provides a PCI
57 * interface with DMA support. Each card may have between 512K and
58 * 2MB of SRAM on board depending on the configuration.
59 *
60 * The SysKonnect GEnesis controller can have either one or two XMAC
61 * chips connected to it, allowing single or dual port NIC configurations.
62 * SysKonnect has the distinction of being the only vendor on the market
63 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
64 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
65 * XMAC registers. This driver takes advantage of these features to allow
66 * both XMACs to operate as independent interfaces.
67 */
68
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD: releng/5.1/sys/pci/if_sk.c 113812 2003-04-21 18:34:04Z imp $");
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/sockio.h>
75 #include <sys/mbuf.h>
76 #include <sys/malloc.h>
77 #include <sys/kernel.h>
78 #include <sys/socket.h>
79 #include <sys/queue.h>
80
81 #include <net/if.h>
82 #include <net/if_arp.h>
83 #include <net/ethernet.h>
84 #include <net/if_dl.h>
85 #include <net/if_media.h>
86
87 #include <net/bpf.h>
88
89 #include <vm/vm.h> /* for vtophys */
90 #include <vm/pmap.h> /* for vtophys */
91 #include <machine/bus_pio.h>
92 #include <machine/bus_memio.h>
93 #include <machine/bus.h>
94 #include <machine/resource.h>
95 #include <sys/bus.h>
96 #include <sys/rman.h>
97
98 #include <dev/mii/mii.h>
99 #include <dev/mii/miivar.h>
100 #include <dev/mii/brgphyreg.h>
101
102 #include <pci/pcireg.h>
103 #include <pci/pcivar.h>
104
105 #define SK_USEIOSPACE
106
107 #include <pci/if_skreg.h>
108 #include <pci/xmaciireg.h>
109
110 MODULE_DEPEND(sk, pci, 1, 1, 1);
111 MODULE_DEPEND(sk, ether, 1, 1, 1);
112 MODULE_DEPEND(sk, miibus, 1, 1, 1);
113
114 /* "controller miibus0" required. See GENERIC if you get errors here. */
115 #include "miibus_if.h"
116
117 #ifndef lint
118 static const char rcsid[] =
119 "$FreeBSD: releng/5.1/sys/pci/if_sk.c 113812 2003-04-21 18:34:04Z imp $";
120 #endif
121
122 static struct sk_type sk_devs[] = {
123 { SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
124 { 0, 0, NULL }
125 };
126
127 static int sk_probe (device_t);
128 static int sk_attach (device_t);
129 static int sk_detach (device_t);
130 static int sk_detach_xmac (device_t);
131 static int sk_probe_xmac (device_t);
132 static int sk_attach_xmac (device_t);
133 static void sk_tick (void *);
134 static void sk_intr (void *);
135 static void sk_intr_xmac (struct sk_if_softc *);
136 static void sk_intr_bcom (struct sk_if_softc *);
137 static void sk_rxeof (struct sk_if_softc *);
138 static void sk_txeof (struct sk_if_softc *);
139 static int sk_encap (struct sk_if_softc *, struct mbuf *,
140 u_int32_t *);
141 static void sk_start (struct ifnet *);
142 static int sk_ioctl (struct ifnet *, u_long, caddr_t);
143 static void sk_init (void *);
144 static void sk_init_xmac (struct sk_if_softc *);
145 static void sk_stop (struct sk_if_softc *);
146 static void sk_watchdog (struct ifnet *);
147 static void sk_shutdown (device_t);
148 static int sk_ifmedia_upd (struct ifnet *);
149 static void sk_ifmedia_sts (struct ifnet *, struct ifmediareq *);
150 static void sk_reset (struct sk_softc *);
151 static int sk_newbuf (struct sk_if_softc *,
152 struct sk_chain *, struct mbuf *);
153 static int sk_alloc_jumbo_mem (struct sk_if_softc *);
154 static void *sk_jalloc (struct sk_if_softc *);
155 static void sk_jfree (void *, void *);
156 static int sk_init_rx_ring (struct sk_if_softc *);
157 static void sk_init_tx_ring (struct sk_if_softc *);
158 static u_int32_t sk_win_read_4 (struct sk_softc *, int);
159 static u_int16_t sk_win_read_2 (struct sk_softc *, int);
160 static u_int8_t sk_win_read_1 (struct sk_softc *, int);
161 static void sk_win_write_4 (struct sk_softc *, int, u_int32_t);
162 static void sk_win_write_2 (struct sk_softc *, int, u_int32_t);
163 static void sk_win_write_1 (struct sk_softc *, int, u_int32_t);
164 static u_int8_t sk_vpd_readbyte (struct sk_softc *, int);
165 static void sk_vpd_read_res (struct sk_softc *, struct vpd_res *, int);
166 static void sk_vpd_read (struct sk_softc *);
167
168 static int sk_miibus_readreg (device_t, int, int);
169 static int sk_miibus_writereg (device_t, int, int, int);
170 static void sk_miibus_statchg (device_t);
171
172 static u_int32_t sk_calchash (caddr_t);
173 static void sk_setfilt (struct sk_if_softc *, caddr_t, int);
174 static void sk_setmulti (struct sk_if_softc *);
175
176 #ifdef SK_USEIOSPACE
177 #define SK_RES SYS_RES_IOPORT
178 #define SK_RID SK_PCI_LOIO
179 #else
180 #define SK_RES SYS_RES_MEMORY
181 #define SK_RID SK_PCI_LOMEM
182 #endif
183
184 /*
185 * Note that we have newbus methods for both the GEnesis controller
186 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
187 * the miibus code is a child of the XMACs. We need to do it this way
188 * so that the miibus drivers can access the PHY registers on the
189 * right PHY. It's not quite what I had in mind, but it's the only
190 * design that achieves the desired effect.
191 */
192 static device_method_t skc_methods[] = {
193 /* Device interface */
194 DEVMETHOD(device_probe, sk_probe),
195 DEVMETHOD(device_attach, sk_attach),
196 DEVMETHOD(device_detach, sk_detach),
197 DEVMETHOD(device_shutdown, sk_shutdown),
198
199 /* bus interface */
200 DEVMETHOD(bus_print_child, bus_generic_print_child),
201 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
202
203 { 0, 0 }
204 };
205
206 static driver_t skc_driver = {
207 "skc",
208 skc_methods,
209 sizeof(struct sk_softc)
210 };
211
212 static devclass_t skc_devclass;
213
214 static device_method_t sk_methods[] = {
215 /* Device interface */
216 DEVMETHOD(device_probe, sk_probe_xmac),
217 DEVMETHOD(device_attach, sk_attach_xmac),
218 DEVMETHOD(device_detach, sk_detach_xmac),
219 DEVMETHOD(device_shutdown, bus_generic_shutdown),
220
221 /* bus interface */
222 DEVMETHOD(bus_print_child, bus_generic_print_child),
223 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
224
225 /* MII interface */
226 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
227 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
228 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
229
230 { 0, 0 }
231 };
232
233 static driver_t sk_driver = {
234 "sk",
235 sk_methods,
236 sizeof(struct sk_if_softc)
237 };
238
239 static devclass_t sk_devclass;
240
241 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
242 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
243 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
244
245 #define SK_SETBIT(sc, reg, x) \
246 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
247
248 #define SK_CLRBIT(sc, reg, x) \
249 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
250
251 #define SK_WIN_SETBIT_4(sc, reg, x) \
252 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
253
254 #define SK_WIN_CLRBIT_4(sc, reg, x) \
255 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
256
257 #define SK_WIN_SETBIT_2(sc, reg, x) \
258 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
259
260 #define SK_WIN_CLRBIT_2(sc, reg, x) \
261 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
262
263 static u_int32_t
264 sk_win_read_4(sc, reg)
265 struct sk_softc *sc;
266 int reg;
267 {
268 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
269 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
270 }
271
272 static u_int16_t
273 sk_win_read_2(sc, reg)
274 struct sk_softc *sc;
275 int reg;
276 {
277 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
278 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
279 }
280
281 static u_int8_t
282 sk_win_read_1(sc, reg)
283 struct sk_softc *sc;
284 int reg;
285 {
286 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
287 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
288 }
289
290 static void
291 sk_win_write_4(sc, reg, val)
292 struct sk_softc *sc;
293 int reg;
294 u_int32_t val;
295 {
296 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
297 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
298 return;
299 }
300
301 static void
302 sk_win_write_2(sc, reg, val)
303 struct sk_softc *sc;
304 int reg;
305 u_int32_t val;
306 {
307 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
308 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
309 return;
310 }
311
312 static void
313 sk_win_write_1(sc, reg, val)
314 struct sk_softc *sc;
315 int reg;
316 u_int32_t val;
317 {
318 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
319 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
320 return;
321 }
322
323 /*
324 * The VPD EEPROM contains Vital Product Data, as suggested in
325 * the PCI 2.1 specification. The VPD data is separated into areas
326 * denoted by resource IDs. The SysKonnect VPD contains an ID string
327 * resource (the name of the adapter), a read-only area resource
328 * containing various key/data fields and a read/write area which
329 * can be used to store asset management information or log messages.
330 * We read the ID string and read-only into buffers attached to
331 * the controller softc structure for later use. At the moment,
332 * we only use the ID string during sk_attach().
333 */
/*
 * Read one byte from the VPD EEPROM at offset `addr'.  Writing the
 * address register starts the transfer; we then poll until the
 * completion flag is set before reading the data register.
 *
 * Returns 0 on timeout -- which is indistinguishable from a real data
 * byte of 0, so callers cannot detect failure here.
 */
static u_int8_t
sk_vpd_readbyte(sc, addr)
	struct sk_softc *sc;
	int addr;
{
	int i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		/* SK_VPD_FLAG set => the requested byte is ready. */
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}
354
355 static void
356 sk_vpd_read_res(sc, res, addr)
357 struct sk_softc *sc;
358 struct vpd_res *res;
359 int addr;
360 {
361 int i;
362 u_int8_t *ptr;
363
364 ptr = (u_int8_t *)res;
365 for (i = 0; i < sizeof(struct vpd_res); i++)
366 ptr[i] = sk_vpd_readbyte(sc, i + addr);
367
368 return;
369 }
370
371 static void
372 sk_vpd_read(sc)
373 struct sk_softc *sc;
374 {
375 int pos = 0, i;
376 struct vpd_res res;
377
378 if (sc->sk_vpd_prodname != NULL)
379 free(sc->sk_vpd_prodname, M_DEVBUF);
380 if (sc->sk_vpd_readonly != NULL)
381 free(sc->sk_vpd_readonly, M_DEVBUF);
382 sc->sk_vpd_prodname = NULL;
383 sc->sk_vpd_readonly = NULL;
384
385 sk_vpd_read_res(sc, &res, pos);
386
387 if (res.vr_id != VPD_RES_ID) {
388 printf("skc%d: bad VPD resource id: expected %x got %x\n",
389 sc->sk_unit, VPD_RES_ID, res.vr_id);
390 return;
391 }
392
393 pos += sizeof(res);
394 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
395 for (i = 0; i < res.vr_len; i++)
396 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
397 sc->sk_vpd_prodname[i] = '\0';
398 pos += i;
399
400 sk_vpd_read_res(sc, &res, pos);
401
402 if (res.vr_id != VPD_RES_READ) {
403 printf("skc%d: bad VPD resource id: expected %x got %x\n",
404 sc->sk_unit, VPD_RES_READ, res.vr_id);
405 return;
406 }
407
408 pos += sizeof(res);
409 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
410 for (i = 0; i < res.vr_len + 1; i++)
411 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
412
413 return;
414 }
415
416 static int
417 sk_miibus_readreg(dev, phy, reg)
418 device_t dev;
419 int phy, reg;
420 {
421 struct sk_if_softc *sc_if;
422 int i;
423
424 sc_if = device_get_softc(dev);
425
426 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
427 return(0);
428
429 SK_IF_LOCK(sc_if);
430
431 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
432 SK_XM_READ_2(sc_if, XM_PHY_DATA);
433 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
434 for (i = 0; i < SK_TIMEOUT; i++) {
435 DELAY(1);
436 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
437 XM_MMUCMD_PHYDATARDY)
438 break;
439 }
440
441 if (i == SK_TIMEOUT) {
442 printf("sk%d: phy failed to come ready\n",
443 sc_if->sk_unit);
444 return(0);
445 }
446 }
447 DELAY(1);
448 i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
449 SK_IF_UNLOCK(sc_if);
450 return(i);
451 }
452
453 static int
454 sk_miibus_writereg(dev, phy, reg, val)
455 device_t dev;
456 int phy, reg, val;
457 {
458 struct sk_if_softc *sc_if;
459 int i;
460
461 sc_if = device_get_softc(dev);
462 SK_IF_LOCK(sc_if);
463
464 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
465 for (i = 0; i < SK_TIMEOUT; i++) {
466 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
467 break;
468 }
469
470 if (i == SK_TIMEOUT) {
471 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
472 return(ETIMEDOUT);
473 }
474
475 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
476 for (i = 0; i < SK_TIMEOUT; i++) {
477 DELAY(1);
478 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
479 break;
480 }
481
482 SK_IF_UNLOCK(sc_if);
483
484 if (i == SK_TIMEOUT)
485 printf("sk%d: phy write timed out\n", sc_if->sk_unit);
486
487 return(0);
488 }
489
/*
 * MII link status change callback.  When an external GMII PHY (e.g.
 * Broadcom) is in use, the XMAC's duplex setting is not updated by the
 * hardware, so we mirror the negotiated duplex mode into the MMU
 * command register by hand.  The internal XMAC PHY needs no help.
 */
static void
sk_miibus_statchg(dev)
	device_t dev;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = device_get_softc(dev);
	mii = device_get_softc(sc_if->sk_miibus);
	SK_IF_LOCK(sc_if);
	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
	SK_IF_UNLOCK(sc_if);

	return;
}
515
#define SK_POLY		0xEDB88320
#define SK_BITS		6

/*
 * Compute the 6-bit multicast hash for an ethernet address: a
 * bit-reflected CRC-32 (polynomial SK_POLY, seed 0xFFFFFFFF) over the
 * six address bytes; the low SK_BITS bits of the complemented
 * remainder select the hash table slot.
 */
static u_int32_t
sk_calchash(addr)
	caddr_t addr;
{
	u_int32_t crc, cur;
	int byte, bit;

	crc = 0xFFFFFFFF;		/* standard CRC-32 seed */
	for (byte = 0; byte < 6; byte++) {
		cur = (u_int8_t)addr[byte];
		for (bit = 0; bit < 8; bit++) {
			if ((crc ^ cur) & 1)
				crc = (crc >> 1) ^ SK_POLY;
			else
				crc >>= 1;
			cur >>= 1;
		}
	}

	return (~crc & ((1 << SK_BITS) - 1));
}
535
/*
 * Program a station address into RX (perfect) filter slot `slot' of
 * the XMAC, written as three little 16-bit chunks.
 * NOTE(review): addr is read through u_int16_t casts, which assumes
 * the buffer is at least 2-byte aligned -- appears true for the
 * callers in this file (local arrays / LLADDR of a sockaddr_dl), but
 * confirm before adding new callers.
 */
static void
sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc *sc_if;
	caddr_t addr;
	int slot;
{
	int base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}
552
/*
 * Reprogram the XMAC's multicast filters to match the interface's
 * current multicast membership.  The first XM_RXFILT_MAX - 1 groups
 * go into the perfect filter slots (slot 0 holds the station address
 * and is left alone); any additional groups fall back to the 64-bit
 * hash table.  In promiscuous/allmulti mode the hash table is simply
 * opened wide.
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc *sc_if;
{
	struct ifnet *ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct ifmultiaddr *ifma;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* Walked in reverse so earlier joins win perfect slots. */
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			/* sk_calchash() yields a 6-bit (0-63) index. */
			h = sk_calchash(
				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
608
/*
 * Initialize the RX descriptor ring: attach a fresh jumbo mbuf to
 * every slot and chain each software descriptor and each hardware
 * descriptor (by physical address) to its successor, wrapping the
 * last entry back to the first.  Returns ENOBUFS if any mbuf or
 * jumbo buffer allocation fails, 0 on success.
 */
static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_chain_data *cd;
	struct sk_ring_data *rd;
	int i;

	cd = &sc_if->sk_cdata;
	rd = sc_if->sk_rdata;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1)) {
			/* Last slot wraps around to slot 0. */
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[0];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[0]);
		} else {
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[i + 1];
			/* Hardware follows physical addresses. */
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[i + 1]);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return(0);
}
645
646 static void
647 sk_init_tx_ring(sc_if)
648 struct sk_if_softc *sc_if;
649 {
650 struct sk_chain_data *cd;
651 struct sk_ring_data *rd;
652 int i;
653
654 cd = &sc_if->sk_cdata;
655 rd = sc_if->sk_rdata;
656
657 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
658 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
659
660 for (i = 0; i < SK_TX_RING_CNT; i++) {
661 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
662 if (i == (SK_TX_RING_CNT - 1)) {
663 cd->sk_tx_chain[i].sk_next =
664 &cd->sk_tx_chain[0];
665 rd->sk_tx_ring[i].sk_next =
666 vtophys(&rd->sk_tx_ring[0]);
667 } else {
668 cd->sk_tx_chain[i].sk_next =
669 &cd->sk_tx_chain[i + 1];
670 rd->sk_tx_ring[i].sk_next =
671 vtophys(&rd->sk_tx_ring[i + 1]);
672 }
673 }
674
675 sc_if->sk_cdata.sk_tx_prod = 0;
676 sc_if->sk_cdata.sk_tx_cons = 0;
677 sc_if->sk_cdata.sk_tx_cnt = 0;
678
679 return;
680 }
681
/*
 * Attach a receive buffer to ring slot `c'.  If `m' is NULL, a new
 * mbuf header is allocated and backed by a 9K buffer from the local
 * jumbo pool (sk_jfree reclaims it when the mbuf is released);
 * otherwise the caller's mbuf is recycled in place.  Returns ENOBUFS
 * on allocation failure, 0 on success.
 */
static int
sk_newbuf(sc_if, c, m)
	struct sk_if_softc *sc_if;
	struct sk_chain *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Point the hardware descriptor at the (physical) buffer. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
739
740 /*
741 * Allocate jumbo buffer storage. The SysKonnect adapters support
742 * "jumbograms" (9K frames), although SysKonnect doesn't currently
743 * use them in their drivers. In order for us to use them, we need
744 * large 9K receive buffers, however standard mbuf clusters are only
745 * 2048 bytes in size. Consequently, we need to allocate and manage
746 * our own jumbo buffer pool. Fortunately, this does not require an
747 * excessive amount of additional code.
748 */
749 static int
750 sk_alloc_jumbo_mem(sc_if)
751 struct sk_if_softc *sc_if;
752 {
753 caddr_t ptr;
754 register int i;
755 struct sk_jpool_entry *entry;
756
757 /* Grab a big chunk o' storage. */
758 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
759 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
760
761 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
762 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
763 return(ENOBUFS);
764 }
765
766 SLIST_INIT(&sc_if->sk_jfree_listhead);
767 SLIST_INIT(&sc_if->sk_jinuse_listhead);
768
769 /*
770 * Now divide it up into 9K pieces and save the addresses
771 * in an array.
772 */
773 ptr = sc_if->sk_cdata.sk_jumbo_buf;
774 for (i = 0; i < SK_JSLOTS; i++) {
775 sc_if->sk_cdata.sk_jslots[i] = ptr;
776 ptr += SK_JLEN;
777 entry = malloc(sizeof(struct sk_jpool_entry),
778 M_DEVBUF, M_NOWAIT);
779 if (entry == NULL) {
780 free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
781 sc_if->sk_cdata.sk_jumbo_buf = NULL;
782 printf("sk%d: no memory for jumbo "
783 "buffer queue!\n", sc_if->sk_unit);
784 return(ENOBUFS);
785 }
786 entry->slot = i;
787 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
788 entry, jpool_entries);
789 }
790
791 return(0);
792 }
793
794 /*
795 * Allocate a jumbo buffer.
796 */
797 static void *
798 sk_jalloc(sc_if)
799 struct sk_if_softc *sc_if;
800 {
801 struct sk_jpool_entry *entry;
802
803 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
804
805 if (entry == NULL) {
806 #ifdef SK_VERBOSE
807 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
808 #endif
809 return(NULL);
810 }
811
812 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
813 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
814 return(sc_if->sk_cdata.sk_jslots[entry->slot]);
815 }
816
817 /*
818 * Release a jumbo buffer.
819 */
/*
 * mbuf external-buffer free callback: return a jumbo buffer to the
 * pool.  `args' carries the owning softc (set up by MEXTADD in
 * sk_newbuf).  Note the entry records are interchangeable: we recycle
 * whichever entry heads the in-use list and overwrite its slot number
 * with the slot computed from `buf', so no search is needed.
 */
static void
sk_jfree(buf, args)
	void *buf;
	void *args;
{
	struct sk_if_softc *sc_if;
	int i;
	struct sk_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;

	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);

	return;
}
851
852 /*
853 * Set media options.
854 */
/*
 * Set media options: reinitialize the interface, then let the MII
 * layer reprogram the PHY for the newly selected media.  Always
 * returns 0.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}
869
870 /*
871 * Report current media status.
872 */
/*
 * Report current media status: poll the PHY through the MII layer and
 * copy the active media and link status into the caller's ifmediareq.
 */
static void
sk_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}
890
891 static int
892 sk_ioctl(ifp, command, data)
893 struct ifnet *ifp;
894 u_long command;
895 caddr_t data;
896 {
897 struct sk_if_softc *sc_if = ifp->if_softc;
898 struct ifreq *ifr = (struct ifreq *) data;
899 int error = 0;
900 struct mii_data *mii;
901
902 SK_IF_LOCK(sc_if);
903
904 switch(command) {
905 case SIOCSIFMTU:
906 if (ifr->ifr_mtu > SK_JUMBO_MTU)
907 error = EINVAL;
908 else {
909 ifp->if_mtu = ifr->ifr_mtu;
910 sk_init(sc_if);
911 }
912 break;
913 case SIOCSIFFLAGS:
914 if (ifp->if_flags & IFF_UP) {
915 if (ifp->if_flags & IFF_RUNNING &&
916 ifp->if_flags & IFF_PROMISC &&
917 !(sc_if->sk_if_flags & IFF_PROMISC)) {
918 SK_XM_SETBIT_4(sc_if, XM_MODE,
919 XM_MODE_RX_PROMISC);
920 sk_setmulti(sc_if);
921 } else if (ifp->if_flags & IFF_RUNNING &&
922 !(ifp->if_flags & IFF_PROMISC) &&
923 sc_if->sk_if_flags & IFF_PROMISC) {
924 SK_XM_CLRBIT_4(sc_if, XM_MODE,
925 XM_MODE_RX_PROMISC);
926 sk_setmulti(sc_if);
927 } else
928 sk_init(sc_if);
929 } else {
930 if (ifp->if_flags & IFF_RUNNING)
931 sk_stop(sc_if);
932 }
933 sc_if->sk_if_flags = ifp->if_flags;
934 error = 0;
935 break;
936 case SIOCADDMULTI:
937 case SIOCDELMULTI:
938 sk_setmulti(sc_if);
939 error = 0;
940 break;
941 case SIOCGIFMEDIA:
942 case SIOCSIFMEDIA:
943 mii = device_get_softc(sc_if->sk_miibus);
944 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
945 break;
946 default:
947 error = ether_ioctl(ifp, command, data);
948 break;
949 }
950
951 SK_IF_UNLOCK(sc_if);
952
953 return(error);
954 }
955
956 /*
957 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
958 * IDs against our list and return a device name if we find a match.
959 */
960 static int
961 sk_probe(dev)
962 device_t dev;
963 {
964 struct sk_type *t;
965
966 t = sk_devs;
967
968 while(t->sk_name != NULL) {
969 if ((pci_get_vendor(dev) == t->sk_vid) &&
970 (pci_get_device(dev) == t->sk_did)) {
971 device_set_desc(dev, t->sk_name);
972 return(0);
973 }
974 t++;
975 }
976
977 return(ENXIO);
978 }
979
980 /*
981 * Force the GEnesis into reset, then bring it out of reset.
982 */
/*
 * Reset the GEnesis controller (software + master reset, then
 * unreset), then re-establish the baseline chip state: packet
 * arbiter timeouts, RAM interface, and interrupt moderation.
 */
static void
sk_reset(sc)
	struct sk_softc *sc;
{
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	/* Give the chip time to settle before releasing reset. */
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1018
/*
 * Probe method for the XMAC child devices; always succeeds, since the
 * parent only creates children for MACs it knows exist.
 */
static int
sk_probe_xmac(dev)
	device_t dev;
{
	/*
	 * Not much to do here. We always know there will be
	 * at least one XMAC present, and if there are two,
	 * sk_attach() will create a second device instance
	 * for us.
	 */
	device_set_desc(dev, "XaQti Corp. XMAC II");

	return(0);
}
1033
1034 /*
1035 * Each XMAC chip is attached as a separate logical IP interface.
1036 * Single port cards will have only one logical interface of course.
1037 */
1038 static int
1039 sk_attach_xmac(dev)
1040 device_t dev;
1041 {
1042 struct sk_softc *sc;
1043 struct sk_if_softc *sc_if;
1044 struct ifnet *ifp;
1045 int i, port, error;
1046
1047 if (dev == NULL)
1048 return(EINVAL);
1049
1050 error = 0;
1051 sc_if = device_get_softc(dev);
1052 sc = device_get_softc(device_get_parent(dev));
1053 SK_LOCK(sc);
1054 port = *(int *)device_get_ivars(dev);
1055 free(device_get_ivars(dev), M_DEVBUF);
1056 device_set_ivars(dev, NULL);
1057
1058 sc_if->sk_dev = dev;
1059 sc_if->sk_unit = device_get_unit(dev);
1060 sc_if->sk_port = port;
1061 sc_if->sk_softc = sc;
1062 sc->sk_if[port] = sc_if;
1063 if (port == SK_PORT_A)
1064 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1065 if (port == SK_PORT_B)
1066 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1067
1068 /*
1069 * Get station address for this interface. Note that
1070 * dual port cards actually come with three station
1071 * addresses: one for each port, plus an extra. The
1072 * extra one is used by the SysKonnect driver software
1073 * as a 'virtual' station address for when both ports
1074 * are operating in failover mode. Currently we don't
1075 * use this extra address.
1076 */
1077 for (i = 0; i < ETHER_ADDR_LEN; i++)
1078 sc_if->arpcom.ac_enaddr[i] =
1079 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1080
1081 printf("sk%d: Ethernet address: %6D\n",
1082 sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1083
1084 /*
1085 * Set up RAM buffer addresses. The NIC will have a certain
1086 * amount of SRAM on it, somewhere between 512K and 2MB. We
1087 * need to divide this up a) between the transmitter and
1088 * receiver and b) between the two XMACs, if this is a
1089 * dual port NIC. Our algorithm is to divide up the memory
1090 * evenly so that everyone gets a fair share.
1091 */
1092 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1093 u_int32_t chunk, val;
1094
1095 chunk = sc->sk_ramsize / 2;
1096 val = sc->sk_rboff / sizeof(u_int64_t);
1097 sc_if->sk_rx_ramstart = val;
1098 val += (chunk / sizeof(u_int64_t));
1099 sc_if->sk_rx_ramend = val - 1;
1100 sc_if->sk_tx_ramstart = val;
1101 val += (chunk / sizeof(u_int64_t));
1102 sc_if->sk_tx_ramend = val - 1;
1103 } else {
1104 u_int32_t chunk, val;
1105
1106 chunk = sc->sk_ramsize / 4;
1107 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1108 sizeof(u_int64_t);
1109 sc_if->sk_rx_ramstart = val;
1110 val += (chunk / sizeof(u_int64_t));
1111 sc_if->sk_rx_ramend = val - 1;
1112 sc_if->sk_tx_ramstart = val;
1113 val += (chunk / sizeof(u_int64_t));
1114 sc_if->sk_tx_ramend = val - 1;
1115 }
1116
1117 /* Read and save PHY type and set PHY address */
1118 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1119 switch(sc_if->sk_phytype) {
1120 case SK_PHYTYPE_XMAC:
1121 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1122 break;
1123 case SK_PHYTYPE_BCOM:
1124 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1125 break;
1126 default:
1127 printf("skc%d: unsupported PHY type: %d\n",
1128 sc->sk_unit, sc_if->sk_phytype);
1129 error = ENODEV;
1130 goto fail_xmac;
1131 }
1132
1133 /* Allocate the descriptor queues. */
1134 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1135 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1136
1137 if (sc_if->sk_rdata == NULL) {
1138 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1139 error = ENOMEM;
1140 goto fail_xmac;
1141 }
1142
1143 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1144
1145 /* Try to allocate memory for jumbo buffers. */
1146 if (sk_alloc_jumbo_mem(sc_if)) {
1147 printf("sk%d: jumbo buffer allocation failed\n",
1148 sc_if->sk_unit);
1149 error = ENOMEM;
1150 goto fail_xmac;
1151 }
1152
1153 ifp = &sc_if->arpcom.ac_if;
1154 ifp->if_softc = sc_if;
1155 ifp->if_unit = sc_if->sk_unit;
1156 ifp->if_name = "sk";
1157 ifp->if_mtu = ETHERMTU;
1158 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1159 ifp->if_ioctl = sk_ioctl;
1160 ifp->if_output = ether_output;
1161 ifp->if_start = sk_start;
1162 ifp->if_watchdog = sk_watchdog;
1163 ifp->if_init = sk_init;
1164 ifp->if_baudrate = 1000000000;
1165 ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1166
1167 callout_handle_init(&sc_if->sk_tick_ch);
1168
1169 /*
1170 * Call MI attach routine.
1171 */
1172 ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1173
1174 /*
1175 * Do miibus setup.
1176 */
1177 sk_init_xmac(sc_if);
1178 if (mii_phy_probe(dev, &sc_if->sk_miibus,
1179 sk_ifmedia_upd, sk_ifmedia_sts)) {
1180 printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1181 ether_ifdetach(ifp);
1182 error = ENXIO;
1183 goto fail_xmac;
1184 }
1185
1186 fail_xmac:
1187 SK_UNLOCK(sc);
1188 if (error) {
1189 /* Access should be ok even though lock has been dropped */
1190 sc->sk_if[port] = NULL;
1191 sk_detach_xmac(dev);
1192 }
1193
1194 return(error);
1195 }
1196
1197 /*
1198 * Attach the interface. Allocate softc structures, do ifmedia
1199 * setup and ethernet/BPF attach.
1200 */
1201 static int
1202 sk_attach(dev)
1203 device_t dev;
1204 {
1205 struct sk_softc *sc;
1206 int unit, error = 0, rid, *port;
1207
1208 sc = device_get_softc(dev);
1209 unit = device_get_unit(dev);
1210
1211 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1212 MTX_DEF | MTX_RECURSE);
1213
1214 /*
1215 * Handle power management nonsense.
1216 */
1217 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1218 u_int32_t iobase, membase, irq;
1219
1220 /* Save important PCI config data. */
1221 iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1222 membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1223 irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1224
1225 /* Reset the power state. */
1226 printf("skc%d: chip is in D%d power mode "
1227 "-- setting to D0\n", unit,
1228 pci_get_powerstate(dev));
1229 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1230
1231 /* Restore PCI config data. */
1232 pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1233 pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1234 pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1235 }
1236
1237 /*
1238 * Map control/status registers.
1239 */
1240 pci_enable_busmaster(dev);
1241
1242 rid = SK_RID;
1243 sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1244 0, ~0, 1, RF_ACTIVE);
1245
1246 if (sc->sk_res == NULL) {
1247 printf("sk%d: couldn't map ports/memory\n", unit);
1248 error = ENXIO;
1249 goto fail;
1250 }
1251
1252 sc->sk_btag = rman_get_bustag(sc->sk_res);
1253 sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1254
1255 /* Allocate interrupt */
1256 rid = 0;
1257 sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1258 RF_SHAREABLE | RF_ACTIVE);
1259
1260 if (sc->sk_irq == NULL) {
1261 printf("skc%d: couldn't map interrupt\n", unit);
1262 error = ENXIO;
1263 goto fail;
1264 }
1265
1266 /* Reset the adapter. */
1267 sk_reset(sc);
1268
1269 sc->sk_unit = unit;
1270
1271 /* Read and save vital product data from EEPROM. */
1272 sk_vpd_read(sc);
1273
1274 /* Read and save RAM size and RAMbuffer offset */
1275 switch(sk_win_read_1(sc, SK_EPROM0)) {
1276 case SK_RAMSIZE_512K_64:
1277 sc->sk_ramsize = 0x80000;
1278 sc->sk_rboff = SK_RBOFF_0;
1279 break;
1280 case SK_RAMSIZE_1024K_64:
1281 sc->sk_ramsize = 0x100000;
1282 sc->sk_rboff = SK_RBOFF_80000;
1283 break;
1284 case SK_RAMSIZE_1024K_128:
1285 sc->sk_ramsize = 0x100000;
1286 sc->sk_rboff = SK_RBOFF_0;
1287 break;
1288 case SK_RAMSIZE_2048K_128:
1289 sc->sk_ramsize = 0x200000;
1290 sc->sk_rboff = SK_RBOFF_0;
1291 break;
1292 default:
1293 printf("skc%d: unknown ram size: %d\n",
1294 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1295 error = ENXIO;
1296 goto fail;
1297 }
1298
1299 /* Read and save physical media type */
1300 switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1301 case SK_PMD_1000BASESX:
1302 sc->sk_pmd = IFM_1000_SX;
1303 break;
1304 case SK_PMD_1000BASELX:
1305 sc->sk_pmd = IFM_1000_LX;
1306 break;
1307 case SK_PMD_1000BASECX:
1308 sc->sk_pmd = IFM_1000_CX;
1309 break;
1310 case SK_PMD_1000BASETX:
1311 sc->sk_pmd = IFM_1000_T;
1312 break;
1313 default:
1314 printf("skc%d: unknown media type: 0x%x\n",
1315 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1316 error = ENXIO;
1317 goto fail;
1318 }
1319
1320 /* Announce the product name. */
1321 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1322 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1323 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1324 *port = SK_PORT_A;
1325 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1326
1327 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1328 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1329 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1330 *port = SK_PORT_B;
1331 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1332 }
1333
1334 /* Turn on the 'driver is loaded' LED. */
1335 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1336
1337 bus_generic_attach(dev);
1338
1339 /* Hook interrupt last to avoid having to lock softc */
1340 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1341 sk_intr, sc, &sc->sk_intrhand);
1342
1343 if (error) {
1344 printf("skc%d: couldn't set up irq\n", unit);
1345 goto fail;
1346 }
1347
1348 fail:
1349 if (error)
1350 sk_detach(dev);
1351
1352 return(error);
1353 }
1354
1355 /*
1356 * Shutdown hardware and free up resources. This can be called any
1357 * time after the mutex has been initialized. It is called in both
1358 * the error case in attach and the normal detach case so it needs
1359 * to be careful about only freeing resources that have actually been
1360 * allocated.
1361 */
1362 static int
1363 sk_detach_xmac(dev)
1364 device_t dev;
1365 {
1366 struct sk_softc *sc;
1367 struct sk_if_softc *sc_if;
1368 struct ifnet *ifp;
1369
1370 sc = device_get_softc(device_get_parent(dev));
1371 sc_if = device_get_softc(dev);
1372 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1373 ("sk mutex not initialized in sk_detach_xmac"));
1374 SK_IF_LOCK(sc_if);
1375
1376 ifp = &sc_if->arpcom.ac_if;
1377 /* These should only be active if attach_xmac succeeded */
1378 if (device_is_attached(dev)) {
1379 sk_stop(sc_if);
1380 ether_ifdetach(ifp);
1381 }
1382 if (sc_if->sk_miibus)
1383 device_delete_child(dev, sc_if->sk_miibus);
1384 bus_generic_detach(dev);
1385 if (sc_if->sk_cdata.sk_jumbo_buf)
1386 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1387 if (sc_if->sk_rdata) {
1388 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
1389 M_DEVBUF);
1390 }
1391 SK_IF_UNLOCK(sc_if);
1392
1393 return(0);
1394 }
1395
1396 static int
1397 sk_detach(dev)
1398 device_t dev;
1399 {
1400 struct sk_softc *sc;
1401
1402 sc = device_get_softc(dev);
1403 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1404 SK_LOCK(sc);
1405
1406 if (device_is_alive(dev)) {
1407 if (sc->sk_devs[SK_PORT_A] != NULL)
1408 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1409 if (sc->sk_devs[SK_PORT_B] != NULL)
1410 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1411 bus_generic_detach(dev);
1412 }
1413
1414 if (sc->sk_intrhand)
1415 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1416 if (sc->sk_irq)
1417 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1418 if (sc->sk_res)
1419 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1420
1421 SK_UNLOCK(sc);
1422 mtx_destroy(&sc->sk_mtx);
1423
1424 return(0);
1425 }
1426
/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting
 * at index *txidx. Each non-empty mbuf in the chain occupies one
 * fragment descriptor. On success, returns 0 and advances *txidx
 * to the next free slot; on ring exhaustion returns ENOBUFS and
 * leaves the ring untouched from the chip's point of view (the
 * head descriptor's OWN bit is only set at the very end).
 */
static int
sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc *sc_if;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct sk_tx_desc *f = NULL;
	struct mbuf *m;
	u_int32_t frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * Don't set OWN on the first fragment yet: the
			 * NIC must not see the chain until it is fully
			 * built. The head's OWN bit is set below.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* Can't happen with the loop above; kept as a safety net. */
	if (m != NULL)
		return(ENOBUFS);

	/* Tag the final fragment and ask for an interrupt at EOF. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	/* Remember the mbuf chain so sk_txeof() can free it later. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Now hand the whole chain to the NIC in one step. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1476
/*
 * Transmit start routine. Drain the interface send queue into the
 * TX descriptor ring via sk_encap(), kick the chip's TX BMU, and
 * arm the watchdog timer. If the ring fills up, the packet that
 * didn't fit is put back at the head of the queue and IFF_OACTIVE
 * is set until sk_txeof() reclaims descriptors.
 */
static void
sk_start(ifp)
	struct ifnet *ifp;
{
	struct sk_softc *sc;
	struct sk_if_softc *sc_if;
	struct mbuf *m_head = NULL;
	u_int32_t idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop when we reach a slot whose mbuf hasn't been reclaimed yet. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit: publish the new producer index and start the BMU. */
	sc_if->sk_cdata.sk_tx_prod = idx;
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;
	SK_IF_UNLOCK(sc_if);

	return;
}
1526
1527
1528 static void
1529 sk_watchdog(ifp)
1530 struct ifnet *ifp;
1531 {
1532 struct sk_if_softc *sc_if;
1533
1534 sc_if = ifp->if_softc;
1535
1536 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1537 sk_init(sc_if);
1538
1539 return;
1540 }
1541
1542 static void
1543 sk_shutdown(dev)
1544 device_t dev;
1545 {
1546 struct sk_softc *sc;
1547
1548 sc = device_get_softc(dev);
1549 SK_LOCK(sc);
1550
1551 /* Turn off the 'driver is loaded' LED. */
1552 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1553
1554 /*
1555 * Reset the GEnesis controller. Doing this should also
1556 * assert the resets on the attached XMAC(s).
1557 */
1558 sk_reset(sc);
1559 SK_UNLOCK(sc);
1560
1561 return;
1562 }
1563
/*
 * RX completion handler. Walk the receive ring from the saved
 * index, passing each completed frame (descriptors whose
 * SK_RXCTL_OWN bit the NIC has cleared) up the stack and refilling
 * the ring slot with a fresh jumbo buffer.
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc *sc_if;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct sk_chain *cur_rx;
	int total_len = 0;
	int i;
	u_int32_t rxstat;

	ifp = &sc_if->arpcom.ac_if;
	/* sk_rx_prod holds the software's current position in the ring. */
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		/* Detach the mbuf from the slot before advancing. */
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Frames the XMAC flagged bad: count and recycle the buffer. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf *m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			/* Fresh buffer installed: hand the old one upstream. */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1627
/*
 * TX completion handler. Reclaim transmit descriptors the NIC has
 * finished with (OWN bit cleared), free their mbuf chains, and
 * clear IFF_OACTIVE if any ring space was recovered so sk_start()
 * can queue more packets.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	u_int32_t idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Stop at the first descriptor the NIC still owns. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Only the last fragment of a frame counts as a packet. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		/* Progress was made, so disarm the watchdog. */
		ifp->if_timer = 0;
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	/* Reclaimed at least one descriptor: transmit may proceed. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
1665
/*
 * Periodic link tick, scheduled with timeout(9) while waiting for
 * the link to come back on boards with the XMAC's internal PHY.
 * Polls the XMAC GPIO link_sync bit; once the link looks stable it
 * unmasks the GP0 interrupt and runs the MII tick. Boards with a
 * Broadcom PHY are redirected to sk_intr_bcom() instead.
 */
static void
sk_tick(xsc_if)
	void *xsc_if;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	int i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Interface went down since we were scheduled: nothing to do. */
	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/*
	 * GP0 was observed set during the three reads: link not yet
	 * declared stable, so poll again in one second.
	 */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Reading ISR clears any latched interrupt bits. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	/* Link restored: cancel the polling callout. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
1718
/*
 * Service an interrupt from the Broadcom PHY on external-PHY
 * (1000baseT) boards. Pauses the XMAC's TX/RX state machines,
 * acknowledges the PHY interrupt, updates link state and the link
 * LED, then re-enables TX/RX.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int status;

	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	/* Quiesce the MAC while we talk to the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_miibus_readreg(sc_if->sk_dev,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/*
	 * NOTE(review): this early return skips the TX/RX re-enable
	 * at the bottom; presumably sk_init() restores the enables
	 * when the interface is brought up -- confirm.
	 */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate media. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link (re)established: unmask PHY interrupts. */
			sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* No resolved change: keep the MII state machine going. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Restore the MAC's TX/RX state machines. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
1776
/*
 * Service an interrupt from the XMAC itself. On internal-PHY
 * boards, link transitions start the sk_tick() polling loop; FIFO
 * under/overruns are handled by flushing the offending FIFO.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	u_int16_t status;
	struct mii_data *mii;

	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() re-enables it. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read ISR to clear anything that latched while we worked. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
1814
1815 static void
1816 sk_intr(xsc)
1817 void *xsc;
1818 {
1819 struct sk_softc *sc = xsc;
1820 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
1821 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
1822 u_int32_t status;
1823
1824 SK_LOCK(sc);
1825
1826 sc_if0 = sc->sk_if[SK_PORT_A];
1827 sc_if1 = sc->sk_if[SK_PORT_B];
1828
1829 if (sc_if0 != NULL)
1830 ifp0 = &sc_if0->arpcom.ac_if;
1831 if (sc_if1 != NULL)
1832 ifp1 = &sc_if1->arpcom.ac_if;
1833
1834 for (;;) {
1835 status = CSR_READ_4(sc, SK_ISSR);
1836 if (!(status & sc->sk_intrmask))
1837 break;
1838
1839 /* Handle receive interrupts first. */
1840 if (status & SK_ISR_RX1_EOF) {
1841 sk_rxeof(sc_if0);
1842 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1843 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1844 }
1845 if (status & SK_ISR_RX2_EOF) {
1846 sk_rxeof(sc_if1);
1847 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1848 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1849 }
1850
1851 /* Then transmit interrupts. */
1852 if (status & SK_ISR_TX1_S_EOF) {
1853 sk_txeof(sc_if0);
1854 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1855 SK_TXBMU_CLR_IRQ_EOF);
1856 }
1857 if (status & SK_ISR_TX2_S_EOF) {
1858 sk_txeof(sc_if1);
1859 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1860 SK_TXBMU_CLR_IRQ_EOF);
1861 }
1862
1863 /* Then MAC interrupts. */
1864 if (status & SK_ISR_MAC1 &&
1865 ifp0->if_flags & IFF_RUNNING)
1866 sk_intr_xmac(sc_if0);
1867
1868 if (status & SK_ISR_MAC2 &&
1869 ifp1->if_flags & IFF_RUNNING)
1870 sk_intr_xmac(sc_if1);
1871
1872 if (status & SK_ISR_EXTERNAL_REG) {
1873 if (ifp0 != NULL)
1874 sk_intr_bcom(sc_if0);
1875 if (ifp1 != NULL)
1876 sk_intr_bcom(sc_if1);
1877 }
1878 }
1879
1880 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1881
1882 if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1883 sk_start(ifp0);
1884 if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1885 sk_start(ifp1);
1886
1887 SK_UNLOCK(sc);
1888
1889 return;
1890 }
1891
/*
 * Bring one XMAC out of reset and program it: station address,
 * RX/TX filtering modes, frame-size handling, TX threshold,
 * multicast filter, interrupt masks and MAC arbiter timing. For
 * external Broadcom PHYs this also releases the PHY reset and
 * applies the BCM5400 errata writes. Register write ordering
 * follows the hardware's unreset-then-configure requirement.
 */
static void
sk_init_xmac(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct ifnet *ifp;
	/* BCM5400 errata: magic register/value pairs, terminated by {0,0}. */
	struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int i = 0;
		u_int32_t val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		/* Port A and B use different GPIO lines for PHY reset. */
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_miibus_writereg(sc_if->sk_dev,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address: three 16-bit halves of the MAC address. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Mirror the interface's promiscuous/broadcast flags into the MAC. */
	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Allow oversized frames only when the MTU calls for them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts (ISR is read-to-clear). */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter: timing values differ per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
2060
2061 /*
2062 * Note that to properly initialize any part of the GEnesis chip,
2063 * you first have to take it out of reset mode.
2064 */
static void
sk_init(xsc)
	void *xsc;	/* actually a struct sk_if_softc * */
{
	struct sk_if_softc *sc_if = xsc;
	struct sk_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);
	mii_mediachg(mii);

	/* Configure MAC FIFOs: unreset, set end address, switch on. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/*
	 * Configure RAMbuffers using this port's share of the SRAM
	 * (bounds were computed at attach time).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point them at the physical ring addresses. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors: RX refill can fail if mbufs are exhausted. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling: enable this port's sources. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}
2174
/*
 * Stop one interface: cancel the link tick, put the PHY and XMAC
 * back into reset, take the FIFOs, BMUs, RAM buffers, arbiter and
 * LEDs offline, mask this port's interrupts, and free any mbufs
 * still sitting in the RX/TX rings.
 */
static void
sk_stop(sc_if)
	struct sk_if_softc *sc_if;
{
	int i;
	struct sk_softc *sc;
	struct ifnet *ifp;

	SK_IF_LOCK(sc_if);
	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Cancel any pending sk_tick() callout. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts for this port only. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear (read-to-clear) and mask all XMAC interrupts. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
	SK_IF_UNLOCK(sc_if);
	return;
}
Cache object: a1c238271f6e371d9b36ef0ba9435437
|