/* FreeBSD/Linux Kernel Cross Reference listing: sys/pci/if_sk.c */
1 /*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD$
33 */
34
35 /*
36 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37 * the SK-984x series adapters, both single port and dual port.
38 * References:
39 * The XaQti XMAC II datasheet, http://www.xaqti.com
40 * The SysKonnect GEnesis manual, http://www.syskonnect.com
41 *
42 * Written by Bill Paul <wpaul@ee.columbia.edu>
43 * Department of Electrical Engineering
44 * Columbia University, New York City
45 */
46
47 /*
48 * The SysKonnect gigabit ethernet adapters consist of two main
49 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
50 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
51 * components and a PHY while the GEnesis controller provides a PCI
52 * interface with DMA support. Each card may have between 512K and
53 * 2MB of SRAM on board depending on the configuration.
54 *
55 * The SysKonnect GEnesis controller can have either one or two XMAC
56 * chips connected to it, allowing single or dual port NIC configurations.
57 * SysKonnect has the distinction of being the only vendor on the market
58 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
59 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
60 * XMAC registers. This driver takes advantage of these features to allow
61 * both XMACs to operate as independent interfaces.
62 */
63
64 #include "bpfilter.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/socket.h>
73 #include <sys/queue.h>
74
75 #include <net/if.h>
76 #include <net/if_arp.h>
77 #include <net/ethernet.h>
78 #include <net/if_dl.h>
79 #include <net/if_media.h>
80
81 #if NBPFILTER > 0
82 #include <net/bpf.h>
83 #endif
84
85 #include <vm/vm.h> /* for vtophys */
86 #include <vm/pmap.h> /* for vtophys */
87 #include <machine/clock.h> /* for DELAY */
88 #include <machine/bus_pio.h>
89 #include <machine/bus_memio.h>
90 #include <machine/bus.h>
91
92 #include <pci/pcireg.h>
93 #include <pci/pcivar.h>
94
95 #define SK_USEIOSPACE
96
97 #include <pci/if_skreg.h>
98 #include <pci/xmaciireg.h>
99
100 #ifndef lint
101 static const char rcsid[] =
102 "$FreeBSD$";
103 #endif
104
/*
 * Table of PCI vendor/device IDs claimed by this driver; terminated
 * by an all-zero sentinel entry. Matched by sk_probe().
 */
static struct sk_type sk_devs[] = {
	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
	{ 0, 0, NULL }
};
109
/* Count of attached controllers. NOTE(review): not referenced in this chunk. */
static unsigned long skc_count = 0;
111 static const char *sk_probe __P((pcici_t, pcidi_t));
112 static void sk_attach __P((pcici_t, int));
113 static int sk_attach_xmac __P((struct sk_softc *, int));
114 static void sk_intr __P((void *));
115 static void sk_intr_xmac __P((struct sk_if_softc *));
116 static void sk_rxeof __P((struct sk_if_softc *));
117 static void sk_txeof __P((struct sk_if_softc *));
118 static int sk_encap __P((struct sk_if_softc *, struct mbuf *,
119 u_int32_t *));
120 static void sk_start __P((struct ifnet *));
121 static int sk_ioctl __P((struct ifnet *, u_long, caddr_t));
122 static void sk_init __P((void *));
123 static void sk_init_xmac __P((struct sk_if_softc *));
124 static void sk_stop __P((struct sk_if_softc *));
125 static void sk_watchdog __P((struct ifnet *));
126 static void sk_shutdown __P((int, void *));
127 static int sk_ifmedia_upd __P((struct ifnet *));
128 static void sk_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
129 static void sk_reset __P((struct sk_softc *));
130 static int sk_newbuf __P((struct sk_if_softc *,
131 struct sk_chain *, struct mbuf *));
132 static int sk_alloc_jumbo_mem __P((struct sk_if_softc *));
133 static void *sk_jalloc __P((struct sk_if_softc *));
134 static void sk_jfree __P((caddr_t, u_int));
135 static void sk_jref __P((caddr_t, u_int));
136 static int sk_init_rx_ring __P((struct sk_if_softc *));
137 static void sk_init_tx_ring __P((struct sk_if_softc *));
138 #ifdef notdef
139 static u_int32_t sk_win_read_4 __P((struct sk_softc *, int));
140 #endif
141 static u_int16_t sk_win_read_2 __P((struct sk_softc *, int));
142 static u_int8_t sk_win_read_1 __P((struct sk_softc *, int));
143 static void sk_win_write_4 __P((struct sk_softc *, int, u_int32_t));
144 static void sk_win_write_2 __P((struct sk_softc *, int, u_int32_t));
145 static void sk_win_write_1 __P((struct sk_softc *, int, u_int32_t));
146 static u_int8_t sk_vpd_readbyte __P((struct sk_softc *, int));
147 static void sk_vpd_read_res __P((struct sk_softc *,
148 struct vpd_res *, int));
149 static void sk_vpd_read __P((struct sk_softc *));
150 static u_int16_t sk_phy_readreg __P((struct sk_if_softc *, int));
151 static void sk_phy_writereg __P((struct sk_if_softc *, int, u_int32_t));
152 static u_int32_t sk_calchash __P((caddr_t));
153 static void sk_setfilt __P((struct sk_if_softc *, caddr_t, int));
154 static void sk_setmulti __P((struct sk_if_softc *));
155
/* Per-architecture bus space tags used when mapping the device. */
#ifdef __i386__
#define SK_BUS_SPACE_MEM I386_BUS_SPACE_MEM
#define SK_BUS_SPACE_IO I386_BUS_SPACE_IO
#endif

#ifdef __alpha__
#define SK_BUS_SPACE_MEM ALPHA_BUS_SPACE_MEM
#define SK_BUS_SPACE_IO ALPHA_BUS_SPACE_IO
#endif

/* Read-modify-write helpers for direct CSRs and windowed registers. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
183
#ifdef notdef
/*
 * Read a 32-bit windowed register: select the window containing
 * `reg' via the Register Address Port, then read through the window
 * base. Currently unused; kept under `notdef' for completeness.
 */
static u_int32_t sk_win_read_4(sc, reg)
	struct sk_softc *sc;
	int reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
}
#endif
193
194 static u_int16_t sk_win_read_2(sc, reg)
195 struct sk_softc *sc;
196 int reg;
197 {
198 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
199 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
200 }
201
202 static u_int8_t sk_win_read_1(sc, reg)
203 struct sk_softc *sc;
204 int reg;
205 {
206 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
207 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
208 }
209
210 static void sk_win_write_4(sc, reg, val)
211 struct sk_softc *sc;
212 int reg;
213 u_int32_t val;
214 {
215 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
216 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
217 return;
218 }
219
220 static void sk_win_write_2(sc, reg, val)
221 struct sk_softc *sc;
222 int reg;
223 u_int32_t val;
224 {
225 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
226 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
227 return;
228 }
229
230 static void sk_win_write_1(sc, reg, val)
231 struct sk_softc *sc;
232 int reg;
233 u_int32_t val;
234 {
235 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
236 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
237 return;
238 }
239
240 /*
241 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
243 * denoted by resource IDs. The SysKonnect VPD contains an ID string
244 * resource (the name of the adapter), a read-only area resource
245 * containing various key/data fields and a read/write area which
246 * can be used to store asset management information or log messages.
247 * We read the ID string and read-only into buffers attached to
248 * the controller softc structure for later use. At the moment,
249 * we only use the ID string during sk_attach().
250 */
251 static u_int8_t sk_vpd_readbyte(sc, addr)
252 struct sk_softc *sc;
253 int addr;
254 {
255 int i;
256
257 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
258 for (i = 0; i < SK_TIMEOUT; i++) {
259 DELAY(1);
260 if (sk_win_read_2(sc,
261 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
262 break;
263 }
264
265 if (i == SK_TIMEOUT)
266 return(0);
267
268 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
269 }
270
271 static void sk_vpd_read_res(sc, res, addr)
272 struct sk_softc *sc;
273 struct vpd_res *res;
274 int addr;
275 {
276 int i;
277 u_int8_t *ptr;
278
279 ptr = (u_int8_t *)res;
280 for (i = 0; i < sizeof(struct vpd_res); i++)
281 ptr[i] = sk_vpd_readbyte(sc, i + addr);
282
283 return;
284 }
285
286 static void sk_vpd_read(sc)
287 struct sk_softc *sc;
288 {
289 int pos = 0, i;
290 struct vpd_res res;
291
292 if (sc->sk_vpd_prodname != NULL)
293 free(sc->sk_vpd_prodname, M_DEVBUF);
294 if (sc->sk_vpd_readonly != NULL)
295 free(sc->sk_vpd_readonly, M_DEVBUF);
296 sc->sk_vpd_prodname = NULL;
297 sc->sk_vpd_readonly = NULL;
298
299 sk_vpd_read_res(sc, &res, pos);
300
301 if (res.vr_id != VPD_RES_ID) {
302 printf("skc%d: bad VPD resource id: expected %x got %x\n",
303 sc->sk_unit, VPD_RES_ID, res.vr_id);
304 return;
305 }
306
307 pos += sizeof(res);
308 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
309 for (i = 0; i < res.vr_len; i++)
310 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
311 sc->sk_vpd_prodname[i] = '\0';
312 pos += i;
313
314 sk_vpd_read_res(sc, &res, pos);
315
316 if (res.vr_id != VPD_RES_READ) {
317 printf("skc%d: bad VPD resource id: expected %x got %x\n",
318 sc->sk_unit, VPD_RES_READ, res.vr_id);
319 return;
320 }
321
322 pos += sizeof(res);
323 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
324 for (i = 0; i < res.vr_len + 1; i++)
325 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
326
327 return;
328 }
329
330 static u_int16_t sk_phy_readreg(sc_if, reg)
331 struct sk_if_softc *sc_if;
332 int reg;
333 {
334 int i;
335
336 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
337 for (i = 0; i < SK_TIMEOUT; i++) {
338 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
339 break;
340 }
341
342 if (i == SK_TIMEOUT) {
343 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
344 return(0);
345 }
346
347 return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
348 }
349
350 static void sk_phy_writereg(sc_if, reg, val)
351 struct sk_if_softc *sc_if;
352 int reg;
353 u_int32_t val;
354 {
355 int i;
356
357 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
358 for (i = 0; i < SK_TIMEOUT; i++) {
359 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
360 break;
361 }
362
363 if (i == SK_TIMEOUT) {
364 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
365 return;
366 }
367
368 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
369 for (i = 0; i < SK_TIMEOUT; i++) {
370 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
371 break;
372 }
373
374 if (i == SK_TIMEOUT)
375 printf("sk%d: phy write timed out\n", sc_if->sk_unit);
376
377 return;
378 }
379
380 #define SK_POLY 0xEDB88320
381 #define SK_BITS 6
382
383 static u_int32_t sk_calchash(addr)
384 caddr_t addr;
385 {
386 u_int32_t idx, bit, data, crc;
387
388 /* Compute CRC for the address value. */
389 crc = 0xFFFFFFFF; /* initial value */
390
391 for (idx = 0; idx < 6; idx++) {
392 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
393 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
394 }
395
396 return (~crc & ((1 << SK_BITS) - 1));
397 }
398
/*
 * Load a 6-byte station address into perfect-filter slot `slot',
 * written to the XMAC as three 16-bit words.
 * NOTE(review): the u_int16_t casts assume `addr' is at least
 * 2-byte aligned and that host byte order matches what the XMAC
 * expects here -- confirm on big-endian platforms.
 */
static void sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc *sc_if;
	caddr_t addr;
	int slot;
{
	int base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}
414
/*
 * Program the XMAC receive filters from the interface's multicast
 * list. Link-layer groups are loaded into the perfect-filter slots
 * first (slot 0 is reserved for the station address and skipped);
 * once those run out, remaining groups are folded into the 64-bit
 * hash table via sk_calchash(). ALLMULTI/PROMISC accepts everything
 * by setting every hash bit.
 */
static void sk_setmulti(sc_if)
	struct sk_if_softc *sc_if;
{
	struct ifnet *ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct ifmultiaddr *ifma;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: set every bit in the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
		    ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_link.le_next == NULL)
				break;
		}
		/*
		 * Now traverse the list backwards.
		 * NOTE(review): this follows le_prev by casting it back
		 * to an entry pointer, which relies on the LIST linkage
		 * being the first member of struct ifmultiaddr --
		 * confirm against the ifmultiaddr declaration.
		 */
		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
		    ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
				    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = sk_calchash(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Enable both perfect-filter and hash lookups on receive. */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
477
478 static int sk_init_rx_ring(sc_if)
479 struct sk_if_softc *sc_if;
480 {
481 struct sk_chain_data *cd;
482 struct sk_ring_data *rd;
483 int i;
484
485 cd = &sc_if->sk_cdata;
486 rd = sc_if->sk_rdata;
487
488 bzero((char *)rd->sk_rx_ring,
489 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
490
491 for (i = 0; i < SK_RX_RING_CNT; i++) {
492 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
493 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
494 return(ENOBUFS);
495 if (i == (SK_RX_RING_CNT - 1)) {
496 cd->sk_rx_chain[i].sk_next =
497 &cd->sk_rx_chain[0];
498 rd->sk_rx_ring[i].sk_next =
499 vtophys(&rd->sk_rx_ring[0]);
500 } else {
501 cd->sk_rx_chain[i].sk_next =
502 &cd->sk_rx_chain[i + 1];
503 rd->sk_rx_ring[i].sk_next =
504 vtophys(&rd->sk_rx_ring[i + 1]);
505 }
506 }
507
508 sc_if->sk_cdata.sk_rx_prod = 0;
509 sc_if->sk_cdata.sk_rx_cons = 0;
510
511 return(0);
512 }
513
514 static void sk_init_tx_ring(sc_if)
515 struct sk_if_softc *sc_if;
516 {
517 struct sk_chain_data *cd;
518 struct sk_ring_data *rd;
519 int i;
520
521 cd = &sc_if->sk_cdata;
522 rd = sc_if->sk_rdata;
523
524 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
525 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
526
527 for (i = 0; i < SK_TX_RING_CNT; i++) {
528 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
529 if (i == (SK_TX_RING_CNT - 1)) {
530 cd->sk_tx_chain[i].sk_next =
531 &cd->sk_tx_chain[0];
532 rd->sk_tx_ring[i].sk_next =
533 vtophys(&rd->sk_tx_ring[0]);
534 } else {
535 cd->sk_tx_chain[i].sk_next =
536 &cd->sk_tx_chain[i + 1];
537 rd->sk_tx_ring[i].sk_next =
538 vtophys(&rd->sk_tx_ring[i + 1]);
539 }
540 }
541
542 sc_if->sk_cdata.sk_tx_prod = 0;
543 sc_if->sk_cdata.sk_tx_cons = 0;
544 sc_if->sk_cdata.sk_tx_cnt = 0;
545
546 return;
547 }
548
/*
 * Stock a receive descriptor with a 9K jumbo buffer. If `m' is NULL
 * a fresh mbuf header is allocated and a buffer from the private
 * jumbo pool is attached to it as external storage; otherwise the
 * caller's mbuf (which already owns a jumbo buffer) is recycled in
 * place. Returns 0 on success or ENOBUFS if allocation fails.
 */
static int sk_newbuf(sc_if, c, m)
	struct sk_if_softc *sc_if;
	struct sk_chain *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("sk%d: no memory for rx list -- "
			    "packet dropped!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf as external storage;
		 * sk_jfree()/sk_jref() maintain the pool's use counts.
		 */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
		    m_new->m_len = SK_MCLBYTES;
		m_new->m_ext.ext_free = sk_jfree;
		m_new->m_ext.ext_ref = sk_jref;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Hand the buffer's physical address to the hardware descriptor. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
610
/*
 * Allocate jumbo buffer storage. The SysKonnect adapters support
 * "jumbograms" (9K frames), although SysKonnect doesn't currently
 * use them in their drivers. In order for us to use them, we need
 * large 9K receive buffers, however standard mbuf clusters are only
 * 2048 bytes in size. Consequently, we need to allocate and manage
 * our own jumbo buffer pool. Fortunately, this does not require an
 * excessive amount of additional code.
 */
static int sk_alloc_jumbo_mem(sc_if)
	struct sk_if_softc *sc_if;
{
	caddr_t ptr;
	register int i;
	struct sk_jpool_entry *entry;

	/* Grab a big chunk o' storage (physically contiguous). */
	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
	    M_NOWAIT, 0x100000, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc_if->sk_jfree_listhead);
	SLIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * sk_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		/* Stash the softc pointer ahead of each buffer. */
		u_int64_t **aptr;
		aptr = (u_int64_t **)ptr;
		aptr[0] = (u_int64_t *)sc_if;
		ptr += sizeof(u_int64_t);
		sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
		sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
		ptr += SK_MCLBYTES;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/*
			 * NOTE(review): this frees contigmalloc()'d
			 * memory with free(), and the sk_jpool_entry
			 * structures already queued above are leaked --
			 * verify whether contigfree() is available in
			 * this kernel and whether the queued entries
			 * should be drained here.
			 */
			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("sk%d: no memory for jumbo "
			    "buffer queue!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
672
673 /*
674 * Allocate a jumbo buffer.
675 */
676 static void *sk_jalloc(sc_if)
677 struct sk_if_softc *sc_if;
678 {
679 struct sk_jpool_entry *entry;
680
681 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
682
683 if (entry == NULL) {
684 #ifdef SK_VERBOSE
685 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
686 #endif
687 return(NULL);
688 }
689
690 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
691 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
692 sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
693 return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
694 }
695
/*
 * Adjust usage count on a jumbo buffer. In general this doesn't
 * get used much because our jumbo buffers don't get passed around
 * a lot, but it's implemented for correctness.
 */
static void sk_jref(buf, size)
	caddr_t buf;
	u_int size;
{
	struct sk_if_softc *sc_if;
	u_int64_t **aptr;
	register int i;

	/*
	 * Extract the softc pointer stashed in the 8 bytes immediately
	 * preceding the buffer by sk_alloc_jumbo_mem().
	 */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc_if = (struct sk_if_softc *)(aptr[0]);

	if (sc_if == NULL)
		panic("sk_jref: can't find softc pointer!");

	if (size != SK_MCLBYTES)
		panic("sk_jref: adjusting refcount of buf of wrong size!");

	/*
	 * Calculate the slot this buffer belongs to from its offset
	 * into the pool. NOTE(review): assumes SK_JLEN ==
	 * sizeof(u_int64_t) + SK_MCLBYTES -- confirm in if_skreg.h.
	 */
	i = ((vm_offset_t)aptr
	    - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
		panic("sk_jref: buffer already free!");
	else
		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;

	return;
}
734
/*
 * Release a jumbo buffer. Called by the mbuf code when the last
 * reference to an external-storage buffer goes away: drops the use
 * count and, when it hits zero, recycles a bookkeeping entry from
 * the in-use list back onto the free list with this slot number.
 */
static void sk_jfree(buf, size)
	caddr_t buf;
	u_int size;
{
	struct sk_if_softc *sc_if;
	u_int64_t **aptr;
	int i;
	struct sk_jpool_entry *entry;

	/*
	 * Extract the softc pointer stashed in the 8 bytes immediately
	 * preceding the buffer by sk_alloc_jumbo_mem().
	 */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc_if = (struct sk_if_softc *)(aptr[0]);

	if (sc_if == NULL)
		panic("sk_jfree: can't find softc pointer!");

	if (size != SK_MCLBYTES)
		panic("sk_jfree: freeing buffer of wrong size!");

	/* Calculate the slot this buffer belongs to (see sk_jref()). */
	i = ((vm_offset_t)aptr
	    - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");
	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
		panic("sk_jfree: buffer already free!");
	else {
		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
			/*
			 * Any in-use entry will do -- entries carry no
			 * state other than the slot number we set here.
			 */
			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
			if (entry == NULL)
				panic("sk_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
			    jpool_entries);
			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
			    entry, jpool_entries);
		}
	}

	return;
}
782
783 /*
784 * Set media options.
785 */
786 static int sk_ifmedia_upd(ifp)
787 struct ifnet *ifp;
788 {
789 struct sk_if_softc *sc_if;
790 struct ifmedia *ifm;
791
792 sc_if = ifp->if_softc;
793 ifm = &sc_if->ifmedia;
794
795 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
796 return(EINVAL);
797
798 switch(IFM_SUBTYPE(ifm->ifm_media)) {
799 case IFM_AUTO:
800 sk_phy_writereg(sc_if, XM_PHY_BMCR,
801 XM_BMCR_RENEGOTIATE|XM_BMCR_AUTONEGENBL);
802 break;
803 case IFM_1000_LX:
804 case IFM_1000_SX:
805 case IFM_1000_CX:
806 case IFM_1000_TX:
807 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
808 sk_phy_writereg(sc_if, XM_PHY_BMCR, XM_BMCR_DUPLEX);
809 else
810 sk_phy_writereg(sc_if, XM_PHY_BMCR, 0);
811 break;
812 default:
813 printf("sk%d: invalid media selected\n", sc_if->sk_unit);
814 return(EINVAL);
815 break;
816 }
817
818 return(0);
819 }
820
821 /*
822 * Report current media status.
823 */
824 static void sk_ifmedia_sts(ifp, ifmr)
825 struct ifnet *ifp;
826 struct ifmediareq *ifmr;
827 {
828 struct sk_softc *sc;
829 struct sk_if_softc *sc_if;
830 u_int16_t bmsr, extsts;
831
832 sc_if = ifp->if_softc;
833 sc = sc_if->sk_softc;
834
835 ifmr->ifm_status = IFM_AVALID;
836 ifmr->ifm_active = IFM_ETHER;
837
838 bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
839 extsts = sk_phy_readreg(sc_if, XM_PHY_EXTSTS);
840
841 if (!(bmsr & XM_BMSR_LINKSTAT))
842 return;
843
844 ifmr->ifm_status |= IFM_ACTIVE;
845 ifmr->ifm_active |= sc->sk_pmd;;
846 if (extsts & XM_EXTSTS_FULLDUPLEX)
847 ifmr->ifm_active |= IFM_FDX;
848 else
849 ifmr->ifm_active |= IFM_HDX;
850
851 return;
852 }
853
/*
 * Handle socket ioctls: address assignment, MTU changes (anything
 * up to the jumbo maximum), interface flag changes, multicast list
 * updates and media selection. Runs at splimp().
 */
static int sk_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* MTU change requires re-initializing the chip. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while the
			 * interface is running, toggle the RX promisc
			 * bit and reload the filters instead of doing
			 * a full sk_init().
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember flags so the next call can detect transitions. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sk_setmulti(sc_if);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc_if->ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
919
920 /*
921 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
922 * IDs against our list and return a device name if we find a match.
923 */
924 static const char *sk_probe(config_id, device_id)
925 pcici_t config_id;
926 pcidi_t device_id;
927 {
928 struct sk_type *t;
929
930 t = sk_devs;
931
932 while(t->sk_name != NULL) {
933 if ((device_id & 0xFFFF) == t->sk_vid &&
934 ((device_id >> 16) & 0xFFFF) == t->sk_did) {
935 return(t->sk_name);
936 }
937 t++;
938 }
939
940 return(NULL);
941 }
942
/*
 * Force the GEnesis into reset, then bring it out of reset and do
 * the minimum global setup: packet arbiter timeouts, RAM interface,
 * and interrupt moderation.
 */
static void sk_reset(sc)
	struct sk_softc *sc;
{
	/* Assert, then release, both software and master reset. */
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 * NOTE(review): 18.825ns per tick implies ~53.1 ticks/us,
	 * not 54 -- confirm the SK_IM_USECS() factor against the
	 * GEnesis manual.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
980
/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 * Allocates and fills in the per-port softc, reads the station
 * address, partitions the on-board SRAM between RX and TX (and
 * between ports on dual-port cards), allocates descriptor rings and
 * the jumbo buffer pool, then performs ifmedia/ether/BPF attach.
 * Returns 0 on success or an errno on failure.
 */
static int sk_attach_xmac(sc, port)
	struct sk_softc *sc;
	int port;
{
	struct sk_if_softc *sc_if;
	struct ifnet *ifp;
	int i;
	char ifname[64];

	if (sc == NULL)
		return(EINVAL);

	if (port != SK_PORT_A && port != SK_PORT_B)
		return(EINVAL);

	sc_if = malloc(sizeof(struct sk_if_softc), M_DEVBUF, M_NOWAIT);
	if (sc_if == NULL) {
		printf("sk%d: no memory for interface softc!\n", sc->sk_unit);
		return(ENOMEM);
	}
	bzero((char *)sc_if, sizeof(struct sk_if_softc));

	/* Find the first free "sk" unit number. */
	for (i = 0; i < SK_MAXUNIT; i++) {
		sprintf(ifname, "sk%d", i);
		if (ifunit(ifname) == NULL)
			break;
	}

	if (i == SK_MAXUNIT) {
		printf("skc%d: too many sk units\n", sc->sk_unit);
		free(sc_if, M_DEVBUF);
		return(ENODEV);
	}

	sc_if->sk_unit = i;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	/* Each port drives its own transmit BMU CSR. */
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	printf("sk%d: <XaQti Corp. XMAC II> at skc%d port %d\n",
	    sc_if->sk_unit, sc->sk_unit, port);

	printf("sk%d: Ethernet address: %6D\n",
	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 * NOTE(review): RAM addresses are expressed in 8-byte
	 * (u_int64_t) units here -- confirm against if_skreg.h.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t chunk, val;

		/* Single MAC: split the whole SRAM between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t chunk, val;

		/* Dual MAC: each port gets half, split RX/TX within it. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Allocate the descriptor queues (DMA-visible, contiguous). */
	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
	    M_NOWAIT, 0x100000, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_rdata == NULL) {
		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
		free(sc_if, M_DEVBUF);
		sc->sk_if[port] = NULL;
		return(ENOMEM);
	}

	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

	/*
	 * Try to allocate memory for jumbo buffers.
	 * NOTE(review): this failure path frees contigmalloc()'d
	 * sk_rdata with free() -- verify against contigfree().
	 */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf("sk%d: jumbo buffer allocation failed\n",
		    sc_if->sk_unit);
		free(sc_if->sk_rdata, M_DEVBUF);
		free(sc_if, M_DEVBUF);
		sc->sk_if[port] = NULL;
		return(ENOMEM);
	}

	/* Fill in the ifnet and hook up our entry points. */
	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_unit = sc_if->sk_unit;
	ifp->if_name = "sk";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;

	/*
	 * Do ifmedia setup: the fixed PMD media (full and half
	 * duplex) plus autoselect, which is the default.
	 */
	ifmedia_init(&sc_if->ifmedia, 0, sk_ifmedia_upd, sk_ifmedia_sts);
	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd, 0, NULL);
	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_FDX, 0, NULL);
	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_HDX, 0, NULL);
	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif

	return(0);
}
1139
1140 /*
1141 * Attach the interface. Allocate softc structures, do ifmedia
1142 * setup and ethernet/BPF attach.
1143 */
1144 static void
1145 sk_attach(config_id, unit)
1146 pcici_t config_id;
1147 int unit;
1148 {
1149 int s;
1150 #ifndef SK_USEIOSPACE
1151 vm_offset_t pbase, vbase;
1152 #endif
1153 u_int32_t command;
1154 struct sk_softc *sc;
1155
1156 s = splimp();
1157
1158 sc = malloc(sizeof(struct sk_softc), M_DEVBUF, M_NOWAIT);
1159 if (sc == NULL) {
1160 printf("skc%d: no memory for softc struct!\n", unit);
1161 goto fail;
1162 }
1163 bzero(sc, sizeof(struct sk_softc));
1164
1165 /*
1166 * Handle power management nonsense.
1167 */
1168 command = pci_conf_read(config_id, SK_PCI_CAPID) & 0x000000FF;
1169 if (command == 0x01) {
1170
1171 command = pci_conf_read(config_id, SK_PCI_PWRMGMTCTRL);
1172 if (command & SK_PSTATE_MASK) {
1173 u_int32_t iobase, membase, irq;
1174
1175 /* Save important PCI config data. */
1176 iobase = pci_conf_read(config_id, SK_PCI_LOIO);
1177 membase = pci_conf_read(config_id, SK_PCI_LOMEM);
1178 irq = pci_conf_read(config_id, SK_PCI_INTLINE);
1179
1180 /* Reset the power state. */
1181 printf("skc%d: chip is in D%d power mode "
1182 "-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1183 command &= 0xFFFFFFFC;
1184 pci_conf_write(config_id, SK_PCI_PWRMGMTCTRL, command);
1185
1186 /* Restore PCI config data. */
1187 pci_conf_write(config_id, SK_PCI_LOIO, iobase);
1188 pci_conf_write(config_id, SK_PCI_LOMEM, membase);
1189 pci_conf_write(config_id, SK_PCI_INTLINE, irq);
1190 }
1191 }
1192
1193 /*
1194 * Map control/status registers.
1195 */
1196 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1197 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1198 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
1199 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1200
1201 #ifdef SK_USEIOSPACE
1202 if (!(command & PCIM_CMD_PORTEN)) {
1203 printf("skc%d: failed to enable I/O ports!\n", unit);
1204 free(sc, M_DEVBUF);
1205 goto fail;
1206 }
1207
1208 if (!pci_map_port(config_id, SK_PCI_LOIO,
1209 (u_short *)&(sc->sk_bhandle))) {
1210 printf ("skc%d: couldn't map ports\n", unit);
1211 goto fail;
1212 }
1213
1214 sc->sk_btag = SK_BUS_SPACE_IO;
1215 #else
1216 if (!(command & PCIM_CMD_MEMEN)) {
1217 printf("skc%d: failed to enable memory mapping!\n", unit);
1218 goto fail;
1219 }
1220
1221 if (!pci_map_mem(config_id, SK_PCI_LOMEM, &vbase, &pbase)) {
1222 printf ("skc%d: couldn't map memory\n", unit);
1223 goto fail;
1224 }
1225 sc->sk_btag = SK_BUS_SPACE_MEM;
1226 sc->sk_bhandle = vbase;
1227 #endif
1228
1229 /* Allocate interrupt */
1230 if (!pci_map_int(config_id, sk_intr, sc, &net_imask)) {
1231 printf("skc%d: couldn't map interrupt\n", unit);
1232 goto fail;
1233 }
1234
1235 /* Reset the adapter. */
1236 sk_reset(sc);
1237
1238 sc->sk_unit = unit;
1239
1240 /* Read and save vital product data from EEPROM. */
1241 sk_vpd_read(sc);
1242
1243 /* Read and save RAM size and RAMbuffer offset */
1244 switch(sk_win_read_1(sc, SK_EPROM0)) {
1245 case SK_RAMSIZE_512K_64:
1246 sc->sk_ramsize = 0x80000;
1247 sc->sk_rboff = SK_RBOFF_0;
1248 break;
1249 case SK_RAMSIZE_1024K_64:
1250 sc->sk_ramsize = 0x100000;
1251 sc->sk_rboff = SK_RBOFF_80000;
1252 break;
1253 case SK_RAMSIZE_1024K_128:
1254 sc->sk_ramsize = 0x100000;
1255 sc->sk_rboff = SK_RBOFF_0;
1256 break;
1257 case SK_RAMSIZE_2048K_128:
1258 sc->sk_ramsize = 0x200000;
1259 sc->sk_rboff = SK_RBOFF_0;
1260 break;
1261 default:
1262 printf("skc%d: unknown ram size: %d\n",
1263 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1264 goto fail;
1265 break;
1266 }
1267
1268 /* Read and save physical media type */
1269 switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1270 case SK_PMD_1000BASESX:
1271 sc->sk_pmd = IFM_1000_SX;
1272 break;
1273 case SK_PMD_1000BASELX:
1274 sc->sk_pmd = IFM_1000_LX;
1275 break;
1276 case SK_PMD_1000BASECX:
1277 sc->sk_pmd = IFM_1000_CX;
1278 break;
1279 case SK_PMD_1000BASETX:
1280 sc->sk_pmd = IFM_1000_TX;
1281 break;
1282 default:
1283 printf("skc%d: unknown media type: 0x%x\n",
1284 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1285 goto fail;
1286 }
1287
1288 /* Announce the product name. */
1289 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1290
1291 sk_attach_xmac(sc, SK_PORT_A);
1292 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
1293 sk_attach_xmac(sc, SK_PORT_B);
1294
1295 /* Turn on the 'driver is loaded' LED. */
1296 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1297
1298 at_shutdown(sk_shutdown, sc, SHUTDOWN_POST_SYNC);
1299
1300 fail:
1301 splx(s);
1302 return;
1303 }
1304
/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting at
 * index *txidx. Returns 0 on success and advances *txidx past the
 * last descriptor used; returns ENOBUFS if the ring lacks room for
 * the whole chain (nothing is handed to the hardware in that case,
 * though descriptors already written are left dirty for reuse).
 */
static int sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc *sc_if;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct sk_tx_desc *f = NULL;
	struct mbuf *m;
	u_int32_t frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Keep at least 2 descriptors free at all times. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * Set OWN on every descriptor except the first;
			 * the first descriptor's OWN bit is set last (below)
			 * so the NIC cannot start on a half-built chain.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;	/* remember last descriptor used */
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* Defensive: the loop only exits with m == NULL, so this is dead. */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the tail, stash the mbuf for later freeing in sk_txeof(),
	 * then hand the whole chain to the NIC by setting OWN on the head. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1353
1354 static void sk_start(ifp)
1355 struct ifnet *ifp;
1356 {
1357 struct sk_softc *sc;
1358 struct sk_if_softc *sc_if;
1359 struct mbuf *m_head = NULL;
1360 u_int32_t idx;
1361
1362 sc_if = ifp->if_softc;
1363 sc = sc_if->sk_softc;
1364
1365 idx = sc_if->sk_cdata.sk_tx_prod;
1366
1367 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1368 IF_DEQUEUE(&ifp->if_snd, m_head);
1369 if (m_head == NULL)
1370 break;
1371
1372 /*
1373 * Pack the data into the transmit ring. If we
1374 * don't have room, set the OACTIVE flag and wait
1375 * for the NIC to drain the ring.
1376 */
1377 if (sk_encap(sc_if, m_head, &idx)) {
1378 IF_PREPEND(&ifp->if_snd, m_head);
1379 ifp->if_flags |= IFF_OACTIVE;
1380 break;
1381 }
1382
1383 /*
1384 * If there's a BPF listener, bounce a copy of this frame
1385 * to him.
1386 */
1387 #if NBPFILTER > 0
1388 if (ifp->if_bpf)
1389 bpf_mtap(ifp, m_head);
1390 #endif
1391 }
1392
1393 /* Transmit */
1394 sc_if->sk_cdata.sk_tx_prod = idx;
1395 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1396
1397 /* Set a timeout in case the chip goes out to lunch. */
1398 ifp->if_timer = 5;
1399
1400 return;
1401 }
1402
1403
1404 static void sk_watchdog(ifp)
1405 struct ifnet *ifp;
1406 {
1407 struct sk_if_softc *sc_if;
1408
1409 sc_if = ifp->if_softc;
1410
1411 printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1412 sk_init(sc_if);
1413
1414 return;
1415 }
1416
1417 static void sk_shutdown(howto, arg)
1418 int howto;
1419 void *arg;
1420 {
1421 struct sk_softc *sc;
1422
1423 sc = arg;
1424
1425 /* Turn off the 'driver is loaded' LED. */
1426 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1427
1428 /*
1429 * Reset the GEnesis controller. Doing this should also
1430 * assert the resets on the attached XMAC(s).
1431 */
1432 sk_reset(sc);
1433
1434 return;
1435 }
1436
/*
 * Receive interrupt handler: walk the RX ring from the current
 * producer index, harvesting every descriptor the NIC has given
 * back (OWN bit clear), and pass the frames up the stack.
 */
static void sk_rxeof(sc_if)
	struct sk_if_softc *sc_if;
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct sk_chain *cur_rx;
	int total_len = 0;
	int i;
	u_int32_t rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	/* Stop at the first descriptor still owned by the hardware. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Errored frame: count it and recycle the same buffer. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf *m0;
			/* Copy out with ETHER_ALIGN slack so the IP header
			 * ends up longword-aligned after m_adj() below. */
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			/* Got a fresh buffer: hand the full jumbo mbuf up. */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
		/*
		 * Tap BPF, then drop frames that only arrived because we
		 * are in promiscuous mode (unicast, not ours, not mcast).
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, sc_if->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN) && !(eh->ether_dhost[0] & 1))) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1516
/*
 * Transmit-complete handler: reclaim descriptors between the consumer
 * and producer indexes whose OWN bit the NIC has cleared, freeing the
 * associated mbufs and cancelling the watchdog as progress is made.
 */
static void sk_txeof(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	u_int32_t idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Still owned by the NIC: nothing more to reclaim. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Count one packet per chain, not per fragment. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/* The mbuf is stashed only on the last fragment's slot. */
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		ifp->if_timer = 0;	/* progress made: disarm watchdog */
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	/* Ring has room again: let sk_start() queue more frames. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
1553
/*
 * Service an interrupt from one XMAC: track link up/down transitions
 * (via the link-event and autoneg-done bits) and flush the MAC FIFOs
 * on TX underrun / RX overrun.
 */
static void sk_intr_xmac(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	u_int16_t status;
	u_int16_t bmsr;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	if (status & XM_ISR_LINKEVENT) {
		/* Mask further link events until autoneg brings it back. */
		SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
		if (sc_if->sk_link == 1) {
			printf("sk%d: gigabit link down\n", sc_if->sk_unit);
			sc_if->sk_link = 0;
		}
	}

	if (status & XM_ISR_AUTONEG_DONE) {
		/* Autonegotiation finished: check PHY status for link. */
		bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
		if (bmsr & XM_BMSR_LINKSTAT) {
			sc_if->sk_link = 1;
			/* Re-enable link-event interrupts. */
			SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
			printf("sk%d: gigabit link up\n", sc_if->sk_unit);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	return;
}
1589
1590 static void sk_intr(xsc)
1591 void *xsc;
1592 {
1593 struct sk_softc *sc = xsc;
1594 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
1595 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
1596 u_int32_t status;
1597
1598 sc_if0 = sc->sk_if[SK_PORT_A];
1599 sc_if1 = sc->sk_if[SK_PORT_B];
1600
1601 if (sc_if0 != NULL)
1602 ifp0 = &sc_if0->arpcom.ac_if;
1603 if (sc_if1 != NULL)
1604 ifp1 = &sc_if0->arpcom.ac_if;
1605
1606 for (;;) {
1607 status = CSR_READ_4(sc, SK_ISSR);
1608 if (!(status & sc->sk_intrmask))
1609 break;
1610
1611 /* Handle receive interrupts first. */
1612 if (status & SK_ISR_RX1_EOF) {
1613 sk_rxeof(sc_if0);
1614 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1615 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1616 }
1617 if (status & SK_ISR_RX2_EOF) {
1618 sk_rxeof(sc_if1);
1619 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1620 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1621 }
1622
1623 /* Then transmit interrupts. */
1624 if (status & SK_ISR_TX1_S_EOF) {
1625 sk_txeof(sc_if0);
1626 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1627 SK_TXBMU_CLR_IRQ_EOF);
1628 }
1629 if (status & SK_ISR_TX2_S_EOF) {
1630 sk_txeof(sc_if1);
1631 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1632 SK_TXBMU_CLR_IRQ_EOF);
1633 }
1634
1635 /* Then MAC interrupts. */
1636 if (status & SK_ISR_MAC1)
1637 sk_intr_xmac(sc_if0);
1638
1639 if (status & SK_ISR_MAC2)
1640 sk_intr_xmac(sc_if1);
1641 }
1642
1643 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1644
1645 if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1646 sk_start(ifp0);
1647 if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1648 sk_start(ifp1);
1649
1650 return;
1651 }
1652
/*
 * Bring one XMAC II out of reset and program it: station address,
 * promiscuous/broadcast policy, FCS stripping, padding, bad-frame
 * acceptance (see the long comment below), TX threshold, multicast
 * filter, interrupts, and the revision-specific MAC arbiter timing.
 */
static void sk_init_xmac(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct ifnet *ifp;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/* Set station address: three 16-bit writes from the enaddr. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Mirror the interface's promiscuous flag into the MAC. */
	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	/* Likewise broadcast reception (NOBROAD is inverted sense). */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Allow oversize frames only when the MTU calls for them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts (the read acks pending sources). */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);

	/* Link state unknown until an autoneg-done event arrives. */
	sc_if->sk_link = 0;

	/* Configure MAC arbiter timing per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/* NOTE(review): writes the B2 recovery value in the C1
		 * case -- presumably intentional; verify against the
		 * XMAC II errata. */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	return;
}
1766
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
/*
 * Initialize one port: stop it, program the LEDs, XMAC, MAC FIFOs,
 * transmit arbiter, RAM buffers and BMUs, set up the descriptor
 * rings, enable this port's interrupts and start reception.
 * Registered as the interface's if_init handler.
 */
static void sk_init(xsc)
	void *xsc;
{
	struct sk_if_softc *sc_if = xsc;
	struct sk_softc *sc;
	struct ifnet *ifp;
	int s;

	s = splimp();

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);

	/* Configure MAC FIFOs: unreset, set end marker, turn on. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers using the ranges computed at attach time. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point them at the descriptor rings. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		(void)splx(s);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling: ack pending, then enable this
	 * port's interrupt sources in the shared mask. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return;
}
1873
/*
 * Stop one port: reset the XMAC and FIFOs, take the BMUs and RAM
 * buffers offline, kill the LEDs, mask this port's interrupts and
 * free any mbufs still sitting in the RX/TX rings.
 */
static void sk_stop(sc_if)
	struct sk_if_softc *sc_if;
{
	int i;
	struct sk_softc *sc;
	struct ifnet *ifp;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Turn off various components of this interface. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/* NOTE(review): TX LED stopped with the RX counter-stop constant --
	 * presumably the bit values are identical; verify in if_skreg.h. */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts for this port only in the shared mask. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	return;
}
1923
/*
 * PCI driver registration glue for the old pcici_t driver interface:
 * probe/attach entry points and the controller unit counter. The
 * shutdown hook is NULL here because sk_attach() registers
 * sk_shutdown() via at_shutdown() instead.
 */
static struct pci_device sk_device = {
	"skc",		/* device name */
	sk_probe,	/* probe routine */
	sk_attach,	/* attach routine */
	&skc_count,	/* unit counter */
	NULL		/* no per-device shutdown hook */
};
#ifdef COMPAT_PCI_DRIVER
COMPAT_PCI_DRIVER(sk, sk_device);
#else
DATA_SET(pcidevice_set, sk_device);
#endif
Cache object: 7fae64504d98edf64d29c70cbe63e063
|