/* FreeBSD/Linux Kernel Cross Reference: sys/dev/gx/if_gx.c */
1 /*-
2 * Copyright (c) 1999,2000,2001 Jonathan Lemon
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/5.0/sys/dev/gx/if_gx.c 106937 2002-11-14 23:54:55Z sam $
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/sockio.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/socket.h>
39 #include <sys/queue.h>
40
41 #include <net/if.h>
42 #include <net/if_arp.h>
43 #include <net/ethernet.h>
44 #include <net/if_dl.h>
45 #include <net/if_media.h>
46
47 #include <net/bpf.h>
48 #include <net/if_types.h>
49 #include <net/if_vlan_var.h>
50
51 #include <netinet/in_systm.h>
52 #include <netinet/in.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
55 #include <netinet/udp.h>
56
57 #include <vm/vm.h> /* for vtophys */
58 #include <vm/pmap.h> /* for vtophys */
59 #include <machine/clock.h> /* for DELAY */
60 #include <machine/bus_memio.h>
61 #include <machine/bus.h>
62 #include <machine/resource.h>
63 #include <sys/bus.h>
64 #include <sys/rman.h>
65
66 #include <pci/pcireg.h>
67 #include <pci/pcivar.h>
68
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71
72 #include <dev/gx/if_gxreg.h>
73 #include <dev/gx/if_gxvar.h>
74
75 MODULE_DEPEND(gx, miibus, 1, 1, 1);
76 #include "miibus_if.h"
77
78 #define TUNABLE_TX_INTR_DELAY 100
79 #define TUNABLE_RX_INTR_DELAY 100
80
81 #define GX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
82
83 /*
84 * Various supported device vendors/types and their names.
85 */
/*
 * Per-chip identification record: PCI vendor/device IDs plus the
 * feature/workaround flags (GXF_*) and the recommended inter-packet-gap
 * register value for that silicon revision.
 */
struct gx_device {
	u_int16_t	vendor;		/* PCI vendor ID */
	u_int16_t	device;		/* PCI device ID */
	int		version_flags;	/* GXF_* capability/workaround flags */
	u_int32_t	version_ipg;	/* packed IPG timing register value */
	char		*name;		/* human-readable description */
};
93
/*
 * Table of supported adapters, terminated by a NULL-name sentinel.
 * The IPG values are packed as: transmit | receive1 << 10 | receive2 << 20.
 */
static struct gx_device gx_devs[] = {
	{ INTEL_VENDORID, DEVICEID_WISEMAN,
	    GXF_FORCE_TBI | GXF_OLD_REGS,
	    10 | 2 << 10 | 10 << 20,
	    "Intel Gigabit Ethernet (82542)" },
	{ INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    6 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82543GC-F)" },
	{ INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82543GC-T)" },
#if 0
/* notyet.. */
	{ INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    6 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544EI-F)" },
	{ INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544EI-T)" },
	{ INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544GC-T)" },
#endif
	{ 0, 0, 0, 0, NULL }	/* sentinel */
};
124
/*
 * Register-offset maps for the two hardware generations.  The 82542
 * (GXF_OLD_REGS) places the RX/TX ring registers at different offsets
 * than later chips; gx_attach() copies the right map into the softc.
 */
static struct gx_regs new_regs = {
	GX_RX_RING_BASE, GX_RX_RING_LEN,
	GX_RX_RING_HEAD, GX_RX_RING_TAIL,
	GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

	GX_TX_RING_BASE, GX_TX_RING_LEN,
	GX_TX_RING_HEAD, GX_TX_RING_TAIL,
	GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
static struct gx_regs old_regs = {
	GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
	GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
	GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

	GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
	GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
	GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};
143
/* newbus entry points */
static int gx_probe(device_t dev);
static int gx_attach(device_t dev);
static int gx_detach(device_t dev);
static void gx_shutdown(device_t dev);

/* interrupt handler and ifnet init hook */
static void gx_intr(void *xsc);
static void gx_init(void *xsc);

/* device identification, EEPROM, media, MII and ifnet support */
static struct gx_device *gx_match(device_t dev);
static void gx_eeprom_getword(struct gx_softc *gx, int addr,
    u_int16_t *dest);
static int gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
    int cnt);
static int gx_ifmedia_upd(struct ifnet *ifp);
static void gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int gx_miibus_readreg(device_t dev, int phy, int reg);
static void gx_miibus_writereg(device_t dev, int phy, int reg, int value);
static void gx_miibus_statchg(device_t dev);
static int gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void gx_setmulti(struct gx_softc *gx);
static void gx_reset(struct gx_softc *gx);
static void gx_phy_reset(struct gx_softc *gx);
static void gx_release(struct gx_softc *gx);
static void gx_stop(struct gx_softc *gx);
static void gx_watchdog(struct ifnet *ifp);
static void gx_start(struct ifnet *ifp);

/* descriptor-ring setup/teardown */
static int gx_init_rx_ring(struct gx_softc *gx);
static void gx_free_rx_ring(struct gx_softc *gx);
static int gx_init_tx_ring(struct gx_softc *gx);
static void gx_free_tx_ring(struct gx_softc *gx);
175
/* newbus/miibus method dispatch table for the gx driver */
static device_method_t gx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, gx_probe),
	DEVMETHOD(device_attach, gx_attach),
	DEVMETHOD(device_detach, gx_detach),
	DEVMETHOD(device_shutdown, gx_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, gx_miibus_readreg),
	DEVMETHOD(miibus_writereg, gx_miibus_writereg),
	DEVMETHOD(miibus_statchg, gx_miibus_statchg),

	{ 0, 0 }	/* terminator */
};
190
/* Driver declaration: attach "gx" under pci, and miibus under gx. */
static driver_t gx_driver = {
	"gx",
	gx_methods,
	sizeof(struct gx_softc)
};

static devclass_t gx_devclass;

DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);
201
202 static struct gx_device *
203 gx_match(device_t dev)
204 {
205 int i;
206
207 for (i = 0; gx_devs[i].name != NULL; i++) {
208 if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
209 (pci_get_device(dev) == gx_devs[i].device))
210 return (&gx_devs[i]);
211 }
212 return (NULL);
213 }
214
215 static int
216 gx_probe(device_t dev)
217 {
218 struct gx_device *gx_dev;
219
220 gx_dev = gx_match(dev);
221 if (gx_dev == NULL)
222 return (ENXIO);
223
224 device_set_desc(dev, gx_dev->name);
225 return (0);
226 }
227
228 static int
229 gx_attach(device_t dev)
230 {
231 struct gx_softc *gx;
232 struct gx_device *gx_dev;
233 struct ifnet *ifp;
234 u_int32_t command;
235 int rid, s;
236 int error = 0;
237
238 s = splimp();
239
240 gx = device_get_softc(dev);
241 bzero(gx, sizeof(struct gx_softc));
242 gx->gx_dev = dev;
243
244 gx_dev = gx_match(dev);
245 gx->gx_vflags = gx_dev->version_flags;
246 gx->gx_ipg = gx_dev->version_ipg;
247
248 mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
249 MTX_DEF | MTX_RECURSE);
250
251 GX_LOCK(gx);
252
253 /*
254 * Map control/status registers.
255 */
256 command = pci_read_config(dev, PCIR_COMMAND, 4);
257 command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
258 if (gx->gx_vflags & GXF_ENABLE_MWI)
259 command |= PCIM_CMD_MWIEN;
260 pci_write_config(dev, PCIR_COMMAND, command, 4);
261 command = pci_read_config(dev, PCIR_COMMAND, 4);
262
263 /* XXX check cache line size? */
264
265 if ((command & PCIM_CMD_MEMEN) == 0) {
266 device_printf(dev, "failed to enable memory mapping!\n");
267 error = ENXIO;
268 goto fail;
269 }
270
271 rid = GX_PCI_LOMEM;
272 gx->gx_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
273 0, ~0, 1, RF_ACTIVE);
274 #if 0
275 /* support PIO mode */
276 rid = PCI_LOIO;
277 gx->gx_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
278 0, ~0, 1, RF_ACTIVE);
279 #endif
280
281 if (gx->gx_res == NULL) {
282 device_printf(dev, "couldn't map memory\n");
283 error = ENXIO;
284 goto fail;
285 }
286
287 gx->gx_btag = rman_get_bustag(gx->gx_res);
288 gx->gx_bhandle = rman_get_bushandle(gx->gx_res);
289
290 /* Allocate interrupt */
291 rid = 0;
292 gx->gx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
293 RF_SHAREABLE | RF_ACTIVE);
294
295 if (gx->gx_irq == NULL) {
296 device_printf(dev, "couldn't map interrupt\n");
297 error = ENXIO;
298 goto fail;
299 }
300
301 error = bus_setup_intr(dev, gx->gx_irq, INTR_TYPE_NET,
302 gx_intr, gx, &gx->gx_intrhand);
303 if (error) {
304 device_printf(dev, "couldn't setup irq\n");
305 goto fail;
306 }
307
308 /* compensate for different register mappings */
309 if (gx->gx_vflags & GXF_OLD_REGS)
310 gx->gx_reg = old_regs;
311 else
312 gx->gx_reg = new_regs;
313
314 if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
315 GX_EEMAP_MAC, 3)) {
316 device_printf(dev, "failed to read station address\n");
317 error = ENXIO;
318 goto fail;
319 }
320 device_printf(dev, "Ethernet address: %6D\n",
321 gx->arpcom.ac_enaddr, ":");
322
323 /* Allocate the ring buffers. */
324 gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
325 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
326
327 if (gx->gx_rdata == NULL) {
328 device_printf(dev, "no memory for list buffers!\n");
329 error = ENXIO;
330 goto fail;
331 }
332 bzero(gx->gx_rdata, sizeof(struct gx_ring_data));
333
334 /* Set default tuneable values. */
335 gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
336 gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;
337
338 /* Set up ifnet structure */
339 ifp = &gx->arpcom.ac_if;
340 ifp->if_softc = gx;
341 ifp->if_unit = device_get_unit(dev);
342 ifp->if_name = "gx";
343 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
344 ifp->if_ioctl = gx_ioctl;
345 ifp->if_output = ether_output;
346 ifp->if_start = gx_start;
347 ifp->if_watchdog = gx_watchdog;
348 ifp->if_init = gx_init;
349 ifp->if_mtu = ETHERMTU;
350 ifp->if_snd.ifq_maxlen = GX_TX_RING_CNT - 1;
351 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
352
353 /* see if we can enable hardware checksumming */
354 if (gx->gx_vflags & GXF_CSUM) {
355 ifp->if_capabilities = IFCAP_HWCSUM;
356 ifp->if_capenable = ifp->if_capabilities;
357 }
358
359 /* figure out transciever type */
360 if (gx->gx_vflags & GXF_FORCE_TBI ||
361 CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
362 gx->gx_tbimode = 1;
363
364 if (gx->gx_tbimode) {
365 /* SERDES transceiver */
366 ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
367 gx_ifmedia_sts);
368 ifmedia_add(&gx->gx_media,
369 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
370 ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
371 ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
372 } else {
373 /* GMII/MII transceiver */
374 gx_phy_reset(gx);
375 if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
376 gx_ifmedia_sts)) {
377 device_printf(dev, "GMII/MII, PHY not detected\n");
378 error = ENXIO;
379 goto fail;
380 }
381 }
382
383 /*
384 * Call MI attach routines.
385 */
386 ether_ifattach(ifp, gx->arpcom.ac_enaddr);
387
388 GX_UNLOCK(gx);
389 splx(s);
390 return (0);
391
392 fail:
393 GX_UNLOCK(gx);
394 gx_release(gx);
395 splx(s);
396 return (error);
397 }
398
/*
 * Release bus resources acquired by gx_attach(), in reverse order of
 * acquisition: child devices (miibus) first, then the interrupt hook,
 * the IRQ resource, and the register window.  Each step is guarded so
 * this is safe to call from a partially-completed attach.
 * Note: does not destroy the softc mutex or free the ring memory;
 * callers handle those.
 */
static void
gx_release(struct gx_softc *gx)
{

	bus_generic_detach(gx->gx_dev);
	if (gx->gx_miibus)
		device_delete_child(gx->gx_dev, gx->gx_miibus);

	if (gx->gx_intrhand)
		bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
	if (gx->gx_irq)
		bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
	if (gx->gx_res)
		bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
		    GX_PCI_LOMEM, gx->gx_res);
}
415
/*
 * (Re)initialize the chip and bring the interface up: reset the MAC,
 * program receive addresses and the multicast filter, set up the
 * RX/TX descriptor rings, configure receiver/transmitter/flow-control
 * registers, then enable interrupts and select the current media.
 * Called from attach-time paths and from gx_ioctl(); the exact
 * register write ordering below matters, do not reorder casually.
 */
static void
gx_init(void *xsc)
{
	struct gx_softc *gx = (struct gx_softc *)xsc;
	struct ifmedia *ifm;
	struct ifnet *ifp;
	device_t dev;
	u_int16_t *m;
	u_int32_t ctrl;
	int s, i, tmp;

	dev = gx->gx_dev;
	ifp = &gx->arpcom.ac_if;

	s = splimp();
	GX_LOCK(gx);

	/* Disable host interrupts, halt chip. */
	gx_reset(gx);

	/* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
	gx_stop(gx);

	/* Load our MAC address, invalidate other 15 RX addresses. */
	m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
	for (i = 1; i < 16; i++)
		CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

	/* Program multicast filter. */
	gx_setmulti(gx);

	/* Init RX ring. */
	gx_init_rx_ring(gx);

	/* Init TX ring. */
	gx_init_tx_ring(gx);

	if (gx->gx_vflags & GXF_DMA) {
		/* set up DMA control */
		CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
		CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
	}

	/* enable receiver */
	ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
	ctrl |= GX_RXC_BCAST_ACCEPT;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		ctrl |= GX_RXC_UNI_PROMISC;

	/* This is required if we want to accept jumbo frames */
	if (ifp->if_mtu > ETHERMTU)
		ctrl |= GX_RXC_LONG_PKT_ENABLE;

	/* setup receive checksum control */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
		    GX_CSUM_TCP/* | GX_CSUM_IP*/);

	/* setup transmit checksum control */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = GX_CSUM_FEATURES;

	ctrl |= GX_RXC_STRIP_ETHERCRC;		/* not on 82542? */
	CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

	/* enable transmitter */
	ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

	/* XXX we should support half-duplex here too... */
	ctrl |= GX_TXC_COLL_TIME_FDX;

	CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

	/*
	 * set up recommended IPG times, which vary depending on chip type:
	 *	IPG transmit time:  80ns
	 *	IPG receive time 1: 20ns
	 *	IPG receive time 2: 80ns
	 */
	CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

	/* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);

	/* set up 802.3x MAC flow control type -- 88:08 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

	/* Set up tuneables (interrupt moderation delays) */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

	/*
	 * Configure chip for correct operation.
	 */
	ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
	ctrl |= GX_CTRL_BIGENDIAN;
#endif
	ctrl |= GX_CTRL_VLAN_ENABLE;

	if (gx->gx_tbimode) {
		/*
		 * It seems that TXCW must be initialized from the EEPROM
		 * manually.
		 *
		 * XXX
		 * should probably read the eeprom and re-insert the
		 * values here.
		 */
#define TXCONFIG_WORD	0x000001A0
		CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

		/* turn on hardware autonegotiate */
		GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
	} else {
		/*
		 * Auto-detect speed from PHY, instead of using direct
		 * indication.  The SLU bit doesn't force the link, but
		 * must be present for ASDE to work.
		 */
		gx_phy_reset(gx);
		ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
	}

	/*
	 * Take chip out of reset and start it running.
	 */
	CSR_WRITE_4(gx, GX_CTRL, ctrl);

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Set the current media.  With a PHY present, defer to miibus;
	 * in TBI mode, force a re-application of the selected media by
	 * temporarily substituting the current selection.
	 */
	if (gx->gx_miibus != NULL) {
		mii_mediachg(device_get_softc(gx->gx_miibus));
	} else {
		ifm = &gx->gx_media;
		tmp = ifm->ifm_media;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		gx_ifmedia_upd(ifp);
		ifm->ifm_media = tmp;
	}

	/*
	 * XXX
	 * Have the LINK0 flag force the link in TBI mode.
	 */
	if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
		GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
	}

#if 0
	printf("66mhz: %s  64bit: %s\n",
	    CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
	    CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif

	GX_UNLOCK(gx);
	splx(s);
}
587
588 /*
589 * Stop all chip I/O so that the kernel's probe routines don't
590 * get confused by errant DMAs when rebooting.
591 */
592 static void
593 gx_shutdown(device_t dev)
594 {
595 struct gx_softc *gx;
596
597 gx = device_get_softc(dev);
598 gx_reset(gx);
599 gx_stop(gx);
600 }
601
/*
 * newbus detach method: unhook the interface from the network stack,
 * quiesce the hardware, release media/bus resources and the ring
 * memory, then destroy the softc mutex.  Teardown order mirrors the
 * acquisition order in gx_attach().
 */
static int
gx_detach(device_t dev)
{
	struct gx_softc *gx;
	struct ifnet *ifp;
	int s;

	s = splimp();

	gx = device_get_softc(dev);
	ifp = &gx->arpcom.ac_if;
	GX_LOCK(gx);

	ether_ifdetach(ifp);
	gx_reset(gx);
	gx_stop(gx);
	ifmedia_removeall(&gx->gx_media);
	gx_release(gx);

	contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF);

	GX_UNLOCK(gx);
	mtx_destroy(&gx->gx_mtx);
	splx(s);

	return (0);
}
629
/*
 * Bit-bang one 16-bit word out of the serial (microwire-style) EEPROM.
 * Clocks out a read opcode plus the word address, then clocks in the
 * 16 data bits MSB-first.  The 10us delays pace the EEPROM's serial
 * clock; do not remove them.
 */
static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
	u_int16_t word = 0;
	u_int32_t base, reg;
	int x;

	/* prepend the READ opcode to the masked word address */
	addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
	    (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

	base = CSR_READ_4(gx, GX_EEPROM_CTRL);
	base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
	base |= GX_EE_SELECT;

	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

	/* shift out opcode + address, MSB first, one clock pulse per bit */
	for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
		reg = base | (addr & x ? GX_EE_DATA_IN : 0);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
	}

	/* shift in the 16 data bits, MSB first */
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
		DELAY(10);
		reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
		if (reg & GX_EE_DATA_OUT)
			word |= x;
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
		DELAY(10);
	}

	/* deselect the EEPROM */
	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
	DELAY(10);

	*dest = word;
}
671
672 static int
673 gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
674 {
675 u_int16_t *word;
676 int i;
677
678 word = (u_int16_t *)dest;
679 for (i = 0; i < cnt; i ++) {
680 gx_eeprom_getword(gx, off + i, word);
681 word++;
682 }
683 return (0);
684 }
685
/*
 * Set media options.
 *
 * In TBI (fiber/SERDES) mode the chip is programmed directly: AUTO
 * bounces the link through a reset with hardware autonegotiation on;
 * manual 1000_SX selection is not implemented.  In GMII/MII mode the
 * request is delegated to the PHY via miibus, after rejecting
 * 1000baseT half-duplex, which this hardware cannot do.
 */
static int
gx_ifmedia_upd(struct ifnet *ifp)
{
	struct gx_softc *gx;
	struct ifmedia *ifm;
	struct mii_data *mii;

	gx = ifp->if_softc;

	if (gx->gx_tbimode) {
		ifm = &gx->gx_media;
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/* pulse link reset with autoneg enabled */
			GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
			GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
			GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
			break;
		case IFM_1000_SX:
			device_printf(gx->gx_dev,
			    "manual config not supported yet.\n");
#if 0
			GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
			config = /* bit symbols for 802.3z */0;
			ctrl |= GX_CTRL_SET_LINK_UP;
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
				ctrl |= GX_CTRL_DUPLEX;
#endif
			break;
		default:
			return (EINVAL);
		}
	} else {
		/*
		 * NOTE(review): in MII mode gx_media is never passed to
		 * ifmedia_init() (see gx_attach), so this check reads a
		 * zeroed struct and never fires; it looks like it was
		 * meant to inspect mii->mii_media instead -- confirm.
		 */
		ifm = &gx->gx_media;

		/*
		 * 1000TX half duplex does not work.
		 */
		if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T &&
		    (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
			return (EINVAL);
		mii = device_get_softc(gx->gx_miibus);
		mii_mediachg(mii);
	}
	return (0);
}
737
/*
 * Report current media status.
 *
 * TBI mode: read link state straight from the STATUS register and
 * report 1000baseSX full-duplex when up.  MII mode: poll the PHY and
 * pass its state through, masking the unsupported 1000baseT
 * half-duplex combination as "none".
 */
static void
gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gx_softc *gx;
	struct mii_data *mii;
	u_int32_t status;

	gx = ifp->if_softc;

	if (gx->gx_tbimode) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;

		status = CSR_READ_4(gx, GX_STATUS);
		if ((status & GX_STAT_LINKUP) == 0)
			return;

		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		mii = device_get_softc(gx->gx_miibus);
		mii_pollstat(mii);
		/* 1000baseT half-duplex is unusable on this hardware */
		if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) ==
		    (IFM_1000_T | IFM_HDX))
			mii->mii_media_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}
770
/*
 * Bit-bang `length' bits of `data' (MSB first) into the PHY over the
 * MDIO GPIO pins.  Each bit is presented on the data line and latched
 * with a clock pulse; the 10us delays pace the MDC clock.
 */
static void
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
	u_int32_t reg, x;

	/*
	 * Set up default GPIO direction + PHY data out.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

	/*
	 * Shift in data to PHY.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg |= GX_CTRL_PHY_IO;
		else
			reg &= ~GX_CTRL_PHY_IO;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
}
799
/*
 * Bit-bang a 16-bit value (MSB first) out of the PHY over the MDIO
 * GPIO pins and return it.  The leading clock pulse covers the
 * turnaround cycle before the data bits; trailing pulses idle the bus.
 */
static u_int16_t
gx_mii_shiftout(struct gx_softc *gx)
{
	u_int32_t reg;
	u_int16_t data;
	int x;

	/*
	 * Set up default GPIO direction + PHY data in.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR;

	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	/*
	 * Shift out data from PHY.
	 */
	data = 0;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
			data |= x;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);

	return (data);
}
839
/*
 * miibus read-register method: bit-bang an IEEE 802.3 clause 22 read
 * frame (preamble, SOF, read opcode, PHY and register addresses) to
 * the PHY and return the 16-bit result.  No-op (returns 0) in TBI
 * mode, where there is no PHY.
 */
static int
gx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return (0);

	/*
	 * XXX
	 * Note: Cordova has a MDIC register. livingood and < have mii bits
	 */

	gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
	gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
	    (phy << 5) | reg, GX_PHY_READ_LEN);
	return (gx_mii_shiftout(gx));
}
859
/*
 * miibus write-register method: bit-bang a clause 22 write frame
 * (preamble, SOF, write opcode, addresses, turnaround, 16 data bits)
 * to the PHY.  No-op in TBI mode.
 */
static void
gx_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return;

	gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
	gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
	    (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
	    (value & 0xffff), GX_PHY_WRITE_LEN);
}
874
/*
 * miibus status-change callback: mirror the flow-control pause
 * settings the PHY negotiated (carried in the IFM_FLAG0/IFM_FLAG1
 * media flags) into the MAC's RX/TX flow-control enable bits.
 * No-op in TBI mode.
 */
static void
gx_miibus_statchg(device_t dev)
{
	struct gx_softc *gx;
	struct mii_data *mii;
	int reg, s;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return;

	/*
	 * Set flow control behavior to mirror what PHY negotiated.
	 */
	mii = device_get_softc(gx->gx_miibus);

	s = splimp();
	GX_LOCK(gx);

	reg = CSR_READ_4(gx, GX_CTRL);
	if (mii->mii_media_active & IFM_FLAG0)
		reg |= GX_CTRL_RX_FLOWCTRL;
	else
		reg &= ~GX_CTRL_RX_FLOWCTRL;
	if (mii->mii_media_active & IFM_FLAG1)
		reg |= GX_CTRL_TX_FLOWCTRL;
	else
		reg &= ~GX_CTRL_TX_FLOWCTRL;
	CSR_WRITE_4(gx, GX_CTRL, reg);

	GX_UNLOCK(gx);
	splx(s);
}
908
/*
 * ifnet ioctl handler.  Handles MTU changes (bounded by GX_MAX_MTU),
 * up/down and promiscuous-mode transitions, multicast list updates,
 * media get/set (routed to miibus or the local ifmedia in TBI mode),
 * and hardware-checksum capability toggling; everything else falls
 * through to ether_ioctl().  Runs at splimp with the softc locked.
 */
static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct gx_softc *gx = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int s, mask, error = 0;

	s = splimp();
	GX_LOCK(gx);

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > GX_MAX_MTU) {
			error = EINVAL;
		} else {
			/* reinit so the long-packet enable bit tracks MTU */
			ifp->if_mtu = ifr->ifr_mtu;
			gx_init(gx);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0) {
			gx_stop(gx);
		} else if (ifp->if_flags & IFF_RUNNING &&
		    ((ifp->if_flags & IFF_PROMISC) != 
		    (gx->gx_if_flags & IFF_PROMISC))) {
			/* only the promiscuous bit changed: flip it live */
			if (ifp->if_flags & IFF_PROMISC)
				GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
			else 
				GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
		} else {
			gx_init(gx);
		}
		gx->gx_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			gx_setmulti(gx);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (gx->gx_miibus != NULL) {
			mii = device_get_softc(gx->gx_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* toggle hardware checksumming; reinit to apply */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				gx_init(gx);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	GX_UNLOCK(gx);
	splx(s);
	return (error);
}
979
/*
 * Pulse the PHY's reset line via the extended-control GPIO bits.
 * The reset signal is active low: drive it high, low, then high
 * again, with short settling delays between transitions.
 */
static void
gx_phy_reset(struct gx_softc *gx)
{
	int reg;

	GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

	/*
	 * PHY reset is active low.
	 */
	reg = CSR_READ_4(gx, GX_CTRL_EXT);
	reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
	reg |= GX_CTRLX_GPIO_DIR;

	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);

#if 0
	/* post-livingood (cordova) only */
	GX_SETBIT(gx, GX_CTRL, 0x80000000);
	DELAY(1000);
	GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}
1008
/*
 * Mask all interrupts and issue a full device reset, leaving the
 * chip halted; gx_init() brings it back up.
 */
static void
gx_reset(struct gx_softc *gx)
{

	/* Disable host interrupts. */
	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

	/* reset chip (THWAP!) */
	GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
	DELAY(10);
}
1020
/*
 * Halt RX/TX engines (flushing their FIFOs), reset the link in TBI
 * mode, release all ring buffers, and mark the interface down.
 */
static void
gx_stop(struct gx_softc *gx)
{
	struct ifnet *ifp;

	ifp = &gx->arpcom.ac_if;

	/* reset and flush transmitter */
	CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);

	/* reset and flush receiver */
	CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);

	/* reset link */
	if (gx->gx_tbimode)
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);

	/* Free the RX lists. */
	gx_free_rx_ring(gx);

	/* Free TX buffers. */
	gx_free_tx_ring(gx);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
1046
1047 static void
1048 gx_watchdog(struct ifnet *ifp)
1049 {
1050 struct gx_softc *gx;
1051
1052 gx = ifp->if_softc;
1053
1054 device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
1055 gx_reset(gx);
1056 gx_init(gx);
1057
1058 ifp->if_oerrors++;
1059 }
1060
/*
 * Intialize a receive ring descriptor.
 *
 * Attach an mbuf cluster to RX descriptor `idx'.  If `m' is NULL a
 * fresh header mbuf + cluster is allocated; otherwise the caller's
 * mbuf is recycled in place (data pointer rewound to the cluster
 * start).  Returns ENOBUFS if allocation fails, 0 on success.
 */
static int
gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct gx_rx_desc *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			device_printf(gx->gx_dev, 
			    "mbuf allocation failed -- packet dropped\n");
			return (ENOBUFS);
		}
		MCLGET(m_new, M_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			device_printf(gx->gx_dev, 
			    "cluster allocation failed -- packet dropped\n");
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* recycle: reset length/data of the caller's cluster mbuf */
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m->m_data = m->m_ext.ext_buf;
		m->m_next = NULL;
		m_new = m;
	}

	/*
	 * XXX
	 * this will _NOT_ work for large MTU's; it will overwrite
	 * the end of the buffer.  E.g.: take this out for jumbograms,
	 * but then that breaks alignment.
	 */
	if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
		m_adj(m_new, ETHER_ALIGN);

	gx->gx_cdata.gx_rx_chain[idx] = m_new;
	r = &gx->gx_rdata->gx_rx_ring[idx];
	r->rx_addr = vtophys(mtod(m_new, caddr_t));
	r->rx_staterr = 0;

	return (0);
}
1108
/*
 * The receive ring can have up to 64K descriptors, which at 2K per mbuf
 * cluster, could add up to 128M of memory. Due to alignment constraints,
 * the number of descriptors must be a multiple of 8.  For now, we
 * allocate 256 entries and hope that our CPU is fast enough to keep up
 * with the NIC.
 *
 * Populates every RX descriptor with a fresh cluster, then programs
 * the ring base/length/head/tail registers.  Returns ENOBUFS from
 * gx_newbuf() on allocation failure, else 0.
 */
static int
gx_init_rx_ring(struct gx_softc *gx)
{
	int i, error;

	for (i = 0; i < GX_RX_RING_CNT; i++) {
		error = gx_newbuf(gx, i, NULL);
		if (error)
			return (error);
	}

	/* bring receiver out of reset state, leave disabled */
	CSR_WRITE_4(gx, GX_RX_CONTROL, 0);

	/* set up ring registers */
	CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
	    (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));

	CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
	    GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
	/* tail one behind head: the whole ring is available to the chip */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
	gx->gx_rx_tail_idx = 0;

	return (0);
}
1142
1143 static void
1144 gx_free_rx_ring(struct gx_softc *gx)
1145 {
1146 struct mbuf **mp;
1147 int i;
1148
1149 mp = gx->gx_cdata.gx_rx_chain;
1150 for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
1151 if (*mp != NULL) {
1152 m_freem(*mp);
1153 *mp = NULL;
1154 }
1155 }
1156 bzero((void *)gx->gx_rdata->gx_rx_ring,
1157 GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1158
1159 /* release any partially-received packet chain */
1160 if (gx->gx_pkthdr != NULL) {
1161 m_freem(gx->gx_pkthdr);
1162 gx->gx_pkthdr = NULL;
1163 }
1164 }
1165
/*
 * Program the TX ring base/length/head/tail registers and reset the
 * software ring bookkeeping.  Always returns 0.
 */
static int
gx_init_tx_ring(struct gx_softc *gx)
{

	/* bring transmitter out of reset state, leave disabled */
	CSR_WRITE_4(gx, GX_TX_CONTROL, 0);

	/* set up ring registers */
	CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
	    (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
	    GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
	gx->gx_tx_head_idx = 0;
	gx->gx_tx_tail_idx = 0;
	gx->gx_txcnt = 0;

	/* set up initial TX context */
	gx->gx_txcontext = GX_TXCONTEXT_NONE;

	return (0);
}
1189
1190 static void
1191 gx_free_tx_ring(struct gx_softc *gx)
1192 {
1193 struct mbuf **mp;
1194 int i;
1195
1196 mp = gx->gx_cdata.gx_tx_chain;
1197 for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
1198 if (*mp != NULL) {
1199 m_freem(*mp);
1200 *mp = NULL;
1201 }
1202 }
1203 bzero((void *)&gx->gx_rdata->gx_tx_ring,
1204 GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1205 }
1206
1207 static void
1208 gx_setmulti(struct gx_softc *gx)
1209 {
1210 int i;
1211
1212 /* wipe out the multicast table */
1213 for (i = 1; i < 128; i++)
1214 CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
1215 }
1216
1217 static void
1218 gx_rxeof(struct gx_softc *gx)
1219 {
1220 struct gx_rx_desc *rx;
1221 struct ifnet *ifp;
1222 int idx, staterr, len;
1223 struct mbuf *m;
1224
1225 gx->gx_rx_interrupts++;
1226
1227 ifp = &gx->arpcom.ac_if;
1228 idx = gx->gx_rx_tail_idx;
1229
1230 while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {
1231
1232 rx = &gx->gx_rdata->gx_rx_ring[idx];
1233 m = gx->gx_cdata.gx_rx_chain[idx];
1234 /*
1235 * gx_newbuf overwrites status and length bits, so we
1236 * make a copy of them here.
1237 */
1238 len = rx->rx_len;
1239 staterr = rx->rx_staterr;
1240
1241 if (staterr & GX_INPUT_ERROR)
1242 goto ierror;
1243
1244 if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
1245 goto ierror;
1246
1247 GX_INC(idx, GX_RX_RING_CNT);
1248
1249 if (staterr & GX_RXSTAT_INEXACT_MATCH) {
1250 /*
1251 * multicast packet, must verify against
1252 * multicast address.
1253 */
1254 }
1255
1256 if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
1257 if (gx->gx_pkthdr == NULL) {
1258 m->m_len = len;
1259 m->m_pkthdr.len = len;
1260 gx->gx_pkthdr = m;
1261 gx->gx_pktnextp = &m->m_next;
1262 } else {
1263 m->m_len = len;
1264 m->m_flags &= ~M_PKTHDR;
1265 gx->gx_pkthdr->m_pkthdr.len += len;
1266 *(gx->gx_pktnextp) = m;
1267 gx->gx_pktnextp = &m->m_next;
1268 }
1269 continue;
1270 }
1271
1272 if (gx->gx_pkthdr == NULL) {
1273 m->m_len = len;
1274 m->m_pkthdr.len = len;
1275 } else {
1276 m->m_len = len;
1277 m->m_flags &= ~M_PKTHDR;
1278 gx->gx_pkthdr->m_pkthdr.len += len;
1279 *(gx->gx_pktnextp) = m;
1280 m = gx->gx_pkthdr;
1281 gx->gx_pkthdr = NULL;
1282 }
1283
1284 ifp->if_ipackets++;
1285 m->m_pkthdr.rcvif = ifp;
1286
1287 #define IP_CSMASK (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
1288 #define TCP_CSMASK \
1289 (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
1290 if (ifp->if_capenable & IFCAP_RXCSUM) {
1291 #if 0
1292 /*
1293 * Intel Erratum #23 indicates that the Receive IP
1294 * Checksum offload feature has been completely
1295 * disabled.
1296 */
1297 if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
1298 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1299 if ((staterr & GX_RXERR_IP_CSUM) == 0)
1300 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1301 }
1302 #endif
1303 if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
1304 m->m_pkthdr.csum_flags |=
1305 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1306 m->m_pkthdr.csum_data = 0xffff;
1307 }
1308 }
1309 /*
1310 * If we received a packet with a vlan tag,
1311 * mark the packet before it's passed up.
1312 */
1313 if (staterr & GX_RXSTAT_VLAN_PKT) {
1314 VLAN_INPUT_TAG(ifp, m, rx->rx_special, continue);
1315 }
1316 (*ifp->if_input)(ifp, m);
1317 continue;
1318
1319 ierror:
1320 ifp->if_ierrors++;
1321 gx_newbuf(gx, idx, m);
1322
1323 /*
1324 * XXX
1325 * this isn't quite right. Suppose we have a packet that
1326 * spans 5 descriptors (9K split into 2K buffers). If
1327 * the 3rd descriptor sets an error, we need to ignore
1328 * the last two. The way things stand now, the last two
1329 * will be accepted as a single packet.
1330 *
1331 * we don't worry about this -- the chip may not set an
1332 * error in this case, and the checksum of the upper layers
1333 * will catch the error.
1334 */
1335 if (gx->gx_pkthdr != NULL) {
1336 m_freem(gx->gx_pkthdr);
1337 gx->gx_pkthdr = NULL;
1338 }
1339 GX_INC(idx, GX_RX_RING_CNT);
1340 }
1341
1342 gx->gx_rx_tail_idx = idx;
1343 if (--idx < 0)
1344 idx = GX_RX_RING_CNT - 1;
1345 CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
1346 }
1347
/*
 * Reap descriptors whose packets the hardware has finished sending,
 * freeing the associated mbuf chains and advancing the head index.
 */
static void
gx_txeof(struct gx_softc *gx)
{
	struct ifnet *ifp;
	int idx, cnt;

	gx->gx_tx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_tx_head_idx;
	cnt = gx->gx_txcnt;

	/*
	 * If the system chipset performs I/O write buffering, it is
	 * possible for the PIO read of the head descriptor to bypass the
	 * memory write of the descriptor, resulting in reading a descriptor
	 * which has not been updated yet.
	 */
	while (cnt) {
		struct gx_tx_desc_old *tx;

		tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
		cnt--;

		/* Only the last descriptor of a packet carries EOP/status. */
		if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
			GX_INC(idx, GX_TX_RING_CNT);
			continue;
		}

		/* Stop at the first packet the chip has not completed. */
		if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
			break;

		ifp->if_opackets++;

		/* gx_encap() stores the chain at the packet's last slot. */
		m_freem(gx->gx_cdata.gx_tx_chain[idx]);
		gx->gx_cdata.gx_tx_chain[idx] = NULL;
		/* Commit progress only once a whole packet is reaped. */
		gx->gx_txcnt = cnt;
		ifp->if_timer = 0;

		GX_INC(idx, GX_TX_RING_CNT);
		gx->gx_tx_head_idx = idx;
	}

	/* Ring fully drained: let gx_start() queue packets again. */
	if (gx->gx_txcnt == 0)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1394
1395 static void
1396 gx_intr(void *xsc)
1397 {
1398 struct gx_softc *gx;
1399 struct ifnet *ifp;
1400 u_int32_t intr;
1401 int s;
1402
1403 gx = xsc;
1404 ifp = &gx->arpcom.ac_if;
1405
1406 s = splimp();
1407
1408 gx->gx_interrupts++;
1409
1410 /* Disable host interrupts. */
1411 CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
1412
1413 /*
1414 * find out why we're being bothered.
1415 * reading this register automatically clears all bits.
1416 */
1417 intr = CSR_READ_4(gx, GX_INT_READ);
1418
1419 /* Check RX return ring producer/consumer */
1420 if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
1421 gx_rxeof(gx);
1422
1423 /* Check TX ring producer/consumer */
1424 if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
1425 gx_txeof(gx);
1426
1427 /*
1428 * handle other interrupts here.
1429 */
1430
1431 /*
1432 * Link change interrupts are not reliable; the interrupt may
1433 * not be generated if the link is lost. However, the register
1434 * read is reliable, so check that. Use SEQ errors to possibly
1435 * indicate that the link has changed.
1436 */
1437 if (intr & GX_INT_LINK_CHANGE) {
1438 if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
1439 device_printf(gx->gx_dev, "link down\n");
1440 } else {
1441 device_printf(gx->gx_dev, "link up\n");
1442 }
1443 }
1444
1445 /* Turn interrupts on. */
1446 CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);
1447
1448 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
1449 gx_start(ifp);
1450
1451 splx(s);
1452 }
1453
1454 /*
1455 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
1456 * pointers to descriptors.
1457 */
1458 static int
1459 gx_encap(struct gx_softc *gx, struct mbuf *m_head)
1460 {
1461 struct gx_tx_desc_data *tx = NULL;
1462 struct gx_tx_desc_ctx *tctx;
1463 struct mbuf *m;
1464 int idx, cnt, csumopts, txcontext;
1465 struct m_tag *mtag;
1466
1467 cnt = gx->gx_txcnt;
1468 idx = gx->gx_tx_tail_idx;
1469 txcontext = gx->gx_txcontext;
1470
1471 /*
1472 * Insure we have at least 4 descriptors pre-allocated.
1473 */
1474 if (cnt >= GX_TX_RING_CNT - 4)
1475 return (ENOBUFS);
1476
1477 /*
1478 * Set up the appropriate offload context if necessary.
1479 */
1480 csumopts = 0;
1481 if (m_head->m_pkthdr.csum_flags) {
1482 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1483 csumopts |= GX_TXTCP_OPT_IP_CSUM;
1484 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
1485 csumopts |= GX_TXTCP_OPT_TCP_CSUM;
1486 txcontext = GX_TXCONTEXT_TCPIP;
1487 } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
1488 csumopts |= GX_TXTCP_OPT_TCP_CSUM;
1489 txcontext = GX_TXCONTEXT_UDPIP;
1490 } else if (txcontext == GX_TXCONTEXT_NONE)
1491 txcontext = GX_TXCONTEXT_TCPIP;
1492 if (txcontext == gx->gx_txcontext)
1493 goto context_done;
1494
1495 tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
1496 tctx->tx_ip_csum_start = ETHER_HDR_LEN;
1497 tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
1498 tctx->tx_ip_csum_offset =
1499 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
1500 tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
1501 tctx->tx_tcp_csum_end = 0;
1502 if (txcontext == GX_TXCONTEXT_TCPIP)
1503 tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
1504 sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
1505 else
1506 tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
1507 sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
1508 tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
1509 tctx->tx_type = 0;
1510 tctx->tx_status = 0;
1511 GX_INC(idx, GX_TX_RING_CNT);
1512 cnt++;
1513 }
1514 context_done:
1515 /*
1516 * Start packing the mbufs in this chain into the transmit
1517 * descriptors. Stop when we run out of descriptors or hit
1518 * the end of the mbuf chain.
1519 */
1520 for (m = m_head; m != NULL; m = m->m_next) {
1521 if (m->m_len == 0)
1522 continue;
1523
1524 if (cnt == GX_TX_RING_CNT) {
1525 printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
1526 return (ENOBUFS);
1527 }
1528
1529 tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
1530 tx->tx_addr = vtophys(mtod(m, vm_offset_t));
1531 tx->tx_status = 0;
1532 tx->tx_len = m->m_len;
1533 if (gx->arpcom.ac_if.if_hwassist) {
1534 tx->tx_type = 1;
1535 tx->tx_command = GX_TXTCP_EXTENSION;
1536 tx->tx_options = csumopts;
1537 } else {
1538 /*
1539 * This is really a struct gx_tx_desc_old.
1540 */
1541 tx->tx_command = 0;
1542 }
1543 GX_INC(idx, GX_TX_RING_CNT);
1544 cnt++;
1545 }
1546
1547 if (tx != NULL) {
1548 tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
1549 GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
1550 mtag = VLAN_OUTPUT_TAG(&gx->arpcom.ac_if, m);
1551 if (mtag != NULL) {
1552 tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
1553 tx->tx_vlan = VLAN_TAG_VALUE(mtag);
1554 }
1555 gx->gx_txcnt = cnt;
1556 gx->gx_tx_tail_idx = idx;
1557 gx->gx_txcontext = txcontext;
1558 idx = GX_PREV(idx, GX_TX_RING_CNT);
1559 gx->gx_cdata.gx_tx_chain[idx] = m_head;
1560
1561 CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
1562 }
1563
1564 return (0);
1565 }
1566
1567 /*
1568 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1569 * to the mbuf data regions directly in the transmit descriptors.
1570 */
1571 static void
1572 gx_start(struct ifnet *ifp)
1573 {
1574 struct gx_softc *gx;
1575 struct mbuf *m_head;
1576 int s;
1577
1578 s = splimp();
1579
1580 gx = ifp->if_softc;
1581
1582 for (;;) {
1583 IF_DEQUEUE(&ifp->if_snd, m_head);
1584 if (m_head == NULL)
1585 break;
1586
1587 /*
1588 * Pack the data into the transmit ring. If we
1589 * don't have room, set the OACTIVE flag and wait
1590 * for the NIC to drain the ring.
1591 */
1592 if (gx_encap(gx, m_head) != 0) {
1593 IF_PREPEND(&ifp->if_snd, m_head);
1594 ifp->if_flags |= IFF_OACTIVE;
1595 break;
1596 }
1597
1598 /*
1599 * If there's a BPF listener, bounce a copy of this frame
1600 * to him.
1601 */
1602 BPF_MTAP(ifp, m_head);
1603
1604 /*
1605 * Set a timeout in case the chip goes out to lunch.
1606 */
1607 ifp->if_timer = 5;
1608 }
1609
1610 splx(s);
1611 }
Cache object: 42bc42f45ee8028f7c79a7cb855cc48f
|