FreeBSD/Linux Kernel Cross Reference
sys/dev/nve/if_nve.c
1 /*
2 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
3 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
28 */
29
30 /*
31 * NVIDIA nForce MCP Networking Adapter driver
32 *
33 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA
34 * through their web site.
35 *
36 * All mainstream nForce and nForce2 motherboards are supported. This module
37 * is as stable as, and sometimes more stable than, the Linux version. (Recent
38 * Linux stability issues seem to be related to some issues with newer
39 * distributions using GCC 3.x; however, this doesn't appear to affect FreeBSD
40 * 5.x).
41 *
42 * In accordance with the NVIDIA distribution license it is necessary to
43 * link this module against the nvlibnet.o binary object included in the
44 * Linux driver source distribution. The binary component is not modified in
45 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
46 * linux kernel module "wrapper".
47 *
48 * The Linux driver uses a common code API that is shared between Win32 and
49 * i386 Linux. This abstracts the low level driver functions and uses
50 * callbacks and hooks to access the underlying hardware device. By using
51 * this same API in a FreeBSD kernel module it is possible to support the
52 * hardware without breaching the Linux source distributions licensing
53 * requirements, or obtaining the hardware programming specifications.
54 *
55 * Although not conventional, it works, and given the relatively small
56 * amount of hardware centric code, it's hopefully no more buggy than its
57 * linux counterpart.
58 *
59 * NVIDIA now supports the nForce3 AMD64 platform; however, I have been
60 * unable to access such a system to verify support. The code is
61 * reported to work with little modification when compiled with the AMD64
62 * version of the NVIDIA Linux library. All that should be necessary to make
63 * the driver work is to link it directly into the kernel, instead of as a
64 * module, and apply the docs/amd64.diff patch in this source distribution to
65 * the NVIDIA Linux driver source.
66 *
67 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
68 * as recent versions of DragonFly.
69 *
70 * Written by Quinton Dolan <q@onthenet.com.au>
71 * Portions based on existing FreeBSD network drivers.
72 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
73 *
74 */
75
76 #include <sys/cdefs.h>
77 __FBSDID("$FreeBSD: releng/6.1/sys/dev/nve/if_nve.c 153718 2005-12-25 21:57:03Z bz $");
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/sockio.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/kernel.h>
85 #include <sys/socket.h>
86 #include <sys/sysctl.h>
87 #include <sys/queue.h>
88 #include <sys/module.h>
89
90 #include <net/if.h>
91 #include <net/if_arp.h>
92 #include <net/ethernet.h>
93 #include <net/if_dl.h>
94 #include <net/if_media.h>
95 #include <net/if_types.h>
96 #include <net/bpf.h>
97 #include <net/if_vlan_var.h>
98
99 #include <machine/bus.h>
100 #include <machine/resource.h>
101
102 #include <vm/vm.h> /* for vtophys */
103 #include <vm/pmap.h> /* for vtophys */
104 #include <machine/clock.h> /* for DELAY */
105 #include <sys/bus.h>
106 #include <sys/rman.h>
107
108 #include <dev/pci/pcireg.h>
109 #include <dev/pci/pcivar.h>
110 #include <dev/mii/mii.h>
111 #include <dev/mii/miivar.h>
112 #include "miibus_if.h"
113
114 /* Include NVIDIA Linux driver header files */
115 #define linux
116 #include <contrib/dev/nve/basetype.h>
117 #include <contrib/dev/nve/phy.h>
118 #include "os+%DIKED-nve.h"
119 #include <contrib/dev/nve/drvinfo.h>
120 #include <contrib/dev/nve/adapter.h>
121 #undef linux
122
123 #include <dev/nve/if_nvereg.h>
124
125 MODULE_DEPEND(nve, pci, 1, 1, 1);
126 MODULE_DEPEND(nve, ether, 1, 1, 1);
127 MODULE_DEPEND(nve, miibus, 1, 1, 1);
128
129 static int nve_probe(device_t);
130 static int nve_attach(device_t);
131 static int nve_detach(device_t);
132 static void nve_init(void *);
133 static void nve_init_locked(struct nve_softc *);
134 static void nve_stop(struct nve_softc *);
135 static void nve_shutdown(device_t);
136 static int nve_init_rings(struct nve_softc *);
137 static void nve_free_rings(struct nve_softc *);
138
139 static void nve_ifstart(struct ifnet *);
140 static void nve_ifstart_locked(struct ifnet *);
141 static int nve_ioctl(struct ifnet *, u_long, caddr_t);
142 static void nve_intr(void *);
143 static void nve_tick(void *);
144 static void nve_setmulti(struct nve_softc *);
145 static void nve_watchdog(struct ifnet *);
146 static void nve_update_stats(struct nve_softc *);
147
148 static int nve_ifmedia_upd(struct ifnet *);
149 static void nve_ifmedia_upd_locked(struct ifnet *);
150 static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
151 static int nve_miibus_readreg(device_t, int, int);
152 static void nve_miibus_writereg(device_t, int, int, int);
153
154 static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
155 static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);
156
157 static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
158 static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
159 static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
160 static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
161 static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
162 static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
163 static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
164 static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
165 static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
166 static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
167 static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
168 static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
169 static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
170 static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
171 static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
172 static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);
173
174 static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
175 static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
176 static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
177 static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
178 static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
179 static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
180 static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);
181
182 static device_method_t nve_methods[] = {
183 /* Device interface */
184 DEVMETHOD(device_probe, nve_probe),
185 DEVMETHOD(device_attach, nve_attach),
186 DEVMETHOD(device_detach, nve_detach),
187 DEVMETHOD(device_shutdown, nve_shutdown),
188
189 /* Bus interface */
190 DEVMETHOD(bus_print_child, bus_generic_print_child),
191 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
192
193 /* MII interface */
194 DEVMETHOD(miibus_readreg, nve_miibus_readreg),
195 DEVMETHOD(miibus_writereg, nve_miibus_writereg),
196
197 {0, 0}
198 };
199
200 static driver_t nve_driver = {
201 "nve",
202 nve_methods,
203 sizeof(struct nve_softc)
204 };
205
206 static devclass_t nve_devclass;
207
208 static int nve_pollinterval = 0;
209 SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
210 &nve_pollinterval, 0, "delay between interface polls");
211
212 DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
213 DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);
214
215 static struct nve_type nve_devs[] = {
216 {NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID,
217 "NVIDIA nForce MCP Networking Adapter"},
218 {NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID,
219 "NVIDIA nForce MCP2 Networking Adapter"},
220 {NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID,
221 "NVIDIA nForce MCP3 Networking Adapter"},
222 {NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID,
223 "NVIDIA nForce MCP4 Networking Adapter"},
224 {NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID,
225 "NVIDIA nForce MCP5 Networking Adapter"},
226 {NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID,
227 "NVIDIA nForce MCP6 Networking Adapter"},
228 {NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID,
229 "NVIDIA nForce MCP7 Networking Adapter"},
230 {NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID,
231 "NVIDIA nForce MCP8 Networking Adapter"},
232 {NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID,
233 "NVIDIA nForce MCP9 Networking Adapter"},
234 {NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID,
235 "NVIDIA nForce MCP10 Networking Adapter"},
236 {NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID,
237 "NVIDIA nForce MCP11 Networking Adapter"},
238 {0, 0, NULL}
239 };
240
241 /* DMA MEM map callback function to get data segment physical address */
242 static void
243 nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error)
244 {
245 if (error)
246 return;
247
248 KASSERT(nsegs == 1,
249 ("Too many DMA segments returned when mapping DMA memory"));
250 *(bus_addr_t *)arg = segs->ds_addr;
251 }
252
253 /* DMA RX map callback function to get data segment physical address */
254 static void
255 nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
256 bus_size_t mapsize, int error)
257 {
258 if (error)
259 return;
260 *(bus_addr_t *)arg = segs->ds_addr;
261 }
262
263 /*
264 * DMA TX buffer callback function to allocate fragment data segment
265 * addresses
266 */
267 static void
268 nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error)
269 {
270 struct nve_tx_desc *info;
271
272 info = arg;
273 if (error)
274 return;
275 KASSERT(nsegs < NV_MAX_FRAGS,
276 ("Too many DMA segments returned when mapping mbuf"));
277 info->numfrags = nsegs;
278 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
279 }
280
281 /* Probe for supported hardware IDs */
282 static int
283 nve_probe(device_t dev)
284 {
285 struct nve_type *t;
286
287 t = nve_devs;
288 /* Check for matching PCI device IDs */
289 while (t->name != NULL) {
290 if ((pci_get_vendor(dev) == t->vid_id) &&
291 (pci_get_device(dev) == t->dev_id)) {
292 device_set_desc(dev, t->name);
293 return (0);
294 }
295 t++;
296 }
297
298 return (ENXIO);
299 }
300
301 /* Attach driver and initialise hardware for use */
302 static int
303 nve_attach(device_t dev)
304 {
305 u_char eaddr[ETHER_ADDR_LEN];
306 struct nve_softc *sc;
307 struct ifnet *ifp;
308 OS_API *osapi;
309 ADAPTER_OPEN_PARAMS OpenParams;
310 int error = 0, i, rid;
311
312 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");
313
314 sc = device_get_softc(dev);
315
316 /* Allocate mutex */
317 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
318 MTX_DEF);
319 callout_init_mtx(&sc->stat_callout, &sc->mtx, 0);
320
321 sc->dev = dev;
322
323 /* Preinitialize data structures */
324 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));
325
326 /* Enable bus mastering */
327 pci_enable_busmaster(dev);
328
329 /* Allocate memory mapped address space */
330 rid = NV_RID;
331 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
332 RF_ACTIVE);
333
334 if (sc->res == NULL) {
335 device_printf(dev, "couldn't map memory\n");
336 error = ENXIO;
337 goto fail;
338 }
339 sc->sc_st = rman_get_bustag(sc->res);
340 sc->sc_sh = rman_get_bushandle(sc->res);
341
342 /* Allocate interrupt */
343 rid = 0;
344 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
345 RF_SHAREABLE | RF_ACTIVE);
346
347 if (sc->irq == NULL) {
348 device_printf(dev, "couldn't map interrupt\n");
349 error = ENXIO;
350 goto fail;
351 }
352 /* Allocate DMA tags */
353 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
354 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
355 NV_MAX_FRAGS, MCLBYTES, 0,
356 busdma_lock_mutex, &Giant,
357 &sc->mtag);
358 if (error) {
359 device_printf(dev, "couldn't allocate dma tag\n");
360 goto fail;
361 }
362 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
363 BUS_SPACE_MAXADDR, NULL, NULL,
364 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
365 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
366 busdma_lock_mutex, &Giant,
367 &sc->rtag);
368 if (error) {
369 device_printf(dev, "couldn't allocate dma tag\n");
370 goto fail;
371 }
372 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
373 BUS_SPACE_MAXADDR, NULL, NULL,
374 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
375 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
376 busdma_lock_mutex, &Giant,
377 &sc->ttag);
378 if (error) {
379 device_printf(dev, "couldn't allocate dma tag\n");
380 goto fail;
381 }
382 /* Allocate DMA safe memory and get the DMA addresses. */
383 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
384 BUS_DMA_WAITOK, &sc->tmap);
385 if (error) {
386 device_printf(dev, "couldn't allocate dma memory\n");
387 goto fail;
388 }
389 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
390 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
391 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
392 &sc->tx_addr, 0);
393 if (error) {
394 device_printf(dev, "couldn't map dma memory\n");
395 goto fail;
396 }
397 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
398 BUS_DMA_WAITOK, &sc->rmap);
399 if (error) {
400 device_printf(dev, "couldn't allocate dma memory\n");
401 goto fail;
402 }
403 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
404 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
405 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
406 &sc->rx_addr, 0);
407 if (error) {
408 device_printf(dev, "couldn't map dma memory\n");
409 goto fail;
410 }
411 /* Initialize rings. */
412 if (nve_init_rings(sc)) {
413 device_printf(dev, "failed to init rings\n");
414 error = ENXIO;
415 goto fail;
416 }
417 /* Setup NVIDIA API callback routines */
418 osapi = &sc->osapi;
419 osapi->pOSCX = sc;
420 osapi->pfnAllocMemory = nve_osalloc;
421 osapi->pfnFreeMemory = nve_osfree;
422 osapi->pfnAllocMemoryEx = nve_osallocex;
423 osapi->pfnFreeMemoryEx = nve_osfreeex;
424 osapi->pfnClearMemory = nve_osclear;
425 osapi->pfnStallExecution = nve_osdelay;
426 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf;
427 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf;
428 osapi->pfnPacketWasSent = nve_ospackettx;
429 osapi->pfnPacketWasReceived = nve_ospacketrx;
430 osapi->pfnLinkStateHasChanged = nve_oslinkchg;
431 osapi->pfnAllocTimer = nve_osalloctimer;
432 osapi->pfnFreeTimer = nve_osfreetimer;
433 osapi->pfnInitializeTimer = nve_osinittimer;
434 osapi->pfnSetTimer = nve_ossettimer;
435 osapi->pfnCancelTimer = nve_oscanceltimer;
436 osapi->pfnPreprocessPacket = nve_ospreprocpkt;
437 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq;
438 osapi->pfnIndicatePackets = nve_osindicatepkt;
439 osapi->pfnLockAlloc = nve_oslockalloc;
440 osapi->pfnLockAcquire = nve_oslockacquire;
441 osapi->pfnLockRelease = nve_oslockrelease;
442 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt;
443
444 sc->linkup = FALSE;
445 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;
446
447 /* TODO - We don't support hardware offload yet */
448 sc->hwmode = 1;
449 sc->media = 0;
450
451 /* Set NVIDIA API startup parameters */
452 OpenParams.MaxDpcLoop = 2;
453 OpenParams.MaxRxPkt = RX_RING_SIZE;
454 OpenParams.MaxTxPkt = TX_RING_SIZE;
455 OpenParams.SentPacketStatusSuccess = 1;
456 OpenParams.SentPacketStatusFailure = 0;
457 OpenParams.MaxRxPktToAccumulate = 6;
458 OpenParams.ulPollInterval = nve_pollinterval;
459 OpenParams.SetForcedModeEveryNthRxPacket = 0;
460 OpenParams.SetForcedModeEveryNthTxPacket = 0;
461 OpenParams.RxForcedInterrupt = 0;
462 OpenParams.TxForcedInterrupt = 0;
463 OpenParams.pOSApi = osapi;
464 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
465 OpenParams.bASFEnabled = 0;
466 OpenParams.ulDescriptorVersion = sc->hwmode;
467 OpenParams.ulMaxPacketSize = sc->max_frame_size;
468 OpenParams.DeviceId = pci_get_device(dev);
469
470 /* Open NVIDIA Hardware API */
471 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
472 if (error) {
473 device_printf(dev,
474 "failed to open NVIDIA Hardware API: 0x%x\n", error);
475 goto fail;
476 }
477
478 /* TODO - Add support for MODE2 hardware offload */
479
480 bzero(&sc->adapterdata, sizeof(sc->adapterdata));
481
482 sc->adapterdata.ulMediaIF = sc->media;
483 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
484 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);
485
486 /* MAC is loaded backwards into h/w reg */
487 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
488 for (i = 0; i < 6; i++) {
489 eaddr[i] = sc->original_mac_addr[5 - i];
490 }
491 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);
492
493 /* Display ethernet address */
494 device_printf(dev, "Ethernet address %6D\n", eaddr, ":");
495
496 /* Allocate interface structures */
497 ifp = sc->ifp = if_alloc(IFT_ETHER);
498 if (ifp == NULL) {
499 device_printf(dev, "can not if_alloc()\n");
500 error = ENOSPC;
501 goto fail;
502 }
503
504 /* Probe device for MII interface to PHY */
505 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n");
506 if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) {
507 device_printf(dev, "MII without any phy!\n");
508 error = ENXIO;
509 goto fail;
510 }
511
512 /* Setup interface parameters */
513 ifp->if_softc = sc;
514 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
515 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
516 ifp->if_ioctl = nve_ioctl;
517 ifp->if_output = ether_output;
518 ifp->if_start = nve_ifstart;
519 ifp->if_watchdog = nve_watchdog;
520 ifp->if_timer = 0;
521 ifp->if_init = nve_init;
522 ifp->if_mtu = ETHERMTU;
523 ifp->if_baudrate = IF_Mbps(100);
524 ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;
525 ifp->if_capabilities |= IFCAP_VLAN_MTU;
526
527 /* Attach to OS's managers. */
528 ether_ifattach(ifp, eaddr);
529
530 /* Activate our interrupt handler. - attach last to avoid lock */
531 error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
532 nve_intr, sc, &sc->sc_ih);
533 if (error) {
534 device_printf(sc->dev, "couldn't set up interrupt handler\n");
535 goto fail;
536 }
537 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");
538
539 fail:
540 if (error)
541 nve_detach(dev);
542
543 return (error);
544 }
545
546 /* Detach interface for module unload */
547 static int
548 nve_detach(device_t dev)
549 {
550 struct nve_softc *sc = device_get_softc(dev);
551 struct ifnet *ifp;
552
553 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));
554
555 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");
556
557 ifp = sc->ifp;
558
559 if (device_is_attached(dev)) {
560 NVE_LOCK(sc);
561 nve_stop(sc);
562 NVE_UNLOCK(sc);
563 callout_drain(&sc->stat_callout);
564 ether_ifdetach(ifp);
565 }
566
567 if (sc->miibus)
568 device_delete_child(dev, sc->miibus);
569 bus_generic_detach(dev);
570
571 /* Reload unreversed address back into MAC in original state */
572 if (sc->original_mac_addr)
573 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
574 sc->original_mac_addr);
575
576 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
577 /* Detach from NVIDIA hardware API */
578 if (sc->hwapi->pfnClose)
579 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);
580 /* Release resources */
581 if (sc->sc_ih)
582 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
583 if (sc->irq)
584 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
585 if (sc->res)
586 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);
587
588 nve_free_rings(sc);
589
590 if (sc->rx_desc) {
591 bus_dmamap_unload(sc->rtag, sc->rmap);
592 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
593 bus_dmamap_destroy(sc->rtag, sc->rmap);
594 }
595 if (sc->mtag)
596 bus_dma_tag_destroy(sc->mtag);
597 if (sc->ttag)
598 bus_dma_tag_destroy(sc->ttag);
599 if (sc->rtag)
600 bus_dma_tag_destroy(sc->rtag);
601
602 if (ifp)
603 if_free(ifp);
604 mtx_destroy(&sc->mtx);
605
606 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");
607
608 return (0);
609 }
610
611 /* Initialise interface and start it "RUNNING" */
612 static void
613 nve_init(void *xsc)
614 {
615 struct nve_softc *sc = xsc;
616
617 NVE_LOCK(sc);
618 nve_init_locked(sc);
619 NVE_UNLOCK(sc);
620 }
621
622 static void
623 nve_init_locked(struct nve_softc *sc)
624 {
625 struct ifnet *ifp;
626 int error;
627
628 NVE_LOCK_ASSERT(sc);
629 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);
630
631 ifp = sc->ifp;
632
633 /* Do nothing if already running */
634 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
635 return;
636
637 nve_stop(sc);
638 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");
639
640 nve_ifmedia_upd_locked(ifp);
641
642 /* Setup Hardware interface and allocate memory structures */
643 error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
644 0, /* force speed */
645 0, /* force full duplex */
646 0, /* force mode */
647 0, /* force async mode */
648 &sc->linkup);
649
650 if (error) {
651 device_printf(sc->dev,
652 "failed to start NVIDIA Hardware interface\n");
653 return;
654 }
655 /* Set the MAC address */
656 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IFP2ENADDR(sc->ifp));
657 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
658 sc->hwapi->pfnStart(sc->hwapi->pADCX);
659
660 /* Setup multicast filter */
661 nve_setmulti(sc);
662
663 /* Update interface parameters */
664 ifp->if_drv_flags |= IFF_DRV_RUNNING;
665 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
666
667 callout_reset(&sc->stat_callout, hz, nve_tick, sc);
668
669 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");
670
671 return;
672 }
673
674 /* Stop interface activity, i.e. clear the "RUNNING" state */
675 static void
676 nve_stop(struct nve_softc *sc)
677 {
678 struct ifnet *ifp;
679
680 NVE_LOCK_ASSERT(sc);
681
682 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");
683
684 ifp = sc->ifp;
685 ifp->if_timer = 0;
686
687 /* Cancel tick timer */
688 callout_stop(&sc->stat_callout);
689
690 /* Stop hardware activity */
691 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
692 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);
693
694 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
695 /* Shutdown interface and deallocate memory buffers */
696 if (sc->hwapi->pfnDeinit)
697 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);
698
699 sc->linkup = 0;
700 sc->cur_rx = 0;
701 sc->pending_rxs = 0;
702 sc->pending_txs = 0;
703
704 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
705
706 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");
707
708 return;
709 }
710
711 /* Shutdown interface for unload/reboot */
712 static void
713 nve_shutdown(device_t dev)
714 {
715 struct nve_softc *sc;
716
717 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");
718
719 sc = device_get_softc(dev);
720
721 /* Stop hardware activity */
722 NVE_LOCK(sc);
723 nve_stop(sc);
724 NVE_UNLOCK(sc);
725 }
726
727 /* Allocate and initialise the RX and TX ring buffers */
728 static int
729 nve_init_rings(struct nve_softc *sc)
730 {
731 int error, i;
732
733 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");
734
735 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;
736 /* Initialise RX ring */
737 for (i = 0; i < RX_RING_SIZE; i++) {
738 struct nve_rx_desc *desc = sc->rx_desc + i;
739 struct nve_map_buffer *buf = &desc->buf;
740
741 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
742 if (buf->mbuf == NULL) {
743 device_printf(sc->dev, "couldn't allocate mbuf\n");
744 nve_free_rings(sc);
745 return (ENOBUFS);
746 }
747 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
748 m_adj(buf->mbuf, ETHER_ALIGN);
749
750 error = bus_dmamap_create(sc->mtag, 0, &buf->map);
751 if (error) {
752 device_printf(sc->dev, "couldn't create dma map\n");
753 nve_free_rings(sc);
754 return (error);
755 }
756 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
757 nve_dmamap_rx_cb, &desc->paddr, 0);
758 if (error) {
759 device_printf(sc->dev, "couldn't dma map mbuf\n");
760 nve_free_rings(sc);
761 return (error);
762 }
763 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
764
765 desc->buflength = buf->mbuf->m_len;
766 desc->vaddr = mtod(buf->mbuf, caddr_t);
767 }
768 bus_dmamap_sync(sc->rtag, sc->rmap,
769 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
770
771 /* Initialize TX ring */
772 for (i = 0; i < TX_RING_SIZE; i++) {
773 struct nve_tx_desc *desc = sc->tx_desc + i;
774 struct nve_map_buffer *buf = &desc->buf;
775
776 buf->mbuf = NULL;
777
778 error = bus_dmamap_create(sc->mtag, 0, &buf->map);
779 if (error) {
780 device_printf(sc->dev, "couldn't create dma map\n");
781 nve_free_rings(sc);
782 return (error);
783 }
784 }
785 bus_dmamap_sync(sc->ttag, sc->tmap,
786 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
787
788 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");
789
790 return (error);
791 }
792
793 /* Free the RX and TX ring buffers */
794 static void
795 nve_free_rings(struct nve_softc *sc)
796 {
797 int i;
798
799 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");
800
801 for (i = 0; i < RX_RING_SIZE; i++) {
802 struct nve_rx_desc *desc = sc->rx_desc + i;
803 struct nve_map_buffer *buf = &desc->buf;
804
805 if (buf->mbuf) {
806 bus_dmamap_unload(sc->mtag, buf->map);
807 bus_dmamap_destroy(sc->mtag, buf->map);
808 m_freem(buf->mbuf);
809 }
810 buf->mbuf = NULL;
811 }
812
813 for (i = 0; i < TX_RING_SIZE; i++) {
814 struct nve_tx_desc *desc = sc->tx_desc + i;
815 struct nve_map_buffer *buf = &desc->buf;
816
817 if (buf->mbuf) {
818 bus_dmamap_unload(sc->mtag, buf->map);
819 bus_dmamap_destroy(sc->mtag, buf->map);
820 m_freem(buf->mbuf);
821 }
822 buf->mbuf = NULL;
823 }
824
825 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");
826 }
827
828 /* Main loop for sending packets from OS to interface */
829 static void
830 nve_ifstart(struct ifnet *ifp)
831 {
832 struct nve_softc *sc = ifp->if_softc;
833
834 NVE_LOCK(sc);
835 nve_ifstart_locked(ifp);
836 NVE_UNLOCK(sc);
837 }
838
839 static void
840 nve_ifstart_locked(struct ifnet *ifp)
841 {
842 struct nve_softc *sc = ifp->if_softc;
843 struct nve_map_buffer *buf;
844 struct mbuf *m0, *m;
845 struct nve_tx_desc *desc;
846 ADAPTER_WRITE_DATA txdata;
847 int error, i;
848
849 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");
850
851 NVE_LOCK_ASSERT(sc);
852
853 /* If link is down/busy or queue is empty do nothing */
854 if (ifp->if_drv_flags & IFF_DRV_OACTIVE ||
855 IFQ_DRV_IS_EMPTY(&ifp->if_snd))
856 return;
857
858 /* Transmit queued packets until sent or TX ring is full */
859 while (sc->pending_txs < TX_RING_SIZE) {
860 desc = sc->tx_desc + sc->cur_tx;
861 buf = &desc->buf;
862
863 /* Get next packet to send. */
864 IF_DEQUEUE(&ifp->if_snd, m0);
865
866 /* If nothing to send, return. */
867 if (m0 == NULL)
868 return;
869
870 /* Map MBUF for DMA access */
871 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
872 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
873
874 if (error && error != EFBIG) {
875 m_freem(m0);
876 sc->tx_errors++;
877 continue;
878 }
879 /*
880 * Packet has too many fragments - defrag into new mbuf
881 * cluster
882 */
883 if (error) {
884 m = m_defrag(m0, M_DONTWAIT);
885 if (m == NULL) {
886 m_freem(m0);
887 sc->tx_errors++;
888 continue;
889 }
890 m0 = m;
891
892 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
893 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
894 if (error) {
895 m_freem(m);
896 sc->tx_errors++;
897 continue;
898 }
899 }
900 /* Do sync on DMA bounce buffer */
901 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);
902
903 buf->mbuf = m0;
904 txdata.ulNumberOfElements = desc->numfrags;
905 txdata.pvID = (PVOID)desc;
906
907 /* Put fragments into API element list */
908 txdata.ulTotalLength = buf->mbuf->m_len;
909 for (i = 0; i < desc->numfrags; i++) {
910 txdata.sElement[i].ulLength =
911 (ulong)desc->frags[i].ds_len;
912 txdata.sElement[i].pPhysical =
913 (PVOID)desc->frags[i].ds_addr;
914 }
915
916 /* Send packet to Nvidia API for transmission */
917 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);
918
919 switch (error) {
920 case ADAPTERERR_NONE:
921 /* Packet was queued in API TX queue successfully */
922 sc->pending_txs++;
923 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;
924 break;
925
926 case ADAPTERERR_TRANSMIT_QUEUE_FULL:
927 /* The API TX queue is full - requeue the packet */
928 device_printf(sc->dev,
929 "nve_ifstart: transmit queue is full\n");
930 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
931 bus_dmamap_unload(sc->mtag, buf->map);
932 IF_PREPEND(&ifp->if_snd, buf->mbuf);
933 buf->mbuf = NULL;
934 return;
935
936 default:
937 /* The API failed to queue/send the packet so dump it */
938 device_printf(sc->dev, "nve_ifstart: transmit error\n");
939 bus_dmamap_unload(sc->mtag, buf->map);
940 m_freem(buf->mbuf);
941 buf->mbuf = NULL;
942 sc->tx_errors++;
943 return;
944 }
945 /* Set watchdog timer. */
946 ifp->if_timer = 8;
947
948 /* Copy packet to BPF tap */
949 BPF_MTAP(ifp, m0);
950 }
951 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
952
953 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
954 }
955
956 /* Handle IOCTL events */
957 static int
958 nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
959 {
960 struct nve_softc *sc = ifp->if_softc;
961 struct ifreq *ifr = (struct ifreq *) data;
962 struct mii_data *mii;
963 int error = 0;
964
965 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");
966
967 switch (command) {
968 case SIOCSIFMTU:
969 /* Set MTU size */
970 NVE_LOCK(sc);
971 if (ifp->if_mtu == ifr->ifr_mtu) {
972 NVE_UNLOCK(sc);
973 break;
974 }
975 if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
976 ifp->if_mtu = ifr->ifr_mtu;
977 nve_stop(sc);
978 nve_init_locked(sc);
979 } else
980 error = EINVAL;
981 NVE_UNLOCK(sc);
982 break;
983
984 case SIOCSIFFLAGS:
985 /* Setup interface flags */
986 NVE_LOCK(sc);
987 if (ifp->if_flags & IFF_UP) {
988 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
989 nve_init_locked(sc);
990 NVE_UNLOCK(sc);
991 break;
992 }
993 } else {
994 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
995 nve_stop(sc);
996 NVE_UNLOCK(sc);
997 break;
998 }
999 }
1000 /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
1001 nve_setmulti(sc);
1002 NVE_UNLOCK(sc);
1003 break;
1004
1005 case SIOCADDMULTI:
1006 case SIOCDELMULTI:
1007 /* Setup multicast filter */
1008 NVE_LOCK(sc);
1009 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1010 nve_setmulti(sc);
1011 }
1012 NVE_UNLOCK(sc);
1013 break;
1014
1015 case SIOCGIFMEDIA:
1016 case SIOCSIFMEDIA:
1017 /* Get/Set interface media parameters */
1018 mii = device_get_softc(sc->miibus);
1019 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1020 break;
1021
1022 default:
1023 /* Everything else we forward to generic ether ioctl */
1024 error = ether_ioctl(ifp, (int)command, data);
1025 break;
1026 }
1027
1028 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");
1029
1030 return (error);
1031 }
1032
1033 /* Interrupt service routine */
1034 static void
1035 nve_intr(void *arg)
1036 {
1037 struct nve_softc *sc = arg;
1038 struct ifnet *ifp = sc->ifp;
1039
1040 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");
1041
1042 NVE_LOCK(sc);
1043 if ((ifp->if_flags & IFF_UP) == 0) {
1044 nve_stop(sc);
1045 NVE_UNLOCK(sc);
1046 return;
1047 }
1048 /* Handle interrupt event */
1049 if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
1050 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
1051 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
1052 }
1053 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1054 nve_ifstart_locked(ifp);
1055
1056 /* If no pending packets we don't need a timeout */
1057 if (sc->pending_txs == 0)
1058 sc->ifp->if_timer = 0;
1059 NVE_UNLOCK(sc);
1060
1061 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");
1062
1063 return;
1064 }
1065
1066 /* Setup multicast filters */
1067 static void
1068 nve_setmulti(struct nve_softc *sc)
1069 {
1070 struct ifnet *ifp;
1071 struct ifmultiaddr *ifma;
1072 PACKET_FILTER hwfilter;
1073 int i;
1074 u_int8_t andaddr[6], oraddr[6];
1075
1076 NVE_LOCK_ASSERT(sc);
1077
1078 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");
1079
1080 ifp = sc->ifp;
1081
1082 /* Initialize filter and the multicast AND/OR accumulators */
1083 hwfilter.ulFilterFlags = 0;
1084 for (i = 0; i < 6; i++) {
1085 hwfilter.acMulticastAddress[i] = 0; andaddr[i] = 0xff;
1086 hwfilter.acMulticastMask[i] = 0; oraddr[i] = 0;
1087 }
1088
1089 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1090 /* Accept all packets */
1091 hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
1092 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
1093 return;
1094 }
1095 /* Setup multicast filter */
1096 IF_ADDR_LOCK(ifp);
1097 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1098 u_char *addrp;
1099
1100 if (ifma->ifma_addr->sa_family != AF_LINK)
1101 continue;
1102
1103 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
1104 for (i = 0; i < 6; i++) {
1105 u_int8_t mcaddr = addrp[i];
1106 andaddr[i] &= mcaddr;
1107 oraddr[i] |= mcaddr;
1108 }
1109 }
1110 IF_ADDR_UNLOCK(ifp);
1111 for (i = 0; i < 6; i++) {
1112 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
1113 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
1114 }
1115
1116 /* Send filter to NVIDIA API */
1117 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
1118
1119 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");
1120
1121 return;
1122 }
1123
1124 /* Change the current media/mediaopts */
1125 static int
1126 nve_ifmedia_upd(struct ifnet *ifp)
1127 {
1128 struct nve_softc *sc = ifp->if_softc;
1129
1130 NVE_LOCK(sc);
1131 nve_ifmedia_upd_locked(ifp);
1132 NVE_UNLOCK(sc);
1133 return (0);
1134 }
1135
1136 static void
1137 nve_ifmedia_upd_locked(struct ifnet *ifp)
1138 {
1139 struct nve_softc *sc = ifp->if_softc;
1140 struct mii_data *mii;
1141
1142 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");
1143
1144 NVE_LOCK_ASSERT(sc);
1145 mii = device_get_softc(sc->miibus);
1146
1147 if (mii->mii_instance) {
1148 struct mii_softc *miisc;
1149 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1150 miisc = LIST_NEXT(miisc, mii_list)) {
1151 mii_phy_reset(miisc);
1152 }
1153 }
1154 mii_mediachg(mii);
1155 }
1156
1157 /* Report the current media status from the miibus PHY */
1158 static void
1159 nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1160 {
1161 struct nve_softc *sc;
1162 struct mii_data *mii;
1163
1164 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");
1165
1166 sc = ifp->if_softc;
1167 NVE_LOCK(sc);
1168 mii = device_get_softc(sc->miibus);
1169 mii_pollstat(mii);
1170 NVE_UNLOCK(sc);
1171
1172 ifmr->ifm_active = mii->mii_media_active;
1173 ifmr->ifm_status = mii->mii_media_status;
1174
1175 return;
1176 }
1177
1178 /* miibus tick timer - maintain link status */
1179 static void
1180 nve_tick(void *xsc)
1181 {
1182 struct nve_softc *sc = xsc;
1183 struct mii_data *mii;
1184 struct ifnet *ifp;
1185
1186 NVE_LOCK_ASSERT(sc);
1187
1188 ifp = sc->ifp;
1189 nve_update_stats(sc);
1190
1191 mii = device_get_softc(sc->miibus);
1192 mii_tick(mii);
1193
1194 if (mii->mii_media_status & IFM_ACTIVE &&
1195 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1196 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1197 nve_ifstart_locked(ifp);
1198 }
1199 callout_reset(&sc->stat_callout, hz, nve_tick, sc);
1200
1201 return;
1202 }
1203
1204 /* Update ifnet data structure with collected interface stats from API */
1205 static void
1206 nve_update_stats(struct nve_softc *sc)
1207 {
1208 struct ifnet *ifp = sc->ifp;
1209 ADAPTER_STATS stats;
1210
1211 NVE_LOCK_ASSERT(sc);
1212
1213 if (sc->hwapi) {
1214 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);
1215
1216 ifp->if_ipackets = stats.ulSuccessfulReceptions;
1217 ifp->if_ierrors = stats.ulMissedFrames +
1218 stats.ulFailedReceptions +
1219 stats.ulCRCErrors +
1220 stats.ulFramingErrors +
1221 stats.ulOverFlowErrors;
1222
1223 ifp->if_opackets = stats.ulSuccessfulTransmissions;
1224 ifp->if_oerrors = sc->tx_errors +
1225 stats.ulFailedTransmissions +
1226 stats.ulRetryErrors +
1227 stats.ulUnderflowErrors +
1228 stats.ulLossOfCarrierErrors +
1229 stats.ulLateCollisionErrors;
1230
1231 ifp->if_collisions = stats.ulLateCollisionErrors;
1232 }
1233
1234 return;
1235 }
1236
1237 /* miibus Read PHY register wrapper - calls Nvidia API entry point */
1238 static int
1239 nve_miibus_readreg(device_t dev, int phy, int reg)
1240 {
1241 struct nve_softc *sc = device_get_softc(dev);
1242 ULONG data;
1243
1244 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");
1245
1246 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);
1247
1248 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");
1249
1250 return (data);
1251 }
1252
1253 /* miibus Write PHY register wrapper - calls Nvidia API entry point */
1254 static void
1255 nve_miibus_writereg(device_t dev, int phy, int reg, int data)
1256 {
1257 struct nve_softc *sc = device_get_softc(dev);
1258
1259 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");
1260
1261 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);
1262
1263 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");
1264
1265 return;
1266 }
1267
1268 /* Watchdog timer to prevent PHY lockups */
1269 static void
1270 nve_watchdog(struct ifnet *ifp)
1271 {
1272 struct nve_softc *sc = ifp->if_softc;
1273
1274 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);
1275
1276 NVE_LOCK(sc);
1277 sc->tx_errors++;
1278
1279 nve_stop(sc);
1280 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1281 nve_init_locked(sc);
1282
1283 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1284 nve_ifstart_locked(ifp);
1285 NVE_UNLOCK(sc);
1286
1287 return;
1288 }
1289
1290 /* --- Start of NVOSAPI interface --- */
1291
1292 /* Allocate DMA enabled general use memory for API */
1293 static NV_SINT32
1294 nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
1295 {
1296 struct nve_softc *sc;
1297 bus_addr_t mem_physical;
1298
1299 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);
1300
1301 sc = (struct nve_softc *)ctx;
1302
1303 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
1304 M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
1305
1306 if (!mem->pLogical) {
1307 device_printf(sc->dev, "memory allocation failed\n");
1308 return (0);
1309 }
1310 memset(mem->pLogical, 0, (ulong)mem->uiLength);
1311 mem_physical = vtophys(mem->pLogical);
1312 mem->pPhysical = (PVOID)mem_physical;
1313
1314 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
1315 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);
1316
1317 return (1);
1318 }
1319
1320 /* Free allocated memory */
1321 static NV_SINT32
1322 nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
1323 {
1324 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
1325 (uint)mem->pLogical, (uint) mem->uiLength);
1326
1327 contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF);
1328 return (1);
1329 }
1330
1331 /* Copied directly from nvnet.c */
1332 static NV_SINT32
1333 nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
1334 {
1335 MEMORY_BLOCK mem_block;
1336
1337 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");
1338
1339 mem_block_ex->pLogical = NULL;
1340 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;
1341
1342 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
1343 (mem_block_ex->AlignmentSize > 1)) {
1344 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n",
1345 mem_block_ex->AlignmentSize);
1346 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;
1347 }
1348 mem_block.uiLength = mem_block_ex->uiLengthOrig;
1349
1350 if (nve_osalloc(ctx, &mem_block) == 0) {
1351 return (0);
1352 }
1353 mem_block_ex->pLogicalOrig = mem_block.pLogical;
1354 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
1355 mem_block_ex->pPhysicalOrigHigh = 0;
1356
1357 mem_block_ex->pPhysical = mem_block.pPhysical;
1358 mem_block_ex->pLogical = mem_block.pLogical;
1359
1360 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
1361 unsigned int offset;
1362 offset = mem_block_ex->pPhysicalOrigLow &
1363 (mem_block_ex->AlignmentSize - 1);
1364
1365 if (offset) {
1366 mem_block_ex->pPhysical =
1367 (PVOID)((ulong)mem_block_ex->pPhysical +
1368 mem_block_ex->AlignmentSize - offset);
1369 mem_block_ex->pLogical =
1370 (PVOID)((ulong)mem_block_ex->pLogical +
1371 mem_block_ex->AlignmentSize - offset);
1372 } /* if (offset) */
1373 } /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
1374 return (1);
1375 }
1376
1377 /* Copied directly from nvnet.c */
1378 static NV_SINT32
1379 nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
1380 {
1381 MEMORY_BLOCK mem_block;
1382
1383 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");
1384
1385 mem_block.pLogical = mem_block_ex->pLogicalOrig;
1386 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
1387 mem_block.uiLength = mem_block_ex->uiLengthOrig;
1388
1389 return (nve_osfree(ctx, &mem_block));
1390 }
1391
1392 /* Clear memory region */
1393 static NV_SINT32
1394 nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
1395 {
1396 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
1397 memset(mem, 0, length);
1398 return (1);
1399 }
1400
1401 /* Busy-wait for the requested number of microseconds */
1402 static NV_SINT32
1403 nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
1404 {
1405 DELAY(usec);
1406 return (1);
1407 }
1408
1409 /* Allocate memory for rx buffer */
1410 static NV_SINT32
1411 nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
1412 {
1413 struct nve_softc *sc = ctx;
1414 struct nve_rx_desc *desc;
1415 struct nve_map_buffer *buf;
1416 int error;
1417
1418 if (device_is_attached(sc->dev))
1419 NVE_LOCK_ASSERT(sc);
1420
1421 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");
1422
1423 if (sc->pending_rxs == RX_RING_SIZE) {
1424 device_printf(sc->dev, "rx ring buffer is full\n");
1425 goto fail;
1426 }
1427 desc = sc->rx_desc + sc->cur_rx;
1428 buf = &desc->buf;
1429
1430 if (buf->mbuf == NULL) {
1431 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1432 if (buf->mbuf == NULL) {
1433 device_printf(sc->dev, "failed to allocate memory\n");
1434 goto fail;
1435 }
1436 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
1437 m_adj(buf->mbuf, ETHER_ALIGN);
1438
1439 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
1440 nve_dmamap_rx_cb, &desc->paddr, 0);
1441 if (error) {
1442 device_printf(sc->dev, "failed to dmamap mbuf\n");
1443 m_freem(buf->mbuf);
1444 buf->mbuf = NULL;
1445 goto fail;
1446 }
1447 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
1448 desc->buflength = buf->mbuf->m_len;
1449 desc->vaddr = mtod(buf->mbuf, caddr_t);
1450 }
1451 sc->pending_rxs++;
1452 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;
1453
1454 mem->pLogical = (void *)desc->vaddr;
1455 mem->pPhysical = (void *)desc->paddr;
1456 mem->uiLength = desc->buflength;
1457 *id = (void *)desc;
1458
1459 return (1);
1460
1461 fail:
1462 return (0);
1463 }
1464
1465 /* Free the rx buffer */
1466 static NV_SINT32
1467 nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
1468 {
1469 struct nve_softc *sc = ctx;
1470 struct nve_rx_desc *desc;
1471 struct nve_map_buffer *buf;
1472
1473 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");
1474
1475 desc = (struct nve_rx_desc *) id;
1476 buf = &desc->buf;
1477
1478 if (buf->mbuf) {
1479 bus_dmamap_unload(sc->mtag, buf->map);
1480 bus_dmamap_destroy(sc->mtag, buf->map);
1481 m_freem(buf->mbuf);
1482 }
1483 sc->pending_rxs--;
1484 buf->mbuf = NULL;
1485
1486 return (1);
1487 }
1488
1489 /* This gets called by the Nvidia API after our TX packet has been sent */
1490 static NV_SINT32
1491 nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
1492 {
1493 struct nve_softc *sc = ctx;
1494 struct nve_map_buffer *buf;
1495 struct nve_tx_desc *desc = (struct nve_tx_desc *) id;
1496 struct ifnet *ifp;
1497
1498 NVE_LOCK_ASSERT(sc);
1499
1500 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");
1501
1502 ifp = sc->ifp;
1503 buf = &desc->buf;
1504 sc->pending_txs--;
1505
1506 /* Unload and free mbuf cluster */
1507 if (buf->mbuf == NULL)
1508 goto fail;
1509
1510 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
1511 bus_dmamap_unload(sc->mtag, buf->map);
1512 m_freem(buf->mbuf);
1513 buf->mbuf = NULL;
1514
1515 /* Send more packets if we have them */
1516 if (sc->pending_txs < TX_RING_SIZE)
1517 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1518
1519 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE)
1520 nve_ifstart_locked(ifp);
1521
1522 fail:
1523
1524 return (1);
1525 }
1526
1527 /* This gets called by the Nvidia API when a new packet has been received */
1528 /* XXX What is newbuf used for? XXX */
1529 static NV_SINT32
1530 nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
1531 NV_UINT8 priority)
1532 {
1533 struct nve_softc *sc = ctx;
1534 struct ifnet *ifp;
1535 struct nve_rx_desc *desc;
1536 struct nve_map_buffer *buf;
1537 ADAPTER_READ_DATA *readdata;
1538 struct mbuf *m;
1539
1540 NVE_LOCK_ASSERT(sc);
1541
1542 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");
1543
1544 ifp = sc->ifp;
1545
1546 readdata = (ADAPTER_READ_DATA *) data;
1547 desc = readdata->pvID;
1548 buf = &desc->buf;
1549 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
1550
1551 if (success) {
1552 /* Sync DMA bounce buffer. */
1553 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
1554
1555 /* First mbuf in packet holds the ethernet and packet headers */
1556 buf->mbuf->m_pkthdr.rcvif = ifp;
1557 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
1558 readdata->ulTotalLength;
1559
1560 bus_dmamap_unload(sc->mtag, buf->map);
1561
1562 /* Blat the mbuf pointer, kernel will free the mbuf cluster */
1563 m = buf->mbuf;
1564 buf->mbuf = NULL;
1565
1566 /* Give mbuf to OS. */
1567 NVE_UNLOCK(sc);
1568 (*ifp->if_input)(ifp, m);
1569 NVE_LOCK(sc);
1570 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
1571 ifp->if_imcasts++;
1572
1573 } else {
1574 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
1575 bus_dmamap_unload(sc->mtag, buf->map);
1576 m_freem(buf->mbuf);
1577 buf->mbuf = NULL;
1578 }
1579
1580 sc->cur_rx = desc - sc->rx_desc;
1581 sc->pending_rxs--;
1582
1583 return (1);
1584 }
1585
1586 /* This gets called by NVIDIA API when the PHY link state changes */
1587 static NV_SINT32
1588 nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
1589 {
1590
1591 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");
1592
1593 return (1);
1594 }
1595
1596 /* Allocate a timer for the NVIDIA API */
1597 static NV_SINT32
1598 nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
1599 {
1600 struct nve_softc *sc = (struct nve_softc *)ctx;
1601
1602 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");
1603
1604 callout_init(&sc->ostimer, CALLOUT_MPSAFE);
1605 *timer = &sc->ostimer;
1606
1607 return (1);
1608 }
1609
1610 /* Free the timer */
1611 static NV_SINT32
1612 nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
1613 {
1614
1615 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");
1616
1617 callout_drain((struct callout *)timer);
1618
1619 return (1);
1620 }
1621
1622 /* Setup timer parameters */
1623 static NV_SINT32
1624 nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
1625 {
1626 struct nve_softc *sc = (struct nve_softc *)ctx;
1627
1628 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");
1629
1630 sc->ostimer_func = func;
1631 sc->ostimer_params = parameters;
1632
1633 return (1);
1634 }
1635
1636 /* Set the timer to go off */
1637 static NV_SINT32
1638 nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
1639 {
1640 struct nve_softc *sc = ctx;
1641
1642 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");
1643
1644 callout_reset((struct callout *)timer, delay, sc->ostimer_func,
1645 sc->ostimer_params);
1646
1647 return (1);
1648 }
1649
1650 /* Cancel the timer */
1651 static NV_SINT32
1652 nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
1653 {
1654
1655 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");
1656
1657 callout_stop((struct callout *)timer);
1658
1659 return (1);
1660 }
1661
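/* NVIDIA API OS callback: preprocess a received packet - not implemented by this wrapper */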
1662 static NV_SINT32
1663 nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
1664 NV_UINT8 *newbuffer, NV_UINT8 priority)
1665 {
1666
1667 /* Not implemented */
1668 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");
1669
1670 return (1);
1671 }
1672
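/* NVIDIA API OS callback: preprocess a received packet without priority queueing - not implemented */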
1673 static PNV_VOID
1674 nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
1675 {
1676
1677 /* Not implemented */
1678 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");
1679
1680 return (NULL);
1681 }
1682
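/* NVIDIA API OS callback: indicate received packets to the OS - not implemented by this wrapper */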
1683 static NV_SINT32
1684 nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
1685 {
1686
1687 /* Not implemented */
1688 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");
1689
1690 return (1);
1691 }
1692
1693 /* Allocate mutex context (already done in nve_attach) */
1694 static NV_SINT32
1695 nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
1696 {
1697 struct nve_softc *sc = (struct nve_softc *)ctx;
1698
1699 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");
1700
1701 *pLock = (void **)sc;
1702
1703 return (1);
1704 }
1705
1706 /* Obtain a spin lock */
1707 static NV_SINT32
1708 nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
1709 {
1710
1711 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");
1712
1713 return (1);
1714 }
1715
1716 /* Release lock */
1717 static NV_SINT32
1718 nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
1719 {
1720
1721 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");
1722
1723 return (1);
1724 }
1725
1726 /* I have no idea what this is for */
1727 static PNV_VOID
1728 nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
1729 {
1730
1731 /* Not implemented */
1732 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
1733 panic("nve: nve_osreturnbufvirtual not implemented\n");
1734
1735 return (NULL);
1736 }
1737
1738 /* --- End of NVOSAPI interface --- */