sys/dev/nve/if_nve.c
1 /*-
2 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
3 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
28 */
29 /*
30 * NVIDIA nForce MCP Networking Adapter driver
31 *
32 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA
33 * through their web site.
34 *
35 * All mainstream nForce and nForce2 motherboards are supported. This module
36 * is as stable as, and sometimes more stable than, the Linux version.
37 * (Recent Linux stability issues seem to be related to some issues with
38 * newer distributions using GCC 3.x; however, this doesn't appear to
39 * affect FreeBSD 5.x).
40 *
41 * In accordance with the NVIDIA distribution license it is necessary to
42 * link this module against the nvlibnet.o binary object included in the
43 * Linux driver source distribution. The binary component is not modified in
44 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
45 * linux kernel module "wrapper".
46 *
47 * The Linux driver uses a common code API that is shared between Win32 and
48 * i386 Linux. This abstracts the low level driver functions and uses
49 * callbacks and hooks to access the underlying hardware device. By using
50 * this same API in a FreeBSD kernel module it is possible to support the
51 * hardware without breaching the Linux source distributions licensing
52 * requirements, or obtaining the hardware programming specifications.
53 *
54 * Although not conventional, it works, and given the relatively small
55 * amount of hardware-centric code, it's hopefully no more buggy than its
56 * Linux counterpart.
57 *
58 * NVIDIA now supports the nForce3 AMD64 platform; however, I have been
59 * unable to access such a system to verify support. The code is
60 * reported to work with little modification when compiled with the AMD64
61 * version of the NVIDIA Linux library. All that should be necessary to make
62 * the driver work is to link it directly into the kernel, instead of as a
63 * module, and apply the docs/amd64.diff patch in this source distribution to
64 * the NVIDIA Linux driver source.
65 *
66 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
67 * as recent versions of DragonFly.
68 *
69 * Written by Quinton Dolan <q@onthenet.com.au>
70 * Portions based on existing FreeBSD network drivers.
71 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
72 */
73
74 #include <sys/cdefs.h>
75 __FBSDID("$FreeBSD: releng/8.3/sys/dev/nve/if_nve.c 230714 2012-01-29 01:22:48Z marius $");
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/sockio.h>
80 #include <sys/mbuf.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel.h>
83 #include <sys/socket.h>
84 #include <sys/sysctl.h>
85 #include <sys/queue.h>
86 #include <sys/module.h>
87
88 #include <net/if.h>
89 #include <net/if_arp.h>
90 #include <net/ethernet.h>
91 #include <net/if_dl.h>
92 #include <net/if_media.h>
93 #include <net/if_types.h>
94 #include <net/bpf.h>
95 #include <net/if_vlan_var.h>
96
97 #include <machine/bus.h>
98 #include <machine/resource.h>
99
100 #include <vm/vm.h> /* for vtophys */
101 #include <vm/pmap.h> /* for vtophys */
102 #include <sys/bus.h>
103 #include <sys/rman.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/mii/mii.h>
108 #include <dev/mii/miivar.h>
109 #include "miibus_if.h"
110
111 /* Include NVIDIA Linux driver header files */
112 #include <contrib/dev/nve/nvenet_version.h>
113 #define linux
114 #include <contrib/dev/nve/basetype.h>
115 #include <contrib/dev/nve/phy.h>
116 #include "os+%DIKED-nve.h"
117 #include <contrib/dev/nve/drvinfo.h>
118 #include <contrib/dev/nve/adapter.h>
119 #undef linux
120
121 #include <dev/nve/if_nvereg.h>
122
123 MODULE_DEPEND(nve, pci, 1, 1, 1);
124 MODULE_DEPEND(nve, ether, 1, 1, 1);
125 MODULE_DEPEND(nve, miibus, 1, 1, 1);
126
127 static int nve_probe(device_t);
128 static int nve_attach(device_t);
129 static int nve_detach(device_t);
130 static void nve_init(void *);
131 static void nve_init_locked(struct nve_softc *);
132 static void nve_stop(struct nve_softc *);
133 static int nve_shutdown(device_t);
134 static int nve_init_rings(struct nve_softc *);
135 static void nve_free_rings(struct nve_softc *);
136
137 static void nve_ifstart(struct ifnet *);
138 static void nve_ifstart_locked(struct ifnet *);
139 static int nve_ioctl(struct ifnet *, u_long, caddr_t);
140 static void nve_intr(void *);
141 static void nve_tick(void *);
142 static void nve_setmulti(struct nve_softc *);
143 static void nve_watchdog(struct nve_softc *);
144 static void nve_update_stats(struct nve_softc *);
145
146 static int nve_ifmedia_upd(struct ifnet *);
147 static void nve_ifmedia_upd_locked(struct ifnet *);
148 static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
149 static int nve_miibus_readreg(device_t, int, int);
150 static int nve_miibus_writereg(device_t, int, int, int);
151
152 static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
153 static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);
154
155 static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
156 static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
157 static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
158 static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
159 static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
160 static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
161 static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
162 static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
163 static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
164 static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
165 static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
166 static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
167 static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
168 static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
169 static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
170 static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);
171
172 static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
173 static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
174 static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
175 static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
176 static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
177 static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
178 static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);
179
180 static device_method_t nve_methods[] = {
181 /* Device interface */
182 DEVMETHOD(device_probe, nve_probe),
183 DEVMETHOD(device_attach, nve_attach),
184 DEVMETHOD(device_detach, nve_detach),
185 DEVMETHOD(device_shutdown, nve_shutdown),
186
187 /* MII interface */
188 DEVMETHOD(miibus_readreg, nve_miibus_readreg),
189 DEVMETHOD(miibus_writereg, nve_miibus_writereg),
190
191 DEVMETHOD_END
192 };
193
194 static driver_t nve_driver = {
195 "nve",
196 nve_methods,
197 sizeof(struct nve_softc)
198 };
199
200 static devclass_t nve_devclass;
201
202 static int nve_pollinterval = 0;
203 SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
204 &nve_pollinterval, 0, "delay between interface polls");
205
206 DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
207 DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);
208
209 static struct nve_type nve_devs[] = {
210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
211 "NVIDIA nForce MCP Networking Adapter"},
212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
213 "NVIDIA nForce2 MCP2 Networking Adapter"},
214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
215 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
217 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
219 "NVIDIA nForce3 MCP3 Networking Adapter"},
220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
221 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
223 "NVIDIA nForce3 MCP7 Networking Adapter"},
224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
225 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
227 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
228 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
229 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP10
230 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
231 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP11
232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
233 "NVIDIA nForce 430 MCP12 Networking Adapter"},
234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
235 "NVIDIA nForce 430 MCP13 Networking Adapter"},
236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
237 "NVIDIA nForce MCP55 Networking Adapter"},
238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
239 "NVIDIA nForce MCP55 Networking Adapter"},
240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
241 "NVIDIA nForce MCP61 Networking Adapter"},
242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
243 "NVIDIA nForce MCP61 Networking Adapter"},
244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
245 "NVIDIA nForce MCP61 Networking Adapter"},
246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
247 "NVIDIA nForce MCP61 Networking Adapter"},
248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
249 "NVIDIA nForce MCP65 Networking Adapter"},
250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
251 "NVIDIA nForce MCP65 Networking Adapter"},
252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
253 "NVIDIA nForce MCP65 Networking Adapter"},
254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
255 "NVIDIA nForce MCP65 Networking Adapter"},
256 {0, 0, NULL}
257 };
258
259 /* DMA MEM map callback function to get data segment physical address */
260 static void
261 nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error)
262 {
263 if (error)
264 return;
265
266 KASSERT(nsegs == 1,
267 ("Too many DMA segments returned when mapping DMA memory"));
268 *(bus_addr_t *)arg = segs->ds_addr;
269 }
270
271 /* DMA RX map callback function to get data segment physical address */
272 static void
273 nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
274 bus_size_t mapsize, int error)
275 {
276 if (error)
277 return;
278 *(bus_addr_t *)arg = segs->ds_addr;
279 }
280
281 /*
282 * DMA TX buffer callback function to allocate fragment data segment
283 * addresses
284 */
285 static void
286 nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error)
287 {
288 struct nve_tx_desc *info;
289
290 info = arg;
291 if (error)
292 return;
293 KASSERT(nsegs < NV_MAX_FRAGS,
294 ("Too many DMA segments returned when mapping mbuf"));
295 info->numfrags = nsegs;
296 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
297 }
298
299 /* Probe for supported hardware IDs */
300 static int
301 nve_probe(device_t dev)
302 {
303 struct nve_type *t;
304
305 t = nve_devs;
306 /* Check for matching PCI device IDs */
307 while (t->name != NULL) {
308 if ((pci_get_vendor(dev) == t->vid_id) &&
309 (pci_get_device(dev) == t->dev_id)) {
310 device_set_desc(dev, t->name);
311 return (BUS_PROBE_LOW_PRIORITY);
312 }
313 t++;
314 }
315
316 return (ENXIO);
317 }
318
319 /* Attach driver and initialise hardware for use */
320 static int
321 nve_attach(device_t dev)
322 {
323 u_char eaddr[ETHER_ADDR_LEN];
324 struct nve_softc *sc;
325 struct ifnet *ifp;
326 OS_API *osapi;
327 ADAPTER_OPEN_PARAMS OpenParams;
328 int error = 0, i, rid;
329
330 if (bootverbose)
331 device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION);
332
333 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");
334
335 sc = device_get_softc(dev);
336
337 /* Allocate mutex */
338 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
339 MTX_DEF);
340 callout_init_mtx(&sc->stat_callout, &sc->mtx, 0);
341
342 sc->dev = dev;
343
344 /* Preinitialize data structures */
345 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));
346
347 /* Enable bus mastering */
348 pci_enable_busmaster(dev);
349
350 /* Allocate memory mapped address space */
351 rid = NV_RID;
352 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
353 RF_ACTIVE);
354
355 if (sc->res == NULL) {
356 device_printf(dev, "couldn't map memory\n");
357 error = ENXIO;
358 goto fail;
359 }
360 sc->sc_st = rman_get_bustag(sc->res);
361 sc->sc_sh = rman_get_bushandle(sc->res);
362
363 /* Allocate interrupt */
364 rid = 0;
365 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
366 RF_SHAREABLE | RF_ACTIVE);
367
368 if (sc->irq == NULL) {
369 device_printf(dev, "couldn't map interrupt\n");
370 error = ENXIO;
371 goto fail;
372 }
373 /* Allocate DMA tags */
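/*
 * Three tags are created below: mtag maps individual mbufs (TX fragments
 * and RX buffers), rtag maps the RX descriptor ring and ttag maps the TX
 * descriptor ring.  All three are restricted to 32-bit physical addresses
 * (BUS_SPACE_MAXADDR_32BIT).
 */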
374 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
375 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
376 NV_MAX_FRAGS, MCLBYTES, 0,
377 busdma_lock_mutex, &Giant,
378 &sc->mtag);
379 if (error) {
380 device_printf(dev, "couldn't allocate dma tag\n");
381 goto fail;
382 }
383 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
384 BUS_SPACE_MAXADDR, NULL, NULL,
385 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
386 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
387 busdma_lock_mutex, &Giant,
388 &sc->rtag);
389 if (error) {
390 device_printf(dev, "couldn't allocate dma tag\n");
391 goto fail;
392 }
393 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
394 BUS_SPACE_MAXADDR, NULL, NULL,
395 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
396 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
397 busdma_lock_mutex, &Giant,
398 &sc->ttag);
399 if (error) {
400 device_printf(dev, "couldn't allocate dma tag\n");
401 goto fail;
402 }
403 /* Allocate DMA safe memory and get the DMA addresses. */
404 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
405 BUS_DMA_WAITOK, &sc->tmap);
406 if (error) {
407 device_printf(dev, "couldn't allocate dma memory\n");
408 goto fail;
409 }
410 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
411 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
412 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
413 &sc->tx_addr, 0);
414 if (error) {
415 device_printf(dev, "couldn't map dma memory\n");
416 goto fail;
417 }
418 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
419 BUS_DMA_WAITOK, &sc->rmap);
420 if (error) {
421 device_printf(dev, "couldn't allocate dma memory\n");
422 goto fail;
423 }
424 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
425 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
426 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
427 &sc->rx_addr, 0);
428 if (error) {
429 device_printf(dev, "couldn't map dma memory\n");
430 goto fail;
431 }
432 /* Initialize rings. */
433 if (nve_init_rings(sc)) {
434 device_printf(dev, "failed to init rings\n");
435 error = ENXIO;
436 goto fail;
437 }
438 /* Setup NVIDIA API callback routines */
439 osapi = &sc->osapi;
440 osapi->pOSCX = sc;
441 osapi->pfnAllocMemory = nve_osalloc;
442 osapi->pfnFreeMemory = nve_osfree;
443 osapi->pfnAllocMemoryEx = nve_osallocex;
444 osapi->pfnFreeMemoryEx = nve_osfreeex;
445 osapi->pfnClearMemory = nve_osclear;
446 osapi->pfnStallExecution = nve_osdelay;
447 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf;
448 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf;
449 osapi->pfnPacketWasSent = nve_ospackettx;
450 osapi->pfnPacketWasReceived = nve_ospacketrx;
451 osapi->pfnLinkStateHasChanged = nve_oslinkchg;
452 osapi->pfnAllocTimer = nve_osalloctimer;
453 osapi->pfnFreeTimer = nve_osfreetimer;
454 osapi->pfnInitializeTimer = nve_osinittimer;
455 osapi->pfnSetTimer = nve_ossettimer;
456 osapi->pfnCancelTimer = nve_oscanceltimer;
457 osapi->pfnPreprocessPacket = nve_ospreprocpkt;
458 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq;
459 osapi->pfnIndicatePackets = nve_osindicatepkt;
460 osapi->pfnLockAlloc = nve_oslockalloc;
461 osapi->pfnLockAcquire = nve_oslockacquire;
462 osapi->pfnLockRelease = nve_oslockrelease;
463 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt;
464
465 sc->linkup = FALSE;
466 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;
467
468 /* TODO - We don't support hardware offload yet */
469 sc->hwmode = 1;
470 sc->media = 0;
471
472 /* Set NVIDIA API startup parameters */
473 OpenParams.MaxDpcLoop = 2;
474 OpenParams.MaxRxPkt = RX_RING_SIZE;
475 OpenParams.MaxTxPkt = TX_RING_SIZE;
476 OpenParams.SentPacketStatusSuccess = 1;
477 OpenParams.SentPacketStatusFailure = 0;
478 OpenParams.MaxRxPktToAccumulate = 6;
479 OpenParams.ulPollInterval = nve_pollinterval;
480 OpenParams.SetForcedModeEveryNthRxPacket = 0;
481 OpenParams.SetForcedModeEveryNthTxPacket = 0;
482 OpenParams.RxForcedInterrupt = 0;
483 OpenParams.TxForcedInterrupt = 0;
484 OpenParams.pOSApi = osapi;
485 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
486 OpenParams.bASFEnabled = 0;
487 OpenParams.ulDescriptorVersion = sc->hwmode;
488 OpenParams.ulMaxPacketSize = sc->max_frame_size;
489 OpenParams.DeviceId = pci_get_device(dev);
490
491 /* Open NVIDIA Hardware API */
492 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
493 if (error) {
494 device_printf(dev,
495 "failed to open NVIDIA Hardware API: 0x%x\n", error);
496 goto fail;
497 }
498
499 /* TODO - Add support for MODE2 hardware offload */
500
501 bzero(&sc->adapterdata, sizeof(sc->adapterdata));
502
503 sc->adapterdata.ulMediaIF = sc->media;
504 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
505 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);
506
507 /* MAC is loaded backwards into h/w reg */
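/*
 * For example, if the hardware registers report 06:05:04:03:02:01, the
 * station address is really 01:02:03:04:05:06; the loop below reverses
 * the byte order into eaddr[].
 */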
508 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
509 for (i = 0; i < 6; i++) {
510 eaddr[i] = sc->original_mac_addr[5 - i];
511 }
512 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);
513
514 /* Display Ethernet address */
515 device_printf(dev, "Ethernet address %6D\n", eaddr, ":");
516
517 /* Allocate interface structures */
518 ifp = sc->ifp = if_alloc(IFT_ETHER);
519 if (ifp == NULL) {
520 device_printf(dev, "can not if_alloc()\n");
521 error = ENOSPC;
522 goto fail;
523 }
524
525 /* Setup interface parameters */
526 ifp->if_softc = sc;
527 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
528 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
529 ifp->if_ioctl = nve_ioctl;
530 ifp->if_start = nve_ifstart;
531 ifp->if_init = nve_init;
532 ifp->if_mtu = ETHERMTU;
533 ifp->if_baudrate = IF_Mbps(100);
534 IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);
535 ifp->if_snd.ifq_drv_maxlen = TX_RING_SIZE - 1;
536 IFQ_SET_READY(&ifp->if_snd);
537 ifp->if_capabilities |= IFCAP_VLAN_MTU;
538 ifp->if_capenable |= IFCAP_VLAN_MTU;
539
540 /* Attach device for MII interface to PHY */
541 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_attach\n");
542 error = mii_attach(dev, &sc->miibus, ifp, nve_ifmedia_upd,
543 nve_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
544 if (error != 0) {
545 device_printf(dev, "attaching PHYs failed\n");
546 goto fail;
547 }
548
549 /* Attach to OS's managers. */
550 ether_ifattach(ifp, eaddr);
551
552 /* Activate our interrupt handler - set up last to avoid lock issues */
553 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
554 NULL, nve_intr, sc, &sc->sc_ih);
555 if (error) {
556 device_printf(dev, "couldn't set up interrupt handler\n");
557 goto fail;
558 }
559 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");
560
561 fail:
562 if (error)
563 nve_detach(dev);
564
565 return (error);
566 }
567
568 /* Detach interface for module unload */
569 static int
570 nve_detach(device_t dev)
571 {
572 struct nve_softc *sc = device_get_softc(dev);
573 struct ifnet *ifp;
574
575 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));
576
577 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");
578
579 ifp = sc->ifp;
580
581 if (device_is_attached(dev)) {
582 ether_ifdetach(ifp);
583 NVE_LOCK(sc);
584 nve_stop(sc);
585 NVE_UNLOCK(sc);
586 callout_drain(&sc->stat_callout);
587 }
588
589 if (sc->miibus)
590 device_delete_child(dev, sc->miibus);
591 bus_generic_detach(dev);
592
593 /* Reload unreversed address back into MAC in original state */
594 if (sc->hwapi != NULL)
595 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
596 sc->original_mac_addr);
597
598 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
599 /* Detach from NVIDIA hardware API */
600 if (sc->hwapi != NULL && sc->hwapi->pfnClose != NULL)
601 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);
602 /* Release resources */
603 if (sc->sc_ih)
604 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
605 if (sc->irq)
606 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
607 if (sc->res)
608 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);
609
610 nve_free_rings(sc);
611
612 if (sc->rx_desc) {
613 bus_dmamap_unload(sc->rtag, sc->rmap);
614 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
615 bus_dmamap_destroy(sc->rtag, sc->rmap);
616 }
if (sc->tx_desc) {
bus_dmamap_unload(sc->ttag, sc->tmap);
bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
bus_dmamap_destroy(sc->ttag, sc->tmap);
}
617 if (sc->mtag)
618 bus_dma_tag_destroy(sc->mtag);
619 if (sc->ttag)
620 bus_dma_tag_destroy(sc->ttag);
621 if (sc->rtag)
622 bus_dma_tag_destroy(sc->rtag);
623
624 if (ifp)
625 if_free(ifp);
626 mtx_destroy(&sc->mtx);
627
628 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");
629
630 return (0);
631 }
632
633 /* Initialise interface and start it "RUNNING" */
634 static void
635 nve_init(void *xsc)
636 {
637 struct nve_softc *sc = xsc;
638
639 NVE_LOCK(sc);
640 nve_init_locked(sc);
641 NVE_UNLOCK(sc);
642 }
643
644 static void
645 nve_init_locked(struct nve_softc *sc)
646 {
647 struct ifnet *ifp;
648 int error;
649
650 NVE_LOCK_ASSERT(sc);
651 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);
652
653 ifp = sc->ifp;
654
655 /* Do nothing if already running */
656 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
657 return;
658
659 nve_stop(sc);
660 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");
661
662 nve_ifmedia_upd_locked(ifp);
663
664 /* Setup Hardware interface and allocate memory structures */
665 error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
666 0, /* force speed */
667 0, /* force full duplex */
668 0, /* force mode */
669 0, /* force async mode */
670 &sc->linkup);
671
672 if (error) {
673 device_printf(sc->dev,
674 "failed to start NVIDIA Hardware interface\n");
675 return;
676 }
677 /* Set the MAC address */
678 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp));
679 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
680 sc->hwapi->pfnStart(sc->hwapi->pADCX);
681
682 /* Setup multicast filter */
683 nve_setmulti(sc);
684
685 /* Update interface parameters */
686 ifp->if_drv_flags |= IFF_DRV_RUNNING;
687 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
688
689 callout_reset(&sc->stat_callout, hz, nve_tick, sc);
690
691 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");
692
693 return;
694 }
695
696 /* Stop interface activity, i.e. clear the "RUNNING" state */
697 static void
698 nve_stop(struct nve_softc *sc)
699 {
700 struct ifnet *ifp;
701
702 NVE_LOCK_ASSERT(sc);
703
704 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");
705
706 ifp = sc->ifp;
707 sc->tx_timer = 0;
708
709 /* Cancel tick timer */
710 callout_stop(&sc->stat_callout);
711
712 /* Stop hardware activity */
713 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
714 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);
715
716 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
717 /* Shutdown interface and deallocate memory buffers */
718 if (sc->hwapi->pfnDeinit)
719 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);
720
721 sc->linkup = 0;
722 sc->cur_rx = 0;
723 sc->pending_rxs = 0;
724 sc->pending_txs = 0;
725
726 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
727
728 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");
729
730 return;
731 }
732
733 /* Shutdown interface for unload/reboot */
734 static int
735 nve_shutdown(device_t dev)
736 {
737 struct nve_softc *sc;
738
739 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");
740
741 sc = device_get_softc(dev);
742
743 /* Stop hardware activity */
744 NVE_LOCK(sc);
745 nve_stop(sc);
746 NVE_UNLOCK(sc);
747
748 return (0);
749 }
750
751 /* Allocate and initialise the RX and TX ring buffers */
752 static int
753 nve_init_rings(struct nve_softc *sc)
754 {
755 int error, i;
756
757 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");
758
759 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;
760 /* Initialise RX ring */
761 for (i = 0; i < RX_RING_SIZE; i++) {
762 struct nve_rx_desc *desc = sc->rx_desc + i;
763 struct nve_map_buffer *buf = &desc->buf;
764
765 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
766 if (buf->mbuf == NULL) {
767 device_printf(sc->dev, "couldn't allocate mbuf\n");
768 nve_free_rings(sc);
769 return (ENOBUFS);
770 }
771 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
772 m_adj(buf->mbuf, ETHER_ALIGN);
773
774 error = bus_dmamap_create(sc->mtag, 0, &buf->map);
775 if (error) {
776 device_printf(sc->dev, "couldn't create dma map\n");
777 nve_free_rings(sc);
778 return (error);
779 }
780 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
781 nve_dmamap_rx_cb, &desc->paddr, 0);
782 if (error) {
783 device_printf(sc->dev, "couldn't dma map mbuf\n");
784 nve_free_rings(sc);
785 return (error);
786 }
787 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
788
789 desc->buflength = buf->mbuf->m_len;
790 desc->vaddr = mtod(buf->mbuf, caddr_t);
791 }
792 bus_dmamap_sync(sc->rtag, sc->rmap,
793 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
794
795 /* Initialize TX ring */
796 for (i = 0; i < TX_RING_SIZE; i++) {
797 struct nve_tx_desc *desc = sc->tx_desc + i;
798 struct nve_map_buffer *buf = &desc->buf;
799
800 buf->mbuf = NULL;
801
802 error = bus_dmamap_create(sc->mtag, 0, &buf->map);
803 if (error) {
804 device_printf(sc->dev, "couldn't create dma map\n");
805 nve_free_rings(sc);
806 return (error);
807 }
808 }
809 bus_dmamap_sync(sc->ttag, sc->tmap,
810 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
811
812 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");
813
814 return (error);
815 }
816
817 /* Free the RX and TX ring buffers */
818 static void
819 nve_free_rings(struct nve_softc *sc)
820 {
821 int i;
822
823 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");
824
825 for (i = 0; i < RX_RING_SIZE; i++) {
826 struct nve_rx_desc *desc = sc->rx_desc + i;
827 struct nve_map_buffer *buf = &desc->buf;
828
829 if (buf->mbuf) {
830 bus_dmamap_unload(sc->mtag, buf->map);
831 bus_dmamap_destroy(sc->mtag, buf->map);
832 m_freem(buf->mbuf);
833 }
834 buf->mbuf = NULL;
835 }
836
837 for (i = 0; i < TX_RING_SIZE; i++) {
838 struct nve_tx_desc *desc = sc->tx_desc + i;
839 struct nve_map_buffer *buf = &desc->buf;
840
841 if (buf->mbuf) {
842 bus_dmamap_unload(sc->mtag, buf->map);
843 bus_dmamap_destroy(sc->mtag, buf->map);
844 m_freem(buf->mbuf);
845 }
846 buf->mbuf = NULL;
847 }
848
849 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");
850 }
851
852 /* Main loop for sending packets from OS to interface */
853 static void
854 nve_ifstart(struct ifnet *ifp)
855 {
856 struct nve_softc *sc = ifp->if_softc;
857
858 NVE_LOCK(sc);
859 nve_ifstart_locked(ifp);
860 NVE_UNLOCK(sc);
861 }
862
863 static void
864 nve_ifstart_locked(struct ifnet *ifp)
865 {
866 struct nve_softc *sc = ifp->if_softc;
867 struct nve_map_buffer *buf;
868 struct mbuf *m0, *m;
869 struct nve_tx_desc *desc;
870 ADAPTER_WRITE_DATA txdata;
871 int error, i;
872
873 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");
874
875 NVE_LOCK_ASSERT(sc);
876
877 /* If link is down/busy or queue is empty do nothing */
878 if (ifp->if_drv_flags & IFF_DRV_OACTIVE ||
879 IFQ_DRV_IS_EMPTY(&ifp->if_snd))
880 return;
881
882 /* Transmit queued packets until sent or TX ring is full */
883 while (sc->pending_txs < TX_RING_SIZE) {
884 desc = sc->tx_desc + sc->cur_tx;
885 buf = &desc->buf;
886
887 /* Get next packet to send. */
888 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
889
890 /* If nothing to send, return. */
891 if (m0 == NULL)
892 return;
893
894 /*
895 * On nForce4, the chip doesn't interrupt on transmit,
896 * so try to flush transmitted packets from the queue
897 * if it's getting large (see note in nve_watchdog).
898 */
899 if (sc->pending_txs > TX_RING_SIZE/2) {
900 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
901 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
902 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
903 }
904
905 /* Map MBUF for DMA access */
906 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
907 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
908
909 if (error && error != EFBIG) {
910 m_freem(m0);
911 sc->tx_errors++;
912 continue;
913 }
914 /*
915 * Packet has too many fragments - defrag into new mbuf
916 * cluster
917 */
918 if (error) {
919 m = m_defrag(m0, M_DONTWAIT);
920 if (m == NULL) {
921 m_freem(m0);
922 sc->tx_errors++;
923 continue;
924 }
925 m0 = m;
926
927 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
928 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
929 if (error) {
930 m_freem(m);
931 sc->tx_errors++;
932 continue;
933 }
934 }
935 /* Do sync on DMA bounce buffer */
936 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);
937
938 buf->mbuf = m0;
939 txdata.ulNumberOfElements = desc->numfrags;
940 txdata.pvID = (PVOID)desc;
941
942 /* Put fragments into API element list */
943 txdata.ulTotalLength = buf->mbuf->m_pkthdr.len; /* whole chain, not just the first mbuf */
944 for (i = 0; i < desc->numfrags; i++) {
945 txdata.sElement[i].ulLength =
946 (ulong)desc->frags[i].ds_len;
947 txdata.sElement[i].pPhysical =
948 (PVOID)desc->frags[i].ds_addr;
949 }
950
951 /* Send packet to Nvidia API for transmission */
952 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);
953
954 switch (error) {
955 case ADAPTERERR_NONE:
956 /* Packet was queued in API TX queue successfully */
957 sc->pending_txs++;
958 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;
959 break;
960
961 case ADAPTERERR_TRANSMIT_QUEUE_FULL:
962 /* The API TX queue is full - requeue the packet */
963 device_printf(sc->dev,
964 "nve_ifstart: transmit queue is full\n");
965 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
966 bus_dmamap_unload(sc->mtag, buf->map);
967 IFQ_DRV_PREPEND(&ifp->if_snd, buf->mbuf);
968 buf->mbuf = NULL;
969 return;
970
971 default:
972 /* The API failed to queue/send the packet so dump it */
973 device_printf(sc->dev, "nve_ifstart: transmit error\n");
974 bus_dmamap_unload(sc->mtag, buf->map);
975 m_freem(buf->mbuf);
976 buf->mbuf = NULL;
977 sc->tx_errors++;
978 return;
979 }
980 /* Set watchdog timer. */
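/*
 * nve_tick() decrements tx_timer once per second and calls
 * nve_watchdog() when it reaches zero.
 */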
981 sc->tx_timer = 8;
982
983 /* Copy packet to BPF tap */
984 BPF_MTAP(ifp, m0);
985 }
986 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
987
988 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
989 }
990
991 /* Handle IOCTL events */
992 static int
993 nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
994 {
995 struct nve_softc *sc = ifp->if_softc;
996 struct ifreq *ifr = (struct ifreq *) data;
997 struct mii_data *mii;
998 int error = 0;
999
1000 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");
1001
1002 switch (command) {
1003 case SIOCSIFMTU:
1004 /* Set MTU size */
1005 NVE_LOCK(sc);
1006 if (ifp->if_mtu == ifr->ifr_mtu) {
1007 NVE_UNLOCK(sc);
1008 break;
1009 }
1010 if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
1011 ifp->if_mtu = ifr->ifr_mtu;
1012 nve_stop(sc);
1013 nve_init_locked(sc);
1014 } else
1015 error = EINVAL;
1016 NVE_UNLOCK(sc);
1017 break;
1018
1019 case SIOCSIFFLAGS:
1020 /* Setup interface flags */
1021 NVE_LOCK(sc);
1022 if (ifp->if_flags & IFF_UP) {
1023 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1024 nve_init_locked(sc);
1025 NVE_UNLOCK(sc);
1026 break;
1027 }
1028 } else {
1029 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1030 nve_stop(sc);
1031 NVE_UNLOCK(sc);
1032 break;
1033 }
1034 }
1035 /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
1036 nve_setmulti(sc);
1037 NVE_UNLOCK(sc);
1038 break;
1039
1040 case SIOCADDMULTI:
1041 case SIOCDELMULTI:
1042 /* Setup multicast filter */
1043 NVE_LOCK(sc);
1044 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1045 nve_setmulti(sc);
1046 }
1047 NVE_UNLOCK(sc);
1048 break;
1049
1050 case SIOCGIFMEDIA:
1051 case SIOCSIFMEDIA:
1052 /* Get/Set interface media parameters */
1053 mii = device_get_softc(sc->miibus);
1054 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1055 break;
1056
1057 default:
1058 /* Everything else we forward to generic ether ioctl */
1059 error = ether_ioctl(ifp, command, data);
1060 break;
1061 }
1062
1063 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");
1064
1065 return (error);
1066 }
1067
1068 /* Interrupt service routine */
1069 static void
1070 nve_intr(void *arg)
1071 {
1072 struct nve_softc *sc = arg;
1073 struct ifnet *ifp = sc->ifp;
1074
1075 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");
1076
1077 NVE_LOCK(sc);
1078 if ((ifp->if_flags & IFF_UP) == 0) {
1079 nve_stop(sc);
1080 NVE_UNLOCK(sc);
1081 return;
1082 }
1083 /* Handle interrupt event */
1084 if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
1085 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
1086 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
1087 }
1088 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1089 nve_ifstart_locked(ifp);
1090
1091 /* If no pending packets we don't need a timeout */
1092 if (sc->pending_txs == 0)
1093 sc->tx_timer = 0;
1094 NVE_UNLOCK(sc);
1095
1096 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");
1097
1098 return;
1099 }
1100
1101 /* Setup multicast filters */
1102 static void
1103 nve_setmulti(struct nve_softc *sc)
1104 {
1105 struct ifnet *ifp;
1106 struct ifmultiaddr *ifma;
1107 PACKET_FILTER hwfilter;
1108 int i;
1109 u_int8_t andaddr[6], oraddr[6];
1110
1111 NVE_LOCK_ASSERT(sc);
1112
1113 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");
1114
1115 ifp = sc->ifp;
1116
1117 /* Initialize filter */
1118 hwfilter.ulFilterFlags = 0;
1119 for (i = 0; i < 6; i++) {
1120 hwfilter.acMulticastAddress[i] = 0;
1121 hwfilter.acMulticastMask[i] = 0;
1122 }
1123
1124 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1125 /* Accept all packets */
1126 hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
1127 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
1128 return;
1129 }
1130 /* Setup multicast filter */
for (i = 0; i < 6; i++) {
andaddr[i] = 0xff;
oraddr[i] = 0x00;
}
1131 if_maddr_rlock(ifp);
1132 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1133 u_char *addrp;
1134
1135 if (ifma->ifma_addr->sa_family != AF_LINK)
1136 continue;
1137
1138 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
1139 for (i = 0; i < 6; i++) {
1140 u_int8_t mcaddr = addrp[i];
1141 andaddr[i] &= mcaddr;
1142 oraddr[i] |= mcaddr;
1143 }
1144 }
1145 if_maddr_runlock(ifp);
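/*
 * andaddr now has a 1 in every bit position that is set in all of the
 * multicast addresses, and oraddr in every position set in any of them.
 * A bit that is identical across all addresses yields a 1 in the
 * compare mask (andaddr | ~oraddr) and its common value in the compare
 * address (andaddr & oraddr).
 */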
1146 for (i = 0; i < 6; i++) {
1147 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
1148 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
1149 }
1150
1151 /* Send filter to NVIDIA API */
1152 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
1153
1154 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");
1155
1156 return;
1157 }
1158
1159 /* Change the current media/mediaopts */
1160 static int
1161 nve_ifmedia_upd(struct ifnet *ifp)
1162 {
1163 struct nve_softc *sc = ifp->if_softc;
1164
1165 NVE_LOCK(sc);
1166 nve_ifmedia_upd_locked(ifp);
1167 NVE_UNLOCK(sc);
1168 return (0);
1169 }
1170
1171 static void
1172 nve_ifmedia_upd_locked(struct ifnet *ifp)
1173 {
1174 struct nve_softc *sc = ifp->if_softc;
1175 struct mii_data *mii;
1176 struct mii_softc *miisc;
1177
1178 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");
1179
1180 NVE_LOCK_ASSERT(sc);
1181 mii = device_get_softc(sc->miibus);
1182
1183 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1184 mii_phy_reset(miisc);
1185 mii_mediachg(mii);
1186 }
1187
1188 /* Update current miibus PHY status of media */
1189 static void
1190 nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1191 {
1192 struct nve_softc *sc;
1193 struct mii_data *mii;
1194
1195 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");
1196
1197 sc = ifp->if_softc;
1198 NVE_LOCK(sc);
1199 mii = device_get_softc(sc->miibus);
1200 mii_pollstat(mii);
1201
1202 ifmr->ifm_active = mii->mii_media_active;
1203 ifmr->ifm_status = mii->mii_media_status;
1204 NVE_UNLOCK(sc);
1205
1206 return;
1207 }
1208
1209 /* miibus tick timer - maintain link status */
1210 static void
1211 nve_tick(void *xsc)
1212 {
1213 struct nve_softc *sc = xsc;
1214 struct mii_data *mii;
1215 struct ifnet *ifp;
1216
1217 NVE_LOCK_ASSERT(sc);
1218
1219 ifp = sc->ifp;
1220 nve_update_stats(sc);
1221
1222 mii = device_get_softc(sc->miibus);
1223 mii_tick(mii);
1224
1225 if (mii->mii_media_status & IFM_ACTIVE &&
1226 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1227 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1228 nve_ifstart_locked(ifp);
1229 }
1230
1231 if (sc->tx_timer > 0 && --sc->tx_timer == 0)
1232 nve_watchdog(sc);
1233 callout_reset(&sc->stat_callout, hz, nve_tick, sc);
1234
1235 return;
1236 }
1237
1238 /* Update ifnet data structure with collected interface stats from API */
1239 static void
1240 nve_update_stats(struct nve_softc *sc)
1241 {
1242 struct ifnet *ifp = sc->ifp;
1243 ADAPTER_STATS stats;
1244
1245 NVE_LOCK_ASSERT(sc);
1246
1247 if (sc->hwapi) {
1248 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);
1249
1250 ifp->if_ipackets = stats.ulSuccessfulReceptions;
1251 ifp->if_ierrors = stats.ulMissedFrames +
1252 stats.ulFailedReceptions +
1253 stats.ulCRCErrors +
1254 stats.ulFramingErrors +
1255 stats.ulOverFlowErrors;
1256
1257 ifp->if_opackets = stats.ulSuccessfulTransmissions;
1258 ifp->if_oerrors = sc->tx_errors +
1259 stats.ulFailedTransmissions +
1260 stats.ulRetryErrors +
1261 stats.ulUnderflowErrors +
1262 stats.ulLossOfCarrierErrors +
1263 stats.ulLateCollisionErrors;
1264
1265 ifp->if_collisions = stats.ulLateCollisionErrors;
1266 }
1267
1268 return;
1269 }
1270
1271 /* miibus Read PHY register wrapper - calls Nvidia API entry point */
1272 static int
1273 nve_miibus_readreg(device_t dev, int phy, int reg)
1274 {
1275 struct nve_softc *sc = device_get_softc(dev);
1276 ULONG data;
1277
1278 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");
1279
1280 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);
1281
1282 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");
1283
1284 return (data);
1285 }
1286
1287 /* miibus Write PHY register wrapper - calls Nvidia API entry point */
1288 static int
1289 nve_miibus_writereg(device_t dev, int phy, int reg, int data)
1290 {
1291 struct nve_softc *sc = device_get_softc(dev);
1292
1293 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");
1294
1295 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);
1296
1297 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");
1298
1299 return 0;
1300 }
1301
1302 /* Watchdog timer to prevent PHY lockups */
1303 static void
1304 nve_watchdog(struct nve_softc *sc)
1305 {
1306 struct ifnet *ifp;
1307 int pending_txs_start;
1308
1309 NVE_LOCK_ASSERT(sc);
1310 ifp = sc->ifp;
1311
1312 /*
1313 * The nvidia driver blob defers tx completion notifications.
1314 * Thus, sometimes the watchdog timer will go off when the
1315 * tx engine is fine, but the tx completions are just deferred.
1316 * Try kicking the driver blob to clear out any pending tx
1317 * completions. If that clears up any of the pending tx
1318 * operations, then just return without printing the warning
1319 * message or resetting the adapter, as we can then conclude
1320 * the chip hasn't actually crashed (it's still sending packets).
1321 */
1322 pending_txs_start = sc->pending_txs;
1323 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
1324 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
1325 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
1326 if (sc->pending_txs < pending_txs_start)
1327 return;
1328
1329 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);
1330
1331 sc->tx_errors++;
1332
1333 nve_stop(sc);
1334 nve_init_locked(sc);
1335
1336 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1337 nve_ifstart_locked(ifp);
1338 }
1339
1340 /* --- Start of NVOSAPI interface --- */
1341
1342 /* Allocate DMA enabled general use memory for API */
1343 static NV_SINT32
1344 nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
1345 {
1346 struct nve_softc *sc;
1347 bus_addr_t mem_physical;
1348
1349 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);
1350
1351 sc = (struct nve_softc *)ctx;
1352
1353 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
1354 M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
1355
1356 if (!mem->pLogical) {
1357 device_printf(sc->dev, "memory allocation failed\n");
1358 return (0);
1359 }
1360 memset(mem->pLogical, 0, (ulong)mem->uiLength);
1361 mem_physical = vtophys(mem->pLogical);
1362 mem->pPhysical = (PVOID)mem_physical;
1363
1364 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
1365 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);
1366
1367 return (1);
1368 }
1369
1370 /* Free allocated memory */
1371 static NV_SINT32
1372 nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
1373 {
1374 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
1375 (uint)mem->pLogical, (uint) mem->uiLength);
1376
1377 contigfree(mem->pLogical, mem->uiLength, M_DEVBUF);
1378 return (1);
1379 }
1380
1381 /* Copied directly from nvnet.c */
1382 static NV_SINT32
1383 nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
1384 {
1385 MEMORY_BLOCK mem_block;
1386
1387 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");
1388
1389 mem_block_ex->pLogical = NULL;
1390 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;
1391
1392 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
1393 (mem_block_ex->AlignmentSize > 1)) {
1394 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n",
1395 mem_block_ex->AlignmentSize);
1396 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;
1397 }
1398 mem_block.uiLength = mem_block_ex->uiLengthOrig;
1399
1400 if (nve_osalloc(ctx, &mem_block) == 0) {
1401 return (0);
1402 }
1403 mem_block_ex->pLogicalOrig = mem_block.pLogical;
1404 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
1405 mem_block_ex->pPhysicalOrigHigh = 0;
1406
1407 mem_block_ex->pPhysical = mem_block.pPhysical;
1408 mem_block_ex->pLogical = mem_block.pLogical;
1409
1410 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
1411 unsigned int offset;
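/*
 * The block was over-allocated by AlignmentSize bytes above, so both
 * addresses can be bumped to the next aligned boundary; e.g. a physical
 * address of 0x1004 with AlignmentSize 0x10 gives offset 4 and both
 * pointers advance by 0xc to 0x1010.
 */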
1412 offset = mem_block_ex->pPhysicalOrigLow &
1413 (mem_block_ex->AlignmentSize - 1);
1414
1415 if (offset) {
1416 mem_block_ex->pPhysical =
1417 (PVOID)((ulong)mem_block_ex->pPhysical +
1418 mem_block_ex->AlignmentSize - offset);
1419 mem_block_ex->pLogical =
1420 (PVOID)((ulong)mem_block_ex->pLogical +
1421 mem_block_ex->AlignmentSize - offset);
1422 } /* if (offset) */
1423 } /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
1424 return (1);
1425 }
1426
1427 /* Copied directly from nvnet.c */
1428 static NV_SINT32
1429 nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
1430 {
1431 MEMORY_BLOCK mem_block;
1432
1433 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");
1434
1435 mem_block.pLogical = mem_block_ex->pLogicalOrig;
1436 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
1437 mem_block.uiLength = mem_block_ex->uiLengthOrig;
1438
1439 return (nve_osfree(ctx, &mem_block));
1440 }
1441
1442 /* Clear memory region */
1443 static NV_SINT32
1444 nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
1445 {
1446 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
1447 memset(mem, 0, length);
1448 return (1);
1449 }
1450
1451 /* Busy-wait for the given number of microseconds */
1452 static NV_SINT32
1453 nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
1454 {
1455 DELAY(usec);
1456 return (1);
1457 }
1458
1459 /* Allocate memory for rx buffer */
1460 static NV_SINT32
1461 nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
1462 {
1463 struct nve_softc *sc = ctx;
1464 struct nve_rx_desc *desc;
1465 struct nve_map_buffer *buf;
1466 int error;
1467
1468 if (device_is_attached(sc->dev))
1469 NVE_LOCK_ASSERT(sc);
1470
1471 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");
1472
1473 if (sc->pending_rxs == RX_RING_SIZE) {
1474 device_printf(sc->dev, "rx ring buffer is full\n");
1475 goto fail;
1476 }
1477 desc = sc->rx_desc + sc->cur_rx;
1478 buf = &desc->buf;
1479
1480 if (buf->mbuf == NULL) {
1481 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1482 if (buf->mbuf == NULL) {
1483 device_printf(sc->dev, "failed to allocate memory\n");
1484 goto fail;
1485 }
1486 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
1487 m_adj(buf->mbuf, ETHER_ALIGN);
1488
1489 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
1490 nve_dmamap_rx_cb, &desc->paddr, 0);
1491 if (error) {
1492 device_printf(sc->dev, "failed to dmamap mbuf\n");
1493 m_freem(buf->mbuf);
1494 buf->mbuf = NULL;
1495 goto fail;
1496 }
1497 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
1498 desc->buflength = buf->mbuf->m_len;
1499 desc->vaddr = mtod(buf->mbuf, caddr_t);
1500 }
1501 sc->pending_rxs++;
1502 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;
1503
1504 mem->pLogical = (void *)desc->vaddr;
1505 mem->pPhysical = (void *)desc->paddr;
1506 mem->uiLength = desc->buflength;
1507 *id = (void *)desc;
1508
1509 return (1);
1510
1511 fail:
1512 return (0);
1513 }
1514
1515 /* Free the rx buffer */
1516 static NV_SINT32
1517 nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
1518 {
1519 struct nve_softc *sc = ctx;
1520 struct nve_rx_desc *desc;
1521 struct nve_map_buffer *buf;
1522
1523 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");
1524
1525 desc = (struct nve_rx_desc *) id;
1526 buf = &desc->buf;
1527
1528 if (buf->mbuf) {
1529 bus_dmamap_unload(sc->mtag, buf->map);
1530 bus_dmamap_destroy(sc->mtag, buf->map);
1531 m_freem(buf->mbuf);
1532 }
1533 sc->pending_rxs--;
1534 buf->mbuf = NULL;
1535
1536 return (1);
1537 }
1538
1539 /* This gets called by the Nvidia API after our TX packet has been sent */
1540 static NV_SINT32
1541 nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
1542 {
1543 struct nve_softc *sc = ctx;
1544 struct nve_map_buffer *buf;
1545 struct nve_tx_desc *desc = (struct nve_tx_desc *) id;
1546 struct ifnet *ifp;
1547
1548 NVE_LOCK_ASSERT(sc);
1549
1550 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");
1551
1552 ifp = sc->ifp;
1553 buf = &desc->buf;
1554 sc->pending_txs--;
1555
1556 /* Unload and free mbuf cluster */
1557 if (buf->mbuf == NULL)
1558 goto fail;
1559
1560 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
1561 bus_dmamap_unload(sc->mtag, buf->map);
1562 m_freem(buf->mbuf);
1563 buf->mbuf = NULL;
1564
1565 /* Send more packets if we have them */
1566 if (sc->pending_txs < TX_RING_SIZE)
1567 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1568
1569 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE)
1570 nve_ifstart_locked(ifp);
1571
1572 fail:
1573
1574 return (1);
1575 }
1576
1577 /* This gets called by the Nvidia API when a new packet has been received */
1578 /* XXX What is newbuf used for? XXX */
1579 static NV_SINT32
1580 nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
1581 NV_UINT8 priority)
1582 {
1583 struct nve_softc *sc = ctx;
1584 struct ifnet *ifp;
1585 struct nve_rx_desc *desc;
1586 struct nve_map_buffer *buf;
1587 ADAPTER_READ_DATA *readdata;
1588 struct mbuf *m;
1589
1590 NVE_LOCK_ASSERT(sc);
1591
1592 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");
1593
1594 ifp = sc->ifp;
1595
1596 readdata = (ADAPTER_READ_DATA *) data;
1597 desc = readdata->pvID;
1598 buf = &desc->buf;
1599 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
1600
1601 if (success) {
1602 /* Sync DMA bounce buffer. */
1603 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
1604
1605 /* First mbuf in packet holds the ethernet and packet headers */
1606 buf->mbuf->m_pkthdr.rcvif = ifp;
1607 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
1608 readdata->ulTotalLength;
1609
1610 bus_dmamap_unload(sc->mtag, buf->map);
1611
1612 /* Blat the mbuf pointer, kernel will free the mbuf cluster */
1613 m = buf->mbuf;
1614 buf->mbuf = NULL;
1615
1616 /* Give mbuf to OS. */
1617 NVE_UNLOCK(sc);
1618 (*ifp->if_input)(ifp, m);
1619 NVE_LOCK(sc);
1620 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
1621 ifp->if_imcasts++;
1622
1623 } else {
1624 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
1625 bus_dmamap_unload(sc->mtag, buf->map);
1626 m_freem(buf->mbuf);
1627 buf->mbuf = NULL;
1628 }
1629
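/* Pointer arithmetic recovers this descriptor's ring index */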
1630 sc->cur_rx = desc - sc->rx_desc;
1631 sc->pending_rxs--;
1632
1633 return (1);
1634 }
1635
1636 /* This gets called by NVIDIA API when the PHY link state changes */
1637 static NV_SINT32
1638 nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
1639 {
1640
1641 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");
1642
1643 return (1);
1644 }
1645
1646 /* Allocate a timer for the NVIDIA API to use */
1647 static NV_SINT32
1648 nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
1649 {
1650 struct nve_softc *sc = (struct nve_softc *)ctx;
1651
1652 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");
1653
1654 callout_init(&sc->ostimer, CALLOUT_MPSAFE);
1655 *timer = &sc->ostimer;
1656
1657 return (1);
1658 }
1659
1660 /* Free the timer */
1661 static NV_SINT32
1662 nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
1663 {
1664
1665 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");
1666
1667 callout_drain((struct callout *)timer);
1668
1669 return (1);
1670 }
1671
1672 /* Setup timer parameters */
1673 static NV_SINT32
1674 nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
1675 {
1676 struct nve_softc *sc = (struct nve_softc *)ctx;
1677
1678 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");
1679
1680 sc->ostimer_func = func;
1681 sc->ostimer_params = parameters;
1682
1683 return (1);
1684 }
1685
1686 /* Set the timer to go off */
1687 static NV_SINT32
1688 nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
1689 {
1690 struct nve_softc *sc = ctx;
1691
1692 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");
1693
1694 callout_reset((struct callout *)timer, delay, sc->ostimer_func,
1695 sc->ostimer_params);
1696
1697 return (1);
1698 }
1699
1700 /* Cancel the timer */
1701 static NV_SINT32
1702 nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
1703 {
1704
1705 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");
1706
1707 callout_stop((struct callout *)timer);
1708
1709 return (1);
1710 }
1711
1712 static NV_SINT32
1713 nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
1714 NV_UINT8 *newbuffer, NV_UINT8 priority)
1715 {
1716
1717 /* Not implemented */
1718 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");
1719
1720 return (1);
1721 }
1722
1723 static PNV_VOID
1724 nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
1725 {
1726
1727 /* Not implemented */
1728 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");
1729
1730 return (NULL);
1731 }
1732
1733 static NV_SINT32
1734 nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
1735 {
1736
1737 /* Not implemented */
1738 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");
1739
1740 return (1);
1741 }
1742
1743 /* Allocate mutex context (already done in nve_attach) */
1744 static NV_SINT32
1745 nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
1746 {
1747 struct nve_softc *sc = (struct nve_softc *)ctx;
1748
1749 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");
1750
1751 *pLock = (void **)sc;
1752
1753 return (1);
1754 }
1755
1756 /* Obtain a spin lock */
1757 static NV_SINT32
1758 nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
1759 {
1760
1761 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");
1762
1763 return (1);
1764 }
1765
1766 /* Release lock */
1767 static NV_SINT32
1768 nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
1769 {
1770
1771 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");
1772
1773 return (1);
1774 }
1775
1776 /* I have no idea what this is for */
1777 static PNV_VOID
1778 nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
1779 {
1780
1781 /* Not implemented */
1782 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
1783 panic("nve: nve_osreturnbufvirt not implemented\n");
1784
1785 return (NULL);
1786 }
1787
1788 /* --- End of NVOSAPI interface --- */