FreeBSD/Linux Kernel Cross Reference
sys/dev/nve/if_nve.c
/*-
 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */
/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by
 * NVIDIA through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version.
 * (Recent Linux stability issues seem to be related to newer distributions
 * using GCC 3.x; however, this does not appear to affect FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it's hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform, although I have been
 * unable to access such a system to verify this. The code is reported to
 * work with little modification when compiled with the AMD64 version of
 * the NVIDIA Linux library. All that should be necessary to make the
 * driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution
 * to the NVIDIA Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#include <contrib/dev/nve/nvenet_version.h>
#define	linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef	linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);

static int nve_probe(device_t);
static int nve_attach(device_t);
static int nve_detach(device_t);
static void nve_init(void *);
static void nve_init_locked(struct nve_softc *);
static void nve_stop(struct nve_softc *);
static int nve_shutdown(device_t);
static int nve_init_rings(struct nve_softc *);
static void nve_free_rings(struct nve_softc *);

static void nve_ifstart(struct ifnet *);
static void nve_ifstart_locked(struct ifnet *);
static int nve_ioctl(struct ifnet *, u_long, caddr_t);
static void nve_intr(void *);
static void nve_tick(void *);
static void nve_setmulti(struct nve_softc *);
static void nve_watchdog(struct ifnet *);
static void nve_update_stats(struct nve_softc *);

static int nve_ifmedia_upd(struct ifnet *);
static void nve_ifmedia_upd_locked(struct ifnet *);
static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int nve_miibus_readreg(device_t, int, int);
static void nve_miibus_writereg(device_t, int, int, int);

static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *,
    NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *,
    NV_UINT8);
static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);

static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

	{0, 0}
};

static driver_t nve_driver = {
	"nve",
	nve_methods,
	sizeof(struct nve_softc)
};

static devclass_t nve_devclass;

static int nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
    &nve_pollinterval, 0, "delay between interface polls");
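/*
 * The poll interval is read once per device at attach time and handed to
 * the NVIDIA API as ulPollInterval below; e.g. setting
 * "sysctl hw.nve_pollinterval=100" only affects devices attached afterwards.
 */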

DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);
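/*
 * Example usage: assuming the stock module name, the driver is built as
 * if_nve.ko and loaded with "kldload if_nve", or at boot via
 * if_nve_load="YES" in loader.conf(5).
 */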

static struct nve_type nve_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP10
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP11
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{0, 0, NULL}
};

/* DMA MEM map callback function to get data segment physical address */
static void
nve_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1,
	    ("Too many DMA segments returned when mapping DMA memory"));
	*(bus_addr_t *)arg = segs->ds_addr;
}

/* DMA RX map callback function to get data segment physical address */
static void
nve_dmamap_rx_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/*
 * DMA TX buffer callback function to allocate fragment data segment
 * addresses
 */
static void
nve_dmamap_tx_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct nve_tx_desc *info;

	info = arg;
	if (error)
		return;
	KASSERT(nsegs < NV_MAX_FRAGS,
	    ("Too many DMA segments returned when mapping mbuf"));
	info->numfrags = nsegs;
	bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
}

/* Probe for supported hardware IDs */
static int
nve_probe(device_t dev)
{
	struct nve_type *t;

	t = nve_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_LOW_PRIORITY);
		}
		t++;
	}

	return (ENXIO);
}

/* Attach driver and initialise hardware for use */
static int
nve_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct nve_softc *sc;
	struct ifnet *ifp;
	OS_API *osapi;
	ADAPTER_OPEN_PARAMS OpenParams;
	int error = 0, i, rid;

	if (bootverbose)
		device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");

	sc = device_get_softc(dev);

	/* Allocate mutex */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->stat_callout, &sc->mtx, 0);

	sc->dev = dev;

	/* Preinitialize data structures */
	bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate memory mapped address space */
	rid = NV_RID;
	sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	    RF_ACTIVE);

	if (sc->res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}
	/* Allocate DMA tags */
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
	    NV_MAX_FRAGS, MCLBYTES, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->mtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->rtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->ttag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_WAITOK, &sc->tmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
	    &sc->tx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_WAITOK, &sc->rmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
	    &sc->rx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	/* Initialize rings. */
	if (nve_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");
		error = ENXIO;
		goto fail;
	}
	/* Setup NVIDIA API callback routines */
	osapi = &sc->osapi;
	osapi->pOSCX = sc;
	osapi->pfnAllocMemory = nve_osalloc;
	osapi->pfnFreeMemory = nve_osfree;
	osapi->pfnAllocMemoryEx = nve_osallocex;
	osapi->pfnFreeMemoryEx = nve_osfreeex;
	osapi->pfnClearMemory = nve_osclear;
	osapi->pfnStallExecution = nve_osdelay;
	osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf;
	osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf;
	osapi->pfnPacketWasSent = nve_ospackettx;
	osapi->pfnPacketWasReceived = nve_ospacketrx;
	osapi->pfnLinkStateHasChanged = nve_oslinkchg;
	osapi->pfnAllocTimer = nve_osalloctimer;
	osapi->pfnFreeTimer = nve_osfreetimer;
	osapi->pfnInitializeTimer = nve_osinittimer;
	osapi->pfnSetTimer = nve_ossettimer;
	osapi->pfnCancelTimer = nve_oscanceltimer;
	osapi->pfnPreprocessPacket = nve_ospreprocpkt;
	osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq;
	osapi->pfnIndicatePackets = nve_osindicatepkt;
	osapi->pfnLockAlloc = nve_oslockalloc;
	osapi->pfnLockAcquire = nve_oslockacquire;
	osapi->pfnLockRelease = nve_oslockrelease;
	osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt;

	sc->linkup = FALSE;
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;
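	/*
	 * With the default 1500-byte MTU this works out to
	 * 1500 + 14 + 4 = 1518 bytes (header plus FCS, assuming FCS_LEN is
	 * the 4-byte Ethernet CRC), i.e. the MAX_PACKET_SIZE_1518 limit
	 * that nve_ioctl() enforces for SIOCSIFMTU.
	 */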

	/* TODO - We don't support hardware offload yet */
	sc->hwmode = 1;
	sc->media = 0;

	/* Set NVIDIA API startup parameters */
	OpenParams.MaxDpcLoop = 2;
	OpenParams.MaxRxPkt = RX_RING_SIZE;
	OpenParams.MaxTxPkt = TX_RING_SIZE;
	OpenParams.SentPacketStatusSuccess = 1;
	OpenParams.SentPacketStatusFailure = 0;
	OpenParams.MaxRxPktToAccumulate = 6;
	OpenParams.ulPollInterval = nve_pollinterval;
	OpenParams.SetForcedModeEveryNthRxPacket = 0;
	OpenParams.SetForcedModeEveryNthTxPacket = 0;
	OpenParams.RxForcedInterrupt = 0;
	OpenParams.TxForcedInterrupt = 0;
	OpenParams.pOSApi = osapi;
	OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
	OpenParams.bASFEnabled = 0;
	OpenParams.ulDescriptorVersion = sc->hwmode;
	OpenParams.ulMaxPacketSize = sc->max_frame_size;
	OpenParams.DeviceId = pci_get_device(dev);

	/* Open NVIDIA Hardware API */
	error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
	if (error) {
		device_printf(dev,
		    "failed to open NVIDIA Hardware API: 0x%x\n", error);
		goto fail;
	}

	/* TODO - Add support for MODE2 hardware offload */

	bzero(&sc->adapterdata, sizeof(sc->adapterdata));

	sc->adapterdata.ulMediaIF = sc->media;
	sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
	sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);

	/* MAC is loaded backwards into h/w reg */
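	/*
	 * e.g. a factory-programmed 00:11:22:33:44:55 reads back from
	 * pfnGetNodeAddress as 55:44:33:22:11:00, so the loop below
	 * restores the conventional byte order before use.
	 */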
	sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
	for (i = 0; i < 6; i++) {
		eaddr[i] = sc->original_mac_addr[5 - i];
	}
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);

	/* Display the Ethernet address */
	device_printf(dev, "Ethernet address %6D\n", eaddr, ":");

	/* Allocate interface structures */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Probe device for MII interface to PHY */
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n");
	if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Setup interface parameters */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nve_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = nve_ifstart;
	ifp->if_watchdog = nve_watchdog;
	ifp->if_timer = 0;
	ifp->if_init = nve_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Mbps(100);
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);
	ifp->if_snd.ifq_drv_maxlen = TX_RING_SIZE - 1;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	/* Attach to OS's managers. */
	ether_ifattach(ifp, eaddr);

	/* Activate our interrupt handler - hooked up last to avoid races */
	error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nve_intr, sc, &sc->sc_ih);
	if (error) {
		device_printf(sc->dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");

fail:
	if (error)
		nve_detach(dev);

	return (error);
}

/* Detach interface for module unload */
static int
nve_detach(device_t dev)
{
	struct nve_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");

	ifp = sc->ifp;

	if (device_is_attached(dev)) {
		NVE_LOCK(sc);
		nve_stop(sc);
		NVE_UNLOCK(sc);
		callout_drain(&sc->stat_callout);
		ether_ifdetach(ifp);
	}

	if (sc->miibus)
		device_delete_child(dev, sc->miibus);
	bus_generic_detach(dev);

	/* sc->hwapi may be NULL if attach failed before ADAPTER_Open */
	if (sc->hwapi) {
		/* Restore the original (unreversed) MAC address */
		if (sc->original_mac_addr)
			sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
			    sc->original_mac_addr);

		DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
		/* Detach from NVIDIA hardware API */
		if (sc->hwapi->pfnClose)
			sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);
	}
	/* Release resources */
	if (sc->sc_ih)
		bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);

	nve_free_rings(sc);

	/* Free both descriptor rings, each under its own guard */
	if (sc->rx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
	}
	if (sc->tx_desc) {
		bus_dmamap_unload(sc->ttag, sc->tmap);
		bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);

	if (ifp)
		if_free(ifp);
	mtx_destroy(&sc->mtx);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");

	return (0);
}

/* Initialise interface and start it "RUNNING" */
static void
nve_init(void *xsc)
{
	struct nve_softc *sc = xsc;

	NVE_LOCK(sc);
	nve_init_locked(sc);
	NVE_UNLOCK(sc);
}

static void
nve_init_locked(struct nve_softc *sc)
{
	struct ifnet *ifp;
	int error;

	NVE_LOCK_ASSERT(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);

	ifp = sc->ifp;

	/* Do nothing if already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nve_stop(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");

	nve_ifmedia_upd_locked(ifp);

	/* Setup Hardware interface and allocate memory structures */
	error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
	    0,			/* force speed */
	    0,			/* force full duplex */
	    0,			/* force mode */
	    0,			/* force async mode */
	    &sc->linkup);

	if (error) {
		device_printf(sc->dev,
		    "failed to start NVIDIA Hardware interface\n");
		return;
	}
	/* Set the MAC address */
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp));
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStart(sc->hwapi->pADCX);

	/* Setup multicast filter */
	nve_setmulti(sc);

	/* Update interface parameters */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->stat_callout, hz, nve_tick, sc);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");

	return;
}

/* Stop interface activity, i.e. clear the "RUNNING" flag */
static void
nve_stop(struct nve_softc *sc)
{
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");

	ifp = sc->ifp;
	ifp->if_timer = 0;

	/* Cancel tick timer */
	callout_stop(&sc->stat_callout);

	/* Stop hardware activity */
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
	/* Shutdown interface and deallocate memory buffers */
	if (sc->hwapi->pfnDeinit)
		sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);

	sc->linkup = 0;
	sc->cur_rx = 0;
	sc->pending_rxs = 0;
	sc->pending_txs = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");

	return;
}

/* Shutdown interface for unload/reboot */
static int
nve_shutdown(device_t dev)
{
	struct nve_softc *sc;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");

	sc = device_get_softc(dev);

	/* Stop hardware activity */
	NVE_LOCK(sc);
	nve_stop(sc);
	NVE_UNLOCK(sc);

	return (0);
}

/* Allocate and initialise the RX and TX ring buffers */
static int
nve_init_rings(struct nve_softc *sc)
{
	int error, i;

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");

	sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;
	error = 0;
	/* Initialise RX ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "couldn't allocate mbuf\n");
			nve_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "couldn't dma map mbuf\n");
			nve_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Initialise TX ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = NULL;

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			return (error);
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");

	return (error);
}

/* Free the RX and TX ring buffers */
static void
nve_free_rings(struct nve_softc *sc)
{
	int i;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");
}

/* Main loop for sending packets from OS to interface */
static void
nve_ifstart(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	NVE_LOCK(sc);
	nve_ifstart_locked(ifp);
	NVE_UNLOCK(sc);
}

static void
nve_ifstart_locked(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct nve_map_buffer *buf;
	struct mbuf *m0, *m;
	struct nve_tx_desc *desc;
	ADAPTER_WRITE_DATA txdata;
	int error, i;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");

	NVE_LOCK_ASSERT(sc);

	/* If link is down/busy or queue is empty do nothing */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) ||
	    IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return;

	/* Transmit queued packets until sent or TX ring is full */
	while (sc->pending_txs < TX_RING_SIZE) {
		desc = sc->tx_desc + sc->cur_tx;
		buf = &desc->buf;

		/* Get next packet to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */
		if (m0 == NULL)
			return;

		/*
		 * On nForce4, the chip doesn't interrupt on transmit,
		 * so try to flush transmitted packets from the queue
		 * if it's getting large (see note in nve_watchdog).
		 */
		if (sc->pending_txs > TX_RING_SIZE / 2) {
			sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
			sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
			sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
		}

		/* Map MBUF for DMA access */
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);

		if (error && error != EFBIG) {
			m_freem(m0);
			sc->tx_errors++;
			continue;
		}
		/*
		 * Packet has too many fragments - defrag into new mbuf
		 * cluster
		 */
		if (error) {
			m = m_defrag(m0, M_DONTWAIT);
			if (m == NULL) {
				m_freem(m0);
				sc->tx_errors++;
				continue;
			}
			m0 = m;

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
			if (error) {
				m_freem(m);
				sc->tx_errors++;
				continue;
			}
		}
		/* Do sync on DMA bounce buffer */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		buf->mbuf = m0;
		txdata.ulNumberOfElements = desc->numfrags;
		txdata.pvID = (PVOID)desc;

		/* Put fragments into API element list */
		txdata.ulTotalLength = buf->mbuf->m_len;
		for (i = 0; i < desc->numfrags; i++) {
			txdata.sElement[i].ulLength =
			    (ulong)desc->frags[i].ds_len;
			txdata.sElement[i].pPhysical =
			    (PVOID)desc->frags[i].ds_addr;
		}

		/* Send packet to Nvidia API for transmission */
		error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);

		switch (error) {
		case ADAPTERERR_NONE:
			/* Packet was queued in API TX queue successfully */
			sc->pending_txs++;
			sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;
			break;

		case ADAPTERERR_TRANSMIT_QUEUE_FULL:
			/* The API TX queue is full - requeue the packet */
			device_printf(sc->dev,
			    "nve_ifstart: transmit queue is full\n");
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			bus_dmamap_unload(sc->mtag, buf->map);
			IFQ_DRV_PREPEND(&ifp->if_snd, buf->mbuf);
			buf->mbuf = NULL;
			return;

		default:
			/* The API failed to queue/send the packet so dump it */
			device_printf(sc->dev, "nve_ifstart: transmit error\n");
			bus_dmamap_unload(sc->mtag, buf->map);
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			sc->tx_errors++;
			return;
		}
		/* Set watchdog timer. */
		ifp->if_timer = 8;

		/* Copy packet to BPF tap */
		BPF_MTAP(ifp, m0);
	}
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
}

/* Handle IOCTL events */
static int
nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nve_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");

	switch (command) {
	case SIOCSIFMTU:
		/* Set MTU size */
		NVE_LOCK(sc);
		if (ifp->if_mtu == ifr->ifr_mtu) {
			NVE_UNLOCK(sc);
			break;
		}
		if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
			ifp->if_mtu = ifr->ifr_mtu;
			nve_stop(sc);
			nve_init_locked(sc);
		} else
			error = EINVAL;
		NVE_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		/* Setup interface flags */
		NVE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				nve_init_locked(sc);
				NVE_UNLOCK(sc);
				break;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				nve_stop(sc);
				NVE_UNLOCK(sc);
				break;
			}
		}
		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		nve_setmulti(sc);
		NVE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Setup multicast filter */
		NVE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			nve_setmulti(sc);
		}
		NVE_UNLOCK(sc);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Get/Set interface media parameters */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		/* Everything else we forward to generic ether ioctl */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");

	return (error);
}

/* Interrupt service routine */
static void
nve_intr(void *arg)
{
	struct nve_softc *sc = arg;
	struct ifnet *ifp = sc->ifp;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");

	NVE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		nve_stop(sc);
		NVE_UNLOCK(sc);
		return;
	}
	/* Handle interrupt event */
	if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
		sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
		sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	}
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);

	/* If no pending packets we don't need a timeout */
	if (sc->pending_txs == 0)
		sc->ifp->if_timer = 0;
	NVE_UNLOCK(sc);

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");

	return;
}

/* Setup multicast filters */
static void
nve_setmulti(struct nve_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	PACKET_FILTER hwfilter;
	int i;
	u_int8_t andaddr[6], oraddr[6];

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");

	ifp = sc->ifp;

	/* Initialize filter */
	hwfilter.ulFilterFlags = 0;
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = 0;
		hwfilter.acMulticastMask[i] = 0;
	}

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept all packets */
		hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
		sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
		return;
	}
	/* Start the AND accumulator all-ones and the OR accumulator all-zeroes */
	for (i = 0; i < 6; i++) {
		andaddr[i] = 0xff;
		oraddr[i] = 0;
	}
	/* Setup multicast filter */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < 6; i++) {
			u_int8_t mcaddr = addrp[i];
			andaddr[i] &= mcaddr;
			oraddr[i] |= mcaddr;
		}
	}
	IF_ADDR_UNLOCK(ifp);
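	/*
	 * andaddr now holds the bits common to every multicast address and
	 * oraddr the bits set in any of them.  Where they agree the bit is
	 * identical across all addresses and is compared by the hardware;
	 * where they differ the bit is masked out.  Worked example for one
	 * byte with addresses 0x01 and 0x03: and = 0x01, or = 0x03, so
	 * address = 0x01 & 0x03 = 0x01 and mask = 0x01 | ~0x03 = 0xfd,
	 * leaving bit 1 (the differing bit) ignored.
	 */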
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
		hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
	}

	/* Send filter to NVIDIA API */
	sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");

	return;
}

/* Change the current media/mediaopts */
static int
nve_ifmedia_upd(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	NVE_LOCK(sc);
	nve_ifmedia_upd_locked(ifp);
	NVE_UNLOCK(sc);
	return (0);
}

static void
nve_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");

	NVE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->miibus);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			mii_phy_reset(miisc);
		}
	}
	mii_mediachg(mii);
}

/* Update current miibus PHY status of media */
static void
nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nve_softc *sc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");

	sc = ifp->if_softc;
	NVE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	NVE_UNLOCK(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/* miibus tick timer - maintain link status */
static void
nve_tick(void *xsc)
{
	struct nve_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	nve_update_stats(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	if (mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			nve_ifstart_locked(ifp);
	}
	callout_reset(&sc->stat_callout, hz, nve_tick, sc);

	return;
}

/* Update ifnet data structure with collected interface stats from API */
static void
nve_update_stats(struct nve_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	ADAPTER_STATS stats;

	NVE_LOCK_ASSERT(sc);

	if (sc->hwapi) {
		sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);

		ifp->if_ipackets = stats.ulSuccessfulReceptions;
		ifp->if_ierrors = stats.ulMissedFrames +
		    stats.ulFailedReceptions +
		    stats.ulCRCErrors +
		    stats.ulFramingErrors +
		    stats.ulOverFlowErrors;

		ifp->if_opackets = stats.ulSuccessfulTransmissions;
		ifp->if_oerrors = sc->tx_errors +
		    stats.ulFailedTransmissions +
		    stats.ulRetryErrors +
		    stats.ulUnderflowErrors +
		    stats.ulLossOfCarrierErrors +
		    stats.ulLateCollisionErrors;

		ifp->if_collisions = stats.ulLateCollisionErrors;
	}

	return;
}

/* miibus Read PHY register wrapper - calls Nvidia API entry point */
static int
nve_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nve_softc *sc = device_get_softc(dev);
	ULONG data;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");

	ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");

	return (data);
}

/* miibus Write PHY register wrapper - calls Nvidia API entry point */
static void
nve_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nve_softc *sc = device_get_softc(dev);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");

	ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");

	return;
}

/* Watchdog timer to prevent PHY lockups */
static void
nve_watchdog(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	int pending_txs_start;

	NVE_LOCK(sc);

	/*
	 * The nvidia driver blob defers tx completion notifications.
	 * Thus, sometimes the watchdog timer will go off when the
	 * tx engine is fine, but the tx completions are just deferred.
	 * Try kicking the driver blob to clear out any pending tx
	 * completions.  If that clears up any of the pending tx
	 * operations, then just return without printing the warning
	 * message or resetting the adapter, as we can then conclude
	 * the chip hasn't actually crashed (it's still sending packets).
	 */
	pending_txs_start = sc->pending_txs;
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	if (sc->pending_txs < pending_txs_start) {
		NVE_UNLOCK(sc);
		return;
	}

	device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);

	sc->tx_errors++;

	nve_stop(sc);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	nve_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);
	NVE_UNLOCK(sc);

	return;
}

/* --- Start of NVOSAPI interface --- */

/* Allocate DMA enabled general use memory for API */
static NV_SINT32
nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	struct nve_softc *sc;
	bus_addr_t mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);

	sc = (struct nve_softc *)ctx;

	mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
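	/*
	 * The contigmalloc() arguments above request zeroed, physically
	 * contiguous memory between 0 and 0xffffffff (the low 4 GB),
	 * page aligned and with no boundary restriction - DMA-safe for
	 * this 32-bit device.
	 */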

	if (!mem->pLogical) {
		device_printf(sc->dev, "memory allocation failed\n");
		return (0);
	}
	memset(mem->pLogical, 0, (ulong)mem->uiLength);
	mem_physical = vtophys(mem->pLogical);
	mem->pPhysical = (PVOID)mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);

	return (1);
}

/* Free allocated memory */
static NV_SINT32
nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->uiLength);

	contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF);
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");

	mem_block_ex->pLogical = NULL;
	mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;

	if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
	    (mem_block_ex->AlignmentSize > 1)) {
		DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n",
		    mem_block_ex->AlignmentSize);
		mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;
	}
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	if (nve_osalloc(ctx, &mem_block) == 0) {
		return (0);
	}
	mem_block_ex->pLogicalOrig = mem_block.pLogical;
	mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
	mem_block_ex->pPhysicalOrigHigh = 0;

	mem_block_ex->pPhysical = mem_block.pPhysical;
	mem_block_ex->pLogical = mem_block.pLogical;

	if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
		unsigned int offset;
		offset = mem_block_ex->pPhysicalOrigLow &
		    (mem_block_ex->AlignmentSize - 1);
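		/*
		 * offset is how far the original physical address sits past
		 * an alignment boundary; advancing both pointers by
		 * (AlignmentSize - offset) aligns them, and the extra
		 * AlignmentSize bytes added to uiLengthOrig above guarantee
		 * the adjusted block still fits.
		 */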

		if (offset) {
			mem_block_ex->pPhysical =
			    (PVOID)((ulong)mem_block_ex->pPhysical +
			    mem_block_ex->AlignmentSize - offset);
			mem_block_ex->pLogical =
			    (PVOID)((ulong)mem_block_ex->pLogical +
			    mem_block_ex->AlignmentSize - offset);
		} /* if (offset) */
	} /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");

	mem_block.pLogical = mem_block_ex->pLogicalOrig;
	mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	return (nve_osfree(ctx, &mem_block));
}

/* Clear memory region */
static NV_SINT32
nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
	memset(mem, 0, length);
	return (1);
}

/* Spin for the requested number of microseconds */
static NV_SINT32
nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
{
	DELAY(usec);
	return (1);
}

/* Allocate memory for rx buffer */
static NV_SINT32
nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	int error;

	if (device_is_attached(sc->dev))
		NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");

	if (sc->pending_rxs == RX_RING_SIZE) {
		device_printf(sc->dev, "rx ring buffer is full\n");
		goto fail;
	}
	desc = sc->rx_desc + sc->cur_rx;
	buf = &desc->buf;

	if (buf->mbuf == NULL) {
		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "failed to allocate memory\n");
			goto fail;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "failed to dmamap mbuf\n");
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			goto fail;
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	sc->pending_rxs++;
	sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;

	mem->pLogical = (void *)desc->vaddr;
	mem->pPhysical = (void *)desc->paddr;
	mem->uiLength = desc->buflength;
	*id = (void *)desc;

	return (1);

fail:
	return (0);
}

/* Free the rx buffer */
static NV_SINT32
nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");

	desc = (struct nve_rx_desc *) id;
	buf = &desc->buf;

	if (buf->mbuf) {
		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);
		m_freem(buf->mbuf);
	}
	sc->pending_rxs--;
	buf->mbuf = NULL;

	return (1);
}

/* This gets called by the Nvidia API after our TX packet has been sent */
static NV_SINT32
nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
{
	struct nve_softc *sc = ctx;
	struct nve_map_buffer *buf;
	struct nve_tx_desc *desc = (struct nve_tx_desc *) id;
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");

	ifp = sc->ifp;
	buf = &desc->buf;
	sc->pending_txs--;

	/* Unload and free mbuf cluster */
	if (buf->mbuf == NULL)
		goto fail;

	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mtag, buf->map);
	m_freem(buf->mbuf);
	buf->mbuf = NULL;

	/* Send more packets if we have them */
	if (sc->pending_txs < TX_RING_SIZE)
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE)
		nve_ifstart_locked(ifp);

fail:

	return (1);
}

/* This gets called by the Nvidia API when a new packet has been received */
/* XXX What is newbuf used for? XXX */
static NV_SINT32
nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
    NV_UINT8 priority)
{
	struct nve_softc *sc = ctx;
	struct ifnet *ifp;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	ADAPTER_READ_DATA *readdata;
	struct mbuf *m;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");

	ifp = sc->ifp;

	readdata = (ADAPTER_READ_DATA *) data;
	desc = readdata->pvID;
	buf = &desc->buf;

	/* Sync the DMA bounce buffer before looking at the data */
	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

	if (success) {
		/* First mbuf in packet holds the ethernet and packet headers */
		buf->mbuf->m_pkthdr.rcvif = ifp;
		buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
		    readdata->ulTotalLength;

		bus_dmamap_unload(sc->mtag, buf->map);

		/* Blat the mbuf pointer, kernel will free the mbuf cluster */
		m = buf->mbuf;
		buf->mbuf = NULL;

		/* Give mbuf to OS. */
		NVE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NVE_LOCK(sc);
		if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
			ifp->if_imcasts++;

	} else {
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;
	}

	sc->cur_rx = desc - sc->rx_desc;
	sc->pending_rxs--;

	return (1);
}

/* This gets called by NVIDIA API when the PHY link state changes */
static NV_SINT32
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
{

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");

	return (1);
}

/* Allocate a timer for the API */
static NV_SINT32
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");

	callout_init(&sc->ostimer, CALLOUT_MPSAFE);
	*timer = &sc->ostimer;

	return (1);
}

/* Free the timer */
static NV_SINT32
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");

	callout_drain((struct callout *)timer);

	return (1);
}

/* Setup timer parameters */
static NV_SINT32
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");

	sc->ostimer_func = func;
	sc->ostimer_params = parameters;

	return (1);
}

/* Set the timer to go off */
static NV_SINT32
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");

	callout_reset((struct callout *)timer, delay, sc->ostimer_func,
	    sc->ostimer_params);

	return (1);
}

/* Cancel the timer */
static NV_SINT32
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	callout_stop((struct callout *)timer);

	return (1);
}

static NV_SINT32
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

	return (1);
}

static PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");

	return (NULL);
}

static NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

	return (1);
}

/* Allocate mutex context (already done in nve_attach) */
static NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

	return (1);
}

/* Obtain a spin lock */
static NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	return (1);
}

/* Release lock */
static NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	return (1);
}

/* I have no idea what this is for */
static PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirt not implemented\n");

	return (NULL);
}

/* --- End of NVOSAPI interface --- */