/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp_lro.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>

#include <machine/bus.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"

#define	VNIC_VF_DEVSTR		"Cavium Thunder NIC Virtual Function Driver"

#define	VNIC_VF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

/* Lock for core interface settings */
#define	NICVF_CORE_LOCK_INIT(nic)				\
    sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))

#define	NICVF_CORE_LOCK_DESTROY(nic)				\
    sx_destroy(&(nic)->core_sx)

#define	NICVF_CORE_LOCK(nic)		sx_xlock(&(nic)->core_sx)
#define	NICVF_CORE_UNLOCK(nic)		sx_xunlock(&(nic)->core_sx)

#define	NICVF_CORE_LOCK_ASSERT(nic)	sx_assert(&(nic)->core_sx, SA_XLOCKED)

#define	SPEED_10	10
#define	SPEED_100	100
#define	SPEED_1000	1000
#define	SPEED_10000	10000
#define	SPEED_40000	40000

MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");

static int nicvf_probe(device_t);
static int nicvf_attach(device_t);
static int nicvf_detach(device_t);

static device_method_t nicvf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicvf_probe),
	DEVMETHOD(device_attach,	nicvf_attach),
	DEVMETHOD(device_detach,	nicvf_detach),

	DEVMETHOD_END,
};

static driver_t nicvf_driver = {
	"vnic",
	nicvf_methods,
	sizeof(struct nicvf),
};

static devclass_t nicvf_devclass;

DRIVER_MODULE(vnicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
MODULE_VERSION(vnicvf, 1);
MODULE_DEPEND(vnicvf, pci, 1, 1, 1);
MODULE_DEPEND(vnicvf, ether, 1, 1, 1);
MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1);

static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
static int nicvf_allocate_net_interrupts(struct nicvf *);
static void nicvf_release_all_interrupts(struct nicvf *);
static int nicvf_update_hw_max_frs(struct nicvf *, int);
static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
static void nicvf_config_cpi(struct nicvf *);
static int nicvf_rss_init(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);

static int nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);

static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t);
static void nicvf_if_init(void *);
static void nicvf_if_init_locked(struct nicvf *);
static int nicvf_if_transmit(struct ifnet *, struct mbuf *);
static void nicvf_if_qflush(struct ifnet *);
static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter);

static int nicvf_stop_locked(struct nicvf *);

static void nicvf_media_status(struct ifnet *, struct ifmediareq *);
static int nicvf_media_change(struct ifnet *);

static void nicvf_tick_stats(void *);

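/*
 * Match the PCI vendor and device IDs of a ThunderX NIC virtual function.
 */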
static int
nicvf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
	    device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
		device_set_desc(dev, VNIC_VF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

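/*
 * Attach the VF: map the register BAR, size the queue set, bring up the
 * PF mailbox, allocate the data-path interrupts, obtain or generate a MAC
 * address, configure CPI/RSS and register the ifnet with the network stack.
 */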
static int
nicvf_attach(device_t dev)
{
	struct nicvf *nic;
	int rid, qcount;
	int err = 0;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};

	nic = device_get_softc(dev);
	nic->dev = dev;
	nic->pnicvf = nic;

	NICVF_CORE_LOCK_INIT(nic);
	/* Enable HW TSO on Pass2 */
	if (!pass1_silicon(dev))
		nic->hw_tso = TRUE;

	rid = VNIC_VF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		device_printf(dev, "Could not allocate registers memory\n");
		return (ENXIO);
	}

	qcount = MAX_CMP_QUEUES_PER_QS;
	nic->max_queues = qcount;

	err = nicvf_set_qset_resources(nic);
	if (err != 0)
		goto err_free_res;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_allocate_misc_interrupt(nic);
	if (err != 0)
		goto err_free_res;

	NICVF_CORE_LOCK(nic);
	err = nicvf_enable_misc_interrupt(nic);
	NICVF_CORE_UNLOCK(nic);
	if (err != 0)
		goto err_release_intr;

	err = nicvf_allocate_net_interrupts(nic);
	if (err != 0) {
		device_printf(dev,
		    "Could not allocate network interface interrupts\n");
		/* The ifnet is not allocated yet, so don't free it here. */
		goto err_release_intr;
	}

	/* If no MAC address was obtained from the PF, generate a random one */
	if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
		nicvf_hw_addr_random(hwaddr);
		memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
		NICVF_CORE_LOCK(nic);
		nicvf_hw_set_mac_addr(nic, hwaddr);
		NICVF_CORE_UNLOCK(nic);
	}

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	NICVF_CORE_LOCK(nic);
	nicvf_config_cpi(nic);
	/* Configure receive side scaling */
	if (nic->qs->rq_cnt > 1)
		nicvf_rss_init(nic);
	NICVF_CORE_UNLOCK(nic);

	err = nicvf_setup_ifnet(nic);
	if (err != 0) {
		device_printf(dev, "Could not set up ifnet\n");
		goto err_release_intr;
	}

	err = nicvf_setup_ifmedia(nic);
	if (err != 0) {
		device_printf(dev, "Could not set up ifmedia\n");
		goto err_free_ifnet;
	}

	mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
	callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);

	ether_ifattach(nic->ifp, nic->hwaddr);

	return (0);

err_free_ifnet:
	if_free(nic->ifp);
err_release_intr:
	nicvf_release_all_interrupts(nic);
err_free_res:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
	    nic->reg_base);

	return (err);
}

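/*
 * Detach the VF: stop the port, tear down interrupts and release the
 * register BAR, the ifmedia entries, the ifnet and the locks.
 */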
static int
nicvf_detach(device_t dev)
{
	struct nicvf *nic;

	nic = device_get_softc(dev);

	NICVF_CORE_LOCK(nic);
	/* Shut down the port and release ring resources */
	nicvf_stop_locked(nic);
	/* Release stats lock */
	mtx_destroy(&nic->stats_mtx);
	/* Release interrupts */
	nicvf_release_all_interrupts(nic);
	/* Release memory resource */
	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}

	/* Remove all ifmedia configurations */
	ifmedia_removeall(&nic->if_media);
	/* Free this ifnet */
	if_free(nic->ifp);
	NICVF_CORE_UNLOCK(nic);
	/* Finally destroy the lock */
	NICVF_CORE_LOCK_DESTROY(nic);

	return (0);
}

static void
nicvf_hw_addr_random(uint8_t *hwaddr)
{
	uint32_t rnd;
	uint8_t addr[ETHER_ADDR_LEN];

	/*
	 * Create randomized MAC address.
	 * Set 'bsd' + random 24 low-order bits.
	 */
	rnd = arc4random() & 0x00ffffff;
	addr[0] = 'b';
	addr[1] = 's';
	addr[2] = 'd';
	addr[3] = rnd >> 16;
	addr[4] = rnd >> 8;
	addr[5] = rnd >> 0;

	memcpy(hwaddr, addr, ETHER_ADDR_LEN);
}

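/*
 * Allocate and initialize the ifnet: install the driver entry points and
 * advertise the capabilities supported by the hardware (VLAN/jumbo MTU,
 * checksum offload, LRO and, on pass2 silicon, TSO).
 */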
static int
nicvf_setup_ifnet(struct nicvf *nic)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(nic->dev, "Could not allocate ifnet structure\n");
		return (ENOMEM);
	}

	nic->ifp = ifp;

	if_setsoftc(ifp, nic);
	if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	if_settransmitfn(ifp, nicvf_if_transmit);
	if_setqflushfn(ifp, nicvf_if_qflush);
	if_setioctlfn(ifp, nicvf_if_ioctl);
	if_setinitfn(ifp, nicvf_if_init);
	if_setgetcounterfn(ifp, nicvf_if_getcounter);

	if_setmtu(ifp, ETHERMTU);

	/* Reset caps */
	if_setcapabilities(ifp, 0);

	/* Set the default values */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	if (nic->hw_tso) {
		/* TSO */
		if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
		/* TSO parameters */
		if_sethwtsomax(ifp, NICVF_TSO_MAXSIZE);
		if_sethwtsomaxsegcount(ifp, NICVF_TSO_NSEGS);
		if_sethwtsomaxsegsize(ifp, MCLBYTES);
	}
	/* IP/TCP/UDP HW checksums */
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
	/*
	 * HW offload enable
	 */
	if_clearhwassist(ifp);
	if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0);
	if (nic->hw_tso)
		if_sethwassistbits(ifp, (CSUM_TSO), 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	return (0);
}

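/*
 * Register the media types this adapter may report and default to
 * autoselect; the actual link parameters are delivered by the PF via
 * mailbox link change messages.
 */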
static int
nicvf_setup_ifmedia(struct nicvf *nic)
{

	ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
	    nicvf_media_status);

	/*
	 * Advertise availability of all possible connection types,
	 * even though not all are possible at the same time.
	 */

	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
	    0, NULL);

	ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));

	return (0);
}

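/*
 * Handle interface ioctls: address assignment, MTU and flag changes,
 * multicast updates, media queries and capability toggles.
 */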
static int
nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nicvf *nic;
	struct rcv_queue *rq;
	struct ifreq *ifr;
	uint32_t flags;
	int mask, err;
	int rq_idx;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa;
	boolean_t avoid_reset = FALSE;
#endif

	nic = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	ifa = (struct ifaddr *)data;
#endif
	err = 0;
	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif

#if defined(INET) || defined(INET6)
		/* Avoid reinitialization unless it's necessary */
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				nicvf_if_init(nic);
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif

			return (0);
		}
#endif
		err = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < NIC_HW_MIN_FRS ||
		    ifr->ifr_mtu > NIC_HW_MAX_FRS) {
			err = EINVAL;
		} else {
			NICVF_CORE_LOCK(nic);
			err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu);
			if (err == 0)
				if_setmtu(ifp, ifr->ifr_mtu);
			NICVF_CORE_UNLOCK(nic);
		}
		break;
	case SIOCSIFFLAGS:
		NICVF_CORE_LOCK(nic);
		flags = if_getflags(ifp);
		if (flags & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((flags ^ nic->if_flags) & IFF_PROMISC) {
					/* Change promiscuous mode */
#if 0 /* XXX */
					nicvf_set_promiscous(nic);
#endif
				}

				if ((flags ^ nic->if_flags) & IFF_ALLMULTI) {
					/* Change multicasting settings */
#if 0 /* XXX */
					nicvf_set_multicast(nic);
#endif
				}
			} else {
				nicvf_if_init_locked(nic);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			nicvf_stop_locked(nic);

		nic->if_flags = flags;
		NICVF_CORE_UNLOCK(nic);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
			NICVF_CORE_LOCK(nic);
			/* ARM64TODO */
			nicvf_set_multicast(nic);
			NICVF_CORE_UNLOCK(nic);
#endif
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge that the change took effect. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_TSO4) && nic->hw_tso)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_LRO) {
			/*
			 * Lock the driver for a moment to avoid
			 * mismatch in per-queue settings.
			 */
			NICVF_CORE_LOCK(nic);
			if_togglecapenable(ifp, IFCAP_LRO);
			if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Now disable LRO for subsequent packets.
				 * Atomicity of this change is not necessary
				 * as we don't need precise toggle of this
				 * feature for all threads processing the
				 * completion queue.
				 */
				for (rq_idx = 0;
				    rq_idx < nic->qs->rq_cnt; rq_idx++) {
					rq = &nic->qs->rq[rq_idx];
					rq->lro_enabled = !rq->lro_enabled;
				}
			}
			NICVF_CORE_UNLOCK(nic);
		}

		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

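/*
 * (Re)start the interface: reenable the mailbox interrupt, push the current
 * MAC address to the hardware, initialize the queue set and unmask the
 * completion, RBDR and QS error interrupts. Called with the core lock held.
 */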
static void
nicvf_if_init_locked(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	struct ifnet *ifp;
	int qidx;
	int err;
	caddr_t if_addr;

	NICVF_CORE_LOCK_ASSERT(nic);
	ifp = nic->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		nicvf_stop_locked(nic);

	err = nicvf_enable_misc_interrupt(nic);
	if (err != 0) {
		if_printf(ifp, "Could not reenable Mbox interrupt\n");
		return;
	}

	/* Get the latest MAC address */
	if_addr = if_getlladdr(ifp);
	/* Update MAC address if changed */
	if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
		memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
		nicvf_hw_set_mac_addr(nic, if_addr);
	}

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err != 0)
		goto error;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	/* Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Schedule callout to update stats */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

	return;

error:
	/* Something went very wrong. Disable this ifnet for good */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}

static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic = if_softc;

	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}

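/*
 * Multiqueue transmit entry point: pick a send queue from the mbuf flowid
 * (or the current CPU), enqueue the packet on its buf_ring and either
 * drain the ring directly or defer to the queue's taskqueue when the TX
 * lock is contended.
 */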
static int
nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	struct mbuf *mtmp;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

	if (mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL)
				return (ENOBUFS);
			mbuf = mtmp;
		}
	}

	/* Enqueue the packet to the ring buffer. */
	err = drbr_enqueue(ifp, sq->br, mbuf);
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up || (err != 0)) {
		/*
		 * If the driver is not active, the link is down or the
		 * enqueue operation failed, return with the appropriate
		 * error code.
		 */
		return (err);
	}

	if (NICVF_TX_TRYLOCK(sq) != 0) {
		err = nicvf_xmit_locked(sq);
		NICVF_TX_UNLOCK(sq);
		return (err);
	} else
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}

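/*
 * Flush all mbufs queued on the per-queue buf_rings.
 */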
static void
nicvf_if_qflush(struct ifnet *ifp)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct snd_queue *sq;
	struct mbuf *mbuf;
	size_t idx;

	nic = if_getsoftc(ifp);
	qs = nic->qs;

	for (idx = 0; idx < qs->sq_cnt; idx++) {
		sq = &qs->sq[idx];
		NICVF_TX_LOCK(sq);
		while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
			m_freem(mbuf);
		NICVF_TX_UNLOCK(sq);
	}
	if_qflush(ifp);
}

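/*
 * Report interface counters from the HW and driver statistics gathered
 * by the stats callout.
 */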
static uint64_t
nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt)
{
	struct nicvf *nic;
	struct nicvf_hw_stats *hw_stats;
	struct nicvf_drv_stats *drv_stats;

	nic = if_getsoftc(ifp);
	hw_stats = &nic->hw_stats;
	drv_stats = &nic->drv_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (drv_stats->rx_frames_ok);
	case IFCOUNTER_OPACKETS:
		return (drv_stats->tx_frames_ok);
	case IFCOUNTER_IBYTES:
		return (hw_stats->rx_bytes);
	case IFCOUNTER_OBYTES:
		return (hw_stats->tx_bytes_ok);
	case IFCOUNTER_IMCASTS:
		return (hw_stats->rx_mcast_frames);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (drv_stats->rx_drops);
	case IFCOUNTER_OQDROPS:
		return (drv_stats->tx_drops);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nicvf *nic = if_getsoftc(ifp);

	NICVF_CORE_LOCK(nic);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (nic->link_up) {
		/* Device attached to working network */
		ifmr->ifm_status |= IFM_ACTIVE;
	}

	switch (nic->speed) {
	case SPEED_10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case SPEED_100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case SPEED_1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case SPEED_10000:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case SPEED_40000:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	default:
		ifmr->ifm_active |= IFM_AUTO;
		break;
	}

	if (nic->duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	NICVF_CORE_UNLOCK(nic);
}

static int
nicvf_media_change(struct ifnet *ifp __unused)
{

	return (0);
}

/* Register read/write APIs */
void
nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

uint64_t
nicvf_reg_read(struct nicvf *nic, uint64_t offset)
{

	return (bus_read_8(nic->reg_base, offset));
}

void
nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx, uint64_t val)
{

	bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
}

uint64_t
nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx)
{

	return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
}

/* VF -> PF mailbox communication */
static void
nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	uint64_t *msg = (uint64_t *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

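/*
 * Post a message to the PF and busy-wait, sleeping in 2 ms steps, until
 * the message is acked, nacked or the timeout expires. Called with the
 * core lock held.
 */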
int
nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
	int sleep = 2;

	NICVF_CORE_LOCK_ASSERT(nic);

	nic->pf_acked = FALSE;
	nic->pf_nacked = FALSE;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for the message to be acked or the timeout to expire */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return (EINVAL);

		DELAY(sleep * 1000);

		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			device_printf(nic->dev,
			    "PF didn't ack mbox msg %d from VF%d\n",
			    (mbx->msg.msg & 0xFF), nic->vf_id);

			return (EBUSY);
		}
	}
	return (0);
}

/*
 * Checks if the VF is able to communicate with the PF
 * and also gets the VNIC number this VF is associated with.
 */
static int
nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		device_printf(nic->dev,
		    "PF didn't respond to READY msg\n");
		return (0);
	}

	return (1);
}

static void
nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{

	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

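/*
 * Read a message from the PF mailbox registers and dispatch it: READY
 * replies carry the VF configuration, ACK/NACK complete a pending request
 * and link change notifications update the ifnet state.
 */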
static void
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = TRUE;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = FALSE;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = TRUE;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = TRUE;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			if_setbaudrate(nic->ifp, nic->speed * 1000000);
			if_link_state_change(nic->ifp, LINK_STATE_UP);
		} else {
			if_setbaudrate(nic->ifp, 0);
			if_link_state_change(nic->ifp, LINK_STATE_DOWN);
		}
		break;
	default:
		device_printf(nic->dev,
		    "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int
nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

static int
nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

static void
nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss;
	int ind_tbl_len;
	int i, nextq;

	rss = &nic->rss_info;
	ind_tbl_len = rss->rss_size;
	nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len != 0) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = MIN(ind_tbl_len,
		    RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
		    NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

static void
nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	uint64_t key_addr;
	int idx;

	rss = &nic->rss_info;
	key_addr = NIC_VNIC_RSS_KEY_0_4;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(uint64_t);
	}
}

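/*
 * Query the RSS indirection table size from the PF and, unless a CPI
 * algorithm other than CPI_ALG_NONE is selected, program the hash key,
 * hash configuration and an indirection table that spreads flows across
 * the receive queues.
 */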
static int
nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	int idx;

	nicvf_get_rss_size(nic);

	rss = &nic->rss_info;
	if (nic->cpi_alg != CPI_ALG_NONE) {
		rss->enable = FALSE;
		rss->hash_bits = 0;
		return (ENXIO);
	}

	rss->enable = TRUE;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADUL;
	rss->key[1] = 0xFEED0BADFEED0BADUL;
	rss->key[2] = 0xFEED0BADFEED0BADUL;
	rss->key[3] = 0xFEED0BADFEED0BADUL;
	rss->key[4] = 0xFEED0BADFEED0BADUL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = fls(rss->rss_size) - 1;
	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = idx % nic->rx_queues;

	nicvf_config_rss(nic);

	return (0);
}

static int
nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, TRUE);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, TRUE);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to alloc/config VF's QSet resources\n");
		return (err);
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return (0);
}

static void
nicvf_misc_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return;

	nicvf_handle_mbx_intr(nic);
}

static int
nicvf_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct cmp_queue *cq;
	int qidx;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;
	qidx = cq->idx;

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return (FILTER_HANDLED);
}

static void
nicvf_rbdr_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct rbdr *rbdr;
	int qidx;

	nic = (struct nicvf *)arg;

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);

		qs = nic->qs;
		rbdr = &qs->rbdr[qidx];
		taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
}

static void
nicvf_qs_err_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	struct queue_set *qs = nic->qs;

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
}

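/*
 * Map the MSI-X table BAR and allocate the full set of vectors used by
 * this VF.
 */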
static int
nicvf_enable_msix(struct nicvf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_VF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for %d MSI-X vectors failed, error: %d\n",
		    nic->num_vec, ret);
		return (ret);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nicvf_disable_msix(struct nicvf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void
nicvf_release_all_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown interrupt first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown interrupt %d", irq));
			nic->msix_entries[irq].handle = NULL;
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), nic->msix_entries[irq].irq_res);
		nic->msix_entries[irq].irq_res = NULL;
	}
	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/*
 * Initialize MSI-X vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int
nicvf_allocate_misc_interrupt(struct nicvf *nic)
{
	struct resource *res;
	int irq, rid;
	int ret = 0;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return (0);

	/* Enable MSI-X */
	if (nicvf_enable_msix(nic) != 0)
		return (ENXIO);

	irq = NICVF_INTR_ID_MISC;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate Mbox interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		return (ENXIO);
	}

	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0) {
		res = nic->msix_entries[irq].irq_res;
		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), res);
		nic->msix_entries[irq].irq_res = NULL;
		return (ret);
	}

	return (0);
}

static int
nicvf_enable_misc_interrupt(struct nicvf *nic)
{

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		return (ENXIO);
	}

	return (0);
}

static void
nicvf_release_net_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	for_each_cq_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown CQ interrupt %d",
			    (irq - NICVF_INTR_ID_CQ)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	for_each_rbdr_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown RBDR interrupt %d",
			    (irq - NICVF_INTR_ID_RBDR)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	irq = NICVF_INTR_ID_QS_ERR;
	res = nic->msix_entries[irq].irq_res;
	if (res != NULL) {
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown QS Error interrupt %d",
			    irq));
			if (err != 0)
				return;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}
}

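/*
 * Allocate and set up the data-path interrupts: one vector per completion
 * queue (bound to a CPU for later use with system-wide RSS), one per RBDR
 * and one for QS errors.
 */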
static int
nicvf_allocate_net_interrupts(struct nicvf *nic)
{
	u_int cpuid;
	int irq, rid;
	int qidx;
	int ret = 0;

	/* MSI-X must be configured by now */
	if (!nic->msix_enabled) {
		device_printf(nic->dev, "Cannot allocate queue interrupts. "
		    "MSI-X interrupts disabled.\n");
		return (ENXIO);
	}

	/* Register CQ interrupts */
	for_each_cq_irq(irq) {
		if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
			break;

		qidx = irq - NICVF_INTR_ID_CQ;
		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
		    NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			goto error;
		}
		cpuid = (device_get_unit(nic->dev) * CMP_QUEUE_CNT) + qidx;
		cpuid %= mp_ncpus;
		/*
		 * Save CPU ID for later use when system-wide RSS is enabled.
		 * It will be used to pin the CQ task to the same CPU that got
		 * interrupted.
		 */
		nic->qs->cq[qidx].cmp_cpuid = cpuid;
		if (bootverbose) {
			device_printf(nic->dev, "bind CQ%d IRQ to CPU%d\n",
			    qidx, cpuid);
		}
		/* Bind interrupts to the given CPU */
		bus_bind_intr(nic->dev, nic->msix_entries[irq].irq_res, cpuid);
	}

	/* Register RBDR interrupt */
	for_each_rbdr_irq(irq) {
		if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
			break;

		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), NULL,
		    nicvf_rbdr_intr_handler, nic,
		    &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			goto error;
		}
	}

	/* Register QS error interrupt */
	irq = NICVF_INTR_ID_QS_ERR;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		ret = ENXIO;
		goto error;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
	    nic, &nic->msix_entries[irq].handle);
	if (ret != 0) {
		device_printf(nic->dev,
		    "Could not setup QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		goto error;
	}

	return (0);
error:
	nicvf_release_net_interrupts(nic);
	return (ret);
}

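/*
 * Stop the interface: drain the stats callout, tell the PF to shut the VF
 * down, mask the data-path interrupts and release the queue resources.
 * Called with the core lock held.
 */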
static int
nicvf_stop_locked(struct nicvf *nic)
{
	struct ifnet *ifp;
	int qidx;
	struct queue_set *qs = nic->qs;
	union nic_mbx mbx = {};

	NICVF_CORE_LOCK_ASSERT(nic);
	/* Stop callout. Can block here since holding SX lock */
	callout_drain(&nic->stats_callout);

	ifp = nic->ifp;

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Deactivate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Free resources */
	nicvf_config_data_transfer(nic, FALSE);

	/* Disable HW Qset */
	nicvf_qset_config(nic, FALSE);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	return (0);
}

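/*
 * Snapshot the hardware RX/TX statistics registers, refresh the derived
 * driver counters and update the per-RQ and per-SQ statistics.
 */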
static void
nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define	GET_RX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
#define	GET_TX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
	    stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void
nicvf_tick_stats(void *arg)
{
	struct nicvf *nic;

	nic = (struct nicvf *)arg;

	/* Read the statistics */
	nicvf_update_stats(nic);

	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
}