/* sys/dev/ixgbe/if_ixv.c */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF,
        "Intel(R) X520 82599 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF,
        "Intel(R) X540 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF,
        "Intel(R) X550 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF,
        "Intel(R) X552 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
        "Intel(R) X553 Virtual Function"),
    /* required last entry */
    PVID_END
};
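
/*
 * Illustrative sketch only (not part of the driver): during
 * device_probe, iflib walks this table and compares each entry
 * against the PCI IDs read from the device, conceptually:
 *
 *	for (ent = ixv_vendor_info_array; not at PVID_END; ent++)
 *		if (entry vendor == pci_get_vendor(dev) &&
 *		    entry device == pci_get_device(dev))
 *			-> probe succeeds, description string attached
 *
 * The loop shape and field access are assumptions for illustration;
 * the real matching happens inside iflib_device_probe().
 */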

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void *ixv_register(device_t);
static int  ixv_if_attach_pre(if_ctx_t);
static int  ixv_if_attach_post(if_ctx_t);
static int  ixv_if_detach(if_ctx_t);

static int  ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int  ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
static int  ixv_allocate_pci_resources(if_ctx_t);
static void ixv_free_pci_resources(if_ctx_t);
static int  ixv_setup_interface(if_ctx_t);
static void ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixv_if_media_change(if_ctx_t);
static void ixv_if_update_admin_status(if_ctx_t);
static int  ixv_if_msix_intr_assign(if_ctx_t, int);

static int  ixv_if_mtu_set(if_ctx_t, uint32_t);
static void ixv_if_init(if_ctx_t);
static void ixv_if_local_timer(if_ctx_t, uint16_t);
static void ixv_if_stop(if_ctx_t);
static int  ixv_negotiate_api(struct ixgbe_softc *);

static void ixv_initialize_transmit_units(if_ctx_t);
static void ixv_initialize_receive_units(if_ctx_t);
static void ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void ixv_setup_vlan_support(if_ctx_t);
static void ixv_configure_ivars(struct ixgbe_softc *);
static void ixv_if_enable_intr(if_ctx_t);
static void ixv_if_disable_intr(if_ctx_t);
static void ixv_if_multi_set(if_ctx_t);

static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void ixv_save_stats(struct ixgbe_softc *);
static void ixv_init_stats(struct ixgbe_softc *);
static void ixv_update_stats(struct ixgbe_softc *);
static void ixv_add_stats_sysctls(struct ixgbe_softc *);

static int  ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixv_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD_END
};

static driver_t ixv_driver = {
    "ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
    DEVMETHOD(ifdi_detach, ixv_if_detach),
    DEVMETHOD(ifdi_init, ixv_if_init),
    DEVMETHOD(ifdi_stop, ixv_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
    DEVMETHOD(ifdi_media_status, ixv_if_media_status),
    DEVMETHOD(ifdi_media_change, ixv_if_media_change),
    DEVMETHOD(ifdi_timer, ixv_if_local_timer),
    DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
    DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
    DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
    DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
    DEVMETHOD_END
};

static driver_t ixv_if_driver = {
    "ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table: this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
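
/*
 * Illustrative sketch only: each 32-bit word of the shadow table holds
 * one filter bit per VLAN ID, so a 12-bit VID maps to a (word, bit)
 * pair exactly as ixv_if_register_vlan() computes it below:
 *
 *	index = (vtag >> 5) & 0x7F;	// which 32-bit word
 *	bit   = vtag & 0x1F;		// which bit within that word
 *
 * For example, VID 100 lands in word 3, bit 4 (100 = 3 * 32 + 4).
 */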
extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE,	/* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = MJUM16BYTES,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = MJUM16BYTES,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,
    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixv_vendor_info_array,
    .isc_driver_version = ixv_driver_version,
    .isc_driver = &ixv_if_driver,
    .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

static void *
ixv_register(device_t dev)
{
    return (&ixv_sctx_init);
}
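
/*
 * Illustrative note (not part of the driver): iflib invokes the
 * device_register method before attach and uses the returned
 * if_shared_ctx to bound descriptor counts and size the DMA rings;
 * e.g. the number of RX descriptors actually used is clamped to
 * [MIN_RXD, MAX_RXD] around DEFAULT_RXD from the table above.  This
 * is a simplified description of what iflib_device_attach() does
 * with ixv_sctx_init; ixv_register itself is never called directly.
 */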

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que;
    int i, j, error;

    MPASS(sc->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    sc->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
        M_DEVBUF, M_NOWAIT | M_ZERO);
    if (!sc->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        txr->me = i;
        txr->sc = que->sc = sc;

        /* Allocate report status array */
        if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
            scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get the virtual and physical address of the hardware queues */
        txr->tail = IXGBE_VFTDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i * ntxqs];
        txr->tx_paddr = paddrs[i * ntxqs];

        txr->bytes = 0;
        txr->total_packets = 0;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        sc->num_tx_queues);

    return (0);

fail:
    ixv_if_queues_free(ctx);

    return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int i, error;

    MPASS(sc->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    sc->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
        M_DEVBUF, M_NOWAIT | M_ZERO);
    if (!sc->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto fail;
    }

    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        rxr->me = i;
        rxr->sc = que->sc = sc;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_VFRDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i * nrxqs];
        rxr->rx_paddr = paddrs[i * nrxqs];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        sc->num_rx_queues);

    return (0);

fail:
    ixv_if_queues_free(ctx);

    return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_tx_queue *que = sc->tx_queues;
    int i;

    if (que == NULL)
        goto free;

    for (i = 0; i < sc->num_tx_queues; i++, que++) {
        struct tx_ring *txr = &que->txr;

        if (txr->tx_rsq == NULL)
            break;

        free(txr->tx_rsq, M_DEVBUF);
        txr->tx_rsq = NULL;
    }
    if (sc->tx_queues != NULL)
        free(sc->tx_queues, M_DEVBUF);
free:
    if (sc->rx_queues != NULL)
        free(sc->rx_queues, M_DEVBUF);
    sc->tx_queues = NULL;
    sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
    struct ixgbe_softc *sc;
    device_t dev;
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our sc structure */
    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    sc->dev = dev;
    sc->ctx = ctx;
    sc->hw.back = sc;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(ctx)) {
        device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
        error = ENXIO;
        goto err_out;
    }

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixv_sysctl_debug, "I", "Debug Info");

    /* Determine hardware revision */
    ixv_identify_hardware(ctx);
    ixv_init_device_features(sc);

    /* Initialize the shared code */
    error = ixgbe_init_ops_vf(hw);
    if (error) {
        device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
        error = EIO;
        goto err_out;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    error = hw->mac.ops.reset_hw(hw);
    if (error == IXGBE_ERR_RESET_FAILED)
        device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
    else if (error)
        device_printf(dev, "...reset_hw() failed with error %d\n",
            error);
    if (error) {
        error = EIO;
        goto err_out;
    }

    error = hw->mac.ops.init_hw(hw);
    if (error) {
        device_printf(dev, "...init_hw() failed with error %d\n",
            error);
        error = EIO;
        goto err_out;
    }

    /* Negotiate mailbox API version */
    error = ixv_negotiate_api(sc);
    if (error) {
        device_printf(dev,
            "Mailbox API negotiation failed during attach!\n");
        goto err_out;
    }

    /* Check if the VF was disabled by the PF */
    error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
    if (error) {
        /* PF is not capable of controlling VF state. Enable the link. */
        sc->link_enabled = true;
    }

    /* If no mac address was assigned, make a random one */
    if (!ixv_check_ether_addr(hw->mac.addr)) {
        ether_gen_addr(iflib_get_ifp(ctx),
            (struct ether_addr *)hw->mac.addr);
        bcopy(hw->mac.addr, hw->mac.perm_addr,
            sizeof(hw->mac.perm_addr));
    }

    /* Most of the iflib initialization... */

    iflib_set_mac(ctx, hw->mac.addr);
    switch (sc->hw.mac.type) {
    case ixgbe_mac_X550_vf:
    case ixgbe_mac_X550EM_x_vf:
    case ixgbe_mac_X550EM_a_vf:
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
        break;
    default:
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
    }
    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN);
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);
    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    scctx->isc_msix_bar = pci_msix_table_bar(dev);
    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    /*
     * Tell the upper layer(s) that we support everything the PF
     * driver does except...
     *   Wake-on-LAN
     */
    scctx->isc_capabilities = IXGBE_CAPS;
    scctx->isc_capabilities ^= IFCAP_WOL;
    scctx->isc_capenable = scctx->isc_capabilities;

    INIT_DEBUGOUT("ixv_if_attach_pre: end");

    return (0);

err_out:
    ixv_free_pci_resources(ctx);

    return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int error = 0;

    /* Setup OS specific network interface */
    error = ixv_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto end;
    }

    /* Do the stats setup */
    ixv_save_stats(sc);
    ixv_init_stats(sc);
    ixv_add_stats_sysctls(sc);

end:
    return (error);
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_if_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
    INIT_DEBUGOUT("ixv_detach: begin");

    ixv_free_pci_resources(ctx);

    return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    int error = 0;

    IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
        error = EINVAL;
    } else {
        if_setmtu(ifp, mtu);
        sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
    }

    return (error);
} /* ixv_if_mtu_set */
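
/*
 * Illustrative note (not part of the driver): the MTU check above is
 * plain frame-size arithmetic.  IXGBE_MTU_HDR represents the fixed L2
 * overhead added on top of the MTU, so the largest acceptable MTU is
 * IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR, and the driver stores the
 * inverse, mtu + IXGBE_MTU_HDR, in max_frame_size, which is later
 * reported to the PF via ixgbevf_rlpml_set_vf() in
 * ixv_initialize_receive_units().
 */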

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in the network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_if_init: begin");
    hw->adapter_stopped = false;
    hw->mac.ops.stop_adapter(hw);

    /* reprogram the RAR[0] in case the user changed it. */
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address; the user may have set an LAA. */
    bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

    /* Reset VF and renegotiate mailbox API version */
    hw->mac.ops.reset_hw(hw);
    hw->mac.ops.start_hw(hw);
    error = ixv_negotiate_api(sc);
    if (error) {
        device_printf(dev,
            "Mailbox API negotiation failed in if_init!\n");
        return;
    }

    ixv_initialize_transmit_units(ctx);

    /* Setup Multicast table */
    ixv_if_multi_set(ctx);

    sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

    /* Configure RX settings */
    ixv_initialize_receive_units(ctx);

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(ctx);

    /* Set up MSI-X routing */
    ixv_configure_ivars(sc);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

    /* Stats init */
    ixv_init_stats(sc);

    /* Config/Enable Link */
    error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
    if (error) {
        /* PF is not capable of controlling VF state. Enable the link. */
        sc->link_enabled = true;
    } else if (sc->link_enabled == false)
        device_printf(dev, "VF is disabled by PF\n");

    hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
        false);

    /* And now turn on interrupts */
    ixv_if_enable_intr(ctx);
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 queue = 1 << vector;
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
    struct ixgbe_hw *hw = &sc->hw;
    u64 queue = (u64)(1 << vector);
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */
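
/*
 * Illustrative sketch only: VTEIMS (set) and VTEIMC (clear) act on the
 * same interrupt mask, so enabling or disabling a queue never needs a
 * read-modify-write.  For vector 1:
 *
 *	queue = 1 << 1;				// 0x2
 *	mask  = IXGBE_EIMS_RTX_QUEUE & queue;	// keep only queue bits
 *	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);	// unmask vector 1
 *
 * Writing the same mask to VTEIMC instead masks the vector, which is
 * what ixv_msix_que() does before scheduling the RX/TX task.
 */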

/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
    struct ix_rx_queue *que = arg;
    struct ixgbe_softc *sc = que->sc;

    ixv_disable_queue(sc, que->msix);
    ++que->irqs;

    return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
    struct ixgbe_softc *sc = arg;
    struct ixgbe_hw *hw = &sc->hw;
    u32 reg;

    ++sc->link_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        iflib_admin_intr_deferred(sc->ctx);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

    return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_if_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixv_media_status: begin");

    iflib_admin_intr_deferred(ctx);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (sc->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_100_FULL:
        ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10_FULL:
        ifmr->ifm_active |= IFM_10_T | IFM_FDX;
        break;
    }
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(sc->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
} /* ixv_if_media_change */

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    int mbx_api[] = { ixgbe_mbox_api_12,
                      ixgbe_mbox_api_11,
                      ixgbe_mbox_api_10,
                      ixgbe_mbox_api_unknown };
    int i = 0;

    while (mbx_api[i] != ixgbe_mbox_api_unknown) {
        if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
            return (0);
        i++;
    }

    return (EINVAL);
} /* ixv_negotiate_api */
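
/*
 * Illustrative note (not part of the driver): the loop above is a
 * simple first-match fallback.  The PF rejects versions it does not
 * speak, so the VF proposes api_12, then api_11, then api_10, and the
 * ixgbe_mbox_api_unknown sentinel terminates the array.  Equivalent
 * control flow:
 *
 *	for each v in {api_12, api_11, api_10}:
 *		if the PF acks v -> done, use v
 *	no ack for any v -> return EINVAL
 */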

static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
    bcopy(LLADDR(addr), &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
        IXGBE_ETH_LENGTH_OF_ADDRESS);

    return (++cnt);
}

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
    u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u8 *update_ptr;
    if_t ifp = iflib_get_ifp(ctx);
    int mcnt = 0;

    IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

    mcnt = if_foreach_llmaddr(ifp, ixv_if_multi_set_cb, mta);

    update_ptr = mta;

    sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
        ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   mta array built by ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;

    return addr;
} /* ixv_mc_array_itr */
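
/*
 * Illustrative sketch only: the shared code walks the flat mta[]
 * array through this iterator, six bytes (one MAC address) at a time.
 * Conceptually, update_mc_addr_list() does something like:
 *
 *	u8 *p = mta;
 *	for (i = 0; i < mcnt; i++) {
 *		addr = ixv_mc_array_itr(hw, &p, &vmdq);
 *		// send addr to the PF via the mailbox
 *	}
 *
 * This pseudo-loop is an assumption about the shared-code internals,
 * shown only to explain why update_ptr advances by
 * IXGBE_ETH_LENGTH_OF_ADDRESS on each call.
 */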

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
    if (qid != 0)
        return;

    /* Fire off the adminq task */
    iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    s32 status;

    sc->hw.mac.get_link_status = true;

    status = ixgbe_check_link(&sc->hw, &sc->link_speed,
        &sc->link_up, false);

    if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
        /*
         * Mailbox's Clear To Send status is lost or timeout occurred.
         * We need reinitialization.
         */
        if_init(iflib_get_ifp(ctx), ctx);
    }

    if (sc->link_up && sc->link_enabled) {
        if (sc->link_active == false) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s\n",
                    ((sc->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            sc->link_active = true;
            iflib_link_state_change(ctx, LINK_STATE_UP,
                IF_Gbps(10));
        }
    } else { /* Link down */
        if (sc->link_active == true) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
            sc->link_active = false;
        }
    }

    /* Stats Update */
    ixv_update_stats(sc);
} /* ixv_if_update_admin_status */

/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;

    INIT_DEBUGOUT("ixv_stop: begin\n");

    ixv_if_disable_intr(ctx);

    hw->mac.ops.reset_hw(hw);
    sc->hw.adapter_stopped = false;
    hw->mac.ops.stop_adapter(hw);

    /* Update the stack */
    sc->link_up = false;
    ixv_if_update_admin_status(ctx);

    /* reprogram the RAR[0] in case the user changed it. */
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */

/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;

    /* Save off the information about this board */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* A subset of set_mac_type */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_VF:
        hw->mac.type = ixgbe_mac_82599_vf;
        break;
    case IXGBE_DEV_ID_X540_VF:
        hw->mac.type = ixgbe_mac_X540_vf;
        break;
    case IXGBE_DEV_ID_X550_VF:
        hw->mac.type = ixgbe_mac_X550_vf;
        break;
    case IXGBE_DEV_ID_X550EM_X_VF:
        hw->mac.type = ixgbe_mac_X550EM_x_vf;
        break;
    case IXGBE_DEV_ID_X550EM_A_VF:
        hw->mac.type = ixgbe_mac_X550EM_a_vf;
        break;
    default:
        device_printf(dev, "unknown mac type\n");
        hw->mac.type = ixgbe_mac_unknown;
        break;
    }
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que;
    int error, rid, vector = 0;
    char buf[16];

    for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate que int %d err: %d", i, error);
            sc->num_rx_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;
    }

    for (int i = 0; i < sc->num_tx_queues; i++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        tx_que = &sc->tx_queues[i];
        tx_que->msix = i % sc->num_rx_queues;
        iflib_softirq_alloc_generic(ctx,
            &sc->rx_queues[tx_que->msix].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
    }
    rid = vector + 1;
    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
        IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler");
        return (error);
    }

    sc->vector = vector;
    /*
     * Due to a broken design, QEMU will fail to properly
     * enable the guest for MSI-X unless the vectors in
     * the table are all set up, so we must rewrite the
     * ENABLE bit in the MSI-X control register again at this
     * point to cause it to successfully initialize us.
     */
    if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;

        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);

fail:
    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);

    return (error);
} /* ixv_if_msix_intr_assign */
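
/*
 * Illustrative note (not part of the driver): the resulting vector
 * layout for a two-queue VF is
 *
 *	vector 0 -> rxq0 (txq0 piggybacks as a softirq)
 *	vector 1 -> rxq1 (txq1 piggybacks as a softirq)
 *	vector 2 -> mailbox/admin ("aq"), saved in sc->vector
 *
 * TX queues never get their own MSI-X vectors; each is serviced from
 * the interrupt of the RX queue chosen by i % num_rx_queues above.
 */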

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int rid;

    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
    sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

    return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que = sc->rx_queues;
    device_t dev = iflib_get_dev(ctx);

    /* Release all MSI-X queue resources */
    if (sc->intr_type == IFLIB_INTR_MSIX)
        iflib_irq_free(ctx, &sc->irq);

    if (que != NULL) {
        for (int i = 0; i < sc->num_rx_queues; i++, que++)
            iflib_irq_free(ctx, &que->que_irq);
    }

    if (sc->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    if_t ifp = iflib_get_ifp(ctx);

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));
    if_setsendqlen(ifp, scctx->isc_ntxd[0] - 2);

    sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (sc->ipackets);
    case IFCOUNTER_OPACKETS:
        return (sc->opackets);
    case IFCOUNTER_IBYTES:
        return (sc->ibytes);
    case IFCOUNTER_OBYTES:
        return (sc->obytes);
    case IFCOUNTER_IMCASTS:
        return (sc->imcasts);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
    switch (event) {
    case IFLIB_RESTART_VLAN_CONFIG:
        /* XXX: This may not need to return true */
    default:
        return (true);
    }
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que = sc->tx_queues;
    int i;

    for (i = 0; i < sc->num_tx_queues; i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64 tdba = txr->tx_paddr;
        u32 txctrl, txdctl;
        int j = txr->me;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

        /* Set Tx Tail register */
        txr->tail = IXGBE_VFTDT(j);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        /*
         * Initialize the last processed descriptor to be the end of
         * the ring, rather than the start, so that we avoid an
         * off-by-one error when calculating how many descriptors are
         * done in the credits_update function.
         */
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
            scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
    }
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id;
    int i, j;
    u32 rss_hash_config;

    if (sc->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Now fill out hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

    /* Set up the redirection table */
    for (i = 0, j = 0; i < 64; i++, j++) {
        if (j == sc->num_rx_queues)
            j = 0;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % sc->num_rx_queues;
        } else
            queue_id = j;

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta >>= 8;
        reta |= ((uint32_t)queue_id) << 24;
        if ((i & 3) == 3) {
            IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
            reta = 0;
        }
    }

    /* Perform hash on these packet types */
    if (sc->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
            RSS_HASHTYPE_RSS_TCP_IPV4 |
            RSS_HASHTYPE_RSS_IPV6 |
            RSS_HASHTYPE_RSS_TCP_IPV6;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        device_printf(sc->dev,
            "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
            __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        device_printf(sc->dev,
            "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
            __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        device_printf(sc->dev,
            "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
            __func__);
    IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
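
/*
 * Illustrative worked example (not part of the driver): the RETA loop
 * packs four 8-bit queue ids into each 32-bit VFRETA register, newest
 * entry in the top byte.  With two RX queues (queue ids 0 and 1):
 *
 *	i=0, qid=0: reta = 0x00000000
 *	i=1, qid=1: reta = 0x01000000
 *	i=2, qid=0: reta = 0x00010000   (after reta >>= 8)
 *	i=3, qid=1: reta = 0x01000100   -> written to VFRETA(0)
 *
 * so VFRETA(0) maps indirection entries 0..3 to queues 0,1,0,1
 * reading from the low byte upward.
 */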

/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw = &sc->hw;
    if_t ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que = sc->rx_queues;
    u32 bufsz, psrtype;

    if (if_getmtu(ifp) > ETHERMTU)
        bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    else
        bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    psrtype = IXGBE_PSRTYPE_TCPHDR |
        IXGBE_PSRTYPE_UDPHDR |
        IXGBE_PSRTYPE_IPV4HDR |
        IXGBE_PSRTYPE_IPV6HDR |
        IXGBE_PSRTYPE_L2HDR;

    if (sc->num_rx_queues > 1)
        psrtype |= 1 << 29;

    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

    /* Tell the PF our max_frame size */
    if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
        device_printf(sc->dev,
            "There is a problem with the PF setup.  It is likely the"
            " receive unit for this VF will not function correctly.\n");
    }
    scctx = sc->shared;

    for (int i = 0; i < sc->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 rdba = rxr->rx_paddr;
        u32 reg, rxdctl;
        int j = rxr->me;

        /* Disable the queue */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        for (int k = 0; k < 10; k++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
                IXGBE_RXDCTL_ENABLE)
                msec_delay(1);
            else
                break;
        }
        wmb();
        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Reset the ring indices */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

        /* Set up the SRRCTL register */
        reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
        reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        reg |= bufsz;
        reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

        /* Capture Rx Tail index */
        rxr->tail = IXGBE_VFRDT(rxr->me);

        /* Do the queue enabling last */
        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        for (int l = 0; l < 10; l++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            msec_delay(1);
        }
        wmb();

        /* Set the Tail Pointer */
#ifdef DEV_NETMAP
        /*
         * In netmap mode, we must preserve the buffers made
         * available to userspace before the if_init()
         * (this is true by default on the TX side, because
         * init makes all buffers available to userspace).
         *
         * netmap_reset() and the device specific routines
         * (e.g. ixgbe_setup_receive_rings()) map these
         * buffers at the end of the NIC ring, so here we
         * must set the RDT (tail) register to make sure
         * they are not overwritten.
         *
         * In this driver the NIC ring starts at RDH = 0,
         * RDT points to the last slot available for reception (?),
         * so RDT = num_rx_desc - 1 means the whole ring is available.
         */
        if (if_getcapenable(ifp) & IFCAP_NETMAP) {
            struct netmap_adapter *na = NA(ifp);
            struct netmap_kring *kring = na->rx_rings[j];
            int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
        } else
#endif /* DEV_NETMAP */
            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                scctx->isc_nrxd[0] - 1);
    }

    /*
     * Do not touch RSS and RETA settings for older hardware,
     * as those are shared among the PF and all VFs.
     */
    if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
        ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */
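
/*
 * Illustrative note (not part of the driver): SRRCTL's BSIZEPKT field
 * is expressed in 1 KB units, which is what the shift above encodes.
 * Assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10:
 *
 *	2048 >> 10 = 2	-> 2 KB receive buffers (standard MTU)
 *	4096 >> 10 = 4	-> 4 KB receive buffers (jumbo MTU)
 *
 * The shift value itself is an assumption here; see the SRRCTL
 * definition in the ixgbe headers for the authoritative encoding.
 */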

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
    if_t ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 ctrl, vid, vfta, retry;

    /*
     * We get here through if_init, meaning
     * a soft reset; this has already cleared
     * the VFTA and other state, so if no
     * VLANs have been registered, do nothing.
     */
    if (sc->num_vlans == 0)
        return;

    if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
        /* Enable the queues */
        for (int i = 0; i < sc->num_rx_queues; i++) {
            ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
            ctrl |= IXGBE_RXDCTL_VME;
            IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
            /*
             * Let the Rx path know that it needs to store the
             * VLAN tag as part of the extra mbuf info.
             */
            sc->rx_queues[i].rxr.vtag_strip = true;
        }
    }

    /*
     * If filtering VLAN tags is disabled,
     * there is no need to fill the VLAN Filter Table Array (VFTA).
     */
    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
        return;

    /*
     * A soft reset zeroes out the VFTA, so
     * we need to repopulate it now.
     */
    for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
        if (ixv_shadow_vfta[i] == 0)
            continue;
        vfta = ixv_shadow_vfta[i];
        /*
         * Reconstruct the VLAN IDs based on the
         * bits set in each word of the array.
         */
        for (int j = 0; j < 32; j++) {
            retry = 0;
            if ((vfta & (1 << j)) == 0)
                continue;
            vid = (i * 32) + j;
            /* Call the shared code mailbox routine */
            while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
                if (++retry > 5)
                    break;
            }
        }
    }
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA; init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] |= (1 << bit);
    ++sc->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] &= ~(1 << bit);
    --sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct ix_rx_queue *que = sc->rx_queues;
    u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

    mask = IXGBE_EIMS_ENABLE_MASK;
    mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

    for (int i = 0; i < sc->num_rx_queues; i++, que++)
        ixv_enable_queue(sc, que->msix);

    IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
    IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que = &sc->rx_queues[rxqid];

    ixv_enable_queue(sc, que->rxr.me);

    return (0);
} /* ixv_if_rx_queue_intr_enable */

/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     - entry is the register array entry
 *     - vector is the MSI-X vector for this queue
 *     - type is RX/TX/MISC
 ************************************************************************/
static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 ivar, index;

    vector |= IXGBE_IVAR_ALLOC_VAL;

    if (type == -1) { /* MISC IVAR */
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
        ivar &= ~0xFF;
        ivar |= vector;
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
    } else {          /* RX/TX IVARS */
        index = (16 * (entry & 1)) + (8 * type);
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
        ivar &= ~(0xFF << index);
        ivar |= (vector << index);
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
    }
} /* ixv_set_ivar */
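
/*
 * Illustrative worked example (not part of the driver): each VTIVAR
 * register holds four 8-bit allocation fields, two queue entries with
 * an RX byte and a TX byte apiece.  For entry 1, type 1 (TX):
 *
 *	index    = 16 * (1 & 1) + 8 * 1 = 24
 *	register = VTIVAR(1 >> 1)       = VTIVAR(0)
 *
 * so the TX vector for queue entry 1 lands in bits 31:24 of
 * VTIVAR(0), while entry 0's RX vector occupies bits 7:0 of the same
 * register.
 */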

/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct ixgbe_softc *sc)
{
    struct ix_rx_queue *que = sc->rx_queues;

    MPASS(sc->num_rx_queues == sc->num_tx_queues);

    for (int i = 0; i < sc->num_rx_queues; i++, que++) {
        /* First the RX queue entry */
        ixv_set_ivar(sc, i, que->msix, 0);
        /* ... and the TX */
        ixv_set_ivar(sc, i, que->msix, 1);
        /* Set an initial value in EITR */
        IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
            IXGBE_EITR_DEFAULT);
    }

    /* For the mailbox interrupt */
    ixv_set_ivar(sc, 1, sc->vector, -1);
} /* ixv_configure_ivars */
1706
1707 /************************************************************************
1708 * ixv_save_stats
1709 *
1710 * The VF stats registers never have a truly virgin
1711 * starting point, so this routine tries to make an
1712 * artificial one, marking ground zero on attach as
1713 * it were.
1714 ************************************************************************/
1715 static void
1716 ixv_save_stats(struct ixgbe_softc *sc)
1717 {
1718 if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
1719 sc->stats.vf.saved_reset_vfgprc +=
1720 sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
1721 sc->stats.vf.saved_reset_vfgptc +=
1722 sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
1723 sc->stats.vf.saved_reset_vfgorc +=
1724 sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
1725 sc->stats.vf.saved_reset_vfgotc +=
1726 sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
1727 sc->stats.vf.saved_reset_vfmprc +=
1728 sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
1729 }
1730 } /* ixv_save_stats */
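
/*
 * Editorial note: with the deltas banked above, the lifetime value of
 * a counter X at any instant is saved_reset_X + (X - base_X), where
 * base_X is re-established by ixv_init_stats() below.
 */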
1731
1732 /************************************************************************
1733 * ixv_init_stats
1734 ************************************************************************/
1735 static void
1736 ixv_init_stats(struct ixgbe_softc *sc)
1737 {
1738 struct ixgbe_hw *hw = &sc->hw;
1739
1740 sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1741 sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1742 sc->stats.vf.last_vfgorc |=
1743 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1744
1745 sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1746 sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1747 sc->stats.vf.last_vfgotc |=
1748 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1749
1750 sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1751
1752 sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
1753 sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
1754 sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
1755 sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
1756 sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
1757 } /* ixv_init_stats */
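
/*
 * Editorial note: the octet counters (VFGORC/VFGOTC) are split across
 * an LSB and an MSB register and appear to be 36 bits wide in total,
 * which is why UPDATE_STAT_36 below rolls over at 0x1000000000 (2^36);
 * the packet counters are plain 32-bit registers.
 */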
1758
1759 #define UPDATE_STAT_32(reg, last, count) \
1760 { \
1761 u32 current = IXGBE_READ_REG(hw, reg); \
1762 if (current < last) \
1763 count += 0x100000000LL; \
1764 last = current; \
1765 count &= 0xFFFFFFFF00000000LL; \
1766 count |= current; \
1767 }
1768
1769 #define UPDATE_STAT_36(lsb, msb, last, count) \
1770 { \
1771 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1772 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1773 u64 current = ((cur_msb << 32) | cur_lsb); \
1774 if (current < last) \
1775 count += 0x1000000000LL; \
1776 last = current; \
1777 count &= 0xFFFFFFF000000000LL; \
1778 count |= current; \
1779 }
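
/*
 * Editorial sketch (not part of the driver): the UPDATE_STAT_32 logic
 * restated as a function, to make the masking explicit.  The name
 * update_stat_32_sketch is hypothetical; "last" holds the previous raw
 * register read and the upper half of "count" accumulates wraps.
 */
static inline u64
update_stat_32_sketch(u32 current, u32 *last, u64 count)
{
	if (current < *last)            /* the 32-bit register wrapped */
		count += 0x100000000LL;
	*last = current;
	count &= 0xFFFFFFFF00000000LL;  /* preserve accumulated wraps */
	count |= current;               /* splice in fresh low 32 bits */
	return (count);
}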
1780
1781 /************************************************************************
1782 * ixv_update_stats - Update the board statistics counters.
1783 ************************************************************************/
1784 void
1785 ixv_update_stats(struct ixgbe_softc *sc)
1786 {
1787 struct ixgbe_hw *hw = &sc->hw;
1788 struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1789
1790 UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
1791 sc->stats.vf.vfgprc);
1792 UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
1793 sc->stats.vf.vfgptc);
1794 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1795 sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
1796 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1797 sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
1798 UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
1799 sc->stats.vf.vfmprc);
1800
1801 /* Fill out the OS statistics structure */
1802 IXGBE_SET_IPACKETS(sc, stats->vfgprc);
1803 IXGBE_SET_OPACKETS(sc, stats->vfgptc);
1804 IXGBE_SET_IBYTES(sc, stats->vfgorc);
1805 IXGBE_SET_OBYTES(sc, stats->vfgotc);
1806 IXGBE_SET_IMCASTS(sc, stats->vfmprc);
1807 } /* ixv_update_stats */
1808
1809 /************************************************************************
1810 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1811 ************************************************************************/
1812 static void
1813 ixv_add_stats_sysctls(struct ixgbe_softc *sc)
1814 {
1815 device_t dev = sc->dev;
1816 struct ix_tx_queue *tx_que = sc->tx_queues;
1817 struct ix_rx_queue *rx_que = sc->rx_queues;
1818 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1819 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1820 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1821 struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1822 struct sysctl_oid *stat_node, *queue_node;
1823 struct sysctl_oid_list *stat_list, *queue_list;
1824
1825 #define QUEUE_NAME_LEN 32
1826 char namebuf[QUEUE_NAME_LEN];
1827
1828 /* Driver Statistics */
1829 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1830 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1831 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1832 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1833
1834 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
1835 struct tx_ring *txr = &tx_que->txr;
1836 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1837 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1838 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1839 queue_list = SYSCTL_CHILDREN(queue_node);
1840
1841 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1842 CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1843 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1844 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1845 }
1846
1847 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
1848 struct rx_ring *rxr = &rx_que->rxr;
1849 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1850 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1851 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1852 queue_list = SYSCTL_CHILDREN(queue_node);
1853
1854 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1855 CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1856 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1857 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1858 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1859 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1860 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1861 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1862 }
1863
1864 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1865 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1866 "VF Statistics (read from HW registers)");
1867 stat_list = SYSCTL_CHILDREN(stat_node);
1868
1869 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1870 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1871 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1872 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1873 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1874 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1875 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1876 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1877 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1878 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1879 } /* ixv_add_stats_sysctls */
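
/*
 * Editorial note: these OIDs hang off the device's sysctl tree, so on
 * unit 0 they would typically surface as, e.g. (names hedged):
 *
 *     dev.ixv.0.queue0.rx_packets
 *     dev.ixv.0.mac.good_pkts_rcvd
 */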
1880
1881 /************************************************************************
1882 * ixv_print_debug_info
1883 *
1884  *   Called from ixv_sysctl_debug when its sysctl is
1885  *   written with 1.  Provides a way to inspect important
1886  *   statistics maintained by the driver and hardware.
1887 ************************************************************************/
1888 static void
1889 ixv_print_debug_info(struct ixgbe_softc *sc)
1890 {
1891 device_t dev = sc->dev;
1892 struct ixgbe_hw *hw = &sc->hw;
1893
1894 	device_printf(dev, "Error Byte Count = %u\n",
1895 IXGBE_READ_REG(hw, IXGBE_ERRBC));
1896
1897 	device_printf(dev, "MBX IRQ Handled: %lu\n", (unsigned long)sc->link_irq);
1898 } /* ixv_print_debug_info */
1899
1900 /************************************************************************
1901 * ixv_sysctl_debug
1902 ************************************************************************/
1903 static int
1904 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1905 {
1906 struct ixgbe_softc *sc;
1907 int error, result;
1908
1909 result = -1;
1910 error = sysctl_handle_int(oidp, &result, 0, req);
1911
1912 if (error || !req->newptr)
1913 return (error);
1914
1915 if (result == 1) {
1916 sc = (struct ixgbe_softc *)arg1;
1917 ixv_print_debug_info(sc);
1918 }
1919
1920 	return (error);
1921 } /* ixv_sysctl_debug */
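
/*
 * Editorial note: this handler only acts on a write of 1.  Assuming it
 * is registered under an OID named "debug" during attach (not shown in
 * this section), something like `sysctl dev.ixv.0.debug=1` would dump
 * the counters via ixv_print_debug_info().
 */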
1922
1923 /************************************************************************
1924 * ixv_init_device_features
1925 ************************************************************************/
1926 static void
1927 ixv_init_device_features(struct ixgbe_softc *sc)
1928 {
1929 sc->feat_cap = IXGBE_FEATURE_NETMAP
1930 | IXGBE_FEATURE_VF
1931 | IXGBE_FEATURE_LEGACY_TX;
1932
1933 	/* A tad short on feature flags for VFs, at the moment. */
1934 switch (sc->hw.mac.type) {
1935 case ixgbe_mac_82599_vf:
1936 break;
1937 case ixgbe_mac_X540_vf:
1938 break;
1939 case ixgbe_mac_X550_vf:
1940 case ixgbe_mac_X550EM_x_vf:
1941 case ixgbe_mac_X550EM_a_vf:
1942 sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1943 sc->feat_cap |= IXGBE_FEATURE_RSS;
1944 break;
1945 default:
1946 break;
1947 }
1948
1949 /* Enabled by default... */
1950 /* Is a virtual function (VF) */
1951 if (sc->feat_cap & IXGBE_FEATURE_VF)
1952 sc->feat_en |= IXGBE_FEATURE_VF;
1953 /* Netmap */
1954 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
1955 sc->feat_en |= IXGBE_FEATURE_NETMAP;
1956 /* Receive-Side Scaling (RSS) */
1957 if (sc->feat_cap & IXGBE_FEATURE_RSS)
1958 sc->feat_en |= IXGBE_FEATURE_RSS;
1959 /* Needs advanced context descriptor regardless of offloads req'd */
1960 if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1961 sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1962 } /* ixv_init_device_features */
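
/*
 * Editorial note: feat_cap records what the MAC can do and feat_en what
 * is actually switched on; for the VF driver every capability set above
 * is enabled unconditionally, so a later check such as
 * (sc->feat_en & IXGBE_FEATURE_RSS) can gate RSS setup per MAC type.
 */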
1963