FreeBSD/Linux Kernel Cross Reference
sys/dev/oce/oce_if.c
1 /*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39 /* $FreeBSD$ */
40
41 #include "opt_inet6.h"
42 #include "opt_inet.h"
43
44 #include "oce_if.h"
45
46 /* UE Status Low CSR */
47 static char *ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80 };
81
82 /* UE Status High CSR */
83 static char *ue_status_hi_desc[] = {
84 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
107 "NETC",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116 };
117
118
119 /* Driver entry points prototypes */
120 static int oce_probe(device_t dev);
121 static int oce_attach(device_t dev);
122 static int oce_detach(device_t dev);
123 static int oce_shutdown(device_t dev);
124 static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
125 static void oce_init(void *xsc);
126 static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
127 static void oce_multiq_flush(struct ifnet *ifp);
128
129 /* Driver interrupt routines prototypes */
130 static void oce_intr(void *arg, int pending);
131 static int oce_setup_intr(POCE_SOFTC sc);
132 static int oce_fast_isr(void *arg);
133 static int oce_alloc_intr(POCE_SOFTC sc, int vector,
134 void (*isr) (void *arg, int pending));
135
136 /* Media callbacks prototypes */
137 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
138 static int oce_media_change(struct ifnet *ifp);
139
140 /* Transmit routines prototypes */
141 static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
142 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143 static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
144 uint32_t status);
145 static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
146 struct oce_wq *wq);
147
148 /* Receive routines prototypes */
149 static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
150 static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
151 static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
152 static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
153 struct oce_nic_rx_cqe *cqe);
154
155 /* Helper function prototypes in this file */
156 static int oce_attach_ifp(POCE_SOFTC sc);
157 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
158 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
159 static int oce_vid_config(POCE_SOFTC sc);
160 static void oce_mac_addr_set(POCE_SOFTC sc);
161 static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
162 static void oce_local_timer(void *arg);
163 static void oce_if_deactivate(POCE_SOFTC sc);
164 static void oce_if_activate(POCE_SOFTC sc);
165 static void setup_max_queues_want(POCE_SOFTC sc);
166 static void update_queues_got(POCE_SOFTC sc);
167 static void process_link_state(POCE_SOFTC sc,
168 struct oce_async_cqe_link_state *acqe);
169 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
170 static void oce_get_config(POCE_SOFTC sc);
171 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
172
173 /* IP specific */
174 #if defined(INET6) || defined(INET)
175 static int oce_init_lro(POCE_SOFTC sc);
176 static void oce_rx_flush_lro(struct oce_rq *rq);
177 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
178 #endif
179
180 static device_method_t oce_dispatch[] = {
181 DEVMETHOD(device_probe, oce_probe),
182 DEVMETHOD(device_attach, oce_attach),
183 DEVMETHOD(device_detach, oce_detach),
184 DEVMETHOD(device_shutdown, oce_shutdown),
185 {0, 0}
186 };
187
188 static driver_t oce_driver = {
189 "oce",
190 oce_dispatch,
191 sizeof(OCE_SOFTC)
192 };
193 static devclass_t oce_devclass;
194
195
196 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
197 MODULE_DEPEND(oce, pci, 1, 1, 1);
198 MODULE_DEPEND(oce, ether, 1, 1, 1);
199 MODULE_VERSION(oce, 1);
200
201
202 /* global vars */
203 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
204
205 /* Module capabilites and parameters */
206 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
207 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
208
209
210 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
211 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
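/*
 * These loader tunables can be set from /boot/loader.conf before the
 * module is loaded, for example (values shown are only illustrative):
 *
 *   hw.oce.max_rsp_handled="64"
 *   hw.oce.enable_rss="0"
 */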
212
213
214 /* Supported devices table */
215 static uint32_t supportedDevices[] = {
216 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
217 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
218 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
219 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
220 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
221 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
222 };
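/*
 * Each entry packs the PCI vendor ID in the upper 16 bits and the device
 * ID in the lower 16 bits; oce_probe() unpacks them with
 * (entry >> 16) & 0xffff and entry & 0xffff respectively.
 */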
223
224
225
226
227 /*****************************************************************************
228 * Driver entry points functions *
229 *****************************************************************************/
230
231 static int
232 oce_probe(device_t dev)
233 {
234 uint16_t vendor = 0;
235 uint16_t device = 0;
236 int i = 0;
237 char str[256] = {0};
238 POCE_SOFTC sc;
239
240 sc = device_get_softc(dev);
241 bzero(sc, sizeof(OCE_SOFTC));
242 sc->dev = dev;
243
244 vendor = pci_get_vendor(dev);
245 device = pci_get_device(dev);
246
247 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
248 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
249 if (device == (supportedDevices[i] & 0xffff)) {
250 sprintf(str, "%s:%s", "Emulex CNA NIC function",
251 component_revision);
252 device_set_desc_copy(dev, str);
253
254 switch (device) {
255 case PCI_PRODUCT_BE2:
256 sc->flags |= OCE_FLAGS_BE2;
257 break;
258 case PCI_PRODUCT_BE3:
259 sc->flags |= OCE_FLAGS_BE3;
260 break;
261 case PCI_PRODUCT_XE201:
262 case PCI_PRODUCT_XE201_VF:
263 sc->flags |= OCE_FLAGS_XE201;
264 break;
265 case PCI_PRODUCT_SH:
266 sc->flags |= OCE_FLAGS_SH;
267 break;
268 default:
269 return ENXIO;
270 }
271 return BUS_PROBE_DEFAULT;
272 }
273 }
274 }
275
276 return ENXIO;
277 }
278
279
280 static int
281 oce_attach(device_t dev)
282 {
283 POCE_SOFTC sc;
284 int rc = 0;
285
286 sc = device_get_softc(dev);
287
288 rc = oce_hw_pci_alloc(sc);
289 if (rc)
290 return rc;
291
292 sc->tx_ring_size = OCE_TX_RING_SIZE;
293 sc->rx_ring_size = OCE_RX_RING_SIZE;
294 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
295 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
296 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
297
298 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
299 LOCK_CREATE(&sc->dev_lock, "Device_lock");
300
301 /* initialise the hardware */
302 rc = oce_hw_init(sc);
303 if (rc)
304 goto pci_res_free;
305
306 oce_get_config(sc);
307
308 setup_max_queues_want(sc);
309
310 rc = oce_setup_intr(sc);
311 if (rc)
312 goto mbox_free;
313
314 rc = oce_queue_init_all(sc);
315 if (rc)
316 goto intr_free;
317
318 rc = oce_attach_ifp(sc);
319 if (rc)
320 goto queues_free;
321
322 #if defined(INET6) || defined(INET)
323 rc = oce_init_lro(sc);
324 if (rc)
325 goto ifp_free;
326 #endif
327
328 rc = oce_hw_start(sc);
329 if (rc)
330 goto lro_free;
331
332 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
333 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
334 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
335 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
336
337 rc = oce_stats_init(sc);
338 if (rc)
339 goto vlan_free;
340
341 oce_add_sysctls(sc);
342
343 callout_init(&sc->timer, CALLOUT_MPSAFE);
344 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
345 if (rc)
346 goto stats_free;
347
348 return 0;
349
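/*
 * Error unwind: each label below releases the resources acquired up to the
 * corresponding failure point, in reverse order of allocation.
 */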
350 stats_free:
351 callout_drain(&sc->timer);
352 oce_stats_free(sc);
353 vlan_free:
354 if (sc->vlan_attach)
355 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
356 if (sc->vlan_detach)
357 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
358 oce_hw_intr_disable(sc);
359 lro_free:
360 #if defined(INET6) || defined(INET)
361 oce_free_lro(sc);
362 ifp_free:
363 #endif
364 ether_ifdetach(sc->ifp);
365 if_free(sc->ifp);
366 queues_free:
367 oce_queue_release_all(sc);
368 intr_free:
369 oce_intr_free(sc);
370 mbox_free:
371 oce_dma_free(sc, &sc->bsmbx);
372 pci_res_free:
373 oce_hw_pci_free(sc);
374 LOCK_DESTROY(&sc->dev_lock);
375 LOCK_DESTROY(&sc->bmbx_lock);
376 return rc;
377
378 }
379
380
381 static int
382 oce_detach(device_t dev)
383 {
384 POCE_SOFTC sc = device_get_softc(dev);
385
386 LOCK(&sc->dev_lock);
387 oce_if_deactivate(sc);
388 UNLOCK(&sc->dev_lock);
389
390 callout_drain(&sc->timer);
391
392 if (sc->vlan_attach != NULL)
393 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
394 if (sc->vlan_detach != NULL)
395 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
396
397 ether_ifdetach(sc->ifp);
398
399 if_free(sc->ifp);
400
401 oce_hw_shutdown(sc);
402
403 bus_generic_detach(dev);
404
405 return 0;
406 }
407
408
409 static int
410 oce_shutdown(device_t dev)
411 {
412 int rc;
413
414 rc = oce_detach(dev);
415
416 return rc;
417 }
418
419
420 static int
421 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
422 {
423 struct ifreq *ifr = (struct ifreq *)data;
424 POCE_SOFTC sc = ifp->if_softc;
425 int rc = 0;
426 uint32_t u;
427
428 switch (command) {
429
430 case SIOCGIFMEDIA:
431 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
432 break;
433
434 case SIOCSIFMTU:
435 if (ifr->ifr_mtu > OCE_MAX_MTU)
436 rc = EINVAL;
437 else
438 ifp->if_mtu = ifr->ifr_mtu;
439 break;
440
441 case SIOCSIFFLAGS:
442 if (ifp->if_flags & IFF_UP) {
443 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
444 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
445 oce_init(sc);
446 }
447 device_printf(sc->dev, "Interface Up\n");
448 } else {
449 LOCK(&sc->dev_lock);
450
451 sc->ifp->if_drv_flags &=
452 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
453 oce_if_deactivate(sc);
454
455 UNLOCK(&sc->dev_lock);
456
457 device_printf(sc->dev, "Interface Down\n");
458 }
459
460 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
461 if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
462 sc->promisc = TRUE;
463 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
464 if (!oce_rxf_set_promiscuous(sc, 0))
465 sc->promisc = FALSE;
466 }
467
468 break;
469
470 case SIOCADDMULTI:
471 case SIOCDELMULTI:
472 rc = oce_hw_update_multicast(sc);
473 if (rc)
474 device_printf(sc->dev,
475 "Update multicast address failed\n");
476 break;
477
478 case SIOCSIFCAP:
479 u = ifr->ifr_reqcap ^ ifp->if_capenable;
480
481 if (u & IFCAP_TXCSUM) {
482 ifp->if_capenable ^= IFCAP_TXCSUM;
483 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
484
485 if (IFCAP_TSO & ifp->if_capenable &&
486 !(IFCAP_TXCSUM & ifp->if_capenable)) {
487 ifp->if_capenable &= ~IFCAP_TSO;
488 ifp->if_hwassist &= ~CSUM_TSO;
489 if_printf(ifp,
490 "TSO disabled due to -txcsum.\n");
491 }
492 }
493
494 if (u & IFCAP_RXCSUM)
495 ifp->if_capenable ^= IFCAP_RXCSUM;
496
497 if (u & IFCAP_TSO4) {
498 ifp->if_capenable ^= IFCAP_TSO4;
499
500 if (IFCAP_TSO & ifp->if_capenable) {
501 if (IFCAP_TXCSUM & ifp->if_capenable)
502 ifp->if_hwassist |= CSUM_TSO;
503 else {
504 ifp->if_capenable &= ~IFCAP_TSO;
505 ifp->if_hwassist &= ~CSUM_TSO;
506 if_printf(ifp,
507 "Enable txcsum first.\n");
508 rc = EAGAIN;
509 }
510 } else
511 ifp->if_hwassist &= ~CSUM_TSO;
512 }
513
514 if (u & IFCAP_VLAN_HWTAGGING)
515 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
516
517 if (u & IFCAP_VLAN_HWFILTER) {
518 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
519 oce_vid_config(sc);
520 }
521 #if defined(INET6) || defined(INET)
522 if (u & IFCAP_LRO)
523 ifp->if_capenable ^= IFCAP_LRO;
524 #endif
525
526 break;
527
528 case SIOCGPRIVATE_0:
529 rc = oce_handle_passthrough(ifp, data);
530 break;
531 default:
532 rc = ether_ioctl(ifp, command, data);
533 break;
534 }
535
536 return rc;
537 }
538
539
540 static void
541 oce_init(void *arg)
542 {
543 POCE_SOFTC sc = arg;
544
545 LOCK(&sc->dev_lock);
546
547 if (sc->ifp->if_flags & IFF_UP) {
548 oce_if_deactivate(sc);
549 oce_if_activate(sc);
550 }
551
552 UNLOCK(&sc->dev_lock);
553
554 }
555
556
557 static int
558 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
559 {
560 POCE_SOFTC sc = ifp->if_softc;
561 struct oce_wq *wq = NULL;
562 int queue_index = 0;
563 int status = 0;
564
565 if (!sc->link_status)
566 return ENXIO;
567
568 if ((m->m_flags & M_FLOWID) != 0)
569 queue_index = m->m_pkthdr.flowid % sc->nwqs;
570
571 wq = sc->wq[queue_index];
572
573 LOCK(&wq->tx_lock);
574 status = oce_multiq_transmit(ifp, m, wq);
575 UNLOCK(&wq->tx_lock);
576
577 return status;
578
579 }
580
581
582 static void
583 oce_multiq_flush(struct ifnet *ifp)
584 {
585 POCE_SOFTC sc = ifp->if_softc;
586 struct mbuf *m;
587 int i = 0;
588
589 for (i = 0; i < sc->nwqs; i++) {
590 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
591 m_freem(m);
592 }
593 if_qflush(ifp);
594 }
595
596
597
598 /*****************************************************************************
599 * Driver interrupt routines functions *
600 *****************************************************************************/
601
602 static void
603 oce_intr(void *arg, int pending)
604 {
605
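	/*
	 * Deferred interrupt work: oce_fast_isr() queues this task after
	 * acknowledging the interrupt.  Consume all pending EQ entries,
	 * run the CQ handlers for TX/RX/MCC, then re-arm the CQs and EQ.
	 */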
606 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
607 POCE_SOFTC sc = ii->sc;
608 struct oce_eq *eq = ii->eq;
609 struct oce_eqe *eqe;
610 struct oce_cq *cq = NULL;
611 int i, num_eqes = 0;
612
613
614 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
615 BUS_DMASYNC_POSTWRITE);
616 do {
617 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
618 if (eqe->evnt == 0)
619 break;
620 eqe->evnt = 0;
621 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
622 BUS_DMASYNC_POSTWRITE);
623 RING_GET(eq->ring, 1);
624 num_eqes++;
625
626 } while (TRUE);
627
628 if (!num_eqes)
629 goto eq_arm; /* Spurious */
630
631 /* Clear EQ entries, but don't arm */
632 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
633
634 /* Process TX, RX and MCC, but don't arm the CQs yet */
635 for (i = 0; i < eq->cq_valid; i++) {
636 cq = eq->cq[i];
637 (*cq->cq_handler)(cq->cb_arg);
638 }
639
640 /* Arm all cqs connected to this EQ */
641 for (i = 0; i < eq->cq_valid; i++) {
642 cq = eq->cq[i];
643 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
644 }
645
646 eq_arm:
647 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
648
649 return;
650 }
651
652
653 static int
654 oce_setup_intr(POCE_SOFTC sc)
655 {
656 int rc = 0, use_intx = 0;
657 int vector = 0, req_vectors = 0;
658
659 if (is_rss_enabled(sc))
660 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
661 else
662 req_vectors = 1;
663
664 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
665 sc->intr_count = req_vectors;
666 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
667 if (rc != 0) {
668 use_intx = 1;
669 pci_release_msi(sc->dev);
670 } else
671 sc->flags |= OCE_FLAGS_USING_MSIX;
672 } else
673 use_intx = 1;
674
675 if (use_intx)
676 sc->intr_count = 1;
677
678 /* Scale number of queues based on intr we got */
679 update_queues_got(sc);
680
681 if (use_intx) {
682 device_printf(sc->dev, "Using legacy interrupt\n");
683 rc = oce_alloc_intr(sc, vector, oce_intr);
684 if (rc)
685 goto error;
686 } else {
687 for (; vector < sc->intr_count; vector++) {
688 rc = oce_alloc_intr(sc, vector, oce_intr);
689 if (rc)
690 goto error;
691 }
692 }
693
694 return 0;
695 error:
696 oce_intr_free(sc);
697 return rc;
698 }
699
700
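/*
 * Fast interrupt filter: runs in interrupt context, clears the interrupt on
 * the EQ (without re-arming it) and defers the actual completion processing
 * to the per-vector taskqueue, which runs oce_intr().
 */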
701 static int
702 oce_fast_isr(void *arg)
703 {
704 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
705 POCE_SOFTC sc = ii->sc;
706
707 if (ii->eq == NULL)
708 return FILTER_STRAY;
709
710 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
711
712 taskqueue_enqueue_fast(ii->tq, &ii->task);
713
714 ii->eq->intr++;
715
716 return FILTER_HANDLED;
717 }
718
719
720 static int
721 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
722 {
723 POCE_INTR_INFO ii = &sc->intrs[vector];
724 int rc = 0, rr;
725
726 if (vector >= OCE_MAX_EQ)
727 return (EINVAL);
728
729 /* Set the resource id for the interrupt.
730 * MSIx is vector + 1 for the resource id,
731 * INTx is 0 for the resource id.
732 */
733 if (sc->flags & OCE_FLAGS_USING_MSIX)
734 rr = vector + 1;
735 else
736 rr = 0;
737 ii->intr_res = bus_alloc_resource_any(sc->dev,
738 SYS_RES_IRQ,
739 &rr, RF_ACTIVE|RF_SHAREABLE);
740 ii->irq_rr = rr;
741 if (ii->intr_res == NULL) {
742 device_printf(sc->dev,
743 "Could not allocate interrupt\n");
744 rc = ENXIO;
745 return rc;
746 }
747
748 TASK_INIT(&ii->task, 0, isr, ii);
749 ii->vector = vector;
750 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
751 ii->tq = taskqueue_create_fast(ii->task_name,
752 M_NOWAIT,
753 taskqueue_thread_enqueue,
754 &ii->tq);
755 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
756 device_get_nameunit(sc->dev));
757
758 ii->sc = sc;
759 rc = bus_setup_intr(sc->dev,
760 ii->intr_res,
761 INTR_TYPE_NET,
762 oce_fast_isr, NULL, ii, &ii->tag);
763 return rc;
764
765 }
766
767
768 void
769 oce_intr_free(POCE_SOFTC sc)
770 {
771 int i = 0;
772
773 for (i = 0; i < sc->intr_count; i++) {
774
775 if (sc->intrs[i].tag != NULL)
776 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
777 sc->intrs[i].tag);
778 if (sc->intrs[i].tq != NULL)
779 taskqueue_free(sc->intrs[i].tq);
780
781 if (sc->intrs[i].intr_res != NULL)
782 bus_release_resource(sc->dev, SYS_RES_IRQ,
783 sc->intrs[i].irq_rr,
784 sc->intrs[i].intr_res);
785 sc->intrs[i].tag = NULL;
786 sc->intrs[i].intr_res = NULL;
787 }
788
789 if (sc->flags & OCE_FLAGS_USING_MSIX)
790 pci_release_msi(sc->dev);
791
792 }
793
794
795
796 /******************************************************************************
797 * Media callbacks functions *
798 ******************************************************************************/
799
800 static void
801 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
802 {
803 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
804
805
806 req->ifm_status = IFM_AVALID;
807 req->ifm_active = IFM_ETHER;
808
809 if (sc->link_status == 1)
810 req->ifm_status |= IFM_ACTIVE;
811 else
812 return;
813
814 switch (sc->link_speed) {
815 case 1: /* 10 Mbps */
816 req->ifm_active |= IFM_10_T | IFM_FDX;
817 sc->speed = 10;
818 break;
819 case 2: /* 100 Mbps */
820 req->ifm_active |= IFM_100_TX | IFM_FDX;
821 sc->speed = 100;
822 break;
823 case 3: /* 1 Gbps */
824 req->ifm_active |= IFM_1000_T | IFM_FDX;
825 sc->speed = 1000;
826 break;
827 case 4: /* 10 Gbps */
828 req->ifm_active |= IFM_10G_SR | IFM_FDX;
829 sc->speed = 10000;
830 break;
831 case 5: /* 20 Gbps */
832 req->ifm_active |= IFM_10G_SR | IFM_FDX;
833 sc->speed = 20000;
834 break;
835 case 6: /* 25 Gbps */
836 req->ifm_active |= IFM_10G_SR | IFM_FDX;
837 sc->speed = 25000;
838 break;
839 case 7: /* 40 Gbps */
840 req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
841 sc->speed = 40000;
842 break;
843 default:
844 sc->speed = 0;
845 break;
846 }
847
848 return;
849 }
850
851
852 int
853 oce_media_change(struct ifnet *ifp)
854 {
855 return 0;
856 }
857
858
859
860
861 /*****************************************************************************
862 * Transmit routines functions *
863 *****************************************************************************/
864
865 static int
866 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
867 {
868 int rc = 0, i, retry_cnt = 0;
869 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
870 struct mbuf *m, *m_temp;
871 struct oce_wq *wq = sc->wq[wq_index];
872 struct oce_packet_desc *pd;
873 struct oce_nic_hdr_wqe *nichdr;
874 struct oce_nic_frag_wqe *nicfrag;
875 int num_wqes;
876 uint32_t reg_value;
877 boolean_t complete = TRUE;
878
879 m = *mpp;
880 if (!m)
881 return EINVAL;
882
883 if (!(m->m_flags & M_PKTHDR)) {
884 rc = ENXIO;
885 goto free_ret;
886 }
887
888 if(oce_tx_asic_stall_verify(sc, m)) {
889 m = oce_insert_vlan_tag(sc, m, &complete);
890 if(!m) {
891 device_printf(sc->dev, "Insertion unsuccessful\n");
892 return 0;
893 }
894
895 }
896
897 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
898 /* consolidate packet buffers for TSO/LSO segment offload */
899 #if defined(INET6) || defined(INET)
900 m = oce_tso_setup(sc, mpp);
901 #else
902 m = NULL;
903 #endif
904 if (m == NULL) {
905 rc = ENXIO;
906 goto free_ret;
907 }
908 }
909
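	/*
	 * A transmit request is one NIC header WQE followed by one fragment
	 * WQE per DMA segment; BE3/Skyhawk additionally require an even
	 * number of WQEs, padded with a dummy fragment below.
	 */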
910 pd = &wq->pckts[wq->pkt_desc_head];
911 retry:
912 rc = bus_dmamap_load_mbuf_sg(wq->tag,
913 pd->map,
914 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
915 if (rc == 0) {
916 num_wqes = pd->nsegs + 1;
917 if (IS_BE(sc) || IS_SH(sc)) {
918 /* BE3/Skyhawk require an even number of WQEs; pad with a dummy. */
919 if (num_wqes & 1)
920 num_wqes++;
921 }
922 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
923 bus_dmamap_unload(wq->tag, pd->map);
924 return EBUSY;
925 }
926 atomic_store_rel_int(&wq->pkt_desc_head,
927 (wq->pkt_desc_head + 1) % \
928 OCE_WQ_PACKET_ARRAY_SIZE);
929 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
930 pd->mbuf = m;
931
932 nichdr =
933 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
934 nichdr->u0.dw[0] = 0;
935 nichdr->u0.dw[1] = 0;
936 nichdr->u0.dw[2] = 0;
937 nichdr->u0.dw[3] = 0;
938
939 nichdr->u0.s.complete = complete;
940 nichdr->u0.s.event = 1;
941 nichdr->u0.s.crc = 1;
942 nichdr->u0.s.forward = 0;
943 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
944 nichdr->u0.s.udpcs =
945 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
946 nichdr->u0.s.tcpcs =
947 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
948 nichdr->u0.s.num_wqe = num_wqes;
949 nichdr->u0.s.total_length = m->m_pkthdr.len;
950
951 if (m->m_flags & M_VLANTAG) {
952 nichdr->u0.s.vlan = 1; /*Vlan present*/
953 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
954 }
955
956 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
957 if (m->m_pkthdr.tso_segsz) {
958 nichdr->u0.s.lso = 1;
959 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
960 }
961 if (!IS_BE(sc) || !IS_SH(sc))
962 nichdr->u0.s.ipcs = 1;
963 }
964
965 RING_PUT(wq->ring, 1);
966 atomic_add_int(&wq->ring->num_used, 1);
967
968 for (i = 0; i < pd->nsegs; i++) {
969 nicfrag =
970 RING_GET_PRODUCER_ITEM_VA(wq->ring,
971 struct oce_nic_frag_wqe);
972 nicfrag->u0.s.rsvd0 = 0;
973 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
974 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
975 nicfrag->u0.s.frag_len = segs[i].ds_len;
976 pd->wqe_idx = wq->ring->pidx;
977 RING_PUT(wq->ring, 1);
978 atomic_add_int(&wq->ring->num_used, 1);
979 }
980 if (num_wqes > (pd->nsegs + 1)) {
981 nicfrag =
982 RING_GET_PRODUCER_ITEM_VA(wq->ring,
983 struct oce_nic_frag_wqe);
984 nicfrag->u0.dw[0] = 0;
985 nicfrag->u0.dw[1] = 0;
986 nicfrag->u0.dw[2] = 0;
987 nicfrag->u0.dw[3] = 0;
988 pd->wqe_idx = wq->ring->pidx;
989 RING_PUT(wq->ring, 1);
990 atomic_add_int(&wq->ring->num_used, 1);
991 pd->nsegs++;
992 }
993
994 sc->ifp->if_opackets++;
995 wq->tx_stats.tx_reqs++;
996 wq->tx_stats.tx_wrbs += num_wqes;
997 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
998 wq->tx_stats.tx_pkts++;
999
1000 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1001 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
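		/*
		 * Ring the TX doorbell: number of WQEs posted in the upper
		 * 16 bits, work queue id in the lower 16 bits.
		 */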
1002 reg_value = (num_wqes << 16) | wq->wq_id;
1003 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1004
1005 } else if (rc == EFBIG) {
1006 if (retry_cnt == 0) {
1007 m_temp = m_defrag(m, M_NOWAIT);
1008 if (m_temp == NULL)
1009 goto free_ret;
1010 m = m_temp;
1011 *mpp = m_temp;
1012 retry_cnt = retry_cnt + 1;
1013 goto retry;
1014 } else
1015 goto free_ret;
1016 } else if (rc == ENOMEM)
1017 return rc;
1018 else
1019 goto free_ret;
1020
1021 return 0;
1022
1023 free_ret:
1024 m_freem(*mpp);
1025 *mpp = NULL;
1026 return rc;
1027 }
1028
1029
1030 static void
1031 oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1032 {
1033 struct oce_packet_desc *pd;
1034 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1035 struct mbuf *m;
1036
1037 pd = &wq->pckts[wq->pkt_desc_tail];
1038 atomic_store_rel_int(&wq->pkt_desc_tail,
1039 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1040 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1041 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1042 bus_dmamap_unload(wq->tag, pd->map);
1043
1044 m = pd->mbuf;
1045 m_freem(m);
1046 pd->mbuf = NULL;
1047
1048
1049 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1050 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1051 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1052 oce_tx_restart(sc, wq);
1053 }
1054 }
1055 }
1056
1057
1058 static void
1059 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1060 {
1061
1062 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1063 return;
1064
1065 #if __FreeBSD_version >= 800000
1066 if (!drbr_empty(sc->ifp, wq->br))
1067 #else
1068 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1069 #endif
1070 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1071
1072 }
1073
1074
1075 #if defined(INET6) || defined(INET)
1076 static struct mbuf *
1077 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1078 {
1079 struct mbuf *m;
1080 #ifdef INET
1081 struct ip *ip;
1082 #endif
1083 #ifdef INET6
1084 struct ip6_hdr *ip6;
1085 #endif
1086 struct ether_vlan_header *eh;
1087 struct tcphdr *th;
1088 uint16_t etype;
1089 int total_len = 0, ehdrlen = 0;
1090
1091 m = *mpp;
1092
1093 if (M_WRITABLE(m) == 0) {
1094 m = m_dup(*mpp, M_NOWAIT);
1095 if (!m)
1096 return NULL;
1097 m_freem(*mpp);
1098 *mpp = m;
1099 }
1100
1101 eh = mtod(m, struct ether_vlan_header *);
1102 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1103 etype = ntohs(eh->evl_proto);
1104 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1105 } else {
1106 etype = ntohs(eh->evl_encap_proto);
1107 ehdrlen = ETHER_HDR_LEN;
1108 }
1109
1110 switch (etype) {
1111 #ifdef INET
1112 case ETHERTYPE_IP:
1113 ip = (struct ip *)(m->m_data + ehdrlen);
1114 if (ip->ip_p != IPPROTO_TCP)
1115 return NULL;
1116 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1117
1118 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1119 break;
1120 #endif
1121 #ifdef INET6
1122 case ETHERTYPE_IPV6:
1123 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1124 if (ip6->ip6_nxt != IPPROTO_TCP)
1125 return NULL;
1126 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1127
1128 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1129 break;
1130 #endif
1131 default:
1132 return NULL;
1133 }
1134
1135 m = m_pullup(m, total_len);
1136 if (!m)
1137 return NULL;
1138 *mpp = m;
1139 return m;
1140
1141 }
1142 #endif /* INET6 || INET */
1143
1144 void
1145 oce_tx_task(void *arg, int npending)
1146 {
1147 struct oce_wq *wq = arg;
1148 POCE_SOFTC sc = wq->parent;
1149 struct ifnet *ifp = sc->ifp;
1150 int rc = 0;
1151
1152 #if __FreeBSD_version >= 800000
1153 LOCK(&wq->tx_lock);
1154 rc = oce_multiq_transmit(ifp, NULL, wq);
1155 if (rc) {
1156 device_printf(sc->dev,
1157 "TX[%d] restart failed\n", wq->queue_index);
1158 }
1159 UNLOCK(&wq->tx_lock);
1160 #else
1161 oce_start(ifp);
1162 #endif
1163
1164 }
1165
1166
1167 void
1168 oce_start(struct ifnet *ifp)
1169 {
1170 POCE_SOFTC sc = ifp->if_softc;
1171 struct mbuf *m;
1172 int rc = 0;
1173 int def_q = 0; /* Default tx queue is 0 */
1174
1175 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1176 IFF_DRV_RUNNING)
1177 return;
1178
1179 if (!sc->link_status)
1180 return;
1181
1182 do {
1183 IF_DEQUEUE(&sc->ifp->if_snd, m);
1184 if (m == NULL)
1185 break;
1186
1187 LOCK(&sc->wq[def_q]->tx_lock);
1188 rc = oce_tx(sc, &m, def_q);
1189 UNLOCK(&sc->wq[def_q]->tx_lock);
1190 if (rc) {
1191 if (m != NULL) {
1192 sc->wq[def_q]->tx_stats.tx_stops ++;
1193 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1194 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1195 m = NULL;
1196 }
1197 break;
1198 }
1199 if (m != NULL)
1200 ETHER_BPF_MTAP(ifp, m);
1201
1202 } while (TRUE);
1203
1204 return;
1205 }
1206
1207
1208 /* Handle the Completion Queue for transmit */
1209 uint16_t
1210 oce_wq_handler(void *arg)
1211 {
1212 struct oce_wq *wq = (struct oce_wq *)arg;
1213 POCE_SOFTC sc = wq->parent;
1214 struct oce_cq *cq = wq->cq;
1215 struct oce_nic_tx_cqe *cqe;
1216 int num_cqes = 0;
1217
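	/*
	 * Walk the TX completion queue: each valid CQE carries the index of
	 * the last WQE of a completed request; release its packet descriptor
	 * and mbuf, then credit the consumed CQEs back when arming the CQ.
	 */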
1218 bus_dmamap_sync(cq->ring->dma.tag,
1219 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1220 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1221 while (cqe->u0.dw[3]) {
1222 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1223
1224 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1225 if (wq->ring->cidx >= wq->ring->num_items)
1226 wq->ring->cidx -= wq->ring->num_items;
1227
1228 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1229 wq->tx_stats.tx_compl++;
1230 cqe->u0.dw[3] = 0;
1231 RING_GET(cq->ring, 1);
1232 bus_dmamap_sync(cq->ring->dma.tag,
1233 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1234 cqe =
1235 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1236 num_cqes++;
1237 }
1238
1239 if (num_cqes)
1240 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1241
1242 return 0;
1243 }
1244
1245
1246 static int
1247 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1248 {
1249 POCE_SOFTC sc = ifp->if_softc;
1250 int status = 0, queue_index = 0;
1251 struct mbuf *next = NULL;
1252 struct buf_ring *br = NULL;
1253
1254 br = wq->br;
1255 queue_index = wq->queue_index;
1256
1257 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1258 IFF_DRV_RUNNING) {
1259 if (m != NULL)
1260 status = drbr_enqueue(ifp, br, m);
1261 return status;
1262 }
1263
1264 if (m != NULL) {
1265 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1266 return status;
1267 }
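	/*
	 * Drain the buf_ring: peek at the head, try to transmit it, and only
	 * advance past it on success.  On failure the (possibly defragmented)
	 * mbuf is put back or re-enqueued and the queue is marked OACTIVE.
	 */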
1268 while ((next = drbr_peek(ifp, br)) != NULL) {
1269 if (oce_tx(sc, &next, queue_index)) {
1270 if (next == NULL) {
1271 drbr_advance(ifp, br);
1272 } else {
1273 drbr_putback(ifp, br, next);
1274 wq->tx_stats.tx_stops ++;
1275 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1276 status = drbr_enqueue(ifp, br, next);
1277 }
1278 break;
1279 }
1280 drbr_advance(ifp, br);
1281 ifp->if_obytes += next->m_pkthdr.len;
1282 if (next->m_flags & M_MCAST)
1283 ifp->if_omcasts++;
1284 ETHER_BPF_MTAP(ifp, next);
1285 }
1286
1287 return status;
1288 }
1289
1290
1291
1292
1293 /*****************************************************************************
1294 * Receive routines functions *
1295 *****************************************************************************/
1296
1297 static void
1298 oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1299 {
1300 uint32_t out;
1301 struct oce_packet_desc *pd;
1302 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1303 int i, len, frag_len;
1304 struct mbuf *m = NULL, *tail = NULL;
1305 uint16_t vtag;
1306
1307 len = cqe->u0.s.pkt_size;
1308 if (!len) {
1309 /*partial DMA workaround for Lancer*/
1310 oce_discard_rx_comp(rq, cqe);
1311 goto exit;
1312 }
1313
1314 /* Get vlan_tag value */
1315 if(IS_BE(sc) || IS_SH(sc))
1316 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1317 else
1318 vtag = cqe->u0.s.vlan_tag;
1319
1320
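	/*
	 * A received frame may span several RQ buffers.  Chain the per-
	 * fragment mbufs together, filling in the packet header (length,
	 * checksum flags) from the CQE on the first fragment only.
	 */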
1321 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1322
1323 if (rq->packets_out == rq->packets_in) {
1324 device_printf(sc->dev,
1325 "RQ transmit descriptor missing\n");
1326 }
1327 out = rq->packets_out + 1;
1328 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1329 out = 0;
1330 pd = &rq->pckts[rq->packets_out];
1331 rq->packets_out = out;
1332
1333 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1334 bus_dmamap_unload(rq->tag, pd->map);
1335 rq->pending--;
1336
1337 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1338 pd->mbuf->m_len = frag_len;
1339
1340 if (tail != NULL) {
1341 /* additional fragments */
1342 pd->mbuf->m_flags &= ~M_PKTHDR;
1343 tail->m_next = pd->mbuf;
1344 tail = pd->mbuf;
1345 } else {
1346 /* first fragment, fill out much of the packet header */
1347 pd->mbuf->m_pkthdr.len = len;
1348 pd->mbuf->m_pkthdr.csum_flags = 0;
1349 if (IF_CSUM_ENABLED(sc)) {
1350 if (cqe->u0.s.l4_cksum_pass) {
1351 pd->mbuf->m_pkthdr.csum_flags |=
1352 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1353 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1354 }
1355 if (cqe->u0.s.ip_cksum_pass) {
1356 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1357 pd->mbuf->m_pkthdr.csum_flags |=
1358 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1359 }
1360 }
1361 }
1362 m = tail = pd->mbuf;
1363 }
1364 pd->mbuf = NULL;
1365 len -= frag_len;
1366 }
1367
1368 if (m) {
1369 if (!oce_cqe_portid_valid(sc, cqe)) {
1370 m_freem(m);
1371 goto exit;
1372 }
1373
1374 m->m_pkthdr.rcvif = sc->ifp;
1375 #if __FreeBSD_version >= 800000
1376 if (rq->queue_index)
1377 m->m_pkthdr.flowid = (rq->queue_index - 1);
1378 else
1379 m->m_pkthdr.flowid = rq->queue_index;
1380 m->m_flags |= M_FLOWID;
1381 #endif
1382 /* Determine whether the vlan tag is valid */
1383 if (oce_cqe_vtp_valid(sc, cqe)) {
1384 if (sc->function_mode & FNM_FLEX10_MODE) {
1385 /* FLEX10. If QnQ is not set, neglect VLAN */
1386 if (cqe->u0.s.qnq) {
1387 m->m_pkthdr.ether_vtag = vtag;
1388 m->m_flags |= M_VLANTAG;
1389 }
1390 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1391 /* In UMC mode the pvid is generally stripped by
1392    hw, but in some cases the frame still arrives
1393    with it. So if pvid == vlan, ignore the vlan tag.
1394 */
1395 m->m_pkthdr.ether_vtag = vtag;
1396 m->m_flags |= M_VLANTAG;
1397 }
1398 }
1399
1400 sc->ifp->if_ipackets++;
1401 #if defined(INET6) || defined(INET)
1402 /* Try to queue to LRO */
1403 if (IF_LRO_ENABLED(sc) &&
1404 (cqe->u0.s.ip_cksum_pass) &&
1405 (cqe->u0.s.l4_cksum_pass) &&
1406 (!cqe->u0.s.ip_ver) &&
1407 (rq->lro.lro_cnt != 0)) {
1408
1409 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1410 rq->lro_pkts_queued ++;
1411 goto post_done;
1412 }
1413 /* If LRO posting fails then try to post to STACK */
1414 }
1415 #endif
1416
1417 (*sc->ifp->if_input) (sc->ifp, m);
1418 #if defined(INET6) || defined(INET)
1419 post_done:
1420 #endif
1421 /* Update rx stats per queue */
1422 rq->rx_stats.rx_pkts++;
1423 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1424 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1425 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1426 rq->rx_stats.rx_mcast_pkts++;
1427 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1428 rq->rx_stats.rx_ucast_pkts++;
1429 }
1430 exit:
1431 return;
1432 }
1433
1434
1435 static void
1436 oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1437 {
1438 uint32_t out, i = 0;
1439 struct oce_packet_desc *pd;
1440 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1441 int num_frags = cqe->u0.s.num_fragments;
1442
1443 for (i = 0; i < num_frags; i++) {
1444 if (rq->packets_out == rq->packets_in) {
1445 device_printf(sc->dev,
1446 "RQ transmit descriptor missing\n");
1447 }
1448 out = rq->packets_out + 1;
1449 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1450 out = 0;
1451 pd = &rq->pckts[rq->packets_out];
1452 rq->packets_out = out;
1453
1454 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1455 bus_dmamap_unload(rq->tag, pd->map);
1456 rq->pending--;
1457 m_freem(pd->mbuf);
1458 }
1459
1460 }
1461
1462
1463 static int
1464 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1465 {
1466 struct oce_nic_rx_cqe_v1 *cqe_v1;
1467 int vtp = 0;
1468
1469 if (sc->be3_native) {
1470 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1471 vtp = cqe_v1->u0.s.vlan_tag_present;
1472 } else
1473 vtp = cqe->u0.s.vlan_tag_present;
1474
1475 return vtp;
1476
1477 }
1478
1479
1480 static int
1481 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1482 {
1483 struct oce_nic_rx_cqe_v1 *cqe_v1;
1484 int port_id = 0;
1485
1486 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1487 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1488 port_id = cqe_v1->u0.s.port;
1489 if (sc->port_id != port_id)
1490 return 0;
1491 } else
1492 ;/* For BE3 legacy and Lancer this is dummy */
1493
1494 return 1;
1495
1496 }
1497
1498 #if defined(INET6) || defined(INET)
1499 static void
1500 oce_rx_flush_lro(struct oce_rq *rq)
1501 {
1502 struct lro_ctrl *lro = &rq->lro;
1503 struct lro_entry *queued;
1504 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1505
1506 if (!IF_LRO_ENABLED(sc))
1507 return;
1508
1509 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1510 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1511 tcp_lro_flush(lro, queued);
1512 }
1513 rq->lro_pkts_queued = 0;
1514
1515 return;
1516 }
1517
1518
1519 static int
1520 oce_init_lro(POCE_SOFTC sc)
1521 {
1522 struct lro_ctrl *lro = NULL;
1523 int i = 0, rc = 0;
1524
1525 for (i = 0; i < sc->nrqs; i++) {
1526 lro = &sc->rq[i]->lro;
1527 rc = tcp_lro_init(lro);
1528 if (rc != 0) {
1529 device_printf(sc->dev, "LRO init failed\n");
1530 return rc;
1531 }
1532 lro->ifp = sc->ifp;
1533 }
1534
1535 return rc;
1536 }
1537
1538
1539 void
1540 oce_free_lro(POCE_SOFTC sc)
1541 {
1542 struct lro_ctrl *lro = NULL;
1543 int i = 0;
1544
1545 for (i = 0; i < sc->nrqs; i++) {
1546 lro = &sc->rq[i]->lro;
1547 if (lro)
1548 tcp_lro_free(lro);
1549 }
1550 }
1551 #endif
1552
1553 int
1554 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1555 {
1556 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1557 int i, in, rc;
1558 struct oce_packet_desc *pd;
1559 bus_dma_segment_t segs[6];
1560 int nsegs, added = 0;
1561 struct oce_nic_rqe *rqe;
1562 pd_rxulp_db_t rxdb_reg;
1563
1564 bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1565 for (i = 0; i < count; i++) {
1566 in = rq->packets_in + 1;
1567 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1568 in = 0;
1569 if (in == rq->packets_out)
1570 break; /* no more room */
1571
1572 pd = &rq->pckts[rq->packets_in];
1573 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1574 if (pd->mbuf == NULL)
1575 break;
1576
1577 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1578 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1579 pd->map,
1580 pd->mbuf,
1581 segs, &nsegs, BUS_DMA_NOWAIT);
1582 if (rc) {
1583 m_free(pd->mbuf);
1584 break;
1585 }
1586
1587 if (nsegs != 1) {
1588 i--;
1589 continue;
1590 }
1591
1592 rq->packets_in = in;
1593 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1594
1595 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1596 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1597 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1598 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1599 RING_PUT(rq->ring, 1);
1600 added++;
1601 rq->pending++;
1602 }
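	/*
	 * Tell the hardware how many receive buffers were posted.  The
	 * doorbell field is limited to OCE_MAX_RQ_POSTS entries, so large
	 * batches are posted in several writes.
	 */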
1603 if (added != 0) {
1604 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1605 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1606 rxdb_reg.bits.qid = rq->rq_id;
1607 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1608 added -= OCE_MAX_RQ_POSTS;
1609 }
1610 if (added > 0) {
1611 rxdb_reg.bits.qid = rq->rq_id;
1612 rxdb_reg.bits.num_posted = added;
1613 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1614 }
1615 }
1616
1617 return 0;
1618 }
1619
1620
1621 /* Handle the Completion Queue for receive */
1622 uint16_t
1623 oce_rq_handler(void *arg)
1624 {
1625 struct oce_rq *rq = (struct oce_rq *)arg;
1626 struct oce_cq *cq = rq->cq;
1627 POCE_SOFTC sc = rq->parent;
1628 struct oce_nic_rx_cqe *cqe;
1629 int num_cqes = 0, rq_buffers_used = 0;
1630
1631
1632 bus_dmamap_sync(cq->ring->dma.tag,
1633 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1634 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1635 while (cqe->u0.dw[2]) {
1636 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1637
1638 RING_GET(rq->ring, 1);
1639 if (cqe->u0.s.error == 0) {
1640 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1641 } else {
1642 rq->rx_stats.rxcp_err++;
1643 sc->ifp->if_ierrors++;
1644 /* Post L3/L4 errors to stack.*/
1645 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1646 }
1647 rq->rx_stats.rx_compl++;
1648 cqe->u0.dw[2] = 0;
1649
1650 #if defined(INET6) || defined(INET)
1651 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1652 oce_rx_flush_lro(rq);
1653 }
1654 #endif
1655
1656 RING_GET(cq->ring, 1);
1657 bus_dmamap_sync(cq->ring->dma.tag,
1658 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1659 cqe =
1660 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1661 num_cqes++;
1662 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1663 break;
1664 }
1665
1666 #if defined(INET6) || defined(INET)
1667 if (IF_LRO_ENABLED(sc))
1668 oce_rx_flush_lro(rq);
1669 #endif
1670
1671 if (num_cqes) {
1672 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1673 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1674 if (rq_buffers_used > 1)
1675 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1676 }
1677
1678 return 0;
1679
1680 }
1681
1682
1683
1684
1685 /*****************************************************************************
1686 * Helper function prototypes in this file *
1687 *****************************************************************************/
1688
1689 static int
1690 oce_attach_ifp(POCE_SOFTC sc)
1691 {
1692
1693 sc->ifp = if_alloc(IFT_ETHER);
1694 if (!sc->ifp)
1695 return ENOMEM;
1696
1697 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1698 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1699 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1700
1701 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1702 sc->ifp->if_ioctl = oce_ioctl;
1703 sc->ifp->if_start = oce_start;
1704 sc->ifp->if_init = oce_init;
1705 sc->ifp->if_mtu = ETHERMTU;
1706 sc->ifp->if_softc = sc;
1707 #if __FreeBSD_version >= 800000
1708 sc->ifp->if_transmit = oce_multiq_start;
1709 sc->ifp->if_qflush = oce_multiq_flush;
1710 #endif
1711
1712 if_initname(sc->ifp,
1713 device_get_name(sc->dev), device_get_unit(sc->dev));
1714
1715 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1716 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1717 IFQ_SET_READY(&sc->ifp->if_snd);
1718
1719 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1720 sc->ifp->if_hwassist |= CSUM_TSO;
1721 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1722
1723 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1724 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1725 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1726
1727 #if defined(INET6) || defined(INET)
1728 sc->ifp->if_capabilities |= IFCAP_TSO;
1729 sc->ifp->if_capabilities |= IFCAP_LRO;
1730 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1731 #endif
1732
1733 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1734 sc->ifp->if_baudrate = IF_Gbps(10UL);
1735
1736 #if __FreeBSD_version >= 1000000
1737 sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1738 sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
1739 sc->ifp->if_hw_tsomaxsegsize = 4096;
1740 #endif
1741
1742 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1743
1744 return 0;
1745 }
1746
1747
1748 static void
1749 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1750 {
1751 POCE_SOFTC sc = ifp->if_softc;
1752
1753 if (ifp->if_softc != arg)
1754 return;
1755 if ((vtag == 0) || (vtag > 4095))
1756 return;
1757
1758 sc->vlan_tag[vtag] = 1;
1759 sc->vlans_added++;
1760 if (sc->vlans_added <= (sc->max_vlans + 1))
1761 oce_vid_config(sc);
1762 }
1763
1764
1765 static void
1766 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1767 {
1768 POCE_SOFTC sc = ifp->if_softc;
1769
1770 if (ifp->if_softc != arg)
1771 return;
1772 if ((vtag == 0) || (vtag > 4095))
1773 return;
1774
1775 sc->vlan_tag[vtag] = 0;
1776 sc->vlans_added--;
1777 oce_vid_config(sc);
1778 }
1779
1780
1781 /*
1782 * A max of 64 vlans can be configured in BE. If the user configures
1783 * more, place the card in vlan promiscuous mode.
1784 */
1785 static int
1786 oce_vid_config(POCE_SOFTC sc)
1787 {
1788 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1789 uint16_t ntags = 0, i;
1790 int status = 0;
1791
1792 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1793 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1794 for (i = 0; i < MAX_VLANS; i++) {
1795 if (sc->vlan_tag[i]) {
1796 vtags[ntags].vtag = i;
1797 ntags++;
1798 }
1799 }
1800 if (ntags)
1801 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1802 vtags, ntags, 1, 0);
1803 } else
1804 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1805 NULL, 0, 1, 1);
1806 return status;
1807 }
1808
1809
1810 static void
1811 oce_mac_addr_set(POCE_SOFTC sc)
1812 {
1813 uint32_t old_pmac_id = sc->pmac_id;
1814 int status = 0;
1815
1816
1817 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1818 sc->macaddr.size_of_struct);
1819 if (!status)
1820 return;
1821
1822 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1823 sc->if_id, &sc->pmac_id);
1824 if (!status) {
1825 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1826 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1827 sc->macaddr.size_of_struct);
1828 }
1829 if (status)
1830 device_printf(sc->dev, "Failed update macaddress\n");
1831
1832 }
1833
1834
1835 static int
1836 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1837 {
1838 POCE_SOFTC sc = ifp->if_softc;
1839 struct ifreq *ifr = (struct ifreq *)data;
1840 int rc = ENXIO;
1841 char cookie[32] = {0};
1842 void *priv_data = (void *)ifr->ifr_data;
1843 void *ioctl_ptr;
1844 uint32_t req_size;
1845 struct mbx_hdr req;
1846 OCE_DMA_MEM dma_mem;
1847 struct mbx_common_get_cntl_attr *fw_cmd;
1848
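	/*
	 * The user buffer is laid out as the IOCTL_COOKIE string followed by
	 * a mailbox header (struct mbx_hdr) and its payload.  The request is
	 * copied into DMA-able memory, passed through to the firmware mailbox,
	 * and the response is copied back to the same user buffer.
	 */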
1849 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1850 return EFAULT;
1851
1852 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1853 return EINVAL;
1854
1855 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1856 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1857 return EFAULT;
1858
1859 req_size = le32toh(req.u0.req.request_length);
1860 if (req_size > 65536)
1861 return EINVAL;
1862
1863 req_size += sizeof(struct mbx_hdr);
1864 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1865 if (rc)
1866 return ENOMEM;
1867
1868 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1869 rc = EFAULT;
1870 goto dma_free;
1871 }
1872
1873 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1874 if (rc) {
1875 rc = EIO;
1876 goto dma_free;
1877 }
1878
1879 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1880 rc = EFAULT;
1881
1882 /*
1883   The firmware fills in all attributes for this ioctl except the
1884   driver version, so fill that in here.
1885 */
1886 if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1887 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1888 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1889 COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1890 }
1891
1892 dma_free:
1893 oce_dma_free(sc, &dma_mem);
1894 return rc;
1895
1896 }
1897
1898 static void
1899 oce_eqd_set_periodic(POCE_SOFTC sc)
1900 {
1901 struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1902 struct oce_aic_obj *aic;
1903 struct oce_eq *eqo;
1904 uint64_t now = 0, delta;
1905 int eqd, i, num = 0;
1906 uint32_t ips = 0;
1907 int tps;
1908
1909 for (i = 0 ; i < sc->neqs; i++) {
1910 eqo = sc->eq[i];
1911 aic = &sc->aic_obj[i];
1912 /* Adaptive coalescing disabled: use the static EQ delay set from user space */
1913 if (!aic->enable) {
1914 eqd = aic->et_eqd;
1915 goto modify_eqd;
1916 }
1917
1918 now = ticks;
1919
1920 /* Overflow check */
1921 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1922 goto done;
1923
1924 delta = now - aic->ticks;
1925 tps = delta/hz;
1926
1927 /* Interrupt rate based on elapsed ticks */
1928 if(tps)
1929 ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1930
1931 if (ips > INTR_RATE_HWM)
1932 eqd = aic->cur_eqd + 20;
1933 else if (ips < INTR_RATE_LWM)
1934 eqd = aic->cur_eqd / 2;
1935 else
1936 goto done;
1937
1938 if (eqd < 10)
1939 eqd = 0;
1940
1941 /* Make sure that the eq delay is in the known range */
1942 eqd = min(eqd, aic->max_eqd);
1943 eqd = max(eqd, aic->min_eqd);
1944
1945 modify_eqd:
1946 if (eqd != aic->cur_eqd) {
1947 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1948 set_eqd[num].eq_id = eqo->eq_id;
1949 aic->cur_eqd = eqd;
1950 num++;
1951 }
1952 done:
1953 aic->intr_prev = eqo->intr;
1954 aic->ticks = now;
1955 }
1956
1957 /* Is there at least one EQ that needs to be modified? */
1958 if(num)
1959 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1960 }
1961
1962 static void oce_detect_hw_error(POCE_SOFTC sc)
1963 {
1964
1965 uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
1966 uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1967 uint32_t i;
1968
1969 if (sc->hw_error)
1970 return;
1971
1972 if (IS_XE201(sc)) {
1973 sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
1974 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1975 sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
1976 sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
1977 }
1978 } else {
1979 ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
1980 ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
1981 ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
1982 ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
1983
1984 ue_low = (ue_low & ~ue_low_mask);
1985 ue_high = (ue_high & ~ue_high_mask);
1986 }
1987
1988 /* On certain platforms BE hardware can report spurious UEs.
1989  * On a real UE the hardware stops working on its own, so
1990  * hw_error is not set here for UE detection.
1991  */
1992 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1993 sc->hw_error = TRUE;
1994 device_printf(sc->dev, "Error detected in the card\n");
1995 }
1996
1997 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1998 device_printf(sc->dev,
1999 "ERR: sliport status 0x%x\n", sliport_status);
2000 device_printf(sc->dev,
2001 "ERR: sliport error1 0x%x\n", sliport_err1);
2002 device_printf(sc->dev,
2003 "ERR: sliport error2 0x%x\n", sliport_err2);
2004 }
2005
2006 if (ue_low) {
2007 for (i = 0; ue_low; ue_low >>= 1, i++) {
2008 if (ue_low & 1)
2009 device_printf(sc->dev, "UE: %s bit set\n",
2010 ue_status_low_desc[i]);
2011 }
2012 }
2013
2014 if (ue_high) {
2015 for (i = 0; ue_high; ue_high >>= 1, i++) {
2016 if (ue_high & 1)
2017 device_printf(sc->dev, "UE: %s bit set\n",
2018 ue_status_hi_desc[i]);
2019 }
2020 }
2021
2022 }
2023
2024
2025 static void
2026 oce_local_timer(void *arg)
2027 {
2028 POCE_SOFTC sc = arg;
2029 int i = 0;
2030
2031 oce_detect_hw_error(sc);
2032 oce_refresh_nic_stats(sc);
2033 oce_refresh_queue_stats(sc);
2034 oce_mac_addr_set(sc);
2035
2036 /* TX Watch Dog*/
2037 for (i = 0; i < sc->nwqs; i++)
2038 oce_tx_restart(sc, sc->wq[i]);
2039
2040 /* calculate and set the eq delay for optimal interrupt rate */
2041 if (IS_BE(sc) || IS_SH(sc))
2042 oce_eqd_set_periodic(sc);
2043
2044 callout_reset(&sc->timer, hz, oce_local_timer, sc);
2045 }
2046
2047
2048 /* NOTE : This should only be called holding
2049 * DEVICE_LOCK.
2050 */
2051 static void
2052 oce_if_deactivate(POCE_SOFTC sc)
2053 {
2054 int i, mtime = 0;
2055 int wait_req = 0;
2056 struct oce_rq *rq;
2057 struct oce_wq *wq;
2058 struct oce_eq *eq;
2059
2060 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2061
2062 /*Wait for max of 400ms for TX completions to be done */
2063 while (mtime < 400) {
2064 wait_req = 0;
2065 for_all_wq_queues(sc, wq, i) {
2066 if (wq->ring->num_used) {
2067 wait_req = 1;
2068 DELAY(1);
2069 break;
2070 }
2071 }
2072 mtime += 1;
2073 if (!wait_req)
2074 break;
2075 }
2076
2077 /* Stop intrs and finish any bottom halves pending */
2078 oce_hw_intr_disable(sc);
2079
2080 /* Since taskqueue_drain takes the Giant lock, we should not hold
2081    any other lock. So drop the device lock and reacquire it after
2082    taskqueue_drain completes.
2083 */
2084 UNLOCK(&sc->dev_lock);
2085 for (i = 0; i < sc->intr_count; i++) {
2086 if (sc->intrs[i].tq != NULL) {
2087 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2088 }
2089 }
2090 LOCK(&sc->dev_lock);
2091
2092 /* Delete RX queue in card with flush param */
2093 oce_stop_rx(sc);
2094
2095 /* Invalidate any pending cq and eq entries*/
2096 for_all_evnt_queues(sc, eq, i)
2097 oce_drain_eq(eq);
2098 for_all_rq_queues(sc, rq, i)
2099 oce_drain_rq_cq(rq);
2100 for_all_wq_queues(sc, wq, i)
2101 oce_drain_wq_cq(wq);
2102
2103 /* We still need to receive MCC async events,
2104    so re-enable interrupts and arm the first EQ.
2105 */
2106 oce_hw_intr_enable(sc);
2107 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2108
2109 DELAY(10);
2110 }
2111
2112
2113 static void
2114 oce_if_activate(POCE_SOFTC sc)
2115 {
2116 struct oce_eq *eq;
2117 struct oce_rq *rq;
2118 struct oce_wq *wq;
2119 int i, rc = 0;
2120
2121 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2122
2123 oce_hw_intr_disable(sc);
2124
2125 oce_start_rx(sc);
2126
2127 for_all_rq_queues(sc, rq, i) {
2128 rc = oce_start_rq(rq);
2129 if (rc)
2130 device_printf(sc->dev, "Unable to start RX\n");
2131 }
2132
2133 for_all_wq_queues(sc, wq, i) {
2134 rc = oce_start_wq(wq);
2135 if (rc)
2136 device_printf(sc->dev, "Unable to start TX\n");
2137 }
2138
2139
2140 for_all_evnt_queues(sc, eq, i)
2141 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2142
2143 oce_hw_intr_enable(sc);
2144
2145 }
2146
2147 static void
2148 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2149 {
2150 /* Update Link status */
2151 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2152 ASYNC_EVENT_LINK_UP) {
2153 sc->link_status = ASYNC_EVENT_LINK_UP;
2154 if_link_state_change(sc->ifp, LINK_STATE_UP);
2155 } else {
2156 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2157 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2158 }
2159 }
2160
2161
2162 /* Handle the Completion Queue for the Mailbox/Async notifications */
2163 uint16_t
2164 oce_mq_handler(void *arg)
2165 {
2166 struct oce_mq *mq = (struct oce_mq *)arg;
2167 POCE_SOFTC sc = mq->parent;
2168 struct oce_cq *cq = mq->cq;
2169 int num_cqes = 0, evt_type = 0, optype = 0;
2170 struct oce_mq_cqe *cqe;
2171 struct oce_async_cqe_link_state *acqe;
2172 struct oce_async_event_grp5_pvid_state *gcqe;
2173 struct oce_async_event_qnq *dbgcqe;
2174
2175
2176 bus_dmamap_sync(cq->ring->dma.tag,
2177 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2178 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2179
2180 while (cqe->u0.dw[3]) {
2181 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2182 if (cqe->u0.s.async_event) {
2183 evt_type = cqe->u0.s.event_type;
2184 optype = cqe->u0.s.async_type;
2185 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
2186 /* Link status evt */
2187 acqe = (struct oce_async_cqe_link_state *)cqe;
2188 process_link_state(sc, acqe);
2189 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
2190 (optype == ASYNC_EVENT_PVID_STATE)) {
2191 /* GRP5 PVID */
2192 gcqe =
2193 (struct oce_async_event_grp5_pvid_state *)cqe;
2194 if (gcqe->enabled)
2195 sc->pvid = gcqe->tag & VLAN_VID_MASK;
2196 else
2197 sc->pvid = 0;
2198
2199 }
2200 else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2201 optype == ASYNC_EVENT_DEBUG_QNQ) {
2202 dbgcqe =
2203 (struct oce_async_event_qnq *)cqe;
2204 if(dbgcqe->valid)
2205 sc->qnqid = dbgcqe->vlan_tag;
2206 sc->qnq_debug_event = TRUE;
2207 }
2208 }
2209 cqe->u0.dw[3] = 0;
2210 RING_GET(cq->ring, 1);
2211 bus_dmamap_sync(cq->ring->dma.tag,
2212 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2213 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2214 num_cqes++;
2215 }
2216
2217 if (num_cqes)
2218 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2219
2220 return 0;
2221 }
2222
2223
2224 static void
2225 setup_max_queues_want(POCE_SOFTC sc)
2226 {
2227 /* Check if this is a Flex10/UMC/VNIC function; if so, don't use RSS */
2228 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2229 (sc->function_mode & FNM_UMC_MODE) ||
2230 (sc->function_mode & FNM_VNIC_MODE) ||
2231 (!is_rss_enabled(sc)) ||
2232 IS_BE2(sc)) {
2233 sc->nrqs = 1;
2234 sc->nwqs = 1;
2235 } else {
2236 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2237 sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2238 }
2239
2240 if (IS_BE2(sc) && is_rss_enabled(sc))
2241 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2242 }
2243
2244
2245 static void
2246 update_queues_got(POCE_SOFTC sc)
2247 {
2248 if (is_rss_enabled(sc)) {
2249 sc->nrqs = sc->intr_count + 1;
2250 sc->nwqs = sc->intr_count;
2251 } else {
2252 sc->nrqs = 1;
2253 sc->nwqs = 1;
2254 }
2255
2256 if (IS_BE2(sc))
2257 sc->nwqs = 1;
2258 }
2259
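/*
 * Workaround helpers: on BE3 A1 ASICs in QnQ/UMC mode, IPv6 packets with
 * extension headers are treated as candidates for a TX ASIC stall.
 * oce_tx() detects them with oce_tx_asic_stall_verify() and inserts the
 * VLAN/QnQ tags in software (oce_insert_vlan_tag()) instead of relying on
 * hardware tag insertion.
 */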
2260 static int
2261 oce_check_ipv6_ext_hdr(struct mbuf *m)
2262 {
2263 struct ether_header *eh = mtod(m, struct ether_header *);
2264 caddr_t m_datatemp = m->m_data;
2265
2266 if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2267 m->m_data += sizeof(struct ether_header);
2268 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2269
2270 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2271 (ip6->ip6_nxt != IPPROTO_UDP)){
2272 struct ip6_ext *ip6e = NULL;
2273 m->m_data += sizeof(struct ip6_hdr);
2274
2275 ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2276 if(ip6e->ip6e_len == 0xff) {
2277 m->m_data = m_datatemp;
2278 return TRUE;
2279 }
2280 }
2281 m->m_data = m_datatemp;
2282 }
2283 return FALSE;
2284 }
2285
2286 static int
2287 is_be3_a1(POCE_SOFTC sc)
2288 {
2289 if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2290 return TRUE;
2291 }
2292 return FALSE;
2293 }
2294
2295 static struct mbuf *
2296 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2297 {
2298 uint16_t vlan_tag = 0;
2299
2300 if(!M_WRITABLE(m))
2301 return NULL;
2302
2303 /* Embed vlan tag in the packet if it is not part of it */
2304 if(m->m_flags & M_VLANTAG) {
2305 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2306 m->m_flags &= ~M_VLANTAG;
2307 }
2308
2309 /* if UMC, ignore vlan tag insertion and instead insert pvid */
2310 if(sc->pvid) {
2311 if(!vlan_tag)
2312 vlan_tag = sc->pvid;
2313 *complete = FALSE;
2314 }
2315
2316 if(vlan_tag) {
2317 m = ether_vlanencap(m, vlan_tag);
2318 }
2319
2320 if(sc->qnqid) {
2321 m = ether_vlanencap(m, sc->qnqid);
2322 *complete = FALSE;
2323 }
2324 return m;
2325 }
2326
2327 static int
2328 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2329 {
2330 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2331 oce_check_ipv6_ext_hdr(m)) {
2332 return TRUE;
2333 }
2334 return FALSE;
2335 }
2336
2337 static void
2338 oce_get_config(POCE_SOFTC sc)
2339 {
2340 int rc = 0;
2341 uint32_t max_rss = 0;
2342
2343 if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2344 max_rss = OCE_LEGACY_MODE_RSS;
2345 else
2346 max_rss = OCE_MAX_RSS;
2347
2348 if (!IS_BE(sc)) {
2349 rc = oce_get_profile_config(sc, max_rss);
2350 if (rc) {
2351 sc->nwqs = OCE_MAX_WQ;
2352 sc->nrssqs = max_rss;
2353 sc->nrqs = sc->nrssqs + 1;
2354 }
2355 }
2356 else { /* For BE3 don't rely on fw for determining the resources */
2357 sc->nrssqs = max_rss;
2358 sc->nrqs = sc->nrssqs + 1;
2359 sc->nwqs = OCE_MAX_WQ;
2360 sc->max_vlans = MAX_VLANFILTER_SIZE;
2361 }
2362 }