FreeBSD/Linux Kernel Cross Reference
sys/dev/qlxge/qls_os.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_os.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC       0x1077
#endif

#ifndef PCI_DEVICE_QLOGIC_8000
#define PCI_DEVICE_QLOGIC_8000  0x8000
#endif

#define PCI_QLOGIC_DEV8000 \
        ((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qls_alloc_parent_dma_tag(qla_host_t *ha);
static void qls_free_parent_dma_tag(qla_host_t *ha);

static void qls_flush_xmt_bufs(qla_host_t *ha);

static int qls_alloc_rcv_bufs(qla_host_t *ha);
static void qls_free_rcv_bufs(qla_host_t *ha);

static void qls_init_ifnet(device_t dev, qla_host_t *ha);
static void qls_release(qla_host_t *ha);
static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static void qls_stop(qla_host_t *ha);
static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
static void qls_tx_done(void *context, int pending);

static int qls_config_lro(qla_host_t *ha);
static void qls_free_lro(qla_host_t *ha);

static void qls_error_recovery(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qls_pci_probe (device_t);
static int qls_pci_attach (device_t);
static int qls_pci_detach (device_t);

static void qls_start(struct ifnet *ifp);
static void qls_init(void *arg);
static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qls_media_change(struct ifnet *ifp);
static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, qls_pci_probe),
        DEVMETHOD(device_attach, qls_pci_attach),
        DEVMETHOD(device_detach, qls_pci_detach),
        { 0, 0 }
};

static driver_t qla_pci_driver = {
        "ql", qla_pci_methods, sizeof (qla_host_t),
};

DRIVER_MODULE(qla8000, pci, qla_pci_driver, 0, 0);

MODULE_DEPEND(qla8000, pci, 1, 1, 1);
MODULE_DEPEND(qla8000, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");

static char dev_str[64];
static char ver_str[64];

/*
 * Name: qls_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qls_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_DEV8000:
                snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
                    "Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
                    QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                    QLA_VERSION_BUILD);
                snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
                    QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                    QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}
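
/*
 * Name: qls_sysctl_get_drvr_stats
 * Function: Sysctl handler; writing 1 to the "drvr_stats" node (for
 * example "sysctl dev.ql.0.drvr_stats=1", assuming unit 0) dumps the
 * driver-maintained per-ring transmit/receive statistics and error
 * counters to the console.
 */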

static int
qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->num_tx_rings; i++) {
                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].tx_frames= %p\n",
                            __func__, i,
                            (void *)ha->tx_ring[i].tx_frames);

                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].tx_tso_frames= %p\n",
                            __func__, i,
                            (void *)ha->tx_ring[i].tx_tso_frames);

                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].tx_vlan_frames= %p\n",
                            __func__, i,
                            (void *)ha->tx_ring[i].tx_vlan_frames);

                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].txr_free= 0x%08x\n",
                            __func__, i,
                            ha->tx_ring[i].txr_free);

                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].txr_next= 0x%08x\n",
                            __func__, i,
                            ha->tx_ring[i].txr_next);

                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].txr_done= 0x%08x\n",
                            __func__, i,
                            ha->tx_ring[i].txr_done);

                        device_printf(ha->pci_dev,
                            "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
                            __func__, i,
                            *(ha->tx_ring[i].txr_cons_vaddr));
                }

                for (i = 0; i < ha->num_rx_rings; i++) {
                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].rx_int= %p\n",
                            __func__, i,
                            (void *)ha->rx_ring[i].rx_int);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].rss_int= %p\n",
                            __func__, i,
                            (void *)ha->rx_ring[i].rss_int);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].lbq_next= 0x%08x\n",
                            __func__, i,
                            ha->rx_ring[i].lbq_next);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].lbq_free= 0x%08x\n",
                            __func__, i,
                            ha->rx_ring[i].lbq_free);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].lbq_in= 0x%08x\n",
                            __func__, i,
                            ha->rx_ring[i].lbq_in);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].sbq_next= 0x%08x\n",
                            __func__, i,
                            ha->rx_ring[i].sbq_next);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].sbq_free= 0x%08x\n",
                            __func__, i,
                            ha->rx_ring[i].sbq_free);

                        device_printf(ha->pci_dev,
                            "%s: rx_ring[%d].sbq_in= 0x%08x\n",
                            __func__, i,
                            ha->rx_ring[i].sbq_in);
                }

                device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
                    __func__, ha->err_m_getcl);
                device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
                    __func__, ha->err_m_getjcl);
                device_printf(ha->pci_dev,
                    "%s: err_tx_dmamap_create = 0x%08x\n",
                    __func__, ha->err_tx_dmamap_create);
                device_printf(ha->pci_dev,
                    "%s: err_tx_dmamap_load = 0x%08x\n",
                    __func__, ha->err_tx_dmamap_load);
                device_printf(ha->pci_dev,
                    "%s: err_tx_defrag = 0x%08x\n",
                    __func__, ha->err_tx_defrag);
        }
        return (err);
}

static void
qls_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "version", CTLFLAG_RD,
            ver_str, 0, "Driver Version");

        qls_dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "debug", CTLFLAG_RW,
            &qls_dbg_level, qls_dbg_level, "Debug Level");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "drvr_stats",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
            qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        return;
}
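
/*
 * Name: qls_watchdog
 * Function: Periodic callout; schedules the error-recovery task when a
 * recovery has been requested, restarts transmits when the send queue is
 * non-empty and the interface is running, and re-arms itself every
 * QLA_WATCHDOG_CALLOUT_TICKS ticks.
 */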

static void
qls_watchdog(void *arg)
{
        qla_host_t *ha = arg;
        struct ifnet *ifp;

        ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
                ha->qla_watchdog_exited = 1;
                return;
        }
        ha->qla_watchdog_exited = 0;

        if (!ha->flags.qla_watchdog_pause) {
                if (ha->qla_initiate_recovery) {
                        ha->qla_watchdog_paused = 1;
                        ha->qla_initiate_recovery = 0;
                        ha->err_inject = 0;
                        taskqueue_enqueue(ha->err_tq, &ha->err_task);

                } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
                        taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                }

                ha->qla_watchdog_paused = 0;
        } else {
                ha->qla_watchdog_paused = 1;
        }

        ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
            qls_watchdog, ha);

        return;
}

/*
 * Name: qls_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qls_pci_attach(device_t dev)
{
        qla_host_t *ha = NULL;
        int i;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
                device_printf(dev, "device is not QLE8000\n");
                return (ENXIO);
        }

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

        pci_enable_busmaster(dev);

        ha->reg_rid = PCIR_BAR(1);
        ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
            RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

        ha->reg_rid1 = PCIR_BAR(3);
        ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &ha->reg_rid1, RF_ACTIVE);

        if (ha->pci_reg1 == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

        mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
        mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

        qls_add_sysctls(ha);
        qls_hw_add_sysctls(ha);

        ha->flags.lock_init = 1;

        ha->msix_count = pci_msix_count(dev);

        if (ha->msix_count < qls_get_msix_count(ha)) {
                device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
                    ha->msix_count);
                goto qls_pci_attach_err;
        }

        ha->msix_count = qls_get_msix_count(ha);

        device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
            " pci_reg %p pci_reg1 %p\n", __func__, ha,
            ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);

        if (pci_alloc_msix(dev, &ha->msix_count)) {
                device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
                    ha->msix_count);
                ha->msix_count = 0;
                goto qls_pci_attach_err;
        }

        for (i = 0; i < ha->num_rx_rings; i++) {
                ha->irq_vec[i].cq_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 1 + i;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                    &ha->irq_vec[i].irq_rid,
                    (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qls_pci_attach_err;
                }

                if (bus_setup_intr(dev, ha->irq_vec[i].irq,
                    (INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
                    &ha->irq_vec[i], &ha->irq_vec[i].handle)) {
                        device_printf(dev,
                            "could not setup interrupt\n");
                        goto qls_pci_attach_err;
                }
        }

        qls_rd_nic_params(ha);

        /* allocate parent dma tag */
        if (qls_alloc_parent_dma_tag(ha)) {
                device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
                    __func__);
                goto qls_pci_attach_err;
        }

        /* alloc all dma buffers */
        if (qls_alloc_dma(ha)) {
                device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
                goto qls_pci_attach_err;
        }

        /* create the o.s ethernet interface */
        qls_init_ifnet(dev, ha);

        ha->flags.qla_watchdog_active = 1;
        ha->flags.qla_watchdog_pause = 1;

        TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
        ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
            taskqueue_thread_enqueue, &ha->tx_tq);
        taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
            device_get_nameunit(ha->pci_dev));

        callout_init(&ha->tx_callout, 1);
        ha->flags.qla_callout_init = 1;

        /* create ioctl device interface */
        if (qls_make_cdev(ha)) {
                device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
                goto qls_pci_attach_err;
        }

        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
            qls_watchdog, ha);

        TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
        ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
            taskqueue_thread_enqueue, &ha->err_tq);
        taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
            device_get_nameunit(ha->pci_dev));

        QL_DPRINT2((dev, "%s: exit 0\n", __func__));
        return (0);

qls_pci_attach_err:

        qls_release(ha);

        QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name: qls_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qls_pci_detach(device_t dev)
{
        qla_host_t *ha = NULL;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        (void)QLA_LOCK(ha, __func__, 0);
        qls_stop(ha);
        QLA_UNLOCK(ha, __func__);

        qls_release(ha);

        QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * Name: qls_release
 * Function: Releases the resources allocated for the device
 */
static void
qls_release(qla_host_t *ha)
{
        device_t dev;
        int i;

        dev = ha->pci_dev;

        if (ha->err_tq) {
                taskqueue_drain(ha->err_tq, &ha->err_task);
                taskqueue_free(ha->err_tq);
        }

        if (ha->tx_tq) {
                taskqueue_drain(ha->tx_tq, &ha->tx_task);
                taskqueue_free(ha->tx_tq);
        }

        qls_del_cdev(ha);

        if (ha->flags.qla_watchdog_active) {
                ha->flags.qla_watchdog_exit = 1;

                while (ha->qla_watchdog_exited == 0)
                        qls_mdelay(__func__, 1);
        }

        if (ha->flags.qla_callout_init)
                callout_stop(&ha->tx_callout);

        if (ha->ifp != NULL)
                ether_ifdetach(ha->ifp);

        qls_free_dma(ha);
        qls_free_parent_dma_tag(ha);

        for (i = 0; i < ha->num_rx_rings; i++) {
                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                            ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                            ha->irq_vec[i].irq_rid,
                            ha->irq_vec[i].irq);
                }
        }

        if (ha->msix_count)
                pci_release_msi(dev);

        if (ha->flags.lock_init) {
                mtx_destroy(&ha->tx_lock);
                mtx_destroy(&ha->hw_lock);
        }

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
                    ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
                    ha->pci_reg1);
}

/*
 * DMA Related Functions
 */
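
/*
 * Name: qls_dmamap_callback
 * Function: bus_dmamap_load() callback; stores the physical address of
 * the (single) DMA segment in the caller-supplied bus_addr_t, or zero
 * on error so the caller can detect a failed load.
 */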
static void
qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
        }

        *((bus_addr_t *)arg) = segs[0].ds_addr;

        return;
}
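
/*
 * Name: qls_alloc_dmabuf
 * Function: Allocates a DMA-able buffer described by dma_buf (the caller
 * sets dma_buf->size and dma_buf->alignment): creates a tag, allocates
 * zeroed coherent memory, and loads the map to obtain the bus address.
 */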

int
qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int ret = 0;
        device_t dev;
        bus_addr_t b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                ha->parent_tag,                 /* parent */
                dma_buf->alignment,
                ((bus_size_t)(1ULL << 32)),     /* boundary */
                BUS_SPACE_MAXADDR,              /* lowaddr */
                BUS_SPACE_MAXADDR,              /* highaddr */
                NULL, NULL,                     /* filter, filterarg */
                dma_buf->size,                  /* maxsize */
                1,                              /* nsegments */
                dma_buf->size,                  /* maxsegsize */
                0,                              /* flags */
                NULL, NULL,                     /* lockfunc, lockarg */
                &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
            (void **)&dma_buf->dma_b,
            (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
            &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
            dma_buf->dma_map,
            dma_buf->dma_b,
            dma_buf->size,
            qls_dmamap_callback,
            &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                    dma_buf->dma_map);
                ret = -1;
                goto qls_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qls_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
            __func__, ret, (void *)dma_buf->dma_tag,
            (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
            dma_buf->size));

        return (ret);
}

void
qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qls_alloc_parent_dma_tag(qla_host_t *ha)
{
        int ret;
        device_t dev;

        dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                bus_get_dma_tag(dev),           /* parent */
                1, ((bus_size_t)(1ULL << 32)),  /* alignment, boundary */
                BUS_SPACE_MAXADDR,              /* lowaddr */
                BUS_SPACE_MAXADDR,              /* highaddr */
                NULL, NULL,                     /* filter, filterarg */
                BUS_SPACE_MAXSIZE_32BIT,        /* maxsize */
                0,                              /* nsegments */
                BUS_SPACE_MAXSIZE_32BIT,        /* maxsegsize */
                0,                              /* flags */
                NULL, NULL,                     /* lockfunc, lockarg */
                &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                    __func__);
                return (-1);
        }

        ha->flags.parent_tag = 1;

        return (0);
}

static void
qls_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qls_init_ifnet
 * Function: Creates the network device interface and registers it with
 * the O.S.
 */
static void
qls_init_ifnet(device_t dev, qla_host_t *ha)
{
        struct ifnet *ifp;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ifp = ha->ifp = if_alloc(IFT_ETHER);

        if (ifp == NULL)
                panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_init = qls_init;
        ifp->if_softc = ha;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = qls_ioctl;
        ifp->if_start = qls_start;

        IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha));
        ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha);
        IFQ_SET_READY(&ifp->if_snd);

        ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
        if (ha->max_frame_size <= MCLBYTES) {
                ha->msize = MCLBYTES;
        } else if (ha->max_frame_size <= MJUMPAGESIZE) {
                ha->msize = MJUMPAGESIZE;
        } else
                ha->msize = MJUM9BYTES;

        ether_ifattach(ifp, qls_get_mac_addr(ha));

        ifp->if_capabilities = IFCAP_JUMBO_MTU;

        ifp->if_capabilities |= IFCAP_HWCSUM;
        ifp->if_capabilities |= IFCAP_VLAN_MTU;

        ifp->if_capabilities |= IFCAP_TSO4;
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
        ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
        ifp->if_capabilities |= IFCAP_LINKSTATE;

        ifp->if_capenable = ifp->if_capabilities;

        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);

        ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
            NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

        ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

        QL_DPRINT2((dev, "%s: exit\n", __func__));

        return;
}
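
/*
 * Name: qls_init_locked
 * Function: (Re)initializes the interface with the driver lock held:
 * stops the port, flushes transmit buffers, reallocates receive buffers,
 * configures LRO and checksum/TSO offload, and brings up the hardware
 * interface.
 */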

static void
qls_init_locked(qla_host_t *ha)
{
        struct ifnet *ifp = ha->ifp;

        qls_stop(ha);

        qls_flush_xmt_bufs(ha);

        if (qls_alloc_rcv_bufs(ha) != 0)
                return;

        if (qls_config_lro(ha))
                return;

        bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = CSUM_IP;
        ifp->if_hwassist |= CSUM_TCP;
        ifp->if_hwassist |= CSUM_UDP;
        ifp->if_hwassist |= CSUM_TSO;

        if (qls_init_hw_if(ha) == 0) {
                ifp = ha->ifp;
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                ha->flags.qla_watchdog_pause = 0;
        }

        return;
}

static void
qls_init(void *arg)
{
        qla_host_t *ha;

        ha = (qla_host_t *)arg;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        (void)QLA_LOCK(ha, __func__, 0);
        qls_init_locked(ha);
        QLA_UNLOCK(ha, __func__);

        QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
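
/*
 * Name: qls_copy_maddr / qls_set_multi
 * Function: if_foreach_llmaddr() callback that flattens the interface's
 * link-level multicast addresses into a table of up to
 * Q8_MAX_NUM_MULTICAST_ADDRS entries, which qls_set_multi() then
 * programs into the hardware filter.
 */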

static u_int
qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
        uint8_t *mta = arg;

        if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
                return (0);

        bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

        return (1);
}

static void
qls_set_multi(qla_host_t *ha, uint32_t add_multi)
{
        uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
        struct ifnet *ifp = ha->ifp;
        int mcnt;

        mcnt = if_foreach_llmaddr(ifp, qls_copy_maddr, mta);

        if (QLA_LOCK(ha, __func__, 1) == 0) {
                qls_hw_set_multi(ha, mta, mcnt, add_multi);
                QLA_UNLOCK(ha, __func__);
        }

        return;
}
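
/*
 * Name: qls_ioctl
 * Function: Interface ioctl handler: interface address assignment, MTU
 * changes (up to QLA_MAX_MTU), interface flags, multicast filter
 * updates, media requests, and capability toggles.
 */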

static int
qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        int ret = 0;
        struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
        struct ifaddr *ifa = (struct ifaddr *)data;
#endif
        qla_host_t *ha;

        ha = (qla_host_t *)ifp->if_softc;

        switch (cmd) {
        case SIOCSIFADDR:
                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
                    __func__, cmd));

#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                (void)QLA_LOCK(ha, __func__, 0);
                                qls_init_locked(ha);
                                QLA_UNLOCK(ha, __func__);
                        }
                        QL_DPRINT4((ha->pci_dev,
                            "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
                            __func__, cmd,
                            ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

                        arp_ifinit(ifp, ifa);
                        break;
                }
#endif
                ether_ioctl(ifp, cmd, data);
                break;

        case SIOCSIFMTU:
                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
                    __func__, cmd));

                if (ifr->ifr_mtu > QLA_MAX_MTU) {
                        ret = EINVAL;
                } else {
                        (void) QLA_LOCK(ha, __func__, 0);

                        ifp->if_mtu = ifr->ifr_mtu;
                        ha->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

                        QLA_UNLOCK(ha, __func__);

                        if (ret)
                                ret = EINVAL;
                }

                break;

        case SIOCSIFFLAGS:
                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
                    __func__, cmd));

                (void)QLA_LOCK(ha, __func__, 0);

                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ ha->if_flags) &
                                    IFF_PROMISC) {
                                        ret = qls_set_promisc(ha);
                                } else if ((ifp->if_flags ^ ha->if_flags) &
                                    IFF_ALLMULTI) {
                                        ret = qls_set_allmulti(ha);
                                }
                        } else {
                                ha->max_frame_size = ifp->if_mtu +
                                    ETHER_HDR_LEN + ETHER_CRC_LEN;
                                qls_init_locked(ha);
                        }
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                qls_stop(ha);
                        ha->if_flags = ifp->if_flags;
                }

                QLA_UNLOCK(ha, __func__);
                break;

        case SIOCADDMULTI:
                QL_DPRINT4((ha->pci_dev,
                    "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        qls_set_multi(ha, 1);
                }
                break;

        case SIOCDELMULTI:
                QL_DPRINT4((ha->pci_dev,
                    "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        qls_set_multi(ha, 0);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                QL_DPRINT4((ha->pci_dev,
                    "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
                    __func__, cmd));
                ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
                break;

        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
                    __func__, cmd));

                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

                if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                        qls_init(ha);

                VLAN_CAPABILITIES(ifp);
                break;
        }

        default:
                QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
                    __func__, cmd));
                ret = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (ret);
}

static int
qls_media_change(struct ifnet *ifp)
{
        qla_host_t *ha;
        struct ifmedia *ifm;
        int ret = 0;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        ifm = &ha->media;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                ret = EINVAL;

        QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

        return (ret);
}

static void
qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        qla_host_t *ha;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        qls_update_link_state(ha);
        if (ha->link_up) {
                ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
        }

        QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
            (ha->link_up ? "link_up" : "link_down")));

        return;
}
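
/*
 * Name: qls_start
 * Function: if_start handler; reaps completed transmits, verifies link
 * state, then dequeues and transmits frames until the send queue drains
 * or a frame cannot be sent and is re-queued.
 */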

static void
qls_start(struct ifnet *ifp)
{
        int i, ret = 0;
        struct mbuf *m_head;
        qla_host_t *ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

        if (!mtx_trylock(&ha->tx_lock)) {
                QL_DPRINT8((ha->pci_dev,
                    "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
                return;
        }

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
            IFF_DRV_RUNNING) {
                for (i = 0; i < ha->num_tx_rings; i++) {
                        ret |= qls_hw_tx_done(ha, i);
                }

                if (ret == 0)
                        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        }

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING) {
                QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
                QLA_TX_UNLOCK(ha);
                return;
        }

        if (!ha->link_up) {
                qls_update_link_state(ha);
                if (!ha->link_up) {
                        QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
                        QLA_TX_UNLOCK(ha);
                        return;
                }
        }

        while (ifp->if_snd.ifq_head != NULL) {
                IF_DEQUEUE(&ifp->if_snd, m_head);

                if (m_head == NULL) {
                        QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
                            __func__));
                        break;
                }

                if (qls_send(ha, &m_head)) {
                        if (m_head == NULL)
                                break;
                        QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IF_PREPEND(&ifp->if_snd, m_head);
                        break;
                }
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);
        }

        QLA_TX_UNLOCK(ha);
        QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
        return;
}
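
/*
 * Name: qls_send
 * Function: DMA-maps an mbuf chain (defragmenting once on EFBIG) and
 * hands the segment list to qls_hw_send(). The transmit ring is chosen
 * by masking the mbuf's flowid; this assumes num_tx_rings is a power of
 * two.
 */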

static int
qls_send(qla_host_t *ha, struct mbuf **m_headp)
{
        bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
        bus_dmamap_t map;
        int nsegs;
        int ret = -1;
        uint32_t tx_idx;
        struct mbuf *m_head = *m_headp;
        uint32_t txr_idx = 0;

        QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

        /* check if flowid is set */
        if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
                txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);

        tx_idx = ha->tx_ring[txr_idx].txr_next;

        map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

        ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
            BUS_DMA_NOWAIT);

        if (ret == EFBIG) {
                struct mbuf *m;

                QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
                    m_head->m_pkthdr.len));

                m = m_defrag(m_head, M_NOWAIT);
                if (m == NULL) {
                        ha->err_tx_defrag++;
                        m_freem(m_head);
                        *m_headp = NULL;
                        device_printf(ha->pci_dev,
                            "%s: m_defrag() = NULL [%d]\n",
                            __func__, ret);
                        return (ENOBUFS);
                }
                m_head = m;
                *m_headp = m_head;

                if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
                    segs, &nsegs, BUS_DMA_NOWAIT))) {
                        ha->err_tx_dmamap_load++;

                        device_printf(ha->pci_dev,
                            "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
                            __func__, ret, m_head->m_pkthdr.len);

                        if (ret != ENOMEM) {
                                m_freem(m_head);
                                *m_headp = NULL;
                        }
                        return (ret);
                }

        } else if (ret) {
                ha->err_tx_dmamap_load++;

                device_printf(ha->pci_dev,
                    "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
                    __func__, ret, m_head->m_pkthdr.len);

                if (ret != ENOMEM) {
                        m_freem(m_head);
                        *m_headp = NULL;
                }
                return (ret);
        }

        QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));

        bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
                ha->tx_ring[txr_idx].count++;
                ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
                ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
        } else {
                if (ret == EINVAL) {
                        if (m_head)
                                m_freem(m_head);
                        *m_headp = NULL;
                }
        }

        QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
        return (ret);
}
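
/*
 * Name: qls_stop
 * Function: Marks the interface down, pauses the watchdog, tears down
 * the hardware interface, and releases LRO state and the transmit and
 * receive buffers.
 */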

static void
qls_stop(qla_host_t *ha)
{
        struct ifnet *ifp = ha->ifp;

        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        ha->flags.qla_watchdog_pause = 1;

        while (!ha->qla_watchdog_paused)
                qls_mdelay(__func__, 1);

        qls_del_hw_if(ha);

        qls_free_lro(ha);

        qls_flush_xmt_bufs(ha);
        qls_free_rcv_bufs(ha);

        return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
/*
 * Release an mbuf after it has been sent on the wire
 */
static void
qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        if (txb->m_head) {
                bus_dmamap_unload(ha->tx_tag, txb->map);

                m_freem(txb->m_head);
                txb->m_head = NULL;
        }

        QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static void
qls_flush_xmt_bufs(qla_host_t *ha)
{
        int i, j;

        for (j = 0; j < ha->num_tx_rings; j++) {
                for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
                        qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
        }

        return;
}
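
/*
 * Name: qls_alloc_rcv_mbufs
 * Function: Creates a DMA map for each receive descriptor of ring 'r',
 * attaches an mbuf to each, and writes the mbuf's physical address into
 * the corresponding small buffer queue entry for the HBA.
 */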

static int
qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
{
        int i, j, ret = 0;
        qla_rx_buf_t *rxb;
        qla_rx_ring_t *rx_ring;
        volatile q81_bq_addr_e_t *sbq_e;

        rx_ring = &ha->rx_ring[r];

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
                rxb = &rx_ring->rx_buf[i];

                ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

                if (ret) {
                        device_printf(ha->pci_dev,
                            "%s: dmamap[%d, %d] failed\n", __func__, r, i);

                        for (j = 0; j < i; j++) {
                                rxb = &rx_ring->rx_buf[j];
                                bus_dmamap_destroy(ha->rx_tag, rxb->map);
                        }
                        goto qls_alloc_rcv_mbufs_err;
                }
        }

        rx_ring = &ha->rx_ring[r];

        sbq_e = rx_ring->sbq_vaddr;

        rxb = &rx_ring->rx_buf[0];

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
                if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {
                        /*
                         * set the physical address in the
                         * corresponding descriptor entry in the
                         * receive ring/queue for the hba
                         */

                        sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
                        sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;

                } else {
                        device_printf(ha->pci_dev,
                            "%s: qls_get_mbuf [%d, %d] failed\n",
                            __func__, r, i);
                        bus_dmamap_destroy(ha->rx_tag, rxb->map);
                        goto qls_alloc_rcv_mbufs_err;
                }

                rxb++;
                sbq_e++;
        }
        return (0);

qls_alloc_rcv_mbufs_err:
        return (-1);
}

static void
qls_free_rcv_bufs(qla_host_t *ha)
{
        int i, r;
        qla_rx_buf_t *rxb;
        qla_rx_ring_t *rxr;

        for (r = 0; r < ha->num_rx_rings; r++) {
                rxr = &ha->rx_ring[r];

                for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
                        rxb = &rxr->rx_buf[i];

                        if (rxb->m_head != NULL) {
                                bus_dmamap_unload(ha->rx_tag, rxb->map);
                                bus_dmamap_destroy(ha->rx_tag, rxb->map);
                                m_freem(rxb->m_head);
                        }
                }
                bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
        }
        return;
}

static int
qls_alloc_rcv_bufs(qla_host_t *ha)
{
        int r, ret = 0;
        qla_rx_ring_t *rxr;

        for (r = 0; r < ha->num_rx_rings; r++) {
                rxr = &ha->rx_ring[r];
                bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
        }

        for (r = 0; r < ha->num_rx_rings; r++) {
                ret = qls_alloc_rcv_mbufs(ha, r);

                if (ret)
                        qls_free_rcv_bufs(ha);
        }

        return (ret);
}
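
/*
 * Name: qls_get_mbuf
 * Function: Attaches a cluster mbuf (allocating one when nmp is NULL) to
 * the receive buffer, aligns its data to an 8-byte boundary, DMA-maps
 * it, and records the physical address in rxb->paddr.
 */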

int
qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
        struct mbuf *mp = nmp;
        int ret = 0;
        uint32_t offset;
        bus_dma_segment_t segs[1];
        int nsegs;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        if (mp == NULL) {
                mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);

                if (mp == NULL) {
                        if (ha->msize == MCLBYTES)
                                ha->err_m_getcl++;
                        else
                                ha->err_m_getjcl++;

                        ret = ENOBUFS;
                        device_printf(ha->pci_dev,
                            "%s: m_getcl failed\n", __func__);
                        goto exit_qls_get_mbuf;
                }
                mp->m_len = mp->m_pkthdr.len = ha->msize;
        } else {
                mp->m_len = mp->m_pkthdr.len = ha->msize;
                mp->m_data = mp->m_ext.ext_buf;
                mp->m_next = NULL;
        }

        /* align the receive buffers to 8 byte boundary */
        offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
        if (offset) {
                offset = 8 - offset;
                m_adj(mp, offset);
        }

        /*
         * Using memory from the mbuf cluster pool, invoke the bus_dma
         * machinery to arrange the memory mapping.
         */
        ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
            mp, segs, &nsegs, BUS_DMA_NOWAIT);
        rxb->paddr = segs[0].ds_addr;

        if (ret || !rxb->paddr || (nsegs != 1)) {
                m_freem(mp);
                rxb->m_head = NULL;
                device_printf(ha->pci_dev,
                    "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
                    __func__, ret, (long long unsigned int)rxb->paddr,
                    nsegs);
                ret = -1;
                goto exit_qls_get_mbuf;
        }
        rxb->m_head = mp;
        bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qls_get_mbuf:
        QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
        return (ret);
}
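
/*
 * Name: qls_tx_done
 * Function: Taskqueue handler; restarts transmission once transmit
 * completions have freed descriptors, provided the interface is running.
 */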

static void
qls_tx_done(void *context, int pending)
{
        qla_host_t *ha = context;
        struct ifnet *ifp;

        ifp = ha->ifp;

        if (!ifp)
                return;

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
                return;
        }

        qls_start(ha->ifp);
        return;
}
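
/*
 * Name: qls_config_lro / qls_free_lro
 * Function: Initializes (and later frees) a tcp_lro control block per
 * receive ring when INET or INET6 is configured in the kernel.
 */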

static int
qls_config_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
        int i;
        struct lro_ctrl *lro;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                if (tcp_lro_init(lro)) {
                        device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
                            __func__);
                        return (-1);
                }
                lro->ifp = ha->ifp;
        }
        ha->flags.lro_init = 1;

        QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
#endif
        return (0);
}

static void
qls_free_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
        int i;
        struct lro_ctrl *lro;

        if (!ha->flags.lro_init)
                return;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                tcp_lro_free(lro);
        }
        ha->flags.lro_init = 0;
#endif
}
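
/*
 * Name: qls_error_recovery
 * Function: Taskqueue handler scheduled by the watchdog; re-initializes
 * the adapter via qls_init().
 */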

static void
qls_error_recovery(void *context, int pending)
{
        qla_host_t *ha = context;

        qls_init(ha);

        return;
}