FreeBSD/Linux Kernel Cross Reference
sys/dev/qlxgb/qla_os.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2011-2013 Qlogic Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * File: qla_os.c
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include "qla_os.h"
39 #include "qla_reg.h"
40 #include "qla_hw.h"
41 #include "qla_def.h"
42 #include "qla_inline.h"
43 #include "qla_ver.h"
44 #include "qla_glbl.h"
45 #include "qla_dbg.h"
46
47 /*
48 * Some PCI Configuration Space Related Defines
49 */
50
51 #ifndef PCI_VENDOR_QLOGIC
52 #define PCI_VENDOR_QLOGIC 0x1077
53 #endif
54
55 #ifndef PCI_PRODUCT_QLOGIC_ISP8020
56 #define PCI_PRODUCT_QLOGIC_ISP8020 0x8020
57 #endif
58
59 #define PCI_QLOGIC_ISP8020 \
60 ((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
61
62 /*
63 * static functions
64 */
65 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
66 static void qla_free_parent_dma_tag(qla_host_t *ha);
67 static int qla_alloc_xmt_bufs(qla_host_t *ha);
68 static void qla_free_xmt_bufs(qla_host_t *ha);
69 static int qla_alloc_rcv_bufs(qla_host_t *ha);
70 static void qla_free_rcv_bufs(qla_host_t *ha);
71
72 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
73 static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
74 static void qla_release(qla_host_t *ha);
75 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
76 int error);
77 static void qla_stop(qla_host_t *ha);
78 static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
79 static void qla_tx_done(void *context, int pending);
80
81 /*
82 * Hooks to the Operating System
83 */
84 static int qla_pci_probe (device_t);
85 static int qla_pci_attach (device_t);
86 static int qla_pci_detach (device_t);
87
88 static void qla_init(void *arg);
89 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
90 static int qla_media_change(struct ifnet *ifp);
91 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
92
93 static device_method_t qla_pci_methods[] = {
94 /* Device interface */
95 DEVMETHOD(device_probe, qla_pci_probe),
96 DEVMETHOD(device_attach, qla_pci_attach),
97 DEVMETHOD(device_detach, qla_pci_detach),
98 { 0, 0 }
99 };
100
101 static driver_t qla_pci_driver = {
102 "ql", qla_pci_methods, sizeof (qla_host_t),
103 };
104
105 DRIVER_MODULE(qla80xx, pci, qla_pci_driver, 0, 0);
106
107 MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
108 MODULE_DEPEND(qla80xx, ether, 1, 1, 1);
109
110 MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");
111
112 uint32_t std_replenish = 8;
113 uint32_t jumbo_replenish = 2;
114 uint32_t rcv_pkt_thres = 128;
115 uint32_t rcv_pkt_thres_d = 32;
116 uint32_t snd_pkt_thres = 16;
117 uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);
118
119 static char dev_str[64];
120
121 /*
122 * Name: qla_pci_probe
123 * Function: Validate the PCI device to be a QLA80XX device
124 */
125 static int
126 qla_pci_probe(device_t dev)
127 {
128 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
129 case PCI_QLOGIC_ISP8020:
130 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
131 "Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
132 QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
133 QLA_VERSION_BUILD);
134 device_set_desc(dev, dev_str);
135 break;
136 default:
137 return (ENXIO);
138 }
139
140 if (bootverbose)
141 printf("%s: %s\n", __func__, dev_str);
142
143 return (BUS_PROBE_DEFAULT);
144 }
145
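/*
 * Name: qla_add_sysctls
 * Function: Registers the per-device sysctl nodes: statistics, firmware
 *           version, debug level and the rx/tx replenish/threshold tunables
 */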
146 static void
147 qla_add_sysctls(qla_host_t *ha)
148 {
149 device_t dev = ha->pci_dev;
150
151 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
152 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
153 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
154 (void *)ha, 0, qla_sysctl_get_stats, "I", "Statistics");
155
156 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
157 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
158 OID_AUTO, "fw_version", CTLFLAG_RD,
159 ha->fw_ver_str, 0, "firmware version");
160
161 dbg_level = 0;
162 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
163 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
164 OID_AUTO, "debug", CTLFLAG_RW,
165 &dbg_level, dbg_level, "Debug Level");
166
167 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
168 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
169 OID_AUTO, "std_replenish", CTLFLAG_RW,
170 &std_replenish, std_replenish,
171 "Threshold for Replenishing Standard Frames");
172
173 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
174 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
175 OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
176 &jumbo_replenish, jumbo_replenish,
177 "Threshold for Replenishing Jumbo Frames");
178
179 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
180 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
181 OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
182 &rcv_pkt_thres, rcv_pkt_thres,
183 "Threshold for # of rcv pkts to trigger indication isr");
184
185 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
186 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
187 OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
188 &rcv_pkt_thres_d, rcv_pkt_thres_d,
189 "Threshold for # of rcv pkts to trigger indication deferred");
190
191 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
192 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
193 OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
194 &snd_pkt_thres, snd_pkt_thres,
195 "Threshold for # of snd packets");
196
197 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
198 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
199 OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
200 &free_pkt_thres, free_pkt_thres,
201 "Threshold for # of packets to free at a time");
202
203 return;
204 }
205
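/*
 * Name: qla_watchdog
 * Function: Periodic callout; kicks the transmit task when completions are
 *           outstanding or the send queue is non-empty, then reschedules itself
 */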
206 static void
207 qla_watchdog(void *arg)
208 {
209 qla_host_t *ha = arg;
210 qla_hw_t *hw;
211 struct ifnet *ifp;
212
213 hw = &ha->hw;
214 ifp = ha->ifp;
215
216 if (ha->flags.qla_watchdog_exit)
217 return;
218
219 if (!ha->flags.qla_watchdog_pause) {
220 if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
221 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
222 } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
223 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
224 }
225 }
226 ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
227 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
228 qla_watchdog, ha);
229 }
230
231 /*
232 * Name: qla_pci_attach
233 * Function: attaches the device to the operating system
234 */
235 static int
236 qla_pci_attach(device_t dev)
237 {
238 qla_host_t *ha = NULL;
239 uint32_t rsrc_len, i;
240
241 QL_DPRINT2((dev, "%s: enter\n", __func__));
242
243 if ((ha = device_get_softc(dev)) == NULL) {
244 device_printf(dev, "cannot get softc\n");
245 return (ENOMEM);
246 }
247
248 memset(ha, 0, sizeof (qla_host_t));
249
250 if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
251 device_printf(dev, "device is not ISP8020\n");
252 return (ENXIO);
253 }
254
255 ha->pci_func = pci_get_function(dev);
256
257 ha->pci_dev = dev;
258
259 pci_enable_busmaster(dev);
260
261 ha->reg_rid = PCIR_BAR(0);
262 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
263 RF_ACTIVE);
264
265 if (ha->pci_reg == NULL) {
266 device_printf(dev, "unable to map any ports\n");
267 goto qla_pci_attach_err;
268 }
269
270 rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
271 ha->reg_rid);
272
273 mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
274 mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
275 mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
276 mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
277 ha->flags.lock_init = 1;
278
279 ha->msix_count = pci_msix_count(dev);
280
281 if (ha->msix_count < qla_get_msix_count(ha)) {
282 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
283 ha->msix_count);
284 goto qla_pci_attach_err;
285 }
286
287 QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
288 " msix_count 0x%x pci_reg %p\n", __func__, ha,
289 ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
290
291 ha->msix_count = qla_get_msix_count(ha);
292
293 if (pci_alloc_msix(dev, &ha->msix_count)) {
294 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
295 ha->msix_count);
296 ha->msix_count = 0;
297 goto qla_pci_attach_err;
298 }
299
300 TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
301 ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
302 taskqueue_thread_enqueue, &ha->tx_tq);
303 taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
304 device_get_nameunit(ha->pci_dev));
305
306 for (i = 0; i < ha->msix_count; i++) {
307 ha->irq_vec[i].irq_rid = i+1;
308 ha->irq_vec[i].ha = ha;
309
310 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
311 &ha->irq_vec[i].irq_rid,
312 (RF_ACTIVE | RF_SHAREABLE));
313
314 if (ha->irq_vec[i].irq == NULL) {
315 device_printf(dev, "could not allocate interrupt\n");
316 goto qla_pci_attach_err;
317 }
318
319 if (bus_setup_intr(dev, ha->irq_vec[i].irq,
320 (INTR_TYPE_NET | INTR_MPSAFE),
321 NULL, qla_isr, &ha->irq_vec[i],
322 &ha->irq_vec[i].handle)) {
323 device_printf(dev, "could not setup interrupt\n");
324 goto qla_pci_attach_err;
325 }
326
327 TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,\
328 &ha->irq_vec[i]);
329
330 ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
331 M_NOWAIT, taskqueue_thread_enqueue,
332 &ha->irq_vec[i].rcv_tq);
333
334 taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
335 "%s rcvq",
336 device_get_nameunit(ha->pci_dev));
337 }
338
339 qla_add_sysctls(ha);
340
341 /* add hardware specific sysctls */
342 qla_hw_add_sysctls(ha);
343
344 /* initialize hardware */
345 if (qla_init_hw(ha)) {
346 device_printf(dev, "%s: qla_init_hw failed\n", __func__);
347 goto qla_pci_attach_err;
348 }
349
350 device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
351 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
352 ha->fw_ver_build);
353
354 snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
355 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
356 ha->fw_ver_build);
357
358 //qla_get_hw_caps(ha);
359 qla_read_mac_addr(ha);
360
361 /* allocate parent dma tag */
362 if (qla_alloc_parent_dma_tag(ha)) {
363 device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
364 __func__);
365 goto qla_pci_attach_err;
366 }
367
368 /* alloc all dma buffers */
369 if (qla_alloc_dma(ha)) {
370 device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
371 goto qla_pci_attach_err;
372 }
373
374 /* create the O.S. ethernet interface */
375 qla_init_ifnet(dev, ha);
376
377 ha->flags.qla_watchdog_active = 1;
378 ha->flags.qla_watchdog_pause = 1;
379
380 callout_init(&ha->tx_callout, 1);
381
382 /* create ioctl device interface */
383 if (qla_make_cdev(ha)) {
384 device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
385 goto qla_pci_attach_err;
386 }
387
388 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
389 qla_watchdog, ha);
390
391 QL_DPRINT2((dev, "%s: exit 0\n", __func__));
392 return (0);
393
394 qla_pci_attach_err:
395
396 qla_release(ha);
397
398 QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
399 return (ENXIO);
400 }
401
402 /*
403 * Name: qla_pci_detach
404 * Function: Unhooks the device from the operating system
405 */
406 static int
407 qla_pci_detach(device_t dev)
408 {
409 qla_host_t *ha = NULL;
410 int i;
411
412 QL_DPRINT2((dev, "%s: enter\n", __func__));
413
414 if ((ha = device_get_softc(dev)) == NULL) {
415 device_printf(dev, "cannot get softc\n");
416 return (ENOMEM);
417 }
418
419 QLA_LOCK(ha, __func__);
420 qla_stop(ha);
421 QLA_UNLOCK(ha, __func__);
422
423 if (ha->tx_tq) {
424 taskqueue_drain(ha->tx_tq, &ha->tx_task);
425 taskqueue_free(ha->tx_tq);
426 }
427
428 for (i = 0; i < ha->msix_count; i++) {
429 taskqueue_drain(ha->irq_vec[i].rcv_tq,
430 &ha->irq_vec[i].rcv_task);
431 taskqueue_free(ha->irq_vec[i].rcv_tq);
432 }
433
434 qla_release(ha);
435
436 QL_DPRINT2((dev, "%s: exit\n", __func__));
437
438 return (0);
439 }
440
441 /*
442 * SYSCTL Related Callbacks
443 */
444 static int
445 qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
446 {
447 int err, ret = 0;
448 qla_host_t *ha;
449
450 err = sysctl_handle_int(oidp, &ret, 0, req);
451
452 if (err)
453 return (err);
454
455 ha = (qla_host_t *)arg1;
456 //qla_get_stats(ha);
457 QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
458 return (err);
459 }
460
461 /*
462 * Name: qla_release
463 * Function: Releases the resources allocated for the device
464 */
465 static void
466 qla_release(qla_host_t *ha)
467 {
468 device_t dev;
469 int i;
470
471 dev = ha->pci_dev;
472
473 qla_del_cdev(ha);
474
475 if (ha->flags.qla_watchdog_active)
476 ha->flags.qla_watchdog_exit = 1;
477
478 callout_stop(&ha->tx_callout);
479 qla_mdelay(__func__, 100);
480
481 if (ha->ifp != NULL)
482 ether_ifdetach(ha->ifp);
483
484 qla_free_dma(ha);
485 qla_free_parent_dma_tag(ha);
486
487 for (i = 0; i < ha->msix_count; i++) {
488 if (ha->irq_vec[i].handle)
489 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
490 ha->irq_vec[i].handle);
491 if (ha->irq_vec[i].irq)
492 (void) bus_release_resource(dev, SYS_RES_IRQ,
493 ha->irq_vec[i].irq_rid,
494 ha->irq_vec[i].irq);
495 }
496 if (ha->msix_count)
497 pci_release_msi(dev);
498
499 if (ha->flags.lock_init) {
500 mtx_destroy(&ha->tx_lock);
501 mtx_destroy(&ha->rx_lock);
502 mtx_destroy(&ha->rxj_lock);
503 mtx_destroy(&ha->hw_lock);
504 }
505
506 if (ha->pci_reg)
507 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
508 ha->pci_reg);
509 }
510
511 /*
512 * DMA Related Functions
513 */
514
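/*
 * Name: qla_dmamap_callback
 * Function: busdma load callback; stores the single segment's bus address in
 *           the bus_addr_t pointed to by arg (left as 0 on error)
 */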
515 static void
516 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
517 {
518 *((bus_addr_t *)arg) = 0;
519
520 if (error) {
521 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
522 return;
523 }
524
525 QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));
526
527 *((bus_addr_t *)arg) = segs[0].ds_addr;
528
529 return;
530 }
531
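/*
 * Name: qla_alloc_dmabuf
 * Function: Allocates and maps a DMA-coherent buffer described by dma_buf
 *           (tag, map, kernel virtual address and bus address)
 */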
532 int
533 qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
534 {
535 int ret = 0;
536 device_t dev;
537 bus_addr_t b_addr;
538
539 dev = ha->pci_dev;
540
541 QL_DPRINT2((dev, "%s: enter\n", __func__));
542
543 ret = bus_dma_tag_create(
544 ha->parent_tag,/* parent */
545 dma_buf->alignment,
546 ((bus_size_t)(1ULL << 32)),/* boundary */
547 BUS_SPACE_MAXADDR, /* lowaddr */
548 BUS_SPACE_MAXADDR, /* highaddr */
549 NULL, NULL, /* filter, filterarg */
550 dma_buf->size, /* maxsize */
551 1, /* nsegments */
552 dma_buf->size, /* maxsegsize */
553 0, /* flags */
554 NULL, NULL, /* lockfunc, lockarg */
555 &dma_buf->dma_tag);
556
557 if (ret) {
558 device_printf(dev, "%s: could not create dma tag\n", __func__);
559 goto qla_alloc_dmabuf_exit;
560 }
561 ret = bus_dmamem_alloc(dma_buf->dma_tag,
562 (void **)&dma_buf->dma_b,
563 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
564 &dma_buf->dma_map);
565 if (ret) {
566 bus_dma_tag_destroy(dma_buf->dma_tag);
567 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
568 goto qla_alloc_dmabuf_exit;
569 }
570
571 ret = bus_dmamap_load(dma_buf->dma_tag,
572 dma_buf->dma_map,
573 dma_buf->dma_b,
574 dma_buf->size,
575 qla_dmamap_callback,
576 &b_addr, BUS_DMA_NOWAIT);
577
578 if (ret || !b_addr) {
579 bus_dma_tag_destroy(dma_buf->dma_tag);
580 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
581 dma_buf->dma_map);
582 ret = -1;
583 goto qla_alloc_dmabuf_exit;
584 }
585
586 dma_buf->dma_addr = b_addr;
587
588 qla_alloc_dmabuf_exit:
589 QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
590 __func__, ret, (void *)dma_buf->dma_tag,
591 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
592 dma_buf->size));
593
594 return ret;
595 }
596
597 void
598 qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
599 {
600 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
601 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
602 bus_dma_tag_destroy(dma_buf->dma_tag);
603 }
604
605 static int
606 qla_alloc_parent_dma_tag(qla_host_t *ha)
607 {
608 int ret;
609 device_t dev;
610
611 dev = ha->pci_dev;
612
613 /*
614 * Allocate parent DMA Tag
615 */
616 ret = bus_dma_tag_create(
617 bus_get_dma_tag(dev), /* parent */
618 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
619 BUS_SPACE_MAXADDR, /* lowaddr */
620 BUS_SPACE_MAXADDR, /* highaddr */
621 NULL, NULL, /* filter, filterarg */
622 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
623 0, /* nsegments */
624 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
625 0, /* flags */
626 NULL, NULL, /* lockfunc, lockarg */
627 &ha->parent_tag);
628
629 if (ret) {
630 device_printf(dev, "%s: could not create parent dma tag\n",
631 __func__);
632 return (-1);
633 }
634
635 ha->flags.parent_tag = 1;
636
637 return (0);
638 }
639
640 static void
641 qla_free_parent_dma_tag(qla_host_t *ha)
642 {
643 if (ha->flags.parent_tag) {
644 bus_dma_tag_destroy(ha->parent_tag);
645 ha->flags.parent_tag = 0;
646 }
647 }
648
649 /*
650 * Name: qla_init_ifnet
651 * Function: Creates the network device interface and registers it with the O.S.
652 */
653
654 static void
655 qla_init_ifnet(device_t dev, qla_host_t *ha)
656 {
657 struct ifnet *ifp;
658
659 QL_DPRINT2((dev, "%s: enter\n", __func__));
660
661 ifp = ha->ifp = if_alloc(IFT_ETHER);
662
663 if (ifp == NULL)
664 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
665
666 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
667
668 ifp->if_mtu = ETHERMTU;
669 ifp->if_baudrate = IF_Gbps(10);
670 ifp->if_init = qla_init;
671 ifp->if_softc = ha;
672 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
673 ifp->if_ioctl = qla_ioctl;
674 ifp->if_start = qla_start;
675
676 IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
677 ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
678 IFQ_SET_READY(&ifp->if_snd);
679
680 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
681
682 ether_ifattach(ifp, qla_get_mac_addr(ha));
683
684 ifp->if_capabilities = IFCAP_HWCSUM |
685 IFCAP_TSO4 |
686 IFCAP_JUMBO_MTU;
687
688 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
689 ifp->if_capabilities |= IFCAP_LINKSTATE;
690
691 #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
692 ifp->if_timer = 0;
693 ifp->if_watchdog = NULL;
694 #endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */
695
696 ifp->if_capenable = ifp->if_capabilities;
697
698 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
699
700 ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
701
702 ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
703 NULL);
704 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
705
706 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
707
708 QL_DPRINT2((dev, "%s: exit\n", __func__));
709
710 return;
711 }
712
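/*
 * Name: qla_init_locked
 * Function: (Re)initializes the interface with the driver lock held: stops the
 *           hardware, allocates transmit/receive buffers, configures LRO and
 *           brings the hardware interface back up
 */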
713 static void
714 qla_init_locked(qla_host_t *ha)
715 {
716 struct ifnet *ifp = ha->ifp;
717
718 qla_stop(ha);
719
720 if (qla_alloc_xmt_bufs(ha) != 0)
721 return;
722
723 if (qla_alloc_rcv_bufs(ha) != 0)
724 return;
725
726 if (qla_config_lro(ha))
727 return;
728
729 bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
730
731 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
732
733 ha->flags.stop_rcv = 0;
734 if (qla_init_hw_if(ha) == 0) {
735 ifp = ha->ifp;
736 ifp->if_drv_flags |= IFF_DRV_RUNNING;
737 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
738 ha->flags.qla_watchdog_pause = 0;
739 }
740
741 return;
742 }
743
744 static void
745 qla_init(void *arg)
746 {
747 qla_host_t *ha;
748
749 ha = (qla_host_t *)arg;
750
751 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
752
753 QLA_LOCK(ha, __func__);
754 qla_init_locked(ha);
755 QLA_UNLOCK(ha, __func__);
756
757 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
758 }
759
760 static u_int
761 qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
762 {
763 uint8_t *mta = arg;
764
765 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
766 return (0);
767 bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
768
769 return (1);
770 }
771
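/*
 * Name: qla_set_multi
 * Function: Collects the interface's link-layer multicast addresses and
 *           programs them into (add_multi != 0) or out of the hardware filter
 */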
772 static void
773 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
774 {
775 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
776 struct ifnet *ifp = ha->ifp;
777 int mcnt;
778
779 mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);
780 qla_hw_set_multi(ha, mta, mcnt, add_multi);
781
782 return;
783 }
784
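/*
 * Name: qla_ioctl
 * Function: Handles interface ioctls (address, MTU, flags, multicast, media
 *           and capability changes); all others are passed to ether_ioctl()
 */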
785 static int
786 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
787 {
788 int ret = 0;
789 struct ifreq *ifr = (struct ifreq *)data;
790 #ifdef INET
791 struct ifaddr *ifa = (struct ifaddr *)data;
792 #endif
793 qla_host_t *ha;
794
795 ha = (qla_host_t *)ifp->if_softc;
796
797 switch (cmd) {
798 case SIOCSIFADDR:
799 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
800 __func__, cmd));
801
802 #ifdef INET
803 if (ifa->ifa_addr->sa_family == AF_INET) {
804 ifp->if_flags |= IFF_UP;
805 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
806 QLA_LOCK(ha, __func__);
807 qla_init_locked(ha);
808 QLA_UNLOCK(ha, __func__);
809 }
810 QL_DPRINT4((ha->pci_dev,
811 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
812 __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
813
814 arp_ifinit(ifp, ifa);
815 if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
816 qla_config_ipv4_addr(ha,
817 (IA_SIN(ifa)->sin_addr.s_addr));
818 }
819 break;
820 }
821 #endif
822 ether_ioctl(ifp, cmd, data);
823 break;
824
825 case SIOCSIFMTU:
826 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
827 __func__, cmd));
828
829 if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
830 ret = EINVAL;
831 } else {
832 QLA_LOCK(ha, __func__);
833 ifp->if_mtu = ifr->ifr_mtu;
834 ha->max_frame_size =
835 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
836 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
837 ret = qla_set_max_mtu(ha, ha->max_frame_size,
838 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
839 }
840 QLA_UNLOCK(ha, __func__);
841
842 if (ret)
843 ret = EINVAL;
844 }
845
846 break;
847
848 case SIOCSIFFLAGS:
849 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
850 __func__, cmd));
851
852 if (ifp->if_flags & IFF_UP) {
853 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
854 if ((ifp->if_flags ^ ha->if_flags) &
855 IFF_PROMISC) {
856 qla_set_promisc(ha);
857 } else if ((ifp->if_flags ^ ha->if_flags) &
858 IFF_ALLMULTI) {
859 qla_set_allmulti(ha);
860 }
861 } else {
862 QLA_LOCK(ha, __func__);
863 qla_init_locked(ha);
864 ha->max_frame_size = ifp->if_mtu +
865 ETHER_HDR_LEN + ETHER_CRC_LEN;
866 ret = qla_set_max_mtu(ha, ha->max_frame_size,
867 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
868 QLA_UNLOCK(ha, __func__);
869 }
870 } else {
871 QLA_LOCK(ha, __func__);
872 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
873 qla_stop(ha);
874 ha->if_flags = ifp->if_flags;
875 QLA_UNLOCK(ha, __func__);
876 }
877 break;
878
879 case SIOCADDMULTI:
880 QL_DPRINT4((ha->pci_dev,
881 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
882
883 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
884 qla_set_multi(ha, 1);
885 }
886 break;
887
888 case SIOCDELMULTI:
889 QL_DPRINT4((ha->pci_dev,
890 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
891
892 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
893 qla_set_multi(ha, 0);
894 }
895 break;
896
897 case SIOCSIFMEDIA:
898 case SIOCGIFMEDIA:
899 QL_DPRINT4((ha->pci_dev,
900 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
901 __func__, cmd));
902 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
903 break;
904
905 case SIOCSIFCAP:
906 {
907 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
908
909 QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
910 __func__, cmd));
911
912 if (mask & IFCAP_HWCSUM)
913 ifp->if_capenable ^= IFCAP_HWCSUM;
914 if (mask & IFCAP_TSO4)
915 ifp->if_capenable ^= IFCAP_TSO4;
916 if (mask & IFCAP_TSO6)
917 ifp->if_capenable ^= IFCAP_TSO6;
918 if (mask & IFCAP_VLAN_HWTAGGING)
919 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
920
921 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
922 qla_init(ha);
923
924 VLAN_CAPABILITIES(ifp);
925 break;
926 }
927
928 default:
929 QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
930 __func__, cmd));
931 ret = ether_ioctl(ifp, cmd, data);
932 break;
933 }
934
935 return (ret);
936 }
937
938 static int
939 qla_media_change(struct ifnet *ifp)
940 {
941 qla_host_t *ha;
942 struct ifmedia *ifm;
943 int ret = 0;
944
945 ha = (qla_host_t *)ifp->if_softc;
946
947 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
948
949 ifm = &ha->media;
950
951 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
952 ret = EINVAL;
953
954 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
955
956 return (ret);
957 }
958
959 static void
960 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
961 {
962 qla_host_t *ha;
963
964 ha = (qla_host_t *)ifp->if_softc;
965
966 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
967
968 ifmr->ifm_status = IFM_AVALID;
969 ifmr->ifm_active = IFM_ETHER;
970
971 qla_update_link_state(ha);
972 if (ha->hw.flags.link_up) {
973 ifmr->ifm_status |= IFM_ACTIVE;
974 ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
975 }
976
977 QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\
978 (ha->hw.flags.link_up ? "link_up" : "link_down")));
979
980 return;
981 }
982
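/*
 * Name: qla_start
 * Function: if_start handler; dequeues frames from the interface send queue
 *           and hands them to qla_send(), requeueing the frame and setting
 *           IFF_DRV_OACTIVE if a frame cannot be sent
 */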
983 void
984 qla_start(struct ifnet *ifp)
985 {
986 struct mbuf *m_head;
987 qla_host_t *ha = (qla_host_t *)ifp->if_softc;
988
989 QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
990
991 if (!mtx_trylock(&ha->tx_lock)) {
992 QL_DPRINT8((ha->pci_dev,
993 "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
994 return;
995 }
996
997 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
998 IFF_DRV_RUNNING) {
999 QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1000 QLA_TX_UNLOCK(ha);
1001 return;
1002 }
1003
1004 if (!ha->watchdog_ticks)
1005 qla_update_link_state(ha);
1006
1007 if (!ha->hw.flags.link_up) {
1008 QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
1009 QLA_TX_UNLOCK(ha);
1010 return;
1011 }
1012
1013 while (ifp->if_snd.ifq_head != NULL) {
1014 IF_DEQUEUE(&ifp->if_snd, m_head);
1015
1016 if (m_head == NULL) {
1017 QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
1018 __func__));
1019 break;
1020 }
1021
1022 if (qla_send(ha, &m_head)) {
1023 if (m_head == NULL)
1024 break;
1025 QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
1026 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1027 IF_PREPEND(&ifp->if_snd, m_head);
1028 break;
1029 }
1030 /* Send a copy of the frame to the BPF listener */
1031 ETHER_BPF_MTAP(ifp, m_head);
1032 }
1033 QLA_TX_UNLOCK(ha);
1034 QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
1035 return;
1036 }
1037
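/*
 * Name: qla_send
 * Function: DMA-maps the mbuf chain (defragmenting once if it needs too many
 *           segments) and posts it to the hardware transmit ring
 */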
1038 static int
1039 qla_send(qla_host_t *ha, struct mbuf **m_headp)
1040 {
1041 bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
1042 bus_dmamap_t map;
1043 int nsegs;
1044 int ret = -1;
1045 uint32_t tx_idx;
1046 struct mbuf *m_head = *m_headp;
1047
1048 QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
1049
1050 if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
1051 ha->err_tx_dmamap_create++;
1052 device_printf(ha->pci_dev,
1053 "%s: bus_dmamap_create failed[%d, %d]\n",
1054 __func__, ret, m_head->m_pkthdr.len);
1055 return (ret);
1056 }
1057
1058 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1059 BUS_DMA_NOWAIT);
1060
1061 if (ret == EFBIG) {
1062 struct mbuf *m;
1063
1064 QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1065 m_head->m_pkthdr.len));
1066
1067 m = m_defrag(m_head, M_NOWAIT);
1068 if (m == NULL) {
1069 ha->err_tx_defrag++;
1070 m_freem(m_head);
1071 *m_headp = NULL;
1072 device_printf(ha->pci_dev,
1073 "%s: m_defrag() = NULL [%d]\n",
1074 __func__, ret);
1075 return (ENOBUFS);
1076 }
1077 m_head = m;
1078
1079 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1080 segs, &nsegs, BUS_DMA_NOWAIT))) {
1081 ha->err_tx_dmamap_load++;
1082
1083 device_printf(ha->pci_dev,
1084 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1085 __func__, ret, m_head->m_pkthdr.len);
1086
1087 bus_dmamap_destroy(ha->tx_tag, map);
1088 if (ret != ENOMEM) {
1089 m_freem(m_head);
1090 *m_headp = NULL;
1091 }
1092 return (ret);
1093 }
1094 } else if (ret) {
1095 ha->err_tx_dmamap_load++;
1096
1097 device_printf(ha->pci_dev,
1098 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1099 __func__, ret, m_head->m_pkthdr.len);
1100
1101 bus_dmamap_destroy(ha->tx_tag, map);
1102
1103 if (ret != ENOMEM) {
1104 m_freem(m_head);
1105 *m_headp = NULL;
1106 }
1107 return (ret);
1108 }
1109
1110 QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));
1111
1112 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1113
1114 if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
1115 ha->tx_buf[tx_idx].m_head = m_head;
1116 ha->tx_buf[tx_idx].map = map;
1117 } else {
1118 if (ret == EINVAL) {
1119 m_freem(m_head);
1120 *m_headp = NULL;
1121 }
1122 }
1123
1124 QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
1125 return (ret);
1126 }
1127
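/*
 * Name: qla_stop
 * Function: Pauses the watchdog, stops receive processing, tears down the
 *           hardware interface and frees the LRO state and tx/rx buffers
 */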
1128 static void
1129 qla_stop(qla_host_t *ha)
1130 {
1131 struct ifnet *ifp = ha->ifp;
1132
1133 ha->flags.qla_watchdog_pause = 1;
1134 qla_mdelay(__func__, 100);
1135
1136 ha->flags.stop_rcv = 1;
1137 qla_hw_stop_rcv(ha);
1138
1139 qla_del_hw_if(ha);
1140
1141 qla_free_lro(ha);
1142
1143 qla_free_xmt_bufs(ha);
1144 qla_free_rcv_bufs(ha);
1145
1146 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1147
1148 return;
1149 }
1150
1151 /*
1152 * Buffer Management Functions for Transmit and Receive Rings
1153 */
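/*
 * Name: qla_alloc_xmt_bufs
 * Function: Creates the transmit DMA tag and clears the transmit buffer array
 */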
1154 static int
1155 qla_alloc_xmt_bufs(qla_host_t *ha)
1156 {
1157 if (bus_dma_tag_create(NULL, /* parent */
1158 1, 0, /* alignment, bounds */
1159 BUS_SPACE_MAXADDR, /* lowaddr */
1160 BUS_SPACE_MAXADDR, /* highaddr */
1161 NULL, NULL, /* filter, filterarg */
1162 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
1163 QLA_MAX_SEGMENTS, /* nsegments */
1164 PAGE_SIZE, /* maxsegsize */
1165 BUS_DMA_ALLOCNOW, /* flags */
1166 NULL, /* lockfunc */
1167 NULL, /* lockfuncarg */
1168 &ha->tx_tag)) {
1169 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1170 __func__);
1171 return (ENOMEM);
1172 }
1173 bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1174
1175 return 0;
1176 }
1177
1178 /*
1179 * Release the mbuf after it has been sent on the wire
1180 */
1181 static void
1182 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1183 {
1184 QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
1185
1186 if (txb->m_head) {
1187 bus_dmamap_unload(ha->tx_tag, txb->map);
1188 bus_dmamap_destroy(ha->tx_tag, txb->map);
1189
1190 m_freem(txb->m_head);
1191 txb->m_head = NULL;
1192 }
1193
1194 QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
1195 }
1196
1197 static void
1198 qla_free_xmt_bufs(qla_host_t *ha)
1199 {
1200 int i;
1201
1202 for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1203 qla_clear_tx_buf(ha, &ha->tx_buf[i]);
1204
1205 if (ha->tx_tag != NULL) {
1206 bus_dma_tag_destroy(ha->tx_tag);
1207 ha->tx_tag = NULL;
1208 }
1209 bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1210
1211 return;
1212 }
1213
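/*
 * Name: qla_alloc_rcv_bufs
 * Function: Creates the receive DMA tag, maps a standard and a jumbo mbuf
 *           cluster for every receive descriptor and publishes their bus
 *           addresses to the hardware receive rings
 */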
1214 static int
1215 qla_alloc_rcv_bufs(qla_host_t *ha)
1216 {
1217 int i, j, ret = 0;
1218 qla_rx_buf_t *rxb;
1219
1220 if (bus_dma_tag_create(NULL, /* parent */
1221 1, 0, /* alignment, bounds */
1222 BUS_SPACE_MAXADDR, /* lowaddr */
1223 BUS_SPACE_MAXADDR, /* highaddr */
1224 NULL, NULL, /* filter, filterarg */
1225 MJUM9BYTES, /* maxsize */
1226 1, /* nsegments */
1227 MJUM9BYTES, /* maxsegsize */
1228 BUS_DMA_ALLOCNOW, /* flags */
1229 NULL, /* lockfunc */
1230 NULL, /* lockfuncarg */
1231 &ha->rx_tag)) {
1232 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1233 __func__);
1234
1235 return (ENOMEM);
1236 }
1237
1238 bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
1239 bzero((void *)ha->rx_jbuf,
1240 (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
1241
1242 for (i = 0; i < MAX_SDS_RINGS; i++) {
1243 ha->hw.sds[i].sdsr_next = 0;
1244 ha->hw.sds[i].rxb_free = NULL;
1245 ha->hw.sds[i].rx_free = 0;
1246 ha->hw.sds[i].rxjb_free = NULL;
1247 ha->hw.sds[i].rxj_free = 0;
1248 }
1249
1250 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1251 rxb = &ha->rx_buf[i];
1252
1253 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
1254
1255 if (ret) {
1256 device_printf(ha->pci_dev,
1257 "%s: dmamap[%d] failed\n", __func__, i);
1258
1259 for (j = 0; j < i; j++) {
1260 bus_dmamap_destroy(ha->rx_tag,
1261 ha->rx_buf[j].map);
1262 }
1263 goto qla_alloc_rcv_bufs_failed;
1264 }
1265 }
1266
1267 qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);
1268
1269 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1270 rxb = &ha->rx_buf[i];
1271 rxb->handle = i;
1272 if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
1273 /*
1274 * set the physical address in the corresponding
1275 * descriptor entry in the receive ring/queue for the
1276 * hba
1277 */
1278 qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
1279 rxb->handle, rxb->paddr,
1280 (rxb->m_head)->m_pkthdr.len);
1281 } else {
1282 device_printf(ha->pci_dev,
1283 "%s: qla_get_mbuf [standard(%d)] failed\n",
1284 __func__, i);
1285 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1286 goto qla_alloc_rcv_bufs_failed;
1287 }
1288 }
1289
1290 for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
1291 rxb = &ha->rx_jbuf[i];
1292
1293 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
1294
1295 if (ret) {
1296 device_printf(ha->pci_dev,
1297 "%s: dmamap[%d] failed\n", __func__, i);
1298
1299 for (j = 0; j < i; j++) {
1300 bus_dmamap_destroy(ha->rx_tag,
1301 ha->rx_jbuf[j].map);
1302 }
1303 goto qla_alloc_rcv_bufs_failed;
1304 }
1305 }
1306
1307 qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);
1308
1309 for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
1310 rxb = &ha->rx_jbuf[i];
1311 rxb->handle = i;
1312 if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
1313 /*
1314 * set the physical address in the corresponding
1315 * descriptor entry in the receive ring/queue for the
1316 * hba
1317 */
1318 qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
1319 rxb->handle, rxb->paddr,
1320 (rxb->m_head)->m_pkthdr.len);
1321 } else {
1322 device_printf(ha->pci_dev,
1323 "%s: qla_get_mbuf [jumbo(%d)] failed\n",
1324 __func__, i);
1325 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1326 goto qla_alloc_rcv_bufs_failed;
1327 }
1328 }
1329
1330 return (0);
1331
1332 qla_alloc_rcv_bufs_failed:
1333 qla_free_rcv_bufs(ha);
1334 return (ret);
1335 }
1336
1337 static void
1338 qla_free_rcv_bufs(qla_host_t *ha)
1339 {
1340 int i;
1341 qla_rx_buf_t *rxb;
1342
1343 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1344 rxb = &ha->rx_buf[i];
1345 if (rxb->m_head != NULL) {
1346 bus_dmamap_unload(ha->rx_tag, rxb->map);
1347 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1348 m_freem(rxb->m_head);
1349 rxb->m_head = NULL;
1350 }
1351 }
1352
1353 for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
1354 rxb = &ha->rx_jbuf[i];
1355 if (rxb->m_head != NULL) {
1356 bus_dmamap_unload(ha->rx_tag, rxb->map);
1357 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1358 m_freem(rxb->m_head);
1359 rxb->m_head = NULL;
1360 }
1361 }
1362
1363 if (ha->rx_tag != NULL) {
1364 bus_dma_tag_destroy(ha->rx_tag);
1365 ha->rx_tag = NULL;
1366 }
1367
1368 bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
1369 bzero((void *)ha->rx_jbuf,
1370 (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
1371
1372 for (i = 0; i < MAX_SDS_RINGS; i++) {
1373 ha->hw.sds[i].sdsr_next = 0;
1374 ha->hw.sds[i].rxb_free = NULL;
1375 ha->hw.sds[i].rx_free = 0;
1376 ha->hw.sds[i].rxjb_free = NULL;
1377 ha->hw.sds[i].rxj_free = 0;
1378 }
1379
1380 return;
1381 }
1382
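/*
 * Name: qla_get_mbuf
 * Function: Allocates (or reuses) a standard or jumbo mbuf cluster, aligns its
 *           payload to an 8 byte boundary and DMA-maps it for the receive
 *           buffer entry rxb
 */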
1383 int
1384 qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
1385 uint32_t jumbo)
1386 {
1387 struct mbuf *mp = nmp;
1388 int ret = 0;
1389 uint32_t offset;
1390
1391 QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));
1392
1393 if (mp == NULL) {
1394 if (!jumbo) {
1395 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1396
1397 if (mp == NULL) {
1398 ha->err_m_getcl++;
1399 ret = ENOBUFS;
1400 device_printf(ha->pci_dev,
1401 "%s: m_getcl failed\n", __func__);
1402 goto exit_qla_get_mbuf;
1403 }
1404 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1405 } else {
1406 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1407 MJUM9BYTES);
1408 if (mp == NULL) {
1409 ha->err_m_getjcl++;
1410 ret = ENOBUFS;
1411 device_printf(ha->pci_dev,
1412 "%s: m_getjcl failed\n", __func__);
1413 goto exit_qla_get_mbuf;
1414 }
1415 mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
1416 }
1417 } else {
1418 if (!jumbo)
1419 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1420 else
1421 mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
1422
1423 mp->m_data = mp->m_ext.ext_buf;
1424 mp->m_next = NULL;
1425 }
1426
1427 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1428 if (offset) {
1429 offset = 8 - offset;
1430 m_adj(mp, offset);
1431 }
1432
1433 /*
1434 * Using memory from the mbuf cluster pool, invoke the bus_dma
1435 * machinery to arrange the memory mapping.
1436 */
1437 ret = bus_dmamap_load(ha->rx_tag, rxb->map,
1438 mtod(mp, void *), mp->m_len,
1439 qla_dmamap_callback, &rxb->paddr,
1440 BUS_DMA_NOWAIT);
1441 if (ret || !rxb->paddr) {
1442 m_free(mp);
1443 rxb->m_head = NULL;
1444 device_printf(ha->pci_dev,
1445 "%s: bus_dmamap_load failed\n", __func__);
1446 ret = -1;
1447 goto exit_qla_get_mbuf;
1448 }
1449 rxb->m_head = mp;
1450 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1451
1452 exit_qla_get_mbuf:
1453 QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1454 return (ret);
1455 }
1456
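/*
 * Name: qla_tx_done
 * Function: Transmit completion task; reclaims completed transmit buffers and
 *           restarts transmission
 */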
1457 static void
1458 qla_tx_done(void *context, int pending)
1459 {
1460 qla_host_t *ha = context;
1461
1462 qla_hw_tx_done(ha);
1463 qla_start(ha->ifp);
1464 }