FreeBSD/Linux Kernel Cross Reference
sys/dev/qlxgb/qla_hw.c
1 /*
2 * Copyright (c) 2010-2011 Qlogic Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * File: qla_hw.c
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 * Content: Contains Hardware dependent functions
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/9.0/sys/dev/qlxgb/qla_hw.c 227064 2011-11-03 21:20:22Z bz $");
36
37 #include "qla_os.h"
38 #include "qla_reg.h"
39 #include "qla_hw.h"
40 #include "qla_def.h"
41 #include "qla_inline.h"
42 #include "qla_ver.h"
43 #include "qla_glbl.h"
44 #include "qla_dbg.h"
45
46 static uint32_t sysctl_num_rds_rings = 2;
47 static uint32_t sysctl_num_sds_rings = 4;
48
49 /*
50 * Static Functions
51 */
52
53 static void qla_init_cntxt_regions(qla_host_t *ha);
54 static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
55 static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
56 static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
57 uint16_t cntxt_id, uint32_t add_multi);
58 static void qla_del_rcv_cntxt(qla_host_t *ha);
59 static int qla_init_rcv_cntxt(qla_host_t *ha);
60 static void qla_del_xmt_cntxt(qla_host_t *ha);
61 static int qla_init_xmt_cntxt(qla_host_t *ha);
62 static int qla_get_max_rds(qla_host_t *ha);
63 static int qla_get_max_sds(qla_host_t *ha);
64 static int qla_get_max_rules(qla_host_t *ha);
65 static int qla_get_max_rcv_cntxts(qla_host_t *ha);
66 static int qla_get_max_tx_cntxts(qla_host_t *ha);
67 static int qla_get_max_mtu(qla_host_t *ha);
68 static int qla_get_max_lro(qla_host_t *ha);
69 static int qla_get_flow_control(qla_host_t *ha);
70 static void qla_hw_tx_done_locked(qla_host_t *ha);
71
72 int
73 qla_get_msix_count(qla_host_t *ha)
74 {
75 return (sysctl_num_sds_rings);
76 }
77
78 /*
79 * Name: qla_hw_add_sysctls
80 * Function: Add P3Plus specific sysctls
81 */
82 void
83 qla_hw_add_sysctls(qla_host_t *ha)
84 {
85 device_t dev;
86
87 dev = ha->pci_dev;
88
89 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
90 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
91 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
92 sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");
93
94 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
95 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
96 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
97 sysctl_num_sds_rings, "Number of Status Descriptor Rings");
98 }
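/*
 * A quick way to inspect the two read-only nodes added above from
 * userland (illustrative; assuming the device instance shows up as
 * "ql0", so the tree is dev.ql.0):
 *
 *	# sysctl dev.ql.0.num_rds_rings dev.ql.0.num_sds_rings
 *
 * Both are CTLFLAG_RD, so they report the compiled-in ring counts and
 * cannot be changed at run time.
 */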
99
100 /*
101 * Name: qla_free_dma
102 * Function: Frees the DMA'able memory allocated in qla_alloc_dma()
103 */
104 void
105 qla_free_dma(qla_host_t *ha)
106 {
107 uint32_t i;
108
109 if (ha->hw.dma_buf.flags.context) {
110 qla_free_dmabuf(ha, &ha->hw.dma_buf.context);
111 ha->hw.dma_buf.flags.context = 0;
112 }
113
114 if (ha->hw.dma_buf.flags.sds_ring) {
115 for (i = 0; i < ha->hw.num_sds_rings; i++)
116 qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
117 ha->hw.dma_buf.flags.sds_ring = 0;
118 }
119
120 if (ha->hw.dma_buf.flags.rds_ring) {
121 for (i = 0; i < ha->hw.num_rds_rings; i++)
122 qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
123 ha->hw.dma_buf.flags.rds_ring = 0;
124 }
125
126 if (ha->hw.dma_buf.flags.tx_ring) {
127 qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
128 ha->hw.dma_buf.flags.tx_ring = 0;
129 }
130 }
131
132 /*
133 * Name: qla_alloc_dma
134 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
135 */
136 int
137 qla_alloc_dma(qla_host_t *ha)
138 {
139 device_t dev;
140 uint32_t i, j, size;
141
142 dev = ha->pci_dev;
143
144 QL_DPRINT2((dev, "%s: enter\n", __func__));
145
146 ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
147 ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;
148
149 /*
150 * Allocate Transmit Ring
151 */
152
153 ha->hw.dma_buf.tx_ring.alignment = 8;
154 ha->hw.dma_buf.tx_ring.size =
155 (sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;
156
157 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
158 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
159 goto qla_alloc_dma_exit;
160 }
161 ha->hw.dma_buf.flags.tx_ring = 1;
162
163 QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
164 __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
165 ha->hw.dma_buf.tx_ring.dma_b));
166 /*
167 * Allocate Receive Descriptor Rings
168 */
169
170 for (i = 0; i < ha->hw.num_rds_rings; i++) {
171 ha->hw.dma_buf.rds_ring[i].alignment = 8;
172
173 if (i == RDS_RING_INDEX_NORMAL) {
174 ha->hw.dma_buf.rds_ring[i].size =
175 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
176 } else if (i == RDS_RING_INDEX_JUMBO) {
177 ha->hw.dma_buf.rds_ring[i].size =
178 (sizeof(q80_recv_desc_t)) *
179 NUM_RX_JUMBO_DESCRIPTORS;
180 } else
181 break;
182
183 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
184 QL_DPRINT4((dev, "%s: rds ring alloc failed\n",
185 __func__));
186
187 for (j = 0; j < i; j++)
188 qla_free_dmabuf(ha,
189 &ha->hw.dma_buf.rds_ring[j]);
190
191 goto qla_alloc_dma_exit;
192 }
193 QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
194 __func__, i,
195 (void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
196 ha->hw.dma_buf.rds_ring[i].dma_b));
197 }
198 ha->hw.dma_buf.flags.rds_ring = 1;
199
200 /*
201 * Allocate Status Descriptor Rings
202 */
203
204 for (i = 0; i < ha->hw.num_sds_rings; i++) {
205 ha->hw.dma_buf.sds_ring[i].alignment = 8;
206 ha->hw.dma_buf.sds_ring[i].size =
207 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
208
209 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
210 device_printf(dev, "%s: sds ring alloc failed\n",
211 __func__);
212
213 for (j = 0; j < i; j++)
214 qla_free_dmabuf(ha,
215 &ha->hw.dma_buf.sds_ring[j]);
216
217 goto qla_alloc_dma_exit;
218 }
219 QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
220 __func__, i,
221 (void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
222 ha->hw.dma_buf.sds_ring[i].dma_b));
223 }
224 ha->hw.dma_buf.flags.sds_ring = 1;
225
226 /*
227 * Allocate Context Area
228 */
229 size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);
230
231 size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);
232
233 size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);
234
235 size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);
236
237 size += sizeof (uint32_t); /* for tx consumer index */
238
239 size = QL_ALIGN(size, PAGE_SIZE);
240
241 ha->hw.dma_buf.context.alignment = 8;
242 ha->hw.dma_buf.context.size = size;
243
244 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
245 device_printf(dev, "%s: context alloc failed\n", __func__);
246 goto qla_alloc_dma_exit;
247 }
248 ha->hw.dma_buf.flags.context = 1;
249 QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
250 __func__, (void *)(ha->hw.dma_buf.context.dma_addr),
251 ha->hw.dma_buf.context.dma_b));
252
253 qla_init_cntxt_regions(ha);
254
255 return 0;
256
257 qla_alloc_dma_exit:
258 qla_free_dma(ha);
259 return -1;
260 }
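/*
 * The context-area sizing above depends on QL_ALIGN() rounding each
 * request up to the next QL_BUFFER_ALIGN boundary so that the four
 * request/response blocks plus the tx consumer word can be carved out
 * of a single allocation (see qla_init_cntxt_regions() below). A
 * minimal sketch of that rounding, assuming QL_ALIGN is the usual
 * power-of-two round-up macro (an assumption; qla_hw.h has the real
 * definition):
 */
#if 0
#define ALIGN_UP(sz, a)	(((sz) + ((a) - 1)) & ~((uint32_t)((a) - 1)))
/* e.g. a 100-byte request rounded to a 64-byte boundary yields 128 */
#endif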
261
262 /*
263 * Name: qla_init_cntxt_regions
264 * Function: Initializes Tx/Rx Contexts.
265 */
266 static void
267 qla_init_cntxt_regions(qla_host_t *ha)
268 {
269 qla_hw_t *hw;
270 q80_tx_cntxt_req_t *tx_cntxt_req;
271 q80_rcv_cntxt_req_t *rx_cntxt_req;
272 bus_addr_t phys_addr;
273 uint32_t i;
274 device_t dev;
275 uint32_t size;
276
277 dev = ha->pci_dev;
278
279 hw = &ha->hw;
280
281 hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;
282
283 for (i = 0; i < ha->hw.num_sds_rings; i++)
284 hw->sds[i].sds_ring_base =
285 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
286
287
288 phys_addr = hw->dma_buf.context.dma_addr;
289
290 memset((void *)hw->dma_buf.context.dma_b, 0,
291 ha->hw.dma_buf.context.size);
292
293 hw->tx_cntxt_req =
294 (q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
295 hw->tx_cntxt_req_paddr = phys_addr;
296
297 size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);
298
299 hw->tx_cntxt_rsp =
300 (q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
301 hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size;
302
303 size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);
304
305 hw->rx_cntxt_req =
306 (q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
307 hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;
308
309 size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);
310
311 hw->rx_cntxt_rsp =
312 (q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
313 hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;
314
315 size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);
316
317 hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
318 hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;
319
320 /*
321 * Initialize the Transmit Context Request so that we don't need to
322 * do it every time we need to create a context
323 */
324 tx_cntxt_req = hw->tx_cntxt_req;
325
326 tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);
327
328 tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);
329
330 tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
331 CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));
332
333 tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);
334
335 tx_cntxt_req->phys_addr =
336 qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);
337
338 tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);
339
340 /*
341 * Initialize the Receive Context Request
342 */
343
344 rx_cntxt_req = hw->rx_cntxt_req;
345
346 rx_cntxt_req->rx_req.rsp_dma_addr =
347 qla_host_to_le64(hw->rx_cntxt_rsp_paddr);
348
349 rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
350 CNTXT_CAP0_LEGACY_MN |
351 CNTXT_CAP0_JUMBO |
352 CNTXT_CAP0_LRO|
353 CNTXT_CAP0_HW_LRO);
354
355 rx_cntxt_req->rx_req.intr_mode =
356 qla_host_to_le32(CNTXT_INTR_MODE_SHARED);
357
358 rx_cntxt_req->rx_req.rds_intr_mode =
359 qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);
360
361 rx_cntxt_req->rx_req.rds_ring_offset = 0;
362 rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
363 (hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
364 rx_cntxt_req->rx_req.num_rds_rings =
365 qla_host_to_le16(hw->num_rds_rings);
366 rx_cntxt_req->rx_req.num_sds_rings =
367 qla_host_to_le16(hw->num_sds_rings);
368
369 for (i = 0; i < hw->num_rds_rings; i++) {
370 rx_cntxt_req->rds_req[i].phys_addr =
371 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
372
373 if (i == RDS_RING_INDEX_NORMAL) {
374 rx_cntxt_req->rds_req[i].buf_size =
375 qla_host_to_le64(MCLBYTES);
376 rx_cntxt_req->rds_req[i].size =
377 qla_host_to_le32(NUM_RX_DESCRIPTORS);
378 } else {
379 rx_cntxt_req->rds_req[i].buf_size =
380 qla_host_to_le64(MJUM9BYTES);
381 rx_cntxt_req->rds_req[i].size =
382 qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
383 }
384 }
385
386 for (i = 0; i < hw->num_sds_rings; i++) {
387 rx_cntxt_req->sds_req[i].phys_addr =
388 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
389 rx_cntxt_req->sds_req[i].size =
390 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
391 rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
392 }
393
394 QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
395 __func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
396 QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
397 __func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
398 QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
399 __func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
400 QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
401 __func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
402 QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n",
403 __func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
404 }
405
406 /*
407 * Name: qla_issue_cmd
408 * Function: Issues commands on the CDRP interface and returns responses.
409 */
410 static int
411 qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
412 {
413 int ret = 0;
414 uint32_t signature;
415 uint32_t count = 400; /* 4 seconds or 400 10ms intervals */
416 uint32_t data;
417 device_t dev;
418
419 dev = ha->pci_dev;
420
421 signature = 0xcafe0000 | 0x0100 | ha->pci_func;
422
423 ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);
424
425 if (ret) {
426 device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
427 return (ret);
428 }
429
430 WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);
431
432 WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
433 WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
434 WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));
435
436 WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);
437
438 while (count) {
439 qla_mdelay(__func__, 10);
440
441 data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
442
443 if ((!(data & 0x80000000)))
444 break;
445 count--;
446 }
447 if ((!count) || (data != 1))
448 ret = -1;
449
450 cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
451 cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
452 cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
453 cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);
454
455 qla_sem_unlock(ha, Q8_SEM5_UNLOCK);
456
457 if (ret) {
458 device_printf(dev, "%s: "
459 "cmd[0x%08x] = 0x%08x\n"
460 "\tsig[0x%08x] = 0x%08x\n"
461 "\targ1[0x%08x] = 0x%08x\n"
462 "\targ2[0x%08x] = 0x%08x\n"
463 "\targ3[0x%08x] = 0x%08x\n",
464 __func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
465 Q8_NX_CDRP_SIGNATURE, signature,
466 Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
467 Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
468 Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);
469
470 device_printf(dev, "%s: exit (ret = 0x%x)\n"
471 "\t\t rsp = 0x%08x\n"
472 "\t\t arg1 = 0x%08x\n"
473 "\t\t arg2 = 0x%08x\n"
474 "\t\t arg3 = 0x%08x\n",
475 __func__, ret, cdrp->rsp,
476 cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
477 }
478
479 return (ret);
480 }
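/*
 * Callers drive the CDRP mailbox through a qla_cdrp_t: fill in cmd (and
 * any cmd_arg fields), call qla_issue_cmd(), then read the rsp_arg
 * fields. A minimal sketch, modeled on the qla_get_max_*() helpers
 * later in this file:
 */
#if 0
static int
example_read_max_rds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;

	bzero(&cdrp, sizeof(qla_cdrp_t));
	cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp))
		return (-1);		/* timed out or rsp was not 1 */

	return ((int)cdrp.rsp_arg1);	/* firmware returns the value in ARG1 */
}
#endif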
481
482 #define QLA_TX_MIN_FREE 2
483
484 /*
485 * Name: qla_fw_cmd
486 * Function: Issues firmware control commands on the Tx Ring.
487 */
488 static int
489 qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
490 {
491 device_t dev;
492 q80_tx_cmd_t *tx_cmd;
493 qla_hw_t *hw = &ha->hw;
494 int count = 100;
495
496 dev = ha->pci_dev;
497
498 QLA_TX_LOCK(ha);
499
500 if (hw->txr_free <= QLA_TX_MIN_FREE) {
501 while (count--) {
502 qla_hw_tx_done_locked(ha);
503 if (hw->txr_free > QLA_TX_MIN_FREE)
504 break;
505
506 QLA_TX_UNLOCK(ha);
507 qla_mdelay(__func__, 10);
508 QLA_TX_LOCK(ha);
509 }
510 if (hw->txr_free <= QLA_TX_MIN_FREE) {
511 QLA_TX_UNLOCK(ha);
512 device_printf(dev, "%s: xmit queue full\n", __func__);
513 return (-1);
514 }
515 }
516 tx_cmd = &hw->tx_ring_base[hw->txr_next];
517
518 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
519
520 bcopy(fw_cmd, tx_cmd, size);
521
522 hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
523 hw->txr_free--;
524
525 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
526
527 QLA_TX_UNLOCK(ha);
528
529 return (0);
530 }
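/*
 * Note the producer index advances with a mask, not a modulo:
 * (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1) is only equivalent to
 * (txr_next + 1) % NUM_TX_DESCRIPTORS when NUM_TX_DESCRIPTORS is a
 * power of two. A sketch of the idiom:
 */
#if 0
static __inline uint32_t
ring_next(uint32_t idx, uint32_t ring_size)
{
	/* ring_size must be a power of two for the mask to act as modulo */
	return ((idx + 1) & (ring_size - 1));
}
#endif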
531
532 /*
533 * Name: qla_config_rss
534 * Function: Configure RSS for the context/interface.
535 */
536 const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
537 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
538 0x255b0ec26d5a56daULL };
539
540 static int
541 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
542 {
543 qla_fw_cds_config_rss_t rss_config;
544 int ret, i;
545
546 bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t));
547
548 rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
549 rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS;
550 rss_config.hdr.cntxt_id = cntxt_id;
551
552 rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP |
553 Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP);
554 rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS;
555
556 rss_config.ind_tbl_mask = 0x7;
557
558 for (i = 0; i < 5; i++)
559 rss_config.rss_key[i] = rss_key[i];
560
561 ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t));
562
563 return ret;
564 }
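/*
 * With ind_tbl_mask = 0x7 the firmware is told to use an 8-entry RSS
 * indirection table, i.e. the low 3 bits of the computed hash select
 * the destination SDS ring. The table contents are managed by the
 * firmware, so the host-side view below is an assumption, for
 * illustration only:
 */
#if 0
static __inline uint32_t
rss_ring_for_hash(uint32_t rss_hash)
{
	return (rss_hash & 0x7);	/* assumed hash -> SDS ring mapping */
}
#endif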
565
566 /*
567 * Name: qla_config_intr_coalesce
568 * Function: Configure Interrupt Coalescing.
569 */
570 static int
571 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
572 {
573 qla_fw_cds_config_intr_coalesc_t intr_coalesce;
574 int ret;
575
576 bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t));
577
578 intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ;
579 intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING;
580 intr_coalesce.hdr.cntxt_id = cntxt_id;
581
582 intr_coalesce.flags = 0x04;
583 intr_coalesce.max_rcv_pkts = 256;
584 intr_coalesce.max_rcv_usecs = 3;
585 intr_coalesce.max_snd_pkts = 64;
586 intr_coalesce.max_snd_usecs = 4;
587
588 if (tenable) {
589 intr_coalesce.usecs_to = 1000; /* 1 millisecond */
590 intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC;
591 intr_coalesce.sds_ring_bitmask =
592 Q8_FWCMD_INTR_COALESC_SDS_RING_0;
593 }
594
595 ret = qla_fw_cmd(ha, &intr_coalesce,
596 sizeof(qla_fw_cds_config_intr_coalesc_t));
597
598 return ret;
599 }
600
601
602 /*
603 * Name: qla_config_mac_addr
604 * Function: Binds a MAC address to the context/interface.
605 * Can be unicast, multicast or broadcast.
606 */
607 static int
608 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id,
609 uint32_t add_multi)
610 {
611 qla_fw_cds_config_mac_addr_t mac_config;
612 int ret;
613
614 // device_printf(ha->pci_dev,
615 // "%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
616 // mac_addr[0], mac_addr[1], mac_addr[2],
617 // mac_addr[3], mac_addr[4], mac_addr[5]);
618
619 bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t));
620
621 mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
622 mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR;
623 mac_config.hdr.cntxt_id = cntxt_id;
624
625 if (add_multi)
626 mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR;
627 else
628 mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR;
629 bcopy(mac_addr, mac_config.mac_addr,6);
630
631 ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t));
632
633 return ret;
634 }
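/*
 * Typical usage, mirroring qla_init_hw_if() below: bind both the
 * interface MAC address and the broadcast address to the receive
 * context once it has been created.
 */
#if 0
	uint8_t bcast_mac[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	uint16_t cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

	qla_config_mac_addr(ha, ha->hw.mac_addr, cntxt_id, 1);
	qla_config_mac_addr(ha, bcast_mac, cntxt_id, 1);
#endif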
635
636
637 /*
638 * Name: qla_set_mac_rcv_mode
639 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
640 */
641 static int
642 qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode)
643 {
644 qla_set_mac_rcv_mode_t rcv_mode;
645 int ret;
646
647 bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t));
648
649 rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ;
650 rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE;
651 rcv_mode.hdr.cntxt_id = cntxt_id;
652
653 rcv_mode.mode = mode;
654
655 ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t));
656
657 return ret;
658 }
659
660 void
661 qla_set_promisc(qla_host_t *ha)
662 {
663 (void)qla_set_mac_rcv_mode(ha,
664 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
665 Q8_MAC_RCV_ENABLE_PROMISCUOUS);
666 }
667
668 void
669 qla_set_allmulti(qla_host_t *ha)
670 {
671 (void)qla_set_mac_rcv_mode(ha,
672 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
673 Q8_MAC_RCV_ENABLE_ALLMULTI);
674 }
675
676 void
677 qla_reset_promisc_allmulti(qla_host_t *ha)
678 {
679 (void)qla_set_mac_rcv_mode(ha,
680 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
681 Q8_MAC_RCV_RESET_PROMISC_ALLMULTI);
682 }
683
684 /*
685 * Name: qla_config_ipv4_addr
686 * Function: Configures the Destination IP Addr for LRO.
687 */
688 void
689 qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr)
690 {
691 qla_config_ipv4_t ip_conf;
692
693 bzero(&ip_conf, sizeof(qla_config_ipv4_t));
694
695 ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ;
696 ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR;
697 ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;
698
699 ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE;
700 ip_conf.ipv4_addr = (uint64_t)ipv4_addr;
701
702 (void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t));
703
704 return;
705 }
706
707 /*
708 * Name: qla_tx_tso
709 * Function: Checks if the packet to be transmitted is a candidate for
710 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
711 * Ring Structure are plugged in.
712 */
713 static int
714 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
715 {
716 struct ether_vlan_header *eh;
717 struct ip *ip = NULL;
718 struct tcphdr *th = NULL;
719 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen;
720 uint16_t etype, opcode, offload = 1;
721 device_t dev;
722
723 dev = ha->pci_dev;
724
725 if (mp->m_pkthdr.len <= ha->max_frame_size)
726 return (-1);
727
728 eh = mtod(mp, struct ether_vlan_header *);
729
730 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
731 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
732 etype = ntohs(eh->evl_proto);
733 } else {
734 ehdrlen = ETHER_HDR_LEN;
735 etype = ntohs(eh->evl_encap_proto);
736 }
737
738 switch (etype) {
739 case ETHERTYPE_IP:
740 ip = (struct ip *)(mp->m_data + ehdrlen);
741 ip_hlen = ip->ip_hl << 2;
742 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
743
744 if (ip->ip_p != IPPROTO_TCP) {
745 offload = 0;
746 } else
747 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
748 break;
749
750 default:
751 QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
752 offload = 0;
753 break;
754 }
755
756 if (!offload)
757 return (-1);
758
759 tcp_hlen = th->th_off << 2;
760
761 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
762
763 if (mp->m_len < hdrlen) {
764 device_printf(dev, "%s: (mp->m_len < hdrlen)\n", __func__);
765 return (-1);
766 }
767
768 	tx_cmd->flags_opcode = opcode;
769 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
770 tx_cmd->ip_hdr_off = ehdrlen;
771 tx_cmd->mss = mp->m_pkthdr.tso_segsz;
772 tx_cmd->total_hdr_len = hdrlen;
773
774 	/* Multicast check: least significant bit of the first MAC byte is 1 */
775 if (eh->evl_dhost[0] & 0x01) {
776 		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
777 }
778
779 return (0);
780 }
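/*
 * Worked example of the header arithmetic above: an untagged IPv4/TCP
 * frame with no IP or TCP options has ehdrlen = 14, ip_hlen = 20 and
 * tcp_hlen = 20, so total_hdr_len = 54; a VLAN tag grows ehdrlen to 18
 * and total_hdr_len to 58. These are the bytes qla_hw_send() later
 * copies inline into additional tx command descriptors.
 */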
781
782 /*
783 * Name: qla_tx_chksum
784 * Function: Checks if the packet to be transmitted is a candidate for
785 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
786 * Ring Structure are plugged in.
787 */
788 static int
789 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
790 {
791 struct ether_vlan_header *eh;
792 struct ip *ip;
793 struct ip6_hdr *ip6;
794 uint32_t ehdrlen, ip_hlen;
795 uint16_t etype, opcode, offload = 1;
796 device_t dev;
797
798 dev = ha->pci_dev;
799
800 if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
801 return (-1);
802
803 eh = mtod(mp, struct ether_vlan_header *);
804
805 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
806 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
807 etype = ntohs(eh->evl_proto);
808 } else {
809 ehdrlen = ETHER_HDR_LEN;
810 etype = ntohs(eh->evl_encap_proto);
811 }
812
813
814 switch (etype) {
815 case ETHERTYPE_IP:
816 ip = (struct ip *)(mp->m_data + ehdrlen);
817
818 ip_hlen = ip->ip_hl << 2;
819
820 if (mp->m_len < (ehdrlen + ip_hlen)) {
821 device_printf(dev, "%s: ipv4 mlen\n", __func__);
822 offload = 0;
823 break;
824 }
825
826 if (ip->ip_p == IPPROTO_TCP)
827 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
828 else if (ip->ip_p == IPPROTO_UDP)
829 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
830 else {
831 device_printf(dev, "%s: ipv4\n", __func__);
832 offload = 0;
833 }
834 break;
835
836 case ETHERTYPE_IPV6:
837 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
838
839 ip_hlen = sizeof(struct ip6_hdr);
840
841 if (mp->m_len < (ehdrlen + ip_hlen)) {
842 device_printf(dev, "%s: ipv6 mlen\n", __func__);
843 offload = 0;
844 break;
845 }
846
847 if (ip6->ip6_nxt == IPPROTO_TCP)
848 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
849 else if (ip6->ip6_nxt == IPPROTO_UDP)
850 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
851 else {
852 device_printf(dev, "%s: ipv6\n", __func__);
853 offload = 0;
854 }
855 break;
856
857 default:
858 offload = 0;
859 break;
860 }
861 if (!offload)
862 return (-1);
863
864 tx_cmd->flags_opcode = opcode;
865
866 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
867
868 return (0);
869 }
870
871 /*
872 * Name: qla_hw_send
873 * Function: Transmits a packet. It first checks if the packet is a
874 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
875 * offload. If it meets neither of these criteria, it is transmitted
876 * as a regular ethernet frame.
877 */
878 int
879 qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
880 uint32_t *tx_idx, struct mbuf *mp)
881 {
882 struct ether_vlan_header *eh;
883 qla_hw_t *hw = &ha->hw;
884 q80_tx_cmd_t *tx_cmd, tso_cmd;
885 bus_dma_segment_t *c_seg;
886 uint32_t num_tx_cmds, hdr_len = 0;
887 uint32_t total_length = 0, bytes, tx_cmd_count = 0;
888 device_t dev;
889 int i;
890
891 dev = ha->pci_dev;
892
893 /*
894 	 * Always make sure there is at least one empty slot in the tx_ring;
895 	 * the tx_ring is considered full when only one entry is available
896 */
897 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
898
899 total_length = mp->m_pkthdr.len;
900 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
901 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
902 __func__, total_length);
903 return (-1);
904 }
905
906 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
907
908 if (qla_tx_tso(ha, mp, &tso_cmd) == 0) {
909 /* find the additional tx_cmd descriptors required */
910
911 hdr_len = tso_cmd.total_hdr_len;
912
913 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
914 bytes = QL_MIN(bytes, hdr_len);
915
916 num_tx_cmds++;
917 hdr_len -= bytes;
918
919 while (hdr_len) {
920 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
921 hdr_len -= bytes;
922 num_tx_cmds++;
923 }
924 hdr_len = tso_cmd.total_hdr_len;
925 }
926
927 if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
928 qla_hw_tx_done_locked(ha);
929 if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
930 QL_DPRINT8((dev, "%s: (hw->txr_free <= "
931 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
932 __func__));
933 return (-1);
934 }
935 }
936
937 *tx_idx = hw->txr_next;
938
939 tx_cmd = &hw->tx_ring_base[hw->txr_next];
940
941 if (hdr_len == 0) {
942 if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
943 (mp->m_pkthdr.len > ha->max_frame_size)){
944 /* TBD: copy into private buffer and send it */
945 device_printf(dev,
946 "%s: (nsegs[%d, %d, 0x%x] > Q8_TX_MAX_SEGMENTS)\n",
947 __func__, nsegs, mp->m_pkthdr.len,
948 mp->m_pkthdr.csum_flags);
949 qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
950 mtod(mp, char *), mp->m_len);
951 return (EINVAL);
952 }
953 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
954 if (qla_tx_chksum(ha, mp, tx_cmd) != 0)
955 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
956 } else {
957 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
958 }
959
960 eh = mtod(mp, struct ether_vlan_header *);
961 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
962 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
963 else if (mp->m_flags & M_VLANTAG) {
964 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
965 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
966 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
967 }
968
969
970 tx_cmd->n_bufs = (uint8_t)nsegs;
971 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
972 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
973 tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
974
975 c_seg = segs;
976
977 while (1) {
978 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
979
980 switch (i) {
981 case 0:
982 tx_cmd->buf1_addr = c_seg->ds_addr;
983 tx_cmd->buf1_len = c_seg->ds_len;
984 break;
985
986 case 1:
987 tx_cmd->buf2_addr = c_seg->ds_addr;
988 tx_cmd->buf2_len = c_seg->ds_len;
989 break;
990
991 case 2:
992 tx_cmd->buf3_addr = c_seg->ds_addr;
993 tx_cmd->buf3_len = c_seg->ds_len;
994 break;
995
996 case 3:
997 tx_cmd->buf4_addr = c_seg->ds_addr;
998 tx_cmd->buf4_len = c_seg->ds_len;
999 break;
1000 }
1001
1002 c_seg++;
1003 nsegs--;
1004 }
1005
1006 hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
1007 tx_cmd_count++;
1008
1009 if (!nsegs)
1010 break;
1011
1012 tx_cmd = &hw->tx_ring_base[hw->txr_next];
1013 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1014 }
1015
1016 if (hdr_len) {
1017 /* TSO : Copy the header in the following tx cmd descriptors */
1018 uint8_t *src, *dst;
1019
1020 src = (uint8_t *)eh;
1021
1022 tx_cmd = &hw->tx_ring_base[hw->txr_next];
1023 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1024
1025 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1026 bytes = QL_MIN(bytes, hdr_len);
1027
1028 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1029
1030 if (mp->m_flags & M_VLANTAG) {
1031 /* first copy the src/dst MAC addresses */
1032 bcopy(src, dst, (ETHER_ADDR_LEN * 2));
1033 dst += (ETHER_ADDR_LEN * 2);
1034 src += (ETHER_ADDR_LEN * 2);
1035
1036 hdr_len -= (ETHER_ADDR_LEN * 2);
1037
1038 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
1039 dst += 2;
1040 *((uint16_t *)dst) = mp->m_pkthdr.ether_vtag;
1041 dst += 2;
1042
1043 bytes -= ((ETHER_ADDR_LEN * 2) + 4);
1044
1045 bcopy(src, dst, bytes);
1046 src += bytes;
1047 hdr_len -= bytes;
1048 } else {
1049 bcopy(src, dst, bytes);
1050 src += bytes;
1051 hdr_len -= bytes;
1052 }
1053
1054 hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
1055 tx_cmd_count++;
1056
1057 while (hdr_len) {
1058 tx_cmd = &hw->tx_ring_base[hw->txr_next];
1059 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1060
1061 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1062
1063 bcopy(src, tx_cmd, bytes);
1064 src += bytes;
1065 hdr_len -= bytes;
1066 hw->txr_next =
1067 (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
1068 tx_cmd_count++;
1069 }
1070 }
1071
1072 hw->txr_free = hw->txr_free - tx_cmd_count;
1073
1074 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
1075 QL_DPRINT8((dev, "%s: return\n", __func__));
1076 return (0);
1077 }
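/*
 * Descriptor accounting above, made concrete: the ">> 2" in the
 * num_tx_cmds computation assumes Q8_TX_CMD_MAX_SEGMENTS is 4, so e.g.
 * nsegs = 10 needs (10 + 3) >> 2 = 3 descriptors for buffer pointers.
 * For TSO the headers are copied inline as well: the first extra
 * descriptor carries sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN bytes
 * and each subsequent one a full sizeof(q80_tx_cmd_t). A sketch of the
 * same count:
 */
#if 0
static uint32_t
tso_extra_descs(uint32_t hdr_len)
{
	uint32_t n = 1;		/* first, partially used descriptor */
	uint32_t bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;

	bytes = QL_MIN(bytes, hdr_len);
	hdr_len -= bytes;
	while (hdr_len) {
		bytes = QL_MIN(sizeof(q80_tx_cmd_t), hdr_len);
		hdr_len -= bytes;
		n++;
	}
	return (n);
}
#endif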
1078
1079 /*
1080 * Name: qla_del_hw_if
1081 * Function: Destroys the hardware specific entities corresponding to an
1082 * Ethernet Interface
1083 */
1084 void
1085 qla_del_hw_if(qla_host_t *ha)
1086 {
1087 int i;
1088
1089 for (i = 0; i < ha->hw.num_sds_rings; i++)
1090 QL_DISABLE_INTERRUPTS(ha, i);
1091
1092 qla_del_rcv_cntxt(ha);
1093 qla_del_xmt_cntxt(ha);
1094
1095 ha->hw.flags.lro = 0;
1096 }
1097
1098 /*
1099 * Name: qla_init_hw_if
1100 * Function: Creates the hardware specific entities corresponding to an
1101 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
1102 * corresponding to the interface. Enables LRO if allowed.
1103 */
1104 int
1105 qla_init_hw_if(qla_host_t *ha)
1106 {
1107 device_t dev;
1108 int i;
1109 uint8_t bcast_mac[6];
1110
1111 qla_get_hw_caps(ha);
1112
1113 dev = ha->pci_dev;
1114
1115 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1116 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
1117 ha->hw.dma_buf.sds_ring[i].size);
1118 }
1119 /*
1120 * Create Receive Context
1121 */
1122 if (qla_init_rcv_cntxt(ha)) {
1123 return (-1);
1124 }
1125
1126 ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2;
1127 ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2;
1128 ha->hw.rx_in = ha->hw.rxj_in = 0;
1129
1130 /* Update the RDS Producer Indices */
1131 QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
1132 QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
1133
1134 /*
1135 * Create Transmit Context
1136 */
1137 if (qla_init_xmt_cntxt(ha)) {
1138 qla_del_rcv_cntxt(ha);
1139 return (-1);
1140 }
1141
1142 qla_config_mac_addr(ha, ha->hw.mac_addr,
1143 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);
1144
1145 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
1146 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
1147 qla_config_mac_addr(ha, bcast_mac,
1148 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);
1149
1150 qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
1151
1152 qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0);
1153
1154 for (i = 0; i < ha->hw.num_sds_rings; i++)
1155 QL_ENABLE_INTERRUPTS(ha, i);
1156
1157 return (0);
1158 }
1159
1160 /*
1161 * Name: qla_init_rcv_cntxt
1162 * Function: Creates the Receive Context.
1163 */
1164 static int
1165 qla_init_rcv_cntxt(qla_host_t *ha)
1166 {
1167 device_t dev;
1168 qla_cdrp_t cdrp;
1169 q80_rcv_cntxt_rsp_t *rsp;
1170 q80_stat_desc_t *sdesc;
1171 bus_addr_t phys_addr;
1172 int i, j;
1173 qla_hw_t *hw = &ha->hw;
1174
1175 dev = ha->pci_dev;
1176
1177 /*
1178 * Create Receive Context
1179 */
1180
1181 for (i = 0; i < hw->num_sds_rings; i++) {
1182 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
1183 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
1184 sdesc->data[0] =
1185 Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
1186 }
1187 }
1188
1189 phys_addr = ha->hw.rx_cntxt_req_paddr;
1190
1191 bzero(&cdrp, sizeof(qla_cdrp_t));
1192
1193 cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT;
1194 cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
1195 cdrp.cmd_arg2 = (uint32_t)(phys_addr);
1196 cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t));
1197
1198 if (qla_issue_cmd(ha, &cdrp)) {
1199 device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n",
1200 __func__);
1201 return (-1);
1202 } else {
1203 rsp = ha->hw.rx_cntxt_rsp;
1204
1205 QL_DPRINT2((dev, "%s: rcv cntxt successful"
1206 " rds_ring_offset = 0x%08x"
1207 " sds_ring_offset = 0x%08x"
1208 " cntxt_state = 0x%08x"
1209 " funcs_per_port = 0x%08x"
1210 " num_rds_rings = 0x%04x"
1211 " num_sds_rings = 0x%04x"
1212 " cntxt_id = 0x%04x"
1213 " phys_port = 0x%02x"
1214 " virt_port = 0x%02x\n",
1215 __func__,
1216 rsp->rx_rsp.rds_ring_offset,
1217 rsp->rx_rsp.sds_ring_offset,
1218 rsp->rx_rsp.cntxt_state,
1219 rsp->rx_rsp.funcs_per_port,
1220 rsp->rx_rsp.num_rds_rings,
1221 rsp->rx_rsp.num_sds_rings,
1222 rsp->rx_rsp.cntxt_id,
1223 rsp->rx_rsp.phys_port,
1224 rsp->rx_rsp.virt_port));
1225
1226 for (i = 0; i < ha->hw.num_rds_rings; i++) {
1227 QL_DPRINT2((dev,
1228 "%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n",
1229 __func__, i, rsp->rds_rsp[i].producer_reg));
1230 }
1231 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1232 QL_DPRINT2((dev,
1233 "%s: rcv cntxt sds[%i].consumer_reg = 0x%08x"
1234 " sds[%i].intr_mask_reg = 0x%08x\n",
1235 __func__, i, rsp->sds_rsp[i].consumer_reg,
1236 i, rsp->sds_rsp[i].intr_mask_reg));
1237 }
1238 }
1239 ha->hw.flags.init_rx_cnxt = 1;
1240 return (0);
1241 }
1242
1243 /*
1244 * Name: qla_del_rcv_cntxt
1245 * Function: Destroys the Receive Context.
1246 */
1247 void
1248 qla_del_rcv_cntxt(qla_host_t *ha)
1249 {
1250 qla_cdrp_t cdrp;
1251 device_t dev = ha->pci_dev;
1252
1253 if (!ha->hw.flags.init_rx_cnxt)
1254 return;
1255
1256 bzero(&cdrp, sizeof(qla_cdrp_t));
1257
1258 cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT;
1259 cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;
1260
1261 if (qla_issue_cmd(ha, &cdrp)) {
1262 device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n",
1263 __func__);
1264 }
1265 ha->hw.flags.init_rx_cnxt = 0;
1266 }
1267
1268 /*
1269 * Name: qla_init_xmt_cntxt
1270 * Function: Creates the Transmit Context.
1271 */
1272 static int
1273 qla_init_xmt_cntxt(qla_host_t *ha)
1274 {
1275 bus_addr_t phys_addr;
1276 device_t dev;
1277 q80_tx_cntxt_rsp_t *tx_rsp;
1278 qla_cdrp_t cdrp;
1279 qla_hw_t *hw = &ha->hw;
1280
1281 dev = ha->pci_dev;
1282
1283 /*
1284 * Create Transmit Context
1285 */
1286 phys_addr = ha->hw.tx_cntxt_req_paddr;
1287 tx_rsp = ha->hw.tx_cntxt_rsp;
1288
1289 hw->txr_comp = hw->txr_next = 0;
1290 *(hw->tx_cons) = 0;
1291
1292 bzero(&cdrp, sizeof(qla_cdrp_t));
1293
1294 cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT;
1295 cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
1296 cdrp.cmd_arg2 = (uint32_t)(phys_addr);
1297 cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t));
1298
1299 if (qla_issue_cmd(ha, &cdrp)) {
1300 device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n",
1301 __func__);
1302 return (-1);
1303 } else {
1304 ha->hw.tx_prod_reg = tx_rsp->producer_reg;
1305
1306 QL_DPRINT2((dev, "%s: tx cntxt successful"
1307 " cntxt_state = 0x%08x "
1308 " cntxt_id = 0x%04x "
1309 " phys_port_id = 0x%02x "
1310 " virt_port_id = 0x%02x "
1311 " producer_reg = 0x%08x "
1312 " intr_mask_reg = 0x%08x\n",
1313 __func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id,
1314 tx_rsp->phys_port_id, tx_rsp->virt_port_id,
1315 tx_rsp->producer_reg, tx_rsp->intr_mask_reg));
1316 }
1317 ha->hw.txr_free = NUM_TX_DESCRIPTORS;
1318
1319 ha->hw.flags.init_tx_cnxt = 1;
1320 return (0);
1321 }
1322
1323 /*
1324 * Name: qla_del_xmt_cntxt
1325 * Function: Destroys the Transmit Context.
1326 */
1327 static void
1328 qla_del_xmt_cntxt(qla_host_t *ha)
1329 {
1330 qla_cdrp_t cdrp;
1331 device_t dev = ha->pci_dev;
1332
1333 if (!ha->hw.flags.init_tx_cnxt)
1334 return;
1335
1336 bzero(&cdrp, sizeof(qla_cdrp_t));
1337
1338 cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT;
1339 cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id;
1340
1341 if (qla_issue_cmd(ha, &cdrp)) {
1342 device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n",
1343 __func__);
1344 }
1345 ha->hw.flags.init_tx_cnxt = 0;
1346 }
1347
1348 /*
1349 * Name: qla_get_max_rds
1350 * Function: Returns the maximum number of Receive Descriptor Rings per context.
1351 */
1352 static int
1353 qla_get_max_rds(qla_host_t *ha)
1354 {
1355 qla_cdrp_t cdrp;
1356 device_t dev;
1357
1358 dev = ha->pci_dev;
1359
1360 bzero(&cdrp, sizeof(qla_cdrp_t));
1361
1362 cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;
1363
1364 if (qla_issue_cmd(ha, &cdrp)) {
1365 device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
1366 __func__);
1367 return (-1);
1368 } else {
1369 ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
1370 QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
1371 __func__, ha->hw.max_rds_per_cntxt));
1372 }
1373 return 0;
1374 }
1375
1376 /*
1377 * Name: qla_get_max_sds
1378 * Function: Returns the maximum number of Status Descriptor Rings per context.
1379 */
1380 static int
1381 qla_get_max_sds(qla_host_t *ha)
1382 {
1383 qla_cdrp_t cdrp;
1384 device_t dev;
1385
1386 dev = ha->pci_dev;
1387
1388 bzero(&cdrp, sizeof(qla_cdrp_t));
1389
1390 cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;
1391
1392 if (qla_issue_cmd(ha, &cdrp)) {
1393 		device_printf(dev, "%s: Q8_CMD_RD_MAX_SDS_PER_CNTXT failed\n",
1394 __func__);
1395 return (-1);
1396 } else {
1397 ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
1398 QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
1399 __func__, ha->hw.max_sds_per_cntxt));
1400 }
1401 return 0;
1402 }
1403
1404 /*
1405 * Name: qla_get_max_rules
1406 * Function: Returns the maximum number of Rules per context.
1407 */
1408 static int
1409 qla_get_max_rules(qla_host_t *ha)
1410 {
1411 qla_cdrp_t cdrp;
1412 device_t dev;
1413
1414 dev = ha->pci_dev;
1415
1416 bzero(&cdrp, sizeof(qla_cdrp_t));
1417
1418 cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;
1419
1420 if (qla_issue_cmd(ha, &cdrp)) {
1421 device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
1422 __func__);
1423 return (-1);
1424 } else {
1425 ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
1426 QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
1427 __func__, ha->hw.max_rules_per_cntxt));
1428 }
1429 return 0;
1430 }
1431
1432 /*
1433 * Name: qla_get_max_rcv_cntxts
1434 * Function: Returns the maximum number of Receive Contexts supported.
1435 */
1436 static int
1437 qla_get_max_rcv_cntxts(qla_host_t *ha)
1438 {
1439 qla_cdrp_t cdrp;
1440 device_t dev;
1441
1442 dev = ha->pci_dev;
1443
1444 bzero(&cdrp, sizeof(qla_cdrp_t));
1445
1446 cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;
1447
1448 if (qla_issue_cmd(ha, &cdrp)) {
1449 device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
1450 __func__);
1451 return (-1);
1452 } else {
1453 ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
1454 QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
1455 __func__, ha->hw.max_rcv_cntxts));
1456 }
1457 return 0;
1458 }
1459
1460 /*
1461 * Name: qla_get_max_tx_cntxts
1462 * Function: Returns the maximum number of Transmit Contexts supported.
1463 */
1464 static int
1465 qla_get_max_tx_cntxts(qla_host_t *ha)
1466 {
1467 qla_cdrp_t cdrp;
1468 device_t dev;
1469
1470 dev = ha->pci_dev;
1471
1472 bzero(&cdrp, sizeof(qla_cdrp_t));
1473
1474 cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;
1475
1476 if (qla_issue_cmd(ha, &cdrp)) {
1477 device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
1478 __func__);
1479 return (-1);
1480 } else {
1481 ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
1482 QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
1483 __func__, ha->hw.max_xmt_cntxts));
1484 }
1485 return 0;
1486 }
1487
1488 /*
1489 * Name: qla_get_max_mtu
1490 * Function: Returns the MTU supported for a context.
1491 */
1492 static int
1493 qla_get_max_mtu(qla_host_t *ha)
1494 {
1495 qla_cdrp_t cdrp;
1496 device_t dev;
1497
1498 dev = ha->pci_dev;
1499
1500 bzero(&cdrp, sizeof(qla_cdrp_t));
1501
1502 cdrp.cmd = Q8_CMD_RD_MAX_MTU;
1503
1504 if (qla_issue_cmd(ha, &cdrp)) {
1505 device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
1506 return (-1);
1507 } else {
1508 ha->hw.max_mtu = cdrp.rsp_arg1;
1509 QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
1510 ha->hw.max_mtu));
1511 }
1512 return 0;
1513 }
1514
1515 /*
1516 * Name: qla_set_max_mtu
1517 * Function:
1518 * Sets the maximum transfer unit size for the specified rcv context.
1519 */
1520 int
1521 qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1522 {
1523 qla_cdrp_t cdrp;
1524 device_t dev;
1525
1526 dev = ha->pci_dev;
1527
1528 bzero(&cdrp, sizeof(qla_cdrp_t));
1529
1530 cdrp.cmd = Q8_CMD_SET_MTU;
1531 cdrp.cmd_arg1 = (uint32_t)cntxt_id;
1532 cdrp.cmd_arg2 = mtu;
1533
1534 if (qla_issue_cmd(ha, &cdrp)) {
1535 		device_printf(dev, "%s: Q8_CMD_SET_MTU failed\n", __func__);
1536 return (-1);
1537 } else {
1538 ha->hw.max_mtu = cdrp.rsp_arg1;
1539 }
1540 return 0;
1541 }
1542
1543 /*
1544 * Name: qla_get_max_lro
1545 * Function: Returns the maximum number of TCP Connections that can be
1546 * supported with LRO.
1547 */
1548 static int
1549 qla_get_max_lro(qla_host_t *ha)
1550 {
1551 qla_cdrp_t cdrp;
1552 device_t dev;
1553
1554 dev = ha->pci_dev;
1555
1556 bzero(&cdrp, sizeof(qla_cdrp_t));
1557
1558 cdrp.cmd = Q8_CMD_RD_MAX_LRO;
1559
1560 if (qla_issue_cmd(ha, &cdrp)) {
1561 device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
1562 return (-1);
1563 } else {
1564 ha->hw.max_lro = cdrp.rsp_arg1;
1565 QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
1566 ha->hw.max_lro));
1567 }
1568 return 0;
1569 }
1570
1571 /*
1572 * Name: qla_get_flow_control
1573 * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
1574 * PCI function.
1575 */
1576 static int
1577 qla_get_flow_control(qla_host_t *ha)
1578 {
1579 qla_cdrp_t cdrp;
1580 device_t dev;
1581
1582 dev = ha->pci_dev;
1583
1584 bzero(&cdrp, sizeof(qla_cdrp_t));
1585
1586 cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;
1587
1588 if (qla_issue_cmd(ha, &cdrp)) {
1589 device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
1590 __func__);
1591 return (-1);
1592 } else {
1593 QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
1594 cdrp.rsp_arg1));
1595 }
1596 return 0;
1597 }
1598
1599 /*
1600 * Name: qla_get_hw_caps
1601 * Function: Retrieves hardware capabilities
1602 */
1603 void
1604 qla_get_hw_caps(qla_host_t *ha)
1605 {
1606 //qla_read_mac_addr(ha);
1607 qla_get_max_rds(ha);
1608 qla_get_max_sds(ha);
1609 qla_get_max_rules(ha);
1610 qla_get_max_rcv_cntxts(ha);
1611 qla_get_max_tx_cntxts(ha);
1612 qla_get_max_mtu(ha);
1613 qla_get_max_lro(ha);
1614 qla_get_flow_control(ha);
1615 return;
1616 }
1617
1618 /*
1619 * Name: qla_hw_set_multi
1620 * Function: Sets the Multicast Addresses provided by the host O.S. into the
1621 * hardware (for the given interface)
1622 */
1623 void
1624 qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1625 uint32_t add_multi)
1626 {
1627 q80_rcv_cntxt_rsp_t *rsp;
1628 int i;
1629
1630 rsp = ha->hw.rx_cntxt_rsp;
1631 for (i = 0; i < mcnt; i++) {
1632 qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
1633 mta += Q8_MAC_ADDR_LEN;
1634 }
1635 return;
1636 }
1637
1638 /*
1639 * Name: qla_hw_tx_done_locked
1640 * Function: Handle Transmit Completions
1641 */
1642 static void
1643 qla_hw_tx_done_locked(qla_host_t *ha)
1644 {
1645 qla_tx_buf_t *txb;
1646 qla_hw_t *hw = &ha->hw;
1647 uint32_t comp_idx, comp_count = 0;
1648
1649 /* retrieve index of last entry in tx ring completed */
1650 comp_idx = qla_le32_to_host(*(hw->tx_cons));
1651
1652 while (comp_idx != hw->txr_comp) {
1653
1654 txb = &ha->tx_buf[hw->txr_comp];
1655
1656 hw->txr_comp++;
1657 if (hw->txr_comp == NUM_TX_DESCRIPTORS)
1658 hw->txr_comp = 0;
1659
1660 comp_count++;
1661
1662 if (txb->m_head) {
1663 bus_dmamap_sync(ha->tx_tag, txb->map,
1664 BUS_DMASYNC_POSTWRITE);
1665 bus_dmamap_unload(ha->tx_tag, txb->map);
1666 bus_dmamap_destroy(ha->tx_tag, txb->map);
1667 m_freem(txb->m_head);
1668
1669 txb->map = (bus_dmamap_t)0;
1670 txb->m_head = NULL;
1671 }
1672 }
1673
1674 hw->txr_free += comp_count;
1675
1676 QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n", __func__,
1677 hw->txr_comp, hw->txr_free, hw->txr_next, READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));
1678
1679 return;
1680 }
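/*
 * The walk above advances txr_comp toward the firmware's consumer
 * index. The number of entries still outstanding at any moment follows
 * the usual masked-difference idiom, valid because NUM_TX_DESCRIPTORS
 * is a power of two:
 */
#if 0
static __inline uint32_t
txr_in_flight(uint32_t producer, uint32_t consumer)
{
	return ((producer - consumer) & (NUM_TX_DESCRIPTORS - 1));
}
#endif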
1681
1682 /*
1683 * Name: qla_hw_tx_done
1684 * Function: Handle Transmit Completions
1685 */
1686 void
1687 qla_hw_tx_done(qla_host_t *ha)
1688 {
1689 if (!mtx_trylock(&ha->tx_lock)) {
1690 QL_DPRINT8((ha->pci_dev,
1691 "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
1692 return;
1693 }
1694 qla_hw_tx_done_locked(ha);
1695
1696 if (ha->hw.txr_free > free_pkt_thres)
1697 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1698
1699 mtx_unlock(&ha->tx_lock);
1700 return;
1701 }
1702
1703 void
1704 qla_update_link_state(qla_host_t *ha)
1705 {
1706 uint32_t link_state;
1707
1708 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1709 ha->hw.flags.link_up = 0;
1710 return;
1711 }
1712 link_state = READ_REG32(ha, Q8_LINK_STATE);
1713
1714 if (ha->pci_func == 0)
1715 ha->hw.flags.link_up = (((link_state & 0xF) == 1)? 1 : 0);
1716 else
1717 ha->hw.flags.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
1718 }
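/*
 * Q8_LINK_STATE packs one 4-bit state per PCI function, with a value of
 * 1 meaning link up. For example, link_state = 0x11 decodes as "up" for
 * both function 0 (bits 3:0) and function 1 (bits 7:4).
 */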
1719
1720 int
1721 qla_config_lro(qla_host_t *ha)
1722 {
1723 int i;
1724 qla_hw_t *hw = &ha->hw;
1725 struct lro_ctrl *lro;
1726
1727 for (i = 0; i < hw->num_sds_rings; i++) {
1728 lro = &hw->sds[i].lro;
1729 if (tcp_lro_init(lro)) {
1730 device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
1731 __func__);
1732 return (-1);
1733 }
1734 lro->ifp = ha->ifp;
1735 }
1736 ha->flags.lro_init = 1;
1737
1738 QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
1739 return (0);
1740 }
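/*
 * The lro_ctrl set up here is consumed on the receive path. The usual
 * FreeBSD 9-era pattern (a sketch under that assumption, not this
 * driver's actual rx code) queues each mbuf into LRO and flushes the
 * active list at the end of the interrupt pass:
 */
#if 0
	struct lro_entry *queued;

	if (tcp_lro_rx(lro, mp, 0) != 0)
		(*ha->ifp->if_input)(ha->ifp, mp);	/* LRO did not take it */

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif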
1741
1742 void
1743 qla_free_lro(qla_host_t *ha)
1744 {
1745 int i;
1746 qla_hw_t *hw = &ha->hw;
1747 struct lro_ctrl *lro;
1748
1749 if (!ha->flags.lro_init)
1750 return;
1751
1752 for (i = 0; i < hw->num_sds_rings; i++) {
1753 lro = &hw->sds[i].lro;
1754 tcp_lro_free(lro);
1755 }
1756 ha->flags.lro_init = 0;
1757 }
1758
1759 void
1760 qla_hw_stop_rcv(qla_host_t *ha)
1761 {
1762 int i, done, count = 100;
1763
1764 while (count--) {
1765 done = 1;
1766 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1767 if (ha->hw.sds[i].rcv_active)
1768 done = 0;
1769 }
1770 if (done)
1771 break;
1772 else
1773 qla_mdelay(__func__, 10);
1774 }
1775 }
1776