/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

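/*
 * qls_tx_comp() - process a single transmit completion on ring txr_idx.
 * The completion's tid_lo field carries the descriptor index the driver
 * assigned when the frame was queued; the associated DMA map is synced
 * and unloaded, the mbuf freed, and the ring's done index advanced.
 */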
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
        qla_tx_buf_t *txb;
        uint32_t tx_idx = tx_comp->tid_lo;

        if (tx_idx >= NUM_TX_DESCRIPTORS) {
                /* Out-of-range transmit ID; request adapter recovery. */
                ha->qla_initiate_recovery = 1;
                return;
        }

        txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

        if (txb->m_head) {
                if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
                bus_dmamap_sync(ha->tx_tag, txb->map,
                        BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(ha->tx_tag, txb->map);
                m_freem(txb->m_head);

                txb->m_head = NULL;
        }

        ha->tx_ring[txr_idx].txr_done++;

        if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
                ha->tx_ring[txr_idx].txr_done = 0;
}

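/*
 * qls_replenish_rx() - refill the small buffer queue (SBQ) of receive ring
 * r_idx with fresh mbufs and post their bus addresses to the hardware.
 * The producer index is written out in batches of 16 entries.
 */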
static void
qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
{
        qla_rx_buf_t *rxb;
        qla_rx_ring_t *rxr;
        int count;
        volatile q81_bq_addr_e_t *sbq_e;

        rxr = &ha->rx_ring[r_idx];

        count = rxr->rx_free;
        sbq_e = rxr->sbq_vaddr;

        while (count--) {
                rxb = &rxr->rx_buf[rxr->sbq_next];

                if (rxb->m_head == NULL) {
                        if (qls_get_mbuf(ha, rxb, NULL) != 0) {
                                device_printf(ha->pci_dev,
                                        "%s: qls_get_mbuf [0,%d,%d] failed\n",
                                        __func__, rxr->sbq_next, r_idx);
                                rxb->m_head = NULL;
                                break;
                        }
                }

                if (rxb->m_head != NULL) {
                        sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
                        sbq_e[rxr->sbq_next].addr_hi =
                                (uint32_t)(rxb->paddr >> 32);

                        rxr->sbq_next++;
                        if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
                                rxr->sbq_next = 0;

                        rxr->sbq_free++;
                        rxr->rx_free--;
                }

                /* Tell the hardware about the new buffers 16 at a time. */
                if (rxr->sbq_free == 16) {
                        rxr->sbq_in += 16;
                        rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
                        rxr->sbq_free = 0;

                        Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
                }
        }
}

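/*
 * qls_rx_comp() - process one receive completion: validate it against the
 * buffer at rx_next, strip any VLAN encapsulation, set RSS and checksum
 * metadata, and hand the mbuf to LRO or if_input.  Returns 0 on success,
 * -1 if the completion is inconsistent with the posted buffer.
 */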
static int
qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
{
        qla_rx_buf_t *rxb;
        qla_rx_ring_t *rxr;
        device_t dev = ha->pci_dev;
        struct mbuf *mp = NULL;
        struct ifnet *ifp = ha->ifp;
#if defined(INET) || defined(INET6)
        struct lro_ctrl *lro;
#endif
        struct ether_vlan_header *eh;

        rxr = &ha->rx_ring[rxr_idx];

#if defined(INET) || defined(INET6)
        lro = &rxr->lro;
#endif

        rxb = &rxr->rx_buf[rxr->rx_next];

        if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
                device_printf(dev, "%s: DS bit not set\n", __func__);
                return (-1);
        }
        if (rxb->paddr != cq_e->b_paddr) {
                device_printf(dev,
                        "%s: (rxb->paddr != cq_e->b_paddr)[%p, %p]\n",
                        __func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);

                Q81_SET_CQ_INVALID(cq_idx);

                ha->qla_initiate_recovery = 1;

                return (-1);
        }

        rxr->rx_int++;

        if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
                mp = rxb->m_head;
                rxb->m_head = NULL;

                if (mp == NULL) {
                        device_printf(dev, "%s: mp == NULL\n", __func__);
                } else {
                        mp->m_flags |= M_PKTHDR;
                        mp->m_pkthdr.len = cq_e->length;
                        mp->m_pkthdr.rcvif = ifp;
                        mp->m_len = cq_e->length;

                        eh = mtod(mp, struct ether_vlan_header *);

                        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                                uint32_t *data = (uint32_t *)eh;

                                mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
                                mp->m_flags |= M_VLANTAG;

                                /*
                                 * Shift the MAC addresses forward by 4 bytes,
                                 * overwriting the VLAN tag, then trim the
                                 * encapsulation from the front of the frame.
                                 */
                                *(data + 3) = *(data + 2);
                                *(data + 2) = *(data + 1);
                                *(data + 1) = *data;

                                m_adj(mp, ETHER_VLAN_ENCAP_LEN);
                        }

                        if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
                                rxr->rss_int++;
                                mp->m_pkthdr.flowid = cq_e->rss;
                                M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH);
                        }
                        if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
                                Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
                                /* Hardware flagged a checksum error. */
                                mp->m_pkthdr.csum_flags = 0;
                        } else {
                                mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
                                        CSUM_IP_VALID | CSUM_DATA_VALID |
                                        CSUM_PSEUDO_HDR;
                                mp->m_pkthdr.csum_data = 0xFFFF;
                        }
                        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

#if defined(INET) || defined(INET6)
                        if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
                                /* LRO packet has been successfully queued */
                        } else
#endif
                        {
                                (*ifp->if_input)(ifp, mp);
                        }
                }
        } else {
                device_printf(dev, "%s: err [0x%08x]\n", __func__,
                        cq_e->flags1);
        }

        rxr->rx_free++;
        rxr->rx_next++;

        if (rxr->rx_next == NUM_RX_DESCRIPTORS)
                rxr->rx_next = 0;

        if ((rxr->rx_free + rxr->sbq_free) >= 16)
                qls_replenish_rx(ha, rxr_idx);

        return (0);
}

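/*
 * qls_cq_isr() - drain completion queue cq_idx, dispatching transmit and
 * receive completions, then update the consumer index and, if any transmit
 * completions were reaped, kick the transmit taskqueue.
 */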
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
        q81_cq_e_t *cq_e, *cq_b;
        uint32_t i, cq_comp_idx;
        int ret = 0, tx_comp_done = 0;
#if defined(INET) || defined(INET6)
        struct lro_ctrl *lro = &ha->rx_ring[cq_idx].lro;
#endif

        cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;

        cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

        i = ha->rx_ring[cq_idx].cq_next;

        while (i != cq_comp_idx) {
                cq_e = &cq_b[i];

                switch (cq_e->opcode) {
                case Q81_IOCB_TX_MAC:
                case Q81_IOCB_TX_TSO:
                        qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
                        tx_comp_done++;
                        break;

                case Q81_IOCB_RX:
                        ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
                        break;

                case Q81_IOCB_MPI:
                case Q81_IOCB_SYS:
                default:
                        device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal\n",
                                __func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
                                cq_e->opcode);
                        qls_dump_buf32(ha, __func__, cq_e,
                                (sizeof (q81_cq_e_t) >> 2));
                        break;
                }

                i++;
                if (i == NUM_CQ_ENTRIES)
                        i = 0;

                if (ret) {
                        break;
                }

                /*
                 * If we caught up with the producer, re-read the completion
                 * index in case more entries arrived while we were working.
                 */
                if (i == cq_comp_idx) {
                        cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
                }
        }

#if defined(INET) || defined(INET6)
        tcp_lro_flush_all(lro);
#endif

        ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

        if (!ret) {
                Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
        }

        /* Kick the transmit task once, after the queue has been drained. */
        if (tx_comp_done)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

        return;
}

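/*
 * qls_mbx_isr() - service a processor (mailbox) interrupt.  Mailbox values
 * of the form 0x4xxx are command completions; 0x8xxx values are asynchronous
 * event notifications (AENs), some of which update link state.
 */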
static void
qls_mbx_isr(qla_host_t *ha)
{
        uint32_t data;
        int i;
        device_t dev = ha->pci_dev;

        if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
                if ((data & 0xF000) == 0x4000) {
                        /* mailbox command completion */
                        ha->mbox[0] = data;
                        for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
                                if (qls_mbx_rd_reg(ha, i, &data))
                                        break;
                                ha->mbox[i] = data;
                        }
                        ha->mbx_done = 1;
                } else if ((data & 0xF000) == 0x8000) {
                        /* we have an AEN */

                        ha->aen[0] = data;
                        for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
                                if (qls_mbx_rd_reg(ha, i, &data))
                                        break;
                                ha->aen[i] = data;
                        }
                        device_printf(dev, "%s: AEN "
                                "[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
                                " 0x%08x 0x%08x 0x%08x 0x%08x]\n",
                                __func__,
                                ha->aen[0], ha->aen[1], ha->aen[2],
                                ha->aen[3], ha->aen[4], ha->aen[5],
                                ha->aen[6], ha->aen[7], ha->aen[8]);

                        switch ((ha->aen[0] & 0xFFFF)) {
                        case 0x8011:    /* link up */
                                ha->link_up = 1;
                                break;

                        case 0x8012:    /* link down */
                                ha->link_up = 0;
                                break;

                        case 0x8130:    /* link hardware info */
                                ha->link_hw_info = ha->aen[1];
                                break;

                        case 0x8131:
                                ha->link_hw_info = 0;
                                break;
                        }
                }
        }
        WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);

        return;
}

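/*
 * qls_isr() - MSI-X interrupt handler.  Vector 0 additionally services
 * processor (mailbox) interrupts; every vector drains its completion queue
 * and re-enables its interrupt on the way out.
 */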
void
qls_isr(void *arg)
{
        qla_ivec_t *ivec = arg;
        qla_host_t *ha;
        uint32_t status;
        uint32_t cq_idx;
        device_t dev;

        ha = ivec->ha;
        cq_idx = ivec->cq_idx;
        dev = ha->pci_dev;

        status = READ_REG32(ha, Q81_CTL_STATUS);

        if (status & Q81_CTL_STATUS_FE) {
                /* Fatal error; return without re-enabling the interrupt. */
                device_printf(dev, "%s fatal error\n", __func__);
                return;
        }

        if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
                qls_mbx_isr(ha);
        }

        status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);

        if (status & (0x1 << cq_idx))
                qls_cq_isr(ha, cq_idx);

        Q81_ENABLE_INTR(ha, cq_idx);

        return;
}