/******************************************************************************

  Copyright (c) 2013-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl_pf.h"

void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_tx_queue *que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

int
ixl_intr(void *arg)
{
	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que = vsi->rx_queues;
	u32 icr0;

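	/*
	 * In MSI or legacy interrupt mode a single vector services both the
	 * admin queue and queue 0, so count the interrupt against the first
	 * RX queue.
	 */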
	++que->irqs;

	/* Clear PBA at start of ISR if using legacy interrupts */
	if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		iflib_iov_intr_deferred(vsi->ctx);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		iflib_admin_intr_deferred(vsi->ctx);

	ixl_enable_intr0(hw);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*********************************************************************
 *
 *  MSI-X VSI Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	ixl_set_queue_rx_itr(rx_que);

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg, mask, rstat_reg;
	bool do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		const char *reset_type;
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			reset_type = "CORER";
			break;
		case I40E_RESET_GLOBR:
			reset_type = "GLOBR";
			break;
		case I40E_RESET_EMPR:
			reset_type = "EMPR";
			break;
		default:
			reset_type = "POR";
			break;
		}
		device_printf(dev, "Reset Requested! (%s)\n", reset_type);
		/* overload admin queue task to check reset progress */
		atomic_set_int(&pf->state, IXL_PF_STATE_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* Checks against the conditions above */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		atomic_set_32(&pf->state,
		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32 reg;
	u16 vector = 1;

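	/*
	 * MSI-X vector 0 is reserved for the admin queue and other
	 * non-queue causes, so the per-queue vectors start at 1.
	 */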
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		    & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		    & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

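		/* The TX queue is the last element in the chain; mark its next-queue index as end-of-list */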
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}

/*
 * Configure for single interrupt vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32 reg;

	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue *rx_que = vsi->rx_queues;

	/* We may get here before the queues are set up */
	if (rx_que == NULL)
		goto early;

	/*
	** Release all MSI-X VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(pf->pci_mem), pf->pci_mem);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

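	/* Account for the Ethernet header, CRC, and a single VLAN tag */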
	vsi->shared->isc_max_frame_size =
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		goto only_auto;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));

		ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
	}

only_auto:
	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_link_status *link_info = &hw->phy.link_info;

	/* The driver needs to ask the FW to re-enable delivery of link
	 * status events after each event is received; i40e_get_link_status
	 * does that. To avoid losing information about link state changes
	 * that happened between receiving an event and making this call,
	 * do not rely on the status carried in the event but use the most
	 * recent status returned by the call instead. */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((link_info->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (if_getflags(pf->vsi.ifp) & IFF_UP) &&
	    (!(link_info->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(link_info->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Initialize the VSI: set up the VSI context in hardware (descriptor
 *  counts, buffer sizes, queue mapping, etc.) and initialize the
 *  TX and RX rings.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw *hw = vsi->hw;
	struct i40e_vsi_context ctxt;
	int tc_queues;
	int err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32 txctl;

		/* Setup the HMC TX Context */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context */
		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
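		/* Data buffer size is programmed in units of (1 << I40E_RXQ_CTX_DBUFF_SHIFT) bytes */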
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
		    scctx->isc_max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = scctx->isc_nrxd[0];
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
	}
	return (err);
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi *vsi = que->vsi;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = vsi->hw;
	struct rx_ring *rxr = &que->rxr;
	u16 rx_itr;
	u16 rx_latency = 0;
	int rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
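		/* Normalize the byte count by the current ITR setting to get a rough load estimate */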
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}

/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
{
	struct ixl_vsi *vsi = que->vsi;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	u16 tx_itr;
	u16 tx_latency = 0;
	int tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}

#ifdef IXL_DEBUG
/**
 * ixl_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_tx_queue *tx_que;
	int error;
	u32 val;

	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
	if (!tx_que)
		return (0);

	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixl_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_rx_queue *rx_que;
	int error;
	u32 val;

	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
	if (!rx_que)
		return (0);

	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
#endif

void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	/* Driver statistics */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &pf->admin_irq,
	    "Admin Queue IRQs received");

	sysctl_ctx_init(&vsi->sysctl_ctx);
	ixl_vsi_add_sysctls(vsi, "pf", true);

	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}

void
ixl_set_rss_hlut(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	int i, que_id;
	int lut_entry_width;
	u32 lut = 0;
	enum i40e_status_code status;

	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;

	/* Populate the LUT with the RX queues in round-robin fashion */
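	/* hlut_buf is sized for the largest RSS LUT a PF is assumed to have (512 entries) */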
	u8 hlut_buf[512];
	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_rx_queues;
#else
		que_id = i % vsi->num_rx_queues;
#endif
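		/* Mask the queue index down to the width of a single LUT entry */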
		lut = (que_id & ((0x1 << lut_entry_width) - 1));
		hlut_buf[i] = lut;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
		if (status)
			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
		ixl_flush(hw);
	}
}

/* For PF VSI only */
int
ixl_enable_rings(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	int error = 0;

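	/* Note: only the status of the last ring operation is preserved in 'error' */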
	for (int i = 0; i < vsi->num_tx_queues; i++)
		error = ixl_enable_tx_ring(pf, &pf->qtag, i);

	for (int i = 0; i < vsi->num_rx_queues; i++)
		error = ixl_enable_rx_ring(pf, &pf->qtag, i);

	return (error);
}

int
ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
{
	int error = 0;

	for (int i = 0; i < vsi->num_tx_queues; i++)
		error = ixl_disable_tx_ring(pf, qtag, i);

	for (int i = 0; i < vsi->num_rx_queues; i++)
		error = ixl_disable_rx_ring(pf, qtag, i);

	return (error);
}

void
ixl_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
			ixl_enable_queue(hw, que->rxr.me);
	} else
		ixl_enable_intr0(hw);
}

void
ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		ixl_disable_queue(hw, que->rxr.me);
}

int
ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	if (is_up)
		ixl_if_stop(pf->vsi.ctx);

	ixl_shutdown_hmc(pf);

	ixl_disable_intr0(hw);

	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
	return (error);
}

int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	int error = 0;

	device_printf(dev, "Rebuilding driver state...\n");

	/* Setup */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		return (0);
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = ixl_setup_hmc(pf);
	if (error)
		goto ixl_rebuild_hw_structs_after_reset_err;

	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
		    error);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	u8 set_fc_err_mask;
	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Remove default filters reinstalled by FW on reset */
	ixl_del_default_hw_filters(vsi);

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
	}

	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	if (is_up) {
		iflib_request_reset(vsi->ctx);
		iflib_admin_intr_deferred(vsi->ctx);
	}

	device_printf(dev, "Rebuilding driver state done.\n");
	return (0);

ixl_rebuild_hw_structs_after_reset_err:
	device_printf(dev, "Reload the driver to recover\n");
	return (error);
}

/*
** Set flow control using sysctl:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
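** e.g. (assuming the node is attached as dev.ixl.<unit>.fc): sysctl dev.ixl.0.fc=3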
*/
int
ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_fc, error = 0;
	enum i40e_status_code aq_error = 0;
	u8 fc_aq_err = 0;

	/* Get request */
	requested_fc = pf->fc;
	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_fc < 0 || requested_fc > 3) {
		device_printf(dev,
		    "Invalid fc mode; valid modes are 0 through 3\n");
		return (EINVAL);
	}

	/* Set fc ability for port */
	hw->fc.requested_mode = requested_fc;
	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting Flow Control mode %d; fc_err %#x\n",
		    __func__, aq_error, fc_aq_err);
		return (EIO);
	}
	pf->fc = requested_fc;

	return (0);
}