/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2017 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_type.h"
#include "icrdma_hw.h"

void disable_prefetch(struct irdma_hw *hw);

void disable_tx_spad(struct irdma_hw *hw);

void rdpu_ackreqpmthresh(struct irdma_hw *hw);

static u32 icrdma_regs[IRDMA_MAX_REGS] = {
	PFPE_CQPTAIL,
	PFPE_CQPDB,
	PFPE_CCQPSTATUS,
	PFPE_CCQPHIGH,
	PFPE_CCQPLOW,
	PFPE_CQARM,
	PFPE_CQACK,
	PFPE_AEQALLOC,
	PFPE_CQPERRCODES,
	PFPE_WQEALLOC,
	GLINT_DYN_CTL(0),
	ICRDMA_DB_ADDR_OFFSET,

	GLPCI_LBARCTRL,
	GLPE_CPUSTATUS0,
	GLPE_CPUSTATUS1,
	GLPE_CPUSTATUS2,
	PFINT_AEQCTL,
	GLINT_CEQCTL(0),
	VSIQF_PE_CTL1(0),
	PFHMC_PDINV,
	GLHMC_VFPDINV(0),
	GLPE_CRITERR,
	GLINT_RATE(0),
};

static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE,
	ICRDMA_CCQPSTATUS_CCQP_ERR,
	ICRDMA_CQPSQ_STAG_PDID,
	ICRDMA_CQPSQ_CQ_CEQID,
	ICRDMA_CQPSQ_CQ_CQID,
	ICRDMA_COMMIT_FPM_CQCNT,
	ICRDMA_CQPSQ_UPESD_HMCFNID
};

static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE_S,
	ICRDMA_CCQPSTATUS_CCQP_ERR_S,
	ICRDMA_CQPSQ_STAG_PDID_S,
	ICRDMA_CQPSQ_CQ_CEQID_S,
	ICRDMA_CQPSQ_CQ_CQID_S,
	ICRDMA_COMMIT_FPM_CQCNT_S,
	ICRDMA_CQPSQ_UPESD_HMCFNID_S
};

/**
 * icrdma_ena_irq - Enable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void
icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
	u32 val;
	u32 interval = 0;

	if (dev->ceq_itr && dev->aeq->msix_idx != idx)
		interval = dev->ceq_itr >> 1; /* 2 usec units */
	val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, IRDMA_IDX_ITR0) |
	      FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
	      FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, true) |
	      FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, true);
	writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}

/**
 * icrdma_disable_irq - Disable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void
icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
	writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}

/**
 * icrdma_cfg_ceq - Configure CEQ interrupt
 * @dev: pointer to the device structure
 * @ceq_id: Completion Event Queue ID
 * @idx: vector index
 * @enable: true to enable, false to disable
 */
static void
icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
	       bool enable)
{
	u32 reg_val;

	reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA : 0;
	reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
		   IRDMA_GLINT_CEQCTL_ITR_INDX;

	writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}

static const struct irdma_irq_ops icrdma_irq_ops = {
	.irdma_cfg_aeq = irdma_cfg_aeq,
	.irdma_cfg_ceq = icrdma_cfg_ceq,
	.irdma_dis_irq = icrdma_disable_irq,
	.irdma_en_irq = icrdma_ena_irq,
};

static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
	[IRDMA_HW_STAT_INDEX_RXVLANERR] = {0, 32, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = {8, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = {16, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = {24, 32, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = {24, 0, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = {32, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = {40, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = {48, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = {56, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = {64, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = {72, 32, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = {72, 0, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = {80, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = {88, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = {96, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = {104, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = {112, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = {120, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = {128, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = {136, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = {144, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = {152, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = {160, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = {168, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = {176, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = {184, 32, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = {184, 0, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = {192, 32, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = {200, 32, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = {200, 0, IRDMA_MAX_STATS_24},
	[IRDMA_HW_STAT_INDEX_TCPTXSEG] = {208, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = {216, 32, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = {224, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = {232, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMARXWRS] = {240, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMARXRDS] = {248, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = {256, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMATXWRS] = {264, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMATXRDS] = {272, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = {280, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMAVBND] = {288, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RDMAVINV] = {296, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = {304, 0, IRDMA_MAX_STATS_48},
	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = {312, 32, IRDMA_MAX_STATS_16},
	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = {312, 0, IRDMA_MAX_STATS_32},
	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = {320, 0, IRDMA_MAX_STATS_32},
};

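/**
 * icrdma_init_hw - Initialize device registers, shifts, masks and attributes
 * @dev: pointer to the device structure
 */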
void
icrdma_init_hw(struct irdma_sc_dev *dev)
{
	int i;
	u8 IOMEM *hw_addr;

	for (i = 0; i < IRDMA_MAX_REGS; ++i) {
		hw_addr = dev->hw->hw_addr;

		if (i == IRDMA_DB_ADDR_OFFSET)
			hw_addr = NULL;

		dev->hw_regs[i] = (u32 IOMEM *) (hw_addr + icrdma_regs[i]);
	}
	dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
	dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;

	for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
		dev->hw_shifts[i] = icrdma_shifts[i];

	for (i = 0; i < IRDMA_MAX_MASKS; ++i)
		dev->hw_masks[i] = icrdma_masks[i];

	dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
	dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
	dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
	dev->irq_ops = &icrdma_irq_ops;
	dev->hw_stats_map = icrdma_hw_stat_map;
	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
	dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
	dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
	dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
	dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;

	dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
	dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
	dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
	dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
	disable_tx_spad(dev->hw);
	disable_prefetch(dev->hw);
	rdpu_ackreqpmthresh(dev->hw);
	dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RELAX_RQ_ORDER;
	dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
	    IRDMA_FEATURE_CQ_RESIZE;
}

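/**
 * irdma_init_config_check - Initialize a flow control config check entry
 * @cc: pointer to the config check structure
 * @traffic_class: traffic class for this entry
 * @qs_handle: queue set handle for this entry
 */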
void
irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u16 qs_handle)
{
	cc->config_ok = false;
	cc->traffic_class = traffic_class;
	cc->qs_handle = qs_handle;
	cc->lfc_set = 0;
	cc->pfc_set = 0;
}

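/**
 * irdma_is_lfc_set - Return true if link flow control (pause) is enabled in
 * both the Rx and Tx directions for this function
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */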
static bool
irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
	u32 lfc = 1;
	u8 fn_id = vsi->dev->hmc_fn_id;

	lfc &= (rd32(vsi->dev->hw,
		     PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
	lfc &= (rd32(vsi->dev->hw,
		     PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
	lfc &= rd32(vsi->dev->hw,
		    PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id);

	if (lfc)
		return true;
	return false;
}

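/**
 * irdma_check_tc_has_pfc - Return true if the bit for @traffic_class is set
 * in any of the four bytes of the TC-to-PFC mapping register
 * @vsi: pointer to the VSI structure
 * @reg_offset: offset of the TC-to-PFC mapping register
 * @traffic_class: traffic class to check
 */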
static bool
irdma_check_tc_has_pfc(struct irdma_sc_vsi *vsi, u64 reg_offset, u16 traffic_class)
{
	u32 value, pfc = 0;
	u32 i;

	value = rd32(vsi->dev->hw, reg_offset);
	for (i = 0; i < 4; i++)
		pfc |= (value >> (8 * i + traffic_class)) & 0x1;

	if (pfc)
		return true;
	return false;
}

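/**
 * irdma_is_pfc_set - Return true if priority flow control is enabled for the
 * checked traffic class in both the Rx and Tx pause registers and in the
 * TC-to-PFC mapping
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */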
static bool
irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
	u32 pause;
	u8 fn_id = vsi->dev->hmc_fn_id;

	pause = (rd32(vsi->dev->hw,
		      PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >>
		 cc->traffic_class) & BIT(0);
	pause &= (rd32(vsi->dev->hw,
		       PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >>
		  cc->traffic_class) & BIT(0);

	return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
	       pause;
}

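/**
 * irdma_is_config_ok - Record whether link or priority flow control is
 * enabled for this entry and return the combined result
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */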
bool
irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
	cc->lfc_set = irdma_is_lfc_set(cc, vsi);
	cc->pfc_set = irdma_is_pfc_set(cc, vsi);

	cc->config_ok = cc->lfc_set || cc->pfc_set;

	return cc->config_ok;
}

#define IRDMA_RCV_WND_NO_FC	65536
#define IRDMA_RCV_WND_FC	65536

#define IRDMA_CWND_NO_FC	0x1
#define IRDMA_CWND_FC		0x18

#define IRDMA_RTOMIN_NO_FC	0x5
#define IRDMA_RTOMIN_FC		0x32

#define IRDMA_ACKCREDS_NO_FC	0x02
#define IRDMA_ACKCREDS_FC	0x06

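/**
 * irdma_check_flow_ctrl - Log, once per traffic class, whether flow control
 * is enabled for the checked configuration
 * @vsi: pointer to the VSI structure
 * @user_prio: user priority being checked
 * @traffic_class: traffic class mapped to @user_prio
 */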
static void
irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
{
	struct irdma_config_check *cfg_chk = &vsi->cfg_check[user_prio];

	if (!irdma_is_config_ok(cfg_chk, vsi)) {
		if (vsi->tc_print_warning[traffic_class]) {
			irdma_pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", traffic_class);
			vsi->tc_print_warning[traffic_class] = false;
		}
	} else {
		if (vsi->tc_print_warning[traffic_class]) {
			irdma_pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", traffic_class);
			vsi->tc_print_warning[traffic_class] = false;
		}
	}
}

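/**
 * irdma_check_fc_for_tc_update - Re-check flow control for every user
 * priority when the UP-to-TC mapping changes
 * @vsi: pointer to the VSI structure
 * @l2params: L2 parameters carrying the new UP-to-TC mapping
 */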
void
irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
			     struct irdma_l2params *l2params)
{
	u8 i;

	for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
		vsi->tc_print_warning[i] = true;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
		u8 tc = l2params->up2tc[i];

		cfg_chk->traffic_class = tc;
		cfg_chk->qs_handle = vsi->qos[i].qs_handle;
		irdma_check_flow_ctrl(vsi, i, tc);
	}
}

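/**
 * irdma_check_fc_for_qp - Check flow control for the user priorities whose
 * queue set handle matches the QP's
 * @vsi: pointer to the VSI structure
 * @sc_qp: pointer to the QP structure
 */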
void
irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
{
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];

		irdma_init_config_check(cfg_chk,
					vsi->qos[i].traffic_class,
					vsi->qos[i].qs_handle);
		if (sc_qp->qs_handle == cfg_chk->qs_handle)
			irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
	}
}

#define GLPE_WQMTXIDXADDR	0x50E000
#define GLPE_WQMTXIDXDATA	0x50E004

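/**
 * disable_prefetch - Disable prefetch on the device
 * @hw: pointer to the HW structure
 *
 * Selects WQM TX index 0x12, then clears bit 0 of the indexed data register.
 */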
void
disable_prefetch(struct irdma_hw *hw)
{
	u32 wqm_data;

	wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
	irdma_mb();

	wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
	wqm_data &= ~(1);
	wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}

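/**
 * disable_tx_spad - Disable TX spad on the device
 * @hw: pointer to the HW structure
 *
 * Selects WQM TX index 0x12, then clears bit 3 of the indexed data register.
 */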
void
disable_tx_spad(struct irdma_hw *hw)
{
	u32 wqm_data;

	wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
	irdma_mb();

	wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
	wqm_data &= ~(1 << 3);
	wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}

#define GL_RDPU_CNTRL 0x52054
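/**
 * rdpu_ackreqpmthresh - Program the RDPU ack request threshold
 * @hw: pointer to the HW structure
 *
 * Sets the 6-bit field at bits 15:10 of GL_RDPU_CNTRL to 3.
 */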
void
rdpu_ackreqpmthresh(struct irdma_hw *hw)
{
	u32 val;

	val = rd32(hw, GL_RDPU_CNTRL);
	val &= ~(0x3f << 10);
	val |= (3 << 10);
	wr32(hw, GL_RDPU_CNTRL, val);
}