/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "adf_c4xxx_ras.h"
#include "adf_accel_devices.h"
#include "adf_c4xxx_hw_data.h"
#include <adf_dev_err.h>
#include "adf_c4xxx_inline.h"

#define ADF_C4XXX_MAX_STR_LEN 64

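/*
 * Sysctl read handlers. Each handler copies the current value of one of
 * the per-device RAS counters (correctable, uncorrectable, fatal) out to
 * userspace. The NULL check guards against a read racing with device
 * teardown, where the counter array has already been freed.
 */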
static int adf_sysctl_read_ras_correctable(SYSCTL_HANDLER_ARGS)
{
	struct adf_accel_dev *accel_dev = arg1;
	unsigned long counter = 0;

	if (accel_dev->ras_counters)
		counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_CORR]);

	return SYSCTL_OUT(req, &counter, sizeof(counter));
}

static int adf_sysctl_read_ras_uncorrectable(SYSCTL_HANDLER_ARGS)
{
	struct adf_accel_dev *accel_dev = arg1;
	unsigned long counter = 0;

	if (accel_dev->ras_counters)
		counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_UNCORR]);

	return SYSCTL_OUT(req, &counter, sizeof(counter));
}

static int adf_sysctl_read_ras_fatal(SYSCTL_HANDLER_ARGS)
{
	struct adf_accel_dev *accel_dev = arg1;
	unsigned long counter = 0;

	if (accel_dev->ras_counters)
		counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_FATAL]);

	return SYSCTL_OUT(req, &counter, sizeof(counter));
}

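/*
 * Sysctl write handler: writing any non-zero value resets all RAS
 * counters to zero. The value written is echoed back to the caller.
 */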
static int adf_sysctl_write_ras_reset(SYSCTL_HANDLER_ARGS)
{
	struct adf_accel_dev *accel_dev = arg1;
	int value = 0;
	int i;
	int ret = SYSCTL_IN(req, &value, sizeof(value));

	if (!ret && value != 0 && accel_dev->ras_counters) {
		/* Reset all RAS counters. */
		for (i = 0; i < ADF_RAS_ERRORS; ++i)
			atomic_set(&accel_dev->ras_counters[i], 0);
	}

	return SYSCTL_OUT(req, &value, sizeof(value));
}

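/*
 * Allocate the RAS counter array and register the RAS sysctl nodes under
 * the device's sysctl tree. Once registered, the counters can be read and
 * reset from userspace; assuming the device attaches as qat0, usage would
 * look roughly like this (node names are fixed, the device path may
 * differ):
 *
 *	sysctl dev.qat.0.ras_correctable
 *	sysctl dev.qat.0.ras_reset=1
 */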
int
adf_init_ras(struct adf_accel_dev *accel_dev)
{
	struct sysctl_ctx_list *qat_sysctl_ctx;
	struct sysctl_oid *qat_sysctl_tree;
	struct sysctl_oid *ras_corr;
	struct sysctl_oid *ras_uncor;
	struct sysctl_oid *ras_fat;
	struct sysctl_oid *ras_res;
	int i;

	accel_dev->ras_counters = kcalloc(ADF_RAS_ERRORS,
					  sizeof(*accel_dev->ras_counters),
					  GFP_KERNEL);
	if (!accel_dev->ras_counters)
		return -ENOMEM;

	for (i = 0; i < ADF_RAS_ERRORS; ++i)
		atomic_set(&accel_dev->ras_counters[i], 0);

	qat_sysctl_ctx =
	    device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
	qat_sysctl_tree =
	    device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
	ras_corr = SYSCTL_ADD_OID(qat_sysctl_ctx,
				  SYSCTL_CHILDREN(qat_sysctl_tree),
				  OID_AUTO,
				  "ras_correctable",
				  CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_DYN,
				  accel_dev,
				  0,
				  adf_sysctl_read_ras_correctable,
				  "LU",
				  "QAT RAS correctable");
	accel_dev->ras_correctable = ras_corr;
	if (!accel_dev->ras_correctable) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to register ras_correctable sysctl\n");
		return -EINVAL;
	}
	ras_uncor = SYSCTL_ADD_OID(qat_sysctl_ctx,
				   SYSCTL_CHILDREN(qat_sysctl_tree),
				   OID_AUTO,
				   "ras_uncorrectable",
				   CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_DYN,
				   accel_dev,
				   0,
				   adf_sysctl_read_ras_uncorrectable,
				   "LU",
				   "QAT RAS uncorrectable");
	accel_dev->ras_uncorrectable = ras_uncor;
	if (!accel_dev->ras_uncorrectable) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to register ras_uncorrectable sysctl\n");
		return -EINVAL;
	}

	ras_fat = SYSCTL_ADD_OID(qat_sysctl_ctx,
				 SYSCTL_CHILDREN(qat_sysctl_tree),
				 OID_AUTO,
				 "ras_fatal",
				 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_DYN,
				 accel_dev,
				 0,
				 adf_sysctl_read_ras_fatal,
				 "LU",
				 "QAT RAS fatal");
	accel_dev->ras_fatal = ras_fat;
	if (!accel_dev->ras_fatal) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to register ras_fatal sysctl\n");
		return -EINVAL;
	}

	ras_res = SYSCTL_ADD_OID(qat_sysctl_ctx,
				 SYSCTL_CHILDREN(qat_sysctl_tree),
				 OID_AUTO,
				 "ras_reset",
				 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_DYN,
				 accel_dev,
				 0,
				 adf_sysctl_write_ras_reset,
				 "I",
				 "QAT RAS reset");
	accel_dev->ras_reset = ras_res;
	if (!accel_dev->ras_reset) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to register ras_reset sysctl\n");
		return -EINVAL;
	}

	return 0;
}

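/*
 * Tear down in the reverse order of adf_init_ras(): deregister the sysctl
 * nodes first, then free the counter array, so no sysctl handler can
 * observe a freed pointer.
 */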
void
adf_exit_ras(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->ras_counters) {
		remove_oid(accel_dev, accel_dev->ras_correctable);
		remove_oid(accel_dev, accel_dev->ras_uncorrectable);
		remove_oid(accel_dev, accel_dev->ras_fatal);
		remove_oid(accel_dev, accel_dev->ras_reset);

		accel_dev->ras_correctable = NULL;
		accel_dev->ras_uncorrectable = NULL;
		accel_dev->ras_fatal = NULL;
		accel_dev->ras_reset = NULL;

		kfree(accel_dev->ras_counters);
		accel_dev->ras_counters = NULL;
	}
}

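/*
 * Decode an accelerator's IASTATSSM value into human-readable log lines.
 * Each bit identifies the SSM sub-block (shared memory, one of the five
 * MMP units, push/pull, CPP parity or SSM RF parity) that raised the
 * error.
 */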
static inline void
adf_log_source_iastatssm(struct adf_accel_dev *accel_dev,
			 struct resource *pmisc,
			 u32 iastatssm,
			 u32 accel_num)
{
	if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMSH_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error shared memory detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMSH_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Correctable error shared memory detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP0_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error MMP0 detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP0_MASK)
		device_printf(GET_DEV(accel_dev),
			      "Correctable error MMP0 detected in accel: %u\n",
			      accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP1_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error MMP1 detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP1_MASK)
		device_printf(GET_DEV(accel_dev),
			      "Correctable error MMP1 detected in accel: %u\n",
			      accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP2_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error MMP2 detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP2_MASK)
		device_printf(GET_DEV(accel_dev),
			      "Correctable error MMP2 detected in accel: %u\n",
			      accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP3_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error MMP3 detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP3_MASK)
		device_printf(GET_DEV(accel_dev),
			      "Correctable error MMP3 detected in accel: %u\n",
			      accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP4_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error MMP4 detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP4_MASK)
		device_printf(GET_DEV(accel_dev),
			      "Correctable error MMP4 detected in accel: %u\n",
			      accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_PPERR_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable error Push or Pull detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_CPPPAR_ERR_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable CPP parity error detected in accel: %u\n",
		    accel_num);

	if (iastatssm & ADF_C4XXX_IASTATSSM_RFPAR_ERR_MASK)
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable SSM RF parity error detected in accel: %u\n",
		    accel_num);
}

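/*
 * Clear the per-source error registers that contributed to STATSSM. Most
 * sources are cleared with a read-modify-write that ANDs in a clear mask;
 * the SSM RF parity and SSM CPP error registers are write-1-to-clear, so
 * ORing in 0 writes back the bits just read and clears them.
 */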
static inline void
adf_clear_source_statssm(struct adf_accel_dev *accel_dev,
			 struct resource *pmisc,
			 u32 statssm,
			 u32 accel_num)
{
	if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMSH_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_UERRSSMSH(accel_num),
				      ADF_C4XXX_UERRSSMSH_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMSH_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_CERRSSMSH(accel_num),
				      ADF_C4XXX_CERRSSMSH_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP0_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_UERRSSMMMP(accel_num, 0),
				      ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP0_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_CERRSSMMMP(accel_num, 0),
				      ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP1_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_UERRSSMMMP(accel_num, 1),
				      ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP1_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_CERRSSMMMP(accel_num, 1),
				      ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP2_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_UERRSSMMMP(accel_num, 2),
				      ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP2_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_CERRSSMMMP(accel_num, 2),
				      ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP3_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_UERRSSMMMP(accel_num, 3),
				      ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP3_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_CERRSSMMMP(accel_num, 3),
				      ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_UERRSSMMMP4_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_UERRSSMMMP(accel_num, 4),
				      ~ADF_C4XXX_UERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_CERRSSMMMP4_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_CERRSSMMMP(accel_num, 4),
				      ~ADF_C4XXX_CERRSSMMMP_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_PPERR_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_PPERR(accel_num),
				      ~ADF_C4XXX_PPERR_INTS_CLEAR_MASK);

	if (statssm & ADF_C4XXX_IASTATSSM_RFPAR_ERR_MASK)
		adf_csr_fetch_and_or(pmisc,
				     ADF_C4XXX_SSMSOFTERRORPARITY(accel_num),
				     0UL);

	if (statssm & ADF_C4XXX_IASTATSSM_CPPPAR_ERR_MASK)
		adf_csr_fetch_and_or(pmisc,
				     ADF_C4XXX_SSMCPPERR(accel_num),
				     0UL);
}

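/*
 * ERRSOU8 reports correctable errors from the Acceleration Engines (AEs):
 * each set bit in HI_ME_COR_ERRLOG maps to one AE.
 */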
static inline void
adf_process_errsou8(struct adf_accel_dev *accel_dev, struct resource *pmisc)
{
	int i;
	u32 mecorrerr = ADF_CSR_RD(pmisc, ADF_C4XXX_HI_ME_COR_ERRLOG);
	const unsigned long tmp_mecorrerr = mecorrerr;

	/* For each correctable error in the ME, increment the RAS counter. */
	for_each_set_bit(i,
			 &tmp_mecorrerr,
			 ADF_C4XXX_HI_ME_COR_ERRLOG_SIZE_IN_BITS)
	{
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]);
		device_printf(GET_DEV(accel_dev),
			      "Correctable error detected in AE%d\n",
			      i);
	}

	/* Clear interrupt from errsou8 (RW1C) */
	ADF_CSR_WR(pmisc, ADF_C4XXX_HI_ME_COR_ERRLOG, mecorrerr);
}

static inline void
adf_handle_ae_uncorr_err(struct adf_accel_dev *accel_dev,
			 struct resource *pmisc)
{
	int i;
	u32 me_uncorr_err = ADF_CSR_RD(pmisc, ADF_C4XXX_HI_ME_UNCERR_LOG);
	const unsigned long tmp_me_uncorr_err = me_uncorr_err;

	/* For each uncorrectable fatal error in an AE, increment the RAS
	 * error counter.
	 */
	for_each_set_bit(i,
			 &tmp_me_uncorr_err,
			 ADF_C4XXX_HI_ME_UNCOR_ERRLOG_BITS)
	{
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]);
		device_printf(GET_DEV(accel_dev),
			      "Uncorrectable error detected in AE%d\n",
			      i);
	}

	/* Clear interrupt from me_uncorr_err (RW1C) */
	ADF_CSR_WR(pmisc, ADF_C4XXX_HI_ME_UNCERR_LOG, me_uncorr_err);
}

static inline void
adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev,
			  struct resource *pmisc,
			  bool *reset_required)
{
	u32 ri_mem_par_err_sts = 0;
	u32 ri_mem_par_err_ferr = 0;

	ri_mem_par_err_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_MEM_PAR_ERR_STS);

	ri_mem_par_err_ferr = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_MEM_PAR_ERR_FERR);

	if (ri_mem_par_err_sts & ADF_C4XXX_RI_MEM_PAR_ERR_STS_MASK) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable RI memory parity error detected.\n");
	}

	if (ri_mem_par_err_sts & ADF_C4XXX_RI_MEM_MSIX_TBL_INT_MASK) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]);
		device_printf(
		    GET_DEV(accel_dev),
		    "Uncorrectable fatal MSIX table parity error detected.\n");
		*reset_required = true;
	}

	device_printf(GET_DEV(accel_dev),
		      "ri_mem_par_err_sts=0x%X\tri_mem_par_err_ferr=0x%X\n",
		      ri_mem_par_err_sts,
		      ri_mem_par_err_ferr);

	ADF_CSR_WR(pmisc, ADF_C4XXX_RI_MEM_PAR_ERR_STS, ri_mem_par_err_sts);
}

static inline void
adf_handle_ti_mem_par_err(struct adf_accel_dev *accel_dev,
			  struct resource *pmisc)
{
	u32 ti_mem_par_err_sts0 = 0;
	u32 ti_mem_par_err_sts1 = 0;
	u32 ti_mem_par_err_ferr = 0;

	ti_mem_par_err_sts0 = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS0);
	ti_mem_par_err_sts1 = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS1);
	ti_mem_par_err_ferr =
	    ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_FIRST_ERROR);

	atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]);
	ti_mem_par_err_sts1 &= ADF_C4XXX_TI_MEM_PAR_ERR_STS1_MASK;

	device_printf(GET_DEV(accel_dev),
		      "Uncorrectable TI memory parity error detected.\n");
	device_printf(GET_DEV(accel_dev),
		      "ti_mem_par_err_sts0=0x%X\tti_mem_par_err_sts1=0x%X\t"
		      "ti_mem_par_err_ferr=0x%X\n",
		      ti_mem_par_err_sts0,
		      ti_mem_par_err_sts1,
		      ti_mem_par_err_ferr);

	ADF_CSR_WR(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS0, ti_mem_par_err_sts0);
	ADF_CSR_WR(pmisc, ADF_C4XXX_TI_MEM_PAR_ERR_STS1, ti_mem_par_err_sts1);
}

static inline void
adf_log_fatal_cmd_par_err(struct adf_accel_dev *accel_dev, char *err_type)
{
	atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]);
	device_printf(GET_DEV(accel_dev),
		      "Fatal error detected: %s command parity\n",
		      err_type);
}

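/*
 * CPP agent command parity errors are reported per agent (TI, RI, ICI,
 * ICE, ARAM, CFC and the SSMs). All of them are fatal; the caller is
 * expected to request a device reset after this handler runs.
 */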
static inline void
adf_handle_host_cpp_par_err(struct adf_accel_dev *accel_dev,
			    struct resource *pmisc)
{
	u32 host_cpp_par_err = 0;

	host_cpp_par_err =
	    ADF_CSR_RD(pmisc, ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG);

	if (host_cpp_par_err & ADF_C4XXX_TI_CMD_PAR_ERR)
		adf_log_fatal_cmd_par_err(accel_dev, "TI");

	if (host_cpp_par_err & ADF_C4XXX_RI_CMD_PAR_ERR)
		adf_log_fatal_cmd_par_err(accel_dev, "RI");

	if (host_cpp_par_err & ADF_C4XXX_ICI_CMD_PAR_ERR)
		adf_log_fatal_cmd_par_err(accel_dev, "ICI");

	if (host_cpp_par_err & ADF_C4XXX_ICE_CMD_PAR_ERR)
		adf_log_fatal_cmd_par_err(accel_dev, "ICE");

	if (host_cpp_par_err & ADF_C4XXX_ARAM_CMD_PAR_ERR)
		adf_log_fatal_cmd_par_err(accel_dev, "ARAM");

	if (host_cpp_par_err & ADF_C4XXX_CFC_CMD_PAR_ERR)
		adf_log_fatal_cmd_par_err(accel_dev, "CFC");

	if (ADF_C4XXX_SSM_CMD_PAR_ERR(host_cpp_par_err))
		adf_log_fatal_cmd_par_err(accel_dev, "SSM");

	/* Clear interrupt from host_cpp_par_err (RW1C) */
	ADF_CSR_WR(pmisc,
		   ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG,
		   host_cpp_par_err);
}

static inline void
adf_process_errsou9(struct adf_accel_dev *accel_dev,
		    struct resource *pmisc,
		    u32 errsou,
		    bool *reset_required)
{
	if (errsou & ADF_C4XXX_ME_UNCORR_ERROR) {
		adf_handle_ae_uncorr_err(accel_dev, pmisc);

		/* Notify caller that a function level reset is required. */
		*reset_required = true;
	}

	if (errsou & ADF_C4XXX_CPP_CMD_PAR_ERR) {
		adf_handle_host_cpp_par_err(accel_dev, pmisc);
		*reset_required = true;
	}

	/* RI memory parity errors are uncorrectable non-fatal errors,
	 * with the exception of the bit 22 MSIX table parity error, which
	 * must be treated as a fatal error and followed by a device
	 * restart.
	 */
	if (errsou & ADF_C4XXX_RI_MEM_PAR_ERR)
		adf_handle_ri_mem_par_err(accel_dev, pmisc, reset_required);

	if (errsou & ADF_C4XXX_TI_MEM_PAR_ERR) {
		adf_handle_ti_mem_par_err(accel_dev, pmisc);
		*reset_required = true;
	}
}

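/*
 * ERRSOU10 covers the per-accelerator SSM error sources. The helpers
 * below examine the compression (CPR), translator (XLT) and generic SSM
 * status registers for each accelerator flagged in the error source CSR.
 */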
static inline void
adf_process_exprpssmcpr(struct adf_accel_dev *accel_dev,
			struct resource *pmisc,
			u32 accel)
{
	u32 exprpssmcpr;

	/* CPR0 */
	exprpssmcpr = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMCPR0(accel));
	if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_FATAL_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Uncorrectable error CPR0 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
	}
	if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_SOFT_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Correctable error CPR0 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]);
	}
	ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMCPR0(accel), 0);

	/* CPR1 */
	exprpssmcpr = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMCPR1(accel));
	if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_FATAL_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Uncorrectable error CPR1 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
	}
	if (exprpssmcpr & ADF_C4XXX_EXPRPSSM_SOFT_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Correctable error CPR1 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]);
	}
	ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMCPR1(accel), 0);
}

static inline void
adf_process_exprpssmxlt(struct adf_accel_dev *accel_dev,
			struct resource *pmisc,
			u32 accel)
{
	u32 exprpssmxlt;

	/* XLT0 */
	exprpssmxlt = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMXLT0(accel));
	if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_FATAL_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Uncorrectable error XLT0 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
	}
	if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_SOFT_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Correctable error XLT0 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]);
	}
	ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMXLT0(accel), 0);

	/* XLT1 */
	exprpssmxlt = ADF_CSR_RD(pmisc, ADF_C4XXX_EXPRPSSMXLT1(accel));
	if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_FATAL_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Uncorrectable error XLT1 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
	}
	if (exprpssmxlt & ADF_C4XXX_EXPRPSSM_SOFT_MASK) {
		device_printf(GET_DEV(accel_dev),
			      "Correctable error XLT1 detected in accel %u\n",
			      accel);
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]);
	}
	ADF_CSR_WR(pmisc, ADF_C4XXX_EXPRPSSMXLT1(accel), 0);
}

static inline void
adf_process_spp_par_err(struct adf_accel_dev *accel_dev,
			struct resource *pmisc,
			u32 accel,
			bool *reset_required)
{
	/* All SPP parity errors are treated as uncorrectable fatal errors */
	atomic_inc(&accel_dev->ras_counters[ADF_RAS_FATAL]);
	*reset_required = true;
	device_printf(GET_DEV(accel_dev),
		      "Uncorrectable fatal SPP parity error detected\n");
}

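/*
 * Walk the interrupt status bits for one accelerator. In IASTATSSM the
 * error sources alternate: even bits report uncorrectable errors and odd
 * bits report correctable ones, which is what the (i % 2) test below
 * relies on. Slice hang and SPP parity bits are special-cased.
 */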
static inline void
adf_process_statssm(struct adf_accel_dev *accel_dev,
		    struct resource *pmisc,
		    u32 accel,
		    bool *reset_required)
{
	u32 i;
	u32 statssm = ADF_CSR_RD(pmisc, ADF_INTSTATSSM(accel));
	u32 iastatssm = ADF_CSR_RD(pmisc, ADF_C4XXX_IAINTSTATSSM(accel));
	u8 type;
	const unsigned long tmp_iastatssm = iastatssm;

	/* First collect all errors */
	for_each_set_bit(i, &tmp_iastatssm, ADF_C4XXX_IASTATSSM_BITS)
	{
		if (i == ADF_C4XXX_IASTATSSM_SLICE_HANG_ERR_BIT) {
			/* Slice hang errors are handled in a separate
			 * function, adf_check_slice_hang_c4xxx(), which
			 * also increments the RAS counters for them.
			 */
			continue;
		}
		if (i == ADF_C4XXX_IASTATSSM_SPP_PAR_ERR_BIT) {
			adf_process_spp_par_err(accel_dev,
						pmisc,
						accel,
						reset_required);
			continue;
		}

		type = (i % 2) ? ADF_RAS_CORR : ADF_RAS_UNCORR;
		if (i == ADF_C4XXX_IASTATSSM_CPP_PAR_ERR_BIT)
			type = ADF_RAS_UNCORR;

		atomic_inc(&accel_dev->ras_counters[type]);
	}

	/* If iastatssm is set, log the error sources */
	if (iastatssm & ADF_C4XXX_IASTATSSM_MASK)
		adf_log_source_iastatssm(accel_dev, pmisc, iastatssm, accel);
	/* If statssm is set, clear the error sources */
	if (statssm & ADF_C4XXX_IASTATSSM_MASK)
		adf_clear_source_statssm(accel_dev, pmisc, statssm, accel);
	/* Clear iastatssm after clearing the error sources */
	if (iastatssm & ADF_C4XXX_IASTATSSM_MASK)
		adf_csr_fetch_and_and(pmisc,
				      ADF_C4XXX_IAINTSTATSSM(accel),
				      ADF_C4XXX_IASTATSSM_CLR_MASK);
}

static inline void
adf_process_errsou10(struct adf_accel_dev *accel_dev,
		     struct resource *pmisc,
		     u32 errsou,
		     u32 num_accels,
		     bool *reset_required)
{
	int accel;
	const unsigned long tmp_errsou = errsou;

	for_each_set_bit(accel, &tmp_errsou, num_accels)
	{
		adf_process_statssm(accel_dev, pmisc, accel, reset_required);
		adf_process_exprpssmcpr(accel_dev, pmisc, accel);
		adf_process_exprpssmxlt(accel_dev, pmisc, accel);
	}
}

/* ERRSOU 11 */
static inline void
adf_handle_ti_misc_err(struct adf_accel_dev *accel_dev, struct resource *pmisc)
{
	u32 ti_misc_sts = 0;
	u32 err_type = 0;

	ti_misc_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_MISC_STS);
	dev_dbg(GET_DEV(accel_dev), "ti_misc_sts = 0x%X\n", ti_misc_sts);

	if (ti_misc_sts & ADF_C4XXX_TI_MISC_ERR_MASK) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);

		/* If a TI misc error occurred, check its type */
		err_type = ADF_C4XXX_GET_TI_MISC_ERR_TYPE(ti_misc_sts);
		if (err_type == ADF_C4XXX_TI_BME_RESP_ORDER_ERR) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Uncorrectable non-fatal BME response order error.\n");
		} else if (err_type == ADF_C4XXX_TI_RESP_ORDER_ERR) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Uncorrectable non-fatal response order error.\n");
		}

		/* Clear the interrupt and allow the next error to be
		 * logged.
		 */
		ADF_CSR_WR(pmisc, ADF_C4XXX_TI_MISC_STS, BIT(0));
	}
}

static inline void
adf_handle_ri_push_pull_par_err(struct adf_accel_dev *accel_dev,
				struct resource *pmisc)
{
	u32 ri_cpp_int_sts = 0;
	u32 err_clear_mask = 0;

	ri_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_CPP_INT_STS);
	dev_dbg(GET_DEV(accel_dev), "ri_cpp_int_sts = 0x%X\n", ri_cpp_int_sts);

	if (ri_cpp_int_sts & ADF_C4XXX_RI_CPP_INT_STS_PUSH_ERR) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "CPP%d: Uncorrectable non-fatal RI push error detected.\n",
		    ADF_C4XXX_GET_CPP_BUS_FROM_STS(ri_cpp_int_sts));

		err_clear_mask |= ADF_C4XXX_RI_CPP_INT_STS_PUSH_ERR;
	}

	if (ri_cpp_int_sts & ADF_C4XXX_RI_CPP_INT_STS_PULL_ERR) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "CPP%d: Uncorrectable non-fatal RI pull error detected.\n",
		    ADF_C4XXX_GET_CPP_BUS_FROM_STS(ri_cpp_int_sts));

		err_clear_mask |= ADF_C4XXX_RI_CPP_INT_STS_PULL_ERR;
	}

	/* Clear the interrupt for handled errors and allow the next error
	 * to be logged.
	 */
	ADF_CSR_WR(pmisc, ADF_C4XXX_RI_CPP_INT_STS, err_clear_mask);
}

static inline void
adf_handle_ti_push_pull_par_err(struct adf_accel_dev *accel_dev,
				struct resource *pmisc)
{
	u32 ti_cpp_int_sts = 0;
	u32 err_clear_mask = 0;

	ti_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_CPP_INT_STS);
	dev_dbg(GET_DEV(accel_dev), "ti_cpp_int_sts = 0x%X\n", ti_cpp_int_sts);

	if (ti_cpp_int_sts & ADF_C4XXX_TI_CPP_INT_STS_PUSH_ERR) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "CPP%d: Uncorrectable non-fatal TI push error detected.\n",
		    ADF_C4XXX_GET_CPP_BUS_FROM_STS(ti_cpp_int_sts));

		err_clear_mask |= ADF_C4XXX_TI_CPP_INT_STS_PUSH_ERR;
	}

	if (ti_cpp_int_sts & ADF_C4XXX_TI_CPP_INT_STS_PULL_ERR) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "CPP%d: Uncorrectable non-fatal TI pull error detected.\n",
		    ADF_C4XXX_GET_CPP_BUS_FROM_STS(ti_cpp_int_sts));

		err_clear_mask |= ADF_C4XXX_TI_CPP_INT_STS_PULL_ERR;
	}

	/* Clear the interrupt for handled errors and allow the next error
	 * to be logged.
	 */
	ADF_CSR_WR(pmisc, ADF_C4XXX_TI_CPP_INT_STS, err_clear_mask);
}

static inline void
adf_handle_aram_corr_err(struct adf_accel_dev *accel_dev,
			 struct resource *aram_base_addr)
{
	u32 aram_cerr = 0;

	aram_cerr = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_ARAMCERR);
	dev_dbg(GET_DEV(accel_dev), "aram_cerr = 0x%X\n", aram_cerr);

	if (aram_cerr & ADF_C4XXX_ARAM_CORR_ERR_MASK) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_CORR]);
		device_printf(GET_DEV(accel_dev),
			      "Correctable ARAM error detected.\n");
	}

	/* Clear correctable ARAM error interrupt. */
	ADF_C4XXX_CLEAR_CSR_BIT(aram_cerr, 0);
	ADF_CSR_WR(aram_base_addr, ADF_C4XXX_ARAMCERR, aram_cerr);
}

static inline void
adf_handle_aram_uncorr_err(struct adf_accel_dev *accel_dev,
			   struct resource *aram_base_addr)
{
	u32 aram_uerr = 0;

	aram_uerr = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_ARAMUERR);
	dev_dbg(GET_DEV(accel_dev), "aram_uerr = 0x%X\n", aram_uerr);

	if (aram_uerr & ADF_C4XXX_ARAM_UNCORR_ERR_MASK) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(GET_DEV(accel_dev),
			      "Uncorrectable non-fatal ARAM error detected.\n");
	}

	/* Clear uncorrectable ARAM error interrupt. */
	ADF_C4XXX_CLEAR_CSR_BIT(aram_uerr, 0);
	ADF_CSR_WR(aram_base_addr, ADF_C4XXX_ARAMUERR, aram_uerr);
}

static inline void
adf_handle_ti_pull_par_err(struct adf_accel_dev *accel_dev,
			   struct resource *pmisc)
{
	u32 ti_cpp_int_sts = 0;

	ti_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_TI_CPP_INT_STS);
	dev_dbg(GET_DEV(accel_dev), "ti_cpp_int_sts = 0x%X\n", ti_cpp_int_sts);

	if (ti_cpp_int_sts & ADF_C4XXX_TI_CPP_INT_STS_PUSH_DATA_PAR_ERR) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "CPP%d: Uncorrectable non-fatal TI pull data parity error detected.\n",
		    ADF_C4XXX_GET_CPP_BUS_FROM_STS(ti_cpp_int_sts));
	}

	/* Clear the interrupt and allow the next error to be logged. */
	ADF_CSR_WR(pmisc,
		   ADF_C4XXX_TI_CPP_INT_STS,
		   ADF_C4XXX_TI_CPP_INT_STS_PUSH_DATA_PAR_ERR);
}

static inline void
adf_handle_ri_push_par_err(struct adf_accel_dev *accel_dev,
			   struct resource *pmisc)
{
	u32 ri_cpp_int_sts = 0;

	ri_cpp_int_sts = ADF_CSR_RD(pmisc, ADF_C4XXX_RI_CPP_INT_STS);
	dev_dbg(GET_DEV(accel_dev), "ri_cpp_int_sts = 0x%X\n", ri_cpp_int_sts);

	if (ri_cpp_int_sts & ADF_C4XXX_RI_CPP_INT_STS_PUSH_DATA_PAR_ERR) {
		atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
		device_printf(
		    GET_DEV(accel_dev),
		    "CPP%d: Uncorrectable non-fatal RI push data parity error detected.\n",
		    ADF_C4XXX_GET_CPP_BUS_FROM_STS(ri_cpp_int_sts));
	}

	/* Clear the interrupt and allow the next error to be logged. */
	ADF_CSR_WR(pmisc,
		   ADF_C4XXX_RI_CPP_INT_STS,
		   ADF_C4XXX_RI_CPP_INT_STS_PUSH_DATA_PAR_ERR);
}

static inline void
adf_log_inln_err(struct adf_accel_dev *accel_dev,
		 u32 offset,
		 u8 ras_type,
		 char *msg)
{
	if (ras_type >= ADF_RAS_ERRORS) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid ras type %u\n",
			      ras_type);
		return;
	}

	if (offset == ADF_C4XXX_INLINE_INGRESS_OFFSET) {
		if (ras_type == ADF_RAS_CORR)
			dev_dbg(GET_DEV(accel_dev), "Detect ici %s\n", msg);
		else
			device_printf(GET_DEV(accel_dev),
				      "Detect ici %s\n",
				      msg);
	} else {
		if (ras_type == ADF_RAS_CORR)
			dev_dbg(GET_DEV(accel_dev), "Detect ice %s\n", msg);
		else
			device_printf(GET_DEV(accel_dev),
				      "Detect ice %s\n",
				      msg);
	}
	atomic_inc(&accel_dev->ras_counters[ras_type]);
}

static inline void
adf_handle_parser_uerr(struct adf_accel_dev *accel_dev,
		       struct resource *aram_base_addr,
		       u32 offset,
		       bool *reset_required)
{
	u32 reg_val = 0;

	reg_val = ADF_CSR_RD(aram_base_addr, ADF_C4XXX_IC_PARSER_UERR + offset);
	if (reg_val & ADF_C4XXX_PARSER_UERR_INTR) {
		/* Mask the interrupt enable bit */
		reg_val &= ~ADF_C4XXX_PARSER_DESC_UERR_INTR_ENA;
		ADF_CSR_WR(aram_base_addr,
			   ADF_C4XXX_IC_PARSER_UERR + offset,
			   reg_val);

		/* Fatal error: increment the RAS error counter and
		 * reset the CPM.
		 */
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "parser uncorr fatal err");
		*reset_required = true;
	}
}

static inline void
adf_handle_mac_intr(struct adf_accel_dev *accel_dev,
		    struct resource *aram_base_addr,
		    u32 offset,
		    bool *reset_required)
{
	u64 reg_val;

	reg_val = ADF_CSR_RD64(aram_base_addr, ADF_C4XXX_MAC_IP + offset);

	/* Handle the MAC interrupts masked out in MAC_IM */
	if (reg_val & ADF_C4XXX_MAC_ERROR_TX_UNDERRUN)
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "err tx underrun");

	if (reg_val & ADF_C4XXX_MAC_ERROR_TX_FCS)
		adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err tx fcs");

	if (reg_val & ADF_C4XXX_MAC_ERROR_TX_DATA_CORRUPT)
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "err tx data corrupt");

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_OVERRUN) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "err rx overrun fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_RUNT) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "err rx runt fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_UNDERSIZE) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "err rx undersize fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_JABBER) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "err rx jabber fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_OVERSIZE) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "err rx oversize fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_FCS)
		adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "err rx fcs");

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_FRAME)
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "err rx frame");

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_CODE)
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "err rx code");

	if (reg_val & ADF_C4XXX_MAC_ERROR_RX_PREAMBLE)
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "err rx preamble");

	if (reg_val & ADF_C4XXX_MAC_RX_LINK_UP)
		adf_log_inln_err(accel_dev, offset, ADF_RAS_CORR, "rx link up");

	if (reg_val & ADF_C4XXX_MAC_INVALID_SPEED)
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "invalid speed");

	if (reg_val & ADF_C4XXX_MAC_PIA_RX_FIFO_OVERRUN) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "pia rx fifo overrun fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_PIA_TX_FIFO_OVERRUN) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "pia tx fifo overrun fatal err");
	}

	if (reg_val & ADF_C4XXX_MAC_PIA_TX_FIFO_UNDERRUN) {
		*reset_required = true;
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_FATAL,
				 "pia tx fifo underrun fatal err");
	}

	/* Clear the interrupt and allow the next error to be logged. */
	ADF_CSR_WR64(aram_base_addr, ADF_C4XXX_MAC_IP + offset, reg_val);
}

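/*
 * Inline RF parity helper. In the parity status registers, even bits map
 * to multiplier RF parity errors and odd bits to regular RF parity
 * errors, hence the two RF_PAR_*_MAP() translations below. Returns true
 * if any flagged parity error was handled.
 */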
static inline bool
adf_handle_rf_par_err(struct adf_accel_dev *accel_dev,
		      struct resource *aram_base_addr,
		      u32 rf_par_addr,
		      u32 rf_par_msk,
		      u32 offset,
		      char *msg)
{
	u32 reg_val;
	unsigned long intr_status;
	int i;
	char strbuf[ADF_C4XXX_MAX_STR_LEN];

	/* Handle RF parity errors */
	reg_val = ADF_CSR_RD(aram_base_addr, rf_par_addr + offset);
	intr_status = reg_val & rf_par_msk;
	if (intr_status) {
		for_each_set_bit(i, &intr_status, ADF_C4XXX_RF_PAR_ERR_BITS)
		{
			if (i % 2 == 0)
				snprintf(strbuf,
					 sizeof(strbuf),
					 "%s mul par %u uncorr fatal err",
					 msg,
					 RF_PAR_MUL_MAP(i));
			else
				snprintf(strbuf,
					 sizeof(strbuf),
					 "%s par %u uncorr fatal err",
					 msg,
					 RF_PAR_MAP(i));

			adf_log_inln_err(accel_dev,
					 offset,
					 ADF_RAS_FATAL,
					 strbuf);
		}

		/* Clear the interrupt and allow the next error to be logged. */
		ADF_CSR_WR(aram_base_addr, rf_par_addr + offset, reg_val);
		return true;
	}
	return false;
}

static inline void
adf_handle_cd_rf_par_err(struct adf_accel_dev *accel_dev,
			 struct resource *aram_base_addr,
			 u32 offset,
			 bool *reset_required)
{
	/* Handle reg_cd_rf_parity_err[1] */
	*reset_required |=
	    adf_handle_rf_par_err(accel_dev,
				  aram_base_addr,
				  ADF_C4XXX_IC_CD_RF_PARITY_ERR_1,
				  ADF_C4XXX_CD_RF_PAR_ERR_1_INTR,
				  offset,
				  "cd rf par[1]:");
}

static inline void
adf_handle_inln_rf_par_err(struct adf_accel_dev *accel_dev,
			   struct resource *aram_base_addr,
			   u32 offset,
			   bool *reset_required)
{
	/* Handle reg_inln_rf_parity_err[0] */
	*reset_required |=
	    adf_handle_rf_par_err(accel_dev,
				  aram_base_addr,
				  ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0,
				  ADF_C4XXX_INLN_RF_PAR_ERR_0_INTR,
				  offset,
				  "inln rf par[0]:");

	/* Handle reg_inln_rf_parity_err[1] */
	*reset_required |=
	    adf_handle_rf_par_err(accel_dev,
				  aram_base_addr,
				  ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1,
				  ADF_C4XXX_INLN_RF_PAR_ERR_1_INTR,
				  offset,
				  "inln rf par[1]:");

	/* Handle reg_inln_rf_parity_err[2] */
	*reset_required |=
	    adf_handle_rf_par_err(accel_dev,
				  aram_base_addr,
				  ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2,
				  ADF_C4XXX_INLN_RF_PAR_ERR_2_INTR,
				  offset,
				  "inln rf par[2]:");

	/* Handle reg_inln_rf_parity_err[5] */
	*reset_required |=
	    adf_handle_rf_par_err(accel_dev,
				  aram_base_addr,
				  ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5,
				  ADF_C4XXX_INLN_RF_PAR_ERR_5_INTR,
				  offset,
				  "inln rf par[5]:");
}

static inline void
adf_handle_congest_mngt_intr(struct adf_accel_dev *accel_dev,
			     struct resource *aram_base_addr,
			     u32 offset,
			     bool *reset_required)
{
	u32 reg_val;

	reg_val = ADF_CSR_RD(aram_base_addr,
			     ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset);

	/* This indicates a misconfiguration of the CPM or of the Ethernet
	 * Complex, or that the traffic profile has deviated from the one
	 * for which the resources were configured.
	 */
	if (reg_val & ADF_C4XXX_CONGESTION_MGMT_CTPB_GLOBAL_CROSSED) {
		adf_log_inln_err(
		    accel_dev,
		    offset,
		    ADF_RAS_FATAL,
		    "congestion mgmt ctpb global crossed fatal err");
		*reset_required = true;
	}

	if (reg_val & ADF_C4XXX_CONGESTION_MGMT_XOFF_CIRQ_OUT) {
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "congestion mgmt XOFF cirq out err");
	}

	if (reg_val & ADF_C4XXX_CONGESTION_MGMT_XOFF_CIRQ_IN) {
		adf_log_inln_err(accel_dev,
				 offset,
				 ADF_RAS_CORR,
				 "congestion mgmt XOFF cirq in err");
	}

	/* Clear the interrupt and allow the next error to be logged */
	ADF_CSR_WR(aram_base_addr,
		   ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset,
		   reg_val);
}

static inline void
adf_handle_inline_intr(struct adf_accel_dev *accel_dev,
		       struct resource *aram_base_addr,
		       u32 csr_offset,
		       bool *reset_required)
{
	adf_handle_cd_rf_par_err(accel_dev,
				 aram_base_addr,
				 csr_offset,
				 reset_required);

	adf_handle_parser_uerr(accel_dev,
			       aram_base_addr,
			       csr_offset,
			       reset_required);

	adf_handle_inln_rf_par_err(accel_dev,
				   aram_base_addr,
				   csr_offset,
				   reset_required);

	adf_handle_congest_mngt_intr(accel_dev,
				     aram_base_addr,
				     csr_offset,
				     reset_required);

	adf_handle_mac_intr(accel_dev,
			    aram_base_addr,
			    csr_offset,
			    reset_required);
}

static inline void
adf_process_errsou11(struct adf_accel_dev *accel_dev,
		     struct resource *pmisc,
		     u32 errsou,
		     bool *reset_required)
{
	struct resource *aram_base_addr =
	    (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;

	if (errsou & ADF_C4XXX_TI_MISC)
		adf_handle_ti_misc_err(accel_dev, pmisc);

	if (errsou & ADF_C4XXX_RI_PUSH_PULL_PAR_ERR)
		adf_handle_ri_push_pull_par_err(accel_dev, pmisc);

	if (errsou & ADF_C4XXX_TI_PUSH_PULL_PAR_ERR)
		adf_handle_ti_push_pull_par_err(accel_dev, pmisc);

	if (errsou & ADF_C4XXX_ARAM_CORR_ERR)
		adf_handle_aram_corr_err(accel_dev, aram_base_addr);

	if (errsou & ADF_C4XXX_ARAM_UNCORR_ERR)
		adf_handle_aram_uncorr_err(accel_dev, aram_base_addr);

	if (errsou & ADF_C4XXX_TI_PULL_PAR_ERR)
		adf_handle_ti_pull_par_err(accel_dev, pmisc);

	if (errsou & ADF_C4XXX_RI_PUSH_PAR_ERR)
		adf_handle_ri_push_par_err(accel_dev, pmisc);

	if (errsou & ADF_C4XXX_INLINE_INGRESS_INTR)
		adf_handle_inline_intr(accel_dev,
				       aram_base_addr,
				       ADF_C4XXX_INLINE_INGRESS_OFFSET,
				       reset_required);

	if (errsou & ADF_C4XXX_INLINE_EGRESS_INTR)
		adf_handle_inline_intr(accel_dev,
				       aram_base_addr,
				       ADF_C4XXX_INLINE_EGRESS_OFFSET,
				       reset_required);
}

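/*
 * Top-level RAS interrupt handler: reads ERRSOU8..ERRSOU11 and dispatches
 * to the per-source handlers above. Returns true if any RAS source was
 * handled; *reset_required is set when a fatal source demands a device
 * reset. A caller's ISR would use it roughly as follows (a sketch; the
 * reset helper named here is hypothetical):
 *
 *	bool reset_required = false;
 *
 *	if (adf_ras_interrupts(accel_dev, &reset_required) &&
 *	    reset_required)
 *		schedule_device_reset(accel_dev);
 */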
bool
adf_ras_interrupts(struct adf_accel_dev *accel_dev, bool *reset_required)
{
	u32 errsou = 0;
	bool handled = false;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_accels = hw_data->get_num_accels(hw_data);
	struct resource *pmisc =
	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

	if (unlikely(!reset_required)) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid pointer reset_required\n");
		return false;
	}

	/* errsou8 */
	errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU8);
	if (errsou & ADF_C4XXX_ERRSOU8_MECORR_MASK) {
		adf_process_errsou8(accel_dev, pmisc);
		handled = true;
	}

	/* errsou9 */
	errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU9);
	if (errsou & ADF_C4XXX_ERRSOU9_ERROR_MASK) {
		adf_process_errsou9(accel_dev, pmisc, errsou, reset_required);
		handled = true;
	}

	/* errsou10 */
	errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU10);
	if (errsou & ADF_C4XXX_ERRSOU10_RAS_MASK) {
		adf_process_errsou10(
		    accel_dev, pmisc, errsou, num_accels, reset_required);
		handled = true;
	}

	/* errsou11 */
	errsou = ADF_CSR_RD(pmisc, ADF_C4XXX_ERRSOU11);
	if (errsou & ADF_C4XXX_ERRSOU11_ERROR_MASK) {
		adf_process_errsou11(accel_dev, pmisc, errsou, reset_required);
		handled = true;
	}

	return handled;
}