/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dev_err.h>
#include <adf_pf2vf_msg.h>
#include <adf_gen4_hw_data.h>
#include "adf_4xxx_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#include "icp_qat_hw.h"

#define ADF_CONST_TABLE_SIZE 1024

struct adf_fw_config {
        u32 ae_mask;
        char *obj_name;
};

/* Accel unit information */
static const struct adf_accel_unit adf_4xxx_au_a_ae[] = {
        { 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL },
        { 0x2, 0x1, 0xF0, 0x6C0, 4, ADF_ACCEL_SERVICE_NULL },
        { 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN },
};

/* Worker thread to service arbiter mappings */
static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
        0x5555555, 0x5555555, 0x5555555, 0x5555555,
        0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
        0x0
};

/*
 * Masks representing ME thread-service mappings.
 * Thread 7 carries out Admin work and is thus left out.
 */
static u8 default_active_thd_mask = 0x7F;
static u8 dc_me_active_thd_mask = 0x03;

static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 };

#define ADF_4XXX_ASYM_SYM \
        (ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_DC \
        (COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_SYM \
        (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_ASYM \
        (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_ASYM_DC \
        (ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_SYM_DC \
        (SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_NA \
        (NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM

struct adf_enabled_services {
        const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        u16 rng_to_svc_msk;
};

static struct adf_enabled_services adf_4xxx_svcs[] = {
        { "dc", ADF_4XXX_DC },
        { "sym", ADF_4XXX_SYM },
        { "asym", ADF_4XXX_ASYM },
        { "dc;asym", ADF_4XXX_ASYM_DC },
        { "asym;dc", ADF_4XXX_ASYM_DC },
        { "sym;dc", ADF_4XXX_SYM_DC },
        { "dc;sym", ADF_4XXX_SYM_DC },
        { "asym;sym", ADF_4XXX_ASYM_SYM },
        { "sym;asym", ADF_4XXX_ASYM_SYM },
};

static struct adf_hw_device_class adf_4xxx_class = {
        .name = ADF_4XXX_DEVICE_NAME,
        .type = DEV_4XXX,
        .instances = 0,
};

static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
        return ADF_4XXX_ACCELERATORS_MASK;
}

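/*
 * The fuse register has a bit set for each fused-off (disabled) accel
 * engine, so the mask of usable engines is the inverted fuse value limited
 * to the engines present on this device.
 */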
static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
        u32 fusectl4 = accel_dev->hw_device->fuses;

        return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK;
}

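/*
 * Translate the user-supplied ServicesEnabled string into the ring-pair to
 * service bitmap by looking it up in the adf_4xxx_svcs table.
 */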
static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        u32 i = 0;

        *ring_to_svc_map = 0;
        /* Get the services enabled by the user */
        snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
        if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
                return EFAULT;

        for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) {
                if (!strncmp(val,
                             adf_4xxx_svcs[i].svcs_enabled,
                             ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
                        *ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk;
                        return 0;
                }
        }

        device_printf(GET_DEV(accel_dev),
                      "Invalid services enabled: %s\n",
                      val);
        return EFAULT;
}

static u32
get_num_accels(struct adf_hw_device_data *self)
{
        return ADF_4XXX_MAX_ACCELERATORS;
}

static u32
get_num_aes(struct adf_hw_device_data *self)
{
        if (!self || !self->ae_mask)
                return 0;

        return hweight32(self->ae_mask);
}

static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
        return ADF_4XXX_PMISC_BAR;
}

static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
        return ADF_4XXX_ETR_BAR;
}

static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
        return ADF_4XXX_SRAM_BAR;
}

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one-to-one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
static void
set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
        struct resource *csr;
        int i;

        csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
        for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
                ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
}

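/*
 * Build the capability mask: start from everything the 4xxx generation can
 * do, then clear each capability whose hardware slice is fused off in
 * FUSECTL1.
 */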
static u32
adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
{
        device_t pdev = accel_dev->accel_pci_dev.pci_dev;
        u32 fusectl1;
        u32 capabilities;

        /* Read accelerator capabilities mask */
        fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4);
        capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
            ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
            ICP_ACCEL_CAPABILITIES_CIPHER |
            ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
            ICP_ACCEL_CAPABILITIES_COMPRESSION |
            ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
            ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
            ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT |
            ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
            ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
            ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
            ICP_ACCEL_CAPABILITIES_AES_V2 | ICP_ACCEL_CAPABILITIES_RL;

        if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
        }
        if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
        if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
        if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
        }
        if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
        }
        return capabilities;
}

static u32
get_hb_clock(struct adf_hw_device_data *self)
{
        /* 4XXX uses the KPT counter for heartbeat */
        return ADF_4XXX_KPT_COUNTER_FREQ;
}

static u32
get_ae_clock(struct adf_hw_device_data *self)
{
        /* The clock update interval is 16 ticks for qat_4xxx */
        return self->clock_frequency / 16;
}

static int
measure_clock(struct adf_accel_dev *accel_dev)
{
        u32 frequency;
        int ret = 0;

        ret = adf_dev_measure_clock(accel_dev,
                                    &frequency,
                                    ADF_4XXX_MIN_AE_FREQ,
                                    ADF_4XXX_MAX_AE_FREQ);
        if (ret)
                return ret;

        accel_dev->hw_device->clock_frequency = frequency;
        return 0;
}

static int
adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
{
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
        char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

        if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
                goto err;

        snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
        snprintf(val_str,
                 sizeof(val_str),
                 ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM);

        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
                goto err;

        return 0;
err:
        device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
        return EINVAL;
}

static u32
get_num_accel_units(struct adf_hw_device_data *self)
{
        return ADF_4XXX_MAX_ACCELUNITS;
}

static void
get_accel_unit(struct adf_hw_device_data *self,
               struct adf_accel_unit **accel_unit)
{
        memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae));
}

static void
adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
{
        if (accel_dev->au_info) {
                kfree(accel_dev->au_info->au);
                accel_dev->au_info->au = NULL;
                kfree(accel_dev->au_info);
                accel_dev->au_info = NULL;
        }
}

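/*
 * Parse the ServicesEnabled string and work out how many accel units (AUs)
 * each service gets, which capabilities must be disabled as a consequence,
 * and the final service masks for the device.
 */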
static int
get_accel_unit_config(struct adf_accel_dev *accel_dev,
                      u8 *num_sym_au,
                      u8 *num_dc_au,
                      u8 *num_asym_au)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        /* One AU is allocated by default for each enabled service */
        u32 alloc_au = 1;
        /* There is always one AU reserved for the admin AE */
        u32 service_mask = ADF_ACCEL_ADMIN;
        char *token, *cur_str;
        u32 disabled_caps = 0;

        /* Get the services enabled by the user */
        snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
        if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
                return EFAULT;
        cur_str = val;
        token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
        while (token) {
                if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
                        service_mask |= ADF_ACCEL_CRYPTO;
                if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
                        service_mask |= ADF_ACCEL_ASYM;

                /*
                 * "cy" means both asym and sym crypto should be enabled;
                 * the hardware resource allocation check is done later.
                 */
                if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY)))
                        service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO;
                if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC)))
                        service_mask |= ADF_ACCEL_COMPRESSION;

                token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
        }

        /*
         * Ensure the user cannot enable more services than the device
         * supports.
         */
        if (hweight32(service_mask) > num_au) {
                device_printf(GET_DEV(accel_dev),
                              "Can't enable more services than %d!\n",
                              num_au);
                return EFAULT;
        } else if (hweight32(service_mask) == 2) {
                /*
                 * Due to a device limitation, besides the AU for the admin
                 * AE only two more AUs can be allocated.
                 */
                alloc_au = 2;
        }

        if (service_mask & ADF_ACCEL_CRYPTO)
                *num_sym_au = alloc_au;
        if (service_mask & ADF_ACCEL_ASYM)
                *num_asym_au = alloc_au;
        if (service_mask & ADF_ACCEL_COMPRESSION)
                *num_dc_au = alloc_au;

        /* Update capabilities */
        if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) {
                disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
                    ICP_ACCEL_CAPABILITIES_CIPHER |
                    ICP_ACCEL_CAPABILITIES_SHA3_EXT |
                    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
                    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
                    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
                    ICP_ACCEL_CAPABILITIES_AES_V2;
        }
        if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) {
                disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
                    ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
        }
        if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) {
                disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
                    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
                    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
                    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
                accel_dev->hw_device->extended_dc_capabilities = 0;
        }
        accel_dev->hw_device->accel_capabilities_mask =
            adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps;

        hw_data->service_mask = service_mask;
        hw_data->service_to_load_mask = service_mask;

        return 0;
}

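/*
 * Allocate and populate the accel unit table, then hand out the AUs that
 * are still unassigned (ADF_ACCEL_SERVICE_NULL) to the asym, sym and
 * compression services in that order.
 */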
static int
adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
{
        u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        u32 au_size = num_au * sizeof(struct adf_accel_unit);
        u8 i;

        if (get_accel_unit_config(
                accel_dev, &num_sym_au, &num_dc_au, &num_asym_au))
                return EFAULT;

        accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
        if (!accel_dev->au_info)
                return ENOMEM;

        accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL);
        if (!accel_dev->au_info->au) {
                kfree(accel_dev->au_info);
                accel_dev->au_info = NULL;
                return ENOMEM;
        }

        accel_dev->au_info->num_cy_au = num_sym_au;
        accel_dev->au_info->num_dc_au = num_dc_au;
        accel_dev->au_info->num_asym_au = num_asym_au;

        get_accel_unit(hw_data, &accel_dev->au_info->au);

        /* Enable ASYM accel units */
        for (i = 0; i < num_au && num_asym_au > 0; i++) {
                if (accel_dev->au_info->au[i].services ==
                    ADF_ACCEL_SERVICE_NULL) {
                        accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM;
                        num_asym_au--;
                }
        }
        /* Enable SYM accel units */
        for (i = 0; i < num_au && num_sym_au > 0; i++) {
                if (accel_dev->au_info->au[i].services ==
                    ADF_ACCEL_SERVICE_NULL) {
                        accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO;
                        num_sym_au--;
                }
        }
        /* Enable compression accel units */
        for (i = 0; i < num_au && num_dc_au > 0; i++) {
                if (accel_dev->au_info->au[i].services ==
                    ADF_ACCEL_SERVICE_NULL) {
                        accel_dev->au_info->au[i].services =
                            ADF_ACCEL_COMPRESSION;
                        num_dc_au--;
                }
        }
        accel_dev->au_info->dc_ae_msk |=
            hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION);

        return 0;
}

static int
adf_init_accel_units(struct adf_accel_dev *accel_dev)
{
        return adf_init_accel_unit_services(accel_dev);
}

static void
adf_exit_accel_units(struct adf_accel_dev *accel_dev)
{
        /* Reset the AU service */
        adf_exit_accel_unit_services(accel_dev);
}

static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
             enum adf_accel_unit_services service)
{
        switch (service) {
        case ADF_ACCEL_ASYM:
                return ADF_4XXX_ASYM_OBJ;
        case ADF_ACCEL_CRYPTO:
                return ADF_4XXX_SYM_OBJ;
        case ADF_ACCEL_COMPRESSION:
                return ADF_4XXX_DC_OBJ;
        case ADF_ACCEL_ADMIN:
                return ADF_4XXX_ADMIN_OBJ;
        default:
                return NULL;
        }
}

static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
        return ADF_4XXX_MAX_OBJ;
}

static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
                    enum adf_accel_unit_services service)
{
        u32 ae_mask = 0;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
        u32 i = 0;

        if (service == ADF_ACCEL_SERVICE_NULL)
                return 0;

        for (i = 0; i < num_au; i++) {
                if (accel_unit[i].services == service)
                        ae_mask |= accel_unit[i].ae_mask;
        }

        return ae_mask;
}

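/*
 * Return the next service whose firmware object should be loaded, clearing
 * it from service_to_load_mask so each service is returned only once. The
 * admin AE (the last accel unit) is deliberately handled after all others.
 */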
static enum adf_accel_unit_services
adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num)
{
        struct adf_accel_unit *accel_unit;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u8 num_au = hw_data->get_num_accel_units(hw_data);
        int i;

        if (!hw_data->service_to_load_mask)
                return ADF_ACCEL_SERVICE_NULL;

        if (accel_dev->au_info && accel_dev->au_info->au)
                accel_unit = accel_dev->au_info->au;
        else
                return ADF_ACCEL_SERVICE_NULL;

        for (i = num_au - 2; i >= 0; i--) {
                if (hw_data->service_to_load_mask & accel_unit[i].services) {
                        hw_data->service_to_load_mask &=
                            ~accel_unit[i].services;
                        return accel_unit[i].services;
                }
        }

        /* The admin AE should be loaded last */
        if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) {
                hw_data->service_to_load_mask &=
                    ~accel_unit[num_au - 1].services;
                return accel_unit[num_au - 1].services;
        }

        return ADF_ACCEL_SERVICE_NULL;
}

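/*
 * Every 4xxx bank serves exactly one service: the service type is selected
 * from the ring-to-service map using the bank number modulo the number of
 * service slots. The rings of a bank are split evenly between TX and RX,
 * hence half the rings of a bank per service.
 */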
static void
get_ring_svc_map_data(int ring_pair_index,
                      u16 ring_to_svc_map,
                      u8 *serv_type,
                      int *ring_index,
                      int *num_rings_per_srv,
                      int bundle_num)
{
        *serv_type =
            GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES);
        *ring_index = 0;
        *num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2;
}

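/*
 * Query the firmware for the extended compression capabilities. The request
 * only needs to reach a single compression-capable AE.
 */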
static int
adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities)
{
        struct icp_qat_fw_init_admin_req req;
        struct icp_qat_fw_init_admin_resp resp;
        u8 i;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u8 num_au = hw_data->get_num_accel_units(hw_data);
        u32 first_dc_ae = 0;

        for (i = 0; i < num_au; i++) {
                if (accel_dev->au_info->au[i].services &
                    ADF_ACCEL_COMPRESSION) {
                        first_dc_ae = accel_dev->au_info->au[i].ae_mask;
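                        /* Keep only the lowest set bit: one DC AE is enough */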
                        first_dc_ae &= ~(first_dc_ae - 1);
                }
        }

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));
        req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;

        if (likely(first_dc_ae)) {
                if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) ||
                    resp.status) {
                        *capabilities = 0;
                        return EFAULT;
                }

                *capabilities = resp.extended_features;
        }

        return 0;
}

static int
adf_get_fw_status(struct adf_accel_dev *accel_dev,
                  u8 *major,
                  u8 *minor,
                  u8 *patch)
{
        struct icp_qat_fw_init_admin_req req;
        struct icp_qat_fw_init_admin_resp resp;
        u32 ae_mask = 1;

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));
        req.cmd_id = ICP_QAT_FW_STATUS_GET;

        if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
                return EFAULT;

        *major = resp.version_major_num;
        *minor = resp.version_minor_num;
        *patch = resp.version_patch_num;

        return 0;
}

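/*
 * Bring up the firmware over the admin interface: push the constants table,
 * send the init message to all service AEs, arm the heartbeat timer, then
 * fetch the extended compression capabilities and the firmware version.
 */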
static int
adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev)
{
        int ret = 0;
        struct icp_qat_fw_init_admin_req req;
        struct icp_qat_fw_init_admin_resp resp;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 ae_mask = hw_data->ae_mask;
        u32 admin_ae_mask = hw_data->admin_ae_mask;
        u8 num_au = hw_data->get_num_accel_units(hw_data);
        u8 i;
        u32 dc_capabilities = 0;

        for (i = 0; i < num_au; i++) {
                if (accel_dev->au_info->au[i].services ==
                    ADF_ACCEL_SERVICE_NULL)
                        ae_mask &= ~accel_dev->au_info->au[i].ae_mask;

                if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN)
                        admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask;
        }

        if (!accel_dev->admin) {
                device_printf(GET_DEV(accel_dev), "adf_admin not available\n");
                return EFAULT;
        }

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));

        req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
        req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
        req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
        if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) {
                device_printf(GET_DEV(accel_dev),
                              "Error sending constants config message\n");
                return EFAULT;
        }

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));
        req.cmd_id = ICP_QAT_FW_INIT_ME;
        if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) {
                device_printf(GET_DEV(accel_dev),
                              "Error sending init message\n");
                return EFAULT;
        }

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));
        req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
        req.init_cfg_ptr = accel_dev->admin->phy_hb_addr;
        if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks))
                return EINVAL;

        if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
                device_printf(GET_DEV(accel_dev),
                              "Heartbeat is not supported\n");

        ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities);
        if (unlikely(ret)) {
                device_printf(GET_DEV(accel_dev),
                              "Could not get FW ext. capabilities\n");
        }

        accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;

        adf_get_fw_status(accel_dev,
                          &accel_dev->fw_versions.fw_version_major,
                          &accel_dev->fw_versions.fw_version_minor,
                          &accel_dev->fw_versions.fw_version_patch);

        device_printf(GET_DEV(accel_dev),
                      "FW version: %d.%d.%d\n",
                      accel_dev->fw_versions.fw_version_major,
                      accel_dev->fw_versions.fw_version_minor,
                      accel_dev->fw_versions.fw_version_patch);

        return ret;
}

static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
        return DEV_SKU_1;
}

static struct adf_accel_unit *
get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num)
{
        int i = 0;
        struct adf_accel_unit *accel_unit = accel_dev->au_info->au;

        if (!accel_unit)
                return NULL;

        for (i = 0; i < ADF_4XXX_MAX_ACCELUNITS; i++)
                if (accel_unit[i].ae_mask & BIT(ae_num))
                        return &accel_unit[i];

        return NULL;
}

static bool
check_accel_unit_service(enum adf_accel_unit_services au_srv,
                         enum adf_cfg_service_type ring_srv)
{
        if ((au_srv & ADF_ACCEL_SERVICE_NULL) && ring_srv == NA)
                return true;
        if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP)
                return true;
        if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM)
                return true;
        if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM)
                return true;

        return false;
}

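/*
 * Generate the worker-thread to arbiter mapping. If the ring-to-service map
 * is the default one, the precomputed table is copied as-is; otherwise, for
 * every engine, each active thread is given a bit for every service its
 * accel unit can handle.
 */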
static void
adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev,
                                  u32 *thrd_to_arb_map_gen)
{
        struct adf_accel_unit *au = NULL;
        int engine = 0;
        int thread = 0;
        int service;
        u16 ena_srv_mask;
        u16 service_type;
        u32 service_mask;
        unsigned long thd_srv_mask = default_active_thd_mask;

        ena_srv_mask = accel_dev->hw_device->ring_to_svc_map;
        /* If ring_to_svc_map is not changed, return default arbiter value */
        if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) {
                memcpy(thrd_to_arb_map_gen,
                       thrd_to_arb_map,
                       sizeof(thrd_to_arb_map_gen[0]) *
                           ADF_4XXX_MAX_ACCELENGINES);
                return;
        }

        for (engine = 0; engine < ADF_4XXX_MAX_ACCELENGINES - 1; engine++) {
                thrd_to_arb_map_gen[engine] = 0;
                service_mask = 0;
                au = get_au_by_ae(accel_dev, engine);
                if (!au)
                        continue;

                for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) {
                        service_type = GET_SRV_TYPE(ena_srv_mask, service);
                        if (check_accel_unit_service(au->services,
                                                     service_type))
                                service_mask |= BIT(service);
                }

                if (au->services == ADF_ACCEL_COMPRESSION)
                        thd_srv_mask = dc_me_active_thd_mask;
                else
                        thd_srv_mask = default_active_thd_mask;

                for_each_set_bit(thread, &thd_srv_mask, 8)
                {
                        thrd_to_arb_map_gen[engine] |=
                            (service_mask << (ADF_CFG_MAX_SERVICES * thread));
                }
        }
}

static void
adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
                        u32 const **arb_map_config)
{
        int i;
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;

        for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) {
                if (~hw_device->ae_mask & (1 << i))
                        thrd_to_arb_map[i] = 0;
        }
        adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen);
        *arb_map_config = thrd_to_arb_map_gen;
}

static void
get_arb_info(struct arb_info *arb_info)
{
        arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG;
        arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET;
        arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
}

static void
get_admin_info(struct admin_info *admin_csrs_info)
{
        admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
        admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
        admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}

static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
        struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
        struct resource *csr = misc_bar->virt_addr;

        /* Enable all in errsou3 except VFLR notification on host */
        ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
}

static void
adf_enable_ints(struct adf_accel_dev *accel_dev)
{
        struct resource *addr;

        addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

        /* Enable bundle interrupts */
        ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
        ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);

        /* Enable misc interrupts */
        ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}

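/*
 * Power-management handshake with the device: temporarily mask the PM
 * interrupt, assert DRV_ACTIVE, then poll PM_STATUS until the device
 * reports it has reached the init state.
 */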
static int
adf_init_device(struct adf_accel_dev *accel_dev)
{
        struct resource *addr;
        u32 status;
        u32 csr;
        int ret;

        addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

        /* Temporarily mask PM interrupt */
        csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
        csr |= ADF_4XXX_PM_SOU;
        ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);

        /* Set DRV_ACTIVE bit to power up the device */
        ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);

        /* Poll status register to make sure the device is powered up */
        status = 0;
        ret = read_poll_timeout(ADF_CSR_RD,
                                status,
                                status & ADF_4XXX_PM_INIT_STATE,
                                ADF_4XXX_PM_POLL_DELAY_US,
                                ADF_4XXX_PM_POLL_TIMEOUT_US,
                                true,
                                addr,
                                ADF_4XXX_PM_STATUS);
        if (ret)
                device_printf(GET_DEV(accel_dev),
                              "Failed to power up the device\n");

        return ret;
}

void
adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class = &adf_4xxx_class;
        hw_data->instance_id = adf_4xxx_class.instances++;
        hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
        hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
        hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
        hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
        hw_data->num_logical_accel = 1;
        hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
        hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
        hw_data->alloc_irq = adf_isr_resource_alloc;
        hw_data->free_irq = adf_isr_resource_free;
        hw_data->enable_error_correction = adf_enable_error_correction;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
        hw_data->get_num_aes = get_num_aes;
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_arb_info = get_arb_info;
        hw_data->get_admin_info = get_admin_info;
        hw_data->get_accel_cap = adf_4xxx_get_hw_cap;
        hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
        hw_data->get_sku = get_sku;
        hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
        hw_data->fw_name = ADF_4XXX_FW;
        hw_data->fw_mmp_name = ADF_4XXX_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->send_admin_init = adf_4xxx_send_admin_init;
        hw_data->init_arb = adf_init_gen2_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
        hw_data->init_device = adf_init_device;
        hw_data->reset_device = adf_reset_flr;
        hw_data->restore_device = adf_dev_restore;
        hw_data->init_accel_units = adf_init_accel_units;
        hw_data->exit_accel_units = adf_exit_accel_units;
        hw_data->get_num_accel_units = get_num_accel_units;
        hw_data->configure_accel_units = adf_4xxx_configure_accel_units;
        hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
        hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
        hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
        hw_data->get_objs_num = get_objs_num;
        hw_data->get_obj_name = get_obj_name;
        hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
        hw_data->get_service_type = adf_4xxx_get_service_type;
        hw_data->set_msix_rttable = set_msix_default_rttable;
        hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->config_device = adf_config_device;
        hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
        hw_data->get_hb_clock = get_hb_clock;
        hw_data->get_heartbeat_status = adf_get_heartbeat_status;
        hw_data->get_ae_clock = get_ae_clock;
        hw_data->measure_clock = measure_clock;
        hw_data->query_storage_cap = 1;

        adf_gen4_init_hw_csr_info(&hw_data->csr_info);
}

void
adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class->instances--;
}