/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <linux/delay.h>
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)
#define MAX_UINT32_VAL 0xfffffffful

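/*
 * Bind the encapsulated image image_num to the next free slice of AE ae and
 * allocate the slice's region and page bookkeeping. On GEN4 devices the
 * effective ustore size is the physical size; on earlier devices a
 * shared-ustore image gets twice the physical size.
 */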
static int
qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
    unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
		    encap_image->img_ptr->ctx_assigned;
		ae_data->shareable_ustore =
		    ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode);
		if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE)
			ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
		else {
			ae_data->eff_ustore_size = ae_data->shareable_ustore ?
			    (obj_handle->ustore_phy_size << 1) :
			    obj_handle->ustore_phy_size;
		}
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region =
	    malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO);
	ae_slice->page =
	    malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO);
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	ae_data->slice_num++;
	return 0;
}

static int
qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int i;

	if (!ae_data) {
		pr_err("QAT: bad argument, ae_data is NULL\n");
		return EINVAL;
	}

	for (i = 0; i < ae_data->slice_num; i++) {
		free(ae_data->ae_slices[i].region, M_QAT);
		ae_data->ae_slices[i].region = NULL;
		free(ae_data->ae_slices[i].page, M_QAT);
		ae_data->ae_slices[i].page = NULL;
	}
	return 0;
}

static char *
qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
    unsigned int str_offset)
{
	if (!str_table->table_len || str_offset > str_table->table_len)
		return NULL;
	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int
qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
		    maj, min);
		return EINVAL;
	}
	return 0;
}

static int
qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr)
{
	int maj = suof_hdr->maj_ver & 0xff;
	int min = suof_hdr->min_ver & 0xff;

	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
		return EINVAL;
	}
	if (suof_hdr->fw_type != 0) {
		pr_err("QAT: unsupported firmware type\n");
		return EINVAL;
	}
	if (suof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: SUOF chunk amount is incorrect\n");
		return EINVAL;
	}
	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
		    maj, min);
		return EINVAL;
	}
	return 0;
}

static int
qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
    unsigned int addr, const unsigned int *val, unsigned int num_in_bytes)
{
	unsigned int outval;
	const unsigned char *ptr = (const unsigned char *)val;

	if (num_in_bytes > handle->hal_sram_size) {
		pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes);
		return EINVAL;
	}
	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
	return 0;
}

static void
qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
    unsigned char ae, unsigned int addr, unsigned int *val,
    unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void
qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
    unsigned char ae, struct icp_qat_uof_batch_init *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		free(pre, M_QAT);
	}
	*base = NULL;
}

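/*
 * Parse the leading decimal digits of str into *num; fails if the string
 * does not begin with a number that fits in 32 bits.
 */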
static int
qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16] = { 0 };
	unsigned long ae = 0;
	int i;

	strncpy(buf, str, 15);
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((compat_strtoul(buf, 10, &ae)))
		return EFAULT;

	if (ae > MAX_UINT32_VAL)
		return EFAULT;

	*num = (unsigned int)ae;
	return 0;
}

static int
qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uof_initmem *init_mem, unsigned int size_range,
    unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range\n");
		return EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
		return EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return EINVAL;
	}
	return 0;
}

static int
qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uof_initmem *init_mem, unsigned int ae,
    struct icp_qat_uof_batch_init **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i = 0;

	mem_val_attr =
	    (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header =
		    malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO);
		init_header->size = 1;
		*init_tab_base = init_header;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO);
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
}

static int
qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;
	unsigned int lmem;

	lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ?
	    ICP_QAT_UCLO_MAX_LMEM_REG_2X :
	    ICP_QAT_UCLO_MAX_LMEM_REG;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae))
		return EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
	    &obj_handle->lm_init_tab[ae]))
		return EINVAL;
	return 0;
}

static int
qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;
	struct icp_qat_uclo_aedata *aed;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
	    &obj_handle->umem_init_tab[ae]))
		return EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	aed = &obj_handle->ae_data[ae];
	for (i = 0; i < aed->slice_num; i++) {
		if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
			aed->ae_slices[i].encap_image->uwords_num = uaddr;
	}
	return 0;
}

#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int
qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		    init_mem->region);
		return EINVAL;
	}
	return 0;
}

static int
qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae = 0;
	unsigned char neigh_ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;
	static unsigned int init[32] = { 0 };
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	uof_image = image->img_ptr;
	/* if shared CS mode, the ustore size should be 2 * ustore_phy_size */
	fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t),
	    M_QAT, M_WAITOK | M_ZERO);
	for (i = 0; i < obj_handle->ustore_phy_size * 2; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		    sizeof(uint64_t));
	page = image->page;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
		unsigned long ae_assigned = uof_image->ae_assigned;
		const bool gen4 =
		    IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)));

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		if (!test_bit(ae, &ae_assigned))
			continue;

		if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) &&
		    !gen4) {
			qat_hal_get_scs_neigh_ae(ae, &neigh_ae);

			if (test_bit(neigh_ae, &ae_assigned))
				continue;
		}

		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;
		if (obj_handle->ae_data[ae].shareable_ustore && !gen4) {
			qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
			if (init[ae] == 0 && page->beg_addr_p != 0) {
				qat_hal_wr_coalesce_uwords(handle,
				    (unsigned char)ae, 0, page->beg_addr_p,
				    &fill_data[0]);
			}
			qat_hal_wr_coalesce_uwords(handle, (unsigned char)ae,
			    patt_pos, ustore_size - patt_pos,
			    &fill_data[page->beg_addr_p]);
			init[ae] = 1;
			init[neigh_ae] = 1;
		} else {
			if (gen4 && (ae % 4 != 0))
				continue;

			qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
			    page->beg_addr_p, &fill_data[0]);
			qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
			    ustore_size - patt_pos + 1,
			    &fill_data[page->beg_addr_p]);
		}
	}
	free(fill_data, M_QAT);
	return 0;
}

static int
qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i;
	int ae = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return EINVAL;
		}
		initmem =
		    (struct icp_qat_uof_initmem *)((uintptr_t)((uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		if (qat_hal_batch_wr_lm(handle, ae,
		    obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
		    &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
		    obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
		    &obj_handle->umem_init_tab[ae]);
	}
	return 0;
}

static void *
qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, char *chunk_id,
    void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
	    (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr +
		sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}

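/*
 * One step of a CRC-16 (CCITT polynomial 0x1021) computation: fold the byte
 * ch into the running 16-bit checksum reg.
 */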
static unsigned int
qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}

static unsigned int
qat_uclo_calc_str_checksum(const char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
    char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)(buf +
	    sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
		    ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum !=
			    qat_uclo_calc_str_checksum(chunk,
				file_chunk->size))
				break;
			obj_hdr =
			    malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO);
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static unsigned int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page =
	    (struct icp_qat_uof_code_page *)((char *)image +
		sizeof(struct icp_qat_uof_image));
	uc_var_tab =
	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		code_page->uc_var_tab_offset);
	imp_var_tab =
	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		code_page->imp_var_tab_offset);
	imp_expr_tab =
	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
		return EINVAL;
	}
	neigh_reg_tab =
	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("QAT: UOF can't contain neighbor register table\n");
		return EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("QAT: UOF can't contain multiple pages\n");
		return EINVAL;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use reloadable feature\n");
		return EFAULT;
	}
	return 0;
}

static void
qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj,
    struct icp_qat_uof_image *img, struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page =
	    (struct icp_qat_uof_code_page *)((char *)img +
		sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area =
	    (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
		code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab =
	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
	    sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		    (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

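/*
 * Walk the IMAG chunks of the UOF and fill one icp_qat_uclo_encapme entry per
 * image (register, regsym, and sbreak tables plus a mapped code page).
 * Returns the number of images mapped, or 0 on error.
 */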
static int
qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
    struct icp_qat_uclo_encapme *ae_uimage, int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
	    &obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
		    ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
		    chunk_hdr->offset);
		ae_regtab =
		    (struct icp_qat_uof_objtable *)(image->reg_tab_offset +
			obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg =
		    (struct icp_qat_uof_ae_reg *)(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab =
		    (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab +
			obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym =
		    (struct icp_qat_uof_init_regsym *)(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab +
		    obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak =
		    (struct icp_qat_uof_sbreak *)(((char *)sbreak_tab) +
			sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
		    malloc(sizeof(struct icp_qat_uclo_encap_page), M_QAT,
			M_WAITOK | M_ZERO);
		qat_uclo_map_image_page(encap_uof_obj, image, ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		free(ae_uimage[i].page, M_QAT);
	return 0;
}

static int
UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle)
{
	int i;
	unsigned int swAe = 0;
	unsigned int ii, jj;
	struct icp_qat_uclo_aedata *ae_data0, *ae_datax;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (i = 0; i < obj_handle->uimage_num; i++) {
		struct icp_qat_uof_image *image =
		    obj_handle->ae_uimage[i].img_ptr;
		if (image->numpages > 1) {
			pr_err("Only 1 page is allowed in a UOF for CPM2X; we found %d in %s\n",
			    image->numpages,
			    qat_uclo_get_string(&obj_handle->str_table,
				image->img_name));
			return EINVAL;
		}
	}

	for (swAe = 0;
	    (swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE);
	    swAe += AE_TG_NUM_CPM2X) {
		if (!qat_hal_check_ae_active(handle, swAe)) {
			continue;
		}

		for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) {
			ae_data0 = &obj_handle->ae_data[ii];
			if (ae_data0->slice_num != 1) /* not assigned */
				continue;

			for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) {
				ae_datax = &obj_handle->ae_data[jj];
				if (ae_datax->slice_num != 1) /* not assigned */
					continue;
				if (ae_data0->ae_slices[0].encap_image->img_ptr !=
				    ae_datax->ae_slices[0].encap_image->img_ptr) {
					pr_err("Only 1 list is allowed in a Tgroup for CPM2X; ME%d and %d are assigned different list files\n",
					    ii, jj);
					return EINVAL;
				}
			}
		}
	}

	return 0;
}

static int
qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i;
	int ae = 0;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for_each_set_bit(ae, &ae_mask, max_ae)
	{
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		for (i = 0; i < obj_handle->uimage_num; i++) {
			unsigned long ae_assigned =
			    obj_handle->ae_uimage[i].img_ptr->ae_assigned;
			if (!test_bit(ae, &ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return EINVAL;
		}
	}
	if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		if (UcLo_checkTGroupList2X(handle)) {
			return EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses AE not set\n");
		return EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, char *tab_name,
    struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr =
	    qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff,
		tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len,
		    obj_hdr->file_buff + chunk_hdr->offset,
		    sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (uintptr_t)obj_hdr->file_buff +
		    chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
    struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr =
	    qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num,
		    encap_uof_obj->beg_uof + chunk_hdr->offset,
		    sizeof(unsigned int));
		init_mem_tab->init_mem =
		    (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof +
			chunk_hdr->offset + sizeof(unsigned int));
	}
}

static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
	switch (pci_get_device(GET_DEV(handle->accel_dev))) {
	case ADF_DH895XCC_PCI_DEVICE_ID:
		return ICP_QAT_AC_895XCC_DEV_TYPE;
	case ADF_C62X_PCI_DEVICE_ID:
		return ICP_QAT_AC_C62X_DEV_TYPE;
	case ADF_C3XXX_PCI_DEVICE_ID:
		return ICP_QAT_AC_C3XXX_DEV_TYPE;
	case ADF_200XX_PCI_DEVICE_ID:
		return ICP_QAT_AC_200XX_DEV_TYPE;
	case ADF_C4XXX_PCI_DEVICE_ID:
		return ICP_QAT_AC_C4XXX_DEV_TYPE;
	case ADF_4XXX_PCI_DEVICE_ID:
	case ADF_401XX_PCI_DEVICE_ID:
		return ICP_QAT_AC_4XXX_A_DEV_TYPE;
	default:
		pr_err("QAT: unsupported device 0x%x\n",
		    pci_get_device(GET_DEV(handle->accel_dev)));
		return 0;
	}
}

static int
qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
		    obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, prod_type);
		return EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
	    obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
		pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver);
		return EINVAL;
	}
	return 0;
}

static int
qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
    unsigned char ctx_mask, enum icp_qat_uof_regtype reg_type,
    unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
		    reg_addr, value);
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
		    reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
		    reg_addr, value);
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
		    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
		    reg_addr, value);
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
		    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
		return EFAULT;
	}
	return 0;
}

static int
qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, unsigned int ae,
    struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
			    (enum icp_qat_uof_regtype)init_regsym->reg_type,
			    (unsigned short)init_regsym->reg_addr, exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				    init_regsym->ctx);
				return EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
			    (unsigned char)(1 << init_regsym->ctx),
			    (enum icp_qat_uof_regtype)init_regsym->reg_type,
			    (unsigned short)init_regsym->reg_addr, exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n");
			return EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int
qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int s;
	unsigned int ae = 0;
	struct icp_qat_uclo_aedata *aed;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: memory initialization failed\n");
			return EINVAL;
		}
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		aed = &obj_handle->ae_data[ae];
		for (s = 0; s < aed->slice_num; s++) {
			if (!aed->ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae,
			    aed->ae_slices[s].encap_image))
				return EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int
qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_uclo_objhandle *obj_handle, unsigned char ae,
    struct icp_qat_uof_image *uof_image)
{
	unsigned char nn_mode;
	char ae_mode = 0;

	ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode);
	if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) {
		pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
		return EFAULT;
	}

	ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode);
	qat_hal_set_ae_scs_mode(handle, ae, ae_mode);
	if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);

		if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
			pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
			return EFAULT;
		}
	}
	ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
	if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) {
		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
		return EFAULT;
	}
	ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
	if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) {
		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
		return EFAULT;
	}
	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
		if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) {
			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
			return EFAULT;
		}
		ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
		if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) {
			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
			return EFAULT;
		}
		ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
		qat_hal_set_ae_tindex_mode(handle, ae, ae_mode);
	}
	return 0;
}

static int
qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	int error;
	unsigned char s;
	unsigned char ae = 0;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		unsigned long cfg_ae_mask = handle->cfg_ae_mask;

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
		    ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			error = qat_hal_set_modes(handle, obj_handle, ae,
			    uof_image);
			if (error)
				return error;
		}
	}
	return 0;
}

static void
qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num =
		    image->page->beg_addr_p + image->page->micro_words_num;
	}
}

static int
qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr =
	    (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev =
	    PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return EINVAL;
	}
	obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t),
	    M_QAT, M_WAITOK | M_ZERO);
	obj_handle->ustore_phy_size =
	    (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 :
	    0x4000;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
		&obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
	    qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
		ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
	    &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		free(obj_handle->ae_uimage[ae].page, M_QAT);
out_err:
	free(obj_handle->uword_buf, M_QAT);
	obj_handle->uword_buf = NULL;
	return EFAULT;
}

static int
qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle,
    const struct icp_qat_suof_filehdr *suof_ptr, int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (const char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	min_ver_offset =
	    suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver);
	check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver,
	    min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("QAT: incorrect SUOF checksum\n");
		return EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}

static void
qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_suof_img_hdr *suof_img_hdr,
    struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	const struct icp_qat_simg_ae_mode *ae_mode;
	struct icp_qat_suof_objhdr *suof_objhdr;
	unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));

	suof_img_hdr->simg_buf = (suof_handle->suof_buf +
	    suof_chunk_hdr->offset + sizeof(*suof_objhdr));
	suof_img_hdr->simg_len =
	    ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf +
		suof_chunk_hdr->offset))->img_length;

	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_key =
	    (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr));
	suof_img_hdr->css_signature = suof_img_hdr->css_key +
	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
	    ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id);
	suof_img_hdr->css_simg =
	    suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id);

	ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
    struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset +
	    suof_handle->suof_buf);
	*sym_str =
	    (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset +
		sizeof(str_table_obj->tab_length));
}

static int
qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_suof_img_hdr *img_hdr)
{
	const struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
	unsigned int prod_rev, maj_ver, prod_type;

	prod_type = qat_uclo_get_dev_type(handle);
	img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
	prod_rev =
	    PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id);
	if (img_ae_mode->dev_type != prod_type) {
		pr_err("QAT: incompatible product type %x\n",
		    img_ae_mode->dev_type);
		return EINVAL;
	}
	maj_ver = prod_rev & 0xff;
	if (maj_ver > img_ae_mode->devmax_ver ||
	    maj_ver < img_ae_mode->devmin_ver) {
		pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver);
		return EINVAL;
	}
	return 0;
}

static void
qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

	free(sobj_handle->img_table.simg_hdr, M_QAT);
	sobj_handle->img_table.simg_hdr = NULL;
	free(handle->sobj_handle, M_QAT);
	handle->sobj_handle = NULL;
}

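/*
 * Swap the image at index img_id with the last entry of the image table so
 * that it is processed last; callers use this to defer the image that
 * targets AE0 (or the highest-numbered AE on CPM2X).
 */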
static void
qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
    unsigned int img_id, unsigned int num_simgs)
{
	struct icp_qat_suof_img_hdr img_header;

	if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) {
		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
		    sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
		    sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[img_id], &img_header,
		    sizeof(*suof_img_hdr));
	}
}

static int
qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
    const struct icp_qat_suof_filehdr *suof_ptr, int suof_size)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE,
	    aeMax_img = ICP_QAT_UCLO_MAX_AE;
	unsigned int i = 0;
	struct icp_qat_suof_img_hdr img_header;

	if (!suof_ptr || suof_size == 0) {
		pr_err("QAT: invalid SUOF pointer or size\n");
		return EINVAL;
	}
	if (qat_uclo_check_suof_format(suof_ptr))
		return EINVAL;
	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
	if (ret)
		return ret;
	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr +
	    sizeof(*suof_ptr));

	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

	if (suof_handle->img_table.num_simgs != 0) {
		suof_img_hdr = malloc(suof_handle->img_table.num_simgs *
		    sizeof(img_header), M_QAT, M_WAITOK | M_ZERO);
		suof_handle->img_table.simg_hdr = suof_img_hdr;
	}

	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
		qat_uclo_map_simg(handle, &suof_img_hdr[i],
		    &suof_chunk_hdr[1 + i]);
		ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]);
		if (ret)
			return ret;
		suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
			ae0_img = i;
	}

	if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_uclo_tail_img(suof_img_hdr, ae0_img,
		    suof_handle->img_table.num_simgs);
	} else {
		if (suof_handle->img_table.num_simgs == 1)
			return 0;
		qat_uclo_tail_img(suof_img_hdr, ae0_img,
		    suof_handle->img_table.num_simgs - 1);
		for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
			if ((suof_img_hdr[i].ae_mask &
			    (0x1 << (handle->hal_handle->ae_max_num - 1))) !=
			    0) {
				aeMax_img = i;
				break;
			}
		}
		qat_uclo_tail_img(suof_img_hdr, aeMax_img,
		    suof_handle->img_table.num_simgs);
	}
	return 0;
}

#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low))
#define BITS_IN_DWORD 32

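/*
 * Point the FCU at the DMA'd CSS header and issue the AUTH command, then
 * poll FCU_STATUS until verification completes, fails, or the retry budget
 * is exhausted.
 */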
static int
qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int fcu_sts, mem_cfg_err, retry = 0;
	unsigned int fcu_ctl_csr, fcu_sts_csr;
	unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr;
	u64 bus_addr;

	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) -
	    sizeof(struct icp_qat_auth_chunk);
	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		fcu_ctl_csr = FCU_CONTROL_C4XXX;
		fcu_sts_csr = FCU_STATUS_C4XXX;
		fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX;
		fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX;
	} else {
		fcu_ctl_csr = FCU_CONTROL;
		fcu_sts_csr = FCU_STATUS;
		fcu_dram_hi_csr = FCU_DRAM_ADDR_HI;
		fcu_dram_lo_csr = FCU_DRAM_ADDR_LO;
	}
	SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
	SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr);
	SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);

	do {
		pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
	    fcu_sts & FCU_AUTH_STS_MASK, retry);
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		mem_cfg_err =
		    (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT);
		if (mem_cfg_err)
			pr_err("QAT: MEM_CFG_ERR\n");
	}
	return EINVAL;
}

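/*
 * On GEN4 devices an image is broadcast-loaded unless it targets the admin
 * AE; earlier generations never broadcast.
 */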
static int
qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid)
{
	struct icp_qat_suof_handle *sobj_handle;

	if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))))
		return 0;

	sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
	if (handle->hal_handle->admin_ae_mask &
	    sobj_handle->img_table.simg_hdr[imgid].ae_mask)
		return 0;

	return 1;
}

static int
qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i = 0;
	unsigned int fcuSts = 0, fcuAeBroadcastMask = 0;
	unsigned int retry = 0;
	unsigned int fcuStsCsr = 0;
	unsigned int fcuCtlCsr = 0;
	unsigned int loadedAes = 0;
	unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));

	if (IS_QAT_GEN4(device_id)) {
		fcuCtlCsr = FCU_CONTROL_4XXX;
		fcuStsCsr = FCU_STATUS_4XXX;
	} else {
		pr_err("Uclo_BroadcastLoadFW only applicable for CPM2X\n");
		return EINVAL;
	}

	for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) {
		if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask))
			continue;

		if (qat_hal_check_ae_active(handle, (unsigned char)i)) {
			pr_err("Uclo_BroadcastLoadFW error (invalid AE status)\n");
			return EINVAL;
		}

		if ((desc->ae_mask >> i) & 0x1) {
			fcuAeBroadcastMask |= 1 << i;
		}
	}

	if (fcuAeBroadcastMask) {
		retry = 0;
		SET_FCU_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
		    fcuAeBroadcastMask);
		SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD);
		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcuSts = GET_FCU_CSR(handle, fcuStsCsr);

			if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) {
				pr_err("Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n",
				    fcuSts & FCU_AUTH_STS_MASK);
				return EINVAL;
			} else if ((fcuSts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) {
				if (IS_QAT_GEN4(device_id))
					loadedAes = GET_FCU_CSR(handle,
					    FCU_AE_LOADED_4XXX);
				else
					loadedAes =
					    (fcuSts >> FCU_LOADED_AE_POS);

				if ((loadedAes & fcuAeBroadcastMask) ==
				    fcuAeBroadcastMask)
					break;
			} else if ((fcuSts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_VERI_DONE) {
				SET_FCU_CSR(handle, fcuCtlCsr,
				    FCU_CTRL_CMD_LOAD);
			}
		} while (retry++ < FW_BROADCAST_MAX_RETRY);
		if (retry > FW_BROADCAST_MAX_RETRY) {
			pr_err("Uclo_BroadcastLoadFW fail (fcu_status = 0x%x), retry = %d\n",
			    fcuSts & FCU_AUTH_STS_MASK, retry);
			return EINVAL;
		}
	}
	return 0;
}

static int
qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
    struct icp_firml_dram_desc *dram_desc, unsigned int size)
{
	int ret;

	ret = bus_dma_mem_create(&dram_desc->dram_mem,
	    handle->accel_dev->dma_tag, 1, BUS_SPACE_MAXADDR, size, 0);
	if (ret != 0)
		return ret;
	dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr;
	dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr;
	dram_desc->dram_size = size;
	return 0;
}

static void
qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
    struct icp_firml_dram_desc *dram_desc)
{
	if (handle && dram_desc && dram_desc->dram_base_addr_v)
		bus_dma_mem_free(&dram_desc->dram_mem);

	if (dram_desc)
		explicit_bzero(dram_desc, sizeof(*dram_desc));
}

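/*
 * Copy a signed image into DMA-able memory laid out for the FCU: an
 * icp_qat_auth_chunk header followed by the CSS header, the FWSK modulus
 * (zero-padded) and exponent, the signature, and finally the image body,
 * recording the bus address of each piece in the auth descriptor.
 */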
static int
qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
    const char *image, unsigned int size,
    struct icp_firml_dram_desc *img_desc,
    struct icp_qat_fw_auth_desc **desc)
{
	const struct icp_qat_css_hdr *css_hdr =
	    (const struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));

	if (size >
	    (ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return EINVAL;
	}
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
	    ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset :
	    size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset;
	if (qat_uclo_simg_alloc(handle, img_desc, length)) {
		pr_err("QAT: error, failed to allocate contiguous DRAM\n");
		return ENOMEM;
	}

	auth_chunk = img_desc->dram_base_addr_v;
	auth_chunk->chunk_size = img_desc->dram_size;
	auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr;
	virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset;
	bus_addr = img_desc->dram_bus_addr + simg_offset;
	auth_desc = img_desc->dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
	    sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	    (const void *)(image + sizeof(*css_hdr)),
	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id));
	/* padding */
	explicit_bzero((void *)(uintptr_t)(virt_addr +
	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)),
	    ICP_QAT_CSS_FWSK_PAD_LEN(device_id));

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr +
	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
	    ICP_QAT_CSS_FWSK_PAD_LEN(device_id)),
	    (const void *)(image + sizeof(*css_hdr) +
	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)),
	    sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) +
	    ICP_QAT_CSS_FWSK_PUB_LEN(device_id);
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id);
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	    (const void *)(image + sizeof(*css_hdr) +
	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
	    ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)),
	    ICP_QAT_CSS_SIGNATURE_LEN(device_id));

	bus_addr =
	    ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) +
	    ICP_QAT_CSS_SIGNATURE_LEN(device_id);
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id);

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id);
	memcpy((void *)(uintptr_t)virt_addr,
	    (const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)),
	    auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
		    auth_desc->img_ae_mode_data_low) +
		    sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high =
		    (unsigned int)(bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high =
		    (unsigned int)(bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
		virt_addr += sizeof(struct icp_qat_css_hdr) +
		    ICP_QAT_CSS_FWSK_PUB_LEN(device_id) +
		    ICP_QAT_CSS_SIGNATURE_LEN(device_id);
		auth_desc->ae_mask =
		    ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask &
		    handle->cfg_ae_mask;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

static int
qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i = 0;
	unsigned int fcu_sts;
	unsigned int fcu_sts_csr, fcu_ctl_csr;
	unsigned int loaded_aes = FCU_LOADED_AE_POS;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		fcu_ctl_csr = FCU_CONTROL_C4XXX;
		fcu_sts_csr = FCU_STATUS_C4XXX;
	} else {
		fcu_ctl_csr = FCU_CONTROL;
		fcu_sts_csr = FCU_STATUS;
	}

	for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num)
	{
		int retry = 0;

		if (!((desc->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return EINVAL;
		}
		SET_FCU_CSR(handle, fcu_ctl_csr,
		    (FCU_CTRL_CMD_LOAD |
		    (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ?
			(1 << FCU_CTRL_BROADCAST_POS) : 0) |
		    (i << FCU_CTRL_AE_POS)));

		do {
			pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
			if ((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) {
				loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device(
				    GET_DEV(handle->accel_dev))) ?
				    GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) :
				    (fcu_sts >> FCU_LOADED_AE_POS);
				if (loaded_aes & (1 << i))
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return EINVAL;
		}
	}
	return 0;
}

static int
qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
    const void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO);
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return EINVAL;
	}
	return 0;
}

int
qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
    const void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_firml_dram_desc img_desc;
	int status = 0;

	if (handle->fw_auth) {
		status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size,
		    &img_desc, &desc);
		if (!status)
			status = qat_uclo_auth_fw(handle, desc);

		qat_uclo_simg_free(handle, &img_desc);
	} else {
		if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
			device_printf(NULL,
			    "QAT: PKE service is not allowed because MMP fw will not be loaded for device 0x%x\n",
			    pci_get_device(GET_DEV(handle->accel_dev)));
			return status;
		}
		if (pci_get_device(GET_DEV(handle->accel_dev)) ==
		    ADF_C3XXX_PCI_DEVICE_ID) {
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
			return EINVAL;
		}
		status = qat_uclo_wr_sram_by_words(handle,
		    handle->hal_sram_offset, addr_ptr, mem_size);
	}
	return status;
}

static int
qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
		     const void *addr_ptr,
		     int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO);
	objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK);
	bcopy(addr_ptr, objhdl->obj_buf, mem_size);
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf,
					     filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	free(objhdl->obj_hdr, M_QAT);
out_objhdr_err:
	free(objhdl->obj_buf, M_QAT);
	free(objhdl, M_QAT);
	return ENOMEM;
}

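/*
 * Record the MOF file header in the MOF handle and verify the checksum,
 * which covers everything from the min_ver field to the end of the file.
 */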
static int
qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
			  const struct icp_qat_mof_file_hdr *mof_ptr,
			  u32 mof_size)
{
	unsigned int checksum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;

	mobj_handle->file_id = ICP_QAT_MOF_FID;
	mobj_handle->mof_buf = (const char *)mof_ptr;
	mobj_handle->mof_size = mof_size;

	min_ver_offset =
	    mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver);
	checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver,
					      min_ver_offset);
	if (checksum != mof_ptr->checksum) {
		pr_err("QAT: incorrect MOF checksum\n");
		return EINVAL;
	}
	mobj_handle->checksum = mof_ptr->checksum;
	mobj_handle->min_ver = mof_ptr->min_ver;
	mobj_handle->maj_ver = mof_ptr->maj_ver;
	return 0;
}

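/* Free the MOF object table and the MOF handle itself. */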
void
qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;

	free(mobj_handle->obj_table.obj_hdr, M_QAT);
	mobj_handle->obj_table.obj_hdr = NULL;
	free(handle->mobj_handle, M_QAT);
	handle->mobj_handle = NULL;
}

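/*
 * Look up a named UOF/SUOF object in the MOF object table and return its
 * buffer and size; fails with EFAULT if the name is not present.
 */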
static int
qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
			     const char *obj_name,
			     const char **obj_ptr,
			     unsigned int *obj_size)
{
	unsigned int i;
	struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;

	for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
		if (!strncmp(obj_hdr[i].obj_name,
			     obj_name,
			     ICP_QAT_SUOF_OBJ_NAME_LEN)) {
			*obj_ptr = obj_hdr[i].obj_buf;
			*obj_size = obj_hdr[i].obj_size;
			break;
		}
	}

	if (i >= mobj_handle->obj_table.num_objs) {
		pr_err("QAT: object %s not found inside MOF\n", obj_name);
		return EFAULT;
	}
	return 0;
}

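/*
 * Translate one object chunk header into an object table entry. The chunk
 * offset is resolved against the UOF or SUOF object area depending on the
 * chunk id, and the chunk name against the symbol string table.
 */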
static int
qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
			  struct icp_qat_mof_objhdr *mobj_hdr,
			  struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
{
	if ((strncmp((char *)obj_chunkhdr->chunk_id,
		     ICP_QAT_UOF_IMAG,
		     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) {
		mobj_hdr->obj_buf =
		    (const char *)((unsigned long)obj_chunkhdr->offset +
				   mobj_handle->uobjs_hdr);
	} else if ((strncmp((char *)(obj_chunkhdr->chunk_id),
			    ICP_QAT_SUOF_IMAG,
			    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) {
		mobj_hdr->obj_buf =
		    (const char *)((unsigned long)obj_chunkhdr->offset +
				   mobj_handle->sobjs_hdr);
	} else {
		pr_err("QAT: unsupported chunk id\n");
		return EINVAL;
	}
	mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
	mobj_hdr->obj_name =
	    (char *)(obj_chunkhdr->name + mobj_handle->sym_str);
	return 0;
}

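/*
 * Build the MOF object table: allocate one entry per UOF and SUOF chunk,
 * map each chunk in turn and count the successfully mapped entries.
 */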
static int
qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
{
	struct icp_qat_mof_objhdr *mof_obj_hdr;
	const struct icp_qat_mof_obj_hdr *uobj_hdr;
	const struct icp_qat_mof_obj_hdr *sobj_hdr;
	struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
	struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
	unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0;
	unsigned int *valid_chunks = NULL;
	int ret, i;

	uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
	sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
	if (uobj_hdr)
		uobj_chunk_num = uobj_hdr->num_chunks;
	if (sobj_hdr)
		sobj_chunk_num = sobj_hdr->num_chunks;

	mof_obj_hdr = (struct icp_qat_mof_objhdr *)
	    malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr),
		   M_QAT,
		   M_WAITOK | M_ZERO);

	mobj_handle->obj_table.obj_hdr = mof_obj_hdr;
	valid_chunks = &mobj_handle->obj_table.num_objs;
	uobj_chunkhdr =
	    (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr +
						sizeof(*uobj_hdr));
	sobj_chunkhdr =
	    (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr +
						sizeof(*sobj_hdr));

	/* map uof objects */
	for (i = 0; i < uobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mof_obj_hdr[*valid_chunks],
						&uobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunks)++;
	}

	/* map suof objects */
	for (i = 0; i < sobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mof_obj_hdr[*valid_chunks],
						&sobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunks)++;
	}

	if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) {
		pr_err("QAT: inconsistent UOF/SUOF chunk count\n");
		return EINVAL;
	}
	return 0;
}

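/*
 * Record the symbol string table of the MOF: the chunk starts with its
 * length, followed by the string data itself.
 */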
static void
qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
			 struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	char **sym_str = (char **)&mobj_handle->sym_str;
	unsigned int *sym_size = &mobj_handle->sym_size;
	struct icp_qat_mof_str_table *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset +
						 mobj_handle->mof_buf);
	*sym_str =
	    (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset +
				sizeof(str_table_obj->tab_len));
}

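/*
 * Dispatch one MOF chunk by id: the symbol table is parsed immediately,
 * while the UOF and SUOF object areas are only recorded for later mapping.
 */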
static void
qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
		       struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	if (!strncmp(mof_chunkhdr->chunk_id,
		     ICP_QAT_MOF_SYM_OBJS,
		     ICP_QAT_MOF_OBJ_ID_LEN))
		qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
	else if (!strncmp(mof_chunkhdr->chunk_id,
			  ICP_QAT_UOF_OBJS,
			  ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->uobjs_hdr =
		    mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset;
	else if (!strncmp(mof_chunkhdr->chunk_id,
			  ICP_QAT_SUOF_OBJS,
			  ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->sobjs_hdr =
		    mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset;
}

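/*
 * Validate the MOF file id, chunk count and major/minor version before any
 * chunk is touched.
 */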
static int
qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr)
{
	int maj = mof_hdr->maj_ver & 0xff;
	int min = mof_hdr->min_ver & 0xff;

	if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
		return EINVAL;
	}

	if (mof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: MOF chunk count is incorrect\n");
		return EINVAL;
	}
	if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
		pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
		       maj,
		       min);
		return EINVAL;
	}
	return 0;
}

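/*
 * Top-level MOF mapper. Plain UOF/SUOF files are passed through unchanged;
 * for a real MOF the header is verified, all chunks (symbol table, UOF and
 * SUOF object areas) are mapped, and the object named by obj_name is looked
 * up on behalf of the caller.
 */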
static int
qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
		     const struct icp_qat_mof_file_hdr *mof_ptr,
		     u32 mof_size,
		     const char *obj_name,
		     const char **obj_ptr,
		     unsigned int *obj_size)
{
	struct icp_qat_mof_handle *mobj_handle;
	struct icp_qat_mof_chunkhdr *mof_chunkhdr;
	unsigned short chunks_num;
	int ret;
	unsigned int i;

	if (mof_ptr->file_id == ICP_QAT_UOF_FID ||
	    mof_ptr->file_id == ICP_QAT_SUOF_FID) {
		if (obj_ptr)
			*obj_ptr = (const char *)mof_ptr;
		if (obj_size)
			*obj_size = (unsigned int)mof_size;
		return 0;
	}
	if (qat_uclo_check_mof_format(mof_ptr))
		return EINVAL;
	mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO);
	handle->mobj_handle = mobj_handle;
	ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
	if (ret)
		return ret;
	mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr +
						       sizeof(*mof_ptr));
	chunks_num = mof_ptr->num_chunks;
	/* parse MOF file chunks */
	for (i = 0; i < chunks_num; i++)
		qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
	/* the symbol table and at least one of uobjs/sobjs must be present */
	if (!mobj_handle->sym_str ||
	    (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
		return EINVAL;
	ret = qat_uclo_map_objs_from_mof(mobj_handle);
	if (ret)
		return ret;
	/* seek the specified object inside the MOF */
	ret = qat_uclo_seek_obj_inside_mof(mobj_handle,
					   obj_name,
					   obj_ptr,
					   obj_size);
	if (ret)
		return ret;
	return 0;
}

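/*
 * Public entry point for mapping a firmware object. If obj_name is given,
 * the object is first extracted from a MOF container; the result is then
 * mapped as SUOF (authenticated firmware) or UOF depending on fw_auth.
 */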
int
qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		 const void *addr_ptr,
		 u32 mem_size,
		 const char *obj_name)
{
	const char *obj_addr;
	u32 obj_size;
	int ret;

	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return EINVAL;

	if (obj_name) {
		ret = qat_uclo_map_mof_obj(
		    handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size);
		if (ret)
			return ret;
	} else {
		obj_addr = addr_ptr;
		obj_size = mem_size;
	}

	return (handle->fw_auth) ?
	    qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
	    qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
}

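/*
 * Release everything qat_uclo_map_obj() allocated: the MOF and SUOF handles
 * if present, then the per-AE slice data, the chunk headers and the copied
 * object buffer.
 */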
void
qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (handle->mobj_handle)
		qat_uclo_del_mof(handle);
	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;

	free(obj_handle->uword_buf, M_QAT);
	for (a = 0; a < obj_handle->uimage_num; a++)
		free(obj_handle->ae_uimage[a].page, M_QAT);

	for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
	}

	free(obj_handle->obj_hdr, M_QAT);
	free(obj_handle->obj_buf, M_QAT);
	free(obj_handle, M_QAT);
	handle->obj_handle = NULL;
}

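/*
 * Fetch the microword for one ustore address from the encapsulated page, or
 * use the image fill pattern if no uwblock covers that address. Relocatable
 * pages are indexed by the relative address, fixed pages by the physical
 * one.
 */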
static void
qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
		     struct icp_qat_uclo_encap_page *encap_page,
		     uint64_t *uword,
		     unsigned int addr_p,
		     unsigned int raddr,
		     uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i, addr;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	addr = (encap_page->page_region) ? raddr : addr_p;
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (addr >= encap_page->uwblock[i].start_addr &&
		    addr <= encap_page->uwblock[i].start_addr +
			    encap_page->uwblock[i].words_num - 1) {
			addr -= encap_page->uwblock[i].start_addr;
			addr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd,
			       (void *)(((uintptr_t)encap_page->uwblock[i]
					     .micro_words) +
					addr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

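/*
 * Stream one encapsulated page into an AE's ustore in UWORD_CPYBUF_SIZE
 * chunks, using the coalesced write path when the AE shares its ustore
 * (except on GEN4).
 */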
static void
qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_uclo_encap_page *encap_page,
			    unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat,
	       obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle,
					     encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i,
					     fill_pat);

		if (obj_handle->ae_data[ae].shareable_ustore &&
		    !IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))))
			/* copy the buffer to ustore */
			qat_hal_wr_coalesce_uwords(handle,
						   (unsigned char)ae,
						   uw_physical_addr,
						   cpylen,
						   obj_handle->uword_buf);
		else
			/* copy the buffer to ustore */
			qat_hal_wr_uwords(handle,
					  (unsigned char)ae,
					  uw_physical_addr,
					  cpylen,
					  obj_handle->uword_buf);
		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

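/*
 * Write an image's default page to every AE it is assigned to, then point
 * the assigned contexts at that page and set their PCs to the image entry
 * address.
 */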
static void
qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
			struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae = 0;
	int ctx;
	struct icp_qat_uclo_aedata *aed;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address
	 */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
		unsigned long ae_assigned = image->ae_assigned;

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		if (!test_bit(ae, &ae_assigned))
			continue;

		aed = &obj_handle->ae_data[ae];
		/* find the slice to which this image is assigned */
		for (s = 0; s < aed->slice_num; s++) {
			if (image->ctx_assigned &
			    aed->ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= aed->slice_num)
			continue;
		page = aed->ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = aed->ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			aed->ae_slices[s].cur_page[ctx] =
			    (ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle,
				     (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle,
			       (unsigned char)ae,
			       image->ctx_assigned,
			       image->entry_address);
	}
}

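/*
 * Authenticate and load every image in the SUOF image table, broadcasting
 * an image to all AEs where it requests that.
 */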
static int
qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_firml_dram_desc img_desc;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		if (qat_uclo_map_auth_fw(handle,
					 (const char *)simg_hdr[i].simg_buf,
					 (unsigned int)(simg_hdr[i].simg_len),
					 &img_desc,
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_is_broadcast(handle, i)) {
			if (qat_uclo_broadcast_load_fw(handle, desc))
				goto wr_err;
		} else {
			if (qat_uclo_load_fw(handle, desc))
				goto wr_err;
		}
		qat_uclo_simg_free(handle, &img_desc);
	}

	return 0;
wr_err:
	qat_uclo_simg_free(handle, &img_desc);
	return EINVAL;
}

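/*
 * Initialize global variables and each image's ustore, then write all UOF
 * image pages to their assigned AEs.
 */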
static int
qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

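/* Write all firmware images, choosing the SUOF or UOF path by fw_auth. */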
int
qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
				   qat_uclo_wr_uof_img(handle);
}

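/* Record the mask of AEs that images may be written to; zero is rejected. */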
int
qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
			 unsigned int cfg_ae_mask)
{
	if (!cfg_ae_mask)
		return EINVAL;

	handle->cfg_ae_mask = cfg_ae_mask;
	return 0;
}