1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include "qat_freebsd.h"
5 #include "adf_cfg.h"
6 #include "adf_common_drv.h"
7 #include "adf_accel_devices.h"
8 #include "icp_qat_uclo.h"
9 #include "icp_qat_fw.h"
10 #include "icp_qat_fw_init_admin.h"
11 #include "adf_cfg_strings.h"
12 #include "adf_transport_access_macros.h"
13 #include "adf_transport_internal.h"
14 #include <linux/delay.h>
15 #include "adf_accel_devices.h"
16 #include "adf_common_drv.h"
17 #include "icp_qat_hal.h"
18 #include "icp_qat_uclo.h"
19
20 #define BAD_REGADDR 0xffff
21 #define MAX_RETRY_TIMES 1000000
22 #define INIT_CTX_ARB_VALUE 0x0
23 #define INIT_CTX_ENABLE_VALUE 0x0
24 #define INIT_PC_VALUE 0x0
25 #define INIT_WAKEUP_EVENTS_VALUE 0x1
26 #define INIT_SIG_EVENTS_VALUE 0x1
27 #define INIT_CCENABLE_VALUE 0x2000
28 #define RST_CSR_QAT_LSB 20
29 #define RST_CSR_AE_LSB 0
30 #define MC_TIMESTAMP_ENABLE (0x1 << 7)
31
32 #define IGNORE_W1C_MASK \
33 ((~(1 << CE_BREAKPOINT_BITPOS)) & \
34 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
35 (~(1 << CE_REG_PAR_ERR_BITPOS)))
36 #define INSERT_IMMED_GPRA_CONST(inst, const_val) \
37 (inst = ((inst & 0xFFFF00C03FFull) | \
38 ((((const_val) << 12) & 0x0FF00000ull) | \
39 (((const_val) << 10) & 0x0003FC00ull))))
40 #define INSERT_IMMED_GPRB_CONST(inst, const_val) \
41 (inst = ((inst & 0xFFFF00FFF00ull) | \
42 ((((const_val) << 12) & 0x0FF00000ull) | \
43 (((const_val) << 0) & 0x000000FFull))))
44
45 #define AE(handle, ae) ((handle)->hal_handle->aes[ae])
46
/*
 * Canned AE microword program (raw micro-instructions).
 * NOTE(review): no consumer is visible in this chunk; presumably used
 * for batch/4-byte initialization elsewhere in the file — confirm.
 */
static const uint64_t inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull,
				    0x0F040000300ull, 0x0F440000300ull,
				    0x0FC066C0000ull, 0x0F0000C0300ull,
				    0x0F0000C0300ull, 0x0F0000C0300ull,
				    0x0A021000000ull };
52
/*
 * AE microword program written to ustore by qat_hal_clear_gpr() on
 * pre-GEN4 devices; when executed by every context it zeroes the
 * general-purpose and transfer registers.
 */
static const uint64_t inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};
77
/*
 * GEN4 (CPM2X) variant of the register-clearing microword program;
 * written to ustore by qat_hal_clear_gpr() for the first AE of each
 * 4-AE tuning group on GEN4 devices.
 */
static const uint64_t inst_CPM2X[] = {
	0x0F0000C0000ull, 0x0D802C00011ull, 0x0F0000C0001ull, 0x0FC066C0001ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000500300ull,
	0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0A0580C0000ull,
	0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull,
	0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull,
	0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull,
	0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull,
	0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull,
	0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull,
	0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull,
	0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull,
	0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull,
	0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull,
	0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull,
	0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull,
	0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 0x0A040AC0000ull,
	0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull,
	0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, 0x0A040EC0000ull,
	0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81341C010ull,
	0x0E000000001ull, 0x0E000010000ull,
};
100
101 void
102 qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
103 unsigned char ae,
104 unsigned int ctx_mask)
105 {
106 AE(handle, ae).live_ctx_mask = ctx_mask;
107 }
108
109 #define CSR_RETRY_TIMES 500
110 static int
111 qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
112 unsigned char ae,
113 unsigned int csr,
114 unsigned int *value)
115 {
116 unsigned int iterations = CSR_RETRY_TIMES;
117
118 do {
119 *value = GET_AE_CSR(handle, ae, csr);
120 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
121 return 0;
122 } while (iterations--);
123
124 pr_err("QAT: Read CSR timeout\n");
125 return EFAULT;
126 }
127
128 static int
129 qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
130 unsigned char ae,
131 unsigned int csr,
132 unsigned int value)
133 {
134 unsigned int iterations = CSR_RETRY_TIMES;
135
136 do {
137 SET_AE_CSR(handle, ae, csr, value);
138 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
139 return 0;
140 } while (iterations--);
141
142 pr_err("QAT: Write CSR Timeout\n");
143 return EFAULT;
144 }
145
/*
 * Read the indirect wakeup-events CSR of one context.
 * CSR_CTX_POINTER selects which context's indirect registers are
 * visible, so it is saved and restored around the access.
 */
static void
qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae,
			 unsigned char ctx,
			 unsigned int *events)
{
	unsigned int cur_ctx;

	/* Save the current context pointer. */
	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	/* Point at the requested context and read its wakeup events. */
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
	/* Restore the previous context pointer. */
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
159
/*
 * Busy-wait until at least @cycles AE clock cycles have elapsed, as
 * measured by the 16-bit PROFILE_COUNT CSR (counter wrap is handled).
 * If @chk_inactive is non-zero, additionally return as soon as the AE
 * leaves the active state (ACS_ABO bit clear), but only after a
 * minimum of 8 cycles have passed.
 * Returns 0 on success, EFAULT if MAX_RETRY_TIMES polls elapse first.
 */
static int
qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae,
		    unsigned int cycles,
		    int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	/* Start with ABO set so the early-exit below cannot trigger
	 * unless chk_inactive actually refreshes csr. */
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);

		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
		cur_cnt &= 0xffff;
		elapsed_cycles = cur_cnt - base_cnt;

		/* The profile counter is 16 bits wide; undo a wrap. */
		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 time cycles elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (times < 0) {
		pr_err("QAT: wait_num_cycles time out\n");
		return EFAULT;
	}
	return 0;
}
194
/*
 * Return the shared-control-store neighbour of @ae: AEs are paired
 * (0/1, 2/3, ...), so the neighbour is the other member of the pair.
 */
void
qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh)
{
	*ae_neigh = ae ^ 1;
}
200
201 #define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
202 #define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
203
204 int
205 qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
206 unsigned char ae,
207 unsigned char mode)
208 {
209 unsigned int csr, new_csr;
210
211 if (mode != 4 && mode != 8) {
212 pr_err("QAT: bad ctx mode=%d\n", mode);
213 return EINVAL;
214 }
215
216 /* Sets the accelaration engine context mode to either four or eight */
217 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
218 csr = IGNORE_W1C_MASK & csr;
219 new_csr = (mode == 4) ? SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
220 CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
221 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
222 return 0;
223 }
224
225 int
226 qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
227 unsigned char ae,
228 unsigned char mode)
229 {
230 unsigned int csr, new_csr;
231
232 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
233 pr_err("QAT: No next neigh for CPM2X\n");
234 return EINVAL;
235 }
236
237 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
238 csr &= IGNORE_W1C_MASK;
239
240 new_csr = (mode) ? SET_BIT(csr, CE_NN_MODE_BITPOS) :
241 CLR_BIT(csr, CE_NN_MODE_BITPOS);
242
243 if (new_csr != csr)
244 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
245
246 return 0;
247 }
248
249 int
250 qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
251 unsigned char ae,
252 enum icp_qat_uof_regtype lm_type,
253 unsigned char mode)
254 {
255 unsigned int csr, new_csr;
256
257 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
258 csr &= IGNORE_W1C_MASK;
259 switch (lm_type) {
260 case ICP_LMEM0:
261 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
262 CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
263 break;
264 case ICP_LMEM1:
265 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
266 CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
267 break;
268 case ICP_LMEM2:
269 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
270 CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
271 break;
272 case ICP_LMEM3:
273 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
274 CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
275 break;
276 default:
277 pr_err("QAT: lmType = 0x%x\n", lm_type);
278 return EINVAL;
279 }
280
281 if (new_csr != csr)
282 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
283 return 0;
284 }
285
286 void
287 qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
288 unsigned char ae,
289 unsigned char mode)
290 {
291 unsigned int csr, new_csr;
292
293 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
294 csr &= IGNORE_W1C_MASK;
295 new_csr = (mode) ? SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
296 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
297 if (new_csr != csr)
298 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
299 }
300
301 void
302 qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle,
303 unsigned char ae,
304 unsigned char mode)
305 {
306 unsigned int csr, new_csr;
307
308 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr);
309 new_csr = (mode) ? SET_BIT(csr, MMC_SHARE_CS_BITPOS) :
310 CLR_BIT(csr, MMC_SHARE_CS_BITPOS);
311 if (new_csr != csr)
312 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, new_csr);
313 }
314
/*
 * Translate a (register type, register number) pair into the encoded
 * register address used when assembling AE micro-instructions.
 * Returns BAD_REGADDR (0xffff) for an unknown register type.
 */
static unsigned short
qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num)
{
	unsigned short reg_addr;

	switch (type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		/* absolute GPR: flag bit 0x80 plus 7-bit register number */
		reg_addr = 0x80 | (reg_num & 0x7f);
		break;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		/* context-relative GPR: plain 5-bit register number */
		reg_addr = reg_num & 0x1f;
		break;
	case ICP_SR_RD_REL:
	case ICP_SR_WR_REL:
	case ICP_SR_REL:
		/* context-relative SRAM transfer register */
		reg_addr = 0x180 | (reg_num & 0x1f);
		break;
	case ICP_SR_ABS:
		/* absolute SRAM transfer register (2-bit number, shifted) */
		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_WR_REL:
	case ICP_DR_REL:
		/* context-relative DRAM transfer register */
		reg_addr = 0x1c0 | (reg_num & 0x1f);
		break;
	case ICP_DR_ABS:
		/* absolute DRAM transfer register (2-bit number, shifted) */
		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
		break;
	case ICP_NEIGH_REL:
		/* next-neighbour register */
		reg_addr = 0x280 | (reg_num & 0x1f);
		break;
	/* Local-memory windows use fixed addresses. */
	case ICP_LMEM0:
		reg_addr = 0x200;
		break;
	case ICP_LMEM1:
		reg_addr = 0x220;
		break;
	case ICP_LMEM2:
		reg_addr = 0x2c0;
		break;
	case ICP_LMEM3:
		reg_addr = 0x2e0;
		break;
	case ICP_NO_DEST:
		/* no destination: encode the raw number in the low byte */
		reg_addr = 0x300 | (reg_num & 0xff);
		break;
	default:
		reg_addr = BAD_REGADDR;
		break;
	}
	return reg_addr;
}
369
370 static u32
371 qat_hal_get_ae_mask_gen4(struct icp_qat_fw_loader_handle *handle)
372 {
373 u32 tg = 0, ae;
374 u32 valid_ae_mask = 0;
375
376 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
377 if (handle->hal_handle->ae_mask & (1 << ae)) {
378 tg = ae / 4;
379 valid_ae_mask |= (1 << (tg * 2));
380 }
381 }
382 return valid_ae_mask;
383 }
384
/*
 * Assert reset on the accel engines and QAT slices by setting their
 * bits in the generation-specific ICP reset CSR(s).
 * GEN3 parts have two CPP clusters (the second is used when more than
 * 16 AEs are present); GEN4 uses a single CPP0 CSR; older parts use
 * the legacy ICP_RESET CSR.
 */
void
qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr[MAX_CPP_NUM];
	unsigned int ae_reset_val[MAX_CPP_NUM];
	unsigned int valid_ae_mask, valid_slice_mask;
	unsigned int cpp_num = 1;
	unsigned int i;

	/* Pick the reset CSR(s) for this device generation. */
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		ae_reset_csr[1] = ICP_RESET_CPP1;
		/* AEs above bit 15 live on the second CPP cluster. */
		if (handle->hal_handle->ae_mask > 0xffff)
			++cpp_num;
	} else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
	} else {
		ae_reset_csr[0] = ICP_RESET;
	}

	for (i = 0; i < cpp_num; i++) {
		/* Compute the AE/slice masks belonging to this CPP. */
		if (i == 0) {
			if (IS_QAT_GEN4(
				pci_get_device(GET_DEV(handle->accel_dev)))) {
				valid_ae_mask =
				    qat_hal_get_ae_mask_gen4(handle);
				valid_slice_mask =
				    handle->hal_handle->slice_mask;
			} else {
				valid_ae_mask =
				    handle->hal_handle->ae_mask & 0xFFFF;
				valid_slice_mask =
				    handle->hal_handle->slice_mask & 0x3F;
			}
		} else {
			/* Second CPP: upper 16 AEs / upper slice bits. */
			valid_ae_mask =
			    (handle->hal_handle->ae_mask >> AES_PER_CPP) &
			    0xFFFF;
			valid_slice_mask =
			    (handle->hal_handle->slice_mask >> SLICES_PER_CPP) &
			    0x3F;
		}

		/* Set (assert) the reset bits for AEs and slices. */
		ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		ae_reset_val[i] |= valid_ae_mask << RST_CSR_AE_LSB;
		ae_reset_val[i] |= valid_slice_mask << RST_CSR_QAT_LSB;
		SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]);
	}
}
434
435 static void
436 qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
437 unsigned char ae,
438 unsigned int ctx_mask,
439 unsigned int ae_csr,
440 unsigned int csr_val)
441 {
442 unsigned int ctx, cur_ctx;
443
444 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
445
446 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
447 if (!(ctx_mask & (1 << ctx)))
448 continue;
449 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
450 qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
451 }
452
453 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
454 }
455
/*
 * Read the indirect CSR @ae_csr of a single context into @csr_val.
 * The context pointer is saved and restored around the access.
 */
static void
qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae,
		    unsigned char ctx,
		    unsigned int ae_csr,
		    unsigned int *csr_val)
{
	unsigned int cur_ctx;

	/* Save, redirect, read, restore. */
	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
470
471 static void
472 qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
473 unsigned char ae,
474 unsigned int ctx_mask,
475 unsigned int events)
476 {
477 unsigned int ctx, cur_ctx;
478
479 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
480 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
481 if (!(ctx_mask & (1 << ctx)))
482 continue;
483 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
484 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
485 }
486 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
487 }
488
489 static void
490 qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
491 unsigned char ae,
492 unsigned int ctx_mask,
493 unsigned int events)
494 {
495 unsigned int ctx, cur_ctx;
496
497 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
498 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
499 if (!(ctx_mask & (1 << ctx)))
500 continue;
501 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
502 qat_hal_wr_ae_csr(handle,
503 ae,
504 CTX_WAKEUP_EVENTS_INDIRECT,
505 events);
506 }
507 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
508 }
509
510 static int
511 qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
512 {
513 unsigned int base_cnt, cur_cnt;
514 unsigned char ae;
515 unsigned long ae_mask = handle->hal_handle->ae_mask;
516 int times = MAX_RETRY_TIMES;
517
518 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
519 {
520 qat_hal_rd_ae_csr(handle,
521 ae,
522 PROFILE_COUNT,
523 (unsigned int *)&base_cnt);
524 base_cnt &= 0xffff;
525
526 do {
527 qat_hal_rd_ae_csr(handle,
528 ae,
529 PROFILE_COUNT,
530 (unsigned int *)&cur_cnt);
531 cur_cnt &= 0xffff;
532 } while (times-- && (cur_cnt == base_cnt));
533
534 if (times < 0) {
535 pr_err("QAT: AE%d is inactive!!\n", ae);
536 return EFAULT;
537 }
538 }
539
540 return 0;
541 }
542
543 int
544 qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
545 unsigned int ae)
546 {
547 unsigned int enable = 0, active = 0;
548
549 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
550 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
551 if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
552 (active & (1 << ACS_ABO_BITPOS)))
553 return 1;
554 else
555 return 0;
556 }
557
/*
 * Restart the AE timestamp counters: stop the global timestamp clock,
 * zero every enabled AE's TIMESTAMP_LOW/HIGH CSRs, then re-enable the
 * clock so all counters restart in step.
 */
static void
qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int misc_ctl_csr, misc_ctl;
	unsigned char ae;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	/* GEN3/GEN4 parts use the C4XXX misc-control CSR location. */
	misc_ctl_csr =
	    (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) ?
	    MISC_CONTROL_C4XXX :
	    MISC_CONTROL;
	/* stop the timestamp timers */
	misc_ctl = GET_GLB_CSR(handle, misc_ctl_csr);
	if (misc_ctl & MC_TIMESTAMP_ENABLE)
		SET_GLB_CSR(handle,
			    misc_ctl_csr,
			    misc_ctl & (~MC_TIMESTAMP_ENABLE));

	/* Zero each enabled AE's 64-bit timestamp counter. */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
	}
	/* start timestamp timers */
	SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
}
584
585 #define ESRAM_AUTO_TINIT BIT(2)
586 #define ESRAM_AUTO_TINIT_DONE BIT(3)
587 #define ESRAM_AUTO_INIT_USED_CYCLES (1640)
588 #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
589
/*
 * Trigger embedded-SRAM auto-initialization and wait for completion.
 * Only DH895xCC needs this; every other device returns 0 immediately.
 * If a previous init already completed (TINIT and TINIT_DONE both
 * set), nothing is done.  Returns 0 on success, EFAULT on timeout.
 */
static int
qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	uintptr_t csr_addr =
	    ((uintptr_t)handle->hal_ep_csr_addr_v + ESRAM_AUTO_INIT_CSR_OFFSET);
	unsigned int csr_val;
	int times = 30;

	if (pci_get_device(GET_DEV(handle->accel_dev)) !=
	    ADF_DH895XCC_PCI_DEVICE_ID)
		return 0;

	csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;
	/* Re-read before setting the trigger bit. */
	csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
	csr_val |= ESRAM_AUTO_TINIT;

	ADF_CSR_WR(handle->hal_misc_addr_v, csr_addr, csr_val);
	/* Poll for the done bit, waiting ~1640 AE cycles between polls. */
	do {
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);

	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
	if (times < 0) {
		pr_err("QAT: Fail to init eSram!\n");
		return EFAULT;
	}
	return 0;
}
620
621 #define SHRAM_INIT_CYCLES 2060
/*
 * Take the accel engines and QAT slices out of reset and bring them to
 * a sane initial state:
 *   1. clear the AE/slice reset bits (per CPP cluster), verifying the
 *      bits actually cleared;
 *   2. enable the AE and slice clocks;
 *   3. confirm every AE is executing (profile counter ticking);
 *   4. program default context state (PC, arb, CC, wakeup/sig events);
 *   5. run eSRAM init (DH895xCC) and wait for shared-RAM init cycles;
 *   6. restart the timestamp counters.
 * Returns 0 on success, EFAULT if any step times out.
 */
int
qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr[MAX_CPP_NUM];
	unsigned int ae_reset_val[MAX_CPP_NUM];
	unsigned int cpp_num = 1;
	unsigned int valid_ae_mask, valid_slice_mask;
	unsigned char ae;
	unsigned int i;
	unsigned int clk_csr[MAX_CPP_NUM];
	unsigned int clk_val[MAX_CPP_NUM];
	unsigned int times = 100;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	/* Pick reset and clock CSRs for this device generation. */
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		ae_reset_csr[1] = ICP_RESET_CPP1;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0;
		clk_csr[1] = ICP_GLOBAL_CLK_ENABLE_CPP1;
		/* AEs above bit 15 live on the second CPP cluster. */
		if (handle->hal_handle->ae_mask > 0xffff)
			++cpp_num;
	} else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0;
	} else {
		ae_reset_csr[0] = ICP_RESET;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE;
	}

	for (i = 0; i < cpp_num; i++) {
		/* Compute the AE/slice masks belonging to this CPP. */
		if (i == 0) {
			if (IS_QAT_GEN4(
				pci_get_device(GET_DEV(handle->accel_dev)))) {
				valid_ae_mask =
				    qat_hal_get_ae_mask_gen4(handle);
				valid_slice_mask =
				    handle->hal_handle->slice_mask;
			} else {
				valid_ae_mask =
				    handle->hal_handle->ae_mask & 0xFFFF;
				valid_slice_mask =
				    handle->hal_handle->slice_mask & 0x3F;
			}
		} else {
			valid_ae_mask =
			    (handle->hal_handle->ae_mask >> AES_PER_CPP) &
			    0xFFFF;
			valid_slice_mask =
			    (handle->hal_handle->slice_mask >> SLICES_PER_CPP) &
			    0x3F;
		}
		/* write to the reset csr */
		ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		ae_reset_val[i] &= ~(valid_ae_mask << RST_CSR_AE_LSB);
		ae_reset_val[i] &= ~(valid_slice_mask << RST_CSR_QAT_LSB);
		/* Retry until the hardware reports the bits cleared. */
		do {
			SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]);
			if (!(times--))
				goto out_err;
			ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		} while (
		    (valid_ae_mask | (valid_slice_mask << RST_CSR_QAT_LSB)) &
		    ae_reset_val[i]);
		/* enable clock */
		clk_val[i] = GET_GLB_CSR(handle, clk_csr[i]);
		clk_val[i] |= valid_ae_mask << 0;
		clk_val[i] |= valid_slice_mask << 20;
		SET_GLB_CSR(handle, clk_csr[i], clk_val[i]);
	}
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_hal_wr_ae_csr(handle,
				  ae,
				  CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		/* Reset every context's PC to 0. */
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle,
					 ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle,
				      ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return EFAULT;
}
729
730 static void
731 qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
732 unsigned char ae,
733 unsigned int ctx_mask)
734 {
735 unsigned int ctx;
736
737 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
738 ctx &= IGNORE_W1C_MASK &
739 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
740 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
741 }
742
/*
 * Compute the parity (XOR of all 64 bits) of @word.
 * Returns 1 for odd parity, 0 for even.
 */
static uint64_t
qat_hal_parity_64bit(uint64_t word)
{
	unsigned int shift;

	/* Fold halves together; bit 0 ends up holding the parity. */
	for (shift = 32; shift > 0; shift >>= 1)
		word ^= word >> shift;
	return word & 1;
}
754
/*
 * Recompute the seven ECC bits (bits 0x2C..0x32) of a 44-bit AE
 * microword.  Each ECC bit is the parity of a fixed data-bit subset;
 * the masks only cover bits 0..43, so the previously written ECC bits
 * never feed back into later parity computations.
 */
static uint64_t
qat_hal_set_uword_ecc(uint64_t uword)
{
	static const uint64_t ecc_masks[] = {
		0xff800007fffULL, /* bit 0x2C */
		0x1f801ff801fULL, /* bit 0x2D */
		0xe387e0781e1ULL, /* bit 0x2E */
		0x7cb8e388e22ULL, /* bit 0x2F */
		0xaf5b2c93244ULL, /* bit 0x30 */
		0xf56d5525488ULL, /* bit 0x31 */
		0xdaf69a46910ULL, /* bit 0x32 */
	};
	unsigned int i;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	for (i = 0; i < 7; i++)
		uword |=
		    qat_hal_parity_64bit(ecc_masks[i] & uword) << (0x2C + i);
	return uword;
}
774
/*
 * Write @words_num microwords to the AE's ustore starting at @uaddr,
 * recomputing the ECC bits of each word.  On GEN4 (CPM2X) the ustore
 * is shared within a 4-AE tuning group, so the write is refused if any
 * other AE of the group is currently active.
 */
void
qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
		  unsigned char ae,
		  unsigned int uaddr,
		  unsigned int words_num,
		  const uint64_t *uword)
{
	unsigned int ustore_addr;
	unsigned int i, ae_in_group;

	if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		/* First AE of this AE's 4-member tuning group. */
		ae_in_group = ae / 4 * 4;

		for (i = 0; i < AE_TG_NUM_CPM2X; i++) {
			if (ae_in_group + i == ae)
				continue;
			if (ae_in_group + i >= handle->hal_handle->ae_max_num)
				break;
			/* Refuse to touch shared ustore under a running AE. */
			if (qat_hal_check_ae_active(handle, ae_in_group + i)) {
				pr_err(
				    "ae%d in T_group is active, cannot write to ustore!\n",
				    ae_in_group + i);
				return;
			}
		}
	}

	/* Save the ustore address CSR, then enable auto-increment (ECS). */
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi;
		uint64_t tmp;

		/* Each word is written as ECC-corrected lower/upper halves. */
		tmp = qat_hal_set_uword_ecc(uword[i]);
		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
		uwrd_hi = (unsigned int)(tmp >> 0x20);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	/* Restore the original ustore address CSR. */
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
817
818 void
819 qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle,
820 unsigned char ae,
821 unsigned int uaddr,
822 unsigned int words_num,
823 u64 *uword)
824 {
825 u64 *even_uwrods, *odd_uwords;
826 unsigned char neigh_ae, odd_ae, even_ae;
827 int i, even_cpy_cnt = 0, odd_cpy_cnt = 0;
828
829 even_uwrods =
830 malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO);
831 odd_uwords =
832 malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO);
833 qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
834 if (ae & 1) {
835 odd_ae = ae;
836 even_ae = neigh_ae;
837 } else {
838 odd_ae = neigh_ae;
839 even_ae = ae;
840 }
841 for (i = 0; i < words_num; i++) {
842 if ((uaddr + i) & 1)
843 odd_uwords[odd_cpy_cnt++] = uword[i];
844 else
845 even_uwrods[even_cpy_cnt++] = uword[i];
846 }
847 if (even_cpy_cnt)
848 qat_hal_wr_uwords(handle,
849 even_ae,
850 (uaddr + 1) / 2,
851 even_cpy_cnt,
852 even_uwrods);
853 if (odd_cpy_cnt)
854 qat_hal_wr_uwords(
855 handle, odd_ae, uaddr / 2, odd_cpy_cnt, odd_uwords);
856 free(even_uwrods, M_QAT);
857 free(odd_uwords, M_QAT);
858 }
859
860 static void
861 qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
862 unsigned char ae,
863 unsigned int ctx_mask)
864 {
865 unsigned int ctx;
866
867 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
868 ctx &= IGNORE_W1C_MASK;
869 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
870 ctx |= (ctx_mask << CE_ENABLE_BITPOS);
871 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
872 }
873
/*
 * Zero all absolute SRAM- and DRAM-read transfer registers of every
 * enabled accel engine.
 */
static void
qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned short reg;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
			qat_hal_init_rd_xfer(
			    handle, ae, 0, ICP_SR_RD_ABS, reg, 0);
			qat_hal_init_rd_xfer(
			    handle, ae, 0, ICP_DR_RD_ABS, reg, 0);
		}
	}
}
891
/*
 * Zero the GPR/transfer registers of every enabled AE by loading a
 * small register-clearing microcode program into ustore and running
 * all contexts, then restoring the pre-existing AE state.
 *
 * NOTE(review): scs_flag and savctx are overwritten on each iteration
 * of the first loop, so the restore loop applies the values captured
 * from the *last* AE to every AE — verify this is intended.
 * NOTE(review): the 'times' retry budget is shared across all AEs in
 * the wait loop rather than reset per engine.
 *
 * Returns 0 on success, EINVAL if an AE fails to finish the program.
 */
static int
qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned int savctx = 0;
	unsigned int scs_flag = 0;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	int ret = 0;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		/* Temporarily drop shared-control-store mode (saved in
		 * scs_flag for restore below). */
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
		csr_val &= IGNORE_W1C_MASK;
		/* GEN4 has no next-neighbour mode. */
		if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
			csr_val |= CE_NN_MODE;
		}
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);

		/* Load the clearing program; on GEN4 ustore is shared
		 * per 4-AE group, so write only for the group leader. */
		if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
			if (ae % 4 == 0)
				qat_hal_wr_uwords(handle,
						  ae,
						  0,
						  ARRAY_SIZE(inst_CPM2X),
						  (const uint64_t *)inst_CPM2X);
		} else {
			qat_hal_wr_uwords(handle,
					  ae,
					  0,
					  ARRAY_SIZE(inst),
					  (const uint64_t *)inst);
		}
		/* Point all contexts' PCs at 0 and start them. */
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ctx_mask,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(
		    handle, ae, ctx_mask, CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		/* wait for AE to finish */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		if (times < 0) {
			pr_err("QAT: clear GPR of AE %d failed", ae);
			return EINVAL;
		}
		/* Restore the saved AE state. */
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		if (scs_flag)
			csr_val |= (1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle,
				  ae,
				  CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ctx_mask,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle,
					 ae,
					 ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle,
				      ae,
				      ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}
989
/*
 * On C3XXX / 200XX devices, verify that IMR2 was enabled by the BIOS:
 * the PCI command register's memory-space bit must be set and the FCU
 * RAM base-address-low CSR must be non-zero.  Other devices pass
 * unconditionally.  Returns 0 when OK, EINVAL otherwise.
 */
static int
qat_hal_check_imr(struct icp_qat_fw_loader_handle *handle)
{
	device_t dev = accel_to_pci_dev(handle->accel_dev);
	u8 reg_val = 0;

	/* Only C3XXX and 200XX need the IMR2 check. */
	if (pci_get_device(GET_DEV(handle->accel_dev)) !=
		ADF_C3XXX_PCI_DEVICE_ID &&
	    pci_get_device(GET_DEV(handle->accel_dev)) !=
		ADF_200XX_PCI_DEVICE_ID)
		return 0;

	reg_val = pci_read_config(dev, 0x04, 1);
	/*
	 * PCI command register memory bit and rambaseaddr_lo address
	 * are checked to confirm IMR2 is enabled in BIOS settings
	 */
	if ((reg_val & 0x2) && GET_FCU_CSR(handle, FCU_RAMBASE_ADDR_LO))
		return 0;

	return EINVAL;
}
1012
1013 int
1014 qat_hal_init(struct adf_accel_dev *accel_dev)
1015 {
1016 unsigned char ae;
1017 unsigned int cap_offset, ae_offset, ep_offset;
1018 unsigned int sram_offset = 0;
1019 unsigned int max_en_ae_id = 0;
1020 int ret = 0;
1021 unsigned long ae_mask;
1022 struct icp_qat_fw_loader_handle *handle;
1023 if (!accel_dev) {
1024 return EFAULT;
1025 }
1026 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
1027 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1028 struct adf_bar *misc_bar =
1029 &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
1030 struct adf_bar *sram_bar;
1031
1032 handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
1033
1034 handle->hal_misc_addr_v = misc_bar->virt_addr;
1035 handle->accel_dev = accel_dev;
1036 if (pci_get_device(GET_DEV(handle->accel_dev)) ==
1037 ADF_DH895XCC_PCI_DEVICE_ID ||
1038 IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1039 sram_bar =
1040 &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
1041 if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev))))
1042 sram_offset =
1043 0x400000 + accel_dev->aram_info->mmp_region_offset;
1044 handle->hal_sram_addr_v = sram_bar->virt_addr;
1045 handle->hal_sram_offset = sram_offset;
1046 handle->hal_sram_size = sram_bar->size;
1047 }
1048 GET_CSR_OFFSET(pci_get_device(GET_DEV(handle->accel_dev)),
1049 cap_offset,
1050 ae_offset,
1051 ep_offset);
1052 handle->hal_cap_g_ctl_csr_addr_v = cap_offset;
1053 handle->hal_cap_ae_xfer_csr_addr_v = ae_offset;
1054 handle->hal_ep_csr_addr_v = ep_offset;
1055 handle->hal_cap_ae_local_csr_addr_v =
1056 ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
1057 LOCAL_TO_XFER_REG_OFFSET);
1058 handle->fw_auth = (pci_get_device(GET_DEV(handle->accel_dev)) ==
1059 ADF_DH895XCC_PCI_DEVICE_ID) ?
1060 false :
1061 true;
1062 if (handle->fw_auth && qat_hal_check_imr(handle)) {
1063 device_printf(GET_DEV(accel_dev), "IMR2 not enabled in BIOS\n");
1064 ret = EINVAL;
1065 goto out_hal_handle;
1066 }
1067
1068 handle->hal_handle =
1069 malloc(sizeof(*handle->hal_handle), M_QAT, M_WAITOK | M_ZERO);
1070 handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
1071 handle->hal_handle->ae_mask = hw_data->ae_mask;
1072 handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
1073 handle->hal_handle->slice_mask = hw_data->accel_mask;
1074 handle->cfg_ae_mask = 0xFFFFFFFF;
1075 /* create AE objects */
1076 if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1077 handle->hal_handle->upc_mask = 0xffff;
1078 handle->hal_handle->max_ustore = 0x2000;
1079 } else {
1080 handle->hal_handle->upc_mask = 0x1ffff;
1081 handle->hal_handle->max_ustore = 0x4000;
1082 }
1083
1084 ae_mask = hw_data->ae_mask;
1085
1086 for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE)
1087 {
1088 handle->hal_handle->aes[ae].free_addr = 0;
1089 handle->hal_handle->aes[ae].free_size =
1090 handle->hal_handle->max_ustore;
1091 handle->hal_handle->aes[ae].ustore_size =
1092 handle->hal_handle->max_ustore;
1093 handle->hal_handle->aes[ae].live_ctx_mask =
1094 ICP_QAT_UCLO_AE_ALL_CTX;
1095 max_en_ae_id = ae;
1096 }
1097 handle->hal_handle->ae_max_num = max_en_ae_id + 1;
1098 /* take all AEs out of reset */
1099 if (qat_hal_clr_reset(handle)) {
1100 device_printf(GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
1101 ret = EIO;
1102 goto out_err;
1103 }
1104 qat_hal_clear_xfer(handle);
1105 if (!handle->fw_auth) {
1106 if (qat_hal_clear_gpr(handle)) {
1107 ret = EIO;
1108 goto out_err;
1109 }
1110 }
1111
1112 /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
1113 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
1114 {
1115 unsigned int csr_val = 0;
1116
1117 qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
1118 csr_val |= 0x1;
1119 qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
1120 }
1121 accel_dev->fw_loader->fw_loader = handle;
1122 return 0;
1123
1124 out_err:
1125 free(handle->hal_handle, M_QAT);
1126 out_hal_handle:
1127 free(handle, M_QAT);
1128 return ret;
1129 }
1130
1131 void
1132 qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
1133 {
1134 if (!handle)
1135 return;
1136 free(handle->hal_handle, M_QAT);
1137 free(handle, M_QAT);
1138 }
1139
1140 int
1141 qat_hal_start(struct icp_qat_fw_loader_handle *handle)
1142 {
1143 unsigned char ae = 0;
1144 int retry = 0;
1145 unsigned int fcu_sts = 0;
1146 unsigned int fcu_ctl_csr, fcu_sts_csr;
1147 unsigned long ae_mask = handle->hal_handle->ae_mask;
1148 u32 ae_ctr = 0;
1149
1150 if (handle->fw_auth) {
1151 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
1152 {
1153 ae_ctr++;
1154 }
1155 if (IS_QAT_GEN3_OR_GEN4(
1156 pci_get_device(GET_DEV(handle->accel_dev)))) {
1157 fcu_ctl_csr = FCU_CONTROL_C4XXX;
1158 fcu_sts_csr = FCU_STATUS_C4XXX;
1159
1160 } else {
1161 fcu_ctl_csr = FCU_CONTROL;
1162 fcu_sts_csr = FCU_STATUS;
1163 }
1164 SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
1165 do {
1166 pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
1167 fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
1168 if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
1169 return ae_ctr;
1170 } while (retry++ < FW_AUTH_MAX_RETRY);
1171 pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n",
1172 ae,
1173 fcu_sts);
1174 return 0;
1175 } else {
1176 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
1177 {
1178 qat_hal_put_wakeup_event(handle,
1179 ae,
1180 0,
1181 IS_QAT_GEN4(
1182 pci_get_device(GET_DEV(
1183 handle->accel_dev))) ?
1184 0x80000000 :
1185 0x10000);
1186 qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
1187 ae_ctr++;
1188 }
1189 return ae_ctr;
1190 }
1191 }
1192
1193 void
1194 qat_hal_stop(struct icp_qat_fw_loader_handle *handle,
1195 unsigned char ae,
1196 unsigned int ctx_mask)
1197 {
1198 if (!handle->fw_auth)
1199 qat_hal_disable_ctx(handle, ae, ctx_mask);
1200 }
1201
1202 void
1203 qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
1204 unsigned char ae,
1205 unsigned int ctx_mask,
1206 unsigned int upc)
1207 {
1208 qat_hal_wr_indr_csr(handle,
1209 ae,
1210 ctx_mask,
1211 CTX_STS_INDIRECT,
1212 handle->hal_handle->upc_mask & upc);
1213 }
1214
1215 static void
1216 qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
1217 unsigned char ae,
1218 unsigned int uaddr,
1219 unsigned int words_num,
1220 uint64_t *uword)
1221 {
1222 unsigned int i, uwrd_lo, uwrd_hi;
1223 unsigned int ustore_addr, misc_control;
1224 unsigned int scs_flag = 0;
1225
1226 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
1227 scs_flag = misc_control & (0x1 << MMC_SHARE_CS_BITPOS);
1228 /*disable scs*/
1229 qat_hal_wr_ae_csr(handle,
1230 ae,
1231 AE_MISC_CONTROL,
1232 misc_control & 0xfffffffb);
1233 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
1234 uaddr |= UA_ECS;
1235 for (i = 0; i < words_num; i++) {
1236 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
1237 uaddr++;
1238 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
1239 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
1240 uword[i] = uwrd_hi;
1241 uword[i] = (uword[i] << 0x20) | uwrd_lo;
1242 }
1243 if (scs_flag)
1244 misc_control |= (0x1 << MMC_SHARE_CS_BITPOS);
1245 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
1246 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
1247 }
1248
1249 void
1250 qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
1251 unsigned char ae,
1252 unsigned int uaddr,
1253 unsigned int words_num,
1254 unsigned int *data)
1255 {
1256 unsigned int i, ustore_addr;
1257
1258 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
1259 uaddr |= UA_ECS;
1260 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
1261 for (i = 0; i < words_num; i++) {
1262 unsigned int uwrd_lo, uwrd_hi, tmp;
1263
1264 uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
1265 ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff);
1266 uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
1267 uwrd_hi |= (bitcount32(data[i] & 0xffff) & 0x1) << 8;
1268 tmp = ((data[i] >> 0x10) & 0xffff);
1269 uwrd_hi |= (bitcount32(tmp) & 0x1) << 9;
1270 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
1271 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
1272 }
1273 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
1274 }
1275
#define MAX_EXEC_INST 100
/*
 * Execute a caller-supplied microprogram on one context of one AE.
 *
 * The @inst_num instructions are written to ustore offset 0 and run on
 * context @ctx until completion (bounded by @max_cycle).  Every piece of
 * state the run disturbs is saved first and restored afterwards: the
 * indirect LM-address CSRs (plus LM2/LM3 and T-index on gen3/gen4), the
 * first @inst_num ustore words (only if @inst_num <= MAX_EXEC_INST), the
 * context PC, wakeup/signal events, CC enable, active context and the
 * context arbiter control.  @code_off non-zero clears a CC_ENABLE bit for
 * the duration of the run.  If @endpc is non-NULL it receives the PC at
 * which the microcode stopped.
 *
 * Returns 0 on success, EINVAL for a bad instruction count, or EFAULT if
 * the microcode does not finish within @max_cycle cycles -- note that in
 * the EFAULT case the saved state is NOT restored.
 */
static int
qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			unsigned char ctx,
			uint64_t *micro_inst,
			unsigned int inst_num,
			int code_off,
			unsigned int max_cycle,
			unsigned int *endpc)
{
	uint64_t savuwords[MAX_EXEC_INST];
	unsigned int ind_lm_addr0, ind_lm_addr1;
	unsigned int ind_lm_addr2, ind_lm_addr3;
	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
	unsigned int ind_lm_addr_byte2, ind_lm_addr_byte3;
	unsigned int ind_t_index, ind_t_index_byte;
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx, scs_flag;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	if (inst_num > handle->hal_handle->max_ustore || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return EINVAL;
	}
	/* save current context */
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &ind_lm_addr_byte0);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, &ind_lm_addr_byte1);
	/* Gen3/gen4 AEs have two extra LM address pairs and a T-index. */
	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, LM_ADDR_2_INDIRECT, &ind_lm_addr2);
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, LM_ADDR_3_INDIRECT, &ind_lm_addr3);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    &ind_lm_addr_byte2);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    &ind_lm_addr_byte3);
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, INDIRECT_T_INDEX, &ind_t_index);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    &ind_t_index_byte);
	}
	/* Remember SCS mode, then clear it while we drive the ustore. */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	/* Save the ustore words we are about to overwrite, if they fit. */
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, &ind_cnt_sig);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		qat_hal_rd_indr_csr(
		    handle, ae, ctx, CTX_STS_INDIRECT, &ctx_status);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* restore to saved context */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) :
				CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX,
			    ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX,
			    ind_lm_addr_byte1);
	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), LM_ADDR_2_INDIRECT, ind_lm_addr2);
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), LM_ADDR_3_INDIRECT, ind_lm_addr3);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    ind_lm_addr_byte2);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    ind_lm_addr_byte3);
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), INDIRECT_T_INDEX, ind_t_index);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    ind_t_index_byte);
	}
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}
1436
/*
 * Read relative register @reg_num of type @reg_type as seen by context
 * @ctx on engine @ae, returning its value through @data.
 *
 * The register cannot be read directly from the host, so a single probe
 * instruction is patched into ustore word 0, executed, and the result is
 * sampled from the ALU_OUT CSR.  The active context, arbiter control,
 * ustore word 0 and shared-control-store mode are all saved and restored.
 *
 * Returns 0 on success or EINVAL for an unmappable register address.
 */
static int
qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
		   unsigned char ae,
		   unsigned char ctx,
		   enum icp_qat_uof_regtype reg_type,
		   unsigned short reg_num,
		   unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	unsigned int scs_flag = 0;
	unsigned int csr_val = 0, newcsr_val = 0;
	u64 insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return EINVAL;
	}
	/*
	 * Build the probe instruction: the GPA form encodes reg_addr in
	 * bits 0..9, every other type in bits 10..19 (different opcode).
	 */
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	/* Save and clear shared-control-store mode. */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	/* Make @ctx the active context so the relative register resolves. */
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	/* Save ustore word 0, then overwrite it with the probe. */
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
	/* Restore ustore word 0 and all saved state. */
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) :
				CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}
1514
/*
 * Write @data into relative register @reg_num of type @reg_type for
 * context @ctx of engine @ae.
 *
 * Builds a four-instruction microprogram that loads the two 16-bit halves
 * of @data into the destination register and runs it through
 * qat_hal_exec_micro_inst().  The upper byte of each half is carried in
 * the instruction's immediate field; the lower byte is sourced through an
 * ICP_NO_DEST register address.
 *
 * Returns EINVAL for an unmappable destination, otherwise the result of
 * qat_hal_exec_micro_inst().
 */
static int
qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
		   unsigned char ae,
		   unsigned char ctx,
		   enum icp_qat_uof_regtype reg_type,
		   unsigned short reg_num,
		   unsigned int data)
{
	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
	uint64_t insts[] = { 0x0F440000000ull,
			     0x0F040000000ull,
			     0x0F0000C0300ull,
			     0x0E000010000ull };
	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
	const int imm_w1 = 0, imm_w0 = 1;

	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (dest_addr == BAD_REGADDR) {
		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
		return EINVAL;
	}

	data16lo = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16lo));
	/*
	 * Operand order depends on the register bank: the GPA form takes
	 * the destination in bits 0..9, other types take it in bits 10..19.
	 */
	switch (reg_type) {
	case ICP_GPA_REL:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		break;
	default:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);

		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
		break;
	}

	/* num_inst * 0x5 cycles are allotted for the microcode to finish. */
	return qat_hal_exec_micro_inst(
	    handle, ae, ctx, insts, num_inst, code_off, num_inst * 0x5, NULL);
}
1562
/*
 * Return the number of micro-instructions qat_hal_concat_micro_code()
 * appends for each batch LM-init entry (the length of the inst_4b
 * template).
 */
int
qat_hal_get_ins_num(void)
{
	return ARRAY_SIZE(inst_4b);
}
1568
1569 static int
1570 qat_hal_concat_micro_code(uint64_t *micro_inst,
1571 unsigned int inst_num,
1572 unsigned int size,
1573 unsigned int addr,
1574 unsigned int *value)
1575 {
1576 int i;
1577 unsigned int cur_value;
1578 const uint64_t *inst_arr;
1579 unsigned int fixup_offset;
1580 int usize = 0;
1581 unsigned int orig_num;
1582 unsigned int delta;
1583
1584 orig_num = inst_num;
1585 fixup_offset = inst_num;
1586 cur_value = value[0];
1587 inst_arr = inst_4b;
1588 usize = ARRAY_SIZE(inst_4b);
1589 for (i = 0; i < usize; i++)
1590 micro_inst[inst_num++] = inst_arr[i];
1591 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
1592 fixup_offset++;
1593 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
1594 fixup_offset++;
1595 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
1596 fixup_offset++;
1597 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
1598
1599 delta = inst_num - orig_num;
1600
1601 return (int)delta;
1602 }
1603
1604 static int
1605 qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
1606 unsigned char ae,
1607 unsigned char ctx,
1608 int *pfirst_exec,
1609 uint64_t *micro_inst,
1610 unsigned int inst_num)
1611 {
1612 int stat = 0;
1613 unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
1614 unsigned int gprb0 = 0, gprb1 = 0;
1615
1616 if (*pfirst_exec) {
1617 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
1618 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
1619 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
1620 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
1621 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
1622 *pfirst_exec = 0;
1623 }
1624 stat = qat_hal_exec_micro_inst(
1625 handle, ae, ctx, micro_inst, inst_num, 1, inst_num * 0x5, NULL);
1626 if (stat != 0)
1627 return EFAULT;
1628 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
1629 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
1630 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
1631 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
1632 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
1633
1634 return 0;
1635 }
1636
1637 int
1638 qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
1639 unsigned char ae,
1640 struct icp_qat_uof_batch_init *lm_init_header)
1641 {
1642 struct icp_qat_uof_batch_init *plm_init;
1643 uint64_t *micro_inst_arry;
1644 int micro_inst_num;
1645 int alloc_inst_size;
1646 int first_exec = 1;
1647 int stat = 0;
1648
1649 if (!lm_init_header)
1650 return 0;
1651 plm_init = lm_init_header->next;
1652 alloc_inst_size = lm_init_header->size;
1653 if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
1654 alloc_inst_size = handle->hal_handle->max_ustore;
1655 micro_inst_arry = malloc(alloc_inst_size * sizeof(uint64_t),
1656 M_QAT,
1657 M_WAITOK | M_ZERO);
1658 micro_inst_num = 0;
1659 while (plm_init) {
1660 unsigned int addr, *value, size;
1661
1662 ae = plm_init->ae;
1663 addr = plm_init->addr;
1664 value = plm_init->value;
1665 size = plm_init->size;
1666 micro_inst_num += qat_hal_concat_micro_code(
1667 micro_inst_arry, micro_inst_num, size, addr, value);
1668 plm_init = plm_init->next;
1669 }
1670 /* exec micro codes */
1671 if (micro_inst_arry && micro_inst_num > 0) {
1672 micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
1673 stat = qat_hal_exec_micro_init_lm(handle,
1674 ae,
1675 0,
1676 &first_exec,
1677 micro_inst_arry,
1678 micro_inst_num);
1679 }
1680 free(micro_inst_arry, M_QAT);
1681 return stat;
1682 }
1683
1684 static int
1685 qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1686 unsigned char ae,
1687 unsigned char ctx,
1688 enum icp_qat_uof_regtype reg_type,
1689 unsigned short reg_num,
1690 unsigned int val)
1691 {
1692 int status = 0;
1693 unsigned int reg_addr;
1694 unsigned int ctx_enables;
1695 unsigned short mask;
1696 unsigned short dr_offset = 0x10;
1697
1698 status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1699 if (CE_INUSE_CONTEXTS & ctx_enables) {
1700 if (ctx & 0x1) {
1701 pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
1702 return EINVAL;
1703 }
1704 mask = 0x1f;
1705 dr_offset = 0x20;
1706 } else {
1707 mask = 0x0f;
1708 }
1709 if (reg_num & ~mask)
1710 return EINVAL;
1711 reg_addr = reg_num + (ctx << 0x5);
1712 switch (reg_type) {
1713 case ICP_SR_RD_REL:
1714 case ICP_SR_REL:
1715 SET_AE_XFER(handle, ae, reg_addr, val);
1716 break;
1717 case ICP_DR_RD_REL:
1718 case ICP_DR_REL:
1719 SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
1720 break;
1721 default:
1722 status = EINVAL;
1723 break;
1724 }
1725 return status;
1726 }
1727
/*
 * Write @data to write-transfer (or next-neighbor) register @reg_num of
 * type @reg_type for context @ctx.
 *
 * Write-transfer registers cannot be stored directly from the host, so
 * GPB0 is saved, a small microprogram loads the two 16-bit halves of
 * @data into GPB0 and moves it out to the transfer register, and GPB0 is
 * restored afterwards.
 *
 * Returns EINVAL for an invalid context/register combination, otherwise
 * the status of the microcode execution.
 */
static int
qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			unsigned char ctx,
			enum icp_qat_uof_regtype reg_type,
			unsigned short reg_num,
			unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	uint64_t micro_inst[] = { 0x0F440000000ull,
				  0x0F040000000ull,
				  0x0A000000000ull,
				  0x0F0000C0300ull,
				  0x0E000010000ull };
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		/* 4-context mode: only even contexts are usable. */
		if (ctx & 0x1) {
			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
			return EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return EINVAL;
	}
	/* Save GPB0, which the microprogram below uses as a staging GPR. */
	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	/*
	 * Each 16-bit half of @data: the upper byte travels in the
	 * instruction's immediate field, the lower byte is sourced via an
	 * ICP_NO_DEST register address.
	 */
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	/* Third instruction moves the staged GPR out to the xfer register. */
	micro_inst[0x2] = micro_inst[0x2] | ((xfr_addr & 0x3ff) << 20) |
	    ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(
	    handle, ae, ctx, micro_inst, num_inst, code_off, dly, NULL);
	/* Restore the clobbered GPB0. */
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}
1785
1786 static int
1787 qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1788 unsigned char ae,
1789 unsigned char ctx,
1790 unsigned short nn,
1791 unsigned int val)
1792 {
1793 unsigned int ctx_enables;
1794 int stat = 0;
1795
1796 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1797 ctx_enables &= IGNORE_W1C_MASK;
1798 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1799
1800 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
1801 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
1802 return stat;
1803 }
1804
1805 static int
1806 qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle *handle,
1807 unsigned char ae,
1808 unsigned short absreg_num,
1809 unsigned short *relreg,
1810 unsigned char *ctx)
1811 {
1812 unsigned int ctx_enables;
1813
1814 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1815 if (ctx_enables & CE_INUSE_CONTEXTS) {
1816 /* 4-ctx mode */
1817 *relreg = absreg_num & 0x1F;
1818 *ctx = (absreg_num >> 0x4) & 0x6;
1819 } else {
1820 /* 8-ctx mode */
1821 *relreg = absreg_num & 0x0F;
1822 *ctx = (absreg_num >> 0x4) & 0x7;
1823 }
1824 return 0;
1825 }
1826
1827 int
1828 qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
1829 unsigned char ae,
1830 unsigned long ctx_mask,
1831 enum icp_qat_uof_regtype reg_type,
1832 unsigned short reg_num,
1833 unsigned int regdata)
1834 {
1835 int stat = 0;
1836 unsigned short reg;
1837 unsigned char ctx = 0;
1838 enum icp_qat_uof_regtype type;
1839
1840 if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
1841 return EINVAL;
1842
1843 do {
1844 if (ctx_mask == 0) {
1845 qat_hal_convert_abs_to_rel(
1846 handle, ae, reg_num, ®, &ctx);
1847 type = reg_type - 1;
1848 } else {
1849 reg = reg_num;
1850 type = reg_type;
1851 if (!test_bit(ctx, &ctx_mask))
1852 continue;
1853 }
1854 stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
1855 if (stat) {
1856 pr_err("QAT: write gpr fail\n");
1857 return EINVAL;
1858 }
1859 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1860
1861 return 0;
1862 }
1863
1864 int
1865 qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1866 unsigned char ae,
1867 unsigned long ctx_mask,
1868 enum icp_qat_uof_regtype reg_type,
1869 unsigned short reg_num,
1870 unsigned int regdata)
1871 {
1872 int stat = 0;
1873 unsigned short reg;
1874 unsigned char ctx = 0;
1875 enum icp_qat_uof_regtype type;
1876
1877 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1878 return EINVAL;
1879
1880 do {
1881 if (ctx_mask == 0) {
1882 qat_hal_convert_abs_to_rel(
1883 handle, ae, reg_num, ®, &ctx);
1884 type = reg_type - 3;
1885 } else {
1886 reg = reg_num;
1887 type = reg_type;
1888 if (!test_bit(ctx, &ctx_mask))
1889 continue;
1890 }
1891 stat = qat_hal_put_rel_wr_xfer(
1892 handle, ae, ctx, type, reg, regdata);
1893 if (stat) {
1894 pr_err("QAT: write wr xfer fail\n");
1895 return EINVAL;
1896 }
1897 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1898
1899 return 0;
1900 }
1901
1902 int
1903 qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1904 unsigned char ae,
1905 unsigned long ctx_mask,
1906 enum icp_qat_uof_regtype reg_type,
1907 unsigned short reg_num,
1908 unsigned int regdata)
1909 {
1910 int stat = 0;
1911 unsigned short reg;
1912 unsigned char ctx = 0;
1913 enum icp_qat_uof_regtype type;
1914
1915 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1916 return EINVAL;
1917
1918 do {
1919 if (ctx_mask == 0) {
1920 qat_hal_convert_abs_to_rel(
1921 handle, ae, reg_num, ®, &ctx);
1922 type = reg_type - 3;
1923 } else {
1924 reg = reg_num;
1925 type = reg_type;
1926 if (!test_bit(ctx, &ctx_mask))
1927 continue;
1928 }
1929 stat = qat_hal_put_rel_rd_xfer(
1930 handle, ae, ctx, type, reg, regdata);
1931 if (stat) {
1932 pr_err("QAT: write rd xfer fail\n");
1933 return EINVAL;
1934 }
1935 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1936
1937 return 0;
1938 }
1939
1940 int
1941 qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1942 unsigned char ae,
1943 unsigned long ctx_mask,
1944 unsigned short reg_num,
1945 unsigned int regdata)
1946 {
1947 int stat = 0;
1948 unsigned char ctx;
1949
1950 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
1951 pr_err("QAT: No next neigh for CPM2X\n");
1952 return EINVAL;
1953 }
1954
1955 if (ctx_mask == 0)
1956 return EINVAL;
1957
1958 for_each_set_bit(ctx, &ctx_mask, ICP_QAT_UCLO_MAX_CTX)
1959 {
1960 stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1961 if (stat) {
1962 pr_err("QAT: write neigh error\n");
1963 return EINVAL;
1964 }
1965 }
1966
1967 return 0;
1968 }
Cache object: ac9b11b308c60e6b5a5ae23b911dff2e
|