1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include <linux/atomic.h>
5 #include <linux/compiler.h>
6 #include <adf_accel_devices.h>
7 #include <adf_common_drv.h>
8 #include <adf_pf2vf_msg.h>
9 #include <adf_dev_err.h>
10 #include <adf_cfg.h>
11 #include <adf_fw_counters.h>
12 #include <adf_gen2_hw_data.h>
13 #include "adf_c4xxx_hw_data.h"
14 #include "adf_c4xxx_reset.h"
15 #include "adf_c4xxx_inline.h"
16 #include "adf_c4xxx_ras.h"
17 #include "adf_c4xxx_misc_error_stats.h"
18 #include "adf_c4xxx_pke_replay_stats.h"
19 #include "adf_heartbeat.h"
20 #include "icp_qat_fw_init_admin.h"
21 #include "icp_qat_hw.h"
22
23 /* accel unit information */
24 static struct adf_accel_unit adf_c4xxx_au_32_ae[] =
25 { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
26 { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL },
27 { 0x4, 0x30, 0xF000, 0xF000, 4, ADF_ACCEL_SERVICE_NULL },
28 { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
29 { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL },
30 { 0x20, 0xC00, 0xF0000000, 0xF0000000, 4, ADF_ACCEL_SERVICE_NULL } };
31
32 static struct adf_accel_unit adf_c4xxx_au_24_ae[] = {
33 { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
34 { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL },
35 { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
36 { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL },
37 };
38
39 static struct adf_accel_unit adf_c4xxx_au_12_ae[] = {
40 { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
41 { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
42 };
43
44 static struct adf_accel_unit adf_c4xxx_au_emulation[] =
45 { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
46 { 0x2, 0xC, 0xC0, 0xC0, 2, ADF_ACCEL_SERVICE_NULL } };
47
48 /* Accel engine threads for each of the following services:
49  * <num_asym_thd>, <num_sym_thd>, <num_dc_thd>
50  */
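/* For example, an adf_ae_info entry of { 2, 6, 3 } assigns 2 threads to
 * asymmetric crypto, 6 threads to symmetric crypto and 3 threads to
 * compression on that accel engine; an all-zero entry corresponds to an
 * engine that is not used by the given SKU.
 */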
51
52 /* Thread mapping for SKU capable of symmetric cryptography */
53 static const struct adf_ae_info adf_c4xxx_32_ae_sym[] =
54 { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
55 { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
56 { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 },
57 { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
58 { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 },
59 { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
60 { 2, 6, 3 }, { 2, 6, 3 } };
61
62 static const struct adf_ae_info adf_c4xxx_24_ae_sym[] =
63 { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
64 { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
65 { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
66 { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
67 { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 },
68 { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
69 { 0, 0, 0 }, { 0, 0, 0 } };
70
71 static const struct adf_ae_info adf_c4xxx_12_ae_sym[] =
72 { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
73 { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
74 { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
75 { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
76 { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
77 { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
78 { 0, 0, 0 }, { 0, 0, 0 } };
79
80 /* Thread mapping for SKU capable of asymmetric and symmetric cryptography */
81 static const struct adf_ae_info adf_c4xxx_32_ae[] =
82 { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
83 { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
84 { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 },
85 { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
86 { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 },
87 { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
88 { 2, 5, 3 }, { 2, 5, 3 } };
89
90 static const struct adf_ae_info adf_c4xxx_24_ae[] =
91 { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
92 { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
93 { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
94 { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
95 { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 },
96 { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
97 { 0, 0, 0 }, { 0, 0, 0 } };
98
99 static const struct adf_ae_info adf_c4xxx_12_ae[] =
100 { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
101 { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
102 { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
103 { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
104 { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
105 { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
106 { 0, 0, 0 }, { 0, 0, 0 } };
107
108 static struct adf_hw_device_class c4xxx_class = {.name = ADF_C4XXX_DEVICE_NAME,
109 .type = DEV_C4XXX,
110 .instances = 0 };
111
112 struct icp_qat_fw_init_c4xxx_admin_hb_stats {
113 struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_THREADS_PER_AE];
114 };
115
116 struct adf_hb_count {
117 u16 ae_thread[ADF_NUM_THREADS_PER_AE];
118 };
119
120 static const int sku_cy_au[] = ADF_C4XXX_NUM_CY_AU;
121 static const int sku_dc_au[] = ADF_C4XXX_NUM_DC_AU;
122 static const int sku_inline_au[] = ADF_C4XXX_NUM_INLINE_AU;
123
124 /*
125  * C4xxx devices introduce new fuses and soft straps, so the enable
126  * masks are derived differently from previous generation devices.
127 */
128
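/*
 * get_accel_mask() - Derive the accelerator enable mask.
 *
 * An accelerator is reported as available only when it is disabled neither
 * by fuse (fusectl0) nor by soft strap (softstrappull0); the result is
 * limited to the valid accelerator bits.
 */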
129 static u32
130 get_accel_mask(struct adf_accel_dev *accel_dev)
131 {
132 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
133 u32 fusectl0;
134 u32 softstrappull0;
135
136 fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
137 softstrappull0 =
138 pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4);
139
140 return (~(fusectl0 | softstrappull0)) & ADF_C4XXX_ACCELERATORS_MASK;
141 }
142
143 static u32
144 get_ae_mask(struct adf_accel_dev *accel_dev)
145 {
146 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
147 u32 fusectl1;
148 u32 softstrappull1;
149
150 fusectl1 = pci_read_config(pdev, ADF_C4XXX_FUSECTL1_OFFSET, 4);
151 softstrappull1 =
152 pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL1_OFFSET, 4);
153
154 /* Assume that AE and AU disable masks are consistent, so no
155 * checks against the AU mask are performed
156 */
157 return (~(fusectl1 | softstrappull1)) & ADF_C4XXX_ACCELENGINES_MASK;
158 }
159
160 static u32
161 get_num_accels(struct adf_hw_device_data *self)
162 {
163 return self ? hweight32(self->accel_mask) : 0;
164 }
165
166 static u32
167 get_num_aes(struct adf_hw_device_data *self)
168 {
169 return self ? hweight32(self->ae_mask) : 0;
170 }
171
172 static u32
173 get_misc_bar_id(struct adf_hw_device_data *self)
174 {
175 return ADF_C4XXX_PMISC_BAR;
176 }
177
178 static u32
179 get_etr_bar_id(struct adf_hw_device_data *self)
180 {
181 return ADF_C4XXX_ETR_BAR;
182 }
183
184 static u32
185 get_sram_bar_id(struct adf_hw_device_data *self)
186 {
187 return ADF_C4XXX_SRAM_BAR;
188 }
189
190 static inline void
191 c4xxx_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower)
192 {
193 *lower = lower_32_bits(value);
194 *upper = upper_32_bits(value);
195 }
196
197 /**
198 * c4xxx_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
199 *
200 * @param accel_dev Structure holding accelerator data.
201 * @return 0 on success, error code otherwise.
202 */
203 static int
204 c4xxx_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
205 {
206 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
207 struct adf_bar *misc_bar =
208 &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)];
209 struct resource *csr = misc_bar->virt_addr;
210 unsigned long accel_mask = hw_device->accel_mask;
211 u32 accel = 0;
212 u64 timer_val = ADF_C4XXX_SSM_WDT_64BIT_DEFAULT_VALUE;
213 u64 timer_val_pke = ADF_C4XXX_SSM_WDT_PKE_64BIT_DEFAULT_VALUE;
214 u32 ssm_wdt_low = 0, ssm_wdt_high = 0;
215 u32 ssm_wdt_pke_low = 0, ssm_wdt_pke_high = 0;
216
217 	/* Convert the 64-bit Slice Hang watchdog values into 32-bit values
218 	 * for MMIO writes to the 32-bit CSRs.
219 */
220 c4xxx_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
221 c4xxx_unpack_ssm_wdtimer(timer_val_pke,
222 &ssm_wdt_pke_high,
223 &ssm_wdt_pke_low);
224
225 /* Configures Slice Hang watchdogs */
226 for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
227 {
228 ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTL_OFFSET(accel), ssm_wdt_low);
229 ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTH_OFFSET(accel), ssm_wdt_high);
230 ADF_CSR_WR(csr,
231 ADF_C4XXX_SSMWDTPKEL_OFFSET(accel),
232 ssm_wdt_pke_low);
233 ADF_CSR_WR(csr,
234 ADF_C4XXX_SSMWDTPKEH_OFFSET(accel),
235 ssm_wdt_pke_high);
236 }
237
238 return 0;
239 }
240
241 /**
242 * c4xxx_check_slice_hang() - Check slice hang status
243 *
244  * Return: true if a slice hang interrupt was serviced.
245 */
246 static bool
247 c4xxx_check_slice_hang(struct adf_accel_dev *accel_dev)
248 {
249 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
250 struct adf_bar *misc_bar =
251 &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)];
252 struct resource *csr = misc_bar->virt_addr;
253 u32 slice_hang_offset;
254 u32 ia_slice_hang_offset;
255 u32 fw_irq_source;
256 u32 ia_irq_source;
257 u32 accel_num = 0;
258 bool handled = false;
259 u32 errsou10 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU10);
260 unsigned long accel_mask;
261
262 accel_mask = hw_device->accel_mask;
263
264 for_each_set_bit(accel_num, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
265 {
266 if (!(errsou10 & ADF_C4XXX_IRQ_SRC_MASK(accel_num)))
267 continue;
268
269 fw_irq_source = ADF_CSR_RD(csr, ADF_INTSTATSSM(accel_num));
270 ia_irq_source =
271 ADF_CSR_RD(csr, ADF_C4XXX_IAINTSTATSSM(accel_num));
272 ia_slice_hang_offset =
273 ADF_C4XXX_IASLICEHANGSTATUS_OFFSET(accel_num);
274
275 /* FW did not clear SliceHang error, IA logs and clears
276 * the error
277 */
278 if ((fw_irq_source & ADF_INTSTATSSM_SHANGERR) &&
279 (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) {
280 slice_hang_offset =
281 ADF_C4XXX_SLICEHANGSTATUS_OFFSET(accel_num);
282
283 /* Bring hung slice out of reset */
284 adf_csr_fetch_and_and(csr, slice_hang_offset, ~0);
285
286 /* Log SliceHang error and clear an interrupt */
287 handled = adf_handle_slice_hang(accel_dev,
288 accel_num,
289 csr,
290 ia_slice_hang_offset);
291 atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
292 }
293 /* FW cleared SliceHang, IA only logs an error */
294 else if (!(fw_irq_source & ADF_INTSTATSSM_SHANGERR) &&
295 (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) {
296 /* Log SliceHang error and clear an interrupt */
297 handled = adf_handle_slice_hang(accel_dev,
298 accel_num,
299 csr,
300 ia_slice_hang_offset);
301
302 atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
303 }
304
305 /* Clear the associated IA interrupt */
306 adf_csr_fetch_and_and(csr,
307 ADF_C4XXX_IAINTSTATSSM(accel_num),
308 ~BIT(13));
309 }
310
311 return handled;
312 }
313
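/*
 * get_eth_doorbell_msg() - Service doorbell messages from the Ethernet
 * device.
 *
 * If ERRSOU11 reports a doorbell interrupt, decode the per-PF doorbell
 * registers, record a pending reset acknowledgement received on PF0 and
 * clear the interrupt source.
 *
 * Return: true if a doorbell interrupt was serviced.
 */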
314 static bool
315 get_eth_doorbell_msg(struct adf_accel_dev *accel_dev)
316 {
317 struct resource *csr =
318 (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
319 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
320 u32 errsou11 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU11);
321 u32 doorbell_int = ADF_CSR_RD(csr, ADF_C4XXX_ETH_DOORBELL_INT);
322 u32 eth_doorbell_reg[ADF_C4XXX_NUM_ETH_DOORBELL_REGS];
323 bool handled = false;
324 u32 data_reg;
325 u8 i;
326
327 	/* Reset is not acknowledged until the reset ACK message is received */
328 hw_device->reset_ack = false;
329
330 /* Check if doorbell interrupt occurred. */
331 if (errsou11 & ADF_C4XXX_DOORBELL_INT_SRC) {
332 /* Decode doorbell messages from ethernet device */
333 for (i = 0; i < ADF_C4XXX_NUM_ETH_DOORBELL_REGS; i++) {
334 eth_doorbell_reg[i] = 0;
335 if (doorbell_int & BIT(i)) {
336 data_reg = ADF_C4XXX_ETH_DOORBELL(i);
337 eth_doorbell_reg[i] = ADF_CSR_RD(csr, data_reg);
338 device_printf(
339 GET_DEV(accel_dev),
340 				    "Received doorbell message (0x%08x)\n",
341 eth_doorbell_reg[i]);
342 }
343 }
344 /* Only need to check PF0 */
345 if (eth_doorbell_reg[0] == ADF_C4XXX_IOSFSB_RESET_ACK) {
346 device_printf(GET_DEV(accel_dev),
347 					      "Received pending reset ACK\n");
348 hw_device->reset_ack = true;
349 }
350 /* Clear the interrupt source */
351 ADF_CSR_WR(csr,
352 ADF_C4XXX_ETH_DOORBELL_INT,
353 ADF_C4XXX_ETH_DOORBELL_MASK);
354 handled = true;
355 }
356
357 return handled;
358 }
359
360 static enum dev_sku_info
361 get_sku(struct adf_hw_device_data *self)
362 {
363 int aes = get_num_aes(self);
364 u32 capabilities = self->accel_capabilities_mask;
365 bool sym_only_sku = false;
366
367 /* Check if SKU is capable only of symmetric cryptography
368 * via device capabilities.
369 */
370 if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
371 !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
372 !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
373 sym_only_sku = true;
374
375 switch (aes) {
376 case ADF_C4XXX_HIGH_SKU_AES:
377 if (sym_only_sku)
378 return DEV_SKU_1_CY;
379 return DEV_SKU_1;
380 case ADF_C4XXX_MED_SKU_AES:
381 if (sym_only_sku)
382 return DEV_SKU_2_CY;
383 return DEV_SKU_2;
384 case ADF_C4XXX_LOW_SKU_AES:
385 if (sym_only_sku)
386 return DEV_SKU_3_CY;
387 return DEV_SKU_3;
388 	}
389
390 return DEV_SKU_UNKNOWN;
391 }
392
393 static bool
394 c4xxx_check_prod_sku(struct adf_accel_dev *accel_dev)
395 {
396 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
397 u32 fusectl0 = 0;
398
399 fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
400
401 if (fusectl0 & ADF_C4XXX_FUSE_PROD_SKU_MASK)
402 return true;
403 else
404 return false;
405 }
406
407 static bool
408 adf_check_sym_only_sku_c4xxx(struct adf_accel_dev *accel_dev)
409 {
410 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
411 u32 legfuse = 0;
412
413 legfuse = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
414
415 if (legfuse & ADF_C4XXX_LEGFUSE_BASE_SKU_MASK)
416 return true;
417 else
418 return false;
419 }
420
421 static void
422 adf_enable_slice_hang_detection(struct adf_accel_dev *accel_dev)
423 {
424 struct resource *csr;
425 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
426 u32 accel = 0;
427 unsigned long accel_mask;
428
429 csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
430 accel_mask = hw_device->accel_mask;
431
432 for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
433 {
434 /* Unmasks Slice Hang interrupts so they can be seen by IA. */
435 ADF_CSR_WR(csr,
436 ADF_C4XXX_SHINTMASKSSM_OFFSET(accel),
437 ADF_C4XXX_SHINTMASKSSM_VAL);
438 }
439 }
440
441 static void
442 adf_enable_ras(struct adf_accel_dev *accel_dev)
443 {
444 struct resource *csr;
445 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
446 u32 accel = 0;
447 unsigned long accel_mask;
448
449 csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
450 accel_mask = hw_device->accel_mask;
451
452 for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
453 {
454 ADF_CSR_WR(csr,
455 ADF_C4XXX_GET_SSMFEATREN_OFFSET(accel),
456 ADF_C4XXX_SSMFEATREN_VAL);
457 }
458 }
459
460 static u32
461 get_clock_speed(struct adf_hw_device_data *self)
462 {
463 /* c4xxx CPP clock is equal to high-speed clock */
464 return self->clock_frequency;
465 }
466
467 static void
468 adf_enable_error_interrupts(struct adf_accel_dev *accel_dev)
469 {
470 struct resource *csr, *aram_csr;
471 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
472 u32 accel = 0;
473 unsigned long accel_mask;
474
475 csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
476 aram_csr = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
477 accel_mask = hw_device->accel_mask;
478
479 for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
480 {
481 /* Enable shared memory, MMP, CPP, PPERR interrupts
482 * for a given accel
483 */
484 ADF_CSR_WR(csr, ADF_C4XXX_GET_INTMASKSSM_OFFSET(accel), 0);
485
486 /* Enable SPP parity error interrupts for a given accel */
487 ADF_CSR_WR(csr, ADF_C4XXX_GET_SPPPARERRMSK_OFFSET(accel), 0);
488
489 		/* Enable SSM soft parity error reporting on a given accel */
490 ADF_CSR_WR(csr,
491 ADF_C4XXX_GET_SSMSOFTERRORPARITY_MASK_OFFSET(accel),
492 ADF_C4XXX_SSMSOFTERRORPARITY_MASK_VAL);
493 }
494
495 /* Enable interrupts for VFtoPF0_127. */
496 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK4, ADF_C4XXX_VF2PF0_31);
497 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK5, ADF_C4XXX_VF2PF32_63);
498 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK6, ADF_C4XXX_VF2PF64_95);
499 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK7, ADF_C4XXX_VF2PF96_127);
500
501 /* Enable interrupts signaling ECC correctable errors for all AEs */
502 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK8, ADF_C4XXX_ERRMSK8_COERR);
503 ADF_CSR_WR(csr,
504 ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE,
505 ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE_MASK);
506
507 /* Enable error interrupts reported by ERRSOU9 */
508 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK9, ADF_C4XXX_ERRMSK9_IRQ_MASK);
509
510 	/* Enable uncorrectable error reporting on all the AEs */
511 ADF_CSR_WR(csr,
512 ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE,
513 ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE_MASK);
514
515 /* Enable CPP Agent to report command parity errors */
516 ADF_CSR_WR(csr,
517 ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE,
518 ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE_MASK);
519
520 /* Enable reporting of RI memory parity errors */
521 ADF_CSR_WR(csr,
522 ADF_C4XXX_RI_MEM_PAR_ERR_EN0,
523 ADF_C4XXX_RI_MEM_PAR_ERR_EN0_MASK);
524
525 /* Enable reporting of TI memory parity errors */
526 ADF_CSR_WR(csr,
527 ADF_C4XXX_TI_MEM_PAR_ERR_EN0,
528 ADF_C4XXX_TI_MEM_PAR_ERR_EN0_MASK);
529 ADF_CSR_WR(csr,
530 ADF_C4XXX_TI_MEM_PAR_ERR_EN1,
531 ADF_C4XXX_TI_MEM_PAR_ERR_EN1_MASK);
532
533 /* Enable SSM errors */
534 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK10, ADF_C4XXX_ERRMSK10_SSM_ERR);
535
536 /* Enable miscellaneous errors (ethernet doorbell aram, ici, ice) */
537 ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK11, ADF_C4XXX_ERRMSK11_ERR);
538
539 /* RI CPP bus interface error detection and reporting. */
540 ADF_CSR_WR(csr, ADF_C4XXX_RICPPINTCTL, ADF_C4XXX_RICPP_EN);
541
542 /* TI CPP bus interface error detection and reporting. */
543 ADF_CSR_WR(csr, ADF_C4XXX_TICPPINTCTL, ADF_C4XXX_TICPP_EN);
544
545 /* Enable CFC Error interrupts and logging. */
546 ADF_CSR_WR(csr, ADF_C4XXX_CPP_CFC_ERR_CTRL, ADF_C4XXX_CPP_CFC_UE);
547
548 /* Enable ARAM correctable error detection. */
549 ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMCERR, ADF_C4XXX_ARAM_CERR);
550
551 /* Enable ARAM uncorrectable error detection. */
552 ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMUERR, ADF_C4XXX_ARAM_UERR);
553
554 /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
555 ADF_CSR_WR(aram_csr, ADF_C4XXX_CPPMEMTGTERR, ADF_C4XXX_TGT_UERR);
556 }
557
558 static void
559 adf_enable_mmp_error_correction(struct resource *csr,
560 struct adf_hw_device_data *hw_data)
561 {
562 unsigned int accel = 0, mmp;
563 unsigned long uerrssmmmp_mask, cerrssmmmp_mask;
564 enum operation op;
565 unsigned long accel_mask;
566
567 /* Prepare values and operation that will be performed on
568 * UERRSSMMMP and CERRSSMMMP registers on each MMP
569 */
570 if (hw_data->accel_capabilities_mask &
571 ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) {
572 uerrssmmmp_mask = ADF_C4XXX_UERRSSMMMP_EN;
573 cerrssmmmp_mask = ADF_C4XXX_CERRSSMMMP_EN;
574 op = OR;
575 } else {
576 uerrssmmmp_mask = ~ADF_C4XXX_UERRSSMMMP_EN;
577 cerrssmmmp_mask = ~ADF_C4XXX_CERRSSMMMP_EN;
578 op = AND;
579 }
580
581 accel_mask = hw_data->accel_mask;
582
583 /* Enable MMP Logging */
584 for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
585 {
586 /* Set power-up */
587 adf_csr_fetch_and_and(csr,
588 ADF_C4XXX_SLICEPWRDOWN(accel),
589 ~ADF_C4XXX_MMP_PWR_UP_MSK);
590
591 for (mmp = 0; mmp < ADF_C4XXX_MAX_MMP; ++mmp) {
592 adf_csr_fetch_and_update(op,
593 csr,
594 ADF_C4XXX_UERRSSMMMP(accel,
595 mmp),
596 uerrssmmmp_mask);
597 adf_csr_fetch_and_update(op,
598 csr,
599 ADF_C4XXX_CERRSSMMMP(accel,
600 mmp),
601 cerrssmmmp_mask);
602 }
603
604 /* Restore power-down value */
605 adf_csr_fetch_and_or(csr,
606 ADF_C4XXX_SLICEPWRDOWN(accel),
607 ADF_C4XXX_MMP_PWR_UP_MSK);
608 }
609 }
610
611 static u32
612 get_pf2vf_offset(u32 i)
613 {
614 return ADF_C4XXX_PF2VF_OFFSET(i);
615 }
616
617 static u32
618 get_vintmsk_offset(u32 i)
619 {
620 return ADF_C4XXX_VINTMSK_OFFSET(i);
621 }
622
623 static void
624 get_arb_info(struct arb_info *arb_csrs_info)
625 {
626 arb_csrs_info->arbiter_offset = ADF_C4XXX_ARB_OFFSET;
627 arb_csrs_info->wrk_cfg_offset = ADF_C4XXX_ARB_WQCFG_OFFSET;
628 }
629
630 static void
631 get_admin_info(struct admin_info *admin_csrs_info)
632 {
633 admin_csrs_info->mailbox_offset = ADF_C4XXX_MAILBOX_BASE_OFFSET;
634 admin_csrs_info->admin_msg_ur = ADF_C4XXX_ADMINMSGUR_OFFSET;
635 admin_csrs_info->admin_msg_lr = ADF_C4XXX_ADMINMSGLR_OFFSET;
636 }
637
638 static void
639 get_errsou_offset(u32 *errsou3, u32 *errsou5)
640 {
641 *errsou3 = ADF_C4XXX_ERRSOU3;
642 *errsou5 = ADF_C4XXX_ERRSOU5;
643 }
644
645 static void
646 adf_enable_error_correction(struct adf_accel_dev *accel_dev)
647 {
648 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
649 struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR];
650 struct resource *csr = misc_bar->virt_addr;
651 unsigned int val, i = 0;
652 unsigned long ae_mask;
653 unsigned long accel_mask;
654
655 ae_mask = hw_device->ae_mask;
656
657 /* Enable Accel Engine error detection & correction */
658 for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES)
659 {
660 val = ADF_CSR_RD(csr, ADF_C4XXX_AE_CTX_ENABLES(i));
661 val |= ADF_C4XXX_ENABLE_AE_ECC_ERR;
662 ADF_CSR_WR(csr, ADF_C4XXX_AE_CTX_ENABLES(i), val);
663 val = ADF_CSR_RD(csr, ADF_C4XXX_AE_MISC_CONTROL(i));
664 val |= ADF_C4XXX_ENABLE_AE_ECC_PARITY_CORR;
665 ADF_CSR_WR(csr, ADF_C4XXX_AE_MISC_CONTROL(i), val);
666 }
667
668 accel_mask = hw_device->accel_mask;
669
670 /* Enable shared memory error detection & correction */
671 for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
672 {
673 val = ADF_CSR_RD(csr, ADF_C4XXX_UERRSSMSH(i));
674 val |= ADF_C4XXX_ERRSSMSH_EN;
675 ADF_CSR_WR(csr, ADF_C4XXX_UERRSSMSH(i), val);
676 val = ADF_CSR_RD(csr, ADF_C4XXX_CERRSSMSH(i));
677 val |= ADF_C4XXX_ERRSSMSH_EN;
678 ADF_CSR_WR(csr, ADF_C4XXX_CERRSSMSH(i), val);
679 }
680
681 adf_enable_ras(accel_dev);
682 adf_enable_mmp_error_correction(csr, hw_device);
683 adf_enable_slice_hang_detection(accel_dev);
684 adf_enable_error_interrupts(accel_dev);
685 }
686
687 static void
688 adf_enable_ints(struct adf_accel_dev *accel_dev)
689 {
690 struct resource *addr;
691
692 addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
693
694 /* Enable bundle interrupts */
695 ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF0_MASK_OFFSET, ADF_C4XXX_SMIA0_MASK);
696 ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF1_MASK_OFFSET, ADF_C4XXX_SMIA1_MASK);
697 ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF2_MASK_OFFSET, ADF_C4XXX_SMIA2_MASK);
698 ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF3_MASK_OFFSET, ADF_C4XXX_SMIA3_MASK);
699 	/* Enable misc interrupts */
700 ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF4_MASK_OFFSET, ADF_C4XXX_SMIA4_MASK);
701 }
702
703 static u32
704 get_ae_clock(struct adf_hw_device_data *self)
705 {
706 	/* Clock update interval is 16 ticks for c4xxx. */
707 return self->clock_frequency / 16;
708 }
709
710 static int
711 measure_clock(struct adf_accel_dev *accel_dev)
712 {
713 u32 frequency;
714 int ret = 0;
715
716 ret = adf_dev_measure_clock(accel_dev,
717 &frequency,
718 ADF_C4XXX_MIN_AE_FREQ,
719 ADF_C4XXX_MAX_AE_FREQ);
720 if (ret)
721 return ret;
722
723 accel_dev->hw_device->clock_frequency = frequency;
724 return 0;
725 }
726
727 static int
728 get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled)
729 {
730 if (accel_dev->au_info->num_dc_au > 0) {
731 *storage_enabled = 1;
732 GET_HW_DATA(accel_dev)->extended_dc_capabilities =
733 ICP_ACCEL_CAPABILITIES_ADVANCED_COMPRESSION;
734 }
735 return 0;
736 }
737
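/*
 * c4xxx_get_hw_cap() - Build the hardware capability mask.
 *
 * Start from the full set of c4xxx capabilities, drop any capability whose
 * slice is disabled in LEGFUSES, then consult fusectl0/softstrappull0 for
 * inline acceleration and fusectl2/softstrappull2 for PKE and compression
 * enablement.
 */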
738 static u32
739 c4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
740 {
741 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
742 u32 legfuses;
743 u32 softstrappull0, softstrappull2;
744 u32 fusectl0, fusectl2;
745 u32 capabilities;
746
747 /* Read accelerator capabilities mask */
748 legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
749 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
750 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
751 ICP_ACCEL_CAPABILITIES_CIPHER |
752 ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
753 ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_ZUC |
754 ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT |
755 ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
756 ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
757 ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
758 ICP_ACCEL_CAPABILITIES_ECEDMONT;
759
760 if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
761 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
762 capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
763 }
764 if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
765 capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
766 if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
767 capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
768 ICP_ACCEL_CAPABILITIES_ECEDMONT);
769 if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) {
770 capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
771 capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY;
772 }
773 if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE)
774 capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC;
775 if (legfuses & ICP_ACCEL_MASK_SM3_SLICE)
776 capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
777 if (legfuses & ICP_ACCEL_MASK_SM4_SLICE)
778 capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
779
780 /* Read fusectl0 & softstrappull0 registers to ensure inline
781 * acceleration is not disabled
782 */
783 softstrappull0 =
784 pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4);
785 fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
786 if ((fusectl0 | softstrappull0) & ADF_C4XXX_FUSE_DISABLE_INLINE_MASK)
787 capabilities &= ~ICP_ACCEL_CAPABILITIES_INLINE;
788
789 	/* Read fusectl2 & softstrappull2 registers to check whether
790 	 * PKE/DC are enabled or disabled
791 */
792 softstrappull2 =
793 pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL2_OFFSET, 4);
794 fusectl2 = pci_read_config(pdev, ADF_C4XXX_FUSECTL2_OFFSET, 4);
795 /* Disable PKE/DC cap if there are no PKE/DC-enabled AUs. */
796 if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_PKE_MASK))
797 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
798 if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_COMP_MASK))
799 capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION |
800 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY);
801
802 return capabilities;
803 }
804
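/*
 * c4xxx_configure_accel_units() - Populate the GENERAL section with the
 * default accel unit configuration.
 *
 * The ADF_SERVICES_ENABLED value is set to sym or cy (plus dc when the SKU
 * provides DC accel units), and the per-service accel unit counts are
 * taken from the SKU tables.
 */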
805 static int
806 c4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
807 {
808 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
809 unsigned long val;
810 char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
811 int sku;
812 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
813
814 sku = get_sku(hw_data);
815
816 if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
817 goto err;
818
819 snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
820
821 /* Base station SKU supports symmetric cryptography only. */
822 if (adf_check_sym_only_sku_c4xxx(accel_dev))
823 snprintf(val_str, sizeof(val_str), ADF_SERVICE_SYM);
824 else
825 snprintf(val_str, sizeof(val_str), ADF_SERVICE_CY);
826
827 val = sku_dc_au[sku];
828 if (val) {
829 strncat(val_str,
830 ADF_SERVICES_SEPARATOR ADF_SERVICE_DC,
831 ADF_CFG_MAX_VAL_LEN_IN_BYTES -
832 strnlen(val_str, sizeof(val_str)) -
833 ADF_CFG_NULL_TERM_SIZE);
834 }
835
836 if (adf_cfg_add_key_value_param(
837 accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
838 goto err;
839
840 snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
841 val = sku_cy_au[sku];
842 if (adf_cfg_add_key_value_param(
843 accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
844 goto err;
845
846 snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
847 val = sku_dc_au[sku];
848 if (adf_cfg_add_key_value_param(
849 accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
850 goto err;
851
852 snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
853 val = sku_inline_au[sku];
854 if (adf_cfg_add_key_value_param(
855 accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
856 goto err;
857
858 return 0;
859 err:
860 device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
861 return EINVAL;
862 }
863
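/*
 * update_hw_capability() - Trim the capability mask to the accel unit
 * configuration.
 *
 * Capabilities for asymmetric crypto, symmetric crypto, compression and
 * inline are removed when no AE is assigned to the corresponding service.
 */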
864 static void
865 update_hw_capability(struct adf_accel_dev *accel_dev)
866 {
867 struct adf_accel_unit_info *au_info = accel_dev->au_info;
868 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
869 u32 disabled_caps = 0;
870
871 if (!au_info->asym_ae_msk)
872 disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
873 ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
874
875 if (!au_info->sym_ae_msk)
876 disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
877 ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_ZUC |
878 ICP_ACCEL_CAPABILITIES_SHA3_EXT |
879 ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
880 ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
881 ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
882
883 if (!au_info->dc_ae_msk) {
884 disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
885 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY;
886 hw_device->extended_dc_capabilities = 0;
887 }
888
889 if (!au_info->inline_ingress_msk && !au_info->inline_egress_msk)
890 disabled_caps |= ICP_ACCEL_CAPABILITIES_INLINE;
891
892 hw_device->accel_capabilities_mask =
893 c4xxx_get_hw_cap(accel_dev) & ~disabled_caps;
894 }
895
896 static void
897 c4xxx_set_sadb_size(struct adf_accel_dev *accel_dev)
898 {
899 u32 sadb_reg_value = 0;
900 struct resource *aram_csr_base;
901
902 aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
903 if (accel_dev->au_info->num_inline_au) {
904 /* REG_SA_DB_CTRL register initialisation */
905 sadb_reg_value = ADF_C4XXX_SADB_REG_VALUE(accel_dev);
906 ADF_CSR_WR(aram_csr_base,
907 ADF_C4XXX_REG_SA_DB_CTRL,
908 sadb_reg_value);
909 } else {
910 /* Zero the SADB size when inline is disabled. */
911 adf_csr_fetch_and_and(aram_csr_base,
912 ADF_C4XXX_REG_SA_DB_CTRL,
913 ADF_C4XXX_SADB_SIZE_BIT);
914 }
915 /* REG_SA_CTRL_LOCK register initialisation. We set the lock
916 	 * bit in order to prevent REG_SA_DB_CTRL from being
917 	 * overwritten.
918 */
919 ADF_CSR_WR(aram_csr_base,
920 ADF_C4XXX_REG_SA_CTRL_LOCK,
921 ADF_C4XXX_DEFAULT_SA_CTRL_LOCKOUT);
922 }
923
924 static void
925 c4xxx_init_error_notification_configuration(struct adf_accel_dev *accel_dev,
926 u32 offset)
927 {
928 struct resource *aram_csr_base;
929
930 aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
931
932 	/* Configure the error notification registers */
933 /* Set CD Parity error */
934 ADF_CSR_WR(aram_csr_base,
935 ADF_C4XXX_IC_CD_RF_PARITY_ERR_0 + offset,
936 ADF_C4XXX_CD_RF_PARITY_ERR_0_VAL);
937 ADF_CSR_WR(aram_csr_base,
938 ADF_C4XXX_IC_CD_RF_PARITY_ERR_1 + offset,
939 ADF_C4XXX_CD_RF_PARITY_ERR_1_VAL);
940 ADF_CSR_WR(aram_csr_base,
941 ADF_C4XXX_IC_CD_RF_PARITY_ERR_2 + offset,
942 ADF_C4XXX_CD_RF_PARITY_ERR_2_VAL);
943 ADF_CSR_WR(aram_csr_base,
944 ADF_C4XXX_IC_CD_RF_PARITY_ERR_3 + offset,
945 ADF_C4XXX_CD_RF_PARITY_ERR_3_VAL);
946 /* Set CD RAM ECC Correctable Error */
947 ADF_CSR_WR(aram_csr_base,
948 ADF_C4XXX_IC_CD_CERR + offset,
949 ADF_C4XXX_CD_CERR_VAL);
950 /* Set CD RAM ECC UnCorrectable Error */
951 ADF_CSR_WR(aram_csr_base,
952 ADF_C4XXX_IC_CD_UERR + offset,
953 ADF_C4XXX_CD_UERR_VAL);
954 /* Set Inline (excl cmd_dis) Parity Error */
955 ADF_CSR_WR(aram_csr_base,
956 ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0 + offset,
957 ADF_C4XXX_INLN_RF_PARITY_ERR_0_VAL);
958 ADF_CSR_WR(aram_csr_base,
959 ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1 + offset,
960 ADF_C4XXX_INLN_RF_PARITY_ERR_1_VAL);
961 ADF_CSR_WR(aram_csr_base,
962 ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2 + offset,
963 ADF_C4XXX_INLN_RF_PARITY_ERR_2_VAL);
964 ADF_CSR_WR(aram_csr_base,
965 ADF_C4XXX_IC_INLN_RF_PARITY_ERR_3 + offset,
966 ADF_C4XXX_INLN_RF_PARITY_ERR_3_VAL);
967 ADF_CSR_WR(aram_csr_base,
968 ADF_C4XXX_IC_INLN_RF_PARITY_ERR_4 + offset,
969 ADF_C4XXX_INLN_RF_PARITY_ERR_4_VAL);
970 ADF_CSR_WR(aram_csr_base,
971 ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5 + offset,
972 ADF_C4XXX_INLN_RF_PARITY_ERR_5_VAL);
973 /* Set Parser RAM ECC Correctable Error */
974 ADF_CSR_WR(aram_csr_base,
975 ADF_C4XXX_IC_PARSER_CERR + offset,
976 ADF_C4XXX_PARSER_CERR_VAL);
977 /* Set Parser RAM ECC UnCorrectable Error */
978 ADF_CSR_WR(aram_csr_base,
979 ADF_C4XXX_IC_PARSER_UERR + offset,
980 ADF_C4XXX_PARSER_UERR_VAL);
981 /* Set CTPB RAM ECC Correctable Error */
982 ADF_CSR_WR(aram_csr_base,
983 ADF_C4XXX_IC_CTPB_CERR + offset,
984 ADF_C4XXX_CTPB_CERR_VAL);
985 /* Set CTPB RAM ECC UnCorrectable Error */
986 ADF_CSR_WR(aram_csr_base,
987 ADF_C4XXX_IC_CTPB_UERR + offset,
988 ADF_C4XXX_CTPB_UERR_VAL);
989 /* Set CPP Interface Status */
990 ADF_CSR_WR(aram_csr_base,
991 ADF_C4XXX_IC_CPPM_ERR_STAT + offset,
992 ADF_C4XXX_CPPM_ERR_STAT_VAL);
993 /* Set CGST_MGMT_INT */
994 ADF_CSR_WR(aram_csr_base,
995 ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset,
996 ADF_C4XXX_CONGESTION_MGMT_INI_VAL);
997 /* CPP Interface Status */
998 ADF_CSR_WR(aram_csr_base,
999 ADF_C4XXX_IC_CPPT_ERR_STAT + offset,
1000 ADF_C4XXX_CPPT_ERR_STAT_VAL);
1001 /* MAC Interrupt Mask */
1002 ADF_CSR_WR64(aram_csr_base,
1003 ADF_C4XXX_IC_MAC_IM + offset,
1004 ADF_C4XXX_MAC_IM_VAL);
1005 }
1006
1007 static void
1008 c4xxx_enable_parse_extraction(struct adf_accel_dev *accel_dev)
1009 {
1010 struct resource *aram_csr_base;
1011
1012 aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1013
1014 	/* Enable Inline Parse Extraction CSRs */
1015
1016 /* Set IC_PARSE_CTRL register */
1017 ADF_CSR_WR(aram_csr_base,
1018 ADF_C4XXX_IC_PARSE_CTRL_OFFSET,
1019 ADF_C4XXX_IC_PARSE_CTRL_OFFSET_DEFAULT_VALUE);
1020
1021 /* Set IC_PARSE_FIXED_DATA(0) */
1022 ADF_CSR_WR(aram_csr_base,
1023 ADF_C4XXX_IC_PARSE_FIXED_DATA(0),
1024 ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_DATA_0);
1025
1026 /* Set IC_PARSE_FIXED_LENGTH */
1027 ADF_CSR_WR(aram_csr_base,
1028 ADF_C4XXX_IC_PARSE_FIXED_LENGTH,
1029 ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_LEN);
1030
1031 /* Configure ESP protocol from an IPv4 header */
1032 ADF_CSR_WR(aram_csr_base,
1033 ADF_C4XXX_IC_PARSE_IPV4_OFFSET_0,
1034 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_0_VALUE);
1035 ADF_CSR_WR(aram_csr_base,
1036 ADF_C4XXX_IC_PARSE_IPV4_LENGTH_0,
1037 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_0_VALUE);
1038 /* Configure protocol extraction field from an IPv4 header */
1039 ADF_CSR_WR(aram_csr_base,
1040 ADF_C4XXX_IC_PARSE_IPV4_OFFSET_1,
1041 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_1_VALUE);
1042 ADF_CSR_WR(aram_csr_base,
1043 ADF_C4XXX_IC_PARSE_IPV4_LENGTH_1,
1044 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_1_VALUE);
1045 /* Configure SPI extraction field from an IPv4 header */
1046 ADF_CSR_WR(aram_csr_base,
1047 ADF_C4XXX_IC_PARSE_IPV4_OFFSET_2,
1048 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_2_VALUE);
1049 ADF_CSR_WR(aram_csr_base,
1050 ADF_C4XXX_IC_PARSE_IPV4_LENGTH_2,
1051 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_2_VALUE);
1052 	/* Configure destination IP address field from an IPv4 header */
1053 ADF_CSR_WR(aram_csr_base,
1054 ADF_C4XXX_IC_PARSE_IPV4_OFFSET_3,
1055 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_3_VALUE);
1056 ADF_CSR_WR(aram_csr_base,
1057 ADF_C4XXX_IC_PARSE_IPV4_LENGTH_3,
1058 ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_3_VALUE);
1059
1060 /* Configure function number extraction field from an IPv6 header */
1061 ADF_CSR_WR(aram_csr_base,
1062 ADF_C4XXX_IC_PARSE_IPV6_OFFSET_0,
1063 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_0_VALUE);
1064 ADF_CSR_WR(aram_csr_base,
1065 ADF_C4XXX_IC_PARSE_IPV6_LENGTH_0,
1066 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_0_VALUE);
1067 /* Configure protocol extraction field from an IPv6 header */
1068 ADF_CSR_WR(aram_csr_base,
1069 ADF_C4XXX_IC_PARSE_IPV6_OFFSET_1,
1070 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_1_VALUE);
1071 ADF_CSR_WR(aram_csr_base,
1072 ADF_C4XXX_IC_PARSE_IPV6_LENGTH_1,
1073 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_1_VALUE);
1074 /* Configure SPI extraction field from an IPv6 header */
1075 ADF_CSR_WR(aram_csr_base,
1076 ADF_C4XXX_IC_PARSE_IPV6_OFFSET_2,
1077 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_2_VALUE);
1078 ADF_CSR_WR(aram_csr_base,
1079 ADF_C4XXX_IC_PARSE_IPV6_LENGTH_2,
1080 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_2_VALUE);
1081 	/* Configure destination IP address field from an IPv6 header */
1082 ADF_CSR_WR(aram_csr_base,
1083 ADF_C4XXX_IC_PARSE_IPV6_OFFSET_3,
1084 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_3_VALUE);
1085 ADF_CSR_WR(aram_csr_base,
1086 ADF_C4XXX_IC_PARSE_IPV6_LENGTH_3,
1087 ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_3_VALUE);
1088 }
1089
1090 static int
1091 adf_get_inline_ipsec_algo_group(struct adf_accel_dev *accel_dev,
1092 unsigned long *ipsec_algo_group)
1093 {
1094 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1095
1096 if (adf_cfg_get_param_value(
1097 accel_dev, ADF_INLINE_SEC, ADF_INLINE_IPSEC_ALGO_GROUP, val))
1098 return EFAULT;
1099 if (kstrtoul(val, 0, ipsec_algo_group))
1100 return EFAULT;
1101
1102 /* Verify the ipsec_algo_group */
1103 if (*ipsec_algo_group >= IPSEC_ALGO_GROUP_DELIMITER) {
1104 device_printf(
1105 GET_DEV(accel_dev),
1106 "Unsupported IPSEC algo group %lu in config file!\n",
1107 *ipsec_algo_group);
1108 return EFAULT;
1109 }
1110
1111 return 0;
1112 }
1113
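/*
 * c4xxx_init_inline_hw() - Program the inline IPsec registers in ARAM.
 *
 * Configure the SA entry size and lookup key length for the selected IPsec
 * algo group, the per-function SA limit, the advertised algorithms and
 * offload features, the MAC and MAC_PIA registers for both directions,
 * parse extraction and the error notification registers.
 */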
1114 static int
1115 c4xxx_init_inline_hw(struct adf_accel_dev *accel_dev)
1116 {
1117 u32 sa_entry_reg_value = 0;
1118 u32 sa_fn_lim = 0;
1119 u32 supported_algo = 0;
1120 struct resource *aram_csr_base;
1121 u32 offset;
1122 unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP;
1123
1124 aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1125
1126 if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group))
1127 return EFAULT;
1128
1129 sa_entry_reg_value |=
1130 (ADF_C4XXX_DEFAULT_LU_KEY_LEN << ADF_C4XXX_LU_KEY_LEN_BIT_OFFSET);
1131 if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) {
1132 sa_entry_reg_value |= ADF_C4XXX_DEFAULT_SA_SIZE;
1133 sa_fn_lim =
1134 ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_DEFAULT_SA_SIZE);
1135 supported_algo = ADF_C4XXX_DEFAULT_SUPPORTED_ALGORITHMS;
1136 } else if (ipsec_algo_group == IPSEC_ALGO_GROUP1) {
1137 sa_entry_reg_value |= ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
1138 sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev,
1139 ADF_C4XXX_ALGO_GROUP1_SA_SIZE);
1140 supported_algo = ADF_C4XXX_SUPPORTED_ALGORITHMS_GROUP1;
1141 } else {
1142 return EFAULT;
1143 }
1144
1145 /* REG_SA_ENTRY_CTRL register initialisation */
1146 ADF_CSR_WR(aram_csr_base,
1147 ADF_C4XXX_REG_SA_ENTRY_CTRL,
1148 sa_entry_reg_value);
1149
1150 /* REG_SAL_FUNC_LIMITS register initialisation. Only the first register
1151 	 * needs to be initialised here, as it is assigned to the physical
1152 	 * function. Other registers will be initialised by the LAN PF driver.
1153 	 * The function limit is initialised to its maximum value.
1154 */
1155 ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_FUNC_LIMITS, sa_fn_lim);
1156
1157 /* Initialize REG_SA_SCRATCH[0] register to
1158 * advertise supported crypto algorithms
1159 */
1160 ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_0, supported_algo);
1161
1162 /* REG_SA_SCRATCH[2] register initialisation
1163 * to advertise supported crypto offload features.
1164 */
1165 ADF_CSR_WR(aram_csr_base,
1166 ADF_C4XXX_REG_SA_SCRATCH_2,
1167 ADF_C4XXX_DEFAULT_CY_OFFLOAD_FEATURES);
1168
1169 /* Overwrite default MAC_CFG register in ingress offset */
1170 ADF_CSR_WR64(aram_csr_base,
1171 ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET,
1172 ADF_C4XXX_MAC_CFG_VALUE);
1173
1174 /* Overwrite default MAC_CFG register in egress offset */
1175 ADF_CSR_WR64(aram_csr_base,
1176 ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET,
1177 ADF_C4XXX_MAC_CFG_VALUE);
1178
1179 /* Overwrite default MAC_PIA_CFG
1180 * (Packet Interface Adapter Configuration) registers
1181 * in ingress offset
1182 */
1183 ADF_CSR_WR64(aram_csr_base,
1184 ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET,
1185 ADF_C4XXX_MAC_PIA_CFG_VALUE);
1186
1187 /* Overwrite default MAC_PIA_CFG in egress offset */
1188 ADF_CSR_WR64(aram_csr_base,
1189 ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET,
1190 ADF_C4XXX_MAC_PIA_CFG_VALUE);
1191
1192 c4xxx_enable_parse_extraction(accel_dev);
1193
1194 ADF_CSR_WR(aram_csr_base,
1195 ADF_C4XXX_INGRESS_CMD_DIS_MISC,
1196 ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE);
1197
1198 ADF_CSR_WR(aram_csr_base,
1199 ADF_C4XXX_EGRESS_CMD_DIS_MISC,
1200 ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE);
1201
1202 /* Set bits<1:0> in ADF_C4XXX_INLINE_CAPABILITY register to
1203 	 * advertise that both ingress and egress directions are available
1204 */
1205 ADF_CSR_WR(aram_csr_base,
1206 ADF_C4XXX_REG_SA_INLINE_CAPABILITY,
1207 ADF_C4XXX_INLINE_CAPABILITIES);
1208
1209 /* Set error notification configuration of ingress */
1210 offset = ADF_C4XXX_INLINE_INGRESS_OFFSET;
1211 c4xxx_init_error_notification_configuration(accel_dev, offset);
1212 /* Set error notification configuration of egress */
1213 offset = ADF_C4XXX_INLINE_EGRESS_OFFSET;
1214 c4xxx_init_error_notification_configuration(accel_dev, offset);
1215
1216 return 0;
1217 }
1218
1219 static void
1220 adf_enable_inline_notification(struct adf_accel_dev *accel_dev)
1221 {
1222 struct resource *aram_csr_base;
1223
1224 aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1225
1226 /* Set bit<0> in ADF_C4XXX_REG_SA_INLINE_ENABLE to advertise
1227 * that inline is enabled.
1228 */
1229 ADF_CSR_WR(aram_csr_base,
1230 ADF_C4XXX_REG_SA_INLINE_ENABLE,
1231 ADF_C4XXX_INLINE_ENABLED);
1232 }
1233
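/*
 * c4xxx_init_aram_config() - Partition the accelerator RAM (ARAM).
 *
 * The MMP region is placed at the top of ARAM, followed by the SKM region
 * when crypto or inline is enabled. With inline enabled, the remaining
 * space (rounded down to a multiple of the SA size) becomes the SADB at
 * offset 0; otherwise the remaining space may hold compression
 * intermediate buffers.
 */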
1234 static int
1235 c4xxx_init_aram_config(struct adf_accel_dev *accel_dev)
1236 {
1237 u32 aram_size = ADF_C4XXX_2MB_ARAM_SIZE;
1238 u32 ibuff_mem_needed = 0;
1239 u32 usable_aram_size = 0;
1240 struct adf_hw_aram_info *aram_info;
1241 u32 sa_db_ctl_value;
1242 struct resource *aram_csr_base;
1243 u8 profile = 0;
1244 u32 sadb_size = 0;
1245 u32 sa_size = 0;
1246 unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP;
1247 u32 i;
1248
1249 if (accel_dev->au_info->num_inline_au > 0)
1250 if (adf_get_inline_ipsec_algo_group(accel_dev,
1251 &ipsec_algo_group))
1252 return EFAULT;
1253
1254 /* Allocate memory for adf_hw_aram_info */
1255 aram_info = kzalloc(sizeof(*accel_dev->aram_info), GFP_KERNEL);
1256 if (!aram_info)
1257 return ENOMEM;
1258
1259 /* Initialise Inline direction */
1260 aram_info->inline_direction_egress_mask = 0;
1261 if (accel_dev->au_info->num_inline_au) {
1262 /* Set inline direction bitmap in the ARAM to
1263 * inform firmware which ME is egress
1264 */
1265 aram_info->inline_direction_egress_mask =
1266 accel_dev->au_info->inline_egress_msk;
1267
1268 		/* The user profile is valid, so we can now add it
1269 		 * to the ARAM partition table
1270 */
1271 aram_info->inline_congest_mngt_profile = profile;
1272 }
1273 /* Initialise DC ME mask, "1" = ME is used for DC operations */
1274 aram_info->dc_ae_mask = accel_dev->au_info->dc_ae_msk;
1275
1276 /* Initialise CY ME mask, "1" = ME is used for CY operations
1277 * Since asym service can also be enabled on inline AEs, here
1278 * we use the sym ae mask for configuring the cy_ae_msk
1279 */
1280 aram_info->cy_ae_mask = accel_dev->au_info->sym_ae_msk;
1281
1282 /* Configure number of long words in the ARAM */
1283 aram_info->num_aram_lw_entries = ADF_C4XXX_NUM_ARAM_ENTRIES;
1284
1285 /* Reset region offset values to 0xffffffff */
1286 aram_info->mmp_region_offset = ~aram_info->mmp_region_offset;
1287 aram_info->skm_region_offset = ~aram_info->skm_region_offset;
1288 aram_info->inter_buff_aram_region_offset =
1289 ~aram_info->inter_buff_aram_region_offset;
1290
1291 /* Determine ARAM size */
1292 aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1293 sa_db_ctl_value = ADF_CSR_RD(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL);
1294
1295 aram_size = (sa_db_ctl_value & ADF_C4XXX_SADB_SIZE_BIT) ?
1296 ADF_C4XXX_2MB_ARAM_SIZE :
1297 ADF_C4XXX_4MB_ARAM_SIZE;
1298 device_printf(GET_DEV(accel_dev),
1299 "Total available accelerator memory: %uMB\n",
1300 aram_size / ADF_C4XXX_1MB_SIZE);
1301
1302 /* Compute MMP region offset */
1303 aram_info->mmp_region_size = ADF_C4XXX_DEFAULT_MMP_REGION_SIZE;
1304 aram_info->mmp_region_offset = aram_size - aram_info->mmp_region_size;
1305
1306 if (accel_dev->au_info->num_cy_au ||
1307 accel_dev->au_info->num_inline_au) {
1308 /* Crypto is available therefore we must
1309 * include space in the ARAM for SKM.
1310 */
1311 aram_info->skm_region_size = ADF_C4XXX_DEFAULT_SKM_REGION_SIZE;
1312 /* Compute SKM region offset */
1313 aram_info->skm_region_offset = aram_size -
1314 (aram_info->mmp_region_size + aram_info->skm_region_size);
1315 }
1316
1317 	/* SADB always starts at offset 0. */
1318 if (accel_dev->au_info->num_inline_au) {
1319 /* Inline is available therefore we must
1320 * use remaining ARAM for the SADB.
1321 */
1322 sadb_size = aram_size -
1323 (aram_info->mmp_region_size + aram_info->skm_region_size);
1324
1325 /*
1326 * When the inline service is enabled, the policy is that
1327 		 * compression gives up its space in ARAM to allow for a
1328 * larger SADB. Compression must use DRAM instead of ARAM.
1329 */
1330 aram_info->inter_buff_aram_region_size = 0;
1331
1332 /* the SADB size must be an integral multiple of the SA size */
1333 if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) {
1334 sa_size = ADF_C4XXX_DEFAULT_SA_SIZE;
1335 } else {
1336 /* IPSEC_ALGO_GROUP1
1337 			 * There are two algo groups in total.
1338 */
1339 sa_size = ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
1340 }
1341
1342 sadb_size = sadb_size -
1343 (sadb_size % ADF_C4XXX_SA_SIZE_IN_BYTES(sa_size));
1344 aram_info->sadb_region_size = sadb_size;
1345 }
1346
1347 if (accel_dev->au_info->num_dc_au &&
1348 !accel_dev->au_info->num_inline_au) {
1349 /* Compression is available therefore we must see if there is
1350 * space in the ARAM for intermediate buffers.
1351 */
1352 aram_info->inter_buff_aram_region_size = 0;
1353 usable_aram_size = aram_size -
1354 (aram_info->mmp_region_size + aram_info->skm_region_size);
1355
1356 for (i = 1; i <= accel_dev->au_info->num_dc_au; i++) {
1357 if ((i * ADF_C4XXX_AU_COMPR_INTERM_SIZE) >
1358 usable_aram_size)
1359 break;
1360
1361 ibuff_mem_needed = i * ADF_C4XXX_AU_COMPR_INTERM_SIZE;
1362 }
1363
1364 /* Set remaining ARAM to intermediate buffers. Firmware handles
1365 		 * fallback to DRAM for cases where the number of AUs assigned
1366 * to compression exceeds available ARAM memory.
1367 */
1368 aram_info->inter_buff_aram_region_size = ibuff_mem_needed;
1369
1370 /* If ARAM is used for compression set its initial offset. */
1371 if (aram_info->inter_buff_aram_region_size)
1372 aram_info->inter_buff_aram_region_offset = 0;
1373 }
1374
1375 accel_dev->aram_info = aram_info;
1376
1377 return 0;
1378 }
1379
1380 static void
1381 c4xxx_exit_aram_config(struct adf_accel_dev *accel_dev)
1382 {
1383 kfree(accel_dev->aram_info);
1384 accel_dev->aram_info = NULL;
1385 }
1386
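/*
 * get_num_accel_units() - Number of accel units, i.e. the count of enabled
 * accelerators divided by the number of accelerators per accel unit.
 */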
1387 static u32
1388 get_num_accel_units(struct adf_hw_device_data *self)
1389 {
1390 u32 i = 0, num_accel = 0;
1391 unsigned long accel_mask = 0;
1392
1393 if (!self || !self->accel_mask)
1394 return 0;
1395
1396 accel_mask = self->accel_mask;
1397
1398 for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
1399 {
1400 num_accel++;
1401 }
1402
1403 return num_accel / ADF_C4XXX_NUM_ACCEL_PER_AU;
1404 }
1405
1406 static int
1407 get_accel_unit(struct adf_hw_device_data *self,
1408 struct adf_accel_unit **accel_unit)
1409 {
1410 enum dev_sku_info sku;
1411
1412 sku = get_sku(self);
1413
1414 switch (sku) {
1415 case DEV_SKU_1:
1416 case DEV_SKU_1_CY:
1417 *accel_unit = adf_c4xxx_au_32_ae;
1418 break;
1419 case DEV_SKU_2:
1420 case DEV_SKU_2_CY:
1421 *accel_unit = adf_c4xxx_au_24_ae;
1422 break;
1423 case DEV_SKU_3:
1424 case DEV_SKU_3_CY:
1425 *accel_unit = adf_c4xxx_au_12_ae;
1426 break;
1427 default:
1428 *accel_unit = adf_c4xxx_au_emulation;
1429 break;
1430 }
1431 return 0;
1432 }
1433
1434 static int
1435 get_ae_info(struct adf_hw_device_data *self, const struct adf_ae_info **ae_info)
1436 {
1437 enum dev_sku_info sku;
1438
1439 sku = get_sku(self);
1440
1441 switch (sku) {
1442 case DEV_SKU_1:
1443 *ae_info = adf_c4xxx_32_ae;
1444 break;
1445 case DEV_SKU_1_CY:
1446 *ae_info = adf_c4xxx_32_ae_sym;
1447 break;
1448 case DEV_SKU_2:
1449 *ae_info = adf_c4xxx_24_ae;
1450 break;
1451 case DEV_SKU_2_CY:
1452 *ae_info = adf_c4xxx_24_ae_sym;
1453 break;
1454 case DEV_SKU_3:
1455 *ae_info = adf_c4xxx_12_ae;
1456 break;
1457 case DEV_SKU_3_CY:
1458 *ae_info = adf_c4xxx_12_ae_sym;
1459 break;
1460 default:
1461 *ae_info = adf_c4xxx_12_ae;
1462 break;
1463 }
1464 return 0;
1465 }
1466
1467 static int
1468 adf_add_debugfs_info(struct adf_accel_dev *accel_dev)
1469 {
1470 /* Add Accel Unit configuration table to debug FS interface */
1471 if (c4xxx_init_ae_config(accel_dev)) {
1472 device_printf(GET_DEV(accel_dev),
1473 "Failed to create entry for AE configuration\n");
1474 return EFAULT;
1475 }
1476
1477 return 0;
1478 }
1479
1480 static void
1481 adf_remove_debugfs_info(struct adf_accel_dev *accel_dev)
1482 {
1483 /* Remove Accel Unit configuration table from debug FS interface */
1484 c4xxx_exit_ae_config(accel_dev);
1485 }
1486
1487 static int
1488 check_svc_to_hw_capabilities(struct adf_accel_dev *accel_dev,
1489 const char *svc_name,
1490 enum icp_qat_capabilities_mask cap)
1491 {
1492 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1493 u32 hw_cap = hw_data->accel_capabilities_mask;
1494
1495 hw_cap &= cap;
1496 if (hw_cap != cap) {
1497 device_printf(GET_DEV(accel_dev),
1498 "Service not supported by accelerator: %s\n",
1499 svc_name);
1500 return EPERM;
1501 }
1502
1503 return 0;
1504 }
1505
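/*
 * check_accel_unit_config() - Validate the user accel unit assignment.
 *
 * Cross-check the enabled services against the hardware capability mask,
 * make sure accel units are only assigned to enabled services, that every
 * enabled service gets at least one accel unit (asym may instead be
 * handled by inline firmware) and that the per-service counts add up to
 * the number of accel units on the device.
 */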
1506 static int
1507 check_accel_unit_config(struct adf_accel_dev *accel_dev,
1508 u8 num_cy_au,
1509 u8 num_dc_au,
1510 u8 num_inline_au)
1511 {
1512 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1513 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1514 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1515 u32 num_au = hw_data->get_num_accel_units(hw_data);
1516 u32 service_mask = ADF_ACCEL_SERVICE_NULL;
1517 char *token, *cur_str;
1518 int ret = 0;
1519
1520 /* Get the services enabled by user */
1521 snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
1522 if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1523 return EFAULT;
1524 cur_str = val;
1525 token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1526 while (token) {
1527 if (!strncmp(token, ADF_SERVICE_CY, strlen(ADF_SERVICE_CY))) {
1528 service_mask |= ADF_ACCEL_CRYPTO;
1529 ret |= check_svc_to_hw_capabilities(
1530 accel_dev,
1531 token,
1532 ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
1533 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
1534 }
1535
1536 if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) {
1537 service_mask |= ADF_ACCEL_CRYPTO;
1538 ret |= check_svc_to_hw_capabilities(
1539 accel_dev,
1540 token,
1541 ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC);
1542 }
1543
1544 if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) {
1545 /* Handle a special case of services 'asym;inline'
1546 * enabled where ASYM is handled by Inline firmware
1547 			 * at AE level. This configuration allows enabling the
1548 			 * ASYM service without accel units assigned to the
1549 			 * CRYPTO service, e.g.
1550 * num_inline_au = 6
1551 * num_cy_au = 0
1552 */
1553 if (num_inline_au < num_au)
1554 service_mask |= ADF_ACCEL_CRYPTO;
1555
1556 ret |= check_svc_to_hw_capabilities(
1557 accel_dev,
1558 token,
1559 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
1560 }
1561
1562 if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) {
1563 service_mask |= ADF_ACCEL_COMPRESSION;
1564 ret |= check_svc_to_hw_capabilities(
1565 accel_dev,
1566 token,
1567 ICP_ACCEL_CAPABILITIES_COMPRESSION);
1568 }
1569
1570 if (!strncmp(token,
1571 ADF_SERVICE_INLINE,
1572 strlen(ADF_SERVICE_INLINE))) {
1573 service_mask |= ADF_ACCEL_INLINE_CRYPTO;
1574 ret |= check_svc_to_hw_capabilities(
1575 accel_dev, token, ICP_ACCEL_CAPABILITIES_INLINE);
1576 }
1577
1578 token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1579 }
1580
1581 /* Ensure the user doesn't enable services that are not supported by
1582 	 * the accelerator.
1583 */
1584 if (ret) {
1585 device_printf(GET_DEV(accel_dev),
1586 "Invalid accelerator configuration.\n");
1587 return EFAULT;
1588 }
1589
1590 if (!(service_mask & ADF_ACCEL_COMPRESSION) && num_dc_au > 0) {
1591 device_printf(GET_DEV(accel_dev),
1592 "Invalid accel unit config.\n");
1593 device_printf(
1594 GET_DEV(accel_dev),
1595 "DC accel units set when dc service not enabled\n");
1596 return EFAULT;
1597 }
1598
1599 if (!(service_mask & ADF_ACCEL_CRYPTO) && num_cy_au > 0) {
1600 device_printf(GET_DEV(accel_dev),
1601 "Invalid accel unit config.\n");
1602 device_printf(
1603 GET_DEV(accel_dev),
1604 "CY accel units set when cy service not enabled\n");
1605 return EFAULT;
1606 }
1607
1608 if (!(service_mask & ADF_ACCEL_INLINE_CRYPTO) && num_inline_au > 0) {
1609 device_printf(GET_DEV(accel_dev),
1610 "Invalid accel unit config.\n"
1611 "Inline feature not supported.\n");
1612 return EFAULT;
1613 }
1614
1615 hw_data->service_mask = service_mask;
1616 /* Ensure the user doesn't allocate more than max accel units */
1617 if (num_au != (num_cy_au + num_dc_au + num_inline_au)) {
1618 device_printf(GET_DEV(accel_dev),
1619 "Invalid accel unit config.\n");
1620 device_printf(GET_DEV(accel_dev),
1621 "Max accel units is %d\n",
1622 num_au);
1623 return EFAULT;
1624 }
1625
1626 /* Ensure user allocates hardware resources for enabled services */
1627 if (!num_cy_au && (service_mask & ADF_ACCEL_CRYPTO)) {
1628 device_printf(GET_DEV(accel_dev),
1629 "Failed to enable cy service!\n");
1630 device_printf(GET_DEV(accel_dev),
1631 			      "%s should not be 0\n",
1632 ADF_NUM_CY_ACCEL_UNITS);
1633 return EFAULT;
1634 }
1635 if (!num_dc_au && (service_mask & ADF_ACCEL_COMPRESSION)) {
1636 device_printf(GET_DEV(accel_dev),
1637 "Failed to enable dc service!\n");
1638 device_printf(GET_DEV(accel_dev),
1639 			      "%s should not be 0\n",
1640 ADF_NUM_DC_ACCEL_UNITS);
1641 return EFAULT;
1642 }
1643 if (!num_inline_au && (service_mask & ADF_ACCEL_INLINE_CRYPTO)) {
1644 		device_printf(GET_DEV(accel_dev),
1645 			      "Failed to enable inline service!\n");
1646 		device_printf(GET_DEV(accel_dev),
1647 			      "%s should not be 0\n",
1648 			      ADF_NUM_INLINE_ACCEL_UNITS);
1649 return EFAULT;
1650 }
1651
1652 return 0;
1653 }
1654
1655 static int
1656 get_accel_unit_config(struct adf_accel_dev *accel_dev,
1657 u8 *num_cy_au,
1658 u8 *num_dc_au,
1659 u8 *num_inline_au)
1660 {
1661 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1662 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1663
1664 /* Get the number of accel units allocated for each service */
1665 snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
1666 if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1667 return EFAULT;
1668 if (compat_strtou8(val, 10, num_cy_au))
1669 return EFAULT;
1670 snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
1671 if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1672 return EFAULT;
1673 if (compat_strtou8(val, 10, num_dc_au))
1674 return EFAULT;
1675
1676 snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
1677 if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1678 return EFAULT;
1679 if (compat_strtou8(val, 10, num_inline_au))
1680 return EFAULT;
1681
1682 return 0;
1683 }
1684
1685 /* Read the inline ingress/egress configuration and return the number
1686  * of AEs reserved for ingress among the accel units allocated to the
1687  * inline service; the remaining inline AEs handle egress.
1689 */
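/* Illustrative sketch with hypothetical figures (the exact rounding
 * threshold depends on ADF_C4XXX_ROUND_LIMIT): with 12 inline AEs and a
 * 60/40 ingress/egress split, 12 * 60 / 100 = 7 AEs are reserved for
 * ingress, plus one more if the remainder (12 * 60 % 100 = 20) exceeds
 * ADF_C4XXX_ROUND_LIMIT.
 */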
static int
adf_get_inline_config(struct adf_accel_dev *accel_dev, u32 *num_ingress_aes)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        char *value;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        unsigned long ingress, egress = 0;
        struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
        u32 num_inline_aes = 0, num_ingress_ae = 0;
        u32 i = 0;

        snprintf(key, sizeof(key), ADF_INLINE_INGRESS);
        if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
                device_printf(GET_DEV(accel_dev), "Failed to find ingress\n");
                return EFAULT;
        }
        value = val;
        value = strsep(&value, ADF_C4XXX_PERCENTAGE);
        if (compat_strtoul(value, 10, &ingress))
                return EFAULT;

        snprintf(key, sizeof(key), ADF_INLINE_EGRESS);
        if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
                device_printf(GET_DEV(accel_dev), "Failed to find egress\n");
                return EFAULT;
        }
        value = val;
        value = strsep(&value, ADF_C4XXX_PERCENTAGE);
        if (compat_strtoul(value, 10, &egress))
                return EFAULT;

        if (ingress + egress != ADF_C4XXX_100) {
                device_printf(GET_DEV(accel_dev),
                    "The sum of ingress and egress should be 100\n");
                return EFAULT;
        }

        for (i = 0; i < num_au; i++) {
                if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO)
                        num_inline_aes += accel_unit[i].num_ae;
        }

        num_ingress_ae = num_inline_aes * ingress / ADF_C4XXX_100;
        if (((num_inline_aes * ingress) % ADF_C4XXX_100) >
            ADF_C4XXX_ROUND_LIMIT)
                num_ingress_ae++;

        *num_ingress_aes = num_ingress_ae;
        return 0;
}

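/* Split the AEs of the inline accel units into ingress and egress sets based
 * on the configured ingress percentage, and mark inline AEs as also usable
 * for asymmetric crypto.
 */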
static int
adf_set_inline_ae_mask(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        struct adf_accel_unit_info *au_info = accel_dev->au_info;
        struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
        u32 num_ingress_ae = 0;
        u32 ingress_msk = 0;
        u32 i, j, ae_mask;

        if (adf_get_inline_config(accel_dev, &num_ingress_ae))
                return EFAULT;

        for (i = 0; i < num_au; i++) {
                j = 0;
                if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO) {
                        /* AEs with inline service enabled are also used
                         * for asymmetric crypto
                         */
                        au_info->asym_ae_msk |= accel_unit[i].ae_mask;
                        ae_mask = accel_unit[i].ae_mask;
                        while (num_ingress_ae && ae_mask) {
                                if (ae_mask & 1) {
                                        ingress_msk |= BIT(j);
                                        num_ingress_ae--;
                                }
                                ae_mask = ae_mask >> 1;
                                j++;
                        }
                        au_info->inline_ingress_msk |= ingress_msk;

                        au_info->inline_egress_msk |=
                            ~(au_info->inline_ingress_msk) &
                            accel_unit[i].ae_mask;
                }
        }

        return 0;
}

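/* Build the per-service AE masks (asym, sym and dc) from the accel unit
 * assignments, enabling crypto AEs only for the services the user enabled
 * in the configuration.
 */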
static int
adf_set_ae_mask(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        struct adf_accel_unit_info *au_info = accel_dev->au_info;
        struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        char *token, *cur_str;
        bool asym_en = false, sym_en = false;
        u32 i;

        /* Get the services enabled by user */
        snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
        if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
                return EFAULT;
        cur_str = val;
        token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
        while (token) {
                if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
                        asym_en = true;
                if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
                        sym_en = true;
                if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) {
                        sym_en = true;
                        asym_en = true;
                }
                token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
        }

        for (i = 0; i < num_au; i++) {
                if (accel_unit[i].services == ADF_ACCEL_CRYPTO) {
                        /* AEs that support crypto can perform both
                         * symmetric and asymmetric crypto, however
                         * we only enable the threads if the relevant
                         * service is also enabled
                         */
                        if (asym_en)
                                au_info->asym_ae_msk |= accel_unit[i].ae_mask;
                        if (sym_en)
                                au_info->sym_ae_msk |= accel_unit[i].ae_mask;
                } else if (accel_unit[i].services == ADF_ACCEL_COMPRESSION) {
                        au_info->dc_ae_msk |= accel_unit[i].comp_ae_mask;
                }
        }
        return 0;
}

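/* Allocate accel_dev->au_info and assign each accel unit to a service
 * (compression, inline or crypto) according to the user configuration.
 */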
static int
adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
{
        u8 num_cy_au, num_dc_au, num_inline_au;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        struct adf_accel_unit *accel_unit;
        const struct adf_ae_info *ae_info;
        int i;

        if (get_accel_unit_config(
                accel_dev, &num_cy_au, &num_dc_au, &num_inline_au)) {
                device_printf(GET_DEV(accel_dev), "Invalid accel unit cfg\n");
                return EFAULT;
        }

        if (check_accel_unit_config(
                accel_dev, num_cy_au, num_dc_au, num_inline_au))
                return EFAULT;

        accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
        if (!accel_dev->au_info)
                return ENOMEM;

        accel_dev->au_info->num_cy_au = num_cy_au;
        accel_dev->au_info->num_dc_au = num_dc_au;
        accel_dev->au_info->num_inline_au = num_inline_au;

        if (get_ae_info(hw_data, &ae_info)) {
                device_printf(GET_DEV(accel_dev), "Failed to get ae info\n");
                goto err_au_info;
        }
        accel_dev->au_info->ae_info = ae_info;

        if (get_accel_unit(hw_data, &accel_unit)) {
                device_printf(GET_DEV(accel_dev), "Failed to get accel unit\n");
                goto err_ae_info;
        }

        /* Enable compression accel units.
         * Accel units with 4 AEs are reserved for compression first.
         */
        for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
                if (accel_unit[i].num_ae == ADF_C4XXX_4_AE) {
                        accel_unit[i].services = ADF_ACCEL_COMPRESSION;
                        num_dc_au--;
                }
        }
        for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
                if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
                        accel_unit[i].services = ADF_ACCEL_COMPRESSION;
                        num_dc_au--;
                }
        }

        /* Enable inline accel units */
        for (i = 0; i < num_au && num_inline_au > 0; i++) {
                if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
                        accel_unit[i].services = ADF_ACCEL_INLINE_CRYPTO;
                        num_inline_au--;
                }
        }

        /* Enable crypto accel units */
        for (i = 0; i < num_au && num_cy_au > 0; i++) {
                if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
                        accel_unit[i].services = ADF_ACCEL_CRYPTO;
                        num_cy_au--;
                }
        }
        accel_dev->au_info->au = accel_unit;
        return 0;

err_ae_info:
        accel_dev->au_info->ae_info = NULL;
err_au_info:
        kfree(accel_dev->au_info);
        accel_dev->au_info = NULL;
        return EFAULT;
}

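/* Reset accel unit service assignments and free accel_dev->au_info. */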
static void
adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        int i;

        if (accel_dev->au_info) {
                if (accel_dev->au_info->au) {
                        for (i = 0; i < num_au; i++) {
                                accel_dev->au_info->au[i].services =
                                    ADF_ACCEL_SERVICE_NULL;
                        }
                }
                accel_dev->au_info->au = NULL;
                accel_dev->au_info->ae_info = NULL;
                kfree(accel_dev->au_info);
                accel_dev->au_info = NULL;
        }
}

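/* Reset the ARAM and inline (ICI/ICE) hardware units by asserting the IXP
 * reset generic register while the generic clocks are disabled, then
 * releasing reset and re-enabling the clocks.
 */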
static inline void
adf_c4xxx_reset_hw_units(struct adf_accel_dev *accel_dev)
{
        struct resource *pmisc =
            (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

        u32 global_clk_enable = ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ARAM |
            ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICI_ENABLE |
            ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICE_ENABLE;

        u32 ixp_reset_generic = ADF_C4XXX_IXP_RESET_GENERIC_ARAM |
            ADF_C4XXX_IXP_RESET_GENERIC_INLINE_EGRESS |
            ADF_C4XXX_IXP_RESET_GENERIC_INLINE_INGRESS;

        /* To properly reset each of the units, the driver must:
         * 1) Assert the reset state using the IXP reset generic
         *    register;
         * 2) Disable the generic clock;
         * 3) Take the device out of reset by clearing the IXP reset
         *    generic register;
         * 4) Re-enable the generic clock.
         */
        ADF_CSR_WR(pmisc, ADF_C4XXX_IXP_RESET_GENERIC, ixp_reset_generic);
        ADF_CSR_WR(pmisc,
            ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
            ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_DISABLE_ALL);
        ADF_CSR_WR(pmisc,
            ADF_C4XXX_IXP_RESET_GENERIC,
            ADF_C4XXX_IXP_RESET_GENERIC_OUT_OF_RESET_TRIGGER);
        ADF_CSR_WR(pmisc,
            ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
            global_clk_enable);
}

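/* Top-level accel unit initialisation: assign services to accel units, build
 * the AE masks, set up the ARAM regions and, when the inline service is
 * enabled, program the inline hardware and interrupts, then export the
 * debug FS information.
 */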
static int
adf_init_accel_units(struct adf_accel_dev *accel_dev)
{
        struct resource *csr =
            (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

        if (adf_init_accel_unit_services(accel_dev))
                return EFAULT;

        /* Set cy and dc enabled AE masks */
        if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_dc_au) {
                if (adf_set_ae_mask(accel_dev)) {
                        device_printf(GET_DEV(accel_dev),
                            "Failed to set ae masks\n");
                        goto err_au;
                }
        }
        /* Set ingress/egress ae mask if inline is enabled */
        if (accel_dev->au_info->num_inline_au) {
                if (adf_set_inline_ae_mask(accel_dev)) {
                        device_printf(GET_DEV(accel_dev),
                            "Failed to set inline ae masks\n");
                        goto err_au;
                }
        }
        /* Define ARAM regions */
        if (c4xxx_init_aram_config(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                    "Failed to init aram config\n");
                goto err_au;
        }
        /* Configure h/w registers for inline operations */
        if (accel_dev->au_info->num_inline_au > 0)
                /* Initialise configuration parsing registers */
                if (c4xxx_init_inline_hw(accel_dev))
                        goto err_au;

        c4xxx_set_sadb_size(accel_dev);

        if (accel_dev->au_info->num_inline_au > 0) {
                /* ici/ice interrupt shall be enabled after msi-x enabled */
                ADF_CSR_WR(csr,
                    ADF_C4XXX_ERRMSK11,
                    ADF_C4XXX_ERRMSK11_ERR_DISABLE_ICI_ICE_INTR);
                adf_enable_inline_notification(accel_dev);
        }

        update_hw_capability(accel_dev);
        if (adf_add_debugfs_info(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                    "Failed to add debug FS information\n");
                goto err_au;
        }
        return 0;

err_au:
        /* Free and clear accel unit data structures */
        adf_exit_accel_unit_services(accel_dev);
        return EFAULT;
}

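/* Undo adf_init_accel_units: release accel unit services, the ARAM mapping
 * and the debug FS entries.
 */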
static void
adf_exit_accel_units(struct adf_accel_dev *accel_dev)
{
        adf_exit_accel_unit_services(accel_dev);
        /* Free aram mapping structure */
        c4xxx_exit_aram_config(accel_dev);
        /* Remove entries in debug FS */
        adf_remove_debugfs_info(accel_dev);
}

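/* Return the firmware object name to load for a given accel unit service,
 * selecting the symmetric-only crypto object on SKUs that support only
 * symmetric cryptography.
 */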
static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
    enum adf_accel_unit_services service)
{
        u32 capabilities = GET_HW_DATA(accel_dev)->accel_capabilities_mask;
        bool sym_only_sku = false;

        /* Check if SKU is capable only of symmetric cryptography
         * via device capabilities.
         */
        if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
            !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
            !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
                sym_only_sku = true;

        switch (service) {
        case ADF_ACCEL_INLINE_CRYPTO:
                return ADF_C4XXX_INLINE_OBJ;
        case ADF_ACCEL_CRYPTO:
                if (sym_only_sku)
                        return ADF_C4XXX_SYM_OBJ;
                else
                        return ADF_C4XXX_CY_OBJ;
        case ADF_ACCEL_COMPRESSION:
                return ADF_C4XXX_DC_OBJ;
        default:
                return NULL;
        }
}

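/* Return the number of firmware objects to load, derived from the highest
 * service bit set in the device service mask.
 */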
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
        u32 srv = 0;
        u32 max_srv_id = 0;
        unsigned long service_mask = accel_dev->hw_device->service_mask;

        /* The objects number corresponds to the number of services */
        for_each_set_bit(srv, &service_mask, ADF_C4XXX_MAX_OBJ)
        {
                max_srv_id = srv;
        }

        return (max_srv_id + 1);
}

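/* Return the combined AE mask of all accel units assigned to the given
 * service.
 */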
static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
    enum adf_accel_unit_services service)
{
        u32 ae_mask = 0;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_au = hw_data->get_num_accel_units(hw_data);
        struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
        u32 i = 0;

        if (service == ADF_ACCEL_SERVICE_NULL)
                return 0;

        for (i = 0; i < num_au; i++) {
                if (accel_unit[i].services == service)
                        ae_mask |= accel_unit[i].ae_mask;
        }
        return ae_mask;
}

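/* Set or clear the valid bit in every AE-thread-to-PCIe-function mapping
 * register, enabling or disabling IOV thread mapping.
 */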
static void
configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
        struct resource *addr;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 num_aes = hw_data->get_num_aes(hw_data);
        u32 reg = 0x0;
        u32 i;

        addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;

        /* Set/Unset Valid bits in AE Thread to PCIe Function Mapping */
        for (i = 0; i < ADF_C4XXX_AE2FUNC_REG_PER_AE * num_aes; i++) {
                reg = ADF_CSR_RD(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
                    i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE);
                if (enable)
                        reg |= ADF_C4XXX_AE2FUNC_MAP_VALID;
                else
                        reg &= ~ADF_C4XXX_AE2FUNC_MAP_VALID;
                ADF_CSR_WR(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
                    i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE,
                    reg);
        }
}

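/* Populate the c4xxx hw_data structure with device constants and the
 * function pointers used by the common QAT driver code.
 */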
void
adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class = &c4xxx_class;
        hw_data->instance_id = c4xxx_class.instances++;
        hw_data->num_banks = ADF_C4XXX_ETR_MAX_BANKS;
        hw_data->num_rings_per_bank = ADF_C4XXX_NUM_RINGS_PER_BANK;
        hw_data->num_accel = ADF_C4XXX_MAX_ACCELERATORS;
        hw_data->num_engines = ADF_C4XXX_MAX_ACCELENGINES;
        hw_data->num_logical_accel = 1;
        hw_data->tx_rx_gap = ADF_C4XXX_RX_RINGS_OFFSET;
        hw_data->tx_rings_mask = ADF_C4XXX_TX_RINGS_MASK;
        hw_data->alloc_irq = adf_isr_resource_alloc;
        hw_data->free_irq = adf_isr_resource_free;
        hw_data->enable_error_correction = adf_enable_error_correction;
        hw_data->init_ras = adf_init_ras;
        hw_data->exit_ras = adf_exit_ras;
        hw_data->ras_interrupts = adf_ras_interrupts;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
        hw_data->get_num_aes = get_num_aes;
        hw_data->get_num_accel_units = get_num_accel_units;
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
        hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_arb_info = get_arb_info;
        hw_data->get_admin_info = get_admin_info;
        hw_data->get_errsou_offset = get_errsou_offset;
        hw_data->get_clock_speed = get_clock_speed;
        hw_data->get_eth_doorbell_msg = get_eth_doorbell_msg;
        hw_data->get_sku = get_sku;
        hw_data->heartbeat_ctr_num = ADF_NUM_THREADS_PER_AE;
        hw_data->check_prod_sku = c4xxx_check_prod_sku;
        hw_data->fw_name = ADF_C4XXX_FW;
        hw_data->fw_mmp_name = ADF_C4XXX_MMP;
        hw_data->get_obj_name = get_obj_name;
        hw_data->get_objs_num = get_objs_num;
        hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb_c4xxx;
        hw_data->exit_arb = adf_exit_arb_c4xxx;
        hw_data->disable_arb = adf_disable_arb;
        hw_data->enable_ints = adf_enable_ints;
        hw_data->set_ssm_wdtimer = c4xxx_set_ssm_wdtimer;
        hw_data->check_slice_hang = c4xxx_check_slice_hang;
        hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
        hw_data->restore_device = adf_c4xxx_dev_restore;
        hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->init_accel_units = adf_init_accel_units;
        hw_data->reset_hw_units = adf_c4xxx_reset_hw_units;
        hw_data->exit_accel_units = adf_exit_accel_units;
        hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
        hw_data->get_heartbeat_status = adf_get_heartbeat_status;
        hw_data->get_ae_clock = get_ae_clock;
        hw_data->clock_frequency = ADF_C4XXX_AE_FREQ;
        hw_data->measure_clock = measure_clock;
        hw_data->add_pke_stats = adf_pke_replay_counters_add_c4xxx;
        hw_data->remove_pke_stats = adf_pke_replay_counters_remove_c4xxx;
        hw_data->add_misc_error = adf_misc_error_add_c4xxx;
        hw_data->remove_misc_error = adf_misc_error_remove_c4xxx;
        hw_data->extended_dc_capabilities = 0;
        hw_data->get_storage_enabled = get_storage_enabled;
        hw_data->query_storage_cap = 0;
        hw_data->get_accel_cap = c4xxx_get_hw_cap;
        hw_data->configure_accel_units = c4xxx_configure_accel_units;
        hw_data->pre_reset = adf_dev_pre_reset;
        hw_data->post_reset = adf_dev_post_reset;
        hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
        hw_data->count_ras_event = adf_fw_count_ras_event;
        hw_data->config_device = adf_config_device;
        hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;

        adf_gen2_init_hw_csr_info(&hw_data->csr_info);
        hw_data->csr_info.arb_enable_mask = 0xF;
}

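/* Release the per-instance reference taken in adf_init_hw_data_c4xxx(). */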
void
adf_clean_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class->instances--;
}

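/* Remove a sysctl OID created for this device, logging any failure. */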
void
remove_oid(struct adf_accel_dev *accel_dev, struct sysctl_oid *oid)
{
        struct sysctl_ctx_list *qat_sysctl_ctx;
        int ret;

        qat_sysctl_ctx =
            device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);

        ret = sysctl_ctx_entry_del(qat_sysctl_ctx, oid);
        if (ret)
                device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");

        ret = sysctl_remove_oid(oid, 1, 1);
        if (ret)
                device_printf(GET_DEV(accel_dev), "Failed to delete oid\n");
}