1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include <adf_accel_devices.h>
5 #include <adf_common_drv.h>
6 #include <adf_cfg.h>
7 #include <adf_pf2vf_msg.h>
8 #include <adf_dev_err.h>
9 #include <adf_gen2_hw_data.h>
10 #include "adf_200xx_hw_data.h"
11 #include "icp_qat_hw.h"
12 #include "adf_heartbeat.h"
13
/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_200XX_MAX_ACCELENGINES] =
    { 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA };

/* SKU identifiers for the 200xx device family. */
enum { DEV_200XX_SKU_1 = 0, DEV_200XX_SKU_2 = 1, DEV_200XX_SKU_3 = 2 };

/* Working copy of thrd_to_arb_map with entries for fused-off AEs zeroed. */
static u32 thrd_to_arb_map_gen[ADF_200XX_MAX_ACCELENGINES] = { 0 };

/* Device-class descriptor shared by all 200xx instances. */
static struct adf_hw_device_class qat_200xx_class = {.name =
							 ADF_200XX_DEVICE_NAME,
						     .type = DEV_200XX,
						     .instances = 0 };
26
27 static u32
28 get_accel_mask(struct adf_accel_dev *accel_dev)
29 {
30 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
31
32 u32 fuse;
33 u32 straps;
34
35 fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4);
36 straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4);
37
38 return (~(fuse | straps)) >> ADF_200XX_ACCELERATORS_REG_OFFSET &
39 ADF_200XX_ACCELERATORS_MASK;
40 }
41
42 static u32
43 get_ae_mask(struct adf_accel_dev *accel_dev)
44 {
45 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
46 u32 fuse;
47 u32 me_straps;
48 u32 me_disable;
49 u32 ssms_disabled;
50
51 fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4);
52 me_straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4);
53
54 /* If SSMs are disabled, then disable the corresponding MEs */
55 ssms_disabled =
56 (~get_accel_mask(accel_dev)) & ADF_200XX_ACCELERATORS_MASK;
57 me_disable = 0x3;
58 while (ssms_disabled) {
59 if (ssms_disabled & 1)
60 me_straps |= me_disable;
61 ssms_disabled >>= 1;
62 me_disable <<= 2;
63 }
64
65 return (~(fuse | me_straps)) & ADF_200XX_ACCELENGINES_MASK;
66 }
67
68 static u32
69 get_num_accels(struct adf_hw_device_data *self)
70 {
71 u32 i, ctr = 0;
72
73 if (!self || !self->accel_mask)
74 return 0;
75
76 for (i = 0; i < ADF_200XX_MAX_ACCELERATORS; i++) {
77 if (self->accel_mask & (1 << i))
78 ctr++;
79 }
80 return ctr;
81 }
82
83 static u32
84 get_num_aes(struct adf_hw_device_data *self)
85 {
86 u32 i, ctr = 0;
87
88 if (!self || !self->ae_mask)
89 return 0;
90
91 for (i = 0; i < ADF_200XX_MAX_ACCELENGINES; i++) {
92 if (self->ae_mask & (1 << i))
93 ctr++;
94 }
95 return ctr;
96 }
97
/* Return the index of the PMISC (CSR) PCI BAR. */
static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_200XX_PMISC_BAR;
}
103
/* Return the index of the ETR (ring transport) PCI BAR. */
static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_200XX_ETR_BAR;
}
109
/* Return the SRAM BAR index; 200xx has no dedicated SRAM BAR, so 0. */
static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
	return 0;
}
115
116 static enum dev_sku_info
117 get_sku(struct adf_hw_device_data *self)
118 {
119 int aes = get_num_aes(self);
120
121 if (aes == 6)
122 return DEV_SKU_4;
123
124 return DEV_SKU_UNKNOWN;
125 }
126
127 static void
128 adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
129 u32 const **arb_map_config)
130 {
131 int i;
132 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
133
134 for (i = 0; i < ADF_200XX_MAX_ACCELENGINES; i++) {
135 thrd_to_arb_map_gen[i] = 0;
136 if (hw_device->ae_mask & (1 << i))
137 thrd_to_arb_map_gen[i] = thrd_to_arb_map[i];
138 }
139 adf_cfg_gen_dispatch_arbiter(accel_dev,
140 thrd_to_arb_map,
141 thrd_to_arb_map_gen,
142 ADF_200XX_MAX_ACCELENGINES);
143 *arb_map_config = thrd_to_arb_map_gen;
144 }
145
/* Return the PF-to-VF doorbell CSR offset for VF number i. */
static u32
get_pf2vf_offset(u32 i)
{
	return ADF_200XX_PF2VF_OFFSET(i);
}
151
/* Return the VF interrupt-mask CSR offset for VF number i. */
static u32
get_vintmsk_offset(u32 i)
{
	return ADF_200XX_VINTMSK_OFFSET(i);
}
157
/* Fill in the arbiter CSR offsets for this device generation. */
static void
get_arb_info(struct arb_info *arb_csrs_info)
{
	arb_csrs_info->arbiter_offset = ADF_200XX_ARB_OFFSET;
	arb_csrs_info->wrk_thd_2_srv_arb_map =
	    ADF_200XX_ARB_WRK_2_SER_MAP_OFFSET;
	arb_csrs_info->wrk_cfg_offset = ADF_200XX_ARB_WQCFG_OFFSET;
}
166
/* Fill in the admin-interface CSR offsets (mailbox and message regs). */
static void
get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_200XX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_200XX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_200XX_ADMINMSGLR_OFFSET;
}
174
/* Report the ERRSOU3/ERRSOU5 error-source CSR offsets. */
static void
get_errsou_offset(u32 *errsou3, u32 *errsou5)
{
	*errsou3 = ADF_200XX_ERRSOU3;
	*errsou5 = ADF_200XX_ERRSOU5;
}
181
/* Return the CPP clock in Hz, derived from the measured AE clock. */
static u32
get_clock_speed(struct adf_hw_device_data *self)
{
	/* CPP clock is half high-speed clock */
	return self->clock_frequency / 2;
}
188
/*
 * Unmask correctable-error interrupts and turn on error detection /
 * reporting on the RI and TI CPP bus interfaces and the CFC.
 * NOTE(review): the CSR write order follows the hardware bring-up
 * sequence; do not reorder without consulting the device EAS.
 */
static void
adf_enable_error_interrupts(struct resource *csr)
{
	ADF_CSR_WR(csr, ADF_ERRMSK0, ADF_200XX_ERRMSK0_CERR); /* ME0-ME3 */
	ADF_CSR_WR(csr, ADF_ERRMSK1, ADF_200XX_ERRMSK1_CERR); /* ME4-ME5 */
	ADF_CSR_WR(csr, ADF_ERRMSK5, ADF_200XX_ERRMSK5_CERR); /* SSM2 */

	/* Reset everything except VFtoPF1_16. */
	adf_csr_fetch_and_and(csr, ADF_ERRMSK3, ADF_200XX_VF2PF1_16);

	/* RI CPP bus interface error detection and reporting. */
	ADF_CSR_WR(csr, ADF_200XX_RICPPINTCTL, ADF_200XX_RICPP_EN);

	/* TI CPP bus interface error detection and reporting. */
	ADF_CSR_WR(csr, ADF_200XX_TICPPINTCTL, ADF_200XX_TICPP_EN);

	/* Enable CFC Error interrupts and logging. */
	ADF_CSR_WR(csr, ADF_200XX_CPP_CFC_ERR_CTRL, ADF_200XX_CPP_CFC_UE);
}
208
/*
 * Mask both uncorrectable and correctable error interrupts for all
 * error sources (MEs, CPP push/pull, RI, TI, SSMs, CFC).
 */
static void
adf_disable_error_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;

	/* ME0-ME3 */
	ADF_CSR_WR(csr,
		   ADF_ERRMSK0,
		   ADF_200XX_ERRMSK0_UERR | ADF_200XX_ERRMSK0_CERR);
	/* ME4-ME5 */
	ADF_CSR_WR(csr,
		   ADF_ERRMSK1,
		   ADF_200XX_ERRMSK1_UERR | ADF_200XX_ERRMSK1_CERR);
	/* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
	ADF_CSR_WR(csr, ADF_ERRMSK3, ADF_200XX_ERRMSK3_UERR);
	/* SSM2 */
	ADF_CSR_WR(csr, ADF_ERRMSK5, ADF_200XX_ERRMSK5_UERR);
}
228
229 static int
230 adf_check_uncorrectable_error(struct adf_accel_dev *accel_dev)
231 {
232 struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR];
233 struct resource *csr = misc_bar->virt_addr;
234
235 u32 errsou0 = ADF_CSR_RD(csr, ADF_ERRSOU0) & ADF_200XX_ERRMSK0_UERR;
236 u32 errsou1 = ADF_CSR_RD(csr, ADF_ERRSOU1) & ADF_200XX_ERRMSK1_UERR;
237 u32 errsou3 = ADF_CSR_RD(csr, ADF_ERRSOU3) & ADF_200XX_ERRMSK3_UERR;
238 u32 errsou5 = ADF_CSR_RD(csr, ADF_ERRSOU5) & ADF_200XX_ERRMSK5_UERR;
239
240 return (errsou0 | errsou1 | errsou3 | errsou5);
241 }
242
/*
 * Configure MMP (modular math processor) ECC/error reporting for every
 * enabled accelerator.  Reporting and correction are enabled only when
 * the device exposes the asymmetric-crypto (PKE) capability; otherwise
 * they are explicitly disabled.  The MMP slices are powered up around
 * the register accesses and returned to their power-down state after.
 */
static void
adf_enable_mmp_error_correction(struct resource *csr,
				struct adf_hw_device_data *hw_data)
{
	unsigned int dev, mmp;
	unsigned int mask;

	/* Enable MMP Logging */
	for (dev = 0, mask = hw_data->accel_mask; mask; dev++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* Set power-up so the MMP CSRs below are accessible. */
		adf_csr_fetch_and_and(csr,
				      ADF_200XX_SLICEPWRDOWN(dev),
				      ~ADF_200XX_MMP_PWR_UP_MSK);

		if (hw_data->accel_capabilities_mask &
		    ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) {
			for (mmp = 0; mmp < ADF_MAX_MMP; ++mmp) {
				/*
				 * The device supports PKE,
				 * so enable error reporting from MMP memory
				 */
				adf_csr_fetch_and_or(csr,
						     ADF_UERRSSMMMP(dev, mmp),
						     ADF_200XX_UERRSSMMMP_EN);
				/*
				 * The device supports PKE,
				 * so enable error correction from MMP memory
				 */
				adf_csr_fetch_and_or(csr,
						     ADF_CERRSSMMMP(dev, mmp),
						     ADF_200XX_CERRSSMMMP_EN);
			}
		} else {
			for (mmp = 0; mmp < ADF_MAX_MMP; ++mmp) {
				/*
				 * The device doesn't support PKE,
				 * so disable error reporting from MMP memory
				 */
				adf_csr_fetch_and_and(csr,
						      ADF_UERRSSMMMP(dev, mmp),
						      ~ADF_200XX_UERRSSMMMP_EN);
				/*
				 * The device doesn't support PKE,
				 * so disable error correction from MMP memory
				 */
				adf_csr_fetch_and_and(csr,
						      ADF_CERRSSMMMP(dev, mmp),
						      ~ADF_200XX_CERRSSMMMP_EN);
			}
		}

		/* Restore power-down value */
		adf_csr_fetch_and_or(csr,
				     ADF_200XX_SLICEPWRDOWN(dev),
				     ADF_200XX_MMP_PWR_UP_MSK);

		/* Disabling correctable error interrupts. */
		ADF_CSR_WR(csr,
			   ADF_200XX_INTMASKSSM(dev),
			   ADF_200XX_INTMASKSSM_UERR);
	}
}
307
/*
 * Enable ECC detection/correction across the device: per-AE context
 * ECC and parity correction, per-accelerator shared-memory and
 * push/pull error reporting, then the error interrupts and the MMP
 * configuration.  The bit-scan loops only visit engines/accelerators
 * present in the respective enable masks.
 */
static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;
	unsigned int val, i;
	unsigned int mask;

	/* Enable Accel Engine error detection & correction */
	mask = hw_device->ae_mask;
	for (i = 0; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		val = ADF_CSR_RD(csr, ADF_200XX_AE_CTX_ENABLES(i));
		val |= ADF_200XX_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_200XX_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_200XX_AE_MISC_CONTROL(i));
		val |= ADF_200XX_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_200XX_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	mask = hw_device->accel_mask;
	for (i = 0; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		val = ADF_CSR_RD(csr, ADF_200XX_UERRSSMSH(i));
		val |= ADF_200XX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_200XX_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_200XX_CERRSSMSH(i));
		val |= ADF_200XX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_200XX_CERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_PPERR(i));
		val |= ADF_200XX_PPERR_EN;
		ADF_CSR_WR(csr, ADF_PPERR(i), val);
	}

	adf_enable_error_interrupts(csr);
	adf_enable_mmp_error_correction(csr, hw_device);
}
349
/* Unmask bundle and miscellaneous interrupts via the SMIA registers. */
static void
adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;

	addr = (&GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_200XX_SMIAPF0_MASK_OFFSET, ADF_200XX_SMIA0_MASK);
	ADF_CSR_WR(addr, ADF_200XX_SMIAPF1_MASK_OFFSET, ADF_200XX_SMIA1_MASK);
}
361
/* Return the AE clock rate used for firmware timers. */
static u32
get_ae_clock(struct adf_hw_device_data *self)
{
	/*
	 * Clock update interval is <16> ticks for 200xx.
	 */
	return self->clock_frequency / 16;
}
370
371 static int
372 get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled)
373 {
374 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
375 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
376
377 strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key));
378 if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) {
379 if (kstrtouint(val, 0, storage_enabled))
380 return -EFAULT;
381 }
382 return 0;
383 }
384
385 static int
386 measure_clock(struct adf_accel_dev *accel_dev)
387 {
388 u32 frequency;
389 int ret = 0;
390
391 ret = adf_dev_measure_clock(accel_dev,
392 &frequency,
393 ADF_200XX_MIN_AE_FREQ,
394 ADF_200XX_MAX_AE_FREQ);
395 if (ret)
396 return ret;
397
398 accel_dev->hw_device->clock_frequency = frequency;
399 return 0;
400 }
401
402 static u32
403 adf_200xx_get_hw_cap(struct adf_accel_dev *accel_dev)
404 {
405 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
406 u32 legfuses;
407 u32 capabilities;
408 u32 straps;
409 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
410 u32 fuses = hw_data->fuses;
411
412 /* Read accelerator capabilities mask */
413 legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
414 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC +
415 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC +
416 ICP_ACCEL_CAPABILITIES_CIPHER +
417 ICP_ACCEL_CAPABILITIES_AUTHENTICATION +
418 ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_ZUC +
419 ICP_ACCEL_CAPABILITIES_SHA3 + ICP_ACCEL_CAPABILITIES_HKDF +
420 ICP_ACCEL_CAPABILITIES_ECEDMONT +
421 ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
422 if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
423 capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
424 ICP_ACCEL_CAPABILITIES_CIPHER |
425 ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN);
426 if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
427 capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
428 if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
429 capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
430 ICP_ACCEL_CAPABILITIES_ECEDMONT);
431 if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
432 capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
433 if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE)
434 capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC;
435 if (legfuses & ICP_ACCEL_MASK_SHA3_SLICE)
436 capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3;
437
438 straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4);
439 if ((straps | fuses) & ADF_200XX_POWERGATE_PKE)
440 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
441 if ((straps | fuses) & ADF_200XX_POWERGATE_CY)
442 capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
443
444 return capabilities;
445 }
446
/*
 * Return the firmware object name; 200xx uses a single image for all
 * services, so the service argument is ignored.
 */
static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
	     enum adf_accel_unit_services service)
{
	return ADF_CXXX_AE_FW_NAME_CUSTOM1;
}
453
/* 200xx loads exactly one firmware object. */
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	return 1;
}
459
/* All enabled AEs run the single firmware object, for any service. */
static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services services)
{
	return accel_dev->hw_device->ae_mask;
}
466
467 void
468 adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
469 {
470 hw_data->dev_class = &qat_200xx_class;
471 hw_data->instance_id = qat_200xx_class.instances++;
472 hw_data->num_banks = ADF_200XX_ETR_MAX_BANKS;
473 hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
474 hw_data->num_accel = ADF_200XX_MAX_ACCELERATORS;
475 hw_data->num_logical_accel = 1;
476 hw_data->num_engines = ADF_200XX_MAX_ACCELENGINES;
477 hw_data->tx_rx_gap = ADF_200XX_RX_RINGS_OFFSET;
478 hw_data->tx_rings_mask = ADF_200XX_TX_RINGS_MASK;
479 hw_data->alloc_irq = adf_isr_resource_alloc;
480 hw_data->free_irq = adf_isr_resource_free;
481 hw_data->enable_error_correction = adf_enable_error_correction;
482 hw_data->check_uncorrectable_error = adf_check_uncorrectable_error;
483 hw_data->print_err_registers = adf_print_err_registers;
484 hw_data->disable_error_interrupts = adf_disable_error_interrupts;
485 hw_data->get_accel_mask = get_accel_mask;
486 hw_data->get_ae_mask = get_ae_mask;
487 hw_data->get_num_accels = get_num_accels;
488 hw_data->get_num_aes = get_num_aes;
489 hw_data->get_sram_bar_id = get_sram_bar_id;
490 hw_data->get_etr_bar_id = get_etr_bar_id;
491 hw_data->get_misc_bar_id = get_misc_bar_id;
492 hw_data->get_pf2vf_offset = get_pf2vf_offset;
493 hw_data->get_vintmsk_offset = get_vintmsk_offset;
494 hw_data->get_arb_info = get_arb_info;
495 hw_data->get_admin_info = get_admin_info;
496 hw_data->get_errsou_offset = get_errsou_offset;
497 hw_data->get_clock_speed = get_clock_speed;
498 hw_data->get_sku = get_sku;
499 hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
500 hw_data->fw_name = ADF_200XX_FW;
501 hw_data->fw_mmp_name = ADF_200XX_MMP;
502 hw_data->init_admin_comms = adf_init_admin_comms;
503 hw_data->exit_admin_comms = adf_exit_admin_comms;
504 hw_data->disable_iov = adf_disable_sriov;
505 hw_data->send_admin_init = adf_send_admin_init;
506 hw_data->init_arb = adf_init_gen2_arb;
507 hw_data->exit_arb = adf_exit_arb;
508 hw_data->get_arb_mapping = adf_get_arbiter_mapping;
509 hw_data->enable_ints = adf_enable_ints;
510 hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer;
511 hw_data->check_slice_hang = adf_check_slice_hang;
512 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
513 hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms;
514 hw_data->restore_device = adf_dev_restore;
515 hw_data->reset_device = adf_reset_flr;
516 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
517 hw_data->measure_clock = measure_clock;
518 hw_data->get_ae_clock = get_ae_clock;
519 hw_data->reset_device = adf_reset_flr;
520 hw_data->get_objs_num = get_objs_num;
521 hw_data->get_obj_name = get_obj_name;
522 hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
523 hw_data->get_accel_cap = adf_200xx_get_hw_cap;
524 hw_data->clock_frequency = ADF_200XX_AE_FREQ;
525 hw_data->extended_dc_capabilities = 0;
526 hw_data->get_storage_enabled = get_storage_enabled;
527 hw_data->query_storage_cap = 1;
528 hw_data->get_heartbeat_status = adf_get_heartbeat_status;
529 hw_data->get_ae_clock = get_ae_clock;
530 hw_data->storage_enable = 0;
531 hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
532 hw_data->config_device = adf_config_device;
533 hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
534 hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
535 hw_data->pre_reset = adf_dev_pre_reset;
536 hw_data->post_reset = adf_dev_post_reset;
537
538 adf_gen2_init_hw_csr_info(&hw_data->csr_info);
539 }
540
/* Undo adf_init_hw_data_200xx(): drop this device-class instance. */
void
adf_clean_hw_data_200xx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}