/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_

#include "qat_freebsd.h"
#include "adf_cfg_common.h"

#define ADF_CFG_NUM_SERVICES 4

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_200XX_DEVICE_NAME "200xx"
#define ADF_200XXVF_DEVICE_NAME "200xxvf"
#define ADF_C4XXX_DEVICE_NAME "c4xxx"
#define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_C62X_PCI_DEVICE_ID 0x37c8
#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
#define ADF_200XX_PCI_DEVICE_ID 0x18ee
#define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef
#define ADF_D15XX_PCI_DEVICE_ID 0x6f54
#define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55
#define ADF_C4XXX_PCI_DEVICE_ID 0x18a0
#define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_401XX_PCI_DEVICE_ID 0x4942

#define IS_QAT_GEN3(ID) ({ ((ID) == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
        return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID);
}

#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID))
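
/*
 * Usage sketch (illustrative only, not a call site from this driver): the
 * generation checks take a PCI device ID, e.g. one read from the attached
 * device_t with pci_get_device(9).
 *
 *	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(dev)))
 *		;	// gen3/gen4-specific setup goes here
 */
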
#define ADF_VF2PF_SET_SIZE 32
#define ADF_MAX_VF2PF_SET 4
#define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr) * ADF_VF2PF_SET_SIZE)
#define ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE)
#define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \
	({ \
		u32 vf_nr_ = (vf_nr); \
		BIT((vf_nr_) - ADF_VF2PF_SET_SIZE * ADF_VF2PF_VFNR_TO_SET(vf_nr_)); \
	})
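
/*
 * Worked example (illustrative): VF number 40 falls into set 40 / 32 = 1,
 * so ADF_VF2PF_SET_OFFSET(1) == 32, and its bit within that set is
 * 40 - 32 = 8, i.e. ADF_VF2PF_VFNR_TO_MASK(40) == BIT(8).
 */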

#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 32
#define ADF_DEVICE_NAME_PREFIX "qat_"
#define ADF_STOP_RETRY 50
#define ADF_NUM_THREADS_PER_AE (8)
#define ADF_AE_ADMIN_THREAD (7)
#define ADF_NUM_PKE_STRAND (2)
#define ADF_AE_STRAND0_THREAD (8)
#define ADF_AE_STRAND1_THREAD (9)
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define ADF_RINGS_PER_SRV_TYPE 2
#define ADF_THRD_ABILITY_BIT_LEN 4
#define ADF_THRD_ABILITY_MASK 0xf
#define ADF_VF_OFFSET 0x8
#define ADF_MAX_FUNC_PER_DEV 0x7
#define ADF_PCI_DEV_OFFSET 0x3

#define GET_SRV_TYPE(ena_srv_mask, srv) \
	(((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)
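
/*
 * Decode sketch (illustrative): each service slot occupies an
 * ADF_SRV_TYPE_BIT_LEN-wide field in the enabled-services mask, so, assuming
 * hw_data is a struct adf_hw_device_data pointer,
 *
 *	u8 svc = GET_SRV_TYPE(hw_data->ring_to_svc_map, 1);
 *
 * extracts the service token configured for slot 1.
 */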

#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)

#define ADF_DEFAULT_RING_TO_SRV_MAP \
	(CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
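
/*
 * Layout note (derived from the macro above, consistent with GET_SRV_TYPE):
 * ring pairs 0 and 1 default to CRYPTO, ring pair 2 is left unassigned (NA)
 * and ring pair 3 defaults to COMP, one service token per ring pair field.
 */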

enum adf_accel_capabilities {
        ADF_ACCEL_CAPABILITIES_NULL = 0,
        ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
        ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
        ADF_ACCEL_CAPABILITIES_CIPHER = 4,
        ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
        ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
        ADF_ACCEL_CAPABILITIES_DEPRECATED = 64,
        ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
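
/*
 * Note (assumption based on the power-of-two values above): these
 * capabilities act as bit flags and are typically OR'ed together into a
 * mask such as adf_hw_device_data.accel_capabilities_mask.
 */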

struct adf_bar {
        rman_res_t base_addr;
        struct resource *virt_addr;
        rman_res_t size;
} __packed;

struct adf_accel_msix {
        struct msix_entry *entries;
        u32 num_entries;
} __packed;

struct adf_accel_pci {
        device_t pci_dev;
        struct adf_accel_msix msix_entries;
        struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
        uint8_t revid;
        uint8_t sku;
        int node;
} __packed;

enum dev_state { DEV_DOWN = 0, DEV_UP };

enum dev_sku_info {
        DEV_SKU_1 = 0,
        DEV_SKU_2,
        DEV_SKU_3,
        DEV_SKU_4,
        DEV_SKU_VF,
        DEV_SKU_1_CY,
        DEV_SKU_2_CY,
        DEV_SKU_3_CY,
        DEV_SKU_UNKNOWN
};

static inline const char *
get_sku_info(enum dev_sku_info info)
{
        switch (info) {
        case DEV_SKU_1:
                return "SKU1";
        case DEV_SKU_1_CY:
                return "SKU1CY";
        case DEV_SKU_2:
                return "SKU2";
        case DEV_SKU_2_CY:
                return "SKU2CY";
        case DEV_SKU_3:
                return "SKU3";
        case DEV_SKU_3_CY:
                return "SKU3CY";
        case DEV_SKU_4:
                return "SKU4";
        case DEV_SKU_VF:
                return "SKUVF";
        case DEV_SKU_UNKNOWN:
        default:
                break;
        }
        return "Unknown SKU";
}

enum adf_accel_unit_services {
        ADF_ACCEL_SERVICE_NULL = 0,
        ADF_ACCEL_INLINE_CRYPTO = 1,
        ADF_ACCEL_CRYPTO = 2,
        ADF_ACCEL_COMPRESSION = 4,
        ADF_ACCEL_ASYM = 8,
        ADF_ACCEL_ADMIN = 16
};

struct adf_ae_info {
        u32 num_asym_thd;
        u32 num_sym_thd;
        u32 num_dc_thd;
} __packed;

struct adf_accel_unit {
        u8 au_mask;
        u32 accel_mask;
        u64 ae_mask;
        u64 comp_ae_mask;
        u32 num_ae;
        enum adf_accel_unit_services services;
} __packed;

struct adf_accel_unit_info {
        u32 inline_ingress_msk;
        u32 inline_egress_msk;
        u32 sym_ae_msk;
        u32 asym_ae_msk;
        u32 dc_ae_msk;
        u8 num_cy_au;
        u8 num_dc_au;
        u8 num_asym_au;
        u8 num_inline_au;
        struct adf_accel_unit *au;
        const struct adf_ae_info *ae_info;
} __packed;

struct adf_hw_aram_info {
        /* Inline Egress mask. "1" = AE is working with egress traffic */
        u32 inline_direction_egress_mask;
        /* Inline congestion management profiles set in config file */
        u32 inline_congest_mngt_profile;
        /* Initialise CY AE mask, "1" = AE is used for CY operations */
        u32 cy_ae_mask;
        /* Initialise DC AE mask, "1" = AE is used for DC operations */
        u32 dc_ae_mask;
        /* Number of long words used to define the ARAM regions */
        u32 num_aram_lw_entries;
        /* ARAM region definitions */
        u32 mmp_region_size;
        u32 mmp_region_offset;
        u32 skm_region_size;
        u32 skm_region_offset;
        /*
         * Defines size and offset of compression intermediate buffers stored
         * in ARAM (device's on-chip memory).
         */
        u32 inter_buff_aram_region_size;
        u32 inter_buff_aram_region_offset;
        u32 sadb_region_size;
        u32 sadb_region_offset;
} __packed;

struct adf_hw_device_class {
        const char *name;
        const enum adf_device_type type;
        uint32_t instances;
} __packed;

struct arb_info {
        u32 arbiter_offset;
        u32 wrk_thd_2_srv_arb_map;
        u32 wrk_cfg_offset;
} __packed;

struct admin_info {
        u32 admin_msg_ur;
        u32 admin_msg_lr;
        u32 mailbox_offset;
} __packed;

struct adf_hw_csr_ops {
        u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size);
        u32 (*read_csr_ring_head)(struct resource *csr_base_addr,
                                  u32 bank,
                                  u32 ring);
        void (*write_csr_ring_head)(struct resource *csr_base_addr,
                                    u32 bank,
                                    u32 ring,
                                    u32 value);
        u32 (*read_csr_ring_tail)(struct resource *csr_base_addr,
                                  u32 bank,
                                  u32 ring);
        void (*write_csr_ring_tail)(struct resource *csr_base_addr,
                                    u32 bank,
                                    u32 ring,
                                    u32 value);
        u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank);
        void (*write_csr_ring_config)(struct resource *csr_base_addr,
                                      u32 bank,
                                      u32 ring,
                                      u32 value);
        void (*write_csr_ring_base)(struct resource *csr_base_addr,
                                    u32 bank,
                                    u32 ring,
                                    bus_addr_t addr);
        void (*write_csr_int_flag)(struct resource *csr_base_addr,
                                   u32 bank,
                                   u32 value);
        void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank);
        void (*write_csr_int_col_en)(struct resource *csr_base_addr,
                                     u32 bank,
                                     u32 value);
        void (*write_csr_int_col_ctl)(struct resource *csr_base_addr,
                                      u32 bank,
                                      u32 value);
        void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr,
                                           u32 bank,
                                           u32 value);
        u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
                                        u32 bank);
        void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
                                          u32 bank,
                                          u32 value);
};
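
/*
 * Dispatch sketch (illustrative, not a definitive call site): transport code
 * reaches these per-generation hooks through the ops table, e.g.
 *
 *	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
 *	u32 head = csr_ops->read_csr_ring_head(csr_base, bank_nr, ring_nr);
 */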

struct adf_hw_csr_info {
        struct adf_hw_csr_ops csr_ops;
        u32 csr_addr_offset;
        u32 ring_bundle_size;
        u32 bank_int_flag_clear_mask;
        u32 num_rings_per_int_srcsel;
        u32 arb_enable_mask;
};

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_hw_device_data {
        struct adf_hw_device_class *dev_class;
        uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
        uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
        void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev);
        bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_pf2vf_offset)(uint32_t i);
        uint32_t (*get_vintmsk_offset)(uint32_t i);
        u32 (*get_vintsou_offset)(void);
        void (*get_arb_info)(struct arb_info *arb_csrs_info);
        void (*get_admin_info)(struct admin_info *admin_csrs_info);
        void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5);
        uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self);
        int (*init_accel_units)(struct adf_accel_dev *accel_dev);
        void (*exit_accel_units)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_clock_speed)(struct adf_hw_device_data *self);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
        bool (*check_prod_sku)(struct adf_accel_dev *accel_dev);
        int (*alloc_irq)(struct adf_accel_dev *accel_dev);
        void (*free_irq)(struct adf_accel_dev *accel_dev);
        void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
        int (*check_uncorrectable_error)(struct adf_accel_dev *accel_dev);
        void (*print_err_registers)(struct adf_accel_dev *accel_dev);
        void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev);
        int (*init_ras)(struct adf_accel_dev *accel_dev);
        void (*exit_ras)(struct adf_accel_dev *accel_dev);
        void (*disable_arb)(struct adf_accel_dev *accel_dev);
        void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error);
        bool (*ras_interrupts)(struct adf_accel_dev *accel_dev,
                               bool *reset_required);
        int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
        void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
        int (*send_admin_init)(struct adf_accel_dev *accel_dev);
        void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev);
        int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev,
                                   u16 *ring_to_svc_map);
        uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev);
        int (*init_arb)(struct adf_accel_dev *accel_dev);
        void (*exit_arb)(struct adf_accel_dev *accel_dev);
        void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
                                const uint32_t **cfg);
        int (*init_device)(struct adf_accel_dev *accel_dev);
        int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_ae_clock)(struct adf_hw_device_data *self);
        uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
        void (*disable_iov)(struct adf_accel_dev *accel_dev);
        void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
                                      bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        bool (*check_slice_hang)(struct adf_accel_dev *accel_dev);
        int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
        int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
        int (*disable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*reset_hw_units)(struct adf_accel_dev *accel_dev);
        int (*measure_clock)(struct adf_accel_dev *accel_dev);
        void (*restore_device)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev,
                                        enum adf_accel_unit_services services);
        enum adf_accel_unit_services (
            *get_service_type)(struct adf_accel_dev *accel_dev, s32 obj_num);
        int (*add_pke_stats)(struct adf_accel_dev *accel_dev);
        void (*remove_pke_stats)(struct adf_accel_dev *accel_dev);
        int (*add_misc_error)(struct adf_accel_dev *accel_dev);
        int (*count_ras_event)(struct adf_accel_dev *accel_dev,
                               u32 *ras_event,
                               char *aeidstr);
        void (*remove_misc_error)(struct adf_accel_dev *accel_dev);
        int (*configure_accel_units)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev);
        const char *(*get_obj_name)(struct adf_accel_dev *accel_dev,
                                    enum adf_accel_unit_services services);
        void (*pre_reset)(struct adf_accel_dev *accel_dev);
        void (*post_reset)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
        void (*get_ring_svc_map_data)(int ring_pair_index,
                                      u16 ring_to_svc_map,
                                      u8 *serv_type,
                                      int *ring_index,
                                      int *num_rings_per_srv,
                                      int bundle_num);
        struct adf_hw_csr_info csr_info;
        const char *fw_name;
        const char *fw_mmp_name;
        bool reset_ack;
        uint32_t fuses;
        uint32_t accel_capabilities_mask;
        uint32_t instance_id;
        uint16_t accel_mask;
        u32 aerucm_mask;
        u32 ae_mask;
        u32 admin_ae_mask;
        u32 service_mask;
        u32 service_to_load_mask;
        u32 heartbeat_ctr_num;
        uint16_t tx_rings_mask;
        uint8_t tx_rx_gap;
        uint8_t num_banks;
        u8 num_rings_per_bank;
        uint8_t num_accel;
        uint8_t num_logical_accel;
        uint8_t num_engines;
        uint8_t min_iov_compat_ver;
        int (*get_storage_enabled)(struct adf_accel_dev *accel_dev,
                                   uint32_t *storage_enabled);
        u8 query_storage_cap;
        u32 clock_frequency;
        u8 storage_enable;
        u32 extended_dc_capabilities;
        int (*config_device)(struct adf_accel_dev *accel_dev);
        u16 asym_rings_mask;
        int (*get_fw_image_type)(struct adf_accel_dev *accel_dev,
                                 enum adf_cfg_fw_image_type *fw_image_type);
        u16 ring_to_svc_map;
} __packed;

/* helper enum for performing CSR operations */
enum operation {
        AND,
        OR,
};

/* 32-bit CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
        bus_write_4(csr_base, csr_offset, val)

/* 64-bit CSR write macro */
#ifdef __x86_64__
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
        bus_write_8(csr_base, csr_offset, val)
#else
static __inline void
adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value)
{
        bus_write_4(csr_base, offset, (uint32_t)value);
        bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32));
}
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
        adf_csr_wr64(csr_base, csr_offset, val)
#endif

/* 32-bit CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset)

/* 64-bit CSR read macro */
#ifdef __x86_64__
#define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset)
#else
static __inline uint64_t
adf_csr_rd64(struct resource *csr_base, bus_size_t offset)
{
        return (((uint64_t)bus_read_4(csr_base, offset)) |
                (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32));
}
#define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset)
#endif
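
/*
 * Usage sketch (illustrative only): the accessors take the mapped BAR
 * resource and a byte offset, e.g.
 *
 *	u32 stat = ADF_CSR_RD(csr_base, offset);
 *	ADF_CSR_WR(csr_base, offset, stat | mask);
 *
 * Note that on non-x86_64 builds ADF_CSR_WR64/ADF_CSR_RD64 fall back to two
 * 32-bit accesses, so the 64-bit transfer is not a single bus transaction
 * there.
 */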

#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	(GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
#define SET_ASYM_MASK(asym_mask, srv) \
	({ \
		typeof(srv) srv_ = (srv); \
		(asym_mask) |= ((1 << ((srv_) * ADF_RINGS_PER_SRV_TYPE)) | \
				(1 << ((srv_) * ADF_RINGS_PER_SRV_TYPE + 1))); \
	})
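
/*
 * Example (sketch): with ADF_RINGS_PER_SRV_TYPE == 2, SET_ASYM_MASK(mask, 1)
 * sets bits 2 and 3 of the mask, i.e. both rings belonging to service slot 1.
 */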

#define GET_MAX_PROCESSES(accel_dev) \
	({ \
		typeof(accel_dev) dev = (accel_dev); \
		(GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \
	})
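
/*
 * Worked example (illustrative): with 4 banks and 16 rings per bank this
 * evaluates to 4 * (16 / 2) = 32 processes, i.e. two rings (one ring pair)
 * per process.
 */
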
#define GET_DU_TABLE(accel_dev) (accel_dev->du_table)

static inline void
adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask)
{
        unsigned int val = ADF_CSR_RD(csr, offs);

        val &= mask;
        ADF_CSR_WR(csr, offs, val);
}

static inline void
adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask)
{
        unsigned int val = ADF_CSR_RD(csr, offs);

        val |= mask;
        ADF_CSR_WR(csr, offs, val);
}

static inline void
adf_csr_fetch_and_update(enum operation op,
                         struct resource *csr,
                         size_t offs,
                         unsigned long mask)
{
        switch (op) {
        case AND:
                adf_csr_fetch_and_and(csr, offs, mask);
                break;
        case OR:
                adf_csr_fetch_and_or(csr, offs, mask);
                break;
        }
}
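
/*
 * Usage sketch (illustrative): read-modify-write a CSR through the helper,
 * e.g. set a bit with
 *
 *	adf_csr_fetch_and_update(OR, csr_base, offset, BIT(0));
 *
 * or clear it with
 *
 *	adf_csr_fetch_and_update(AND, csr_base, offset, ~BIT(0));
 */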

struct pfvf_stats {
        struct dentry *stats_file;
        /* Messages put in CSR */
        unsigned int tx;
        /* Messages read from CSR */
        unsigned int rx;
        /* Interrupt fired but int bit was clear */
        unsigned int spurious;
        /* Block messages sent */
        unsigned int blk_tx;
        /* Block messages received */
        unsigned int blk_rx;
        /* Blocks received with CRC errors */
        unsigned int crc_err;
        /* CSR in use by other side */
        unsigned int busy;
        /* Receiver did not acknowledge */
        unsigned int no_ack;
        /* Collision detected */
        unsigned int collision;
        /* Couldn't send a response */
        unsigned int tx_timeout;
        /* Didn't receive a response */
        unsigned int rx_timeout;
        /* Responses received */
        unsigned int rx_rsp;
        /* Messages re-transmitted */
        unsigned int retry;
        /* Event put timeout */
        unsigned int event_timeout;
};

#define NUM_PFVF_COUNTERS 14
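/* NUM_PFVF_COUNTERS corresponds to the 14 counters in struct pfvf_stats. */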

void adf_get_admin_info(struct admin_info *admin_csrs_info);
struct adf_admin_comms {
        bus_addr_t phy_addr;
        bus_addr_t const_tbl_addr;
        bus_addr_t aram_map_phys_addr;
        bus_addr_t phy_hb_addr;
        bus_dmamap_t aram_map;
        bus_dmamap_t const_tbl_map;
        bus_dmamap_t hb_map;
        char *virt_addr;
        char *virt_hb_addr;
        struct resource *mailbox_addr;
        struct sx lock;
        struct bus_dmamem dma_mem;
        struct bus_dmamem dma_hb;
};

struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
        struct icp_qat_fw_loader_handle *fw_loader;
        const struct firmware *uof_fw;
        const struct firmware *mmp_fw;
};

struct adf_accel_vf_info {
        struct adf_accel_dev *accel_dev;
        struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
        u32 vf_nr;
        bool init;
        u8 compat_ver;
        struct pfvf_stats pfvf_counters;
};

struct adf_fw_versions {
        u8 fw_version_major;
        u8 fw_version_minor;
        u8 fw_version_patch;
        u8 mmp_version_major;
        u8 mmp_version_minor;
        u8 mmp_version_patch;
};

#define ADF_COMPAT_CHECKER_MAX 8
typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev,
                                        u8 vf_compat_ver);
struct adf_accel_compat_manager {
        u8 num_chker;
        adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX];
};

struct adf_heartbeat;
struct adf_accel_dev {
        struct adf_hw_aram_info *aram_info;
        struct adf_accel_unit_info *au_info;
        struct adf_etr_data *transport;
        struct adf_hw_device_data *hw_device;
        struct adf_cfg_device_data *cfg;
        struct adf_fw_loader_data *fw_loader;
        struct adf_admin_comms *admin;
        struct adf_heartbeat *heartbeat;
        struct adf_fw_versions fw_versions;
        unsigned int autoreset_on_error;
        struct adf_fw_counters_data *fw_counters_data;
        struct sysctl_oid *debugfs_ae_config;
        struct list_head crypto_list;
        atomic_t *ras_counters;
        unsigned long status;
        atomic_t ref_count;
        bus_dma_tag_t dma_tag;
        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *ras_correctable;
        struct sysctl_oid *ras_uncorrectable;
        struct sysctl_oid *ras_fatal;
        struct sysctl_oid *ras_reset;
        struct sysctl_oid *pke_replay_dbgfile;
        struct sysctl_oid *misc_error_dbgfile;
        struct list_head list;
        struct adf_accel_pci accel_pci_dev;
        struct adf_accel_compat_manager *cm;
        u8 compat_ver;
        union {
                struct {
                        /* vf_info is non-zero when SR-IOV is init'ed */
                        struct adf_accel_vf_info *vf_info;
                        int num_vfs;
                } pf;
                struct {
                        struct resource *irq;
                        void *cookie;
                        char *irq_name;
                        struct task pf2vf_bh_tasklet;
                        struct mutex vf2pf_lock; /* protect CSR access */
                        int iov_msg_completion;
                        uint8_t compatible;
                        uint8_t pf_version;
                        u8 pf2vf_block_byte;
                        u8 pf2vf_block_resp_type;
                        struct pfvf_stats pfvf_counters;
                } vf;
        } u1;
        bool is_vf;
        u32 accel_id;
        void *lac_dev;
};
#endif