/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/smp.h>
#include <dev/pci/pcivar.h>
#include <sys/malloc.h>
#include "adf_cfg_common.h"
#include "adf_dev_err.h"

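/* Dedicated taskqueue thread on which the PF driver defers work. */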
TASKQUEUE_DEFINE_THREAD(qat_pf);

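/*
 * Allocate MSI-X vectors: one per ring bank plus one for the AE cluster
 * when SR-IOV is disabled, or a single miscellaneous vector (remapped to
 * the last MSI-X table entry) when SR-IOV is enabled.
 */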
static int
adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int msix_num_entries = 1;
	int count = 0;
	int error = 0;
	int num_vectors = 0;
	u_int *vectors;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	/* If SR-IOV is disabled, add entries for each bank */
	if (!accel_dev->u1.pf.vf_info) {
		msix_num_entries += hw_data->num_banks;
		num_vectors = 0;
		vectors = NULL;
	} else {
		num_vectors = hw_data->num_banks + 1;
		vectors = malloc(num_vectors * sizeof(u_int),
				 M_QAT,
				 M_WAITOK | M_ZERO);
		vectors[hw_data->num_banks] = 1;
	}

	count = msix_num_entries;
	error = pci_alloc_msix(info_pci_dev->pci_dev, &count);
	if (error == 0 && count != msix_num_entries) {
		pci_release_msi(info_pci_dev->pci_dev);
		error = EFBIG;
	}
	if (error) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to enable MSI-X IRQ(s)\n");
		free(vectors, M_QAT);
		return error;
	}

	if (vectors != NULL) {
		error =
		    pci_remap_msix(info_pci_dev->pci_dev, num_vectors, vectors);
		free(vectors, M_QAT);
		if (error) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to remap MSI-X IRQ(s)\n");
			pci_release_msi(info_pci_dev->pci_dev);
			return error;
		}
	}

	return 0;
}

static void
adf_disable_msix(struct adf_accel_pci *info_pci_dev)
{
	pci_release_msi(info_pci_dev->pci_dev);
}

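/*
 * Bundle (ring bank) interrupt handler: clear the bank's interrupt
 * flag-and-column CSR, then dispatch responses for that bank.
 */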
static void
adf_msix_isr_bundle(void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_etr_data *priv_data = bank->accel_dev->transport;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
					    bank->bank_number,
					    0);
	adf_response_handler((uintptr_t)&priv_data->banks[bank->bank_number]);
}

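/*
 * AE cluster interrupt handler: report RAS errors (notifying a fatal
 * error when a reset is required), check for slice hangs, and dump the
 * error registers if either ERRSOU3 or ERRSOU5 is asserted.
 */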
static void
adf_msix_isr_ae(void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *pmisc_bar_addr = pmisc->virt_addr;
	u32 errsou3;
	u32 errsou5;
	bool reset_required = false;

	if (hw_data->ras_interrupts &&
	    hw_data->ras_interrupts(accel_dev, &reset_required))
		if (reset_required) {
			adf_notify_fatal_error(accel_dev);
			goto exit;
		}

	if (hw_data->check_slice_hang && hw_data->check_slice_hang(accel_dev)) {
		/* A detected slice hang is reported by check_slice_hang()
		 * itself; nothing more to do here. */
	}

exit:
	errsou3 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3);
	errsou5 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5);
	if (errsou3 | errsou5)
		adf_print_err_registers(accel_dev);
	else
		device_printf(GET_DEV(accel_dev), "spurious AE interrupt\n");
}

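/*
 * Resolve the CPU that a bank's interrupt should be bound to from the
 * per-bank "CoreAffinity" entry in the Accelerator0 configuration
 * section, falling back to the first CPU.
 */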
static int
adf_get_irq_affinity(struct adf_accel_dev *accel_dev, int bank)
{
	int core = CPU_FIRST();
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES];

	snprintf(bankName,
		 ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1,
		 ADF_ETRMGR_CORE_AFFINITY_FORMAT,
		 bank);
	bankName[ADF_CFG_MAX_KEY_LEN_IN_BYTES - 1] = '\0';

	if (adf_cfg_get_param_value(accel_dev, "Accelerator0", bankName, val)) {
		device_printf(GET_DEV(accel_dev),
			      "No CoreAffinity Set - using default core: %d\n",
			      core);
	} else {
		if (compat_strtouint(val, 10, &core)) {
			device_printf(GET_DEV(accel_dev),
				      "Can't get cpu core ID\n");
		}
	}
	return (core);
}

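/*
 * Allocate and wire up the MSI-X interrupt resources: one handler per
 * ring bank (bound to its configured CPU) unless SR-IOV is enabled, plus
 * one handler for the AE cluster.
 */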
static int
adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = info_pci_dev->msix_entries.entries;
	int ret = 0, rid = 0, i = 0;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int computed_core = 0;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->u1.pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];

			rid = i + 1;
			msixe[i].irq =
			    bus_alloc_resource_any(info_pci_dev->pci_dev,
						   SYS_RES_IRQ,
						   &rid,
						   RF_ACTIVE);
			if (msixe[i].irq == NULL) {
				device_printf(
				    GET_DEV(accel_dev),
				    "failed to allocate IRQ for bundle %d\n",
				    i);
				return ENXIO;
			}

			ret = bus_setup_intr(info_pci_dev->pci_dev,
					     msixe[i].irq,
					     INTR_TYPE_MISC | INTR_MPSAFE,
					     NULL,
					     adf_msix_isr_bundle,
					     bank,
					     &msixe[i].cookie);
			if (ret) {
				device_printf(
				    GET_DEV(accel_dev),
				    "failed to enable IRQ for bundle %d\n",
				    i);
				bus_release_resource(info_pci_dev->pci_dev,
						     SYS_RES_IRQ,
						     rid,
						     msixe[i].irq);
				msixe[i].irq = NULL;
				return ret;
			}

			computed_core = adf_get_irq_affinity(accel_dev, i);
			bus_describe_intr(info_pci_dev->pci_dev,
					  msixe[i].irq,
					  msixe[i].cookie,
					  "b%d",
					  i);
			bus_bind_intr(info_pci_dev->pci_dev,
				      msixe[i].irq,
				      computed_core);
		}
	}

	/* Request msix irq for AE */
	rid = hw_data->num_banks + 1;
	msixe[i].irq = bus_alloc_resource_any(info_pci_dev->pci_dev,
					      SYS_RES_IRQ,
					      &rid,
					      RF_ACTIVE);
	if (msixe[i].irq == NULL) {
		device_printf(GET_DEV(accel_dev),
			      "failed to allocate IRQ for ae-cluster\n");
		return ENXIO;
	}

	ret = bus_setup_intr(info_pci_dev->pci_dev,
			     msixe[i].irq,
			     INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL,
			     adf_msix_isr_ae,
			     accel_dev,
			     &msixe[i].cookie);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "failed to enable IRQ for ae-cluster\n");
		bus_release_resource(info_pci_dev->pci_dev,
				     SYS_RES_IRQ,
				     rid,
				     msixe[i].irq);
		msixe[i].irq = NULL;
		return ret;
	}

	bus_describe_intr(info_pci_dev->pci_dev,
			  msixe[i].irq,
			  msixe[i].cookie,
			  "ae");
	return ret;
}

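/* Tear down the interrupt handlers and release their IRQ resources. */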
static void
adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *info_pci_dev = &accel_dev->accel_pci_dev;
	struct msix_entry *msixe = info_pci_dev->msix_entries.entries;
	int i = 0;

	if (info_pci_dev->msix_entries.num_entries > 0) {
		for (i = 0; i < info_pci_dev->msix_entries.num_entries; i++) {
			if (msixe[i].irq != NULL && msixe[i].cookie != NULL) {
				bus_teardown_intr(info_pci_dev->pci_dev,
						  msixe[i].irq,
						  msixe[i].cookie);
				bus_free_resource(info_pci_dev->pci_dev,
						  SYS_RES_IRQ,
						  msixe[i].irq);
			}
		}
	}
}

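/*
 * Size and allocate the msix_entry table: one entry per bank plus one
 * for the AE cluster without SR-IOV, or a single entry with SR-IOV
 * enabled.
 */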
static int
adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *entries;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->u1.pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = malloc(msix_num_entries * sizeof(struct msix_entry),
			 M_QAT,
			 M_WAITOK | M_ZERO);

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	return 0;
}

static void
adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	free(accel_dev->accel_pci_dev.msix_entries.entries, M_QAT);
	accel_dev->accel_pci_dev.msix_entries.entries = NULL;
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void
adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;
	if (adf_enable_msix(accel_dev))
		goto err_out;

	if (adf_request_irqs(accel_dev))
		goto err_out;

	return 0;
err_out:
	adf_isr_resource_free(accel_dev);
	return EFAULT;
}
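
/*
 * Usage sketch (illustrative only, not part of this driver): a caller in
 * the device bring-up path would typically pair the two exported routines
 * as follows. adf_isr_resource_alloc() unwinds its own partial setup on
 * failure, so no extra cleanup is needed on the error path.
 *
 *	if (adf_isr_resource_alloc(accel_dev))
 *		return EFAULT;
 *	...
 *	adf_isr_resource_free(accel_dev);
 */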