/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <dev/pci/pcivar.h>
#include <sys/param.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pf2vf_msg.h"
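/*
 * Bits in the VF interrupt source CSR: bit 0 indicates a ring bundle
 * (response) interrupt, bit 1 a PF-to-VF message interrupt.
 */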
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)

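/* Taskqueue (with a dedicated thread) that runs the PF2VF bottom half. */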
static TASKQUEUE_DEFINE_THREAD(qat_vf);

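/*
 * Workqueue on which device-stop work is deferred when the PF signals
 * that it is restarting; created lazily in adf_init_vf_wq() under
 * vf_stop_wq_lock.
 */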
static struct workqueue_struct *adf_vf_stop_wq;
static DEFINE_MUTEX(vf_stop_wq_lock);

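/* Work item carrying the device to stop to adf_dev_stop_async(). */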
struct adf_vf_stop_data {
        struct adf_accel_dev *accel_dev;
        struct work_struct vf_stop_work;
};

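/* Allocate a single MSI vector; all VF interrupt sources share it. */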
static int
adf_enable_msi(struct adf_accel_dev *accel_dev)
{
        int count = 1;
        int stat;

        stat = pci_alloc_msi(accel_to_pci_dev(accel_dev), &count);
        if (stat)
                device_printf(GET_DEV(accel_dev),
                              "Failed to enable MSI interrupts\n");

        return stat;
}

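/* Release the MSI vector allocated in adf_enable_msi(). */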
static void
adf_disable_msi(struct adf_accel_dev *accel_dev)
{
        device_t pdev = accel_to_pci_dev(accel_dev);

        pci_release_msi(pdev);
}

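/*
 * Process-context handler for a RESTARTING message from the PF: notify
 * subsystems, stop and shut down the device, then re-arm PF2VF interrupts.
 */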
static void
adf_dev_stop_async(struct work_struct *work)
{
        struct adf_vf_stop_data *stop_data =
            container_of(work, struct adf_vf_stop_data, vf_stop_work);
        struct adf_accel_dev *accel_dev = stop_data->accel_dev;

        adf_dev_restarting_notify(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);

        /* Re-enable PF2VF interrupts */
        adf_enable_pf2vf_interrupts(accel_dev);
        kfree(stop_data);
}

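/*
 * PF2VF bottom half, run from the qat_vf taskqueue: read the PF2VF
 * message register, dispatch the message, ack it, and re-enable the
 * PF2VF interrupt that the top half disabled.
 */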
static void
adf_pf2vf_bh_handler(void *data, int pending)
{
        struct adf_accel_dev *accel_dev = data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
            &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        struct resource *pmisc_bar_addr = pmisc->virt_addr;
        u32 msg;
        bool is_notification = false;

        /* Read the message from the PF */
        msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
        if (!(msg & ADF_PF2VF_INT)) {
                device_printf(GET_DEV(accel_dev),
                              "Spurious PF2VF interrupt. msg %X. Ignored\n",
                              msg);
                accel_dev->u1.vf.pfvf_counters.spurious++;
                goto out;
        }
        accel_dev->u1.vf.pfvf_counters.rx++;

        if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) {
                device_printf(GET_DEV(accel_dev),
                              "Ignoring non-system PF2VF message (0x%x)\n",
                              msg);
                /*
                 * To ack, clear the PF2VFINT bit.
                 * Because this must be a legacy message, the far side
                 * must clear the in-use pattern.
                 */
                msg &= ~ADF_PF2VF_INT;
                ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
                goto out;
        }

        switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
        case ADF_PF2VF_MSGTYPE_RESTARTING: {
                struct adf_vf_stop_data *stop_data;

                is_notification = true;

                device_printf(GET_DEV(accel_dev),
                              "Restarting msg received from PF 0x%x\n",
                              msg);

                clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
                stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
                if (!stop_data) {
                        device_printf(GET_DEV(accel_dev),
                                      "Couldn't schedule stop for vf_%d\n",
                                      accel_dev->accel_id);
                        goto out;
                }
                stop_data->accel_dev = accel_dev;
                INIT_WORK(&stop_data->vf_stop_work, adf_dev_stop_async);
                queue_work(adf_vf_stop_wq, &stop_data->vf_stop_work);
                break;
        }
        case ADF_PF2VF_MSGTYPE_VERSION_RESP:
                device_printf(GET_DEV(accel_dev),
                              "Version resp received from PF 0x%x\n",
                              msg);
                is_notification = false;
                accel_dev->u1.vf.pf_version =
                    (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
                    ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
                accel_dev->u1.vf.compatible =
                    (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
                    ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                accel_dev->u1.vf.iov_msg_completion = 1;
                wakeup(&accel_dev->u1.vf.iov_msg_completion);
                break;
        case ADF_PF2VF_MSGTYPE_BLOCK_RESP:
                is_notification = false;
                accel_dev->u1.vf.pf2vf_block_byte =
                    (msg & ADF_PF2VF_BLOCK_RESP_DATA_MASK) >>
                    ADF_PF2VF_BLOCK_RESP_DATA_SHIFT;
                accel_dev->u1.vf.pf2vf_block_resp_type =
                    (msg & ADF_PF2VF_BLOCK_RESP_TYPE_MASK) >>
                    ADF_PF2VF_BLOCK_RESP_TYPE_SHIFT;
                accel_dev->u1.vf.iov_msg_completion = 1;
                wakeup(&accel_dev->u1.vf.iov_msg_completion);
                break;
        case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
                device_printf(GET_DEV(accel_dev),
                              "Fatal error received from PF 0x%x\n",
                              msg);
                is_notification = true;
                if (adf_notify_fatal_error(accel_dev))
                        device_printf(GET_DEV(accel_dev),
                                      "Couldn't notify fatal error\n");
                break;
        default:
                device_printf(GET_DEV(accel_dev),
                              "Unknown PF2VF message (0x%x)\n",
                              msg);
        }

        /* To ack, clear the PF2VFINT bit */
        msg &= ~ADF_PF2VF_INT;
        /*
         * Clear the in-use pattern if the sender won't do it.
         * Because the compatibility version must be the first message
         * exchanged between the VF and PF, pf_version must already be
         * set at this time.
         * The in-use pattern is not cleared for notifications so that
         * it can be used for collision detection.
         */
        if (accel_dev->u1.vf.pf_version >= ADF_PFVF_COMPATIBILITY_FAST_ACK &&
            !is_notification)
                msg &= ~ADF_PF2VF_IN_USE_BY_PF_MASK;
        ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);

out:
        /* Re-enable PF2VF interrupts */
        adf_enable_pf2vf_interrupts(accel_dev);
}

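/* Set up the task and lock used by the PF2VF bottom half. */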
static int
adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
        TASK_INIT(&accel_dev->u1.vf.pf2vf_bh_tasklet,
                  0,
                  adf_pf2vf_bh_handler,
                  accel_dev);
        mutex_init(&accel_dev->u1.vf.vf2pf_lock);

        return 0;
}

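/* Cancel and drain any pending PF2VF task, then destroy its lock. */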
static void
adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
        taskqueue_cancel(taskqueue_qat_vf,
                         &accel_dev->u1.vf.pf2vf_bh_tasklet,
                         NULL);
        taskqueue_drain(taskqueue_qat_vf, &accel_dev->u1.vf.pf2vf_bh_tasklet);
        mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}

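/*
 * Top-level MSI handler: read the VF interrupt source CSR and demultiplex
 * between PF2VF message interrupts (deferred to the taskqueue bottom half)
 * and ring bundle response interrupts (handled inline).
 */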
static void
adf_isr(void *privdata)
{
        struct adf_accel_dev *accel_dev = privdata;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
            &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        struct resource *pmisc_bar_addr = pmisc->virt_addr;
        u32 v_int, v_mask;

        /* Read VF INT source CSR to determine the source of the VF interrupt */
        v_int = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_vintsou_offset());
        v_mask = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_vintmsk_offset(0));

        /* Check for an unmasked PF2VF interrupt */
        if ((v_int & ~v_mask) & ADF_VINTSOU_PF2VF) {
                /* Disable PF to VF interrupt */
                adf_disable_pf2vf_interrupts(accel_dev);

                /* Schedule tasklet to handle interrupt BH */
                taskqueue_enqueue(taskqueue_qat_vf,
                                  &accel_dev->u1.vf.pf2vf_bh_tasklet);
        }

        /* Check for an unmasked bundle (ring response) interrupt */
        if ((v_int & ~v_mask) & ADF_VINTSOU_BUN) {
                struct adf_etr_data *etr_data = accel_dev->transport;
                struct adf_etr_bank_data *bank = &etr_data->banks[0];

                /* Disable Flag and Coalesce Ring Interrupts */
                WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr,
                                           bank->bank_number, 0);
                adf_response_handler((uintptr_t)bank);
        }
}

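/*
 * Allocate the MSI IRQ resource (rid 1 selects the first MSI message)
 * and attach adf_isr() as its handler.
 */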
static int
adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
        device_t pdev = accel_to_pci_dev(accel_dev);
        int rid = 1;
        int ret;

        accel_dev->u1.vf.irq =
            bus_alloc_resource_any(pdev, SYS_RES_IRQ, &rid, RF_ACTIVE);
        if (accel_dev->u1.vf.irq == NULL) {
                device_printf(GET_DEV(accel_dev), "failed to allocate IRQ\n");
                return ENXIO;
        }
        ret = bus_setup_intr(pdev,
                             accel_dev->u1.vf.irq,
                             INTR_TYPE_MISC | INTR_MPSAFE,
                             NULL,
                             adf_isr,
                             accel_dev,
                             &accel_dev->u1.vf.cookie);
        if (ret)
                device_printf(GET_DEV(accel_dev),
                              "failed to enable irq for %s\n",
                              accel_dev->u1.vf.irq_name);
        return ret;
}

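/*
 * adf_isr() dispatches ring responses directly, so no separate bundle
 * bottom half is needed; these stubs keep the alloc/free call sequences
 * in adf_vf_isr_resource_alloc()/adf_vf_isr_resource_free() symmetric.
 */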
static int
adf_setup_bh(struct adf_accel_dev *accel_dev)
{
        return 0;
}

static void
adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
}

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Frees the interrupt resources for the acceleration device virtual function.
 */
void
adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
        device_t pdev = accel_to_pci_dev(accel_dev);

        /* The IRQ may never have been requested if resource_alloc failed early */
        if (accel_dev->u1.vf.irq != NULL) {
                bus_teardown_intr(pdev,
                                  accel_dev->u1.vf.irq,
                                  accel_dev->u1.vf.cookie);
                bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
        }
        adf_cleanup_bh(accel_dev);
        adf_cleanup_pf2vf_bh(accel_dev);
        adf_disable_msi(accel_dev);
}

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Allocates interrupt resources for the acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
        if (adf_enable_msi(accel_dev))
                goto err_out;

        if (adf_setup_pf2vf_bh(accel_dev))
                goto err_out;

        if (adf_setup_bh(accel_dev))
                goto err_out;

        if (adf_request_msi_irq(accel_dev))
                goto err_out;

        return 0;
err_out:
        adf_vf_isr_resource_free(accel_dev);
        return EFAULT;
}

/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 *
 * Flushes the 'adf_vf_stop_wq' workqueue, waiting for any queued
 * device-stop work to complete.
 *
 * Return: void.
 */
void
adf_flush_vf_wq(void)
{
        if (adf_vf_stop_wq)
                flush_workqueue(adf_vf_stop_wq);
}

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Creates the 'adf_vf_stop_wq' workqueue on first use; creation is
 * serialized by vf_stop_wq_lock so concurrent callers share one queue.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_init_vf_wq(void)
{
        int ret = 0;

        mutex_lock(&vf_stop_wq_lock);
        if (!adf_vf_stop_wq)
                adf_vf_stop_wq =
                    alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

        if (!adf_vf_stop_wq)
                ret = ENOMEM;

        mutex_unlock(&vf_stop_wq_lock);
        return ret;
}

/**
 * adf_exit_vf_wq() - Destroy workqueue for VF
 *
 * Destroys the 'adf_vf_stop_wq' workqueue.
 *
 * Return: void.
 */
void
adf_exit_vf_wq(void)
{
        if (adf_vf_stop_wq) {
                destroy_workqueue(adf_vf_stop_wq);
                adf_vf_stop_wq = NULL;
        }
}