1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include <linux/delay.h>
5 #include "adf_accel_devices.h"
6 #include "adf_common_drv.h"
7 #include "adf_pf2vf_msg.h"
8
/* Table of registered block-message providers, indexed by block message
 * type; slots are filled by adf_iov_block_provider_register().
 */
adf_iov_block_provider
    pf2vf_message_providers[ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE + 1];

/* 256-entry lookup table used by adf_pfvf_crc() to compute the CRC-8
 * protecting PF<->VF block message payloads (one entry per input byte).
 */
unsigned char pfvf_crc8_table[] =
    { 0x00, 0x97, 0xB9, 0x2E, 0xE5, 0x72, 0x5C, 0xCB, 0x5D, 0xCA, 0xE4, 0x73,
      0xB8, 0x2F, 0x01, 0x96, 0xBA, 0x2D, 0x03, 0x94, 0x5F, 0xC8, 0xE6, 0x71,
      0xE7, 0x70, 0x5E, 0xC9, 0x02, 0x95, 0xBB, 0x2C, 0xE3, 0x74, 0x5A, 0xCD,
      0x06, 0x91, 0xBF, 0x28, 0xBE, 0x29, 0x07, 0x90, 0x5B, 0xCC, 0xE2, 0x75,
      0x59, 0xCE, 0xE0, 0x77, 0xBC, 0x2B, 0x05, 0x92, 0x04, 0x93, 0xBD, 0x2A,
      0xE1, 0x76, 0x58, 0xCF, 0x51, 0xC6, 0xE8, 0x7F, 0xB4, 0x23, 0x0D, 0x9A,
      0x0C, 0x9B, 0xB5, 0x22, 0xE9, 0x7E, 0x50, 0xC7, 0xEB, 0x7C, 0x52, 0xC5,
      0x0E, 0x99, 0xB7, 0x20, 0xB6, 0x21, 0x0F, 0x98, 0x53, 0xC4, 0xEA, 0x7D,
      0xB2, 0x25, 0x0B, 0x9C, 0x57, 0xC0, 0xEE, 0x79, 0xEF, 0x78, 0x56, 0xC1,
      0x0A, 0x9D, 0xB3, 0x24, 0x08, 0x9F, 0xB1, 0x26, 0xED, 0x7A, 0x54, 0xC3,
      0x55, 0xC2, 0xEC, 0x7B, 0xB0, 0x27, 0x09, 0x9E, 0xA2, 0x35, 0x1B, 0x8C,
      0x47, 0xD0, 0xFE, 0x69, 0xFF, 0x68, 0x46, 0xD1, 0x1A, 0x8D, 0xA3, 0x34,
      0x18, 0x8F, 0xA1, 0x36, 0xFD, 0x6A, 0x44, 0xD3, 0x45, 0xD2, 0xFC, 0x6B,
      0xA0, 0x37, 0x19, 0x8E, 0x41, 0xD6, 0xF8, 0x6F, 0xA4, 0x33, 0x1D, 0x8A,
      0x1C, 0x8B, 0xA5, 0x32, 0xF9, 0x6E, 0x40, 0xD7, 0xFB, 0x6C, 0x42, 0xD5,
      0x1E, 0x89, 0xA7, 0x30, 0xA6, 0x31, 0x1F, 0x88, 0x43, 0xD4, 0xFA, 0x6D,
      0xF3, 0x64, 0x4A, 0xDD, 0x16, 0x81, 0xAF, 0x38, 0xAE, 0x39, 0x17, 0x80,
      0x4B, 0xDC, 0xF2, 0x65, 0x49, 0xDE, 0xF0, 0x67, 0xAC, 0x3B, 0x15, 0x82,
      0x14, 0x83, 0xAD, 0x3A, 0xF1, 0x66, 0x48, 0xDF, 0x10, 0x87, 0xA9, 0x3E,
      0xF5, 0x62, 0x4C, 0xDB, 0x4D, 0xDA, 0xF4, 0x63, 0xA8, 0x3F, 0x11, 0x86,
      0xAA, 0x3D, 0x13, 0x84, 0x4F, 0xD8, 0xF6, 0x61, 0xF7, 0x60, 0x4E, 0xD9,
      0x12, 0x85, 0xAB, 0x3C };
34
35 void
36 adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
37 {
38 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
39 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
40 struct resource *pmisc_bar_addr =
41 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
42
43 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
44 }
45
46 void
47 adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
48 {
49 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
50 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
51 struct resource *pmisc_bar_addr =
52 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
53
54 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
55 }
56
/*
 * __adf_iov_putmsg() - perform one send of a 32-bit PFVF message over the
 * shared PF2VF CSR and wait for the remote side's acknowledgement.
 * @accel_dev:       device the message is sent on
 * @msg:             message payload (in-use bits are overwritten here)
 * @vf_nr:           target VF number (ignored when running as a VF)
 * @is_notification: true for fire-and-forget notifications, which use the
 *                   retained in-use pattern for collision detection
 *
 * Return: 0 on success, EAGAIN if the CSR is busy or a collision was
 * detected, EIO if no ACK was received. (Positive errno, FreeBSD style.)
 */
static int
__adf_iov_putmsg(struct adf_accel_dev *accel_dev,
		 u32 msg,
		 u8 vf_nr,
		 bool is_notification)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *pmisc_bar_addr =
	    pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset;
	/* Back-off starts exponential in microseconds, then falls back to
	 * fixed millisecond sleeps once the exponential cap is reached. */
	u32 total_delay = 0, mdelay = ADF_IOV_MSG_ACK_DELAY_MS,
	    udelay = ADF_IOV_MSG_ACK_DELAY_US;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock; /* lock preventing concurrent acces of CSR */
	u32 int_bit;
	int ret = 0;
	struct pfvf_stats *pfvf_counters = NULL;

	/* The same CSR is shared by both directions; pick the offset, lock,
	 * in-use ownership patterns, doorbell bit and stats counters for the
	 * direction we are sending in. */
	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->u1.vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
		pfvf_counters = &accel_dev->u1.vf.pfvf_counters;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->u1.pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
		pfvf_counters = &accel_dev->u1.pf.vf_info[vf_nr].pfvf_counters;
	}

	mutex_lock(lock);

	/* Check if PF2VF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		device_printf(GET_DEV(accel_dev),
			      "PF2VF CSR in use by remote function\n");
		ret = EAGAIN;
		pfvf_counters->busy++;
		goto out;
	}

	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	/* Writing the doorbell bit together with the payload raises the
	 * interrupt on the remote side. */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
	pfvf_counters->tx++;

	/* Wait for confirmation from remote func it received the message */
	do {
		if (udelay < ADF_IOV_MSG_ACK_EXP_MAX_DELAY_US) {
			usleep_range(udelay, udelay * 2);
			udelay = udelay * 2;
			total_delay = total_delay + udelay;
		} else {
			pause_ms("adfstop", mdelay);
			total_delay = total_delay + (mdelay * 1000);
		}
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) &&
		 (total_delay < ADF_IOV_MSG_ACK_LIN_MAX_DELAY_US));

	/* The remote side acknowledges receipt by clearing the doorbell bit;
	 * if it is still set the message timed out. */
	if (val & int_bit) {
		device_printf(GET_DEV(accel_dev),
			      "ACK not received from remote\n");
		pfvf_counters->no_ack++;
		val &= ~int_bit;
		ret = EIO;
	}

	/* For fire-and-forget notifications, the receiver does not clear
	 * the in-use pattern. This is used to detect collisions.
	 */
	if (is_notification && (val & ~int_bit) != msg) {
		/* Collision must have overwritten the message */
		device_printf(GET_DEV(accel_dev),
			      "Collision on notification\n");
		pfvf_counters->collision++;
		ret = EAGAIN;
		goto out;
	}

	/*
	 * If the far side did not clear the in-use pattern it is either
	 * 1) Notification - message left intact to detect collision
	 * 2) Older protocol (compatibility version < 3) on the far side
	 *    where the sender is responsible for clearing the in-use
	 *    pattern after the received has acknowledged receipt.
	 * In either case, clear the in-use pattern now.
	 */
	if ((val & local_in_use_mask) == local_in_use_pattern)
		ADF_CSR_WR(pmisc_bar_addr,
			   pf2vf_offset,
			   val & ~local_in_use_mask);

out:
	mutex_unlock(lock);
	return ret;
}
166
167 static int
168 adf_iov_put(struct adf_accel_dev *accel_dev,
169 u32 msg,
170 u8 vf_nr,
171 bool is_notification)
172 {
173 u32 count = 0, delay = ADF_IOV_MSG_RETRY_DELAY;
174 int ret;
175 struct pfvf_stats *pfvf_counters = NULL;
176
177 if (accel_dev->is_vf)
178 pfvf_counters = &accel_dev->u1.vf.pfvf_counters;
179 else
180 pfvf_counters = &accel_dev->u1.pf.vf_info[vf_nr].pfvf_counters;
181
182 do {
183 ret = __adf_iov_putmsg(accel_dev, msg, vf_nr, is_notification);
184 if (ret == EAGAIN)
185 pause_ms("adfstop", delay);
186 delay = delay * 2;
187 } while (ret == EAGAIN && ++count < ADF_IOV_MSG_MAX_RETRIES);
188 if (ret == EAGAIN) {
189 if (is_notification)
190 pfvf_counters->event_timeout++;
191 else
192 pfvf_counters->tx_timeout++;
193 }
194
195 return ret;
196 }
197
198 /**
199 * adf_iov_putmsg() - send PF2VF message
200 * @accel_dev: Pointer to acceleration device.
201 * @msg: Message to send
202 * @vf_nr: VF number to which the message will be sent
203 *
204 * Function sends a messge from the PF to a VF
205 *
206 * Return: 0 on success, error code otherwise.
207 */
208 int
209 adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
210 {
211 return adf_iov_put(accel_dev, msg, vf_nr, false);
212 }
213
214 /**
215 * adf_iov_notify() - send PF2VF notification message
216 * @accel_dev: Pointer to acceleration device.
217 * @msg: Message to send
218 * @vf_nr: VF number to which the message will be sent
219 *
220 * Function sends a notification messge from the PF to a VF
221 *
222 * Return: 0 on success, error code otherwise.
223 */
224 int
225 adf_iov_notify(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
226 {
227 return adf_iov_put(accel_dev, msg, vf_nr, true);
228 }
229
230 u8
231 adf_pfvf_crc(u8 start_crc, u8 *buf, u8 len)
232 {
233 u8 crc = start_crc;
234
235 while (len-- > 0)
236 crc = pfvf_crc8_table[(crc ^ *buf++) & 0xff];
237
238 return crc;
239 }
240
241 int
242 adf_iov_block_provider_register(u8 msg_type,
243 const adf_iov_block_provider provider)
244 {
245 if (msg_type >= ARRAY_SIZE(pf2vf_message_providers)) {
246 pr_err("QAT: invalid message type %d for PF2VF provider\n",
247 msg_type);
248 return -EINVAL;
249 }
250 if (pf2vf_message_providers[msg_type]) {
251 pr_err("QAT: Provider %ps already registered for message %d\n",
252 pf2vf_message_providers[msg_type],
253 msg_type);
254 return -EINVAL;
255 }
256
257 pf2vf_message_providers[msg_type] = provider;
258 return 0;
259 }
260
261 u8
262 adf_iov_is_block_provider_registered(u8 msg_type)
263 {
264 if (pf2vf_message_providers[msg_type])
265 return 1;
266 else
267 return 0;
268 }
269
270 int
271 adf_iov_block_provider_unregister(u8 msg_type,
272 const adf_iov_block_provider provider)
273 {
274 if (msg_type >= ARRAY_SIZE(pf2vf_message_providers)) {
275 pr_err("QAT: invalid message type %d for PF2VF provider\n",
276 msg_type);
277 return -EINVAL;
278 }
279 if (pf2vf_message_providers[msg_type] != provider) {
280 pr_err("QAT: Provider %ps not registered for message %d\n",
281 provider,
282 msg_type);
283 return -EINVAL;
284 }
285
286 pf2vf_message_providers[msg_type] = NULL;
287 return 0;
288 }
289
/*
 * adf_iov_block_get_data() - fetch one byte (or a running CRC) of a block
 * message from its registered provider.
 * @accel_dev:     device the request arrived on
 * @msg_type:      block message type (index into pf2vf_message_providers)
 * @byte_num:      byte index requested: 0 = version, 1 = size,
 *                 2+ = payload byte (byte_num - 2); for CRC requests the
 *                 CRC covers version, size and the first (byte_num - 1)
 *                 payload bytes
 * @data:          out: requested byte, CRC, or an error code on failure
 * @compatibility: requester's compatibility version, passed to the provider
 * @crc:           true to return a CRC instead of a data byte
 *
 * Return: 0 on success, -EINVAL on any validation or provider failure
 * (with *data set to the corresponding ADF_PF2VF_* error code).
 */
static int
adf_iov_block_get_data(struct adf_accel_dev *accel_dev,
		       u8 msg_type,
		       u8 byte_num,
		       u8 *data,
		       u8 compatibility,
		       bool crc)
{
	u8 *buffer;
	u8 size;
	u8 msg_ver;
	u8 crc8;

	/* msg_type comes from the wire: bounds-check before indexing. */
	if (msg_type >= ARRAY_SIZE(pf2vf_message_providers)) {
		pr_err("QAT: invalid message type %d for PF2VF provider\n",
		       msg_type);
		*data = ADF_PF2VF_INVALID_BLOCK_TYPE;
		return -EINVAL;
	}

	if (!pf2vf_message_providers[msg_type]) {
		pr_err("QAT: No registered provider for message %d\n",
		       msg_type);
		*data = ADF_PF2VF_INVALID_BLOCK_TYPE;
		return -EINVAL;
	}

	/* Ask the provider for the block: it fills in buffer, size and
	 * message version. */
	if ((*pf2vf_message_providers[msg_type])(
		accel_dev, &buffer, &size, &msg_ver, compatibility, byte_num)) {
		pr_err("QAT: unknown error from provider for message %d\n",
		       msg_type);
		*data = ADF_PF2VF_UNSPECIFIED_ERROR;
		return -EINVAL;
	}

	/* The reported size must fit the payload budget of the message
	 * class (small/medium/large) the type belongs to. */
	if ((msg_type <= ADF_VF2PF_MAX_SMALL_MESSAGE_TYPE &&
	     size > ADF_VF2PF_SMALL_PAYLOAD_SIZE) ||
	    (msg_type <= ADF_VF2PF_MAX_MEDIUM_MESSAGE_TYPE &&
	     size > ADF_VF2PF_MEDIUM_PAYLOAD_SIZE) ||
	    size > ADF_VF2PF_LARGE_PAYLOAD_SIZE) {
		pr_err("QAT: Invalid size %d provided for message type %d\n",
		       size,
		       msg_type);
		*data = ADF_PF2VF_PAYLOAD_TRUNCATED;
		return -EINVAL;
	}

	/* A CRC over zero bytes is meaningless, and byte_num may not run
	 * past the header (version + size) plus payload. */
	if ((!byte_num && crc) || byte_num >= size + ADF_VF2PF_BLOCK_DATA) {
		pr_err("QAT: Invalid byte number %d for message %d\n",
		       byte_num,
		       msg_type);
		*data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
		return -EINVAL;
	}

	if (crc) {
		/* CRC chained over version, size, then payload prefix. */
		crc8 = adf_pfvf_crc(ADF_CRC8_INIT_VALUE, &msg_ver, 1);
		crc8 = adf_pfvf_crc(crc8, &size, 1);
		*data = adf_pfvf_crc(crc8, buffer, byte_num - 1);
	} else {
		if (byte_num == 0)
			*data = msg_ver;
		else if (byte_num == 1)
			*data = size;
		else
			*data = buffer[byte_num - 2];
	}

	return 0;
}
360
361 static int
362 adf_iov_block_get_byte(struct adf_accel_dev *accel_dev,
363 u8 msg_type,
364 u8 byte_num,
365 u8 *data,
366 u8 compatibility)
367 {
368 return adf_iov_block_get_data(
369 accel_dev, msg_type, byte_num, data, compatibility, false);
370 }
371
372 static int
373 adf_iov_block_get_crc(struct adf_accel_dev *accel_dev,
374 u8 msg_type,
375 u8 byte_num,
376 u8 *data,
377 u8 compatibility)
378 {
379 return adf_iov_block_get_data(
380 accel_dev, msg_type, byte_num, data, compatibility, true);
381 }
382
383 int adf_iov_compatibility_check(struct adf_accel_dev *accel_dev, u8 compat_ver);
384
/*
 * adf_vf2pf_req_hndl() - PF-side handler for a VF2PF doorbell interrupt.
 * @vf_info: per-VF state for the VF that raised the interrupt
 *
 * Reads the message from the shared CSR, dispatches on its type
 * (compatibility/version requests, init/shutdown notifications, block data
 * requests), acknowledges it by clearing the doorbell bit, and sends a
 * response back to the VF where one is required.
 */
void
adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	struct resource *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
	u8 byte_num = 0;
	u8 msg_type = 0;
	u8 resp_type;
	int res;
	u8 data;
	u8 compat = 0x0;
	int vf_compat_ver = 0;
	bool is_notification = false;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
	if (!(msg & ADF_VF2PF_INT)) {
		/* Doorbell bit not set: interrupt without a pending message */
		device_printf(GET_DEV(accel_dev),
			      "Spurious VF2PF interrupt. msg %X. Ignored\n",
			      msg);
		vf_info->pfvf_counters.spurious++;
		goto out;
	}
	vf_info->pfvf_counters.rx++;

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM)) {
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		device_printf(GET_DEV(accel_dev),
			      "Ignored non-system message from VF%d (0x%x);\n",
			      vf_nr + 1,
			      msg);
		/*
		 * To ack, clear the VF2PFINT bit.
		 * Because this must be a legacy message, the far side
		 * must clear the in-use pattern.
		 */
		msg &= ~(ADF_VF2PF_INT);
		ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

		goto out;
	}

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:

	{
		/* VF asks whether its protocol version is compatible; reply
		 * with the PF version and a compatibility verdict. */
		is_notification = false;
		vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
		vf_info->compat_ver = vf_compat_ver;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			(ADF_PF2VF_MSGTYPE_VERSION_RESP
			 << ADF_PF2VF_MSGTYPE_SHIFT) |
			(ADF_PFVF_COMPATIBILITY_VERSION
			 << ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		device_printf(
		    GET_DEV(accel_dev),
		    "Compatibility Version Request from VF%d vers=%u\n",
		    vf_nr + 1,
		    vf_info->compat_ver);

		/* Older VF: run the registered checkers; same version:
		 * compatible by definition; newer VF: let the VF decide. */
		if (vf_compat_ver < ADF_PFVF_COMPATIBILITY_VERSION)
			compat = adf_iov_compatibility_check(accel_dev,
							     vf_compat_ver);
		else if (vf_compat_ver == ADF_PFVF_COMPATIBILITY_VERSION)
			compat = ADF_PF2VF_VF_COMPATIBLE;
		else
			compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;

		resp |= compat << ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;

		if (compat == ADF_PF2VF_VF_INCOMPATIBLE)
			device_printf(GET_DEV(accel_dev),
				      "VF%d and PF are incompatible.\n",
				      vf_nr + 1);
	} break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		/* Pre-compat-version protocol: reply with a fixed legacy
		 * major/minor version. */
		device_printf(GET_DEV(accel_dev),
			      "Legacy VersionRequest received from VF%d 0x%x\n",
			      vf_nr + 1,
			      msg);
		is_notification = false;

		/* legacy driver, VF compat_ver is 0 */
		vf_info->compat_ver = 0;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			(ADF_PF2VF_MSGTYPE_VERSION_RESP
			 << ADF_PF2VF_MSGTYPE_SHIFT));

		/* PF always newer than legacy VF */
		compat =
		    adf_iov_compatibility_check(accel_dev, vf_info->compat_ver);
		resp |= compat << ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;

		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
		    1 << ADF_PF2VF_MINORVERSION_SHIFT;

		if (compat == ADF_PF2VF_VF_INCOMPATIBLE)
			device_printf(GET_DEV(accel_dev),
				      "VF%d and PF are incompatible.\n",
				      vf_nr + 1);
		break;
	case ADF_VF2PF_MSGTYPE_INIT: {
		/* Notification only: record the VF as initialized. */
		device_printf(GET_DEV(accel_dev),
			      "Init message received from VF%d 0x%x\n",
			      vf_nr + 1,
			      msg);
		is_notification = true;
		vf_info->init = true;
	} break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN: {
		/* Notification only: record the VF as shut down. */
		device_printf(GET_DEV(accel_dev),
			      "Shutdown message received from VF%d 0x%x\n",
			      vf_nr + 1,
			      msg);
		is_notification = true;
		vf_info->init = false;
	} break;
	case ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ:
	case ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ:
	case ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ: {
		is_notification = false;
		/* Each block class packs byte_num and the request type with
		 * different field widths; decode per class and rebase the
		 * type into the global block-message-type space. */
		switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >>
			ADF_VF2PF_MSGTYPE_SHIFT) {
		case ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ:
			byte_num =
			    ((msg & ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_MASK) >>
			     ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_SHIFT);
			msg_type =
			    ((msg & ADF_VF2PF_LARGE_BLOCK_REQ_TYPE_MASK) >>
			     ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT);
			msg_type += ADF_VF2PF_MIN_LARGE_MESSAGE_TYPE;
			break;
		case ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ:
			byte_num =
			    ((msg & ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_MASK) >>
			     ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_SHIFT);
			msg_type =
			    ((msg & ADF_VF2PF_MEDIUM_BLOCK_REQ_TYPE_MASK) >>
			     ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT);
			msg_type += ADF_VF2PF_MIN_MEDIUM_MESSAGE_TYPE;
			break;
		case ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ:
			byte_num =
			    ((msg & ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_MASK) >>
			     ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_SHIFT);
			msg_type =
			    ((msg & ADF_VF2PF_SMALL_BLOCK_REQ_TYPE_MASK) >>
			     ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT);
			msg_type += ADF_VF2PF_MIN_SMALL_MESSAGE_TYPE;
			break;
		}

		/* CRC flag set: return a checksum instead of a data byte. */
		if (msg >> ADF_VF2PF_BLOCK_REQ_CRC_SHIFT) {
			res = adf_iov_block_get_crc(accel_dev,
						    msg_type,
						    byte_num,
						    &data,
						    vf_info->compat_ver);
			if (res)
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_ERROR;
			else
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_CRC;
		} else {
			/* byte_num 0 starts a new block transfer. */
			if (!byte_num)
				vf_info->pfvf_counters.blk_tx++;

			res = adf_iov_block_get_byte(accel_dev,
						     msg_type,
						     byte_num,
						     &data,
						     vf_info->compat_ver);
			if (res)
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_ERROR;
			else
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_DATA;
		}
		resp =
		    (ADF_PF2VF_MSGORIGIN_SYSTEM |
		     (ADF_PF2VF_MSGTYPE_BLOCK_RESP << ADF_PF2VF_MSGTYPE_SHIFT) |
		     (resp_type << ADF_PF2VF_BLOCK_RESP_TYPE_SHIFT) |
		     (data << ADF_PF2VF_BLOCK_RESP_DATA_SHIFT));
	} break;
	default:
		device_printf(GET_DEV(accel_dev),
			      "Unknown message from VF%d (0x%x);\n",
			      vf_nr + 1,
			      msg);
	}

	/* To ack, clear the VF2PFINT bit and the in-use-by */
	msg &= ~ADF_VF2PF_INT;
	/*
	 * Clear the in-use pattern if the sender won't do it.
	 * Because the compatibility version must be the first message
	 * exchanged between the VF and PF, the vf_info->compat_ver must be
	 * set at this time.
	 * The in-use pattern is not cleared for notifications so that
	 * it can be used for collision detection.
	 */
	if (vf_info->compat_ver >= ADF_PFVF_COMPATIBILITY_FAST_ACK &&
	    !is_notification)
		msg &= ~ADF_VF2PF_IN_USE_BY_VF_MASK;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		device_printf(GET_DEV(accel_dev),
			      "Failed to send response to VF\n");

out:
	return;
}
604
605 void
606 adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
607 {
608 struct adf_accel_vf_info *vf;
609 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
610 (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
611
612 int i, num_vfs = accel_dev->u1.pf.num_vfs;
613 for (i = 0, vf = accel_dev->u1.pf.vf_info; i < num_vfs; i++, vf++) {
614 if (vf->init && adf_iov_notify(accel_dev, msg, i))
615 device_printf(GET_DEV(accel_dev),
616 "Failed to send restarting msg to VF%d\n",
617 i);
618 }
619 }
620
621 void
622 adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
623 {
624 struct adf_accel_vf_info *vf;
625 int i, num_vfs = accel_dev->u1.pf.num_vfs;
626 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
627 (ADF_PF2VF_MSGTYPE_FATAL_ERROR << ADF_PF2VF_MSGTYPE_SHIFT));
628
629 for (i = 0, vf = accel_dev->u1.pf.vf_info; i < num_vfs; i++, vf++) {
630 if (vf->init && adf_iov_notify(accel_dev, msg, i))
631 device_printf(
632 GET_DEV(accel_dev),
633 "Failed to send fatal error msg 0x%x to VF%d\n",
634 msg,
635 i);
636 }
637 }
638
639 int
640 adf_iov_register_compat_checker(struct adf_accel_dev *accel_dev,
641 const adf_iov_compat_checker_t cc)
642 {
643 struct adf_accel_compat_manager *cm = accel_dev->cm;
644 int num = 0;
645
646 if (!cm) {
647 device_printf(GET_DEV(accel_dev),
648 "QAT: compatibility manager not initialized\n");
649 return ENOMEM;
650 }
651
652 for (num = 0; num < ADF_COMPAT_CHECKER_MAX; num++) {
653 if (cm->iov_compat_checkers[num]) {
654 if (cc == cm->iov_compat_checkers[num]) {
655 device_printf(GET_DEV(accel_dev),
656 "QAT: already registered\n");
657 return EFAULT;
658 }
659 } else {
660 /* registering the new checker */
661 cm->iov_compat_checkers[num] = cc;
662 break;
663 }
664 }
665
666 if (num >= ADF_COMPAT_CHECKER_MAX) {
667 device_printf(GET_DEV(accel_dev),
668 "QAT: compatibility checkers are overflow.\n");
669 return EFAULT;
670 }
671
672 cm->num_chker = num;
673 return 0;
674 }
675
676 int
677 adf_iov_unregister_compat_checker(struct adf_accel_dev *accel_dev,
678 const adf_iov_compat_checker_t cc)
679 {
680 struct adf_accel_compat_manager *cm = accel_dev->cm;
681 int num = 0;
682
683 if (!cm) {
684 device_printf(GET_DEV(accel_dev),
685 "QAT: compatibility manager not initialized\n");
686 return ENOMEM;
687 }
688 num = cm->num_chker - 1;
689
690 if (num < 0) {
691 device_printf(
692 GET_DEV(accel_dev),
693 "QAT: Array 'iov_compat_checkers' may use index value(s) -1\n");
694 return EFAULT;
695 }
696 if (cc == cm->iov_compat_checkers[num]) {
697 /* unregistering the given checker */
698 cm->iov_compat_checkers[num] = NULL;
699 } else {
700 device_printf(
701 GET_DEV(accel_dev),
702 "QAT: unregistering not in the registered order\n");
703 return EFAULT;
704 }
705
706 cm->num_chker--;
707 return 0;
708 }
709
710 int
711 adf_iov_init_compat_manager(struct adf_accel_dev *accel_dev,
712 struct adf_accel_compat_manager **cm)
713 {
714 if (!(*cm)) {
715 *cm = malloc(sizeof(**cm), M_QAT, M_WAITOK | M_ZERO);
716 } else {
717 /* zero the struct */
718 explicit_bzero(*cm, sizeof(**cm));
719 }
720
721 return 0;
722 }
723
724 int
725 adf_iov_shutdown_compat_manager(struct adf_accel_dev *accel_dev,
726 struct adf_accel_compat_manager **cm)
727 {
728 if (*cm) {
729 free(*cm, M_QAT);
730 *cm = NULL;
731 }
732 return 0;
733 }
734
735 int
736 adf_iov_compatibility_check(struct adf_accel_dev *accel_dev, u8 compat_ver)
737 {
738 int compatible = ADF_PF2VF_VF_COMPATIBLE;
739 int i = 0;
740 struct adf_accel_compat_manager *cm = accel_dev->cm;
741
742 if (!cm) {
743 device_printf(GET_DEV(accel_dev),
744 "QAT: compatibility manager not initialized\n");
745 return ADF_PF2VF_VF_INCOMPATIBLE;
746 }
747 for (i = 0; i < cm->num_chker; i++) {
748 compatible = cm->iov_compat_checkers[i](accel_dev, compat_ver);
749 if (compatible == ADF_PF2VF_VF_INCOMPATIBLE) {
750 device_printf(
751 GET_DEV(accel_dev),
752 "QAT: PF and VF are incompatible [checker%d]\n",
753 i);
754 break;
755 }
756 }
757 return compatible;
758 }
759
/*
 * adf_vf2pf_request_version() - VF-side compatibility handshake: send a
 * COMPAT_VER_REQ to the PF, sleep until the response arrives (or times
 * out), then evaluate compatibility.
 * @accel_dev: VF acceleration device
 *
 * The interrupt handler wakes this thread via iov_msg_completion after
 * storing the PF's answer in accel_dev->u1.vf.{compatible,pf_version}.
 *
 * Return: 0 if VF and PF are compatible, EIO on send/response failure,
 * EFAULT if incompatible.
 */
static int
adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	u32 msg = 0;
	int ret = 0;
	int comp = 0;
	int response_received = 0;
	int retry_count = 0;
	struct pfvf_stats *pfvf_counters = NULL;

	pfvf_counters = &accel_dev->u1.vf.pfvf_counters;

	/* Build the request: system origin, COMPAT_VER_REQ type, and our
	 * own compatibility version in the payload field. */
	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	/* The version must fit in the single-byte wire field. */
	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
	/* Clear communication flag - without that VF will not be waiting for
	 * the response from host driver, and start sending init.
	 */
	accel_dev->u1.vf.iov_msg_completion = 0;
	do {
		/* Send request from VF to PF */
		if (retry_count)
			pfvf_counters->retry++;
		if (adf_iov_putmsg(accel_dev, msg, 0)) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed to send Compat Version Request.\n");
			return EIO;
		}
		mutex_lock(&accel_dev->u1.vf.vf2pf_lock);
		/* Sleep on the completion flag unless the response already
		 * arrived between send and lock acquisition. */
		if (accel_dev->u1.vf.iov_msg_completion == 0 &&
		    sx_sleep(&accel_dev->u1.vf.iov_msg_completion,
			     &accel_dev->u1.vf.vf2pf_lock.sx,
			     0,
			     "pfver",
			     timeout) == EWOULDBLOCK) {
			/* It's possible that wakeup could be missed */
			if (accel_dev->u1.vf.iov_msg_completion) {
				response_received = 1;
			} else {
				device_printf(
				    GET_DEV(accel_dev),
				    "IOV request/response message timeout expired\n");
			}
		} else {
			response_received = 1;
		}
		mutex_unlock(&accel_dev->u1.vf.vf2pf_lock);
	} while (!response_received &&
		 ++retry_count < ADF_IOV_MSG_RESP_RETRIES);

	if (!response_received)
		pfvf_counters->rx_timeout++;
	else
		pfvf_counters->rx_rsp++;
	if (!response_received)
		return EIO;

	/* PF left the verdict open (VF newer than PF): evaluate locally
	 * against the PF's reported version; otherwise trust the PF. */
	if (accel_dev->u1.vf.compatible == ADF_PF2VF_VF_COMPAT_UNKNOWN)
		/* Response from PF received, check compatibility */
		comp = adf_iov_compatibility_check(accel_dev,
						   accel_dev->u1.vf.pf_version);
	else
		comp = accel_dev->u1.vf.compatible;

	ret = (comp == ADF_PF2VF_VF_COMPATIBLE) ? 0 : EFAULT;
	if (ret)
		device_printf(
		    GET_DEV(accel_dev),
		    "VF is not compatible with PF, due to the reason %d\n",
		    comp);

	return ret;
}
836
837 /**
838 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
839 *
840 * @accel_dev: Pointer to acceleration device virtual function.
841 *
842 * Return: 0 on success, error code otherwise.
843 */
844 int
845 adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
846 {
847 int ret = 0;
848
849 /* init workqueue for VF */
850 ret = adf_init_vf_wq();
851 if (ret)
852 return ret;
853
854 adf_enable_pf2vf_interrupts(accel_dev);
855 adf_iov_init_compat_manager(accel_dev, &accel_dev->cm);
856 return adf_vf2pf_request_version(accel_dev);
857 }
858 /**
859 * adf_disable_vf2pf_comms() - Function disables communication from vf to pf
860 *
861 * @accel_dev: Pointer to acceleration device virtual function.
862 *
863 * Return: 0 on success, error code otherwise.
864 */
865 int
866 adf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev)
867 {
868 return adf_iov_shutdown_compat_manager(accel_dev, &accel_dev->cm);
869 }
870
871 /**
872 * adf_pf_enable_vf2pf_comms() - Function enables communication from pf
873 *
874 * @accel_dev: Pointer to acceleration device physical function.
875 *
876 * Return: 0 on success, error code otherwise.
877 */
878 int
879 adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
880 {
881 adf_iov_init_compat_manager(accel_dev, &accel_dev->cm);
882 return 0;
883 }
884
885 /**
886 * adf_pf_disable_vf2pf_comms() - Function disables communication from pf
887 *
888 * @accel_dev: Pointer to acceleration device physical function.
889 *
890 * Return: 0 on success, error code otherwise.
891 */
892 int
893 adf_pf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev)
894 {
895 return adf_iov_shutdown_compat_manager(accel_dev, &accel_dev->cm);
896 }
/* Cache object: b1b562eaf7b71afbdb57d35749ddbdcb */