/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

/**
 * adf_vf2pf_init() - send init msg to PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Function sends an init message from the VF to the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
                   (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));

        if (adf_iov_notify(accel_dev, msg, 0)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to send Init event to PF\n");
                return -EFAULT;
        }
        set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
        return 0;
}

/**
 * adf_vf2pf_shutdown() - send shutdown msg to PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Function sends a shutdown message from the VF to the PF.
 *
 * Return: void
 */
void
adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
                   (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));

        mutex_init(&accel_dev->u1.vf.vf2pf_lock);
        if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
                if (adf_iov_notify(accel_dev, msg, 0))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to send Shutdown event to PF\n");
        mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}

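/*
 * adf_iov_block_get_bc() - request a single byte of a block message from
 * the PF. Depending on get_crc, the PF returns either the data byte at
 * msg_index or a CRC computed over the block. The request is retried until
 * a response arrives or the retry limit is reached.
 */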
static int
adf_iov_block_get_bc(struct adf_accel_dev *accel_dev,
                     u8 msg_type,
                     u8 msg_index,
                     u8 *data,
                     int get_crc)
{
        u8 blk_type;
        u32 msg;
        unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
        int response_received = 0;
        int retry_count = 0;

        msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
        if (get_crc)
                msg |= 1 << ADF_VF2PF_BLOCK_REQ_CRC_SHIFT;

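        /*
         * Encode the block type and byte index into the request word; the
         * field layout differs for small, medium and large block message
         * types.
         */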
        if (msg_type <= ADF_VF2PF_MAX_SMALL_MESSAGE_TYPE) {
                if (msg_index >=
                    ADF_VF2PF_SMALL_PAYLOAD_SIZE + ADF_VF2PF_BLOCK_DATA) {
                        device_printf(
                            GET_DEV(accel_dev),
                            "Invalid byte index %d for message type %d\n",
                            msg_index,
                            msg_type);
                        return -EINVAL;
                }
                msg |= ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ
                    << ADF_VF2PF_MSGTYPE_SHIFT;
                blk_type = msg_type;
                msg |= blk_type << ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT;
                msg |= msg_index << ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_SHIFT;
        } else if (msg_type <= ADF_VF2PF_MAX_MEDIUM_MESSAGE_TYPE) {
                if (msg_index >=
                    ADF_VF2PF_MEDIUM_PAYLOAD_SIZE + ADF_VF2PF_BLOCK_DATA) {
                        device_printf(
                            GET_DEV(accel_dev),
                            "Invalid byte index %d for message type %d\n",
                            msg_index,
                            msg_type);
                        return -EINVAL;
                }
                msg |= ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ
                    << ADF_VF2PF_MSGTYPE_SHIFT;
                blk_type = msg_type - ADF_VF2PF_MIN_MEDIUM_MESSAGE_TYPE;
                msg |= blk_type << ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT;
                msg |= msg_index << ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_SHIFT;
        } else if (msg_type <= ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE) {
                if (msg_index >=
                    ADF_VF2PF_LARGE_PAYLOAD_SIZE + ADF_VF2PF_BLOCK_DATA) {
                        device_printf(
                            GET_DEV(accel_dev),
                            "Invalid byte index %d for message type %d\n",
                            msg_index,
                            msg_type);
                        return -EINVAL;
                }
                msg |= ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ
                    << ADF_VF2PF_MSGTYPE_SHIFT;
                blk_type = msg_type - ADF_VF2PF_MIN_LARGE_MESSAGE_TYPE;
                msg |= blk_type << ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT;
                msg |= msg_index << ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_SHIFT;
        } else {
                device_printf(GET_DEV(accel_dev),
                              "Invalid message type %d\n",
                              msg_type);
                return -EINVAL;
        }
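        /*
         * Clear the completion flag, then send the request and sleep until
         * the PF response arrives or the timeout expires, retrying up to
         * ADF_IOV_MSG_RESP_RETRIES times.
         */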
        accel_dev->u1.vf.iov_msg_completion = 0;
        do {
                /* Send request from VF to PF */
                if (retry_count)
                        accel_dev->u1.vf.pfvf_counters.retry++;
                if (adf_iov_putmsg(accel_dev, msg, 0)) {
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to send block request to PF\n");
                        return EIO;
                }

                /* Wait for response */
                mutex_lock(&accel_dev->u1.vf.vf2pf_lock);
                if (accel_dev->u1.vf.iov_msg_completion == 0 &&
                    sx_sleep(&accel_dev->u1.vf.iov_msg_completion,
                             &accel_dev->u1.vf.vf2pf_lock.sx,
                             0,
                             "pfver",
                             timeout) == EWOULDBLOCK) {
                        /* It's possible that wakeup could be missed */
                        if (accel_dev->u1.vf.iov_msg_completion) {
                                response_received = 1;
                        } else {
                                device_printf(
                                    GET_DEV(accel_dev),
                                    "IOV request/response message timeout expired\n");
                        }
                } else {
                        response_received = 1;
                }
                mutex_unlock(&accel_dev->u1.vf.vf2pf_lock);
        } while (!response_received &&
                 ++retry_count < ADF_IOV_MSG_RESP_RETRIES);

        if (!response_received)
                accel_dev->u1.vf.pfvf_counters.rx_timeout++;
        else
                accel_dev->u1.vf.pfvf_counters.rx_rsp++;

        if (!response_received)
                return EIO;

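        /* Check that the PF replied with the expected response type. */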
        if (accel_dev->u1.vf.pf2vf_block_resp_type !=
            (get_crc ? ADF_PF2VF_BLOCK_RESP_TYPE_CRC :
                       ADF_PF2VF_BLOCK_RESP_TYPE_DATA)) {
                device_printf(
                    GET_DEV(accel_dev),
                    "%sBlock response type %d, data %d, msg %d, index %d\n",
                    get_crc ? "CRC " : "",
                    accel_dev->u1.vf.pf2vf_block_resp_type,
                    accel_dev->u1.vf.pf2vf_block_byte,
                    msg_type,
                    msg_index);
                return -EIO;
        }
        *data = accel_dev->u1.vf.pf2vf_block_byte;
        return 0;
}

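/* Fetch a single data byte of a block message from the PF. */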
static int
adf_iov_block_get_byte(struct adf_accel_dev *accel_dev,
                       u8 msg_type,
                       u8 msg_index,
                       u8 *data)
{
        return adf_iov_block_get_bc(accel_dev, msg_type, msg_index, data, 0);
}

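/*
 * Fetch the CRC for a block message. The caller passes the index one past
 * the last payload byte; the request itself carries msg_index - 1 with the
 * CRC flag set.
 */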
static int
adf_iov_block_get_crc(struct adf_accel_dev *accel_dev,
                      u8 msg_type,
                      u8 msg_index,
                      u8 *crc)
{
        return adf_iov_block_get_bc(accel_dev, msg_type, msg_index - 1, crc, 1);
}

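/**
 * adf_iov_block_get() - read a block message from the PF
 * @accel_dev: Pointer to acceleration VF device.
 * @msg_type: Type of block message to request.
 * @block_version: Output for the block version byte.
 * @buffer: Buffer receiving the block payload.
 * @length: In: size of @buffer. Out: number of payload bytes returned.
 *
 * Function reads the block version, length and payload from the PF one byte
 * at a time and verifies the data against the CRC reported by the PF.
 *
 * Return: 0 on success, error code otherwise.
 */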
int
adf_iov_block_get(struct adf_accel_dev *accel_dev,
                  u8 msg_type,
                  u8 *block_version,
                  u8 *buffer,
                  u8 *length)
{
        u8 buf_size = *length;
        u8 payload_len;
        u8 remote_crc;
        u8 local_crc;
        u8 buf_index;
        int ret;

        if (msg_type > ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE) {
                device_printf(GET_DEV(accel_dev),
                              "Invalid message type %d\n",
                              msg_type);
                return -EINVAL;
        }

        ret = adf_iov_block_get_byte(accel_dev,
                                     msg_type,
                                     ADF_VF2PF_BLOCK_VERSION_BYTE,
                                     block_version);
        if (ret)
                return ret;

        ret = adf_iov_block_get_byte(accel_dev,
                                     msg_type,
                                     ADF_VF2PF_BLOCK_LEN_BYTE,
                                     length);
        if (ret)
                return ret;

        payload_len = *length;

        if (buf_size < payload_len) {
                device_printf(
                    GET_DEV(accel_dev),
                    "Truncating block type %d response from %d to %d bytes\n",
                    msg_type,
                    payload_len,
                    buf_size);
                payload_len = buf_size;
        }

        /* Get the data */
        for (buf_index = 0; buf_index < payload_len; buf_index++) {
                ret = adf_iov_block_get_byte(accel_dev,
                                             msg_type,
                                             buf_index + ADF_VF2PF_BLOCK_DATA,
                                             buffer + buf_index);
                if (ret)
                        return ret;
        }

        ret = adf_iov_block_get_crc(accel_dev,
                                    msg_type,
                                    payload_len + ADF_VF2PF_BLOCK_DATA,
                                    &remote_crc);
        if (ret)
                return ret;
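        /*
         * Compute the CRC locally over the version, length and payload bytes
         * and compare it against the CRC reported by the PF.
         */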
        local_crc = adf_pfvf_crc(ADF_CRC8_INIT_VALUE, block_version, 1);
        local_crc = adf_pfvf_crc(local_crc, length, 1);
        local_crc = adf_pfvf_crc(local_crc, buffer, payload_len);
        if (local_crc != remote_crc) {
                device_printf(
                    GET_DEV(accel_dev),
                    "CRC error on msg type %d. Local %02X, remote %02X\n",
                    msg_type,
                    local_crc,
                    remote_crc);
                accel_dev->u1.vf.pfvf_counters.crc_err++;
                return EIO;
        }

        accel_dev->u1.vf.pfvf_counters.blk_rx++;
        *length = payload_len;
        return 0;
}