/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>

#define ADF_AE_PAIR 2
#define PKE_SLICES_PER_AE_PAIR 5

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
static u8 id_map[ADF_MAX_DEVICES];

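/*
 * Entry in vfs_table mapping a device's bus/device/function to its real
 * and fake ids. A bdf of ~0 marks a PF on the host (or a VF on a guest)
 * rather than a host-side VF.
 */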
struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

/**
 * adf_get_vf_real_id() - Translate fake to real device id
 *
 * The "real" id is assigned to a device when it is initially
 * bound to the driver.
 * The "fake" id is usually the same as the real id, but
 * can change when devices are unbound from the qat driver,
 * perhaps to assign the device to a guest.
 */
static int
adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table)
	{
		struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list);

		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}

/**
 * adf_clean_vf_map() - Clean VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *	for vfs only or for both vfs and pfs
 *
 * Function cleans internal ids for virtual functions.
 */
void
adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table)
	{
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		free(map, M_QAT);
	}
	mutex_unlock(&table_lock);
}

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data: Pointer to internal device data.
 *
 * Function updates the internal device index for VFs.
 */
void
adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table)
	{
		struct adf_accel_dev *ptr =
		    list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}

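/*
 * Find the first free slot in id_map, mark it as used, and return its
 * index. Returns ADF_MAX_DEVICES + 1 when the table is full.
 */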
static unsigned int
adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev: Pointer to acceleration device.
 * @pf: Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		device_printf(GET_DEV(accel_dev),
		    "Only support up to %d devices\n",
		    ADF_MAX_DEVICES);
		return EFAULT;
	}

	mutex_lock(&table_lock);

	/* PF on host or VF on guest */
	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
		struct vf_id_map *map;

		/* Reject a device that is already registered. */
		list_for_each(itr, &accel_table)
		{
			struct adf_accel_dev *ptr =
			    list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = EFAULT;
			goto unlock;
		}
		num_devices++;
		map = malloc(sizeof(*map), M_QAT, GFP_KERNEL);
		if (!map) {
			ret = ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host: not supported by this driver. */
		ret = ENOTSUP;
		goto unlock;
	}
unlock:
	mutex_unlock(&table_lock);
	return ret;
}

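/*
 * Return the head of the global list of registered acceleration devices.
 */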
struct list_head *
adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev: Pointer to acceleration device.
 * @pf: Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	}
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}

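/*
 * Return the first registered acceleration device, or NULL if none have
 * been added yet.
 */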
struct adf_accel_dev *
adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table,
		    struct adf_accel_dev, list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev: Pointer to pci device.
 *
 * Function returns acceleration device associated with the given pci device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *
adf_devmgr_pci_to_accel_dev(device_t pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table)
	{
		struct adf_accel_dev *ptr =
		    list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}

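/*
 * Look up a device by its (possibly fake) id. The id is first translated
 * to the real id via the vfs_table before the accel_table is searched.
 */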
struct adf_accel_dev *
adf_devmgr_get_dev_by_id(uint32_t id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table)
	{
		struct adf_accel_dev *ptr =
		    list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

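/*
 * Validate a device id supplied by a caller. ADF_CFG_ALL_DEVICES is
 * accepted as-is; any other id must name a registered device and is
 * rewritten to the device's real id on success.
 */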
int
adf_devmgr_verify_id(uint32_t *id)
{
	struct adf_accel_dev *accel_dev;

	if (*id == ADF_CFG_ALL_DEVICES)
		return 0;

	accel_dev = adf_devmgr_get_dev_by_id(*id);
	if (!accel_dev)
		return ENODEV;

	/* Correct the id if real and fake differ */
	*id = accel_dev->accel_id;
	return 0;
}

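/*
 * Count VFs that have a real bdf recorded but are currently detached,
 * e.g. because they have been unbound for assignment to a guest.
 */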
static int
adf_get_num_detached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table)
	{
		struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

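/*
 * Report the number of devices visible to callers: all registered
 * devices minus VFs that are currently detached.
 */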
void
adf_devmgr_get_num_dev(uint32_t *num)
{
	*num = num_devices - adf_get_num_detached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int
adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount and, if this takes the count from
 * zero to one, mark the underlying device busy as well.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		device_busy(GET_DEV(accel_dev));
}

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and, if this drops the count back
 * to zero, clear the device's busy state.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		device_unbusy(GET_DEV(accel_dev));
}

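/*
 * Typical caller usage is to bracket work submitted to a device with a
 * get/put pair (illustrative sketch, not taken from a specific caller;
 * submit_request() is a hypothetical helper):
 *
 *	adf_dev_get(accel_dev);
 *	ret = submit_request(accel_dev, req);
 *	adf_dev_put(accel_dev);
 *
 * Every adf_dev_get() must be balanced by an adf_dev_put(); the first
 * get marks the device busy and the last put clears the busy state.
 */
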
/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int
adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int
adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}