/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_dev_err.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>
#include <linux/delay.h>

/* Mask used to check the CompressAndVerify capability bit */
#define DC_CNV_EXTENDED_CAPABILITY (0x01)

/* Mask used to check the CompressAndVerifyAndRecover capability bit */
#define DC_CNVNR_EXTENDED_CAPABILITY (0x100)

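/*
 * Table of registered subservice handlers. Entries are added and removed
 * under service_lock and are notified of device lifecycle events (init,
 * start, stop, shutdown, restarting, error) by the routines below.
 */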
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static void
adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}

int
adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}
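
/*
 * Example usage (illustrative sketch only; the handler and service names
 * below are hypothetical). A subsystem fills in a struct service_hndl with
 * a name and an event handler, then registers it once, before any device
 * is initialised:
 *
 *	static int my_event_hld(struct adf_accel_dev *accel_dev,
 *				enum adf_event event);
 *	static struct service_hndl my_service = {
 *		.name = "my_service",
 *		.event_hld = my_event_hld,
 *	};
 *
 *	adf_service_register(&my_service);
 */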

static void
adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}

int
adf_service_unregister(struct service_hndl *service)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
		if (service->init_status[i] || service->start_status[i]) {
			pr_err("QAT: Could not remove active service [%d]\n",
			       i);
			return EFAULT;
		}
	}
	adf_service_remove(service);
	return 0;
}

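/*
 * Populate the [GENERAL] section of the device configuration with
 * parameters describing the device (bank and ring counts, capability
 * masks, package and NUMA node IDs, hardware revision and MMP firmware
 * version) so that they can be queried later through the configuration
 * interface.
 */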
static int
adf_cfg_add_device_params(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	struct adf_hw_device_data *hw_data = NULL;
	unsigned long val;

	if (!accel_dev)
		return -EINVAL;

	hw_data = accel_dev->hw_device;

	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS);
	val = GET_MAX_BANKS(accel_dev);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

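	/*
	 * Note: for ADF_HEX parameters the value itself is cast to a
	 * pointer (not its address), matching how adf_cfg_add_key_value_param()
	 * consumes hex values, whereas ADF_DEC parameters are passed by
	 * address and dereferenced.
	 */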
	snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK);
	val = hw_data->accel_capabilities_mask;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_PKG_ID);
	val = accel_dev->accel_id;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_NODE_ID);
	val = dev_to_node(GET_DEV(accel_dev));
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK);
	val = hw_data->num_rings_per_bank;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY);
	snprintf(hw_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d",
		 accel_dev->accel_pci_dev.revid);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR))
		goto err;

	snprintf(key, sizeof(key), ADF_MMP_VER_KEY);
	snprintf(mmp_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d.%d.%d",
		 accel_dev->fw_versions.mmp_version_major,
		 accel_dev->fw_versions.mmp_version_minor,
		 accel_dev->fw_versions.mmp_version_patch);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR))
		goto err;

	return 0;
err:
	device_printf(GET_DEV(accel_dev),
		      "Failed to add internal values to accel_dev cfg\n");
	return -EINVAL;
}

static int
adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key, sizeof(key), ADF_UOF_VER_KEY);
	snprintf(fw_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d.%d.%d",
		 accel_dev->fw_versions.fw_version_major,
		 accel_dev->fw_versions.fw_version_minor,
		 accel_dev->fw_versions.fw_version_patch);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR))
		return EFAULT;

	return 0;
}

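/*
 * Export the extended compression capabilities (including the CNV and
 * CNVNR bits defined at the top of this file) to the [GENERAL] section.
 * Called from adf_dev_start() once the device is up.
 */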
static int
adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	unsigned long val;

	snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES);

	val = hw_data->extended_dc_capabilities;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		return -EINVAL;

	return 0;
}

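/*
 * Notify every registered service that a device error has been detected,
 * so each can take its own recovery action.
 */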
void
adf_error_notifier(uintptr_t arg)
{
	struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg;
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_ERROR))
			device_printf(GET_DEV(accel_dev),
				      "Failed to send error event to %s.\n",
				      service->name);
	}
}

/**
 * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
 * @accel_dev: Pointer to acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;
	u32 i;
	unsigned int mask;
	u32 clk_per_sec = hw_data->get_clock_speed(hw_data);
	u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000);
	u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE;
	char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

	/* Get Watch Dog Timer for CySym+Comp from the configuration */
	if (!adf_cfg_get_param_value(accel_dev,
				     ADF_GENERAL_SEC,
				     ADF_DEV_SSM_WDT_BULK,
				     (char *)timer_str)) {
		if (!compat_strtouint((char *)timer_str,
				      ADF_CFG_BASE_DEC,
				      &timer_val))
			/* Convert msec to CPP clocks */
			timer_val = timer_val * (clk_per_sec / 1000);
	}
	/* Get Watch Dog Timer for CyAsym from the configuration */
	if (!adf_cfg_get_param_value(accel_dev,
				     ADF_GENERAL_SEC,
				     ADF_DEV_SSM_WDT_PKE,
				     (char *)timer_str)) {
		if (!compat_strtouint((char *)timer_str,
				      ADF_CFG_BASE_DEC,
				      &timer_val_pke))
			/* Convert msec to CPP clocks */
			timer_val_pke = timer_val_pke * (clk_per_sec / 1000);
	}

	for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* Enable Watch Dog Timer for CySym + Comp */
		ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val);
		/* Enable Watch Dog Timer for CyAsym */
		ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke);
	}
	return 0;
}

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	int ret = 0;

	sysctl_ctx_init(&accel_dev->sysctl_ctx);
	set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status);

	if (!hw_data) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to init device - hw_data not set\n");
		return EFAULT;
	}
	if (hw_data->reset_hw_units)
		hw_data->reset_hw_units(accel_dev);

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		device_printf(GET_DEV(accel_dev), "Device not configured\n");
		return EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Failed to initialize etr\n");
		return EFAULT;
	}

	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize device\n");
		return EFAULT;
	}

	if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize accel_units\n");
		return EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize admin comms\n");
		return EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize hw arbiter\n");
		return EFAULT;
	}

	if (hw_data->set_asym_rings_mask)
		hw_data->set_asym_rings_mask(accel_dev);

	hw_data->enable_ints(accel_dev);

	if (adf_ae_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialise Acceleration Engine\n");
		return EFAULT;
	}

	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to load acceleration FW\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to allocate interrupts\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	if (hw_data->init_ras && hw_data->init_ras(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Failed to init RAS\n");
		return EFAULT;
	}

	hw_data->enable_ints(accel_dev);

	hw_data->enable_error_correction(accel_dev);

	if (hw_data->enable_vf2pf_comms &&
	    hw_data->enable_vf2pf_comms(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Failed to enable vf2pf comms\n");
		return EFAULT;
	}

	if (adf_pf_vf_capabilities_init(accel_dev))
		return EFAULT;

	if (adf_pf_vf_ring_to_svc_init(accel_dev))
		return EFAULT;

	if (adf_cfg_add_device_params(accel_dev))
		return EFAULT;

	if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev))
		return EFAULT;

	if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev))
		return EFAULT;

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to initialise service %s\n",
				      service->name);
			return EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	/* Read autoreset on error parameter */
	ret = adf_cfg_get_param_value(accel_dev,
				      ADF_GENERAL_SEC,
				      ADF_AUTO_RESET_ON_ERROR,
				      value);
	if (!ret) {
		if (compat_strtouint(value,
				     10,
				     &accel_dev->autoreset_on_error)) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed converting %s to a decimal value\n",
			    ADF_AUTO_RESET_ON_ERROR);
			return EFAULT;
		}
	}

	return 0;
}

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
	if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Device %d not found\n",
			      accel_dev->accel_id);
		return ENODEV;
	}
	if (adf_ae_start(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "AE Start Failed\n");
		return EFAULT;
	}

	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	if (hw_data->send_admin_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to send init message\n");
		return EFAULT;
	}

	if (adf_cfg_add_fw_version(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to update configuration FW version\n");
		return EFAULT;
	}

	if (hw_data->measure_clock)
		hw_data->measure_clock(accel_dev);

	/*
	 * Set ssm watch dog timer for slice hang detection
	 * Note! Not supported on devices older than C62x
	 */
	if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Failed to set ssm watch dog timer\n");
		return EFAULT;
	}

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to start service %s\n",
				      service->name);
			return EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) &&
	    adf_cfg_add_ext_params(accel_dev))
		return EFAULT;

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	return 0;
}
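
/*
 * Typical bring-up sequence in a device-specific driver (illustrative
 * sketch only, error handling and teardown omitted):
 *
 *	if (adf_dev_init(accel_dev))
 *		return EFAULT;
 *	if (adf_dev_start(accel_dev))
 *		return EFAULT;
 */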

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Device %d not found\n",
			      accel_dev->accel_id);
		return ENODEV;
	}
	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
		return 0;
	}

	if (adf_dev_stop_notify_sync(accel_dev)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Waiting for device un-busy failed. Retries limit reached\n");
		return EBUSY;
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		clear_bit(accel_dev->accel_id, service->start_status);
	}

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			device_printf(GET_DEV(accel_dev),
				      "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}

	return 0;
}

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
void
adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

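	/* Release resources acquired in adf_dev_init(). */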
	if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) {
		sysctl_ctx_free(&accel_dev->sysctl_ctx);
		clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED,
			  &accel_dev->status);
	}

	if (!hw_data) {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			device_printf(GET_DEV(accel_dev),
				      "Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			device_printf(GET_DEV(accel_dev),
				      "Failed to shutdown service %s\n",
				      service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	hw_data->disable_iov(accel_dev);

	if (hw_data->disable_vf2pf_comms)
		hw_data->disable_vf2pf_comms(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	if (hw_data->remove_pke_stats)
		hw_data->remove_pke_stats(accel_dev);

	if (hw_data->remove_misc_error)
		hw_data->remove_misc_error(accel_dev);

	if (hw_data->exit_ras)
		hw_data->exit_ras(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	if (hw_data->exit_accel_units)
		hw_data->exit_accel_units(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	if (hw_data->restore_device)
		hw_data->restore_device(accel_dev);
}

/**
 * adf_dev_reset() - Reset acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @mode: Specifies reset mode - synchronous or asynchronous.
 *
 * Function notifies all the registered services that the acceleration device
 * is resetting.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode)
{
	return adf_dev_aer_schedule_reset(accel_dev, mode);
}

int
adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			device_printf(GET_DEV(accel_dev),
				      "Failed to restart service %s.\n",
				      service->name);
	}
	return 0;
}

int
adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev)
{
	int times;

	adf_dev_restarting_notify(accel_dev);
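	/* Poll up to ADF_STOP_RETRY times, 100 ms apart, for users to finish. */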
	for (times = 0; times < ADF_STOP_RETRY; times++) {
		if (!adf_dev_in_use(accel_dev))
			break;
		dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
		pause_ms("adfstop", 100);
	}
	if (adf_dev_in_use(accel_dev)) {
		clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
		device_printf(GET_DEV(accel_dev),
			      "Device still in use during reset sequence.\n");
		return EBUSY;
	}

	return 0;
}

int
adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev)
{
	int times;

	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
			device_printf(GET_DEV(accel_dev),
				      "Failed to stop service %s.\n",
				      service->name);
	}

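	/* Wait, retrying as above, for in-flight users to release the device. */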
	for (times = 0; times < ADF_STOP_RETRY; times++) {
		if (!adf_dev_in_use(accel_dev))
			break;
		dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
		pause_ms("adfstop", 100);
	}
	if (adf_dev_in_use(accel_dev)) {
		clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
		device_printf(GET_DEV(accel_dev),
			      "Device still in use during stop sequence.\n");
		return EBUSY;
	}

	return 0;
}

int
adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			device_printf(GET_DEV(accel_dev),
				      "Failed to restart service %s.\n",
				      service->name);
	}
	return 0;
}