/*-
 * Copyright (c) 2017-2019 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/compat.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/irq_work.h>

#include <sys/kernel.h>

/*
 * Define all work struct states
 */
enum {
	WORK_ST_IDLE,			/* idle - not started */
	WORK_ST_TIMER,			/* timer is being started */
	WORK_ST_TASK,			/* taskqueue is being queued */
	WORK_ST_EXEC,			/* callback is being called */
	WORK_ST_CANCEL,			/* cancel is being requested */
	WORK_ST_MAX,
};

/*
 * Define global workqueues
 */
static struct workqueue_struct *linux_system_short_wq;
static struct workqueue_struct *linux_system_long_wq;

struct workqueue_struct *system_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_power_efficient_wq;

struct taskqueue *linux_irq_work_tq;

static int linux_default_wq_cpus = 4;

static void linux_delayed_work_timer_fn(void *);

/*
 * This function atomically updates the work state and returns the
 * previous state at the time of update.
 */
static uint8_t
linux_update_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}
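
/*
 * Usage sketch (illustrative only): callers pass one of the per-function
 * transition tables defined below, indexed by the current state. For
 * example, with the table used by linux_queue_work_on(), a work item in
 * WORK_ST_IDLE is atomically moved to WORK_ST_TASK and the previous state
 * is returned, telling the caller whether it still has to enqueue the task:
 *
 *	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
 *		[WORK_ST_IDLE] = WORK_ST_TASK,
 *		...
 *	};
 *
 *	switch (linux_update_state(&work->state, states)) {
 *	case WORK_ST_IDLE:
 *		(first queueing - enqueue the task)
 *		break;
 *	...
 *	}
 */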

/*
 * A LinuxKPI task is allowed to free itself inside the callback function
 * and cannot safely be referred to after the callback function has
 * completed. This function gives the linux_work_fn() function a hint
 * that the task is not going away and can have its state checked
 * again. Without this extra hint LinuxKPI tasks cannot be serialized
 * across multiple worker threads.
 */
static bool
linux_work_exec_unblock(struct work_struct *work)
{
	struct workqueue_struct *wq;
	struct work_exec *exec;
	bool retval = false;

	wq = work->work_queue;
	if (unlikely(wq == NULL))
		goto done;

	WQ_EXEC_LOCK(wq);
	TAILQ_FOREACH(exec, &wq->exec_head, entry) {
		if (exec->target == work) {
			exec->target = NULL;
			retval = true;
			break;
		}
	}
	WQ_EXEC_UNLOCK(wq);
done:
	return (retval);
}

static void
linux_delayed_work_enqueue(struct delayed_work *dwork)
{
	struct taskqueue *tq;

	tq = dwork->work.work_queue->taskqueue;
	taskqueue_enqueue(tq, &dwork->work.work_task);
}

/*
 * This function queues the given work structure on the given
 * workqueue. It returns true if the work was successfully
 * [re-]queued. Otherwise the work is already pending for completion.
 */
bool
linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
    struct work_struct *work)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_TASK,		/* start queuing task */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_TASK,		/* queue task another time */
		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* start queuing task again */
	};

	if (atomic_read(&wq->draining) != 0)
		return (!work_pending(work));

	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_EXEC:
	case WORK_ST_CANCEL:
		if (linux_work_exec_unblock(work) != 0)
			return (true);
		/* FALLTHROUGH */
	case WORK_ST_IDLE:
		work->work_queue = wq;
		taskqueue_enqueue(wq->taskqueue, &work->work_task);
		return (true);
	default:
		return (false);		/* already on a queue */
	}
}
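
/*
 * Typical consumer usage, as seen through the Linux-style wrappers in
 * <linux/workqueue.h> that end up calling linux_queue_work_on(). The
 * "my_softc" structure and "my_work_cb" callback below are hypothetical:
 *
 *	struct my_softc {
 *		struct work_struct work;
 *	};
 *
 *	static void
 *	my_work_cb(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, work);
 *		(do deferred processing for "sc" in thread context)
 *	}
 *
 *	INIT_WORK(&sc->work, my_work_cb);
 *	queue_work(system_wq, &sc->work);	(or schedule_work(&sc->work))
 */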

/*
 * Callback function for linux_queue_rcu_work()
 */
static void
rcu_work_func(struct rcu_head *rcu)
{
	struct rcu_work *rwork;

	rwork = container_of(rcu, struct rcu_work, rcu);
	linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
}

/*
 * This function queues the given work structure after an RCU grace
 * period has elapsed. If the work was already pending it returns
 * false. Otherwise it calls call_rcu() and returns true.
 */
bool
linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
{

	if (!linux_work_pending(&rwork->work)) {
		rwork->wq = wq;
		linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
		return (true);
	}
	return (false);
}
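
/*
 * Sketch of the Linux-style calling convention backed by
 * linux_queue_rcu_work(), assuming the usual INIT_RCU_WORK() and
 * queue_rcu_work() wrappers; "my_obj" and "my_free_cb" are hypothetical.
 * The callback only runs after an RCU grace period, which makes this a
 * convenient way to defer freeing RCU-protected objects to thread context:
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *	};
 *
 *	static void
 *	my_free_cb(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *		    container_of(work, struct my_obj, rwork.work);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free_cb);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */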

/*
 * This function waits for the last execution of a work item and then
 * flushes the work. It returns true if the work was pending and has
 * been waited for. Otherwise it returns false.
 */
bool
linux_flush_rcu_work(struct rcu_work *rwork)
{

	if (linux_work_pending(&rwork->work)) {
		linux_rcu_barrier(RCU_TYPE_REGULAR);
		linux_flush_work(&rwork->work);
		return (true);
	}
	return (linux_flush_work(&rwork->work));
}

/*
 * This function queues the given work structure on the given
 * workqueue after a given delay in ticks. It returns true if the
 * work was successfully [re-]queued. Otherwise the work is already
 * pending for completion.
 */
bool
linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    struct delayed_work *dwork, unsigned delay)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_TIMER,		/* start timeout */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_TIMER,		/* start timeout */
		[WORK_ST_CANCEL] = WORK_ST_TIMER,	/* start timeout */
	};

	if (atomic_read(&wq->draining) != 0)
		return (!work_pending(&dwork->work));

	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_EXEC:
	case WORK_ST_CANCEL:
		if (delay == 0 && linux_work_exec_unblock(&dwork->work) != 0) {
			dwork->timer.expires = jiffies;
			return (true);
		}
		/* FALLTHROUGH */
	case WORK_ST_IDLE:
		dwork->work.work_queue = wq;
		dwork->timer.expires = jiffies + delay;

		if (delay == 0) {
			linux_delayed_work_enqueue(dwork);
		} else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
			mtx_lock(&dwork->timer.mtx);
			callout_reset_on(&dwork->timer.callout, delay,
			    &linux_delayed_work_timer_fn, dwork, cpu);
			mtx_unlock(&dwork->timer.mtx);
		} else {
			mtx_lock(&dwork->timer.mtx);
			callout_reset(&dwork->timer.callout, delay,
			    &linux_delayed_work_timer_fn, dwork);
			mtx_unlock(&dwork->timer.mtx);
		}
		return (true);
	default:
		return (false);		/* already on a queue */
	}
}
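
/*
 * Typical consumer usage via the Linux-style wrappers; "my_softc" and
 * "my_timeout_cb" are hypothetical. Note that the delay is given in
 * ticks (jiffies), so callers normally convert from milliseconds:
 *
 *	struct my_softc {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void
 *	my_timeout_cb(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, dwork.work);
 *		(delayed or periodic processing for "sc")
 *	}
 *
 *	INIT_DELAYED_WORK(&sc->dwork, my_timeout_cb);
 *	queue_delayed_work(system_wq, &sc->dwork, msecs_to_jiffies(100));
 */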

void
linux_work_fn(void *context, int pending)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_EXEC,		/* delayed work w/o timeout */
		[WORK_ST_TASK] = WORK_ST_EXEC,		/* call callback */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* complete callback */
		[WORK_ST_CANCEL] = WORK_ST_EXEC,	/* failed to cancel */
	};
	struct work_struct *work;
	struct workqueue_struct *wq;
	struct work_exec exec;
	struct task_struct *task;

	task = current;

	/* setup local variables */
	work = context;
	wq = work->work_queue;

	/* store target pointer */
	exec.target = work;

	/* insert executor into list */
	WQ_EXEC_LOCK(wq);
	TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
	while (1) {
		switch (linux_update_state(&work->state, states)) {
		case WORK_ST_TIMER:
		case WORK_ST_TASK:
		case WORK_ST_CANCEL:
			WQ_EXEC_UNLOCK(wq);

			/* set current work structure */
			task->work = work;

			/* call work function */
			work->func(work);

			/* clear current work structure */
			task->work = NULL;

			WQ_EXEC_LOCK(wq);
			/* check if unblocked */
			if (exec.target != work) {
				/* reapply block */
				exec.target = work;
				break;
			}
			/* FALLTHROUGH */
		default:
			goto done;
		}
	}
done:
	/* remove executor from list */
	TAILQ_REMOVE(&wq->exec_head, &exec, entry);
	WQ_EXEC_UNLOCK(wq);
}
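
/*
 * The executor list above is what allows a work item that gets requeued
 * while its callback is running, including by the callback itself, to be
 * run again by the same executor loop instead of concurrently on another
 * worker thread. A hypothetical self-rearming callback ("my_poll_cb")
 * relying on this looks like:
 *
 *	static void
 *	my_poll_cb(struct work_struct *work)
 *	{
 *		(poll some hardware or software state)
 *		if (more_work_expected)
 *			queue_work(system_wq, work);
 *	}
 */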

void
linux_delayed_work_fn(void *context, int pending)
{
	struct delayed_work *dwork = context;

	/*
	 * Make sure the timer belonging to the delayed work gets
	 * drained before invoking the work function. Else the timer
	 * mutex may still be in use which can lead to use-after-free
	 * situations, because the work function might free the work
	 * structure before returning.
	 */
	callout_drain(&dwork->timer.callout);

	linux_work_fn(&dwork->work, pending);
}

static void
linux_delayed_work_timer_fn(void *arg)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TASK,		/* start queueing task */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* failed to cancel */
	};
	struct delayed_work *dwork = arg;

	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		linux_delayed_work_enqueue(dwork);
		break;
	default:
		break;
	}
}

/*
 * This function cancels the given work structure in a synchronous
 * fashion. It returns true if the work was successfully
 * cancelled. Otherwise the work was already cancelled.
 */
bool
linux_cancel_work_sync(struct work_struct *work)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
	};
	struct taskqueue *tq;
	bool retval = false;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_cancel_work_sync() might sleep");
retry:
	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_IDLE:
	case WORK_ST_TIMER:
		return (retval);
	case WORK_ST_EXEC:
		tq = work->work_queue->taskqueue;
		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
			taskqueue_drain(tq, &work->work_task);
		goto retry;	/* work may have restarted itself */
	default:
		tq = work->work_queue->taskqueue;
		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
			taskqueue_drain(tq, &work->work_task);
		retval = true;
		goto retry;
	}
}

/*
 * This function atomically stops the timer and callback. The timer
 * callback will not be called after this function returns. This
 * function returns true when the timeout was cancelled. Otherwise the
 * timeout was not started or has already been called.
 */
static inline bool
linux_cancel_timer(struct delayed_work *dwork, bool drain)
{
	bool cancelled;

	mtx_lock(&dwork->timer.mtx);
	cancelled = (callout_stop(&dwork->timer.callout) == 1);
	mtx_unlock(&dwork->timer.mtx);

	/* check if we should drain */
	if (drain)
		callout_drain(&dwork->timer.callout);
	return (cancelled);
}

/*
 * This function cancels the given delayed work structure in a
 * non-blocking fashion. It returns true if the work was
 * successfully cancelled. Otherwise the work may still be busy or
 * already cancelled.
 */
bool
linux_cancel_delayed_work(struct delayed_work *dwork)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_CANCEL,	/* try to cancel */
		[WORK_ST_TASK] = WORK_ST_CANCEL,	/* try to cancel */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
	};
	struct taskqueue *tq;
	bool cancelled;

	mtx_lock(&dwork->timer.mtx);
	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		cancelled = (callout_stop(&dwork->timer.callout) == 1);
		if (cancelled) {
			atomic_cmpxchg(&dwork->work.state,
			    WORK_ST_CANCEL, WORK_ST_IDLE);
			mtx_unlock(&dwork->timer.mtx);
			return (true);
		}
		/* FALLTHROUGH */
	case WORK_ST_TASK:
		tq = dwork->work.work_queue->taskqueue;
		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
			atomic_cmpxchg(&dwork->work.state,
			    WORK_ST_CANCEL, WORK_ST_IDLE);
			mtx_unlock(&dwork->timer.mtx);
			return (true);
		}
		/* FALLTHROUGH */
	default:
		mtx_unlock(&dwork->timer.mtx);
		return (false);
	}
}

/*
 * This function cancels the given delayed work structure in a
 * synchronous fashion. It returns true if the work was successfully
 * cancelled. Otherwise the work was already cancelled.
 */
bool
linux_cancel_delayed_work_sync(struct delayed_work *dwork)
{
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
	};
	struct taskqueue *tq;
	bool retval = false;
	int ret, state;
	bool cancelled;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_cancel_delayed_work_sync() might sleep");
	mtx_lock(&dwork->timer.mtx);

	state = linux_update_state(&dwork->work.state, states);
	switch (state) {
	case WORK_ST_IDLE:
		mtx_unlock(&dwork->timer.mtx);
		return (retval);
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		cancelled = (callout_stop(&dwork->timer.callout) == 1);

		tq = dwork->work.work_queue->taskqueue;
		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
		mtx_unlock(&dwork->timer.mtx);

		callout_drain(&dwork->timer.callout);
		taskqueue_drain(tq, &dwork->work.work_task);
		return (cancelled || (ret != 0));
	default:
		tq = dwork->work.work_queue->taskqueue;
		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
		mtx_unlock(&dwork->timer.mtx);
		if (ret != 0)
			taskqueue_drain(tq, &dwork->work.work_task);
		return (ret != 0);
	}
}
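
/*
 * Typical driver teardown pattern using the synchronous cancel routines
 * above, through the Linux-style cancel_work_sync() and
 * cancel_delayed_work_sync() wrappers; "my_detach" and the "sc" fields
 * are hypothetical. After these calls return, neither callback is
 * running or queued, so the softc can be freed safely:
 *
 *	static int
 *	my_detach(struct my_softc *sc)
 *	{
 *		cancel_delayed_work_sync(&sc->dwork);
 *		cancel_work_sync(&sc->work);
 *		kfree(sc);
 *		return (0);
 *	}
 */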

/*
 * This function waits until the given work structure is completed.
 * It returns true if the work was busy and has been waited for.
 * Otherwise it returns false.
 */
bool
linux_flush_work(struct work_struct *work)
{
	struct taskqueue *tq;
	bool retval;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_flush_work() might sleep");

	switch (atomic_read(&work->state)) {
	case WORK_ST_IDLE:
		return (false);
	default:
		tq = work->work_queue->taskqueue;
		retval = taskqueue_poll_is_busy(tq, &work->work_task);
		taskqueue_drain(tq, &work->work_task);
		return (retval);
	}
}

/*
 * This function waits until the given delayed work structure is
 * completed. It returns true if the work was busy and has been waited
 * for. Otherwise it returns false.
 */
bool
linux_flush_delayed_work(struct delayed_work *dwork)
{
	struct taskqueue *tq;
	bool retval;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_flush_delayed_work() might sleep");

	switch (atomic_read(&dwork->work.state)) {
	case WORK_ST_IDLE:
		return (false);
	case WORK_ST_TIMER:
		if (linux_cancel_timer(dwork, 1))
			linux_delayed_work_enqueue(dwork);
		/* FALLTHROUGH */
	default:
		tq = dwork->work.work_queue->taskqueue;
		retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
		taskqueue_drain(tq, &dwork->work.work_task);
		return (retval);
	}
}

/*
 * This function returns true if the given work is pending, and not
 * yet executing:
 */
bool
linux_work_pending(struct work_struct *work)
{
	switch (atomic_read(&work->state)) {
	case WORK_ST_TIMER:
	case WORK_ST_TASK:
	case WORK_ST_CANCEL:
		return (true);
	default:
		return (false);
	}
}

/*
 * This function returns true if the given work is busy.
 */
bool
linux_work_busy(struct work_struct *work)
{
	struct taskqueue *tq;

	switch (atomic_read(&work->state)) {
	case WORK_ST_IDLE:
		return (false);
	case WORK_ST_EXEC:
		tq = work->work_queue->taskqueue;
		return (taskqueue_poll_is_busy(tq, &work->work_task));
	default:
		return (true);
	}
}

struct workqueue_struct *
linux_create_workqueue_common(const char *name, int cpus)
{
	struct workqueue_struct *wq;

	/*
	 * If zero CPUs are specified use the default number of CPUs:
	 */
	if (cpus == 0)
		cpus = linux_default_wq_cpus;

	wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
	wq->taskqueue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &wq->taskqueue);
	atomic_set(&wq->draining, 0);
	taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
	TAILQ_INIT(&wq->exec_head);
	mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);

	return (wq);
}
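
/*
 * Drivers normally reach this function through the Linux-style
 * workqueue constructors, for example (hypothetical "my_wq" pointer;
 * error handling omitted for brevity):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_driver_wq", 0, 1);
 *
 *	queue_work(my_wq, &sc->work);
 *	...
 *	destroy_workqueue(my_wq);	(drains pending work, then frees)
 */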

void
linux_destroy_workqueue(struct workqueue_struct *wq)
{
	atomic_inc(&wq->draining);
	drain_workqueue(wq);
	taskqueue_free(wq->taskqueue);
	mtx_destroy(&wq->exec_mtx);
	kfree(wq);
}

void
linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
{
	memset(dwork, 0, sizeof(*dwork));
	dwork->work.func = func;
	TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
	mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
	    MTX_DEF | MTX_NOWITNESS);
	callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
}

struct work_struct *
linux_current_work(void)
{
	return (current->work);
}

static void
linux_work_init(void *arg)
{
	int max_wq_cpus = mp_ncpus + 1;

	/* avoid deadlock when there are too few threads */
	if (max_wq_cpus < 4)
		max_wq_cpus = 4;

	/* set default number of CPUs */
	linux_default_wq_cpus = max_wq_cpus;

	linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
	linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);

	/* populate the workqueue pointers */
	system_long_wq = linux_system_long_wq;
	system_wq = linux_system_short_wq;
	system_power_efficient_wq = linux_system_short_wq;
	system_unbound_wq = linux_system_short_wq;
	system_highpri_wq = linux_system_short_wq;
}
SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL);

static void
linux_work_uninit(void *arg)
{
	destroy_workqueue(linux_system_short_wq);
	destroy_workqueue(linux_system_long_wq);

	/* clear workqueue pointers */
	system_long_wq = NULL;
	system_wq = NULL;
	system_power_efficient_wq = NULL;
	system_unbound_wq = NULL;
	system_highpri_wq = NULL;
}
SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);

void
linux_irq_work_fn(void *context, int pending)
{
	struct irq_work *irqw = context;

	irqw->func(irqw);
}
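
/*
 * Consumers reach this handler through the Linux-style irq_work API;
 * the "my_irq_work" and "my_irq_work_cb" names below are hypothetical.
 * The callback runs in the linuxkpi_irq_wq taskqueue thread created
 * below, not in interrupt context:
 *
 *	static void
 *	my_irq_work_cb(struct irq_work *irqw)
 *	{
 *		(work deferred from a critical or interrupt path)
 *	}
 *
 *	static struct irq_work my_irq_work;
 *
 *	init_irq_work(&my_irq_work, my_irq_work_cb);
 *	irq_work_queue(&my_irq_work);
 */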

static void
linux_irq_work_init_fn(void *context, int pending)
{
	/*
	 * LinuxKPI performs lazy allocation of memory structures required by
	 * current on the first access to it. As some irq_work clients read
	 * it with a spinlock held, we have to preallocate td_lkpi_task before
	 * the first call to irq_work_queue(). As irq_work uses a single
	 * thread, it is enough to read current once at SYSINIT stage.
	 */
	if (current == NULL)
		panic("irq_work taskqueue is not initialized");
}
static struct task linux_irq_work_init_task =
    TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);

static void
linux_irq_work_init(void *arg)
{
	linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
	    M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
	taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
	    "linuxkpi_irq_wq");
	taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
}
SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_init, NULL);

static void
linux_irq_work_uninit(void *arg)
{
	taskqueue_drain_all(linux_irq_work_tq);
	taskqueue_free(linux_irq_work_tq);
}
SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_uninit, NULL);