/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/kern/subr_taskqueue.c 304716 2016-08-24 01:56:30Z shurd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

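/*
 * Per-thread record of the task currently being executed.  Each
 * servicing thread keeps one of these on its stack and links it into
 * tq_active while it runs a task, so that cancel and drain operations
 * can tell which tasks are in flight.
 */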
struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

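/*
 * Sentinel for tb_running: an entry on tq_active whose tb_running
 * points here marks a sleeping taskqueue_drain_all() waiter rather
 * than a running task.
 */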
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

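/*
 * Back end of the TIMEOUT_TASK_INIT() macro in <sys/taskqueue.h>:
 * initialize the embedded task and bind its callout to the queue's
 * mutex, so the callout handler runs with the queue locked.
 */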
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

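/*
 * Typical usage (a sketch only; "sc", "foo" and the softc layout are
 * illustrative, not part of this file):
 *
 *	sc->tq = taskqueue_create("foo_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->tq);
 *	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
 *	    device_get_nameunit(sc->dev));
 */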
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name));
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

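/*
 * Enqueue a task; if it is already pending, only its pending count is
 * bumped.  Example (a sketch; "sc" and foo_intr_task are illustrative):
 *
 *	TASK_INIT(&sc->intr_task, 0, foo_intr_task, sc);
 *	taskqueue_enqueue(sc->tq, &sc->intr_task);
 */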
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

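/*
 * Schedule the task to run after roughly 'ticks' clock ticks, or
 * immediately when 'ticks' is zero.  Returns the number of pending
 * invocations observed at the time of the call, counting an
 * already-armed callout as one.  Not permitted on spin-mutex ("fast")
 * queues; see the KASSERT below.
 */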
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wake up any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

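/*
 * Remove a task from the queue if it has not yet started.  Returns 0
 * on success or EBUSY if the task is currently running; in either case
 * the previous pending count is stored in *pendp when it is non-NULL.
 */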
int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

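/*
 * Sleep until the given task has no pending invocations and is no
 * longer running on any thread.  The WITNESS check below warns when a
 * non-spin queue is drained from a context that may not sleep.
 */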
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

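/*
 * Drain a timeout task: first wait out any armed callout, then drain
 * the underlying task in the usual way.
 */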
void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/*
			 * Should be OK to continue; taskqueue_free()
			 * will do the right thing.
			 */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long)td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * need to check if the TQ_FLAGS_ACTIVE flag wasn't removed
		 * in the meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

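/*
 * Enqueue hook for thread-backed queues: wake one service thread
 * sleeping in taskqueue_thread_loop().
 */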
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}

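/*
 * Instantiate the standard system queues: taskqueue_swi and
 * taskqueue_swi_giant run from software interrupt handlers (the
 * latter under Giant, as it lacks INTR_MPSAFE), and taskqueue_thread
 * runs from a dedicated kernel thread.
 */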
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
		 swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
		     SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}