/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task		*tb_running;
	u_int			tb_seq;
	LIST_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	LIST_HEAD(, taskqueue_busy) tq_active;
	struct task		*tq_hint;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

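/*
 * Initialize a timeout task: the underlying task plus a callout that
 * fires with the queue's mutex held and returns with it unlocked
 * (taskqueue_enqueue_locked() drops the lock before returning).
 */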
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

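/*
 * Sleep on the taskqueue, using the sleep primitive that matches the
 * queue's mutex type (spin vs. default).
 */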
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

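/*
 * Common backend for taskqueue_create() and taskqueue_create_fast():
 * allocate and initialize the queue, and mark it for unlocked enqueue
 * when the enqueue hook is known not to require the queue lock.
 */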
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal the taskqueue threads to terminate, and wait until the last
 * thread and the last armed timeout task have gone away.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "tq_destroy");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

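/*
 * Enqueue a task, keeping the queue sorted by decreasing priority.
 * Called with the queue lock held; always returns with it released.
 */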
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimize the common case where all tasks use a small set of
	 * priorities.  With only one priority we always insert at the end.
	 * With two priorities tq_hint typically gives the insertion point.
	 * With more than two priorities tq_hint should at least halve
	 * the search.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = queue->tq_hint;
		if (prev && prev->ta_priority >= task->ta_priority) {
			ins = STAILQ_NEXT(prev, ta_link);
		} else {
			prev = NULL;
			ins = STAILQ_FIRST(&queue->tq_queue);
		}
		for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev) {
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
			queue->tq_hint = task;
		} else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

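/*
 * Callout handler for timeout tasks.  Runs with the queue mutex held
 * (see _timeout_task_init()); disarms the callout and enqueues the
 * task, which releases the lock.
 */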
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, C_HARDCLOCK));
}

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	queue->tq_hint = &t_barrier;
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, "tq_qdrain");
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "tq_adrain");
			goto restart;
		}
	}

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

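/*
 * Main task-running loop: pop tasks in priority order and run each
 * with the queue lock dropped.  The on-stack taskqueue_busy entry
 * advertises the running task to cancel/drain, and the wakeup()
 * after each task unblocks any drainers sleeping on it.
 */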
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);

	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		wakeup(task);
	}
	LIST_REMOVE(&tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

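/*
 * Return non-zero if the given task is currently being executed by
 * some thread of this taskqueue.
 */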
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single-threaded contexts.  It returns
 * non-zero if the given task is either pending or running.  Otherwise
 * the task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}

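/*
 * Remove a pending task from the queue, reporting the pending count
 * through *pendp.  Returns EBUSY if the task is currently running and
 * therefore cannot be cancelled synchronously.
 */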
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0) {
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
	}
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, "tq_drain");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

void
taskqueue_quiesce(struct taskqueue *queue)
{
	int ret;

	TQ_LOCK(queue);
	do {
		ret = taskqueue_drain_tq_queue(queue);
		if (ret == 0)
			ret = taskqueue_drain_tq_active(queue);
	} while (ret != 0);
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

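/*
 * Common backend for the taskqueue_start_threads*() variants: create
 * "count" kernel threads running taskqueue_thread_loop(), optionally
 * pinned to a CPU set and/or attached to a given process, then set
 * their priority and make them runnable.
 */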
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, struct proc *p, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long)td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri,
    struct proc *proc, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap);
	va_end(ap);
	return (error);
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

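/*
 * Main loop for taskqueue threads: run the queue, then sleep until a
 * wakeup from an enqueue (or from taskqueue_free()).  On shutdown the
 * queue is drained one last time before the thread exits.
 */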
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

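/*
 * Enqueue hook for thread-backed taskqueues: wake up one of the
 * queue's service threads.
 */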
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

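/*
 * Return non-zero if "td" is one of this taskqueue's service threads.
 * Note that tq_threads may contain NULL holes left by failed
 * kthread_add() calls, hence the separate valid-entry counter.
 */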
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}