/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue_busy {
	struct task		*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)

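/*
 * Initialize a timeout task: set up the embedded task and bind the
 * callout to the queue's mutex so the callout handler runs locked.
 */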
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

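/*
 * Common constructor: allocate and initialize a taskqueue whose mutex
 * is created with the given flags and name.
 */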
static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}

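/*
 * A typical driver use, following the TASKQUEUE(9) KPI (a sketch; the
 * "mydrv" names, handler and softc are illustrative, not part of this
 * file):
 *
 *	static void mydrv_task_fn(void *context, int pending);
 *	static struct taskqueue *mydrv_tq;
 *	static struct task mydrv_task;
 *
 *	TASK_INIT(&mydrv_task, 0, mydrv_task_fn, sc);
 *	mydrv_tq = taskqueue_create("mydrv", M_WAITOK,
 *	    taskqueue_thread_enqueue, &mydrv_tq);
 *	taskqueue_start_threads(&mydrv_tq, 1, PWAIT, "mydrv taskq");
 *	taskqueue_enqueue(mydrv_tq, &mydrv_task);
 */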
struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue"));
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

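/*
 * Queue a task with the queue mutex held, keeping the queue sorted by
 * descending task priority, and notify the queue's consumer unless
 * enqueueing is currently blocked.
 */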
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}
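
/*
 * Queue a task for deferred execution.  A task that is already pending
 * only has its pending count bumped.
 */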
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

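/*
 * Callout handler: runs with the queue mutex held because the callout
 * was initialized with callout_init_mtx(), so disarm the callout state
 * and enqueue the associated task directly.
 */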
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

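/*
 * Enqueue a timeout task to run after approximately 'ticks' clock
 * ticks.  Returns -1 if a drain of the task is in progress; otherwise
 * returns the prior pending count, counting an already armed callout.
 */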
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing; the queue is unlocked once, below. */
		res = -1;
	} else if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
	}
	TQ_UNLOCK(queue);
	return (res);
}

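/*
 * Wait, with the queue mutex held, until no thread is running a task.
 */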
static void
taskqueue_drain_running(struct taskqueue *queue)
{

	while (!TAILQ_EMPTY(&queue->tq_active))
		TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex,
		    PWAIT, "-", 0);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

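/*
 * Run all pending tasks with the queue mutex held, dropping it around
 * each task function call.  The taskqueue_busy record published on
 * tq_active lets other threads see which task is currently running.
 */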
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
	if (TAILQ_EMPTY(&queue->tq_active))
		wakeup(&queue->tq_active);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

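/*
 * Return non-zero if any thread is currently executing the given task.
 * Called with the queue mutex held.
 */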
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single threaded contexts. It returns
 * non-zero if the given task is either pending or running. Else the
 * task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}

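/*
 * Remove a pending task from the queue with the queue mutex held,
 * reporting the prior pending count via '*pendp' and returning EBUSY
 * if the task is currently running.
 */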
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

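/*
 * Cancel a timeout task: stop its callout if armed, then cancel the
 * task itself.  Returns EBUSY if the task is currently running.
 */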
int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

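/*
 * Sleep until the given task is neither pending nor running on any
 * thread of the queue.
 */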
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

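/*
 * Wait for all currently queued tasks to be processed: sleep until the
 * task at the tail of the queue has run, then wait for all running
 * tasks to finish.
 */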
void
taskqueue_drain_all(struct taskqueue *queue)
{
	struct task *task;

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	task = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (task != NULL)
		while (task->ta_pending != 0)
			TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	taskqueue_drain_running(queue);
	KASSERT(STAILQ_EMPTY(&queue->tq_queue),
	    ("taskqueue queue is not empty after draining"));
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

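/*
 * Create 'count' kernel threads at priority 'pri' to service the given
 * taskqueue; the printf-style 'name' becomes the thread name(s).  The
 * threads are created stopped and handed to the scheduler only after
 * all of them have been created.
 */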
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/*
			 * Should be OK to continue; taskqueue_free()
			 * will do the right thing.
			 */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

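/*
 * Main loop for taskqueue service threads: run tasks until the queue
 * is deactivated, then rendezvous with taskqueue_free() and exit.
 */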
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

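/*
 * Enqueue hook for thread-backed taskqueues: wake one service thread.
 * Called with the queue mutex held.
 */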
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue(queue, task));
}

static void *taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

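/*
 * Return non-zero if the given thread is one of the queue's service
 * threads.  NULL slots left by failed kthread_add() calls are skipped
 * and not counted against tq_tcount.
 */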
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}