/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/kern/subr_taskqueue.c 202764 2010-01-21 19:11:18Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

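/*
 * A taskqueue is a list of pending tasks, protected by tq_mutex.  The
 * tq_enqueue callback notifies the consumer (a software interrupt
 * handler or a pool of dedicated threads) that work is available, and
 * tq_running records the task currently being executed so that
 * taskqueue_drain() can wait for it to complete.
 */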
struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

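/*
 * Queues created with MTX_SPIN use spin mutex primitives; all other
 * queues use regular (sleep) mutexes.  These wrappers pick the right
 * flavour based on tq_spin.
 */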
static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

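/*
 * Common constructor: allocate a queue and initialize it with the
 * mutex type and name supplied by the caller.
 */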
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

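/*
 * Create a taskqueue protected by a regular (sleep) mutex.  The
 * enqueue callback is invoked, with the queue locked, whenever a new
 * task becomes pending.
 */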
struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue");
}

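/*
 * Typical usage, as an illustrative sketch only (the names my_handler
 * and my_task are hypothetical): a consumer initializes a task with
 * TASK_INIT() from <sys/taskqueue.h> and queues it, e.g. on the
 * predefined taskqueue_thread queue:
 *
 *	static void my_handler(void *context, int pending);
 *	static struct task my_task;
 *
 *	TASK_INIT(&my_task, 0, my_handler, NULL);
 *	taskqueue_enqueue(taskqueue_thread, &my_task);
 */
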
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

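/*
 * Free a taskqueue: mark it inactive, run any remaining tasks, wait
 * for its service threads to exit and release its memory.
 */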
void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

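/*
 * Enqueue a task.  If the task is already pending, only its pending
 * count is bumped; otherwise the task is inserted in priority order
 * and the queue's enqueue callback is invoked (or deferred while the
 * queue is blocked).
 */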
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		    prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

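/*
 * taskqueue_block() suppresses the enqueue callback; enqueues that
 * arrive while the queue is blocked are remembered via
 * TQ_FLAGS_PENDING, and taskqueue_unblock() invokes the callback once
 * if any were deferred.
 */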
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

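/*
 * Run all pending tasks.  The queue lock is dropped around each call
 * to a task handler, so new tasks may be enqueued while a handler
 * runs; threads sleeping in taskqueue_drain() are woken after each
 * task finishes.
 */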
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_running = NULL;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}

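/*
 * Wait until the given task has no pending enqueues and is not the
 * task currently being run.  This does not prevent new enqueues, so
 * the caller must stop those itself if the task has to stay gone.
 */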
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

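/*
 * Create "count" kernel threads at priority "pri" to service a queue.
 * The threads are created stopped (RFSTOPPED) and are only scheduled
 * in a second pass, after their priority has been set.
 */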
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

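/*
 * Main loop for taskqueue service threads: run pending tasks, then
 * sleep until taskqueue_thread_enqueue() posts a wakeup or the queue
 * is freed.
 */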
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check whether TQ_FLAGS_ACTIVE was cleared in the
		 * meantime; if it was, we missed a wakeup and must not
		 * go back to sleep.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

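/*
 * Enqueue callback for thread-backed queues: wake up one of the
 * queue's service threads.  Called with the queue lock held.
 */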
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

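/*
 * Return non-zero if the given thread is one of the queue's service
 * threads.
 */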
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}