1 /*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #include <sys/param.h>
30 #include <sys/queue.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/taskqueue.h>
34 #include <sys/interrupt.h>
35 #include <sys/malloc.h>
36 #include <sys/kthread.h>
37
38 #include <machine/ipl.h>
39
MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

/* Global list of all task queues; traversals/updates done at splhigh(). */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
/* Kernel process backing the "thread" task queue (see taskqueue_kthread). */
static struct proc *taskqueue_thread_proc;

/*
 * A task queue: a priority-ordered list of pending tasks plus a
 * notification callback used to tell the consumer that work is ready.
 */
struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* entry on taskqueue_queues */
	STAILQ_HEAD(, task)	tq_queue;	/* pending tasks, highest ta_priority first */
	const char		*tq_name;	/* name used by taskqueue_find(); not copied */
	taskqueue_enqueue_fn	tq_enqueue;	/* "work available" hook, called at splhigh */
	void			*tq_context;	/* opaque argument for tq_enqueue */
	int			tq_draining;	/* non-zero while freeing; rejects new tasks */
};
53
54 struct taskqueue *
55 taskqueue_create(const char *name, int mflags,
56 taskqueue_enqueue_fn enqueue, void *context)
57 {
58 struct taskqueue *queue;
59 static int once = 1;
60 int s;
61
62 queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags);
63 if (!queue)
64 return 0;
65 STAILQ_INIT(&queue->tq_queue);
66 queue->tq_name = name;
67 queue->tq_enqueue = enqueue;
68 queue->tq_context = context;
69 queue->tq_draining = 0;
70
71 s = splhigh();
72 if (once) {
73 STAILQ_INIT(&taskqueue_queues);
74 once = 0;
75 }
76 STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
77 splx(s);
78
79 return queue;
80 }
81
82 void
83 taskqueue_free(struct taskqueue *queue)
84 {
85 int s = splhigh();
86 queue->tq_draining = 1;
87 splx(s);
88
89 taskqueue_run(queue);
90
91 s = splhigh();
92 STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
93 splx(s);
94
95 free(queue, M_TASKQUEUE);
96 }
97
98 struct taskqueue *
99 taskqueue_find(const char *name)
100 {
101 struct taskqueue *queue;
102 int s;
103
104 s = splhigh();
105 STAILQ_FOREACH(queue, &taskqueue_queues, tq_link)
106 if (!strcmp(queue->tq_name, name)) {
107 splx(s);
108 return queue;
109 }
110 splx(s);
111 return 0;
112 }
113
114 int
115 taskqueue_enqueue(struct taskqueue *queue, struct task *task)
116 {
117 struct task *ins;
118 struct task *prev;
119
120 int s = splhigh();
121
122 /*
123 * Don't allow new tasks on a queue which is being freed.
124 */
125 if (queue->tq_draining) {
126 splx(s);
127 return EPIPE;
128 }
129
130 /*
131 * Count multiple enqueues.
132 */
133 if (task->ta_pending) {
134 task->ta_pending++;
135 splx(s);
136 return 0;
137 }
138
139 /*
140 * Optimise the case when all tasks have the same priority.
141 */
142 prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
143 if (!prev || prev->ta_priority >= task->ta_priority) {
144 STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
145 } else {
146 prev = 0;
147 for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
148 prev = ins, ins = STAILQ_NEXT(ins, ta_link))
149 if (ins->ta_priority < task->ta_priority)
150 break;
151
152 if (prev)
153 STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
154 else
155 STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
156 }
157
158 task->ta_pending = 1;
159 if (queue->tq_enqueue)
160 queue->tq_enqueue(queue->tq_context);
161
162 splx(s);
163
164 return 0;
165 }
166
167 void
168 taskqueue_run(struct taskqueue *queue)
169 {
170 int s;
171 struct task *task;
172 int pending;
173
174 s = splhigh();
175 while (STAILQ_FIRST(&queue->tq_queue)) {
176 /*
177 * Carefully remove the first task from the queue and
178 * zero its pending count.
179 */
180 task = STAILQ_FIRST(&queue->tq_queue);
181 STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
182 pending = task->ta_pending;
183 task->ta_pending = 0;
184 splx(s);
185
186 task->ta_func(task->ta_context, pending);
187
188 s = splhigh();
189 }
190 splx(s);
191 }
192
/*
 * Enqueue hook for the "swi" queue: schedule the task queue software
 * interrupt, which runs taskqueue_swi_run().
 */
static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}
198
/* Software-interrupt handler: drain the "swi" task queue. */
static void
taskqueue_swi_run(void)
{
	taskqueue_run(taskqueue_swi);
}
204
/*
 * Main loop of the kernel thread that services the "thread" task
 * queue: drain the queue, then sleep until taskqueue_thread_enqueue()
 * issues a wakeup.  The empty-check and the tsleep() are both done at
 * splhigh so a wakeup cannot slip in between them and be lost.
 */
static void
taskqueue_kthread(void *arg)
{
	int s;

	for (;;) {
		taskqueue_run(taskqueue_thread);
		s = splhigh();
		if (STAILQ_EMPTY(&taskqueue_thread->tq_queue))
			tsleep(&taskqueue_thread, PWAIT, "tqthr", 0);
		splx(s);
	}
}
218
/*
 * Enqueue hook for the "thread" queue: wake the service thread
 * sleeping in taskqueue_kthread().
 */
static void
taskqueue_thread_enqueue(void *context)
{
	wakeup(&taskqueue_thread);
}
224
/*
 * Instantiate the two standard system queues: "swi" runs its tasks
 * from a software interrupt, "thread" from a dedicated kernel thread.
 */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 register_swi(SWI_TQ, taskqueue_swi_run));
TASKQUEUE_DEFINE(thread, taskqueue_thread_enqueue, 0,
		 kthread_create(taskqueue_kthread, NULL,
				&taskqueue_thread_proc, "taskqueue"));
/* (trailing web-cache extraction artifact removed: "Cache object: 0ebc09203b3d37413c64931668c23aab") */