1 /*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/5.1/sys/kern/subr_taskqueue.c 111528 2003-02-26 03:15:42Z scottl $
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bus.h>
32 #include <sys/interrupt.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/taskqueue.h>
38
39 static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
40
41 static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
42
43 static void *taskqueue_ih;
44 static void *taskqueue_giant_ih;
45 static struct mtx taskqueue_queues_mutex;
46
47 struct taskqueue {
48 STAILQ_ENTRY(taskqueue) tq_link;
49 STAILQ_HEAD(, task) tq_queue;
50 const char *tq_name;
51 taskqueue_enqueue_fn tq_enqueue;
52 void *tq_context;
53 int tq_draining;
54 struct mtx tq_mutex;
55 };
56
57 static void init_taskqueue_list(void *data);
58
59 static void
60 init_taskqueue_list(void *data __unused)
61 {
62
63 mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
64 STAILQ_INIT(&taskqueue_queues);
65 }
66 SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
67 NULL);
68
69 struct taskqueue *
70 taskqueue_create(const char *name, int mflags,
71 taskqueue_enqueue_fn enqueue, void *context)
72 {
73 struct taskqueue *queue;
74
75 queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
76 if (!queue)
77 return 0;
78
79 STAILQ_INIT(&queue->tq_queue);
80 queue->tq_name = name;
81 queue->tq_enqueue = enqueue;
82 queue->tq_context = context;
83 queue->tq_draining = 0;
84 mtx_init(&queue->tq_mutex, "taskqueue", NULL, MTX_DEF);
85
86 mtx_lock(&taskqueue_queues_mutex);
87 STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
88 mtx_unlock(&taskqueue_queues_mutex);
89
90 return queue;
91 }
92
/*
 * Free a taskqueue previously created with taskqueue_create().  Any
 * tasks still queued are run to completion first; once draining
 * begins, taskqueue_enqueue() on this queue fails with EPIPE.
 */
void
taskqueue_free(struct taskqueue *queue)
{

	/*
	 * Mark the queue as draining under the lock so that
	 * taskqueue_enqueue() refuses new tasks from here on.
	 */
	mtx_lock(&queue->tq_mutex);
	KASSERT(queue->tq_draining == 0, ("free'ing a draining taskqueue"));
	queue->tq_draining = 1;
	mtx_unlock(&queue->tq_mutex);

	/* Run any tasks that were already queued. */
	taskqueue_run(queue);

	/* Unlink from the global registry before releasing the memory. */
	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}
111
112 /*
113 * Returns with the taskqueue locked.
114 */
115 struct taskqueue *
116 taskqueue_find(const char *name)
117 {
118 struct taskqueue *queue;
119
120 mtx_lock(&taskqueue_queues_mutex);
121 STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
122 mtx_lock(&queue->tq_mutex);
123 if (!strcmp(queue->tq_name, name)) {
124 mtx_unlock(&taskqueue_queues_mutex);
125 return queue;
126 }
127 mtx_unlock(&queue->tq_mutex);
128 }
129 mtx_unlock(&taskqueue_queues_mutex);
130 return 0;
131 }
132
133 int
134 taskqueue_enqueue(struct taskqueue *queue, struct task *task)
135 {
136 struct task *ins;
137 struct task *prev;
138
139 mtx_lock(&queue->tq_mutex);
140
141 /*
142 * Don't allow new tasks on a queue which is being freed.
143 */
144 if (queue->tq_draining) {
145 mtx_unlock(&queue->tq_mutex);
146 return EPIPE;
147 }
148
149 /*
150 * Count multiple enqueues.
151 */
152 if (task->ta_pending) {
153 task->ta_pending++;
154 mtx_unlock(&queue->tq_mutex);
155 return 0;
156 }
157
158 /*
159 * Optimise the case when all tasks have the same priority.
160 */
161 prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
162 if (!prev || prev->ta_priority >= task->ta_priority) {
163 STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
164 } else {
165 prev = 0;
166 for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
167 prev = ins, ins = STAILQ_NEXT(ins, ta_link))
168 if (ins->ta_priority < task->ta_priority)
169 break;
170
171 if (prev)
172 STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
173 else
174 STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
175 }
176
177 task->ta_pending = 1;
178 if (queue->tq_enqueue)
179 queue->tq_enqueue(queue->tq_context);
180
181 mtx_unlock(&queue->tq_mutex);
182
183 return 0;
184 }
185
/*
 * Run all tasks currently pending on the queue.  The queue mutex is
 * dropped around each task function invocation, so task handlers may
 * themselves call taskqueue_enqueue() without deadlocking, and tasks
 * queued meanwhile are picked up by this same loop.
 */
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		/* Drop the lock before calling out to the handler. */
		mtx_unlock(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock(&queue->tq_mutex);
	}
	mtx_unlock(&queue->tq_mutex);
}
210
/*
 * Enqueue callback for the "swi" taskqueue: schedule the software
 * interrupt handler registered as taskqueue_ih.
 */
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}
216
/*
 * Software interrupt handler that drains the "swi" taskqueue.
 */
static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}
222
/*
 * Enqueue callback for the "swi_giant" taskqueue: schedule the
 * software interrupt handler registered as taskqueue_giant_ih.
 */
static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}
228
/*
 * Software interrupt handler that drains the "swi_giant" taskqueue.
 */
static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
234
/*
 * The system "swi" taskqueue, serviced by a software interrupt
 * handler registered with INTR_MPSAFE.
 */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
			 INTR_MPSAFE, &taskqueue_ih));

/*
 * The "swi_giant" taskqueue, serviced by a software interrupt handler
 * registered without INTR_MPSAFE (i.e. it runs under Giant).
 */
TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant task queue", taskqueue_swi_giant_run,
			 NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));