1 /*-
2 * Copyright (c) 2017 Hans Petter Selasky
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/types.h>
31 #include <sys/malloc.h>
32 #include <sys/gtaskqueue.h>
33 #include <sys/proc.h>
34 #include <sys/sched.h>
35
36 #include <linux/compiler.h>
37 #include <linux/interrupt.h>
38 #include <linux/compat.h>
39
/*
 * Tasklet state machine, stored in ts->tasklet_state:
 *
 * IDLE - not queued and not executing
 * BUSY - linked on a per-CPU worker queue, execution pending
 * EXEC - the callback is currently executing
 * LOOP - rescheduled while executing; run the callback once more
 */
#define TASKLET_ST_IDLE 0
#define TASKLET_ST_BUSY 1
#define TASKLET_ST_EXEC 2
#define TASKLET_ST_LOOP 3

/* Atomically transition from "old" to "new"; non-zero on success. */
#define TASKLET_ST_CMPSET(ts, old, new) \
	atomic_cmpset_int((volatile u_int *)&(ts)->tasklet_state, old, new)

/* Unconditionally store a new state (single untorn write). */
#define TASKLET_ST_SET(ts, new) \
	WRITE_ONCE(*(volatile u_int *)&(ts)->tasklet_state, new)

/* Read the current state (single untorn read). */
#define TASKLET_ST_GET(ts) \
	READ_ONCE(*(volatile u_int *)&(ts)->tasklet_state)
53
/*
 * Per-CPU tasklet worker: a queue of pending tasklets drained by a
 * grouptask (tasklet_handler()).  Cache-line aligned to avoid false
 * sharing between CPUs.
 */
struct tasklet_worker {
	struct mtx mtx;		/* protects "head" */
	TAILQ_HEAD(tasklet_list, tasklet_struct) head;	/* pending tasklets */
	struct grouptask gtask;	/* per-CPU worker task */
} __aligned(CACHE_LINE_SIZE);

#define TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
#define TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)

/* One tasklet worker instance per CPU. */
DPCPU_DEFINE_STATIC(struct tasklet_worker, tasklet_worker);
64
/*
 * Per-CPU worker that drains the tasklet queue.
 *
 * Each tasklet that was queued when the handler started is processed at
 * most once per invocation: "last" is sampled up front and the loop
 * stops after handling it, so disabled tasklets that get re-inserted at
 * the tail below cannot cause an endless loop.
 */
static void
tasklet_handler(void *arg)
{
	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
	struct tasklet_struct *ts;
	struct tasklet_struct *last;

	linux_set_current(curthread);

	TASKLET_WORKER_LOCK(tw);
	last = TAILQ_LAST(&tw->head, tasklet_list);
	while (1) {
		ts = TAILQ_FIRST(&tw->head);
		if (ts == NULL)
			break;
		TAILQ_REMOVE(&tw->head, ts, entry);

		if (!atomic_read(&ts->count)) {
			/* Tasklet is enabled: run it without the queue lock. */
			TASKLET_WORKER_UNLOCK(tw);
			do {
				/* reset executing state */
				TASKLET_ST_SET(ts, TASKLET_ST_EXEC);

				if (ts->use_callback)
					ts->callback(ts);
				else
					ts->func(ts->data);

				/*
				 * If tasklet_schedule() moved the state to
				 * LOOP while the callback ran, this CMPSET
				 * fails and the callback is invoked again.
				 */
			} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC,
			    TASKLET_ST_IDLE) == 0);
			TASKLET_WORKER_LOCK(tw);
		} else {
			/* Tasklet is disabled: keep it pending at the tail. */
			TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		}
		if (ts == last)
			break;
	}
	TASKLET_WORKER_UNLOCK(tw);
}
104
105 static void
106 tasklet_subsystem_init(void *arg __unused)
107 {
108 struct tasklet_worker *tw;
109 char buf[32];
110 int i;
111
112 CPU_FOREACH(i) {
113 if (CPU_ABSENT(i))
114 continue;
115
116 tw = DPCPU_ID_PTR(i, tasklet_worker);
117
118 mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
119 TAILQ_INIT(&tw->head);
120 GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
121 snprintf(buf, sizeof(buf), "softirq%d", i);
122 taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
123 "tasklet", i, NULL, NULL, buf);
124 }
125 }
126 SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);
127
128 static void
129 tasklet_subsystem_uninit(void *arg __unused)
130 {
131 struct tasklet_worker *tw;
132 int i;
133
134 taskqgroup_drain_all(qgroup_softirq);
135
136 CPU_FOREACH(i) {
137 if (CPU_ABSENT(i))
138 continue;
139
140 tw = DPCPU_ID_PTR(i, tasklet_worker);
141
142 taskqgroup_detach(qgroup_softirq, &tw->gtask);
143 mtx_destroy(&tw->mtx);
144 }
145 }
146 SYSUNINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);
147
148 void
149 tasklet_init(struct tasklet_struct *ts,
150 tasklet_func_t *func, unsigned long data)
151 {
152 ts->entry.tqe_prev = NULL;
153 ts->entry.tqe_next = NULL;
154 ts->func = func;
155 ts->callback = NULL;
156 ts->data = data;
157 atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
158 atomic_set(&ts->count, 0);
159 ts->use_callback = false;
160 }
161
162 void
163 tasklet_setup(struct tasklet_struct *ts, tasklet_callback_t *c)
164 {
165 ts->entry.tqe_prev = NULL;
166 ts->entry.tqe_next = NULL;
167 ts->func = NULL;
168 ts->callback = c;
169 ts->data = 0;
170 atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
171 atomic_set(&ts->count, 0);
172 ts->use_callback = true;
173 }
174
/*
 * Re-enable "bottom halves": drop the scheduler pin taken by
 * local_bh_disable().
 */
void
local_bh_enable(void)
{
	sched_unpin();
}
180
/*
 * Emulate disabling "bottom halves" by pinning the current thread to
 * its CPU, preventing migration until local_bh_enable().
 */
void
local_bh_disable(void)
{
	sched_pin();
}
186
/*
 * Schedule a tasklet to run on the current CPU's worker.  Multiple
 * schedule calls before execution collapse into a single run; a call
 * made while the callback is executing requests exactly one extra run
 * (EXEC -> LOOP).  No-op while the tasklet is disabled (count != 0).
 */
void
tasklet_schedule(struct tasklet_struct *ts)
{

	/* tasklet is paused */
	if (atomic_read(&ts->count))
		return;

	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
		/* tasklet_handler() will loop */
	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
		struct tasklet_worker *tw;

		tw = &DPCPU_GET(tasklet_worker);

		/* tasklet_handler() was not queued */
		TASKLET_WORKER_LOCK(tw);
		/* enqueue tasklet */
		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		/* schedule worker */
		GROUPTASK_ENQUEUE(&tw->gtask);
		TASKLET_WORKER_UNLOCK(tw);
	} else {
		/*
		 * tasklet_handler() is already executing
		 *
		 * If the state is neither EXEC nor IDLE, it is either
		 * LOOP or BUSY. If the state changed between the two
		 * CMPSET's above the only possible transitions by
		 * elimination are LOOP->EXEC and BUSY->EXEC. If a
		 * EXEC->LOOP transition was missed that is not a
		 * problem because the callback function is then
		 * already about to be called again.
		 */
	}
}
223
224 void
225 tasklet_kill(struct tasklet_struct *ts)
226 {
227
228 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
229
230 /* wait until tasklet is no longer busy */
231 while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
232 pause("W", 1);
233 }
234
/*
 * Re-enable a tasklet by decrementing its disable count.  The tasklet
 * becomes runnable again once the count returns to zero.
 */
void
tasklet_enable(struct tasklet_struct *ts)
{

	atomic_dec(&ts->count);
}
241
/*
 * Disable a tasklet (increment its disable count) and wait for any
 * in-progress execution to finish.  May sleep via tasklet_unlock_wait().
 */
void
tasklet_disable(struct tasklet_struct *ts)
{

	atomic_inc(&ts->count);
	tasklet_unlock_wait(ts);
}
249
/*
 * Disable a tasklet without waiting for a running callback to finish.
 * The compiler barrier orders the count update before subsequent code.
 */
void
tasklet_disable_nosync(struct tasklet_struct *ts)
{
	atomic_inc(&ts->count);
	barrier();
}
256
/*
 * Try to claim the tasklet for execution (IDLE -> BUSY).
 * Returns non-zero on success, zero if the tasklet was not idle.
 */
int
tasklet_trylock(struct tasklet_struct *ts)
{

	return (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY));
}
263
/*
 * Release the tasklet by unconditionally returning it to the idle
 * state (counterpart of tasklet_trylock()).
 */
void
tasklet_unlock(struct tasklet_struct *ts)
{

	TASKLET_ST_SET(ts, TASKLET_ST_IDLE);
}
270
271 void
272 tasklet_unlock_wait(struct tasklet_struct *ts)
273 {
274
275 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
276
277 /* wait until tasklet is no longer busy */
278 while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
279 pause("W", 1);
280 }
Cache object: 386c4eda25d3f39b7dd9e24fff6ec071
|