/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2021 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/llist.h>
#include <linux/irq_work.h>

/*
 * When CONFIG_NO_RCU_SKIP is defined, LinuxKPI RCU locks and asserts
 * will not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define RCU_SKIP(void) 0
#else
#define RCU_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

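/*
 * A queued RCU callback.  "func" either points to the callback
 * function or, when its numeric value is below
 * LINUX_KFREE_RCU_OFFSET_MAX, encodes the offset of the embedded
 * rcu_head inside an object that should simply be kfree()'d (the
 * kfree_rcu() case handled in linux_rcu_cleaner_func() below).
 */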
struct callback_head {
        union {
                STAILQ_ENTRY(callback_head) entry;
                struct llist_node node;
        };
        rcu_callback_t func;
};

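/*
 * Per-RCU-type head: the lock-free list of callbacks queued by
 * linux_call_rcu() and the task that drains it from the
 * linux_irq_work_tq taskqueue.
 */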
struct linux_epoch_head {
        struct llist_head cb_head;
        struct task task;
} __aligned(CACHE_LINE_SIZE);

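/*
 * Per-CPU, per-RCU-type epoch record together with the list of
 * task_structs currently inside a read-side section on that CPU.
 */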
struct linux_epoch_record {
        ck_epoch_record_t epoch_record;
        TAILQ_HEAD(, task_struct) ts_head;
        int cpuid;
        int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));

/*
 * Verify that "rcu_section[0]" has the same size as
 * "ck_epoch_section_t". This has been done to avoid having to add
 * special compile flags for including ck_epoch.h to all clients of
 * the LinuxKPI.
 */
CTASSERT(sizeof(((struct task_struct *)0)->rcu_section[0]) ==
    sizeof(ck_epoch_section_t));

/*
 * Verify that "epoch_record" is at the beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
        struct linux_epoch_head *head;
        int i;
        int j;

        for (j = 0; j != RCU_TYPE_MAX; j++) {
                ck_epoch_init(&linux_epoch[j]);

                head = &linux_epoch_head[j];

                TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
                init_llist_head(&head->cb_head);

                CPU_FOREACH(i) {
                        struct linux_epoch_record *record;

                        record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

                        record->cpuid = i;
                        record->type = j;
                        ck_epoch_register(&linux_epoch[j],
                            &record->epoch_record, NULL);
                        TAILQ_INIT(&record->ts_head);
                }
        }
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

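/*
 * Taskqueue handler: move all callbacks queued so far onto a local
 * list, wait for a grace period of the corresponding RCU type, and
 * then invoke (or kfree) every callback in the order it was queued.
 */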
static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
        struct linux_epoch_head *head = context;
        struct callback_head *rcu;
        STAILQ_HEAD(, callback_head) tmp_head;
        struct llist_node *node, *next;
        uintptr_t offset;

        /* move current callbacks into own queue */
        STAILQ_INIT(&tmp_head);
        llist_for_each_safe(node, next, llist_del_all(&head->cb_head)) {
                rcu = container_of(node, struct callback_head, node);
                /* re-reverse list to restore chronological order */
                STAILQ_INSERT_HEAD(&tmp_head, rcu, entry);
        }

        /* synchronize */
        linux_synchronize_rcu(head - linux_epoch_head);

        /* dispatch all callbacks, if any */
        while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
                STAILQ_REMOVE_HEAD(&tmp_head, entry);

                offset = (uintptr_t)rcu->func;

                if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
                        kfree((char *)rcu - offset);
                else
                        rcu->func((struct rcu_head *)rcu);
        }
}

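/*
 * Enter an RCU read-side section of the given type.  Nesting is
 * counted per task in rcu_recurse[]; only the outermost lock pins the
 * thread to its CPU and begins a ck_epoch section on the per-CPU
 * epoch record.
 */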
void
linux_rcu_read_lock(unsigned type)
{
        struct linux_epoch_record *record;
        struct task_struct *ts;

        MPASS(type < RCU_TYPE_MAX);

        if (RCU_SKIP())
                return;

        ts = current;

        /* assert valid refcount */
        MPASS(ts->rcu_recurse[type] != INT_MAX);

        if (++(ts->rcu_recurse[type]) != 1)
                return;

        /*
         * Pin thread to current CPU so that the unlock code gets the
         * same per-CPU epoch record:
         */
        sched_pin();

        record = &DPCPU_GET(linux_epoch_record[type]);

        /*
         * Use a critical section to prevent recursion inside
         * ck_epoch_begin(). Apart from that, this function supports
         * recursion.
         */
        critical_enter();
        ck_epoch_begin(&record->epoch_record,
            (ck_epoch_section_t *)&ts->rcu_section[type]);
        TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
        critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
        struct linux_epoch_record *record;
        struct task_struct *ts;

        MPASS(type < RCU_TYPE_MAX);

        if (RCU_SKIP())
                return;

        ts = current;

        /* assert valid refcount */
        MPASS(ts->rcu_recurse[type] > 0);

        if (--(ts->rcu_recurse[type]) != 0)
                return;

        record = &DPCPU_GET(linux_epoch_record[type]);

        /*
         * Use a critical section to prevent recursion inside
         * ck_epoch_end(). Apart from that, this function supports
         * recursion.
         */
        critical_enter();
        ck_epoch_end(&record->epoch_record,
            (ck_epoch_section_t *)&ts->rcu_section[type]);
        TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
        critical_exit();

        sched_unpin();
}

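/*
 * Wait callback passed to ck_epoch_synchronize_wait().  It is invoked
 * with the thread lock held for every epoch record that is still
 * blocking the grace period: readers on the current CPU are waited
 * for by sleeping or yielding, while readers on another CPU cause the
 * thread to rebind to that CPU.
 */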
static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
        struct linux_epoch_record *record =
            container_of(epoch_record, struct linux_epoch_record, epoch_record);
        struct thread *td = curthread;
        struct task_struct *ts;

        /* check if blocked on the current CPU */
        if (record->cpuid == PCPU_GET(cpuid)) {
                bool is_sleeping = false;
                u_char prio = 0;

                /*
                 * Find the lowest priority or sleeping thread which
                 * is blocking synchronization on this CPU core. All
                 * the threads in the queue are CPU-pinned and cannot
                 * go anywhere while the current thread is locked.
                 */
                TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
                        if (ts->task_thread->td_priority > prio)
                                prio = ts->task_thread->td_priority;
                        is_sleeping |= (ts->task_thread->td_inhibitors != 0);
                }

                if (is_sleeping) {
                        thread_unlock(td);
                        pause("W", 1);
                        thread_lock(td);
                } else {
                        /* set new thread priority */
                        sched_prio(td, prio);
                        /* task switch */
                        mi_switch(SW_VOL | SWT_RELINQUISH);
                        /*
                         * It is important that the thread lock is
                         * dropped while yielding, to allow other
                         * threads to acquire the lock pointed to by
                         * TDQ_LOCKPTR(td). Currently mi_switch() will
                         * unlock the thread lock before returning.
                         * Otherwise a deadlock-like situation might
                         * happen.
                         */
                        thread_lock(td);
                }
        } else {
                /*
                 * To avoid spinning, move execution to the other CPU
                 * which is blocking synchronization. Set the highest
                 * thread priority so that the code gets run. The
                 * thread priority will be restored later.
                 */
                sched_prio(td, 0);
                sched_bind(td, record->cpuid);
        }
}

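/*
 * Wait for a full grace period of the given RCU type.  This function
 * may sleep.  The thread's CPU binding, pin count and priority are
 * saved and restored around the wait, because the wait callback above
 * may rebind and reprioritize the thread.
 */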
void
linux_synchronize_rcu(unsigned type)
{
        struct thread *td;
        int was_bound;
        int old_cpu;
        int old_pinned;
        u_char old_prio;

        MPASS(type < RCU_TYPE_MAX);

        if (RCU_SKIP())
                return;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "linux_synchronize_rcu() can sleep");

        td = curthread;
        DROP_GIANT();

        /*
         * Synchronizing RCU might change the CPU core this function
         * is running on. Save current values:
         */
        thread_lock(td);

        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        old_prio = td->td_priority;
        was_bound = sched_is_bound(td);
        sched_unbind(td);
        td->td_pinned = 0;
        sched_bind(td, old_cpu);

        ck_epoch_synchronize_wait(&linux_epoch[type],
            &linux_synchronize_rcu_cb, NULL);

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);
        } else {
                /* get thread back to initial CPU, if any */
                if (old_pinned != 0)
                        sched_bind(td, old_cpu);
                sched_unbind(td);
        }
        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        /* restore thread priority */
        sched_prio(td, old_prio);
        thread_unlock(td);

        PICKUP_GIANT();
}

void
linux_rcu_barrier(unsigned type)
{
        struct linux_epoch_head *head;

        MPASS(type < RCU_TYPE_MAX);

        /*
         * This function is not obligated to wait for a grace period.
         * It only waits for RCU callbacks that have already been posted.
         * If there are no RCU callbacks posted, rcu_barrier() can return
         * immediately.
         */
        head = &linux_epoch_head[type];

        /* wait for callbacks to complete */
        taskqueue_drain(linux_irq_work_tq, &head->task);
}

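/*
 * Queue an RCU callback to be invoked after a grace period.  The
 * callback list is lock-free (llist); the grace period wait and the
 * callback dispatch happen later from the cleaner task running on the
 * linux_irq_work_tq taskqueue.
 */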
void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
        struct callback_head *rcu;
        struct linux_epoch_head *head;

        MPASS(type < RCU_TYPE_MAX);

        rcu = (struct callback_head *)context;
        head = &linux_epoch_head[type];

        rcu->func = func;
        llist_add(&rcu->node, &head->cb_head);
        taskqueue_enqueue(linux_irq_work_tq, &head->task);
}

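/*
 * Illustrative sketch (not compiled here) of how a LinuxKPI consumer
 * typically reaches the functions above, assuming the usual
 * linux/rcupdate.h wrappers (rcu_read_lock(), rcu_dereference(),
 * rcu_assign_pointer(), call_rcu(), kfree_rcu()) map onto the
 * linux_*() functions in this file.  The "foo" names below are made
 * up for the example only:
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	// reader
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_ptr);
 *	if (p != NULL)
 *		value = p->value;
 *	rcu_read_unlock();
 *
 *	// updater: publish the new object, reclaim the old one after
 *	// a grace period
 *	rcu_assign_pointer(foo_ptr, new_foo);
 *	kfree_rcu(old_foo, rcu);	// or call_rcu(&old_foo->rcu, cb)
 */

/*
 * Sleepable RCU (SRCU) is layered on top of the sleepable RCU type;
 * the srcu_struct argument carries no state in this implementation,
 * so initialization and cleanup are no-ops.
 */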
int
init_srcu_struct(struct srcu_struct *srcu)
{
        return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
        linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
        return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
        linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
        linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
        linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}