FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_epoch.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/epoch.h>
36 #include <sys/gtaskqueue.h>
37 #include <sys/kernel.h>
38 #include <sys/limits.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/pcpu.h>
43 #include <sys/proc.h>
44 #include <sys/sched.h>
45 #include <sys/sx.h>
46 #include <sys/smp.h>
47 #include <sys/sysctl.h>
48 #include <sys/turnstile.h>
49 #ifdef EPOCH_TRACE
50 #include <machine/stdarg.h>
51 #include <sys/stack.h>
52 #include <sys/tree.h>
53 #endif
54 #include <vm/vm.h>
55 #include <vm/vm_extern.h>
56 #include <vm/vm_kern.h>
57 #include <vm/uma.h>
58
59 #include <ck_epoch.h>
60
61 #ifdef __amd64__
62 #define EPOCH_ALIGN CACHE_LINE_SIZE*2
63 #else
64 #define EPOCH_ALIGN CACHE_LINE_SIZE
65 #endif
66
67 TAILQ_HEAD (epoch_tdlist, epoch_tracker);
68 typedef struct epoch_record {
69 ck_epoch_record_t er_record;
70 struct epoch_context er_drain_ctx;
71 struct epoch *er_parent;
72 volatile struct epoch_tdlist er_tdlist;
73 volatile uint32_t er_gen;
74 uint32_t er_cpuid;
75 #ifdef INVARIANTS
76 /* Used to verify record ownership for non-preemptible epochs. */
77 struct thread *er_td;
78 #endif
79 } __aligned(EPOCH_ALIGN) *epoch_record_t;
80
81 struct epoch {
82 struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
83 epoch_record_t e_pcpu_record;
84 int e_in_use;
85 int e_flags;
86 struct sx e_drain_sx;
87 struct mtx e_drain_mtx;
88 volatile int e_drain_count;
89 const char *e_name;
90 };
91
92 /* arbitrary --- needs benchmarking */
93 #define MAX_ADAPTIVE_SPIN 100
94 #define MAX_EPOCHS 64
95
96 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
97 SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
98 "epoch information");
99 SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
100 "epoch stats");
101
102 /* Stats. */
103 static counter_u64_t block_count;
104
105 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
106 &block_count, "# of times a thread was in an epoch when epoch_wait was called");
107 static counter_u64_t migrate_count;
108
109 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
110 &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
111 static counter_u64_t turnstile_count;
112
113 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
114 &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
115 static counter_u64_t switch_count;
116
117 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
118 &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
119 static counter_u64_t epoch_call_count;
120
121 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
122 &epoch_call_count, "# of times a callback was deferred");
123 static counter_u64_t epoch_call_task_count;
124
125 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
126 &epoch_call_task_count, "# of times a callback task was run");
127
128 TAILQ_HEAD (threadlist, thread);
129
130 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
131 ck_epoch_entry_container)
132
133 static struct epoch epoch_array[MAX_EPOCHS];
134
135 DPCPU_DEFINE(struct grouptask, epoch_cb_task);
136 DPCPU_DEFINE(int, epoch_cb_count);
137
138 static __read_mostly int inited;
139 __read_mostly epoch_t global_epoch;
140 __read_mostly epoch_t global_epoch_preempt;
141
142 static void epoch_call_task(void *context __unused);
143 static uma_zone_t pcpu_zone_record;
144
145 static struct sx epoch_sx;
146
147 #define EPOCH_LOCK() sx_xlock(&epoch_sx)
148 #define EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
149
150 #ifdef EPOCH_TRACE
151 struct stackentry {
152 RB_ENTRY(stackentry) se_node;
153 struct stack se_stack;
154 };
155
156 static int
157 stackentry_compare(struct stackentry *a, struct stackentry *b)
158 {
159
160 if (a->se_stack.depth > b->se_stack.depth)
161 return (1);
162 if (a->se_stack.depth < b->se_stack.depth)
163 return (-1);
164 for (int i = 0; i < a->se_stack.depth; i++) {
165 if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
166 return (1);
167 if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
168 return (-1);
169 }
170
171 return (0);
172 }
173
174 RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
175 RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);
176
177 static struct mtx epoch_stacks_lock;
178 MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);
179
180 static bool epoch_trace_stack_print = true;
181 SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
182 &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");
183
184 static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
185 static inline void
186 epoch_trace_report(const char *fmt, ...)
187 {
188 va_list ap;
189 struct stackentry se, *new;
190
191 stack_zero(&se.se_stack); /* XXX: is it really needed? */
192 stack_save(&se.se_stack);
193
194 /* Tree is never reduced - go lockless. */
195 if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
196 return;
197
198 new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
199 if (new != NULL) {
200 bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));
201
202 mtx_lock(&epoch_stacks_lock);
203 new = RB_INSERT(stacktree, &epoch_stacks, new);
204 mtx_unlock(&epoch_stacks_lock);
205 if (new != NULL)
206 free(new, M_STACK);
207 }
208
209 va_start(ap, fmt);
210 (void)vprintf(fmt, ap);
211 va_end(ap);
212 if (epoch_trace_stack_print)
213 stack_print_ddb(&se.se_stack);
214 }
215
216 static inline void
217 epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
218 const char *file, int line)
219 {
220 epoch_tracker_t iet;
221
222 SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
223 if (iet->et_epoch != epoch)
224 continue;
225 epoch_trace_report("Recursively entering epoch %s "
226 "at %s:%d, previously entered at %s:%d\n",
227 epoch->e_name, file, line,
228 iet->et_file, iet->et_line);
229 }
230 et->et_epoch = epoch;
231 et->et_file = file;
232 et->et_line = line;
233 SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
234 }
235
236 static inline void
237 epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
238 const char *file, int line)
239 {
240
241 if (SLIST_FIRST(&td->td_epochs) != et) {
242 		epoch_trace_report("Exiting epoch %s out of nesting order "
243 "at %s:%d. Most recently entered %s at %s:%d\n",
244 epoch->e_name,
245 file, line,
246 SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
247 SLIST_FIRST(&td->td_epochs)->et_file,
248 SLIST_FIRST(&td->td_epochs)->et_line);
249 /* This will panic if et is not anywhere on td_epochs. */
250 SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
251 } else
252 SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
253 }
254
255 /* Used by assertions that check thread state before going to sleep. */
256 void
257 epoch_trace_list(struct thread *td)
258 {
259 epoch_tracker_t iet;
260
261 SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
262 printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
263 iet->et_file, iet->et_line);
264 }
265 #endif /* EPOCH_TRACE */
266
267 static void
268 epoch_init(void *arg __unused)
269 {
270 int cpu;
271
272 block_count = counter_u64_alloc(M_WAITOK);
273 migrate_count = counter_u64_alloc(M_WAITOK);
274 turnstile_count = counter_u64_alloc(M_WAITOK);
275 switch_count = counter_u64_alloc(M_WAITOK);
276 epoch_call_count = counter_u64_alloc(M_WAITOK);
277 epoch_call_task_count = counter_u64_alloc(M_WAITOK);
278
279 pcpu_zone_record = uma_zcreate("epoch_record pcpu",
280 sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
281 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
282 CPU_FOREACH(cpu) {
283 GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
284 epoch_call_task, NULL);
285 taskqgroup_attach_cpu(qgroup_softirq,
286 DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
287 "epoch call task");
288 }
289 #ifdef EPOCH_TRACE
290 SLIST_INIT(&thread0.td_epochs);
291 #endif
292 sx_init(&epoch_sx, "epoch-sx");
293 inited = 1;
294 global_epoch = epoch_alloc("Global", 0);
295 global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
296 }
297 SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);
298
299 #if !defined(EARLY_AP_STARTUP)
300 static void
301 epoch_init_smp(void *dummy __unused)
302 {
303 inited = 2;
304 }
305 SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
306 #endif
307
308 static void
309 epoch_ctor(epoch_t epoch)
310 {
311 epoch_record_t er;
312 int cpu;
313
314 epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
315 CPU_FOREACH(cpu) {
316 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
317 bzero(er, sizeof(*er));
318 ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
319 TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
320 er->er_cpuid = cpu;
321 er->er_parent = epoch;
322 }
323 }
324
325 static void
326 epoch_adjust_prio(struct thread *td, u_char prio)
327 {
328
329 thread_lock(td);
330 sched_prio(td, prio);
331 thread_unlock(td);
332 }
333
334 epoch_t
335 epoch_alloc(const char *name, int flags)
336 {
337 epoch_t epoch;
338 int i;
339
340 MPASS(name != NULL);
341
342 if (__predict_false(!inited))
343 panic("%s called too early in boot", __func__);
344
345 EPOCH_LOCK();
346
347 /*
348 * Find a free index in the epoch array. If no free index is
349 * found, try to use the index after the last one.
350 */
351 for (i = 0;; i++) {
352 /*
353 * If too many epochs are currently allocated,
354 * return NULL.
355 */
356 if (i == MAX_EPOCHS) {
357 epoch = NULL;
358 goto done;
359 }
360 if (epoch_array[i].e_in_use == 0)
361 break;
362 }
363
364 epoch = epoch_array + i;
365 ck_epoch_init(&epoch->e_epoch);
366 epoch_ctor(epoch);
367 epoch->e_flags = flags;
368 epoch->e_name = name;
369 sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
370 mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
371
372 /*
373 * Set e_in_use last, because when this field is set the
374 * epoch_call_task() function will start scanning this epoch
375 * structure.
376 */
377 atomic_store_rel_int(&epoch->e_in_use, 1);
378 done:
379 EPOCH_UNLOCK();
380 return (epoch);
381 }
382
383 void
384 epoch_free(epoch_t epoch)
385 {
386 #ifdef INVARIANTS
387 int cpu;
388 #endif
389
390 EPOCH_LOCK();
391
392 MPASS(epoch->e_in_use != 0);
393
394 epoch_drain_callbacks(epoch);
395
396 atomic_store_rel_int(&epoch->e_in_use, 0);
397 /*
398 	 * Make sure the epoch_call_task() function sees e_in_use equal
399 	 * to zero by calling epoch_wait() on the global_epoch.
400 */
401 epoch_wait(global_epoch);
402 #ifdef INVARIANTS
403 CPU_FOREACH(cpu) {
404 epoch_record_t er;
405
406 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
407
408 /*
409 * Sanity check: none of the records should be in use anymore.
410 * We drained callbacks above and freeing the pcpu records is
411 * imminent.
412 */
413 MPASS(er->er_td == NULL);
414 MPASS(TAILQ_EMPTY(&er->er_tdlist));
415 }
416 #endif
417 uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
418 mtx_destroy(&epoch->e_drain_mtx);
419 sx_destroy(&epoch->e_drain_sx);
420 memset(epoch, 0, sizeof(*epoch));
421
422 EPOCH_UNLOCK();
423 }
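/*
 * Illustrative sketch: a typical consumer allocates an epoch once at
 * module initialization and frees it at teardown.  The "foo" names are
 * hypothetical.
 *
 *	static epoch_t foo_epoch;
 *
 *	static void
 *	foo_modinit(void *arg __unused)
 *	{
 *		foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *	}
 *
 *	static void
 *	foo_moduninit(void *arg __unused)
 *	{
 *		epoch_free(foo_epoch);	(drains pending callbacks first)
 *	}
 */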
424
425 static epoch_record_t
426 epoch_currecord(epoch_t epoch)
427 {
428
429 return (zpcpu_get(epoch->e_pcpu_record));
430 }
431
432 #define INIT_CHECK(epoch) \
433 do { \
434 if (__predict_false((epoch) == NULL)) \
435 return; \
436 } while (0)
437
438 void
439 _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
440 {
441 struct epoch_record *er;
442 struct thread *td;
443
444 MPASS(cold || epoch != NULL);
445 td = curthread;
446 MPASS(kstack_contains(td, (vm_offset_t)et, sizeof(*et)));
447
448 INIT_CHECK(epoch);
449 MPASS(epoch->e_flags & EPOCH_PREEMPT);
450
451 #ifdef EPOCH_TRACE
452 epoch_trace_enter(td, epoch, et, file, line);
453 #endif
454 et->et_td = td;
455 THREAD_NO_SLEEPING();
456 critical_enter();
457 sched_pin();
458 et->et_old_priority = td->td_priority;
459 er = epoch_currecord(epoch);
460 /* Record-level tracking is reserved for non-preemptible epochs. */
461 MPASS(er->er_td == NULL);
462 TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
463 ck_epoch_begin(&er->er_record, &et->et_section);
464 critical_exit();
465 }
466
467 void
468 epoch_enter(epoch_t epoch)
469 {
470 epoch_record_t er;
471
472 MPASS(cold || epoch != NULL);
473 INIT_CHECK(epoch);
474 critical_enter();
475 er = epoch_currecord(epoch);
476 #ifdef INVARIANTS
477 if (er->er_record.active == 0) {
478 MPASS(er->er_td == NULL);
479 er->er_td = curthread;
480 } else {
481 /* We've recursed, just make sure our accounting isn't wrong. */
482 MPASS(er->er_td == curthread);
483 }
484 #endif
485 ck_epoch_begin(&er->er_record, NULL);
486 }
487
488 void
489 _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
490 {
491 struct epoch_record *er;
492 struct thread *td;
493
494 INIT_CHECK(epoch);
495 td = curthread;
496 critical_enter();
497 sched_unpin();
498 THREAD_SLEEPING_OK();
499 er = epoch_currecord(epoch);
500 MPASS(epoch->e_flags & EPOCH_PREEMPT);
501 MPASS(et != NULL);
502 MPASS(et->et_td == td);
503 #ifdef INVARIANTS
504 et->et_td = (void*)0xDEADBEEF;
505 /* Record-level tracking is reserved for non-preemptible epochs. */
506 MPASS(er->er_td == NULL);
507 #endif
508 ck_epoch_end(&er->er_record, &et->et_section);
509 TAILQ_REMOVE(&er->er_tdlist, et, et_link);
510 er->er_gen++;
511 if (__predict_false(et->et_old_priority != td->td_priority))
512 epoch_adjust_prio(td, et->et_old_priority);
513 critical_exit();
514 #ifdef EPOCH_TRACE
515 epoch_trace_exit(td, epoch, et, file, line);
516 #endif
517 }
518
519 void
520 epoch_exit(epoch_t epoch)
521 {
522 epoch_record_t er;
523
524 INIT_CHECK(epoch);
525 er = epoch_currecord(epoch);
526 ck_epoch_end(&er->er_record, NULL);
527 #ifdef INVARIANTS
528 MPASS(er->er_td == curthread);
529 if (er->er_record.active == 0)
530 er->er_td = NULL;
531 #endif
532 critical_exit();
533 }
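/*
 * Illustrative sketch of read-side usage of the entry points above.  A
 * preemptible epoch takes a caller-supplied, stack-allocated tracker,
 * while the non-preemptible variant relies on the critical section
 * entered above; the tracker-taking wrappers are declared in
 * sys/sys/epoch.h.  "foo_epoch" and "foo_lookup" are hypothetical.
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	obj = foo_lookup(key);		may be preempted, must not sleep
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 *	epoch_enter(foo_epoch);		non-preemptible: runs in a
 *	obj = foo_lookup(key);		critical section
 *	epoch_exit(foo_epoch);
 */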
534
535 /*
536 * epoch_block_handler_preempt() is a callback from the CK code when another
537 * thread is currently in an epoch section.
538 */
539 static void
540 epoch_block_handler_preempt(struct ck_epoch *global __unused,
541 ck_epoch_record_t *cr, void *arg __unused)
542 {
543 epoch_record_t record;
544 struct thread *td, *owner, *curwaittd;
545 struct epoch_tracker *tdwait;
546 struct turnstile *ts;
547 struct lock_object *lock;
548 int spincount, gen;
549 int locksheld __unused;
550
551 record = __containerof(cr, struct epoch_record, er_record);
552 td = curthread;
553 locksheld = td->td_locks;
554 spincount = 0;
555 counter_u64_add(block_count, 1);
556 /*
557 	 * We lost a race and there are no longer any threads
558 * on the CPU in an epoch section.
559 */
560 if (TAILQ_EMPTY(&record->er_tdlist))
561 return;
562
563 if (record->er_cpuid != curcpu) {
564 /*
565 * If the head of the list is running, we can wait for it
566 * to remove itself from the list and thus save us the
567 * overhead of a migration
568 */
569 gen = record->er_gen;
570 thread_unlock(td);
571 /*
572 * We can't actually check if the waiting thread is running
573 * so we simply poll for it to exit before giving up and
574 * migrating.
575 */
576 do {
577 cpu_spinwait();
578 } while (!TAILQ_EMPTY(&record->er_tdlist) &&
579 gen == record->er_gen &&
580 spincount++ < MAX_ADAPTIVE_SPIN);
581 thread_lock(td);
582 /*
583 		 * If the generation has changed we can poll again;
584 * otherwise we need to migrate.
585 */
586 if (gen != record->er_gen)
587 return;
588 /*
589 * Being on the same CPU as that of the record on which
590 * we need to wait allows us access to the thread
591 * list associated with that CPU. We can then examine the
592 * oldest thread in the queue and wait on its turnstile
593 * until it resumes and so on until a grace period
594 * elapses.
595 *
596 */
597 counter_u64_add(migrate_count, 1);
598 sched_bind(td, record->er_cpuid);
599 /*
600 * At this point we need to return to the ck code
601 * to scan to see if a grace period has elapsed.
602 * We can't move on to check the thread list, because
603 * in the meantime new threads may have arrived that
604 * in fact belong to a different epoch.
605 */
606 return;
607 }
608 /*
609 	 * Try to find a thread in an epoch section on this CPU
610 	 * waiting on a turnstile.  Otherwise raise the priority of
611 	 * any lower-priority threads still in the section to ours so
612 	 * that they can run and leave the section.
613 */
614 TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
615 /*
616 * Propagate our priority to any other waiters to prevent us
617 * from starving them. They will have their original priority
618 		 * restored on exit from epoch_wait().
619 */
620 curwaittd = tdwait->et_td;
621 if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
622 critical_enter();
623 thread_unlock(td);
624 thread_lock(curwaittd);
625 sched_prio(curwaittd, td->td_priority);
626 thread_unlock(curwaittd);
627 thread_lock(td);
628 critical_exit();
629 }
630 if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
631 ((ts = curwaittd->td_blocked) != NULL)) {
632 /*
633 * We unlock td to allow turnstile_wait to reacquire
634 			 * the thread lock.  Before unlocking it, we enter a
635 			 * critical section so that curwaittd cannot preempt
636 			 * us once dropping the thread lock reenables
637 			 * interrupts.
638 */
639 critical_enter();
640 thread_unlock(td);
641
642 if (turnstile_lock(ts, &lock, &owner)) {
643 if (ts == curwaittd->td_blocked) {
644 MPASS(TD_IS_INHIBITED(curwaittd) &&
645 TD_ON_LOCK(curwaittd));
646 critical_exit();
647 turnstile_wait(ts, owner,
648 curwaittd->td_tsqueue);
649 counter_u64_add(turnstile_count, 1);
650 thread_lock(td);
651 return;
652 }
653 turnstile_unlock(ts, lock);
654 }
655 thread_lock(td);
656 critical_exit();
657 KASSERT(td->td_locks == locksheld,
658 ("%d extra locks held", td->td_locks - locksheld));
659 }
660 }
661 /*
662 * We didn't find any threads actually blocked on a lock
663 * so we have nothing to do except context switch away.
664 */
665 counter_u64_add(switch_count, 1);
666 mi_switch(SW_VOL | SWT_RELINQUISH);
667 /*
668 	 * It is important that the thread lock is dropped while yielding
669 	 * to allow other threads to acquire the lock pointed to by
670 	 * TDQ_LOCKPTR(td).  Currently mi_switch() will unlock the
671 	 * thread lock before returning.  Otherwise a deadlock-like
672 	 * situation might arise.
673 */
674 thread_lock(td);
675 }
676
677 void
678 epoch_wait_preempt(epoch_t epoch)
679 {
680 struct thread *td;
681 int was_bound;
682 int old_cpu;
683 int old_pinned;
684 u_char old_prio;
685 int locks __unused;
686
687 MPASS(cold || epoch != NULL);
688 INIT_CHECK(epoch);
689 td = curthread;
690 #ifdef INVARIANTS
691 locks = curthread->td_locks;
692 MPASS(epoch->e_flags & EPOCH_PREEMPT);
693 if ((epoch->e_flags & EPOCH_LOCKED) == 0)
694 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
695 "epoch_wait() can be long running");
696 KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
697 "of an epoch section of the same epoch"));
698 #endif
699 DROP_GIANT();
700 thread_lock(td);
701
702 old_cpu = PCPU_GET(cpuid);
703 old_pinned = td->td_pinned;
704 old_prio = td->td_priority;
705 was_bound = sched_is_bound(td);
706 sched_unbind(td);
707 td->td_pinned = 0;
708 sched_bind(td, old_cpu);
709
710 ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
711 NULL);
712
713 /* restore CPU binding, if any */
714 if (was_bound != 0) {
715 sched_bind(td, old_cpu);
716 } else {
717 /* get thread back to initial CPU, if any */
718 if (old_pinned != 0)
719 sched_bind(td, old_cpu);
720 sched_unbind(td);
721 }
722 /* restore pinned after bind */
723 td->td_pinned = old_pinned;
724
725 /* restore thread priority */
726 sched_prio(td, old_prio);
727 thread_unlock(td);
728 PICKUP_GIANT();
729 KASSERT(td->td_locks == locks,
730 ("%d residual locks held", td->td_locks - locks));
731 }
732
733 static void
734 epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
735 void *arg __unused)
736 {
737 cpu_spinwait();
738 }
739
740 void
741 epoch_wait(epoch_t epoch)
742 {
743
744 MPASS(cold || epoch != NULL);
745 INIT_CHECK(epoch);
746 MPASS(epoch->e_flags == 0);
747 critical_enter();
748 ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
749 critical_exit();
750 }
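/*
 * Illustrative sketch of the usual writer-side pattern built on the wait
 * primitives above: unlink the object so no new readers can find it, wait
 * for pre-existing read sections to drain, then reclaim.  Lock, list and
 * malloc-type names are hypothetical.
 *
 *	FOO_WLOCK();
 *	LIST_REMOVE(obj, foo_link);
 *	FOO_WUNLOCK();
 *	epoch_wait_preempt(foo_epoch);	(epoch_wait() if non-preemptible)
 *	free(obj, M_FOO);
 */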
751
752 void
753 epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
754 {
755 epoch_record_t er;
756 ck_epoch_entry_t *cb;
757
758 cb = (void *)ctx;
759
760 MPASS(callback);
761 /* too early in boot to have epoch set up */
762 if (__predict_false(epoch == NULL))
763 goto boottime;
764 #if !defined(EARLY_AP_STARTUP)
765 if (__predict_false(inited < 2))
766 goto boottime;
767 #endif
768
769 critical_enter();
770 *DPCPU_PTR(epoch_cb_count) += 1;
771 er = epoch_currecord(epoch);
772 ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
773 critical_exit();
774 return;
775 boottime:
776 callback(ctx);
777 }
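/*
 * Illustrative sketch of asynchronous reclamation with epoch_call(): the
 * object embeds an epoch_context, and the callback recovers the object
 * with __containerof() once a grace period has elapsed.  The "foo" names
 * are hypothetical.
 *
 *	struct foo {
 *		LIST_ENTRY(foo)		foo_link;
 *		struct epoch_context	foo_epoch_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		struct foo *f;
 *
 *		f = __containerof(ctx, struct foo, foo_epoch_ctx);
 *		free(f, M_FOO);
 *	}
 *
 *	epoch_call(foo_epoch, foo_free_cb, &f->foo_epoch_ctx);
 */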
778
779 static void
780 epoch_call_task(void *arg __unused)
781 {
782 ck_stack_entry_t *cursor, *head, *next;
783 ck_epoch_record_t *record;
784 epoch_record_t er;
785 epoch_t epoch;
786 ck_stack_t cb_stack;
787 int i, npending, total;
788
789 ck_stack_init(&cb_stack);
790 critical_enter();
791 epoch_enter(global_epoch);
792 for (total = i = 0; i != MAX_EPOCHS; i++) {
793 epoch = epoch_array + i;
794 if (__predict_false(
795 atomic_load_acq_int(&epoch->e_in_use) == 0))
796 continue;
797 er = epoch_currecord(epoch);
798 record = &er->er_record;
799 if ((npending = record->n_pending) == 0)
800 continue;
801 ck_epoch_poll_deferred(record, &cb_stack);
802 total += npending - record->n_pending;
803 }
804 epoch_exit(global_epoch);
805 *DPCPU_PTR(epoch_cb_count) -= total;
806 critical_exit();
807
808 counter_u64_add(epoch_call_count, total);
809 counter_u64_add(epoch_call_task_count, 1);
810
811 head = ck_stack_batch_pop_npsc(&cb_stack);
812 for (cursor = head; cursor != NULL; cursor = next) {
813 struct ck_epoch_entry *entry =
814 ck_epoch_entry_container(cursor);
815
816 next = CK_STACK_NEXT(cursor);
817 entry->function(entry);
818 }
819 }
820
821 static int
822 in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
823 {
824 epoch_record_t er;
825 struct epoch_tracker *tdwait;
826 struct thread *td;
827
828 MPASS(epoch != NULL);
829 MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
830 td = curthread;
831 if (THREAD_CAN_SLEEP())
832 return (0);
833 critical_enter();
834 er = epoch_currecord(epoch);
835 TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
836 if (tdwait->et_td == td) {
837 critical_exit();
838 return (1);
839 }
840 #ifdef INVARIANTS
841 if (dump_onfail) {
842 MPASS(td->td_pinned);
843 printf("cpu: %d id: %d\n", curcpu, td->td_tid);
844 TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
845 printf("td_tid: %d ", tdwait->et_td->td_tid);
846 printf("\n");
847 }
848 #endif
849 critical_exit();
850 return (0);
851 }
852
853 #ifdef INVARIANTS
854 static void
855 epoch_assert_nocpu(epoch_t epoch, struct thread *td)
856 {
857 epoch_record_t er;
858 int cpu;
859 bool crit;
860
861 crit = td->td_critnest > 0;
862
863 /* Check for a critical section mishap. */
864 CPU_FOREACH(cpu) {
865 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
866 KASSERT(er->er_td != td,
867 ("%s critical section in epoch '%s', from cpu %d",
868 (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
869 }
870 }
871 #else
872 #define epoch_assert_nocpu(e, td) do {} while (0)
873 #endif
874
875 int
876 in_epoch_verbose(epoch_t epoch, int dump_onfail)
877 {
878 epoch_record_t er;
879 struct thread *td;
880
881 if (__predict_false((epoch) == NULL))
882 return (0);
883 if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
884 return (in_epoch_verbose_preempt(epoch, dump_onfail));
885
886 /*
887 	 * The thread being in a critical section is a necessary
888 	 * condition for being correctly inside a non-preemptible
889 	 * epoch; if it is not in one, it is definitely not in this epoch.
890 */
891 td = curthread;
892 if (td->td_critnest == 0) {
893 epoch_assert_nocpu(epoch, td);
894 return (0);
895 }
896
897 /*
898 	 * The current thread is in a critical section, so the epoch record will be
899 * stable for the rest of this function. Knowing that the record is not
900 * active is sufficient for knowing whether we're in this epoch or not,
901 * since it's a pcpu record.
902 */
903 er = epoch_currecord(epoch);
904 if (er->er_record.active == 0) {
905 epoch_assert_nocpu(epoch, td);
906 return (0);
907 }
908
909 MPASS(er->er_td == td);
910 return (1);
911 }
912
913 int
914 in_epoch(epoch_t epoch)
915 {
916 return (in_epoch_verbose(epoch, 0));
917 }
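/*
 * Illustrative sketch: in_epoch() is typically used in assertions that a
 * function is reached from within a read section; with INVARIANTS, the
 * dump_onfail argument to in_epoch_verbose() additionally prints the
 * per-CPU tracker list of a preemptible epoch when the check fails.
 * "foo_epoch" is hypothetical.
 *
 *	MPASS(in_epoch(foo_epoch));
 *	KASSERT(in_epoch_verbose(foo_epoch, 1), ("foo: not in foo_epoch"));
 */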
918
919 static void
920 epoch_drain_cb(struct epoch_context *ctx)
921 {
922 struct epoch *epoch =
923 __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;
924
925 if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
926 mtx_lock(&epoch->e_drain_mtx);
927 wakeup(epoch);
928 mtx_unlock(&epoch->e_drain_mtx);
929 }
930 }
931
932 void
933 epoch_drain_callbacks(epoch_t epoch)
934 {
935 epoch_record_t er;
936 struct thread *td;
937 int was_bound;
938 int old_pinned;
939 int old_cpu;
940 int cpu;
941
942 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
943 "epoch_drain_callbacks() may sleep!");
944
945 /* too early in boot to have epoch set up */
946 if (__predict_false(epoch == NULL))
947 return;
948 #if !defined(EARLY_AP_STARTUP)
949 if (__predict_false(inited < 2))
950 return;
951 #endif
952 DROP_GIANT();
953
954 sx_xlock(&epoch->e_drain_sx);
955 mtx_lock(&epoch->e_drain_mtx);
956
957 td = curthread;
958 thread_lock(td);
959 old_cpu = PCPU_GET(cpuid);
960 old_pinned = td->td_pinned;
961 was_bound = sched_is_bound(td);
962 sched_unbind(td);
963 td->td_pinned = 0;
964
965 CPU_FOREACH(cpu)
966 epoch->e_drain_count++;
967 CPU_FOREACH(cpu) {
968 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
969 sched_bind(td, cpu);
970 epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
971 }
972
973 /* restore CPU binding, if any */
974 if (was_bound != 0) {
975 sched_bind(td, old_cpu);
976 } else {
977 /* get thread back to initial CPU, if any */
978 if (old_pinned != 0)
979 sched_bind(td, old_cpu);
980 sched_unbind(td);
981 }
982 /* restore pinned after bind */
983 td->td_pinned = old_pinned;
984
985 thread_unlock(td);
986
987 while (epoch->e_drain_count != 0)
988 msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);
989
990 mtx_unlock(&epoch->e_drain_mtx);
991 sx_xunlock(&epoch->e_drain_sx);
992
993 PICKUP_GIANT();
994 }
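/*
 * Illustrative sketch: epoch_drain_callbacks() is intended for teardown
 * paths (e.g. module unload) that must know every callback previously
 * deferred with epoch_call() has finished running before the resources
 * those callbacks touch go away.  The "foo" names are hypothetical.
 *
 *	epoch_drain_callbacks(foo_epoch);
 *	uma_zdestroy(foo_zone);		(no foo_free_cb can still be running)
 */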