/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Sleep queues also provide some additional functionality, such as the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
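
/*
 * A minimal sketch of the blocking-side usage pattern, as exercised by
 * consumers such as sleep/wakeup and condition variables (illustrative
 * only; 'ex_object' is a hypothetical kernel object whose address serves
 * as the wait channel, and the NULL interlock elides the lock checks
 * described above):
 *
 *	sleepq_lock(&ex_object);
 *	sleepq_add(&ex_object, NULL, "exwait", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(&ex_object, 0);
 *
 * The wakeup side is sketched before sleepq_signal() below.
 */
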
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD: releng/10.3/sys/kern/subr_sleepqueue.c 296913 2016-03-15 17:09:27Z kib $");
61
62 #include "opt_sleepqueue_profiling.h"
63 #include "opt_ddb.h"
64 #include "opt_kdtrace.h"
65 #include "opt_sched.h"
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/lock.h>
70 #include <sys/kernel.h>
71 #include <sys/ktr.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/sbuf.h>
75 #include <sys/sched.h>
76 #include <sys/sdt.h>
77 #include <sys/signalvar.h>
78 #include <sys/sleepqueue.h>
79 #include <sys/sysctl.h>
80
81 #include <vm/uma.h>
82
83 #ifdef DDB
84 #include <ddb/ddb.h>
85 #endif
86
87 /*
88 * Constants for the hash table of sleep queue chains.
89 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
90 */
91 #define SC_TABLESIZE 256 /* Must be power of 2. */
92 #define SC_MASK (SC_TABLESIZE - 1)
93 #define SC_SHIFT 8
94 #define SC_HASH(wc) ((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
95 SC_MASK)
96 #define SC_LOOKUP(wc) &sleepq_chains[SC_HASH(wc)]
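/*
 * For illustration (this example is not part of the code): with SC_SHIFT
 * of 8 and SC_MASK of 0xff, a wait channel address of 0xc0de1234 hashes
 * to ((0xc0de1234 >> 8) ^ 0xc0de1234) & 0xff = 0xc01ecc26 & 0xff = 0x26,
 * i.e. chain 38.  Folding the shifted address back in with the XOR mixes
 * higher-order bits into the index so that similarly aligned objects do
 * not all land in the same few chains.
 */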
#define	NR_SLEEPQS	2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
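
/*
 * An illustrative picture of the two lists (names are arbitrary):
 *
 *	sc_queues --> sq (wchan A) --> sq (wchan B) --> ...
 *	                  |
 *	               sq_free --> sq --> sq
 *	               (queues lent by the other threads blocked on A)
 */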
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved by a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The caller must hold the lock on the associated sleep queue
 * chain.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue once the given sbintime_t interval has elapsed, if the
 * thread has not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
}
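
/*
 * For reference, legacy tick-based callers do not invoke
 * sleepq_set_timeout_sbt() directly; sys/sleepqueue.h wraps it along
 * these lines (paraphrased here; the header has the authoritative
 * definition):
 *
 *	#define sleepq_set_timeout(wchan, timo) \
 *	    sleepq_set_timeout_sbt((wchan), tick_sbt * (timo), 0, \
 *	    C_HARDCLOCK)
 */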

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals for this thread.  If not,
	 * we can switch immediately.  Otherwise do the signal processing
	 * directly.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}
	/*
	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
	 * thread_lock() are currently held in tdsendsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (_callout_stop_safe(&td->td_slpcallout, CS_MIGRBLOCK)
	    == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of the sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}
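
/*
 * Note on the queue borrow/return discipline maintained above: every
 * thread owns exactly one sleep queue except while it is blocked.
 * sleepq_add() donates td_sleepqueue to the wait channel (either as the
 * channel's queue or as an entry on that queue's free list), and
 * sleepq_resume_thread() hands one back, so each wakeup restores the
 * one-queue-per-thread balance.
 */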

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
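
/*
 * A sketch of how a wakeup-one style consumer is expected to drive
 * sleepq_signal() (illustrative only; 'ident' stands for any wait
 * channel address):
 *
 *	sleepq_lock(ident);
 *	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(ident);
 *	if (wakeup_swapper)
 *		kick_proc0();
 *
 * Note that sleepq_signal() returns with the chain still locked, so the
 * caller is responsible for sleepq_release() and, when the return value
 * is non-zero, for kicking proc0 to swap the resumed thread back in.
 */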

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (sleepq_resume_thread(sq, td, pri))
			wakeup_swapper = 1;
		thread_unlock(td);
	}
	return (wakeup_swapper);
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	thread_lock(td);
	if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
		thread_unlock(td);
		if (wakeup_swapper)
			kick_proc0();
		return;
	}

	/*
	 * If the thread is on the SLEEPQ but isn't sleeping yet, it
	 * can either be on another CPU in between sleepq_add() and
	 * one of the sleepq_*wait*() routines or it can be in
	 * sleepq_catch_signals().
	 */
	if (TD_ON_SLEEPQ(td)) {
		td->td_flags |= TDF_TIMEOUT;
		thread_unlock(td);
		return;
	}

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		wakeup_swapper = setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif