/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
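
/*
 * As an illustration of the API described above, a minimal sketch of the
 * blocking-side protocol as a higher-level primitive such as tsleep(9)
 * might drive it.  The wait channel 'wchan', interlock 'lock', and
 * priority 'pri' are hypothetical placeholders, not names defined in
 * this file:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, pri);
 *
 * sleepq_add() lends the current thread's sleep queue to the wait channel
 * and sleepq_wait() blocks until some other thread calls sleepq_signal()
 * or sleepq_broadcast() on the same channel.
 */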

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
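
/*
 * A small worked example of the hash above, assuming the default SC_SHIFT
 * of 8 and SC_TABLESIZE of 256 (so SC_MASK is 0xff): a wait channel at
 * the hypothetical address 0x1234 hashes to
 * ((0x1234 >> 8) ^ 0x1234) & 0xff == 0x12 ^ 0x34 == 0x26.  Folding the
 * upper bits in before masking spreads channels across chains even when
 * their addresses only differ above the mask.
 */
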
#define	NR_SLEEPQS	2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);
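
/*
 * An illustrative picture of the structures above (a sketch, not to
 * scale): each chain anchors the sleep queues currently attached to wait
 * channels that hash to it, and each attached queue carries the spare
 * queues lent by any additional waiters on its free list.
 *
 *	sleepq_chains[i] --> sq (wchan A) --> sq (wchan B) --> ...
 *	                       |
 *	                       +-- sq_blocked[]: threads asleep on A
 *	                       +-- sq_free: spare queues of those threads
 */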

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
152 0, "maxmimum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked by the
 * caller.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
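
/*
 * A minimal sketch of how a caller pairs the three routines above to
 * examine a wait channel; 'wchan' is a hypothetical channel pointer:
 *
 *	sleepq_lock(wchan);
 *	sq = sleepq_lookup(wchan);
 *	if (sq != NULL) {
 *		(inspect sq while the chain lock is held)
 *	}
 *	sleepq_release(wchan);
 *
 * sleepq_type() later in this file follows exactly this pattern.
 */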

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue once the timeout specified by sbt expires, if the thread
 * has not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
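
/*
 * A sketch of a timed sleep built on the routine above, assuming the
 * sleepq_set_timeout() convenience macro from <sys/sleepqueue.h>, which
 * converts a tick count to an sbintime_t; 'wchan' and 'timo' are
 * hypothetical:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, NULL, "tmo", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout(wchan, timo);
 *	error = sleepq_timedwait(wchan, 0);
 *
 * error is EWOULDBLOCK if the callout fired before a wakeup arrived,
 * and 0 otherwise.
 */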

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		    (void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
488 ("lost TDF_SERESTART of TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}

			/*
			 * Do not go to sleep if this thread was the
			 * ptrace(2) attach leader.  cursig() consumed
			 * SIGSTOP from PT_ATTACH, but we usually act
			 * on the signal by interrupting sleep, and
			 * should do that here as well.
			 */
			if ((td->td_dbgflags & TDB_FSTP) != 0) {
				if (ret == 0)
					ret = EINTR;
				td->td_dbgflags &= ~TDB_FSTP;
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 *   (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.  But recheck
	 * td_sleeptimo anyway.
	 */
	res = 0;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	if (td->td_flags & TDF_TIMEOUT)
		td->td_flags &= ~TDF_TIMEOUT;
	else
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread reused.
		 */
		callout_stop(&td->td_slpcallout);
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
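
/*
 * Illustrative caller-side handling of the return values above, following
 * the precedence visible in sleepq_timedwait_sig(): an interrupt or
 * suspension result wins over a signal result, which in turn wins over a
 * timeout.  'wchan' and 'pri' are hypothetical:
 *
 *	error = sleepq_timedwait_sig(wchan, pri);
 *	switch (error) {
 *	case 0:			(awakened by signal/broadcast)
 *		break;
 *	case EWOULDBLOCK:	(the timeout expired)
 *		break;
 *	case EINTR:
 *	case ERESTART:		(interrupted by a signal)
 *		break;
 *	}
 */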

/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find a thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct threadqueue *head;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	head = &sq->sq_blocked[queue];
	if (flags & SLEEPQ_UNFAIR) {
		/*
		 * Find the most recently sleeping thread, but try to
		 * skip threads still in process of context switch to
		 * avoid spinning on the thread lock.
		 */
		sc = SC_LOOKUP(wchan);
		besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
		while (besttd->td_lock != &sc->sc_lock) {
			td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
			if (td == NULL)
				break;
			besttd = td;
		}
	} else {
		/*
		 * Find the highest priority thread on the queue.  If there
		 * is a tie, use the thread that first appears in the queue
		 * as it has been sleeping the longest since threads are
		 * always added to the tail of sleep queues.
		 */
		besttd = td = TAILQ_FIRST(head);
		while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
			if (td->td_priority < besttd->td_priority)
				besttd = td;
		}
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
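
/*
 * A sketch of the wakeup-side protocol built on sleepq_signal(), roughly
 * as wakeup_one(9) drives it; 'wchan' is hypothetical, and kick_proc0()
 * must only be called after the chain lock has been dropped:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */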

static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}

/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}

	return (wakeup_swapper);
}
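
/*
 * A hypothetical predicate for sleepq_remove_matching(), in the style of
 * match_any() above; it would wake only the threads of one process
 * ('target' is an assumed variable, not defined in this file):
 *
 *	static bool
 *	match_proc(struct thread *td)
 *	{
 *
 *		return (td->td_proc == target);
 *	}
 *
 * sleepq_chains_remove_matching() below applies such a predicate across
 * every chain in the hash table.
 */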

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc __unused;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not,
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().
	 * Otherwise we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq, *sq1;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished;

	error = 0;
	finished = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished; i++) {
		/*
		 * We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n",
			    sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif