/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
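
/*
 * A minimal sketch of the blocking pattern described above, assuming a
 * hypothetical consumer that sleeps on the address of its own flag word
 * ("example_flag" and "example_sleep" are illustrative names only and do
 * not exist in the tree).  Real consumers such as sleep(9) and condition
 * variables wrap this sequence with additional bookkeeping; see the
 * wakeup-side sketch near sleepq_broadcast() below.
 */
#if 0
static int example_flag;

static void
example_sleep(void)
{

	sleepq_lock(&example_flag);	/* Lock the wait channel's chain. */
	sleepq_add(&example_flag, NULL, "exslp", SLEEPQ_SLEEP, 0);
	sleepq_wait(&example_flag, 0);	/* Drops the chain lock and blocks. */
}
#endif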

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/kern/subr_sleepqueue.c 302350 2016-07-05 18:47:17Z glebius $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
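
/*
 * An illustrative picture of the two lists described above (added here
 * for exposition, not from the original source): one chain bucket holding
 * queues for two wait channels.  Channel B has two waiters, so the sleep
 * queue lent by its second waiter sits on B's free list.
 *
 *	sleepq_chains[i].sc_queues --> sq(wchan A) --> sq(wchan B)
 *	  (chain list via sq_hash)      |               |
 *	                                |               +- sq_blocked: tdB1, tdB2
 *	                                |               +- sq_free: sq lent by tdB2
 *	                                +- sq_blocked: tdA1
 *	                                +- sq_free: (empty)
 */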

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked by the
 * caller.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after the given sbintime_t has elapsed if the thread has
 * not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold)
		panic("timed sleep before timers are working");
	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
}
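
/*
 * Note that legacy tick-based callers do not normally invoke this function
 * directly; a sleepq_set_timeout() wrapper macro in <sys/sleepqueue.h>
 * converts a tick count into an sbintime_t, roughly:
 *
 *	sleepq_set_timeout_sbt(wchan, tick_sbt * timo, 0, C_HARDCLOCK);
 *
 * See the header for the authoritative definition.
 */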

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters with the sleep queue chain lock held and exits with
 * the thread lock held.  The thread lock may have transitioned from the
 * sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals for this thread.  If not,
	 * we can switch immediately.  Otherwise, do the signal processing
	 * directly.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == -1) {
		mtx_unlock(&ps->ps_mtx);
		KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
		KASSERT(TD_SBDRY_INTR(td),
		    ("lost TDF_SERESTART or TDF_SEINTR"));
		KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
		    (TDF_SEINTR | TDF_SERESTART),
		    ("both TDF_SEINTR and TDF_SERESTART"));
		ret = TD_SBDRY_ERRNO(td);
	} else if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}
	/*
	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
	 * thread_lock() are currently held in tdsendsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (_callout_stop_safe(&td->td_slpcallout, CS_EXECUTING, NULL)
	    == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
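
/*
 * The four wait variants above compose sleepq_switch() with the timeout
 * and signal checks.  Below is a condensed, hypothetical sketch of how a
 * msleep(9)-style wrapper selects among them based on its "catch" and
 * timeout arguments; the real logic lives in _sleep() in kern_synch.c
 * and additionally handles interlock release, PDROP, and so on.
 */
#if 0
static int
example_msleep_core(void *ident, struct lock_object *lock, int pri,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags, int catch)
{
	int rval, sleepq_flags;

	sleepq_flags = SLEEPQ_SLEEP;
	if (catch)
		sleepq_flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (catch && sbt != 0)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
	return (rval);
}
#endif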

/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	while ((td = TAILQ_FIRST(&sq->sq_blocked[queue])) != NULL) {
		thread_lock(td);
		wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}
	return (wakeup_swapper);
}
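
/*
 * Neither resume routine calls kick_proc0() itself because the caller
 * still holds the sleep queue chain lock at that point, so waking the
 * swapper is deferred to the caller.  A sketch of that contract, modeled
 * on the wakeup(9)/wakeup_one(9) implementations in kern_synch.c
 * ("example_wakeup_all" is an illustrative name only):
 */
#if 0
static void
example_wakeup_all(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();	/* Safe now that the chain lock is dropped. */
}
#endif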

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	thread_lock(td);
	if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
		thread_unlock(td);
		if (wakeup_swapper)
			kick_proc0();
		return;
	}

	/*
	 * If the thread is on the SLEEPQ but isn't sleeping yet, it
	 * can either be on another CPU in between sleepq_add() and
	 * one of the sleepq_*wait*() routines or it can be in
	 * sleepq_catch_signals().
	 */
	if (TD_ON_SLEEPQ(td)) {
		td->td_flags |= TDF_TIMEOUT;
		thread_unlock(td);
		return;
	}

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		wakeup_swapper = setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().
	 * Otherwise, we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished, partial_print;

	error = 0;
	finished = false;
	partial_print = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create();

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info. */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks. */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n",
			    sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif