/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
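
/*
 * A rough sketch of the blocking side of this API, loosely modeled on
 * what _sleep() and the condition variable code do.  This hypothetical
 * example_block() is illustrative only; real consumers also handle
 * interlocks, priorities, and return values:
 *
 *	static void
 *	example_block(void *chan)
 *	{
 *		sleepq_lock(chan);
 *		sleepq_add(chan, NULL, "example", SLEEPQ_SLEEP, 0);
 *		sleepq_wait(chan, 0);
 *	}
 *
 * Note that sleepq_wait() consumes the sleep queue chain lock, so the
 * blocking side does not pair sleepq_lock() with sleepq_release().
 */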

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/kern/subr_sleepqueue.c 202966 2010-01-25 12:05:51Z attilio $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define SC_TABLESIZE    128                     /* Must be power of 2. */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
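/*
 * Worked example (a sketch, assuming a 32-bit wait channel address):
 * a channel of 0x80ab1234 hashes to (0x80ab1234 >> 8) & 0x7f = 0x12,
 * so it lands in sleepq_chains[18].  Wait channels whose addresses
 * differ only in their low 8 bits share a chain.
 */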
#define NR_SLEEPQS      2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
        TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
        u_int sq_blockedcnt[NR_SLEEPQS];        /* (c) N. of blocked threads. */
        LIST_ENTRY(sleepqueue) sq_hash;         /* (c) Chain and free list. */
        LIST_HEAD(, sleepqueue) sq_free;        /* (c) Free queues. */
        void    *sq_wchan;                      /* (c) Wait channel. */
        int     sq_type;                        /* (c) Queue type. */
#ifdef INVARIANTS
        struct lock_object *sq_lock;            /* (c) Associated lock. */
#endif
};
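
/*
 * Illustrative layout (a sketch, not code): a chain holding one wait
 * channel with three sleeping threads.  The first thread to block lent
 * its sleepqueue to the channel; the queues lent by the later waiters
 * sit on that queue's free list until they are handed back one at a
 * time as threads are resumed:
 *
 *	sc_queues -> sq { sq_wchan = wchan }
 *	                  sq_blocked[q]: td1 -> td2 -> td3
 *	                  sq_free:       td2's sq -> td3's sq
 */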

struct sleepqueue_chain {
        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
        struct mtx sc_lock;                     /* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
        u_int   sc_depth;                       /* Length of sc_queues. */
        u_int   sc_max_depth;                   /* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void     sleepq_profile(const char *wmesg);
static int      prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int      sleepq_catch_signals(void *wchan, int pri);
static int      sleepq_check_signals(void);
static int      sleepq_check_timeout(void);
#ifdef INVARIANTS
static void     sleepq_dtor(void *mem, int size, void *arg);
#endif
static int      sleepq_init(void *mem, int size, int flags);
static int      sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
                    int pri);
static void     sleepq_switch(void *wchan, int pri);
static void     sleepq_timeout(void *arg);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
        struct sysctl_oid *chain_oid;
        char chain_name[10];
#endif
        int i;

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
                    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
                    NULL);
#endif
        }
        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
            NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
            NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

        thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

        return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

        uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The sleep queue chain for the wait channel must already be locked.
 * If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue != NULL);
        MPASS(wchan != NULL);
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));

        /* If this thread is not allowed to sleep, die a horrible death. */
        KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
            ("Trying to sleep, but sleeping is prohibited for this thread"));

        /* Look up the sleep queue associated with the wait channel 'wchan'. */
        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel does not already have a sleep queue, use
         * this thread's sleep queue.  Otherwise, insert the current thread
         * into the sleep queue already in use by this wait channel.
         */
        if (sq == NULL) {
#ifdef INVARIANTS
                int i;

                sq = td->td_sleepqueue;
                for (i = 0; i < NR_SLEEPQS; i++) {
                        KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
                            ("thread's sleep queue %d is not empty", i));
                        KASSERT(sq->sq_blockedcnt[i] == 0,
                            ("thread's sleep queue %d count mismatches", i));
                }
                KASSERT(LIST_EMPTY(&sq->sq_free),
                    ("thread's sleep queue has a non-empty free list"));
                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
                sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth++;
                if (sc->sc_depth > sc->sc_max_depth) {
                        sc->sc_max_depth = sc->sc_depth;
                        if (sc->sc_max_depth > sleepq_max_depth)
                                sleepq_max_depth = sc->sc_max_depth;
                }
#endif
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
                sq->sq_type = flags & SLEEPQ_TYPE;
        } else {
                MPASS(wchan == sq->sq_wchan);
                MPASS(lock == sq->sq_lock);
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        thread_lock(td);
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
        sq->sq_blockedcnt[queue]++;
        td->td_sleepqueue = NULL;
        td->td_sqqueue = queue;
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
        if (flags & SLEEPQ_INTERRUPTIBLE) {
                td->td_flags |= TDF_SINTR;
                td->td_flags &= ~TDF_SLEEPABORT;
                if (flags & SLEEPQ_STOP_ON_BDRY)
                        td->td_flags |= TDF_SBDRY;
        }
        thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
}
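
/*
 * The timeout is expressed in ticks; callers usually derive it from a
 * time interval using hz.  A sketch of a timed sleep (hypothetical
 * caller, error handling elided):
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, NULL, "tmo", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout(chan, 5 * hz);	(roughly five seconds)
 *	if (sleepq_timedwait(chan, 0) == EWOULDBLOCK)
 *		...the sleep timed out...
 */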

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters with the sleep queue chain lock held and returns
 * with the thread lock held; the thread lock may have transitioned
 * from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        struct proc *p;
        struct sigacts *ps;
        int sig, ret, stop_allowed;

        td = curthread;
        p = curproc;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(wchan != NULL);
        /*
         * See if there are any pending signals for this thread.  If not,
         * we can switch immediately.  Otherwise, do the signal processing
         * directly.
         */
        thread_lock(td);
        if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
        stop_allowed = (td->td_flags & TDF_SBDRY) ? SIG_STOP_NOT_ALLOWED :
            SIG_STOP_ALLOWED;
        thread_unlock(td);
        mtx_unlock_spin(&sc->sc_lock);
        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
            (void *)td, (long)p->p_pid, td->td_name);
        PROC_LOCK(p);
        ps = p->p_sigacts;
        mtx_lock(&ps->ps_mtx);
        sig = cursig(td, stop_allowed);
        if (sig == 0) {
                mtx_unlock(&ps->ps_mtx);
                ret = thread_suspend_check(1);
                MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
        } else {
                if (SIGISMEMBER(ps->ps_sigintr, sig))
                        ret = EINTR;
                else
                        ret = ERESTART;
                mtx_unlock(&ps->ps_mtx);
        }
        /*
         * Lock the per-process spinlock prior to dropping the PROC_LOCK
         * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
         * thread_lock() are currently held in tdsignal().
         */
        PROC_SLOCK(p);
        mtx_lock_spin(&sc->sc_lock);
        PROC_UNLOCK(p);
        thread_lock(td);
        PROC_SUNLOCK(p);
        if (ret == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
        /*
         * There were pending signals and this thread is still
         * on the sleep queue, so remove it from the sleep queue.
         */
        if (TD_ON_SLEEPQ(td)) {
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
                         * should not be swapped out.
                         */
                        panic("not waking up swapper");
#endif
                }
        }
        mtx_unlock_spin(&sc->sc_lock);
        MPASS(td->td_lock != &sc->sc_lock);
        return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /*
         * If we have a sleep queue, then we've already been woken up, so
         * just return.
         */
        if (td->td_sleepqueue != NULL) {
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }

        /*
         * If TDF_TIMEOUT is set, then our sleep has been timed out
         * already but we are still on the sleep queue, so dequeue the
         * thread and return.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_ON_SLEEPQ(td));
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
                         * should not be swapped out.
                         */
                        panic("not waking up swapper");
#endif
                }
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }
#ifdef SLEEPQUEUE_PROFILING
        if (prof_enabled)
                sleepq_profile(td->td_wmesg);
#endif
        MPASS(td->td_sleepqueue == NULL);
        sched_sleep(td, pri);
        thread_lock_set(td, &sc->sc_lock);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
        struct thread *td;

        td = curthread;
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /*
         * If TDF_TIMEOUT is set, we timed out.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                return (EWOULDBLOCK);
        }

        /*
         * If TDF_TIMOFAIL is set, the timeout ran after we had
         * already been woken up.
         */
        if (td->td_flags & TDF_TIMOFAIL)
                td->td_flags &= ~TDF_TIMOFAIL;

        /*
         * If callout_stop() fails, then the timeout is running on
         * another CPU, so synchronize with it to avoid having it
         * accidentally wake up a subsequent sleep.
         */
        else if (callout_stop(&td->td_slpcallout) == 0) {
                td->td_flags |= TDF_TIMEOUT;
                TD_SET_SLEEPING(td);
                mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
        }
        return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
        struct thread *td;

        td = curthread;
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /* We are no longer in an interruptible sleep. */
        if (td->td_flags & TDF_SINTR)
                td->td_flags &= ~(TDF_SINTR | TDF_SBDRY);

        if (td->td_flags & TDF_SLEEPABORT) {
                td->td_flags &= ~TDF_SLEEPABORT;
                return (td->td_intrval);
        }

        return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
        struct thread *td;

        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
        sleepq_switch(wchan, pri);
        thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
        int rcatch;
        int rval;

        rcatch = sleepq_catch_signals(wchan, pri);
        rval = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
        struct thread *td;
        int rval;

        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
        sleepq_switch(wchan, pri);
        rval = sleepq_check_timeout();
        thread_unlock(td);

        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
        int rcatch, rvalt, rvals;

        rcatch = sleepq_catch_signals(wchan, pri);
        rvalt = sleepq_check_timeout();
        rvals = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        if (rvals)
                return (rvals);
        return (rvalt);
}

/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
        struct sleepqueue *sq;
        int type;

        MPASS(wchan != NULL);

        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return (-1);
        }
        type = sq->sq_type;
        sleepq_release(wchan);
        return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
        struct sleepqueue_chain *sc;

        MPASS(td != NULL);
        MPASS(sq->sq_wchan != NULL);
        MPASS(td->td_wchan == sq->sq_wchan);
        MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        sc = SC_LOOKUP(sq->sq_wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);

        /* Remove the thread from the queue. */
        sq->sq_blockedcnt[td->td_sqqueue]--;
        TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

        /*
         * Get a sleep queue for this thread.  If this is the last waiter,
         * use the queue itself and take it out of the chain, otherwise,
         * remove a queue from the free list.
         */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
#ifdef INVARIANTS
                sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth--;
#endif
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);

        td->td_wmesg = NULL;
        td->td_wchan = NULL;
        td->td_flags &= ~(TDF_SINTR | TDF_SBDRY);

        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, td->td_name);

        /* Adjust priority if requested. */
        MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
        if (pri != 0 && td->td_priority > pri)
                sched_prio(td, pri);

        /*
         * Note that thread td might not be sleeping if it is running
         * sleepq_catch_signals() on another CPU or is blocked on its
         * proc lock to check signals.  There's no need to mark the
         * thread runnable in that case.
         */
        if (TD_IS_SLEEPING(td)) {
                TD_CLR_SLEEPING(td);
                return (setrunnable(td));
        }
        return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
        struct sleepqueue *sq;
        int i;

        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++) {
                MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
                MPASS(sq->sq_blockedcnt[i] == 0);
        }
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
        struct sleepqueue *sq;
        int i;

        bzero(mem, size);
        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++) {
                TAILQ_INIT(&sq->sq_blocked[i]);
                sq->sq_blockedcnt[i] = 0;
        }
        LIST_INIT(&sq->sq_free);
        return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
        struct thread *td, *besttd;
        int wakeup_swapper;

        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /*
         * Find the highest priority thread on the queue.  If there is a
         * tie, use the thread that first appears in the queue as it has
         * been sleeping the longest since threads are always added to
         * the tail of sleep queues.
         */
        besttd = NULL;
        TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
                if (besttd == NULL || td->td_priority < besttd->td_priority)
                        besttd = td;
        }
        MPASS(besttd != NULL);
        thread_lock(besttd);
        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
        thread_unlock(besttd);
        return (wakeup_swapper);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
        struct thread *td, *tdn;
        int wakeup_swapper;

        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Resume all blocked threads on the sleep queue. */
        wakeup_swapper = 0;
        TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
                thread_lock(td);
                if (sleepq_resume_thread(sq, td, pri))
                        wakeup_swapper = 1;
                thread_unlock(td);
        }
        return (wakeup_swapper);
}
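
/*
 * A rough sketch of the wakeup side, modeled on what wakeup() does
 * (simplified; example_wakeup() is illustrative only).  Both
 * sleepq_signal() and sleepq_broadcast() return nonzero when a resumed
 * thread was swapped out, in which case the caller must kick the
 * swapper after dropping the chain lock:
 *
 *	static void
 *	example_wakeup(void *chan)
 *	{
 *		int wakeup_swapper;
 *
 *		sleepq_lock(chan);
 *		wakeup_swapper = sleepq_broadcast(chan, SLEEPQ_SLEEP, 0, 0);
 *		sleepq_release(chan);
 *		if (wakeup_swapper)
 *			kick_proc0();
 *	}
 */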

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        void *wchan;
        int wakeup_swapper;

        td = arg;
        wakeup_swapper = 0;
        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

        /*
         * First, see if the thread is asleep and get the wait channel if
         * it is.
         */
        thread_lock(td);
        if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                sc = SC_LOOKUP(wchan);
                THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
                sq = sleepq_lookup(wchan);
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                wakeup_swapper = sleepq_resume_thread(sq, td, 0);
                thread_unlock(td);
                if (wakeup_swapper)
                        kick_proc0();
                return;
        }

        /*
         * If the thread is on the SLEEPQ but isn't sleeping yet, it
         * can either be on another CPU in between sleepq_add() and
         * one of the sleepq_*wait*() routines or it can be in
         * sleepq_catch_signals().
         */
        if (TD_ON_SLEEPQ(td)) {
                td->td_flags |= TDF_TIMEOUT;
                thread_unlock(td);
                return;
        }

        /*
         * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
         * then the other thread has already yielded to us, so clear
         * the flag and resume it.  If TDF_TIMEOUT is not set, then we
         * know that the other thread is not on a sleep queue, but it
         * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
         * to let it know that the timeout has already run and doesn't
         * need to be canceled.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_IS_SLEEPING(td));
                td->td_flags &= ~TDF_TIMEOUT;
                TD_CLR_SLEEPING(td);
                wakeup_swapper = setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
        thread_unlock(td);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
        struct sleepqueue *sq;
        int wakeup_swapper;

        /*
         * Look up the sleep queue for this wait channel, then re-check
         * that the thread is asleep on that channel; if it is not, then
         * bail.
         */
        MPASS(wchan != NULL);
        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        /*
         * We cannot lock the thread here as it may be sleeping on a
         * different sleepq.  However, holding the sleepq lock for this
         * wchan can guarantee that we do not miss a wakeup for this
         * channel.  The asserts below will catch any false positives.
         */
        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                sleepq_release(wchan);
                return;
        }
        /* Thread is asleep on sleep queue sq, so wake it up. */
        thread_lock(td);
        MPASS(sq != NULL);
        MPASS(td->td_wchan == wchan);
        wakeup_swapper = sleepq_resume_thread(sq, td, 0);
        thread_unlock(td);
        sleepq_release(wchan);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
        struct sleepqueue *sq;
        void *wchan;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_flags & TDF_SINTR);
        MPASS(intrval == EINTR || intrval == ERESTART);

        /*
         * If the TDF_TIMEOUT flag is set, just leave.  A
         * timeout is scheduled anyhow.
         */
        if (td->td_flags & TDF_TIMEOUT)
                return (0);

        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
        td->td_intrval = intrval;
        td->td_flags |= TDF_SLEEPABORT;
        /*
         * If the thread has not slept yet, it will find the signal in
         * sleepq_catch_signals() and call sleepq_resume_thread().
         * Otherwise we have to do it here.
         */
        if (!TD_IS_SLEEPING(td))
                return (0);
        wchan = td->td_wchan;
        MPASS(wchan != NULL);
        sq = sleepq_lookup(wchan);
        MPASS(sq != NULL);

        /* Thread is asleep on sleep queue sq, so wake it up. */
        return (sleepq_resume_thread(sq, td, 0));
}

#ifdef SLEEPQUEUE_PROFILING
#define SLEEPQ_PROF_LOCATIONS   1024
#define SLEEPQ_SBUFSIZE         (40 * 512)
struct sleepq_prof {
        LIST_ENTRY(sleepq_prof) sp_link;
        const char      *sp_wmesg;
        long            sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
        struct sleepq_prof *sp;

        mtx_lock_spin(&sleepq_prof_lock);
        if (prof_enabled == 0)
                goto unlock;
        LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
                if (sp->sp_wmesg == wmesg)
                        goto done;
        sp = LIST_FIRST(&sleepq_prof_free);
        if (sp == NULL)
                goto unlock;
        sp->sp_wmesg = wmesg;
        LIST_REMOVE(sp, sp_link);
        LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
        sp->sp_count++;
unlock:
        mtx_unlock_spin(&sleepq_prof_lock);
        return;
}

static void
sleepq_prof_reset(void)
{
        struct sleepq_prof *sp;
        int enabled;
        int i;

        mtx_lock_spin(&sleepq_prof_lock);
        enabled = prof_enabled;
        prof_enabled = 0;
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_INIT(&sleepq_hash[i]);
        LIST_INIT(&sleepq_prof_free);
        for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
                sp = &sleepq_profent[i];
                sp->sp_wmesg = NULL;
                sp->sp_count = 0;
                LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
        }
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = prof_enabled;
        error = sysctl_handle_int(oidp, &v, v, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == prof_enabled)
                return (0);
        if (v == 1)
                sleepq_prof_reset();
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = !!v;
        mtx_unlock_spin(&sleepq_prof_lock);

        return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);
        sleepq_prof_reset();

        return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        static int multiplier = 1;
        struct sleepq_prof *sp;
        struct sbuf *sb;
        int enabled;
        int error;
        int i;

retry_sbufops:
        sb = sbuf_new(NULL, NULL, SLEEPQ_SBUFSIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\nwmesg\tcount\n");
        enabled = prof_enabled;
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = 0;
        mtx_unlock_spin(&sleepq_prof_lock);
        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
                        sbuf_printf(sb, "%s\t%ld\n",
                            sp->sp_wmesg, sp->sp_count);
                        if (sbuf_overflowed(sb)) {
                                sbuf_delete(sb);
                                multiplier++;
                                goto retry_sbufops;
                        }
                }
        }
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);

        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
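
/*
 * With SLEEPQUEUE_PROFILING compiled in, a typical session from the
 * shell looks like this (a sketch; the wmesg and count are made up):
 *
 *	# sysctl debug.sleepq.enable=1
 *	... run a workload ...
 *	# sysctl debug.sleepq.stats
 *	wmesg	count
 *	select	42
 *	...
 *	# sysctl debug.sleepq.reset=1
 */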
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
#ifdef INVARIANTS
        struct lock_object *lock;
#endif
        struct thread *td;
        void *wchan;
        int i;

        if (!have_addr)
                return;

        /*
         * First, see if there is an active sleep queue for the wait channel
         * indicated by the address.
         */
        wchan = (void *)addr;
        sc = SC_LOOKUP(wchan);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        goto found;

        /*
         * Second, see if there is an active sleep queue at the address
         * indicated.
         */
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
                        if (sq == (struct sleepqueue *)addr)
                                goto found;
                }

        db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
        return;
found:
        db_printf("Wait channel: %p\n", sq->sq_wchan);
        db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
        if (sq->sq_lock) {
                lock = sq->sq_lock;
                db_printf("Associated Interlock: %p - (%s) %s\n", lock,
                    LOCK_CLASS(lock)->lc_name, lock->lo_name);
        }
#endif
        db_printf("Blocked threads:\n");
        for (i = 0; i < NR_SLEEPQS; i++) {
                db_printf("\nQueue[%d]:\n", i);
                if (TAILQ_EMPTY(&sq->sq_blocked[i]))
                        db_printf("\tempty\n");
                else
                        TAILQ_FOREACH(td, &sq->sq_blocked[i], td_slpq) {
                                db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
                                    td->td_tid, td->td_proc->p_pid,
                                    td->td_name);
                        }
                db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
        }
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
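
/*
 * From the ddb prompt, either command accepts a wait channel address or
 * a sleep queue address, e.g. (a sketch; the address and output values
 * are made up):
 *
 *	db> show sleepq 0xc1234567
 *	Wait channel: 0xc1234567
 *	Queue type: 0
 *	...
 */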
#endif