/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queues of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
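
/*
 * Illustrative sketch (an assumption of this comment, not code from the
 * tree): a consumer such as msleep() or the condition variable code
 * drives the blocking side of this API roughly as follows, where 'chan',
 * 'lock', and 'cond' are hypothetical stand-ins for the caller's wait
 * channel object, interlock, and predicate:
 *
 *	mtx_lock(&lock);
 *	while (!cond) {
 *		sleepq_lock(&chan);
 *		mtx_unlock(&lock);
 *		sleepq_add(&chan, &lock, "sketch", SLEEPQ_MSLEEP);
 *		sleepq_wait(&chan);
 *		mtx_lock(&lock);
 *	}
 *	mtx_unlock(&lock);
 *
 * Taking the chain lock via sleepq_lock() before dropping the interlock
 * closes the lost wakeup race, since a waker must acquire the same chain
 * lock before it can scan the queue.
 */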

#include "opt_sleepqueue_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/kern/subr_sleepqueue.c 150590 2005-09-26 19:43:37Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
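
/*
 * As a worked example (the pointer value is purely illustrative), a wait
 * channel at 0xc1234567 hashes as (0xc1234567 >> 8) & 0x7f = 0x45, so it
 * maps to chain 69 of the 128 chains in the table.
 */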

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
#ifdef INVARIANTS
	int	sq_type;			/* (c) Queue type. */
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};
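
/*
 * A rough sketch (illustrative only) of one chain after two threads, td1
 * and td2, have blocked on the same wait channel 'wc': the first waiter's
 * lent queue holds the channel and the blocked list, while the second
 * waiter's queue sits on the free list for reuse at wakeup time.
 *
 *	sleepq_chains[SC_HASH(wc)].sc_queues
 *	  -> sq: sq_wchan = wc
 *	         sq_blocked: td1 -> td2
 *	         sq_free:    td2's lent sleepqueue
 */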

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

static MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The caller must already hold the associated sleep queue chain
 * lock.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
	    ("trying to sleep while sleeping is prohibited"));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
		sq->sq_type = flags & SLEEPQ_TYPE;
#endif
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE)
		td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}
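
/*
 * A timed sleep (sketch with the same hypothetical 'chan' and 'lock' as
 * above) just adds a sleepq_set_timeout() call between sleepq_add() and
 * the wait; sleepq_timedwait() then returns EWOULDBLOCK if the callout
 * fired first:
 *
 *	sleepq_lock(&chan);
 *	mtx_unlock(&lock);
 *	sleepq_add(&chan, &lock, "sketch", SLEEPQ_MSLEEP);
 *	sleepq_set_timeout(&chan, hz);		(hz ticks == one second)
 *	error = sleepq_timedwait(&chan);
 */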

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	int sig;

	td = curthread;
	p = td->td_proc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, p->p_comm);

	/* Mark thread as being in an interruptible sleep. */
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(TD_ON_SLEEPQ(td));
	sleepq_release(wchan);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (sig == 0 && thread_suspend_check(1))
		sig = SIGSTOP;
	PROC_UNLOCK(p);

	/*
	 * If there were pending signals and this thread is still on
	 * the sleep queue, remove it from the sleep queue.  If the
	 * thread was removed from the sleep queue while we were blocked
	 * above, then clear TDF_SINTR before returning.
	 */
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td) && sig != 0)
		sleepq_resume_thread(sq, td, -1);
	else if (!TD_ON_SLEEPQ(td) && sig == 0)
		td->td_flags &= ~TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		mtx_lock_spin(&sched_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sc->sc_lock);

	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_SINTR is clear, then we were awakened while executing
	 * sleepq_catch_signals().
	 */
	if (!(td->td_flags & TDF_SINTR))
		return (0);

	/* We are no longer in an interruptible sleep. */
	td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);
	return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't time out, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
	struct thread *td;
	struct proc *p;
	int rval;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	/* XXX: Should we always be calling cursig()? */
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	} else
		rval = 0;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	return (rval);
}
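
/*
 * Sketch of how a consumer with the equivalent of msleep()'s PCATCH would
 * combine these routines (hypothetical 'chan' and 'lock' again; note that
 * the sleep must be marked interruptible when it is added):
 *
 *	sleepq_lock(&chan);
 *	mtx_unlock(&lock);
 *	sleepq_add(&chan, &lock, "sketch",
 *	    SLEEPQ_MSLEEP | SLEEPQ_INTERRUPTIBLE);
 *	sig = sleepq_catch_signals(&chan);
 *	rval = sleepq_wait_sig(&chan);
 *	if (rval == 0)
 *		rval = sleepq_calc_signal_retval(sig);
 *
 * A non-zero rval is then EINTR, ERESTART, or the interrupt value set by
 * the signal code.
 */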

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	MPASS(!(curthread->td_flags & TDF_SINTR));
	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
	int rval;

	MPASS(!(curthread->td_flags & TDF_SINTR));
	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
	int rvalt, rvals;

	sleepq_switch(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught || rvalt == 0)
		return (rvals);
	else
		return (rvalt);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static void
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		sched_prio(td, pri);
	setrunnable(td);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked, td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	mtx_lock_spin(&sched_lock);
	sleepq_resume_thread(sq, besttd, pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	mtx_lock_spin(&sched_lock);
	while (!TAILQ_EMPTY(&sq->sq_blocked))
		sleepq_resume_thread(sq, TAILQ_FIRST(&sq->sq_blocked), pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}
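
/*
 * The wakeup side (sketch; 'chan' is again hypothetical) only needs the
 * chain lock, and both sleepq_signal() and sleepq_broadcast() drop that
 * lock themselves, so no sleepq_release() call follows them:
 *
 *	sleepq_lock(&chan);
 *	sleepq_broadcast(&chan, SLEEPQ_MSLEEP, -1);
 *
 * A pri argument of -1 leaves the priorities of the awakened threads
 * unchanged.
 */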

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and the
	 * call to callout_stop() to stop this routine would have failed,
	 * meaning that it would have already set TDF_TIMEOUT to
	 * synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		sleepq_resume_thread(sq, td, -1);
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is still asleep on that channel; if it is not,
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_resume_thread(sq, td, -1);
	sleepq_release(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
	wchan = td->td_wchan;
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}