/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Sleep queues also provide some additional functionality, including the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
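
/*
 * As a rough sketch (illustrative only, not the canonical msleep() or
 * cv_wait() implementation), a consumer of this API strings the pieces
 * together roughly as follows, where "wchan", "lock", "wmesg", "timo",
 * and the SLEEPQ_MSLEEP type flag stand in for the caller's actual
 * arguments:
 *
 *	sq = sleepq_lookup(wchan);		(locks the chain)
 *	sleepq_add(sq, wchan, lock, wmesg, SLEEPQ_MSLEEP);
 *	if (timo)
 *		sleepq_set_timeout(wchan, timo);
 *	sleepq_wait(wchan);			(blocks; chain lock is dropped)
 *
 * The interruptible and timed variants below follow the same shape.
 */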

#include "opt_sleepqueue_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/kern/subr_sleepqueue.c,v 1.10.2.5 2005/04/26 18:01:31 jhb Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
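
/*
 * As a worked example (with a made-up pointer value), a wait channel at
 * 0xc1234567 hashes to ((0xc1234567 >> 8) & 0x7f) = 0x45, so its sleep
 * queue, if any, lives on the chain at sleepq_chains[0x45].
 */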

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *	c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};
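
/*
 * The resulting shape, sketched for a single chain holding two wait
 * channels with three threads asleep on the first channel:
 *
 *	sc_queues --> sq (wchan A) --> sq (wchan B)
 *	              sq_blocked: td1 -> td2 -> td3
 *	              sq_free:    td2's queue -> td3's queue
 *
 * td1 blocked first, so the queue it lent holds all three waiters; the
 * queues lent by td2 and td3 are parked on its free list until
 * sleepq_resume_thread() hands one back to each thread it wakes.
 */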

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}
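
/*
 * The intended pairing (a sketch; the actual call sites live in the
 * thread creation and destruction paths) gives each thread one sleep
 * queue for its entire lifetime:
 *
 *	td->td_sleepqueue = sleepq_alloc();	(at thread creation)
 *	...
 *	sleepq_free(td->td_sleepqueue);		(at thread destruction)
 */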

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table, locking the associated sleep queue chain.  Return holding the sleep
 * queue chain lock.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
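
/*
 * Note that sleepq_lookup() returns with the chain locked even when it
 * finds no queue, so every lookup must be balanced by a release (or by
 * a wait that consumes the lock).  An illustrative fragment:
 *
 *	sq = sleepq_lookup(wchan);
 *	if (sq == NULL) {
 *		sleepq_release(wchan);		(chain lock is still held)
 *		return;
 *	}
 *	...
 *	sleepq_release(wchan);
 */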

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
    const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* If the passed-in sleep queue is NULL, use this thread's queue. */
	if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
#endif
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE)
		td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
}
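
/*
 * A caller typically passes sleepq_add() the queue returned by
 * sleepq_lookup() directly; a NULL queue just means this thread donates
 * its own.  An illustrative fragment (assuming the SLEEPQ_MSLEEP type
 * flag and a "lock" that guards the sleep condition):
 *
 *	sq = sleepq_lookup(wchan);		(may be NULL)
 *	sleepq_add(sq, wchan, lock, "example", SLEEPQ_MSLEEP);
 *	sleepq_wait(wchan);
 */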

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}
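
/*
 * The timeout is in ticks, so a roughly one-second timeout is timo = hz.
 * An illustrative fragment (SLEEPQ_MSLEEP assumed as the type flag):
 *
 *	sq = sleepq_lookup(wchan);
 *	sleepq_add(sq, wchan, lock, "example", SLEEPQ_MSLEEP);
 *	sleepq_set_timeout(wchan, hz);		(about one second)
 *	error = sleepq_timedwait(wchan);	(0 or EWOULDBLOCK)
 */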

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	int do_upcall;
	int sig;

	do_upcall = 0;
	td = curthread;
	p = td->td_proc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, p->p_comm);

	/* Mark thread as being in an interruptible sleep. */
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(TD_ON_SLEEPQ(td));
	sleepq_release(wchan);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (sig == 0 && thread_suspend_check(1))
		sig = SIGSTOP;
	else
		do_upcall = thread_upcall_check(td);
	PROC_UNLOCK(p);

	/*
	 * If there were pending signals and this thread is still on
	 * the sleep queue, remove it from the sleep queue.  If the
	 * thread was removed from the sleep queue while we were blocked
	 * above, then clear TDF_SINTR before returning.
	 */
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0))
		sleepq_resume_thread(sq, td, -1);
	else if (!TD_ON_SLEEPQ(td) && sig == 0)
		td->td_flags &= ~TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		mtx_lock_spin(&sched_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sc->sc_lock);

	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_SINTR is clear, then we were awakened while executing
	 * sleepq_catch_signals().
	 */
	if (!(td->td_flags & TDF_SINTR))
		return (0);

	/* We are no longer in an interruptible sleep. */
	td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);
	return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't time out, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
	struct thread *td;
	struct proc *p;
	int rval;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	/* XXX: Should we always be calling cursig()? */
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	} else
		rval = 0;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	MPASS(!(curthread->td_flags & TDF_SINTR));
	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
	int rval;

	MPASS(!(curthread->td_flags & TDF_SINTR));
	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
	int rvalt, rvals;

	sleepq_switch(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught || rvalt == 0)
		return (rvals);
	else
		return (rvalt);
}
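
/*
 * Putting the interruptible pieces together, an msleep()-style caller
 * with both PCATCH and a timeout would, as an illustrative sketch, do
 * roughly:
 *
 *	sq = sleepq_lookup(wchan);
 *	sleepq_add(sq, wchan, lock, wmesg,
 *	    SLEEPQ_MSLEEP | SLEEPQ_INTERRUPTIBLE);
 *	sleepq_set_timeout(wchan, timo);
 *	sig = sleepq_catch_signals(wchan);
 *	error = sleepq_timedwait_sig(wchan, sig != 0);
 *	if (error == 0)
 *		error = sleepq_calc_signal_retval(sig);
 *
 * ending with 0, EWOULDBLOCK, EINTR, or ERESTART in error.
 */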

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static void
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		sched_prio(td, pri);
	setrunnable(td);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked, td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	mtx_lock_spin(&sched_lock);
	sleepq_resume_thread(sq, besttd, pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}
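
/*
 * The wakeup side is self-contained: no chain lock is held on entry
 * since sleepq_lookup() takes it.  A wakeup_one()-style wrapper (a
 * sketch, assuming the SLEEPQ_MSLEEP type flag) reduces to:
 *
 *	sleepq_signal(wchan, SLEEPQ_MSLEEP, -1);
 *
 * where a pri of -1 requests no priority adjustment for the thread
 * that is awakened.
 */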

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);

	/* Resume all blocked threads on the sleep queue. */
	mtx_lock_spin(&sched_lock);
	while (!TAILQ_EMPTY(&sq->sq_blocked))
		sleepq_resume_thread(sq, TAILQ_FIRST(&sq->sq_blocked), pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}
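
/*
 * Likewise, a wakeup()-style "wake all waiters" (sketch, SLEEPQ_MSLEEP
 * assumed) is simply:
 *
 *	sleepq_broadcast(wchan, SLEEPQ_MSLEEP, -1);
 *
 * Each pass through the loop above hands a queue back to the thread it
 * resumes, so the last thread resumed reclaims the queue that was
 * attached to the channel, leaving the chain entry empty.
 */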

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and its
	 * call to callout_stop() to stop this routine would have failed,
	 * meaning that it would have already set TDF_TIMEOUT to
	 * synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		sleepq_resume_thread(sq, td, -1);
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_resume_thread(sq, td, -1);
	sleepq_release(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
	wchan = td->td_wchan;
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}