/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queues of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues also support a timeout and can be interrupted by signals.
 * That said, there are several similarities between the turnstile and sleep
 * queue implementations.  (Note: turnstiles were implemented first.)  For
 * example, both use a hash table of the same size where each bucket is
 * referred to as a "chain" that contains both a spin lock and a linked list
 * of queues.  An individual queue is located by using a hash to pick a
 * chain, locking the chain, and then walking the chain searching for the
 * queue.  This means that a wait channel object does not need to embed its
 * queue head just as locks do not embed their turnstile queue head.  Threads
 * also carry around a sleep queue that they lend to the wait channel when
 * blocking.  Just as in turnstiles, the queue includes a free list of the
 * sleep queues of other threads blocked on the same wait channel in the
 * case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also catch
 * signals while it is asleep (an interruptible sleep).  The signal code
 * uses sleepq_abort() to interrupt a sleeping thread.  Finally, sleep
 * queues also provide some extra assertions.  One is not allowed to mix
 * the sleep/wakeup and cv APIs for a given wait channel.  Also, one must
 * consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */

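/*
 * To make the API described above concrete, a minimal consumer looks
 * roughly like the sketch below (modeled on how msleep() and the cv
 * code drive this API; illustrative only -- "chan" and "lock" are
 * hypothetical, and the type flag must match the consumer, e.g.
 * SLEEPQ_CONDVAR for condition variables).  Note that the interlock
 * may be dropped after sleepq_lock() since the chain lock is what
 * protects the wakeup race.  Blocking side:
 *
 *      sleepq_lock(chan);
 *      mtx_unlock(lock);
 *      sleepq_add(chan, lock, "example", SLEEPQ_CONDVAR, 0);
 *      sleepq_wait(chan);
 *
 * Wakeup side (sleepq_signal() drops the chain lock itself; a pri of
 * -1 requests no priority adjustment):
 *
 *      sleepq_lock(chan);
 *      sleepq_signal(chan, SLEEPQ_CONDVAR, -1, 0);
 */
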
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.3/sys/kern/subr_sleepqueue.c 173886 2007-11-24 19:45:58Z cvs2svn $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define SC_TABLESIZE    128                     /* Must be power of 2. */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
#define NR_SLEEPQS      2
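
/*
 * As a worked example of the hashing above (pointer value hypothetical):
 * for a wait channel at 0xc2f5e428, SC_HASH() discards the low SC_SHIFT
 * (8) bits and masks in the next 7, so the channel hashes to chain
 * (0xc2f5e428 >> 8) & 0x7f = 0xc2f5e4 & 0x7f = 0x64, i.e. bucket 100 of
 * the 128-entry sleepq_chains[] table.
 */
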
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
        TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
        LIST_ENTRY(sleepqueue) sq_hash;         /* (c) Chain and free list. */
        LIST_HEAD(, sleepqueue) sq_free;        /* (c) Free queues. */
        void    *sq_wchan;                      /* (c) Wait channel. */
#ifdef INVARIANTS
        int     sq_type;                        /* (c) Queue type. */
        struct mtx *sq_lock;                    /* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
        struct mtx sc_lock;                     /* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
        u_int   sc_depth;                       /* Length of sc_queues. */
        u_int   sc_max_depth;                   /* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved by a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int      sleepq_catch_signals(void *wchan);
static int      sleepq_check_signals(void);
static int      sleepq_check_timeout(void);
#ifdef INVARIANTS
static void     sleepq_dtor(void *mem, int size, void *arg);
#endif
static int      sleepq_init(void *mem, int size, int flags);
static void     sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
                    int pri);
static void     sleepq_switch(void *wchan);
static void     sleepq_timeout(void *arg);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
        struct sysctl_oid *chain_oid;
        char chain_name[10];
#endif
        int i;

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
                    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
                    NULL);
#endif
        }
        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
            NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
            NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

        thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

        return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

        uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}

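/*
 * sleepq_lock(), sleepq_lookup() and sleepq_release() (below) compose
 * into the usual lock/inspect/unlock pattern.  A minimal sketch, with
 * "chan" as a hypothetical wait channel:
 *
 *      sleepq_lock(chan);
 *      if (sleepq_lookup(chan) != NULL) {
 *              ... at least one thread is blocked on chan ...
 *      }
 *      sleepq_release(chan);
 */
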
/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags,
    int queue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue != NULL);
        MPASS(wchan != NULL);
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));

        /* If this thread is not allowed to sleep, die a horrible death. */
        KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
            ("Trying sleep, but thread marked as sleeping prohibited"));

        /* Look up the sleep queue associated with the wait channel 'wchan'. */
        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel does not already have a sleep queue, use
         * this thread's sleep queue.  Otherwise, insert the current thread
         * into the sleep queue already in use by this wait channel.
         */
        if (sq == NULL) {
#ifdef INVARIANTS
                int i;

                sq = td->td_sleepqueue;
                for (i = 0; i < NR_SLEEPQS; i++)
                        KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
                            ("thread's sleep queue %d is not empty", i));
                KASSERT(LIST_EMPTY(&sq->sq_free),
                    ("thread's sleep queue has a non-empty free list"));
                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
                sq->sq_lock = lock;
                sq->sq_type = flags & SLEEPQ_TYPE;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth++;
                if (sc->sc_depth > sc->sc_max_depth) {
                        sc->sc_max_depth = sc->sc_depth;
                        if (sc->sc_max_depth > sleepq_max_depth)
                                sleepq_max_depth = sc->sc_max_depth;
                }
#endif
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
        } else {
                MPASS(wchan == sq->sq_wchan);
                MPASS(lock == sq->sq_lock);
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
        td->td_sleepqueue = NULL;
        mtx_lock_spin(&sched_lock);
        td->td_sqqueue = queue;
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
        if (flags & SLEEPQ_INTERRUPTIBLE) {
                td->td_flags |= TDF_SINTR;
                td->td_flags &= ~TDF_SLEEPABORT;
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}

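/*
 * Combining sleepq_add() and sleepq_set_timeout(), an interruptible
 * sleep bounded to roughly one second (hz ticks) looks like the sketch
 * below ("chan" and "lock" are hypothetical and error handling is
 * elided; the type flag should match the consumer as usual):
 *
 *      sleepq_lock(chan);
 *      mtx_unlock(lock);
 *      sleepq_add(chan, lock, "tmwait",
 *          SLEEPQ_CONDVAR | SLEEPQ_INTERRUPTIBLE, 0);
 *      sleepq_set_timeout(chan, hz);
 *      error = sleepq_timedwait_sig(chan);
 */
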
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Returns with the sleep queue chain lock and sched_lock held.
 */
static int
sleepq_catch_signals(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        struct proc *p;
        struct sigacts *ps;
        int sig, ret;

        td = curthread;
        p = curproc;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(wchan != NULL);
        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
            (void *)td, (long)p->p_pid, p->p_comm);

        MPASS(td->td_flags & TDF_SINTR);
        mtx_unlock_spin(&sc->sc_lock);

        /* See if there are any pending signals for this thread. */
        PROC_LOCK(p);
        ps = p->p_sigacts;
        mtx_lock(&ps->ps_mtx);
        sig = cursig(td);
        if (sig == 0) {
                mtx_unlock(&ps->ps_mtx);
                ret = thread_suspend_check(1);
                MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
        } else {
                if (SIGISMEMBER(ps->ps_sigintr, sig))
                        ret = EINTR;
                else
                        ret = ERESTART;
                mtx_unlock(&ps->ps_mtx);
        }

        if (ret == 0) {
                mtx_lock_spin(&sc->sc_lock);
                /*
                 * Lock sched_lock before unlocking the proc lock;
                 * without this, we could lose a race.
                 */
                mtx_lock_spin(&sched_lock);
                PROC_UNLOCK(p);
                if (!(td->td_flags & TDF_INTERRUPT))
                        return (0);
                /* KSE threads tried unblocking us. */
                ret = td->td_intrval;
                mtx_unlock_spin(&sched_lock);
                MPASS(ret == EINTR || ret == ERESTART);
        } else {
                PROC_UNLOCK(p);
                mtx_lock_spin(&sc->sc_lock);
        }
        /*
         * There were pending signals and this thread is still
         * on the sleep queue, so remove it from the sleep queue.
         */
        sq = sleepq_lookup(wchan);
        mtx_lock_spin(&sched_lock);
        if (TD_ON_SLEEPQ(td))
                sleepq_resume_thread(sq, td, -1);
        return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * If we have a sleep queue, then we've already been woken up, so
         * just return.
         */
        if (td->td_sleepqueue != NULL) {
                MPASS(!TD_ON_SLEEPQ(td));
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }

        /*
         * Otherwise, actually go to sleep.
         */
        mtx_unlock_spin(&sc->sc_lock);
        sched_sleep(td);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL, NULL);
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        td = curthread;

        /*
         * If TDF_TIMEOUT is set, we timed out.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                return (EWOULDBLOCK);
        }

        /*
         * If TDF_TIMOFAIL is set, the timeout ran after we had
         * already been woken up.
         */
        if (td->td_flags & TDF_TIMOFAIL)
                td->td_flags &= ~TDF_TIMOFAIL;

        /*
         * If callout_stop() fails, then the timeout is running on
         * another CPU, so synchronize with it to avoid having it
         * accidentally wake up a subsequent sleep.
         */
        else if (callout_stop(&td->td_slpcallout) == 0) {
                td->td_flags |= TDF_TIMEOUT;
                TD_SET_SLEEPING(td);
                mi_switch(SW_INVOL, NULL);
        }
        return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        td = curthread;

        /* We are no longer in an interruptible sleep. */
        if (td->td_flags & TDF_SINTR)
                td->td_flags &= ~TDF_SINTR;

        if (td->td_flags & TDF_SLEEPABORT) {
                td->td_flags &= ~TDF_SLEEPABORT;
                return (td->td_intrval);
        }

        if (td->td_flags & TDF_INTERRUPT)
                return (td->td_intrval);

        return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

        MPASS(!(curthread->td_flags & TDF_SINTR));
        mtx_lock_spin(&sched_lock);
        sleepq_switch(wchan);
        mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
        int rcatch;
        int rval;

        rcatch = sleepq_catch_signals(wchan);
        if (rcatch == 0)
                sleepq_switch(wchan);
        else
                sleepq_release(wchan);
        rval = sleepq_check_signals();
        mtx_unlock_spin(&sched_lock);
        if (rcatch)
                return (rcatch);
        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
        int rval;

        MPASS(!(curthread->td_flags & TDF_SINTR));
        mtx_lock_spin(&sched_lock);
        sleepq_switch(wchan);
        rval = sleepq_check_timeout();
        mtx_unlock_spin(&sched_lock);
        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan)
{
        int rcatch, rvalt, rvals;

        rcatch = sleepq_catch_signals(wchan);
        if (rcatch == 0)
                sleepq_switch(wchan);
        else
                sleepq_release(wchan);
        rvalt = sleepq_check_timeout();
        rvals = sleepq_check_signals();
        mtx_unlock_spin(&sched_lock);
        if (rcatch)
                return (rcatch);
        if (rvals)
                return (rvals);
        return (rvalt);
}

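/*
 * Callers of the _sig and timed variants above typically dispatch on
 * the returned errno value along these lines (a sketch only; msleep()-
 * style consumers additionally map ERESTART to a restarted syscall):
 *
 *      error = sleepq_timedwait_sig(chan);
 *      if (error == 0)
 *              ;       // awakened by sleepq_signal()/sleepq_broadcast()
 *      else if (error == EWOULDBLOCK)
 *              ;       // the timeout fired first
 *      else if (error == EINTR || error == ERESTART)
 *              ;       // interrupted by a signal
 */
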
/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static void
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
        struct sleepqueue_chain *sc;

        MPASS(td != NULL);
        MPASS(sq->sq_wchan != NULL);
        MPASS(td->td_wchan == sq->sq_wchan);
        MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
        sc = SC_LOOKUP(sq->sq_wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED);

        /* Remove the thread from the queue. */
        TAILQ_REMOVE(&sq->sq_blocked[(int)td->td_sqqueue], td, td_slpq);

        /*
         * Get a sleep queue for this thread.  If this is the last waiter,
         * use the queue itself and take it out of the chain, otherwise,
         * remove a queue from the free list.
         */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
#ifdef INVARIANTS
                sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth--;
#endif
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);

        td->td_wmesg = NULL;
        td->td_wchan = NULL;
        td->td_flags &= ~TDF_SINTR;

        /*
         * Note that thread td might not be sleeping if it is running
         * sleepq_catch_signals() on another CPU or is blocked on
         * its proc lock to check signals.  It doesn't hurt to clear
         * the sleeping flag if it isn't set though, so we just always
         * do it.  However, we can't assert that it is set.
         */
        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
        TD_CLR_SLEEPING(td);

        /* Adjust priority if requested. */
        MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
        if (pri != -1 && td->td_priority > pri)
                sched_prio(td, pri);
        setrunnable(td);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
        struct sleepqueue *sq;
        int i;

        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++)
                MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
        struct sleepqueue *sq;
        int i;

        bzero(mem, size);
        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++)
                TAILQ_INIT(&sq->sq_blocked[i]);
        LIST_INIT(&sq->sq_free);
        return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
        struct thread *td, *besttd;

        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return;
        }
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /*
         * Find the highest priority thread on the queue.  If there is a
         * tie, use the thread that first appears in the queue as it has
         * been sleeping the longest since threads are always added to
         * the tail of sleep queues.
         */
        besttd = NULL;
        TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
                if (besttd == NULL || td->td_priority < besttd->td_priority)
                        besttd = td;
        }
        MPASS(besttd != NULL);
        mtx_lock_spin(&sched_lock);
        sleepq_resume_thread(sq, besttd, pri);
        mtx_unlock_spin(&sched_lock);
        sleepq_release(wchan);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;

        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return;
        }
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Resume all blocked threads on the sleep queue. */
        mtx_lock_spin(&sched_lock);
        while (!TAILQ_EMPTY(&sq->sq_blocked[queue]))
                sleepq_resume_thread(sq, TAILQ_FIRST(&sq->sq_blocked[queue]),
                    pri);
        mtx_unlock_spin(&sched_lock);
        sleepq_release(wchan);
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
        struct sleepqueue *sq;
        struct thread *td;
        void *wchan;

        td = arg;
        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

        /*
         * First, see if the thread is asleep and get the wait channel if
         * it is.
         */
        mtx_lock_spin(&sched_lock);
        if (TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                mtx_unlock_spin(&sched_lock);
                sleepq_lock(wchan);
                sq = sleepq_lookup(wchan);
                mtx_lock_spin(&sched_lock);
        } else {
                wchan = NULL;
                sq = NULL;
        }

        /*
         * At this point, if the thread is still on the sleep queue, we
         * have that sleep queue locked as it cannot migrate sleep queues
         * while we dropped sched_lock.  If it had resumed and was running
         * on another CPU while the lock was dropped, it would have seen
         * that TDF_TIMEOUT and TDF_TIMOFAIL are clear, so its call to
         * callout_stop() to stop this routine would have failed, meaning
         * it would have already set TDF_TIMEOUT to synchronize with this
         * function.
         */
        if (TD_ON_SLEEPQ(td)) {
                MPASS(td->td_wchan == wchan);
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                sleepq_resume_thread(sq, td, -1);
                mtx_unlock_spin(&sched_lock);
                sleepq_release(wchan);
                return;
        } else if (wchan != NULL)
                sleepq_release(wchan);

        /*
         * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
         * then the other thread has already yielded to us, so clear
         * the flag and resume it.  If TDF_TIMEOUT is not set, then we
         * know that the other thread is not on a sleep queue, but it
         * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
         * to let it know that the timeout has already run and doesn't
         * need to be canceled.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_IS_SLEEPING(td));
                td->td_flags &= ~TDF_TIMEOUT;
                TD_CLR_SLEEPING(td);
                setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
        mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
        struct sleepqueue *sq;

        /*
         * Look up the sleep queue for this wait channel, then re-check
         * that the thread is asleep on that channel; if it is not, bail.
         */
        MPASS(wchan != NULL);
        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        mtx_lock_spin(&sched_lock);
        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                mtx_unlock_spin(&sched_lock);
                sleepq_release(wchan);
                return;
        }
        MPASS(sq != NULL);

        /* Thread is asleep on sleep queue sq, so wake it up. */
        sleepq_resume_thread(sq, td, -1);
        sleepq_release(wchan);
        mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td, int intrval)
{
        void *wchan;

        mtx_assert(&sched_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_flags & TDF_SINTR);
        MPASS(intrval == EINTR || intrval == ERESTART);

        /*
         * If the TDF_TIMEOUT flag is set, just leave.  A
         * timeout is scheduled anyhow.
         */
        if (td->td_flags & TDF_TIMEOUT)
                return;

        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
        wchan = td->td_wchan;
        if (wchan != NULL) {
                td->td_intrval = intrval;
                td->td_flags |= TDF_SLEEPABORT;
        }
        mtx_unlock_spin(&sched_lock);
        sleepq_remove(td, wchan);
        mtx_lock_spin(&sched_lock);
}

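/*
 * As the asserts above imply, a caller such as the signal code invokes
 * sleepq_abort() with sched_lock held and only for a thread it has
 * verified is in an interruptible sleep, along these lines (an
 * illustrative sketch only):
 *
 *      mtx_lock_spin(&sched_lock);
 *      if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 *              sleepq_abort(td, EINTR);
 *      mtx_unlock_spin(&sched_lock);
 */
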
#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
#ifdef INVARIANTS
        struct lock_object *lock;
#endif
        struct thread *td;
        void *wchan;
        int i;

        if (!have_addr)
                return;

        /*
         * First, see if there is an active sleep queue for the wait channel
         * indicated by the address.
         */
        wchan = (void *)addr;
        sc = SC_LOOKUP(wchan);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        goto found;

        /*
         * Second, see if there is an active sleep queue at the address
         * indicated.
         */
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
                        if (sq == (struct sleepqueue *)addr)
                                goto found;
                }

        db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
        return;
found:
        db_printf("Wait channel: %p\n", sq->sq_wchan);
#ifdef INVARIANTS
        db_printf("Queue type: %d\n", sq->sq_type);
        if (sq->sq_lock) {
                lock = &sq->sq_lock->mtx_object;
                db_printf("Associated Interlock: %p - (%s) %s\n", lock,
                    LOCK_CLASS(lock)->lc_name, lock->lo_name);
        }
#endif
        db_printf("Blocked threads:\n");
        for (i = 0; i < NR_SLEEPQS; i++) {
                db_printf("\nQueue[%d]:\n", i);
                if (TAILQ_EMPTY(&sq->sq_blocked[i]))
                        db_printf("\tempty\n");
                else
                        TAILQ_FOREACH(td, &sq->sq_blocked[i], td_slpq) {
                                db_printf("\t%p (tid %d, pid %d, \"%s\")\n",
                                    td, td->td_tid, td->td_proc->p_pid,
                                    td->td_proc->p_comm);
                        }
        }
}
#endif