/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queue of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from sleep queues in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile, and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then the thread donates its turnstile to the free list of
 * the lock's turnstile.  When a thread is woken up, it takes a turnstile
 * from the free list if there are any other waiters.  If it is the only
 * thread blocked on the lock, then it reclaims the turnstile associated
 * with the lock and removes it from the hash table.
 */
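
/*
 * Illustrative usage (a sketch, not part of this file): a hypothetical
 * lock "foo" with a struct lock_object member named lock_object might
 * drive this interface as follows.  On a contested acquire:
 *
 *	ts = turnstile_lookup(&foo->lock_object);
 *	turnstile_wait(ts, &foo->lock_object, owner);
 *
 * turnstile_lookup() returns with the chain locked and may return NULL,
 * in which case turnstile_wait() donates the blocking thread's own
 * turnstile to the lock.  On a contested release:
 *
 *	ts = turnstile_lookup(&foo->lock_object);
 *	if (ts != NULL) {
 *		turnstile_broadcast(ts);
 *		turnstile_unpend(ts);
 *	} else
 *		turnstile_release(&foo->lock_object);
 */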

#include "opt_turnstile_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.4/sys/kern/subr_turnstile.c 145335 2005-04-20 19:11:07Z cvs2svn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for their
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
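
/*
 * Worked example (hypothetical address): for a lock at 0xdeadbe00,
 * TC_HASH() drops the low 8 bits (0xdeadbe00 >> 8 == 0xdeadbe) and masks
 * with TC_MASK (0x7f), giving chain index 0xbe & 0x7f == 62, so
 * TC_LOOKUP() resolves to &turnstile_chains[62].
 */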

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains two lists of threads.  The ts_blocked list is
 * a linked list of threads blocked on the turnstile's lock.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_broadcast() that are waiting to be put
 * on the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
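
/*
 * Illustrative snapshot (hypothetical threads): suppose td_A blocked on a
 * lock first and td_B second.  The turnstile attached to the lock is the
 * one td_A donated; td_B's turnstile sits on that turnstile's free list:
 *
 *	turnstile_chains[TC_HASH(lock)] -> ts  (ts_lockobj == lock)
 *	    ts_blocked: td_A -> td_B	(sorted, highest priority first)
 *	    ts_free:    td_B's donated turnstile
 *	    ts_owner:   the lock holder; ts is also on that thread's
 *			td_contested list via ts_link
 */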
struct turnstile {
	TAILQ_HEAD(, thread) ts_blocked;	/* (c + q) Blocked threads. */
	TAILQ_HEAD(, thread) ts_pending;	/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;		/* Length of tc_turnstiles. */
	u_int	tc_max_depth;		/* Max length of tc_turnstiles. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all of the threads holding locks that must
 * be released before this thread can run again.
 */
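
/*
 * Illustrative chain (hypothetical threads and priorities; smaller
 * td_priority values denote higher priority): if thread A (pri 80) blocks
 * on a mutex owned by thread B (pri 120), and B is itself blocked on a
 * mutex owned by thread C (pri 140), this walk lends A's priority of 80
 * first to B and then to C, so that C can run, drop its lock, and let the
 * chain unwind.
 */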
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td1;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This really isn't quite right.  We really ought
			 * to bump the priority of the thread that next
			 * acquires the lock.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a slock of an sx lock.  In that case
		 * it is possible for us to be in SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (pid %d) owns a non-sleepable lock",
		    td->td_proc->p_pid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check whether td is curthread (this should
		 * never happen, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If the thread is on a run queue, move it to the new run
		 * queue and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/*
		 * This thread may not be blocked on this turnstile anymore
		 * but instead might already be woken up on another CPU
		 * that is waiting on sched_lock in turnstile_unpend() to
		 * finish waking this thread up.  We can detect this case
		 * by checking to see if this thread has been given a
		 * turnstile by either turnstile_signal() or
		 * turnstile_broadcast().  In this case, treat the thread as
		 * if it was already running.
		 */
		if (td->td_turnstile != NULL) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.  It doesn't need to be moved
		 * if it is already at the head of the list or if
		 * the item in front of it still has a higher priority.
		 */
		if (td == TAILQ_FIRST(&ts->ts_blocked)) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		CTR4(KTR_LOCK,
		    "propagate_priority: td %p moved before %p on [%p] %s",
		    td, td1, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		mtx_unlock_spin(&tc->tc_lock);
	}
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * because turnstiles need to be ready very early, when the first mutexes
 * are initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == NULL);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&ts->ts_blocked);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (ts);
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}

/*
 * Look up the turnstile for a lock in the hash table, locking the
 * associated turnstile chain along the way.  Return with the turnstile
 * chain locked.  If no turnstile is found in the hash table, NULL is
 * returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			return (ts);
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td, *owner;

	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
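	/* A smaller td_priority value denotes a higher priority. */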
	if (td->td_priority < owner->td_priority)
		owner->td_priority = td->td_priority;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile ts.  This function will context
 * switch and not return until this thread has been woken back up.  This
 * function must be called with the appropriate turnstile chain locked and
 * will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct lock_object *lock,
    struct thread *owner)
{
	struct turnstile_chain *tc;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	MPASS(owner != NULL);
	MPASS(owner->td_proc->p_magic == P_MAGIC);

	/* If the passed in turnstile is NULL, use this thread's turnstile. */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
		    ("thread's turnstile has a non-empty queue"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
		MPASS(owner == ts->ts_owner);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle the race where a thread on another CPU that owns lock
	 * 'lock' could have woken us between our dropping the turnstile
	 * chain lock and acquiring sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
		    lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
		    __func__, td, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}
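
/*
 * Illustrative caller (a sketch; "m" and its lock_object member are
 * assumptions, not part of this interface).  A release path might do:
 *
 *	ts = turnstile_lookup(&m->lock_object);
 *	if (turnstile_signal(ts))
 *		the queue drained, so mark the lock uncontested;
 *	turnstile_unpend(ts);
 *
 * The return value thus tells the caller whether the lock may revert to
 * its uncontested fast path.
 */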

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
	TAILQ_HEAD(, thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	int cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);
	critical_enter();
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority, however (recall that lower numeric values denote
	 * higher priority).
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	critical_exit();
	mtx_unlock_spin(&sched_lock);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked));
}

/*
 * Returns true if a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_EMPTY(&ts->ts_blocked));
}