sys/kern/kern_lock.c
1 /*-
2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_hwpmc_hooks.h"
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_lock.c 301157 2016-06-01 18:32:20Z mjg $");
35
36 #include <sys/param.h>
37 #include <sys/kdb.h>
38 #include <sys/ktr.h>
39 #include <sys/lock.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50
51 #include <machine/cpu.h>
52
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
60 #endif
61
62 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
63 (LK_ADAPTIVE | LK_NOSHARE));
64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
66
67 #define SQ_EXCLUSIVE_QUEUE 0
68 #define SQ_SHARED_QUEUE 1
69
70 #ifndef INVARIANTS
71 #define _lockmgr_assert(lk, what, file, line)
72 #endif
73
74 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
75 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
76
77 #ifndef DEBUG_LOCKS
78 #define STACK_PRINT(lk)
79 #define STACK_SAVE(lk)
80 #define STACK_ZERO(lk)
81 #else
82 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
83 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
84 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
85 #endif
86
87 #define LOCK_LOG2(lk, string, arg1, arg2) \
88 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
89 CTR2(KTR_LOCK, (string), (arg1), (arg2))
90 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
91 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
92 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
93
94 #define GIANT_DECLARE \
95 int _i = 0; \
96 WITNESS_SAVE_DECL(Giant)
97 #define GIANT_RESTORE() do { \
98 if (_i > 0) { \
99 while (_i--) \
100 mtx_lock(&Giant); \
101 WITNESS_RESTORE(&Giant.lock_object, Giant); \
102 } \
103 } while (0)
104 #define GIANT_SAVE() do { \
105 if (mtx_owned(&Giant)) { \
106 WITNESS_SAVE(&Giant.lock_object, Giant); \
107 while (mtx_owned(&Giant)) { \
108 _i++; \
109 mtx_unlock(&Giant); \
110 } \
111 } \
112 } while (0)
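/*
 * A lockmgr consumer may hold Giant, possibly recursively, when it needs
 * to sleep.  GIANT_SAVE() records the recursion depth in _i and fully
 * releases Giant before the sleep; GIANT_RESTORE() then reacquires it the
 * same number of times after wakeup.
 */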
113
114 #define LK_CAN_SHARE(x, flags) \
115 (((x) & LK_SHARE) && \
116 (((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \
117 (curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || \
118 (curthread->td_pflags & TDP_DEADLKTREAT)))
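/*
 * A shared request can proceed only while the lock word has LK_SHARE set
 * (true for LK_UNLOCKED, which is encoded as zero sharers with LK_SHARE
 * on) and, normally, no exclusive waiter or spinner is pending.  A thread
 * that already holds shared lockmgr locks (unless the caller passed
 * LK_NODDLKTREAT) or that is flagged TDP_DEADLKTREAT may jump past
 * pending exclusive waiters in order to avoid deadlocks.
 */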
119 #define LK_TRYOP(x) \
120 ((x) & LK_NOWAIT)
121
122 #define LK_CAN_WITNESS(x) \
123 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
124 #define LK_TRYWIT(x) \
125 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
126
127 #define LK_CAN_ADAPT(lk, f) \
128 (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
129 ((f) & LK_SLEEPFAIL) == 0)
130
131 #define lockmgr_disowned(lk) \
132 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
133
134 #define lockmgr_xlocked(lk) \
135 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
136
137 static void assert_lockmgr(const struct lock_object *lock, int how);
138 #ifdef DDB
139 static void db_show_lockmgr(const struct lock_object *lock);
140 #endif
141 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
142 #ifdef KDTRACE_HOOKS
143 static int owner_lockmgr(const struct lock_object *lock,
144 struct thread **owner);
145 #endif
146 static uintptr_t unlock_lockmgr(struct lock_object *lock);
147
148 struct lock_class lock_class_lockmgr = {
149 .lc_name = "lockmgr",
150 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
151 .lc_assert = assert_lockmgr,
152 #ifdef DDB
153 .lc_ddb_show = db_show_lockmgr,
154 #endif
155 .lc_lock = lock_lockmgr,
156 .lc_unlock = unlock_lockmgr,
157 #ifdef KDTRACE_HOOKS
158 .lc_owner = owner_lockmgr,
159 #endif
160 };
161
162 #ifdef ADAPTIVE_LOCKMGRS
163 static u_int alk_retries = 10;
164 static u_int alk_loops = 10000;
165 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
166 "lockmgr debugging");
167 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
168 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
169 #endif
170
171 static __inline struct thread *
172 lockmgr_xholder(const struct lock *lk)
173 {
174 uintptr_t x;
175
176 x = lk->lk_lock;
177 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
178 }
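/*
 * Layout of the lk_lock word, as assumed throughout this file: the low
 * bits hold LK_SHARE plus the waiters/spinners flags (LK_FLAGMASK).
 * When LK_SHARE is set, the remaining bits carry the sharer count
 * (LK_SHARERS()); when it is clear, they carry the owning thread pointer
 * (LK_HOLDER()), or LK_KERNPROC for a disowned lock.
 */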
179
180 /*
181 * Assumes the sleepqueue chain lock is held on entry and returns with it
182 * released.  Also assumes the generic interlock is sane and was previously
183 * checked.  If LK_INTERLOCK is specified, the interlock is not reacquired
184 * after the sleep.
185 */
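/*
 * Returns 0 after a normal wakeup, an error such as EINTR, ERESTART or
 * EWOULDBLOCK from an interruptible or timed sleep, and ENOLCK when
 * LK_SLEEPFAIL was requested and the sleep itself succeeded.
 */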
186 static __inline int
187 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
188 const char *wmesg, int pri, int timo, int queue)
189 {
190 GIANT_DECLARE;
191 struct lock_class *class;
192 int catch, error;
193
194 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
195 catch = pri & PCATCH;
196 pri &= PRIMASK;
197 error = 0;
198
199 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
200 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
201
202 if (flags & LK_INTERLOCK)
203 class->lc_unlock(ilk);
204 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
205 lk->lk_exslpfail++;
206 GIANT_SAVE();
207 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
208 SLEEPQ_INTERRUPTIBLE : 0), queue);
209 if ((flags & LK_TIMELOCK) && timo)
210 sleepq_set_timeout(&lk->lock_object, timo);
211
212 /*
213 * Decide which sleep primitive to use and actually sleep.
214 */
215 if ((flags & LK_TIMELOCK) && timo && catch)
216 error = sleepq_timedwait_sig(&lk->lock_object, pri);
217 else if ((flags & LK_TIMELOCK) && timo)
218 error = sleepq_timedwait(&lk->lock_object, pri);
219 else if (catch)
220 error = sleepq_wait_sig(&lk->lock_object, pri);
221 else
222 sleepq_wait(&lk->lock_object, pri);
223 GIANT_RESTORE();
224 if ((flags & LK_SLEEPFAIL) && error == 0)
225 error = ENOLCK;
226
227 return (error);
228 }
229
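/*
 * wakeupshlk() returns non-zero when proc0 (the swapper) must be woken
 * up; callers accumulate this value and invoke kick_proc0() only after
 * all sleepqueue locks have been released.
 */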
230 static __inline int
231 wakeupshlk(struct lock *lk, const char *file, int line)
232 {
233 uintptr_t v, x;
234 u_int realexslp;
235 int queue, wakeup_swapper;
236
237 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
238 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
239
240 wakeup_swapper = 0;
241 for (;;) {
242 x = lk->lk_lock;
243
244 /*
245 * If there is more than one shared lock held, just drop one
246 * and return.
247 */
248 if (LK_SHARERS(x) > 1) {
249 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
250 x - LK_ONE_SHARER))
251 break;
252 continue;
253 }
254
255 /*
256 * If there are no waiters on the exclusive queue, drop the
257 * lock quickly.
258 */
259 if ((x & LK_ALL_WAITERS) == 0) {
260 MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
261 LK_SHARERS_LOCK(1));
262 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
263 break;
264 continue;
265 }
266
267 /*
268 * We should have a sharer with waiters, so enter the hard
269 * path in order to handle wakeups correctly.
270 */
271 sleepq_lock(&lk->lock_object);
272 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
273 v = LK_UNLOCKED;
274
275 /*
276 * If the lock has exclusive waiters, give them preference in
277 * order to avoid a deadlock with shared runners-up.
278 * If interruptible sleeps left the exclusive queue empty,
279 * avoid starving the threads sleeping on the shared queue:
280 * give them precedence and clear the exclusive waiters bit
281 * anyway.
282 * Note that the lk_exslpfail count may overstate the real
283 * number of waiters sleeping with the LK_SLEEPFAIL flag set,
284 * because such waiters may also be using interruptible
285 * sleeps; treat lk_exslpfail as an upper bound, edge cases
286 * included.
287 */
288 realexslp = sleepq_sleepcnt(&lk->lock_object,
289 SQ_EXCLUSIVE_QUEUE);
290 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
291 if (lk->lk_exslpfail < realexslp) {
292 lk->lk_exslpfail = 0;
293 queue = SQ_EXCLUSIVE_QUEUE;
294 v |= (x & LK_SHARED_WAITERS);
295 } else {
296 lk->lk_exslpfail = 0;
297 LOCK_LOG2(lk,
298 "%s: %p has only LK_SLEEPFAIL sleepers",
299 __func__, lk);
300 LOCK_LOG2(lk,
301 "%s: %p waking up threads on the exclusive queue",
302 __func__, lk);
303 wakeup_swapper =
304 sleepq_broadcast(&lk->lock_object,
305 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
306 queue = SQ_SHARED_QUEUE;
307 }
308
309 } else {
310
311 /*
312 * Exclusive waiters sleeping with LK_SLEEPFAIL on
313 * and using interruptible sleeps/timeout may have
314 * left spurious lk_exslpfail counts on, so clean
315 * it up anyway.
316 */
317 lk->lk_exslpfail = 0;
318 queue = SQ_SHARED_QUEUE;
319 }
320
321 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
322 v)) {
323 sleepq_release(&lk->lock_object);
324 continue;
325 }
326 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
327 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
328 "exclusive");
329 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
330 0, queue);
331 sleepq_release(&lk->lock_object);
332 break;
333 }
334
335 lock_profile_release_lock(&lk->lock_object);
336 TD_LOCKS_DEC(curthread);
337 TD_SLOCKS_DEC(curthread);
338 return (wakeup_swapper);
339 }
340
341 static void
342 assert_lockmgr(const struct lock_object *lock, int what)
343 {
344
345 panic("lockmgr locks do not support assertions");
346 }
347
348 static void
349 lock_lockmgr(struct lock_object *lock, uintptr_t how)
350 {
351
352 panic("lockmgr locks do not support sleep interlocking");
353 }
354
355 static uintptr_t
356 unlock_lockmgr(struct lock_object *lock)
357 {
358
359 panic("lockmgr locks do not support sleep interlocking");
360 }
361
362 #ifdef KDTRACE_HOOKS
363 static int
364 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
365 {
366
367 panic("lockmgr locks do not support owner inquiring");
368 }
369 #endif
370
371 void
372 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
373 {
374 int iflags;
375
376 MPASS((flags & ~LK_INIT_MASK) == 0);
377 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
378 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
379 &lk->lk_lock));
380
381 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
382 if (flags & LK_CANRECURSE)
383 iflags |= LO_RECURSABLE;
384 if ((flags & LK_NODUP) == 0)
385 iflags |= LO_DUPOK;
386 if (flags & LK_NOPROFILE)
387 iflags |= LO_NOPROFILE;
388 if ((flags & LK_NOWITNESS) == 0)
389 iflags |= LO_WITNESS;
390 if (flags & LK_QUIET)
391 iflags |= LO_QUIET;
392 if (flags & LK_IS_VNODE)
393 iflags |= LO_IS_VNODE;
394 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
395
396 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
397 lk->lk_lock = LK_UNLOCKED;
398 lk->lk_recurse = 0;
399 lk->lk_exslpfail = 0;
400 lk->lk_timo = timo;
401 lk->lk_pri = pri;
402 STACK_ZERO(lk);
403 }
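/*
 * Illustrative usage sketch (hypothetical names, not from this file): a
 * consumer initializes the lock once, then drives it through the
 * lockmgr(9) wrapper, which supplies the default wmesg, priority and
 * timeout recorded here by lockinit():
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolck", 0, LK_CANRECURSE);
 *	(void)lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	...
 *	(void)lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	lockdestroy(&foo_lock);
 */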
404
405 /*
406 * XXX: Gross hacks to manipulate external lock flags after
407 * initialization. Used for certain vnode and buf locks.
408 */
409 void
410 lockallowshare(struct lock *lk)
411 {
412
413 lockmgr_assert(lk, KA_XLOCKED);
414 lk->lock_object.lo_flags &= ~LK_NOSHARE;
415 }
416
417 void
418 lockdisableshare(struct lock *lk)
419 {
420
421 lockmgr_assert(lk, KA_XLOCKED);
422 lk->lock_object.lo_flags |= LK_NOSHARE;
423 }
424
425 void
426 lockallowrecurse(struct lock *lk)
427 {
428
429 lockmgr_assert(lk, KA_XLOCKED);
430 lk->lock_object.lo_flags |= LO_RECURSABLE;
431 }
432
433 void
434 lockdisablerecurse(struct lock *lk)
435 {
436
437 lockmgr_assert(lk, KA_XLOCKED);
438 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
439 }
440
441 void
442 lockdestroy(struct lock *lk)
443 {
444
445 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
446 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
447 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
448 lock_destroy(&lk->lock_object);
449 }
450
451 int
452 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
453 const char *wmesg, int pri, int timo, const char *file, int line)
454 {
455 GIANT_DECLARE;
456 struct lock_class *class;
457 const char *iwmesg;
458 uintptr_t tid, v, x;
459 u_int op, realexslp;
460 int error, ipri, itimo, queue, wakeup_swapper;
461 #ifdef LOCK_PROFILING
462 uint64_t waittime = 0;
463 int contested = 0;
464 #endif
465 #ifdef ADAPTIVE_LOCKMGRS
466 volatile struct thread *owner;
467 u_int i, spintries = 0;
468 #endif
469
470 error = 0;
471 tid = (uintptr_t)curthread;
472 op = (flags & LK_TYPE_MASK);
473 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
474 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
475 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
476
477 MPASS((flags & ~LK_TOTAL_MASK) == 0);
478 KASSERT((op & (op - 1)) == 0,
479 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
480 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
481 (op != LK_DOWNGRADE && op != LK_RELEASE),
482 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
483 __func__, file, line));
484 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
485 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
486 __func__, file, line));
487 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
488 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
489 lk->lock_object.lo_name, file, line));
490
491 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
492 if (panicstr != NULL) {
493 if (flags & LK_INTERLOCK)
494 class->lc_unlock(ilk);
495 return (0);
496 }
497
498 if (lk->lock_object.lo_flags & LK_NOSHARE) {
499 switch (op) {
500 case LK_SHARED:
501 op = LK_EXCLUSIVE;
502 break;
503 case LK_UPGRADE:
504 case LK_TRYUPGRADE:
505 case LK_DOWNGRADE:
506 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
507 file, line);
508 if (flags & LK_INTERLOCK)
509 class->lc_unlock(ilk);
510 return (0);
511 }
512 }
513
514 wakeup_swapper = 0;
515 switch (op) {
516 case LK_SHARED:
517 if (LK_CAN_WITNESS(flags))
518 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
519 file, line, flags & LK_INTERLOCK ? ilk : NULL);
520 for (;;) {
521 x = lk->lk_lock;
522
523 /*
524 * If no other thread has an exclusive lock, or
525 * no exclusive waiter is present, bump the count of
526 * sharers. Since we have to preserve the state of
527 * waiters, if we fail to acquire the shared lock,
528 * loop back and retry.
529 */
530 if (LK_CAN_SHARE(x, flags)) {
531 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
532 x + LK_ONE_SHARER))
533 break;
534 continue;
535 }
536 #ifdef HWPMC_HOOKS
537 PMC_SOFT_CALL( , , lock, failed);
538 #endif
539 lock_profile_obtain_lock_failed(&lk->lock_object,
540 &contested, &waittime);
541
542 /*
543 * If the lock is already held by curthread in
544 * exclusive mode, avoid a deadlock.
545 */
546 if (LK_HOLDER(x) == tid) {
547 LOCK_LOG2(lk,
548 "%s: %p already held in exclusive mode",
549 __func__, lk);
550 error = EDEADLK;
551 break;
552 }
553
554 /*
555 * If the operation is not allowed to sleep, just give
556 * up and return.
557 */
558 if (LK_TRYOP(flags)) {
559 LOCK_LOG2(lk, "%s: %p fails the try operation",
560 __func__, lk);
561 error = EBUSY;
562 break;
563 }
564
565 #ifdef ADAPTIVE_LOCKMGRS
566 /*
567 * If the owner is running on another CPU, spin until
568 * the owner stops running or the state of the lock
569 * changes.  Two states must be handled here: on a
570 * failed acquisition the lock can be held either in
571 * exclusive mode or in shared mode (due to the
572 * writer starvation avoidance technique).
573 */
574 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
575 LK_HOLDER(x) != LK_KERNPROC) {
576 owner = (struct thread *)LK_HOLDER(x);
577 if (LOCK_LOG_TEST(&lk->lock_object, 0))
578 CTR3(KTR_LOCK,
579 "%s: spinning on %p held by %p",
580 __func__, lk, owner);
581 KTR_STATE1(KTR_SCHED, "thread",
582 sched_tdname(curthread), "spinning",
583 "lockname:\"%s\"", lk->lock_object.lo_name);
584
585 /*
586 * If we are also holding an interlock, drop it
587 * in order to avoid a deadlock if the lockmgr
588 * owner is adaptively spinning on the
589 * interlock itself.
590 */
591 if (flags & LK_INTERLOCK) {
592 class->lc_unlock(ilk);
593 flags &= ~LK_INTERLOCK;
594 }
595 GIANT_SAVE();
596 while (LK_HOLDER(lk->lk_lock) ==
597 (uintptr_t)owner && TD_IS_RUNNING(owner))
598 cpu_spinwait();
599 KTR_STATE0(KTR_SCHED, "thread",
600 sched_tdname(curthread), "running");
601 GIANT_RESTORE();
602 continue;
603 } else if (LK_CAN_ADAPT(lk, flags) &&
604 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
605 spintries < alk_retries) {
606 KTR_STATE1(KTR_SCHED, "thread",
607 sched_tdname(curthread), "spinning",
608 "lockname:\"%s\"", lk->lock_object.lo_name);
609 if (flags & LK_INTERLOCK) {
610 class->lc_unlock(ilk);
611 flags &= ~LK_INTERLOCK;
612 }
613 GIANT_SAVE();
614 spintries++;
615 for (i = 0; i < alk_loops; i++) {
616 if (LOCK_LOG_TEST(&lk->lock_object, 0))
617 CTR4(KTR_LOCK,
618 "%s: shared spinning on %p with %u and %u",
619 __func__, lk, spintries, i);
620 x = lk->lk_lock;
621 if ((x & LK_SHARE) == 0 ||
622 LK_CAN_SHARE(x, flags) != 0)
623 break;
624 cpu_spinwait();
625 }
626 KTR_STATE0(KTR_SCHED, "thread",
627 sched_tdname(curthread), "running");
628 GIANT_RESTORE();
629 if (i != alk_loops)
630 continue;
631 }
632 #endif
633
634 /*
635 * Acquire the sleepqueue chain lock because we
636 * probably will need to manipulate waiters flags.
637 */
638 sleepq_lock(&lk->lock_object);
639 x = lk->lk_lock;
640
641 /*
642 * If the lock can be acquired in shared mode, try
643 * again.
644 */
645 if (LK_CAN_SHARE(x, flags)) {
646 sleepq_release(&lk->lock_object);
647 continue;
648 }
649
650 #ifdef ADAPTIVE_LOCKMGRS
651 /*
652 * The current lock owner might have started executing
653 * on another CPU (or the lock could have changed
654 * owner) while we were waiting on the sleepqueue
655 * chain lock. If so, drop the sleepqueue lock and try
656 * again.
657 */
658 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
659 LK_HOLDER(x) != LK_KERNPROC) {
660 owner = (struct thread *)LK_HOLDER(x);
661 if (TD_IS_RUNNING(owner)) {
662 sleepq_release(&lk->lock_object);
663 continue;
664 }
665 }
666 #endif
667
668 /*
669 * Try to set the LK_SHARED_WAITERS flag. If we fail,
670 * loop back and retry.
671 */
672 if ((x & LK_SHARED_WAITERS) == 0) {
673 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
674 x | LK_SHARED_WAITERS)) {
675 sleepq_release(&lk->lock_object);
676 continue;
677 }
678 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
679 __func__, lk);
680 }
681
682 /*
683 * Since we have been unable to acquire the
684 * shared lock and the shared waiters flag is set,
685 * we will sleep.
686 */
687 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
688 SQ_SHARED_QUEUE);
689 flags &= ~LK_INTERLOCK;
690 if (error) {
691 LOCK_LOG3(lk,
692 "%s: interrupted sleep for %p with %d",
693 __func__, lk, error);
694 break;
695 }
696 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
697 __func__, lk);
698 }
699 if (error == 0) {
700 lock_profile_obtain_lock_success(&lk->lock_object,
701 contested, waittime, file, line);
702 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
703 line);
704 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
705 line);
706 TD_LOCKS_INC(curthread);
707 TD_SLOCKS_INC(curthread);
708 STACK_SAVE(lk);
709 }
710 break;
711 case LK_UPGRADE:
712 case LK_TRYUPGRADE:
713 _lockmgr_assert(lk, KA_SLOCKED, file, line);
714 v = lk->lk_lock;
715 x = v & LK_ALL_WAITERS;
716 v &= LK_EXCLUSIVE_SPINNERS;
717
718 /*
719 * Try to switch from one shared lock to an exclusive one.
720 * We need to preserve waiters flags during the operation.
721 */
722 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
723 tid | x)) {
724 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
725 line);
726 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
727 LK_TRYWIT(flags), file, line);
728 TD_SLOCKS_DEC(curthread);
729 break;
730 }
731
732 /*
733 * In LK_TRYUPGRADE mode, do not drop the lock,
734 * returning EBUSY instead.
735 */
736 if (op == LK_TRYUPGRADE) {
737 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
738 __func__, lk);
739 error = EBUSY;
740 break;
741 }
742
743 /*
744 * We were unable to upgrade, so just give up the
745 * shared lock.
746 */
747 wakeup_swapper |= wakeupshlk(lk, file, line);
748
749 /* FALLTHROUGH */
750 case LK_EXCLUSIVE:
751 if (LK_CAN_WITNESS(flags))
752 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
753 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
754 ilk : NULL);
755
756 /*
757 * If curthread already holds the lock and this one is
758 * allowed to recurse, simply recurse on it.
759 */
760 if (lockmgr_xlocked(lk)) {
761 if ((flags & LK_CANRECURSE) == 0 &&
762 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
763
764 /*
765 * If this is a try operation, give up and
766 * return instead of panicking.
767 */
768 if (LK_TRYOP(flags)) {
769 LOCK_LOG2(lk,
770 "%s: %p fails the try operation",
771 __func__, lk);
772 error = EBUSY;
773 break;
774 }
775 if (flags & LK_INTERLOCK)
776 class->lc_unlock(ilk);
777 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
778 __func__, iwmesg, file, line);
779 }
780 lk->lk_recurse++;
781 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
782 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
783 lk->lk_recurse, file, line);
784 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
785 LK_TRYWIT(flags), file, line);
786 TD_LOCKS_INC(curthread);
787 break;
788 }
789
790 for (;;) {
791 if (lk->lk_lock == LK_UNLOCKED &&
792 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
793 break;
794 #ifdef HWPMC_HOOKS
795 PMC_SOFT_CALL( , , lock, failed);
796 #endif
797 lock_profile_obtain_lock_failed(&lk->lock_object,
798 &contested, &waittime);
799
800 /*
801 * If the operation is not allowed to sleep, just give
802 * up and return.
803 */
804 if (LK_TRYOP(flags)) {
805 LOCK_LOG2(lk, "%s: %p fails the try operation",
806 __func__, lk);
807 error = EBUSY;
808 break;
809 }
810
811 #ifdef ADAPTIVE_LOCKMGRS
812 /*
813 * If the owner is running on another CPU, spin until
814 * the owner stops running or the state of the lock
815 * changes.
816 */
817 x = lk->lk_lock;
818 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
819 LK_HOLDER(x) != LK_KERNPROC) {
820 owner = (struct thread *)LK_HOLDER(x);
821 if (LOCK_LOG_TEST(&lk->lock_object, 0))
822 CTR3(KTR_LOCK,
823 "%s: spinning on %p held by %p",
824 __func__, lk, owner);
825 KTR_STATE1(KTR_SCHED, "thread",
826 sched_tdname(curthread), "spinning",
827 "lockname:\"%s\"", lk->lock_object.lo_name);
828
829 /*
830 * If we are also holding an interlock, drop it
831 * in order to avoid a deadlock if the lockmgr
832 * owner is adaptively spinning on the
833 * interlock itself.
834 */
835 if (flags & LK_INTERLOCK) {
836 class->lc_unlock(ilk);
837 flags &= ~LK_INTERLOCK;
838 }
839 GIANT_SAVE();
840 while (LK_HOLDER(lk->lk_lock) ==
841 (uintptr_t)owner && TD_IS_RUNNING(owner))
842 cpu_spinwait();
843 KTR_STATE0(KTR_SCHED, "thread",
844 sched_tdname(curthread), "running");
845 GIANT_RESTORE();
846 continue;
847 } else if (LK_CAN_ADAPT(lk, flags) &&
848 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
849 spintries < alk_retries) {
850 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
851 !atomic_cmpset_ptr(&lk->lk_lock, x,
852 x | LK_EXCLUSIVE_SPINNERS))
853 continue;
854 KTR_STATE1(KTR_SCHED, "thread",
855 sched_tdname(curthread), "spinning",
856 "lockname:\"%s\"", lk->lock_object.lo_name);
857 if (flags & LK_INTERLOCK) {
858 class->lc_unlock(ilk);
859 flags &= ~LK_INTERLOCK;
860 }
861 GIANT_SAVE();
862 spintries++;
863 for (i = 0; i < alk_loops; i++) {
864 if (LOCK_LOG_TEST(&lk->lock_object, 0))
865 CTR4(KTR_LOCK,
866 "%s: shared spinning on %p with %u and %u",
867 __func__, lk, spintries, i);
868 if ((lk->lk_lock &
869 LK_EXCLUSIVE_SPINNERS) == 0)
870 break;
871 cpu_spinwait();
872 }
873 KTR_STATE0(KTR_SCHED, "thread",
874 sched_tdname(curthread), "running");
875 GIANT_RESTORE();
876 if (i != alk_loops)
877 continue;
878 }
879 #endif
880
881 /*
882 * Acquire the sleepqueue chain lock because we
883 * probably will need to manipulate waiters flags.
884 */
885 sleepq_lock(&lk->lock_object);
886 x = lk->lk_lock;
887
888 /*
889 * If the lock has been released while we spun on
890 * the sleepqueue chain lock just try again.
891 */
892 if (x == LK_UNLOCKED) {
893 sleepq_release(&lk->lock_object);
894 continue;
895 }
896
897 #ifdef ADAPTIVE_LOCKMGRS
898 /*
899 * The current lock owner might have started executing
900 * on another CPU (or the lock could have changed
901 * owner) while we were waiting on the sleepqueue
902 * chain lock. If so, drop the sleepqueue lock and try
903 * again.
904 */
905 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
906 LK_HOLDER(x) != LK_KERNPROC) {
907 owner = (struct thread *)LK_HOLDER(x);
908 if (TD_IS_RUNNING(owner)) {
909 sleepq_release(&lk->lock_object);
910 continue;
911 }
912 }
913 #endif
914
915 /*
916 * The lock can be in the state where there is a
917 * pending queue of waiters, but still no owner.
918 * This happens when the lock is contested and an
919 * owner is going to claim the lock.
920 * If curthread is the one that successfully acquires
921 * it, claim lock ownership and return, preserving the
922 * waiters flags.
923 */
924 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
925 if ((x & ~v) == LK_UNLOCKED) {
926 v &= ~LK_EXCLUSIVE_SPINNERS;
927 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
928 tid | v)) {
929 sleepq_release(&lk->lock_object);
930 LOCK_LOG2(lk,
931 "%s: %p claimed by a new writer",
932 __func__, lk);
933 break;
934 }
935 sleepq_release(&lk->lock_object);
936 continue;
937 }
938
939 /*
940 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
941 * fail, loop back and retry.
942 */
943 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
944 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
945 x | LK_EXCLUSIVE_WAITERS)) {
946 sleepq_release(&lk->lock_object);
947 continue;
948 }
949 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
950 __func__, lk);
951 }
952
953 /*
954 * Since we have been unable to acquire the
955 * exclusive lock and the exclusive waiters flag
956 * is set, we will sleep.
957 */
958 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
959 SQ_EXCLUSIVE_QUEUE);
960 flags &= ~LK_INTERLOCK;
961 if (error) {
962 LOCK_LOG3(lk,
963 "%s: interrupted sleep for %p with %d",
964 __func__, lk, error);
965 break;
966 }
967 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
968 __func__, lk);
969 }
970 if (error == 0) {
971 lock_profile_obtain_lock_success(&lk->lock_object,
972 contested, waittime, file, line);
973 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
974 lk->lk_recurse, file, line);
975 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
976 LK_TRYWIT(flags), file, line);
977 TD_LOCKS_INC(curthread);
978 STACK_SAVE(lk);
979 }
980 break;
981 case LK_DOWNGRADE:
982 _lockmgr_assert(lk, KA_XLOCKED, file, line);
983 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
984 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
985
986 /*
987 * Panic if the lock is recursed.
988 */
989 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
990 if (flags & LK_INTERLOCK)
991 class->lc_unlock(ilk);
992 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
993 __func__, iwmesg, file, line);
994 }
995 TD_SLOCKS_INC(curthread);
996
997 /*
998 * In order to preserve waiters flags, just spin.
999 */
1000 for (;;) {
1001 x = lk->lk_lock;
1002 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1003 x &= LK_ALL_WAITERS;
1004 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1005 LK_SHARERS_LOCK(1) | x))
1006 break;
1007 cpu_spinwait();
1008 }
1009 break;
1010 case LK_RELEASE:
1011 _lockmgr_assert(lk, KA_LOCKED, file, line);
1012 x = lk->lk_lock;
1013
1014 if ((x & LK_SHARE) == 0) {
1015
1016 /*
1017 * As a first option, treat the lock as if it has no
1018 * waiters.
1019 * Fix up the tid variable if the lock has been disowned.
1020 */
1021 if (LK_HOLDER(x) == LK_KERNPROC)
1022 tid = LK_KERNPROC;
1023 else {
1024 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
1025 file, line);
1026 TD_LOCKS_DEC(curthread);
1027 }
1028 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1029 lk->lk_recurse, file, line);
1030
1031 /*
1032 * The lock is held in exclusive mode.
1033 * If the lock is recursed also, then unrecurse it.
1034 */
1035 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1036 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1037 lk);
1038 lk->lk_recurse--;
1039 break;
1040 }
1041 if (tid != LK_KERNPROC)
1042 lock_profile_release_lock(&lk->lock_object);
1043
1044 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1045 LK_UNLOCKED))
1046 break;
1047
1048 sleepq_lock(&lk->lock_object);
1049 x = lk->lk_lock;
1050 v = LK_UNLOCKED;
1051
1052 /*
1053 * If the lock has exclusive waiters, give them
1054 * preference in order to avoid a deadlock with
1055 * shared runners-up.
1056 * If interruptible sleeps left the exclusive queue
1057 * empty, avoid starving the threads sleeping on the
1058 * shared queue: give them precedence and clear the
1059 * exclusive waiters bit anyway.
1060 * Note that the lk_exslpfail count may overstate
1061 * the real number of waiters sleeping with the
1062 * LK_SLEEPFAIL flag set, because such waiters may
1063 * also be using interruptible sleeps; treat
1064 * lk_exslpfail as an upper bound, edge cases
1065 * included.
1066 */
1067 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1068 realexslp = sleepq_sleepcnt(&lk->lock_object,
1069 SQ_EXCLUSIVE_QUEUE);
1070 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1071 if (lk->lk_exslpfail < realexslp) {
1072 lk->lk_exslpfail = 0;
1073 queue = SQ_EXCLUSIVE_QUEUE;
1074 v |= (x & LK_SHARED_WAITERS);
1075 } else {
1076 lk->lk_exslpfail = 0;
1077 LOCK_LOG2(lk,
1078 "%s: %p has only LK_SLEEPFAIL sleepers",
1079 __func__, lk);
1080 LOCK_LOG2(lk,
1081 "%s: %p waking up threads on the exclusive queue",
1082 __func__, lk);
1083 wakeup_swapper =
1084 sleepq_broadcast(&lk->lock_object,
1085 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1086 queue = SQ_SHARED_QUEUE;
1087 }
1088 } else {
1089
1090 /*
1091 * Exclusive waiters sleeping with LK_SLEEPFAIL
1092 * on and using interruptible sleeps/timeout
1093 * may have left spurious lk_exslpfail counts
1094 * on, so clean it up anyway.
1095 */
1096 lk->lk_exslpfail = 0;
1097 queue = SQ_SHARED_QUEUE;
1098 }
1099
1100 LOCK_LOG3(lk,
1101 "%s: %p waking up threads on the %s queue",
1102 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1103 "exclusive");
1104 atomic_store_rel_ptr(&lk->lk_lock, v);
1105 wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1106 SLEEPQ_LK, 0, queue);
1107 sleepq_release(&lk->lock_object);
1108 break;
1109 } else
1110 wakeup_swapper = wakeupshlk(lk, file, line);
1111 break;
1112 case LK_DRAIN:
1113 if (LK_CAN_WITNESS(flags))
1114 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1115 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1116 ilk : NULL);
1117
1118 /*
1119 * Trying to drain a lock we already own will result in a
1120 * deadlock.
1121 */
1122 if (lockmgr_xlocked(lk)) {
1123 if (flags & LK_INTERLOCK)
1124 class->lc_unlock(ilk);
1125 panic("%s: draining %s with the lock held @ %s:%d\n",
1126 __func__, iwmesg, file, line);
1127 }
1128
1129 for (;;) {
1130 if (lk->lk_lock == LK_UNLOCKED &&
1131 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1132 break;
1133
1134 #ifdef HWPMC_HOOKS
1135 PMC_SOFT_CALL( , , lock, failed);
1136 #endif
1137 lock_profile_obtain_lock_failed(&lk->lock_object,
1138 &contested, &waittime);
1139
1140 /*
1141 * If the operation is not allowed to sleep, just give
1142 * up and return.
1143 */
1144 if (LK_TRYOP(flags)) {
1145 LOCK_LOG2(lk, "%s: %p fails the try operation",
1146 __func__, lk);
1147 error = EBUSY;
1148 break;
1149 }
1150
1151 /*
1152 * Acquire the sleepqueue chain lock because we
1153 * probably will need to manipulate waiters flags.
1154 */
1155 sleepq_lock(&lk->lock_object);
1156 x = lk->lk_lock;
1157
1158 /*
1159 * If the lock has been released while we spun on
1160 * the sleepqueue chain lock just try again.
1161 */
1162 if (x == LK_UNLOCKED) {
1163 sleepq_release(&lk->lock_object);
1164 continue;
1165 }
1166
1167 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1168 if ((x & ~v) == LK_UNLOCKED) {
1169 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1170
1171 /*
1172 * If interruptible sleeps left the exclusive
1173 * queue empty, avoid starving the threads
1174 * sleeping on the shared queue: give them
1175 * precedence and clear the exclusive waiters
1176 * bit anyway.
1177 * Note that the lk_exslpfail count may
1178 * overstate the real number of waiters
1179 * sleeping with the LK_SLEEPFAIL flag set,
1180 * because such waiters may also be using
1181 * interruptible sleeps; treat lk_exslpfail
1182 * as an upper bound, including the edge
1183 * cases.
1184 */
1185 if (v & LK_EXCLUSIVE_WAITERS) {
1186 queue = SQ_EXCLUSIVE_QUEUE;
1187 v &= ~LK_EXCLUSIVE_WAITERS;
1188 } else {
1189
1190 /*
1191 * Exclusive waiters sleeping with
1192 * LK_SLEEPFAIL on and using
1193 * interruptible sleeps/timeout may
1194 * have left spurious lk_exslpfail
1195 * counts on, so clean it up anyway.
1196 */
1197 MPASS(v & LK_SHARED_WAITERS);
1198 lk->lk_exslpfail = 0;
1199 queue = SQ_SHARED_QUEUE;
1200 v &= ~LK_SHARED_WAITERS;
1201 }
1202 if (queue == SQ_EXCLUSIVE_QUEUE) {
1203 realexslp =
1204 sleepq_sleepcnt(&lk->lock_object,
1205 SQ_EXCLUSIVE_QUEUE);
1206 if (lk->lk_exslpfail >= realexslp) {
1207 lk->lk_exslpfail = 0;
1208 queue = SQ_SHARED_QUEUE;
1209 v &= ~LK_SHARED_WAITERS;
1210 if (realexslp != 0) {
1211 LOCK_LOG2(lk,
1212 "%s: %p has only LK_SLEEPFAIL sleepers",
1213 __func__, lk);
1214 LOCK_LOG2(lk,
1215 "%s: %p waking up threads on the exclusive queue",
1216 __func__, lk);
1217 wakeup_swapper =
1218 sleepq_broadcast(
1219 &lk->lock_object,
1220 SLEEPQ_LK, 0,
1221 SQ_EXCLUSIVE_QUEUE);
1222 }
1223 } else
1224 lk->lk_exslpfail = 0;
1225 }
1226 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1227 sleepq_release(&lk->lock_object);
1228 continue;
1229 }
1230 LOCK_LOG3(lk,
1231 "%s: %p waking up all threads on the %s queue",
1232 __func__, lk, queue == SQ_SHARED_QUEUE ?
1233 "shared" : "exclusive");
1234 wakeup_swapper |= sleepq_broadcast(
1235 &lk->lock_object, SLEEPQ_LK, 0, queue);
1236
1237 /*
1238 * If shared waiters have been woken up we need
1239 * to wait for one of them to acquire the lock
1240 * before setting the exclusive waiters flag, in
1241 * order to avoid a deadlock.
1242 */
1243 if (queue == SQ_SHARED_QUEUE) {
1244 for (v = lk->lk_lock;
1245 (v & LK_SHARE) && !LK_SHARERS(v);
1246 v = lk->lk_lock)
1247 cpu_spinwait();
1248 }
1249 }
1250
1251 /*
1252 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1253 * fail, loop back and retry.
1254 */
1255 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1256 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1257 x | LK_EXCLUSIVE_WAITERS)) {
1258 sleepq_release(&lk->lock_object);
1259 continue;
1260 }
1261 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1262 __func__, lk);
1263 }
1264
1265 /*
1266 * Since we have been unable to acquire the
1267 * exclusive lock and the exclusive waiters flag
1268 * is set, we will sleep.
1269 */
1270 if (flags & LK_INTERLOCK) {
1271 class->lc_unlock(ilk);
1272 flags &= ~LK_INTERLOCK;
1273 }
1274 GIANT_SAVE();
1275 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1276 SQ_EXCLUSIVE_QUEUE);
1277 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1278 GIANT_RESTORE();
1279 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1280 __func__, lk);
1281 }
1282
1283 if (error == 0) {
1284 lock_profile_obtain_lock_success(&lk->lock_object,
1285 contested, waittime, file, line);
1286 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1287 lk->lk_recurse, file, line);
1288 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1289 LK_TRYWIT(flags), file, line);
1290 TD_LOCKS_INC(curthread);
1291 STACK_SAVE(lk);
1292 }
1293 break;
1294 default:
1295 if (flags & LK_INTERLOCK)
1296 class->lc_unlock(ilk);
1297 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1298 }
1299
1300 if (flags & LK_INTERLOCK)
1301 class->lc_unlock(ilk);
1302 if (wakeup_swapper)
1303 kick_proc0();
1304
1305 return (error);
1306 }
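/*
 * Illustrative sketch of LK_INTERLOCK (hypothetical names): the caller
 * passes in a held interlock, which __lockmgr_args() releases once the
 * lock state has been examined, closing the window between a check made
 * under the interlock and the sleep performed here.  The interlock is
 * never reacquired, whatever the outcome:
 *
 *	mtx_lock(&foo_mtx);
 *	... inspect state protected by foo_mtx ...
 *	error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK, &foo_mtx);
 *	... foo_mtx is unlocked here, even on error ...
 */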
1307
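/*
 * Illustrative note: disowning supports hand-off patterns in which a lock
 * acquired by one thread must be released by another, e.g. buffer locks
 * passed to the buf subsystem (BUF_KERNPROC()) so that I/O completion can
 * unlock them from a different context.  A hypothetical sketch:
 *
 *	(void)lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&foo_lock);	(owner becomes LK_KERNPROC)
 *	... later, from any thread ...
 *	(void)lockmgr(&foo_lock, LK_RELEASE, NULL);
 */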
1308 void
1309 _lockmgr_disown(struct lock *lk, const char *file, int line)
1310 {
1311 uintptr_t tid, x;
1312
1313 if (SCHEDULER_STOPPED())
1314 return;
1315
1316 tid = (uintptr_t)curthread;
1317 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1318
1319 /*
1320 * Panic if the lock is recursed.
1321 */
1322 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1323 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1324 __func__, file, line);
1325
1326 /*
1327 * If the owner is already LK_KERNPROC just skip the whole operation.
1328 */
1329 if (LK_HOLDER(lk->lk_lock) != tid)
1330 return;
1331 lock_profile_release_lock(&lk->lock_object);
1332 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1333 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1334 TD_LOCKS_DEC(curthread);
1335 STACK_SAVE(lk);
1336
1337 /*
1338 * In order to preserve waiters flags, just spin.
1339 */
1340 for (;;) {
1341 x = lk->lk_lock;
1342 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1343 x &= LK_ALL_WAITERS;
1344 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1345 LK_KERNPROC | x))
1346 return;
1347 cpu_spinwait();
1348 }
1349 }
1350
1351 void
1352 lockmgr_printinfo(const struct lock *lk)
1353 {
1354 struct thread *td;
1355 uintptr_t x;
1356
1357 if (lk->lk_lock == LK_UNLOCKED)
1358 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1359 else if (lk->lk_lock & LK_SHARE)
1360 printf("lock type %s: SHARED (count %ju)\n",
1361 lk->lock_object.lo_name,
1362 (uintmax_t)LK_SHARERS(lk->lk_lock));
1363 else {
1364 td = lockmgr_xholder(lk);
1365 if (td == (struct thread *)LK_KERNPROC)
1366 printf("lock type %s: EXCL by KERNPROC\n",
1367 lk->lock_object.lo_name);
1368 else
1369 printf("lock type %s: EXCL by thread %p "
1370 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1371 td, td->td_proc->p_pid, td->td_proc->p_comm,
1372 td->td_tid);
1373 }
1374
1375 x = lk->lk_lock;
1376 if (x & LK_EXCLUSIVE_WAITERS)
1377 printf(" with exclusive waiters pending\n");
1378 if (x & LK_SHARED_WAITERS)
1379 printf(" with shared waiters pending\n");
1380 if (x & LK_EXCLUSIVE_SPINNERS)
1381 printf(" with exclusive spinners pending\n");
1382
1383 STACK_PRINT(lk);
1384 }
1385
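/*
 * Illustrative note: lockstatus() reports LK_EXCLUSIVE when curthread (or
 * LK_KERNPROC) owns the lock, LK_EXCLOTHER when another thread does,
 * LK_SHARED when it is share-locked, and 0 when it is unlocked, so a
 * hypothetical caller can assert ownership with:
 *
 *	if (lockstatus(&foo_lock) != LK_EXCLUSIVE)
 *		panic("foo_lock not exclusively held");
 */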
1386 int
1387 lockstatus(const struct lock *lk)
1388 {
1389 uintptr_t v, x;
1390 int ret;
1391
1392 ret = LK_SHARED;
1393 x = lk->lk_lock;
1394 v = LK_HOLDER(x);
1395
1396 if ((x & LK_SHARE) == 0) {
1397 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1398 ret = LK_EXCLUSIVE;
1399 else
1400 ret = LK_EXCLOTHER;
1401 } else if (x == LK_UNLOCKED)
1402 ret = 0;
1403
1404 return (ret);
1405 }
1406
1407 #ifdef INVARIANT_SUPPORT
1408
1409 FEATURE(invariant_support,
1410 "Support for modules compiled with INVARIANTS option");
1411
1412 #ifndef INVARIANTS
1413 #undef _lockmgr_assert
1414 #endif
1415
1416 void
1417 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1418 {
1419 int slocked = 0;
1420
1421 if (panicstr != NULL)
1422 return;
1423 switch (what) {
1424 case KA_SLOCKED:
1425 case KA_SLOCKED | KA_NOTRECURSED:
1426 case KA_SLOCKED | KA_RECURSED:
1427 slocked = 1;
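/* FALLTHROUGH */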
1428 case KA_LOCKED:
1429 case KA_LOCKED | KA_NOTRECURSED:
1430 case KA_LOCKED | KA_RECURSED:
1431 #ifdef WITNESS
1432
1433 /*
1434 * We cannot trust WITNESS if the lock is held in exclusive
1435 * mode and a call to lockmgr_disown() happened.
1436 * Work around this by skipping the check if the lock is held
1437 * in exclusive mode, even for the KA_LOCKED case.
1438 */
1439 if (slocked || (lk->lk_lock & LK_SHARE)) {
1440 witness_assert(&lk->lock_object, what, file, line);
1441 break;
1442 }
1443 #endif
1444 if (lk->lk_lock == LK_UNLOCKED ||
1445 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1446 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1447 panic("Lock %s not %slocked @ %s:%d\n",
1448 lk->lock_object.lo_name, slocked ? "share" : "",
1449 file, line);
1450
1451 if ((lk->lk_lock & LK_SHARE) == 0) {
1452 if (lockmgr_recursed(lk)) {
1453 if (what & KA_NOTRECURSED)
1454 panic("Lock %s recursed @ %s:%d\n",
1455 lk->lock_object.lo_name, file,
1456 line);
1457 } else if (what & KA_RECURSED)
1458 panic("Lock %s not recursed @ %s:%d\n",
1459 lk->lock_object.lo_name, file, line);
1460 }
1461 break;
1462 case KA_XLOCKED:
1463 case KA_XLOCKED | KA_NOTRECURSED:
1464 case KA_XLOCKED | KA_RECURSED:
1465 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1466 panic("Lock %s not exclusively locked @ %s:%d\n",
1467 lk->lock_object.lo_name, file, line);
1468 if (lockmgr_recursed(lk)) {
1469 if (what & KA_NOTRECURSED)
1470 panic("Lock %s recursed @ %s:%d\n",
1471 lk->lock_object.lo_name, file, line);
1472 } else if (what & KA_RECURSED)
1473 panic("Lock %s not recursed @ %s:%d\n",
1474 lk->lock_object.lo_name, file, line);
1475 break;
1476 case KA_UNLOCKED:
1477 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1478 panic("Lock %s exclusively locked @ %s:%d\n",
1479 lk->lock_object.lo_name, file, line);
1480 break;
1481 default:
1482 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1483 line);
1484 }
1485 }
1486 #endif
1487
1488 #ifdef DDB
1489 int
1490 lockmgr_chain(struct thread *td, struct thread **ownerp)
1491 {
1492 struct lock *lk;
1493
1494 lk = td->td_wchan;
1495
1496 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1497 return (0);
1498 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1499 if (lk->lk_lock & LK_SHARE)
1500 db_printf("SHARED (count %ju)\n",
1501 (uintmax_t)LK_SHARERS(lk->lk_lock));
1502 else
1503 db_printf("EXCL\n");
1504 *ownerp = lockmgr_xholder(lk);
1505
1506 return (1);
1507 }
1508
1509 static void
1510 db_show_lockmgr(const struct lock_object *lock)
1511 {
1512 struct thread *td;
1513 const struct lock *lk;
1514
1515 lk = (const struct lock *)lock;
1516
1517 db_printf(" state: ");
1518 if (lk->lk_lock == LK_UNLOCKED)
1519 db_printf("UNLOCKED\n");
1520 else if (lk->lk_lock & LK_SHARE)
1521 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1522 else {
1523 td = lockmgr_xholder(lk);
1524 if (td == (struct thread *)LK_KERNPROC)
1525 db_printf("XLOCK: LK_KERNPROC\n");
1526 else
1527 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1528 td->td_tid, td->td_proc->p_pid,
1529 td->td_proc->p_comm);
1530 if (lockmgr_recursed(lk))
1531 db_printf(" recursed: %d\n", lk->lk_recurse);
1532 }
1533 db_printf(" waiters: ");
1534 switch (lk->lk_lock & LK_ALL_WAITERS) {
1535 case LK_SHARED_WAITERS:
1536 db_printf("shared\n");
1537 break;
1538 case LK_EXCLUSIVE_WAITERS:
1539 db_printf("exclusive\n");
1540 break;
1541 case LK_ALL_WAITERS:
1542 db_printf("shared and exclusive\n");
1543 break;
1544 default:
1545 db_printf("none\n");
1546 }
1547 db_printf(" spinners: ");
1548 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1549 db_printf("exclusive\n");
1550 else
1551 db_printf("none\n");
1552 }
1553 #endif