FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c
1 /*-
2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_hwpmc_hooks.h"
32 #include "opt_kdtrace.h"
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_lock.c 278694 2015-02-13 19:06:22Z sbruno $");
36
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/sleepqueue.h>
46 #ifdef DEBUG_LOCKS
47 #include <sys/stack.h>
48 #endif
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51
52 #include <machine/cpu.h>
53
54 #ifdef DDB
55 #include <ddb/ddb.h>
56 #endif
57
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62
63 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
64 (LK_ADAPTIVE | LK_NOSHARE));
65 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
66 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
67
68 #define SQ_EXCLUSIVE_QUEUE 0
69 #define SQ_SHARED_QUEUE 1
70
71 #ifndef INVARIANTS
72 #define _lockmgr_assert(lk, what, file, line)
73 #define TD_LOCKS_INC(td)
74 #define TD_LOCKS_DEC(td)
75 #else
76 #define TD_LOCKS_INC(td) ((td)->td_locks++)
77 #define TD_LOCKS_DEC(td) ((td)->td_locks--)
78 #endif
79 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
80 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
81
82 #ifndef DEBUG_LOCKS
83 #define STACK_PRINT(lk)
84 #define STACK_SAVE(lk)
85 #define STACK_ZERO(lk)
86 #else
87 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
88 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
89 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
90 #endif
91
92 #define LOCK_LOG2(lk, string, arg1, arg2) \
93 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
94 CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
96 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
97 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
98
99 #define GIANT_DECLARE \
100 int _i = 0; \
101 WITNESS_SAVE_DECL(Giant)
102 #define GIANT_RESTORE() do { \
103 if (_i > 0) { \
104 while (_i--) \
105 mtx_lock(&Giant); \
106 WITNESS_RESTORE(&Giant.lock_object, Giant); \
107 } \
108 } while (0)
109 #define GIANT_SAVE() do { \
110 if (mtx_owned(&Giant)) { \
111 WITNESS_SAVE(&Giant.lock_object, Giant); \
112 while (mtx_owned(&Giant)) { \
113 _i++; \
114 mtx_unlock(&Giant); \
115 } \
116 } \
117 } while (0)
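/*
 * GIANT_SAVE() fully releases Giant (counting any recursive
 * acquisitions in _i) before a thread spins or sleeps on a lockmgr
 * lock, and GIANT_RESTORE() reacquires it the same number of times
 * afterwards, so Giant is never held across a sleep here.
 */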
118
119 #define LK_CAN_SHARE(x, flags) \
120 (((x) & LK_SHARE) && \
121 (((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \
122 (curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || \
123 (curthread->td_pflags & TDP_DEADLKTREAT)))
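/*
 * LK_CAN_SHARE(): a shared request may proceed only while LK_SHARE is
 * set (lock unlocked or already held shared) and either no exclusive
 * waiters or spinners are queued, or the requester already holds
 * shared lockmgr locks and LK_NODDLKTREAT was not passed, or it has
 * TDP_DEADLKTREAT set; the last two cases trade writer fairness for
 * deadlock avoidance.
 */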
124 #define LK_TRYOP(x) \
125 ((x) & LK_NOWAIT)
126
127 #define LK_CAN_WITNESS(x) \
128 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
129 #define LK_TRYWIT(x) \
130 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
131
132 #define LK_CAN_ADAPT(lk, f) \
133 (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
134 ((f) & LK_SLEEPFAIL) == 0)
135
136 #define lockmgr_disowned(lk) \
137 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
138
139 #define lockmgr_xlocked(lk) \
140 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
141
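/*
 * The lk_lock word encodes the whole lock state: with LK_SHARE set it
 * holds the count of sharers plus the waiter/spinner flag bits, and
 * with LK_SHARE clear the bits outside the flag mask hold the owning
 * thread pointer (or LK_KERNPROC for a disowned lock), which is what
 * the two macros above compare against.
 */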
142 static void assert_lockmgr(const struct lock_object *lock, int how);
143 #ifdef DDB
144 static void db_show_lockmgr(const struct lock_object *lock);
145 #endif
146 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
147 #ifdef KDTRACE_HOOKS
148 static int owner_lockmgr(const struct lock_object *lock,
149 struct thread **owner);
150 #endif
151 static uintptr_t unlock_lockmgr(struct lock_object *lock);
152
153 struct lock_class lock_class_lockmgr = {
154 .lc_name = "lockmgr",
155 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
156 .lc_assert = assert_lockmgr,
157 #ifdef DDB
158 .lc_ddb_show = db_show_lockmgr,
159 #endif
160 .lc_lock = lock_lockmgr,
161 .lc_unlock = unlock_lockmgr,
162 #ifdef KDTRACE_HOOKS
163 .lc_owner = owner_lockmgr,
164 #endif
165 };
166
167 #ifdef ADAPTIVE_LOCKMGRS
168 static u_int alk_retries = 10;
169 static u_int alk_loops = 10000;
170 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
171 "lockmgr debugging");
172 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
173 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
174 #endif
175
176 static __inline struct thread *
177 lockmgr_xholder(const struct lock *lk)
178 {
179 uintptr_t x;
180
181 x = lk->lk_lock;
182 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
183 }
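/*
 * lockmgr_xholder() returns NULL when the lock is unlocked or held
 * shared; for a disowned lock it returns the LK_KERNPROC sentinel cast
 * to a thread pointer, which callers such as lockmgr_printinfo() check
 * for explicitly.
 */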
184
185 /*
186 * Assumes the sleepqueue lock is held on entry and returns with it released.
187 * It also assumes the generic interlock is sane and has already been checked.
188 * If LK_INTERLOCK is specified the interlock is not reacquired after the
189 * sleep.
190 */
191 static __inline int
192 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
193 const char *wmesg, int pri, int timo, int queue)
194 {
195 GIANT_DECLARE;
196 struct lock_class *class;
197 int catch, error;
198
199 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
200 catch = pri & PCATCH;
201 pri &= PRIMASK;
202 error = 0;
203
204 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
205 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
206
207 if (flags & LK_INTERLOCK)
208 class->lc_unlock(ilk);
209 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
210 lk->lk_exslpfail++;
211 GIANT_SAVE();
212 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
213 SLEEPQ_INTERRUPTIBLE : 0), queue);
214 if ((flags & LK_TIMELOCK) && timo)
215 sleepq_set_timeout(&lk->lock_object, timo);
216
217 /*
218 * Decisional switch for real sleeping.
219 */
220 if ((flags & LK_TIMELOCK) && timo && catch)
221 error = sleepq_timedwait_sig(&lk->lock_object, pri);
222 else if ((flags & LK_TIMELOCK) && timo)
223 error = sleepq_timedwait(&lk->lock_object, pri);
224 else if (catch)
225 error = sleepq_wait_sig(&lk->lock_object, pri);
226 else
227 sleepq_wait(&lk->lock_object, pri);
228 GIANT_RESTORE();
229 if ((flags & LK_SLEEPFAIL) && error == 0)
230 error = ENOLCK;
231
232 return (error);
233 }
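/*
 * A LK_SLEEPFAIL sleeper that wakes up normally still gets ENOLCK back
 * from sleeplk(), so its lockmgr operation fails rather than retrying;
 * the lk_exslpfail counter bumped in sleeplk() lets the unlock paths
 * detect exclusive queues populated only by such sleepers.
 */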
234
235 static __inline int
236 wakeupshlk(struct lock *lk, const char *file, int line)
237 {
238 uintptr_t v, x;
239 u_int realexslp;
240 int queue, wakeup_swapper;
241
242 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
243 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
244
245 wakeup_swapper = 0;
246 for (;;) {
247 x = lk->lk_lock;
248
249 /*
250 * If there is more than one shared lock held, just drop one
251 * and return.
252 */
253 if (LK_SHARERS(x) > 1) {
254 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
255 x - LK_ONE_SHARER))
256 break;
257 continue;
258 }
259
260 /*
261 * If there are no waiters on the exclusive queue, drop the
262 * lock quickly.
263 */
264 if ((x & LK_ALL_WAITERS) == 0) {
265 MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
266 LK_SHARERS_LOCK(1));
267 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
268 break;
269 continue;
270 }
271
272 /*
273 * We should have a sharer with waiters, so enter the hard
274 * path in order to handle wakeups correctly.
275 */
276 sleepq_lock(&lk->lock_object);
277 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
278 v = LK_UNLOCKED;
279
280 /*
281 * If the lock has exclusive waiters, give them preference in
282 * order to avoid deadlock with shared runners-up.
283 * If interruptible sleeps left the exclusive queue empty,
284 * avoid starvation of the threads sleeping on the shared
285 * queue by giving them precedence and clearing the
286 * exclusive waiters bit anyway.
287 * Note that the lk_exslpfail count may overstate the real
288 * number of waiters with the LK_SLEEPFAIL flag on, because
289 * such waiters may also be using interruptible sleeps, so
290 * lk_exslpfail should be treated as an upper bound,
291 * including the edge cases.
292 */
293 realexslp = sleepq_sleepcnt(&lk->lock_object,
294 SQ_EXCLUSIVE_QUEUE);
295 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
296 if (lk->lk_exslpfail < realexslp) {
297 lk->lk_exslpfail = 0;
298 queue = SQ_EXCLUSIVE_QUEUE;
299 v |= (x & LK_SHARED_WAITERS);
300 } else {
301 lk->lk_exslpfail = 0;
302 LOCK_LOG2(lk,
303 "%s: %p has only LK_SLEEPFAIL sleepers",
304 __func__, lk);
305 LOCK_LOG2(lk,
306 "%s: %p waking up threads on the exclusive queue",
307 __func__, lk);
308 wakeup_swapper =
309 sleepq_broadcast(&lk->lock_object,
310 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
311 queue = SQ_SHARED_QUEUE;
312 }
313
314 } else {
315
316 /*
317 * Exclusive waiters sleeping with LK_SLEEPFAIL on
318 * and using interruptible sleeps/timeout may have
319 * left spurious lk_exslpfail counts on, so clean
320 * it up anyway.
321 */
322 lk->lk_exslpfail = 0;
323 queue = SQ_SHARED_QUEUE;
324 }
325
326 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
327 v)) {
328 sleepq_release(&lk->lock_object);
329 continue;
330 }
331 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
332 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
333 "exclusive");
334 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
335 0, queue);
336 sleepq_release(&lk->lock_object);
337 break;
338 }
339
340 lock_profile_release_lock(&lk->lock_object);
341 TD_LOCKS_DEC(curthread);
342 TD_SLOCKS_DEC(curthread);
343 return (wakeup_swapper);
344 }
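/*
 * The non-zero return of sleepq_broadcast() is propagated as
 * wakeup_swapper: __lockmgr_args() accumulates it from this function
 * and from its own wakeup paths and calls kick_proc0() once the
 * requested operation has completed.
 */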
345
346 static void
347 assert_lockmgr(const struct lock_object *lock, int what)
348 {
349
350 panic("lockmgr locks do not support assertions");
351 }
352
353 static void
354 lock_lockmgr(struct lock_object *lock, uintptr_t how)
355 {
356
357 panic("lockmgr locks do not support sleep interlocking");
358 }
359
360 static uintptr_t
361 unlock_lockmgr(struct lock_object *lock)
362 {
363
364 panic("lockmgr locks do not support sleep interlocking");
365 }
366
367 #ifdef KDTRACE_HOOKS
368 static int
369 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
370 {
371
372 panic("lockmgr locks do not support owner inquiring");
373 }
374 #endif
375
376 void
377 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
378 {
379 int iflags;
380
381 MPASS((flags & ~LK_INIT_MASK) == 0);
382 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
383 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
384 &lk->lk_lock));
385
386 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
387 if (flags & LK_CANRECURSE)
388 iflags |= LO_RECURSABLE;
389 if ((flags & LK_NODUP) == 0)
390 iflags |= LO_DUPOK;
391 if (flags & LK_NOPROFILE)
392 iflags |= LO_NOPROFILE;
393 if ((flags & LK_NOWITNESS) == 0)
394 iflags |= LO_WITNESS;
395 if (flags & LK_QUIET)
396 iflags |= LO_QUIET;
397 if (flags & LK_IS_VNODE)
398 iflags |= LO_IS_VNODE;
399 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
400
401 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
402 lk->lk_lock = LK_UNLOCKED;
403 lk->lk_recurse = 0;
404 lk->lk_exslpfail = 0;
405 lk->lk_timo = timo;
406 lk->lk_pri = pri;
407 STACK_ZERO(lk);
408 }
409
410 /*
411 * XXX: Gross hacks to manipulate external lock flags after
412 * initialization. Used for certain vnode and buf locks.
413 */
414 void
415 lockallowshare(struct lock *lk)
416 {
417
418 lockmgr_assert(lk, KA_XLOCKED);
419 lk->lock_object.lo_flags &= ~LK_NOSHARE;
420 }
421
422 void
423 lockdisableshare(struct lock *lk)
424 {
425
426 lockmgr_assert(lk, KA_XLOCKED);
427 lk->lock_object.lo_flags |= LK_NOSHARE;
428 }
429
430 void
431 lockallowrecurse(struct lock *lk)
432 {
433
434 lockmgr_assert(lk, KA_XLOCKED);
435 lk->lock_object.lo_flags |= LO_RECURSABLE;
436 }
437
438 void
439 lockdisablerecurse(struct lock *lk)
440 {
441
442 lockmgr_assert(lk, KA_XLOCKED);
443 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
444 }
445
446 void
447 lockdestroy(struct lock *lk)
448 {
449
450 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
451 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
452 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
453 lock_destroy(&lk->lock_object);
454 }
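/*
 * Typical lifecycle, as a minimal sketch (the lock name and the PVFS
 * priority are illustrative, not taken from this file):
 *
 *	struct lock examplelock;
 *
 *	lockinit(&examplelock, PVFS, "example", 0, 0);
 *	lockmgr(&examplelock, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&examplelock, LK_RELEASE, NULL);
 *	lockdestroy(&examplelock);
 */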
455
456 int
457 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
458 const char *wmesg, int pri, int timo, const char *file, int line)
459 {
460 GIANT_DECLARE;
461 struct lock_class *class;
462 const char *iwmesg;
463 uintptr_t tid, v, x;
464 u_int op, realexslp;
465 int error, ipri, itimo, queue, wakeup_swapper;
466 #ifdef LOCK_PROFILING
467 uint64_t waittime = 0;
468 int contested = 0;
469 #endif
470 #ifdef ADAPTIVE_LOCKMGRS
471 volatile struct thread *owner;
472 u_int i, spintries = 0;
473 #endif
474
475 error = 0;
476 tid = (uintptr_t)curthread;
477 op = (flags & LK_TYPE_MASK);
478 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
479 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
480 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
481
482 MPASS((flags & ~LK_TOTAL_MASK) == 0);
483 KASSERT((op & (op - 1)) == 0,
484 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
485 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
486 (op != LK_DOWNGRADE && op != LK_RELEASE),
487 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
488 __func__, file, line));
489 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
490 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
491 __func__, file, line));
492 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
493 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
494 lk->lock_object.lo_name, file, line));
495
496 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
497 if (panicstr != NULL) {
498 if (flags & LK_INTERLOCK)
499 class->lc_unlock(ilk);
500 return (0);
501 }
502
503 if (lk->lock_object.lo_flags & LK_NOSHARE) {
504 switch (op) {
505 case LK_SHARED:
506 op = LK_EXCLUSIVE;
507 break;
508 case LK_UPGRADE:
509 case LK_TRYUPGRADE:
510 case LK_DOWNGRADE:
511 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
512 file, line);
513 if (flags & LK_INTERLOCK)
514 class->lc_unlock(ilk);
515 return (0);
516 }
517 }
518
519 wakeup_swapper = 0;
520 switch (op) {
521 case LK_SHARED:
522 if (LK_CAN_WITNESS(flags))
523 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
524 file, line, flags & LK_INTERLOCK ? ilk : NULL);
525 for (;;) {
526 x = lk->lk_lock;
527
528 /*
529 * If no other thread has an exclusive lock, or
530 * no exclusive waiter is present, bump the count of
531 * sharers. Since we have to preserve the state of
532 * waiters, if we fail to acquire the shared lock
533 * loop back and retry.
534 */
535 if (LK_CAN_SHARE(x, flags)) {
536 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
537 x + LK_ONE_SHARER))
538 break;
539 continue;
540 }
541 #ifdef HWPMC_HOOKS
542 PMC_SOFT_CALL( , , lock, failed);
543 #endif
544 lock_profile_obtain_lock_failed(&lk->lock_object,
545 &contested, &waittime);
546
547 /*
548 * If the lock is already held by curthread in
549 * exclusive mode, avoid a deadlock.
550 */
551 if (LK_HOLDER(x) == tid) {
552 LOCK_LOG2(lk,
553 "%s: %p already held in exclusive mode",
554 __func__, lk);
555 error = EDEADLK;
556 break;
557 }
558
559 /*
560 * If the lock is expected to not sleep just give up
561 * and return.
562 */
563 if (LK_TRYOP(flags)) {
564 LOCK_LOG2(lk, "%s: %p fails the try operation",
565 __func__, lk);
566 error = EBUSY;
567 break;
568 }
569
570 #ifdef ADAPTIVE_LOCKMGRS
571 /*
572 * If the owner is running on another CPU, spin until
573 * the owner stops running or the state of the lock
574 * changes. We need to handle both states here
575 * because, for a failed acquisition, the lock can be
576 * either held in exclusive mode or shared mode
577 * (for the writer starvation avoidance technique).
578 */
579 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
580 LK_HOLDER(x) != LK_KERNPROC) {
581 owner = (struct thread *)LK_HOLDER(x);
582 if (LOCK_LOG_TEST(&lk->lock_object, 0))
583 CTR3(KTR_LOCK,
584 "%s: spinning on %p held by %p",
585 __func__, lk, owner);
586 KTR_STATE1(KTR_SCHED, "thread",
587 sched_tdname(td), "spinning",
588 "lockname:\"%s\"", lk->lock_object.lo_name);
589
590 /*
591 * If we are holding also an interlock drop it
592 * in order to avoid a deadlock if the lockmgr
593 * owner is adaptively spinning on the
594 * interlock itself.
595 */
596 if (flags & LK_INTERLOCK) {
597 class->lc_unlock(ilk);
598 flags &= ~LK_INTERLOCK;
599 }
600 GIANT_SAVE();
601 while (LK_HOLDER(lk->lk_lock) ==
602 (uintptr_t)owner && TD_IS_RUNNING(owner))
603 cpu_spinwait();
604 KTR_STATE0(KTR_SCHED, "thread",
605 sched_tdname(td), "running");
606 GIANT_RESTORE();
607 continue;
608 } else if (LK_CAN_ADAPT(lk, flags) &&
609 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
610 spintries < alk_retries) {
611 KTR_STATE1(KTR_SCHED, "thread",
612 sched_tdname(td), "spinning",
613 "lockname:\"%s\"", lk->lock_object.lo_name);
614 if (flags & LK_INTERLOCK) {
615 class->lc_unlock(ilk);
616 flags &= ~LK_INTERLOCK;
617 }
618 GIANT_SAVE();
619 spintries++;
620 for (i = 0; i < alk_loops; i++) {
621 if (LOCK_LOG_TEST(&lk->lock_object, 0))
622 CTR4(KTR_LOCK,
623 "%s: shared spinning on %p with %u and %u",
624 __func__, lk, spintries, i);
625 x = lk->lk_lock;
626 if ((x & LK_SHARE) == 0 ||
627 LK_CAN_SHARE(x, flags) != 0)
628 break;
629 cpu_spinwait();
630 }
631 KTR_STATE0(KTR_SCHED, "thread",
632 sched_tdname(td), "running");
633 GIANT_RESTORE();
634 if (i != alk_loops)
635 continue;
636 }
637 #endif
638
639 /*
640 * Acquire the sleepqueue chain lock because we
641 * probably will need to manipulate waiters flags.
642 */
643 sleepq_lock(&lk->lock_object);
644 x = lk->lk_lock;
645
646 /*
647 * If the lock can be acquired in shared mode, try
648 * again.
649 */
650 if (LK_CAN_SHARE(x, flags)) {
651 sleepq_release(&lk->lock_object);
652 continue;
653 }
654
655 #ifdef ADAPTIVE_LOCKMGRS
656 /*
657 * The current lock owner might have started executing
658 * on another CPU (or the lock could have changed
659 * owner) while we were waiting on the turnstile
660 * chain lock. If so, drop the turnstile lock and try
661 * again.
662 */
663 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
664 LK_HOLDER(x) != LK_KERNPROC) {
665 owner = (struct thread *)LK_HOLDER(x);
666 if (TD_IS_RUNNING(owner)) {
667 sleepq_release(&lk->lock_object);
668 continue;
669 }
670 }
671 #endif
672
673 /*
674 * Try to set the LK_SHARED_WAITERS flag. If we fail,
675 * loop back and retry.
676 */
677 if ((x & LK_SHARED_WAITERS) == 0) {
678 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
679 x | LK_SHARED_WAITERS)) {
680 sleepq_release(&lk->lock_object);
681 continue;
682 }
683 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
684 __func__, lk);
685 }
686
687 /*
688 * Since we have been unable to acquire the
689 * shared lock and the shared waiters flag is set,
690 * we will sleep.
691 */
692 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
693 SQ_SHARED_QUEUE);
694 flags &= ~LK_INTERLOCK;
695 if (error) {
696 LOCK_LOG3(lk,
697 "%s: interrupted sleep for %p with %d",
698 __func__, lk, error);
699 break;
700 }
701 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
702 __func__, lk);
703 }
704 if (error == 0) {
705 lock_profile_obtain_lock_success(&lk->lock_object,
706 contested, waittime, file, line);
707 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
708 line);
709 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
710 line);
711 TD_LOCKS_INC(curthread);
712 TD_SLOCKS_INC(curthread);
713 STACK_SAVE(lk);
714 }
715 break;
716 case LK_UPGRADE:
717 case LK_TRYUPGRADE:
718 _lockmgr_assert(lk, KA_SLOCKED, file, line);
719 v = lk->lk_lock;
720 x = v & LK_ALL_WAITERS;
721 v &= LK_EXCLUSIVE_SPINNERS;
722
723 /*
724 * Try to switch from one shared lock to an exclusive one.
725 * We need to preserve waiters flags during the operation.
726 */
727 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
728 tid | x)) {
729 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
730 line);
731 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
732 LK_TRYWIT(flags), file, line);
733 TD_SLOCKS_DEC(curthread);
734 break;
735 }
736
737 /*
738 * In LK_TRYUPGRADE mode, do not drop the lock,
739 * returning EBUSY instead.
740 */
741 if (op == LK_TRYUPGRADE) {
742 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
743 __func__, lk);
744 error = EBUSY;
745 break;
746 }
747
748 /*
749 * We have been unable to succeed in upgrading, so just
750 * give up the shared lock.
751 */
752 wakeup_swapper |= wakeupshlk(lk, file, line);
753
754 /* FALLTHROUGH */
755 case LK_EXCLUSIVE:
756 if (LK_CAN_WITNESS(flags))
757 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
758 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
759 ilk : NULL);
760
761 /*
762 * If curthread already holds the lock and this one is
763 * allowed to recurse, simply recurse on it.
764 */
765 if (lockmgr_xlocked(lk)) {
766 if ((flags & LK_CANRECURSE) == 0 &&
767 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
768
769 /*
770 * If the lock was requested with LK_NOWAIT, just
771 * give up and return EBUSY instead of panicking.
772 */
773 if (LK_TRYOP(flags)) {
774 LOCK_LOG2(lk,
775 "%s: %p fails the try operation",
776 __func__, lk);
777 error = EBUSY;
778 break;
779 }
780 if (flags & LK_INTERLOCK)
781 class->lc_unlock(ilk);
782 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
783 __func__, iwmesg, file, line);
784 }
785 lk->lk_recurse++;
786 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
787 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
788 lk->lk_recurse, file, line);
789 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
790 LK_TRYWIT(flags), file, line);
791 TD_LOCKS_INC(curthread);
792 break;
793 }
794
795 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
796 tid)) {
797 #ifdef HWPMC_HOOKS
798 PMC_SOFT_CALL( , , lock, failed);
799 #endif
800 lock_profile_obtain_lock_failed(&lk->lock_object,
801 &contested, &waittime);
802
803 /*
804 * If the lock is expected to not sleep just give up
805 * and return.
806 */
807 if (LK_TRYOP(flags)) {
808 LOCK_LOG2(lk, "%s: %p fails the try operation",
809 __func__, lk);
810 error = EBUSY;
811 break;
812 }
813
814 #ifdef ADAPTIVE_LOCKMGRS
815 /*
816 * If the owner is running on another CPU, spin until
817 * the owner stops running or the state of the lock
818 * changes.
819 */
820 x = lk->lk_lock;
821 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
822 LK_HOLDER(x) != LK_KERNPROC) {
823 owner = (struct thread *)LK_HOLDER(x);
824 if (LOCK_LOG_TEST(&lk->lock_object, 0))
825 CTR3(KTR_LOCK,
826 "%s: spinning on %p held by %p",
827 __func__, lk, owner);
828 KTR_STATE1(KTR_SCHED, "thread",
829 sched_tdname(td), "spinning",
830 "lockname:\"%s\"", lk->lock_object.lo_name);
831
832 /*
833 * If we are holding also an interlock drop it
834 * in order to avoid a deadlock if the lockmgr
835 * owner is adaptively spinning on the
836 * interlock itself.
837 */
838 if (flags & LK_INTERLOCK) {
839 class->lc_unlock(ilk);
840 flags &= ~LK_INTERLOCK;
841 }
842 GIANT_SAVE();
843 while (LK_HOLDER(lk->lk_lock) ==
844 (uintptr_t)owner && TD_IS_RUNNING(owner))
845 cpu_spinwait();
846 KTR_STATE0(KTR_SCHED, "thread",
847 sched_tdname(td), "running");
848 GIANT_RESTORE();
849 continue;
850 } else if (LK_CAN_ADAPT(lk, flags) &&
851 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
852 spintries < alk_retries) {
853 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
854 !atomic_cmpset_ptr(&lk->lk_lock, x,
855 x | LK_EXCLUSIVE_SPINNERS))
856 continue;
857 KTR_STATE1(KTR_SCHED, "thread",
858 sched_tdname(td), "spinning",
859 "lockname:\"%s\"", lk->lock_object.lo_name);
860 if (flags & LK_INTERLOCK) {
861 class->lc_unlock(ilk);
862 flags &= ~LK_INTERLOCK;
863 }
864 GIANT_SAVE();
865 spintries++;
866 for (i = 0; i < alk_loops; i++) {
867 if (LOCK_LOG_TEST(&lk->lock_object, 0))
868 CTR4(KTR_LOCK,
869 "%s: shared spinning on %p with %u and %u",
870 __func__, lk, spintries, i);
871 if ((lk->lk_lock &
872 LK_EXCLUSIVE_SPINNERS) == 0)
873 break;
874 cpu_spinwait();
875 }
876 KTR_STATE0(KTR_SCHED, "thread",
877 sched_tdname(td), "running");
878 GIANT_RESTORE();
879 if (i != alk_loops)
880 continue;
881 }
882 #endif
883
884 /*
885 * Acquire the sleepqueue chain lock because we
886 * probably will need to manipulate waiters flags.
887 */
888 sleepq_lock(&lk->lock_object);
889 x = lk->lk_lock;
890
891 /*
892 * If the lock has been released while we spun on
893 * the sleepqueue chain lock just try again.
894 */
895 if (x == LK_UNLOCKED) {
896 sleepq_release(&lk->lock_object);
897 continue;
898 }
899
900 #ifdef ADAPTIVE_LOCKMGRS
901 /*
902 * The current lock owner might have started executing
903 * on another CPU (or the lock could have changed
904 * owner) while we were waiting on the turnstile
905 * chain lock. If so, drop the turnstile lock and try
906 * again.
907 */
908 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
909 LK_HOLDER(x) != LK_KERNPROC) {
910 owner = (struct thread *)LK_HOLDER(x);
911 if (TD_IS_RUNNING(owner)) {
912 sleepq_release(&lk->lock_object);
913 continue;
914 }
915 }
916 #endif
917
918 /*
919 * The lock can be in the state where there is a
920 * pending queue of waiters, but still no owner.
921 * This happens when the lock is contested and an
922 * owner is going to claim the lock.
923 * If curthread is the one successfully acquiring it
924 * claim lock ownership and return, preserving waiters
925 * flags.
926 */
927 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
928 if ((x & ~v) == LK_UNLOCKED) {
929 v &= ~LK_EXCLUSIVE_SPINNERS;
930 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
931 tid | v)) {
932 sleepq_release(&lk->lock_object);
933 LOCK_LOG2(lk,
934 "%s: %p claimed by a new writer",
935 __func__, lk);
936 break;
937 }
938 sleepq_release(&lk->lock_object);
939 continue;
940 }
941
942 /*
943 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
944 * fail, loop back and retry.
945 */
946 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
947 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
948 x | LK_EXCLUSIVE_WAITERS)) {
949 sleepq_release(&lk->lock_object);
950 continue;
951 }
952 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
953 __func__, lk);
954 }
955
956 /*
957 * Since we have been unable to acquire the
958 * exclusive lock and the exclusive waiters flag
959 * is set, we will sleep.
960 */
961 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
962 SQ_EXCLUSIVE_QUEUE);
963 flags &= ~LK_INTERLOCK;
964 if (error) {
965 LOCK_LOG3(lk,
966 "%s: interrupted sleep for %p with %d",
967 __func__, lk, error);
968 break;
969 }
970 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
971 __func__, lk);
972 }
973 if (error == 0) {
974 lock_profile_obtain_lock_success(&lk->lock_object,
975 contested, waittime, file, line);
976 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
977 lk->lk_recurse, file, line);
978 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
979 LK_TRYWIT(flags), file, line);
980 TD_LOCKS_INC(curthread);
981 STACK_SAVE(lk);
982 }
983 break;
984 case LK_DOWNGRADE:
985 _lockmgr_assert(lk, KA_XLOCKED, file, line);
986 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
987 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
988
989 /*
990 * Panic if the lock is recursed.
991 */
992 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
993 if (flags & LK_INTERLOCK)
994 class->lc_unlock(ilk);
995 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
996 __func__, iwmesg, file, line);
997 }
998 TD_SLOCKS_INC(curthread);
999
1000 /*
1001 * In order to preserve waiters flags, just spin.
1002 */
1003 for (;;) {
1004 x = lk->lk_lock;
1005 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1006 x &= LK_ALL_WAITERS;
1007 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1008 LK_SHARERS_LOCK(1) | x))
1009 break;
1010 cpu_spinwait();
1011 }
1012 break;
1013 case LK_RELEASE:
1014 _lockmgr_assert(lk, KA_LOCKED, file, line);
1015 x = lk->lk_lock;
1016
1017 if ((x & LK_SHARE) == 0) {
1018
1019 /*
1020 * As a first pass, treat the lock as if it has no
1021 * waiters.
1022 * Fix-up the tid var if the lock has been disowned.
1023 */
1024 if (LK_HOLDER(x) == LK_KERNPROC)
1025 tid = LK_KERNPROC;
1026 else {
1027 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
1028 file, line);
1029 TD_LOCKS_DEC(curthread);
1030 }
1031 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1032 lk->lk_recurse, file, line);
1033
1034 /*
1035 * The lock is held in exclusive mode.
1036 * If the lock is recursed also, then unrecurse it.
1037 */
1038 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1039 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1040 lk);
1041 lk->lk_recurse--;
1042 break;
1043 }
1044 if (tid != LK_KERNPROC)
1045 lock_profile_release_lock(&lk->lock_object);
1046
1047 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1048 LK_UNLOCKED))
1049 break;
1050
1051 sleepq_lock(&lk->lock_object);
1052 x = lk->lk_lock;
1053 v = LK_UNLOCKED;
1054
1055 /*
1056 * If the lock has exclusive waiters, give them
1057 * preference in order to avoid deadlock with
1058 * shared runners-up.
1059 * If interruptible sleeps left the exclusive queue
1060 * empty, avoid starvation of the threads sleeping
1061 * on the shared queue by giving them precedence
1062 * and clearing the exclusive waiters bit anyway.
1063 * Note that the lk_exslpfail count may overstate
1064 * the real number of waiters with the
1065 * LK_SLEEPFAIL flag on, because such waiters may
1066 * also be using interruptible sleeps, so
1067 * lk_exslpfail should be treated as an upper
1068 * bound, including the edge cases.
1069 */
1070 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1071 realexslp = sleepq_sleepcnt(&lk->lock_object,
1072 SQ_EXCLUSIVE_QUEUE);
1073 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1074 if (lk->lk_exslpfail < realexslp) {
1075 lk->lk_exslpfail = 0;
1076 queue = SQ_EXCLUSIVE_QUEUE;
1077 v |= (x & LK_SHARED_WAITERS);
1078 } else {
1079 lk->lk_exslpfail = 0;
1080 LOCK_LOG2(lk,
1081 "%s: %p has only LK_SLEEPFAIL sleepers",
1082 __func__, lk);
1083 LOCK_LOG2(lk,
1084 "%s: %p waking up threads on the exclusive queue",
1085 __func__, lk);
1086 wakeup_swapper =
1087 sleepq_broadcast(&lk->lock_object,
1088 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1089 queue = SQ_SHARED_QUEUE;
1090 }
1091 } else {
1092
1093 /*
1094 * Exclusive waiters sleeping with LK_SLEEPFAIL
1095 * on and using interruptible sleeps/timeout
1096 * may have left spurious lk_exslpfail counts
1097 * on, so clean it up anyway.
1098 */
1099 lk->lk_exslpfail = 0;
1100 queue = SQ_SHARED_QUEUE;
1101 }
1102
1103 LOCK_LOG3(lk,
1104 "%s: %p waking up threads on the %s queue",
1105 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1106 "exclusive");
1107 atomic_store_rel_ptr(&lk->lk_lock, v);
1108 wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1109 SLEEPQ_LK, 0, queue);
1110 sleepq_release(&lk->lock_object);
1111 break;
1112 } else
1113 wakeup_swapper = wakeupshlk(lk, file, line);
1114 break;
1115 case LK_DRAIN:
1116 if (LK_CAN_WITNESS(flags))
1117 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1118 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1119 ilk : NULL);
1120
1121 /*
1122 * Trying to drain a lock we already own will result in a
1123 * deadlock.
1124 */
1125 if (lockmgr_xlocked(lk)) {
1126 if (flags & LK_INTERLOCK)
1127 class->lc_unlock(ilk);
1128 panic("%s: draining %s with the lock held @ %s:%d\n",
1129 __func__, iwmesg, file, line);
1130 }
1131
1132 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1133 #ifdef HWPMC_HOOKS
1134 PMC_SOFT_CALL( , , lock, failed);
1135 #endif
1136 lock_profile_obtain_lock_failed(&lk->lock_object,
1137 &contested, &waittime);
1138
1139 /*
1140 * If the lock is expected to not sleep just give up
1141 * and return.
1142 */
1143 if (LK_TRYOP(flags)) {
1144 LOCK_LOG2(lk, "%s: %p fails the try operation",
1145 __func__, lk);
1146 error = EBUSY;
1147 break;
1148 }
1149
1150 /*
1151 * Acquire the sleepqueue chain lock because we
1152 * probably will need to manipulate waiters flags.
1153 */
1154 sleepq_lock(&lk->lock_object);
1155 x = lk->lk_lock;
1156
1157 /*
1158 * If the lock has been released while we spun on
1159 * the sleepqueue chain lock just try again.
1160 */
1161 if (x == LK_UNLOCKED) {
1162 sleepq_release(&lk->lock_object);
1163 continue;
1164 }
1165
1166 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1167 if ((x & ~v) == LK_UNLOCKED) {
1168 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1169
1170 /*
1171 * If interruptible sleeps left the exclusive
1172 * queue empty, avoid starvation of the
1173 * threads sleeping on the shared queue by
1174 * giving them precedence and clearing the
1175 * exclusive waiters bit anyway.
1176 * Note that the lk_exslpfail count may
1177 * overstate the real number of waiters with
1178 * the LK_SLEEPFAIL flag on, because such
1179 * waiters may also be using interruptible
1180 * sleeps, so lk_exslpfail should be treated
1181 * as an upper bound, including the edge
1182 * cases.
1183 */
1184 if (v & LK_EXCLUSIVE_WAITERS) {
1185 queue = SQ_EXCLUSIVE_QUEUE;
1186 v &= ~LK_EXCLUSIVE_WAITERS;
1187 } else {
1188
1189 /*
1190 * Exclusive waiters sleeping with
1191 * LK_SLEEPFAIL on and using
1192 * interruptible sleeps/timeout may
1193 * have left spurious lk_exslpfail
1194 * counts on, so clean it up anyway.
1195 */
1196 MPASS(v & LK_SHARED_WAITERS);
1197 lk->lk_exslpfail = 0;
1198 queue = SQ_SHARED_QUEUE;
1199 v &= ~LK_SHARED_WAITERS;
1200 }
1201 if (queue == SQ_EXCLUSIVE_QUEUE) {
1202 realexslp =
1203 sleepq_sleepcnt(&lk->lock_object,
1204 SQ_EXCLUSIVE_QUEUE);
1205 if (lk->lk_exslpfail >= realexslp) {
1206 lk->lk_exslpfail = 0;
1207 queue = SQ_SHARED_QUEUE;
1208 v &= ~LK_SHARED_WAITERS;
1209 if (realexslp != 0) {
1210 LOCK_LOG2(lk,
1211 "%s: %p has only LK_SLEEPFAIL sleepers",
1212 __func__, lk);
1213 LOCK_LOG2(lk,
1214 "%s: %p waking up threads on the exclusive queue",
1215 __func__, lk);
1216 wakeup_swapper =
1217 sleepq_broadcast(
1218 &lk->lock_object,
1219 SLEEPQ_LK, 0,
1220 SQ_EXCLUSIVE_QUEUE);
1221 }
1222 } else
1223 lk->lk_exslpfail = 0;
1224 }
1225 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1226 sleepq_release(&lk->lock_object);
1227 continue;
1228 }
1229 LOCK_LOG3(lk,
1230 "%s: %p waking up all threads on the %s queue",
1231 __func__, lk, queue == SQ_SHARED_QUEUE ?
1232 "shared" : "exclusive");
1233 wakeup_swapper |= sleepq_broadcast(
1234 &lk->lock_object, SLEEPQ_LK, 0, queue);
1235
1236 /*
1237 * If shared waiters have been woken up we need
1238 * to wait for one of them to acquire the lock
1239 * before setting the exclusive waiters flag in
1240 * order to avoid a deadlock.
1241 */
1242 if (queue == SQ_SHARED_QUEUE) {
1243 for (v = lk->lk_lock;
1244 (v & LK_SHARE) && !LK_SHARERS(v);
1245 v = lk->lk_lock)
1246 cpu_spinwait();
1247 }
1248 }
1249
1250 /*
1251 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1252 * fail, loop back and retry.
1253 */
1254 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1255 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1256 x | LK_EXCLUSIVE_WAITERS)) {
1257 sleepq_release(&lk->lock_object);
1258 continue;
1259 }
1260 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1261 __func__, lk);
1262 }
1263
1264 /*
1265 * Since we have been unable to acquire the
1266 * exclusive lock and the exclusive waiters flag
1267 * is set, we will sleep.
1268 */
1269 if (flags & LK_INTERLOCK) {
1270 class->lc_unlock(ilk);
1271 flags &= ~LK_INTERLOCK;
1272 }
1273 GIANT_SAVE();
1274 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1275 SQ_EXCLUSIVE_QUEUE);
1276 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1277 GIANT_RESTORE();
1278 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1279 __func__, lk);
1280 }
1281
1282 if (error == 0) {
1283 lock_profile_obtain_lock_success(&lk->lock_object,
1284 contested, waittime, file, line);
1285 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1286 lk->lk_recurse, file, line);
1287 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1288 LK_TRYWIT(flags), file, line);
1289 TD_LOCKS_INC(curthread);
1290 STACK_SAVE(lk);
1291 }
1292 break;
1293 default:
1294 if (flags & LK_INTERLOCK)
1295 class->lc_unlock(ilk);
1296 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1297 }
1298
1299 if (flags & LK_INTERLOCK)
1300 class->lc_unlock(ilk);
1301 if (wakeup_swapper)
1302 kick_proc0();
1303
1304 return (error);
1305 }
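/*
 * LK_INTERLOCK handling above guarantees that a caller-supplied
 * interlock is released exactly once: it is dropped (via its lock
 * class) before any sleep or adaptive spin, and if no such path is
 * taken it is dropped just before returning.  It is never reacquired
 * by lockmgr itself.
 */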
1306
1307 void
1308 _lockmgr_disown(struct lock *lk, const char *file, int line)
1309 {
1310 uintptr_t tid, x;
1311
1312 if (SCHEDULER_STOPPED())
1313 return;
1314
1315 tid = (uintptr_t)curthread;
1316 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1317
1318 /*
1319 * Panic if the lock is recursed.
1320 */
1321 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1322 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1323 __func__, file, line);
1324
1325 /*
1326 * If the owner is already LK_KERNPROC just skip the whole operation.
1327 */
1328 if (LK_HOLDER(lk->lk_lock) != tid)
1329 return;
1330 lock_profile_release_lock(&lk->lock_object);
1331 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1332 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1333 TD_LOCKS_DEC(curthread);
1334 STACK_SAVE(lk);
1335
1336 /*
1337 * In order to preserve waiters flags, just spin.
1338 */
1339 for (;;) {
1340 x = lk->lk_lock;
1341 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1342 x &= LK_ALL_WAITERS;
1343 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1344 LK_KERNPROC | x))
1345 return;
1346 cpu_spinwait();
1347 }
1348 }
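/*
 * Disowning transfers an exclusively held lock to the LK_KERNPROC
 * pseudo-owner so that a different context may release it later; the
 * per-thread lock accounting and WITNESS state are dropped here, and
 * LK_RELEASE skips them again when it finds a LK_KERNPROC holder.
 */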
1349
1350 void
1351 lockmgr_printinfo(const struct lock *lk)
1352 {
1353 struct thread *td;
1354 uintptr_t x;
1355
1356 if (lk->lk_lock == LK_UNLOCKED)
1357 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1358 else if (lk->lk_lock & LK_SHARE)
1359 printf("lock type %s: SHARED (count %ju)\n",
1360 lk->lock_object.lo_name,
1361 (uintmax_t)LK_SHARERS(lk->lk_lock));
1362 else {
1363 td = lockmgr_xholder(lk);
1364 if (td == (struct thread *)LK_KERNPROC)
1365 printf("lock type %s: EXCL by KERNPROC\n",
1366 lk->lock_object.lo_name);
1367 else
1368 printf("lock type %s: EXCL by thread %p "
1369 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1370 td, td->td_proc->p_pid, td->td_proc->p_comm,
1371 td->td_tid);
1372 }
1373
1374 x = lk->lk_lock;
1375 if (x & LK_EXCLUSIVE_WAITERS)
1376 printf(" with exclusive waiters pending\n");
1377 if (x & LK_SHARED_WAITERS)
1378 printf(" with shared waiters pending\n");
1379 if (x & LK_EXCLUSIVE_SPINNERS)
1380 printf(" with exclusive spinners pending\n");
1381
1382 STACK_PRINT(lk);
1383 }
1384
1385 int
1386 lockstatus(const struct lock *lk)
1387 {
1388 uintptr_t v, x;
1389 int ret;
1390
1391 ret = LK_SHARED;
1392 x = lk->lk_lock;
1393 v = LK_HOLDER(x);
1394
1395 if ((x & LK_SHARE) == 0) {
1396 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1397 ret = LK_EXCLUSIVE;
1398 else
1399 ret = LK_EXCLOTHER;
1400 } else if (x == LK_UNLOCKED)
1401 ret = 0;
1402
1403 return (ret);
1404 }
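/*
 * lockstatus() is advisory: it reports LK_EXCLUSIVE when curthread (or
 * LK_KERNPROC) is the exclusive owner, LK_EXCLOTHER when some other
 * thread owns it exclusively, LK_SHARED when it is share-locked, and 0
 * when it is unlocked, based on a single unlocked read of lk_lock.
 */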
1405
1406 #ifdef INVARIANT_SUPPORT
1407
1408 FEATURE(invariant_support,
1409 "Support for modules compiled with INVARIANTS option");
1410
1411 #ifndef INVARIANTS
1412 #undef _lockmgr_assert
1413 #endif
1414
1415 void
1416 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1417 {
1418 int slocked = 0;
1419
1420 if (panicstr != NULL)
1421 return;
1422 switch (what) {
1423 case KA_SLOCKED:
1424 case KA_SLOCKED | KA_NOTRECURSED:
1425 case KA_SLOCKED | KA_RECURSED:
1426 slocked = 1;
1427 case KA_LOCKED:
1428 case KA_LOCKED | KA_NOTRECURSED:
1429 case KA_LOCKED | KA_RECURSED:
1430 #ifdef WITNESS
1431
1432 /*
1433 * We cannot trust WITNESS if the lock is held in exclusive
1434 * mode and a call to lockmgr_disown() happened.
1435 * Work around this by skipping the check if the lock is held in
1436 * exclusive mode even for the KA_LOCKED case.
1437 */
1438 if (slocked || (lk->lk_lock & LK_SHARE)) {
1439 witness_assert(&lk->lock_object, what, file, line);
1440 break;
1441 }
1442 #endif
1443 if (lk->lk_lock == LK_UNLOCKED ||
1444 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1445 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1446 panic("Lock %s not %slocked @ %s:%d\n",
1447 lk->lock_object.lo_name, slocked ? "share" : "",
1448 file, line);
1449
1450 if ((lk->lk_lock & LK_SHARE) == 0) {
1451 if (lockmgr_recursed(lk)) {
1452 if (what & KA_NOTRECURSED)
1453 panic("Lock %s recursed @ %s:%d\n",
1454 lk->lock_object.lo_name, file,
1455 line);
1456 } else if (what & KA_RECURSED)
1457 panic("Lock %s not recursed @ %s:%d\n",
1458 lk->lock_object.lo_name, file, line);
1459 }
1460 break;
1461 case KA_XLOCKED:
1462 case KA_XLOCKED | KA_NOTRECURSED:
1463 case KA_XLOCKED | KA_RECURSED:
1464 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1465 panic("Lock %s not exclusively locked @ %s:%d\n",
1466 lk->lock_object.lo_name, file, line);
1467 if (lockmgr_recursed(lk)) {
1468 if (what & KA_NOTRECURSED)
1469 panic("Lock %s recursed @ %s:%d\n",
1470 lk->lock_object.lo_name, file, line);
1471 } else if (what & KA_RECURSED)
1472 panic("Lock %s not recursed @ %s:%d\n",
1473 lk->lock_object.lo_name, file, line);
1474 break;
1475 case KA_UNLOCKED:
1476 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1477 panic("Lock %s exclusively locked @ %s:%d\n",
1478 lk->lock_object.lo_name, file, line);
1479 break;
1480 default:
1481 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1482 line);
1483 }
1484 }
1485 #endif
1486
1487 #ifdef DDB
1488 int
1489 lockmgr_chain(struct thread *td, struct thread **ownerp)
1490 {
1491 struct lock *lk;
1492
1493 lk = td->td_wchan;
1494
1495 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1496 return (0);
1497 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1498 if (lk->lk_lock & LK_SHARE)
1499 db_printf("SHARED (count %ju)\n",
1500 (uintmax_t)LK_SHARERS(lk->lk_lock));
1501 else
1502 db_printf("EXCL\n");
1503 *ownerp = lockmgr_xholder(lk);
1504
1505 return (1);
1506 }
1507
1508 static void
1509 db_show_lockmgr(const struct lock_object *lock)
1510 {
1511 struct thread *td;
1512 const struct lock *lk;
1513
1514 lk = (const struct lock *)lock;
1515
1516 db_printf(" state: ");
1517 if (lk->lk_lock == LK_UNLOCKED)
1518 db_printf("UNLOCKED\n");
1519 else if (lk->lk_lock & LK_SHARE)
1520 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1521 else {
1522 td = lockmgr_xholder(lk);
1523 if (td == (struct thread *)LK_KERNPROC)
1524 db_printf("XLOCK: LK_KERNPROC\n");
1525 else
1526 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1527 td->td_tid, td->td_proc->p_pid,
1528 td->td_proc->p_comm);
1529 if (lockmgr_recursed(lk))
1530 db_printf(" recursed: %d\n", lk->lk_recurse);
1531 }
1532 db_printf(" waiters: ");
1533 switch (lk->lk_lock & LK_ALL_WAITERS) {
1534 case LK_SHARED_WAITERS:
1535 db_printf("shared\n");
1536 break;
1537 case LK_EXCLUSIVE_WAITERS:
1538 db_printf("exclusive\n");
1539 break;
1540 case LK_ALL_WAITERS:
1541 db_printf("shared and exclusive\n");
1542 break;
1543 default:
1544 db_printf("none\n");
1545 }
1546 db_printf(" spinners: ");
1547 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1548 db_printf("exclusive\n");
1549 else
1550 db_printf("none\n");
1551 }
1552 #endif