FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c
1 /*-
2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_hwpmc_hooks.h"
32 #include "opt_kdtrace.h"
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/10.1/sys/kern/kern_lock.c 271161 2014-09-05 13:22:28Z kib $");
36
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/sleepqueue.h>
46 #ifdef DEBUG_LOCKS
47 #include <sys/stack.h>
48 #endif
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51
52 #include <machine/cpu.h>
53
54 #ifdef DDB
55 #include <ddb/ddb.h>
56 #endif
57
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62
63 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
64 (LK_ADAPTIVE | LK_NOSHARE));
65 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
66 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
67
68 #define SQ_EXCLUSIVE_QUEUE 0
69 #define SQ_SHARED_QUEUE 1
70
71 #ifndef INVARIANTS
72 #define _lockmgr_assert(lk, what, file, line)
73 #define TD_LOCKS_INC(td)
74 #define TD_LOCKS_DEC(td)
75 #else
76 #define TD_LOCKS_INC(td) ((td)->td_locks++)
77 #define TD_LOCKS_DEC(td) ((td)->td_locks--)
78 #endif
79 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
80 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
81
82 #ifndef DEBUG_LOCKS
83 #define STACK_PRINT(lk)
84 #define STACK_SAVE(lk)
85 #define STACK_ZERO(lk)
86 #else
87 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
88 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
89 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
90 #endif
91
92 #define LOCK_LOG2(lk, string, arg1, arg2) \
93 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
94 CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
96 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
97 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
98
99 #define GIANT_DECLARE \
100 int _i = 0; \
101 WITNESS_SAVE_DECL(Giant)
102 #define GIANT_RESTORE() do { \
103 if (_i > 0) { \
104 while (_i--) \
105 mtx_lock(&Giant); \
106 WITNESS_RESTORE(&Giant.lock_object, Giant); \
107 } \
108 } while (0)
109 #define GIANT_SAVE() do { \
110 if (mtx_owned(&Giant)) { \
111 WITNESS_SAVE(&Giant.lock_object, Giant); \
112 while (mtx_owned(&Giant)) { \
113 _i++; \
114 mtx_unlock(&Giant); \
115 } \
116 } \
117 } while (0)
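/*
 * Editor's note (not part of the original file): GIANT_SAVE() and
 * GIANT_RESTORE() bracket every sleep and adaptive spin below.  They fully
 * release a possibly-recursed Giant, remembering the recursion depth in the
 * local _i declared by GIANT_DECLARE, and reacquire it that many times
 * afterwards, restoring the WITNESS state as well.
 */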
118
119 #define LK_CAN_SHARE(x) \
120 (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \
121 ((x) & LK_EXCLUSIVE_SPINNERS) == 0 || \
122 curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
123 #define LK_TRYOP(x) \
124 ((x) & LK_NOWAIT)
125
126 #define LK_CAN_WITNESS(x) \
127 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
128 #define LK_TRYWIT(x) \
129 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
130
131 #define LK_CAN_ADAPT(lk, f) \
132 (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
133 ((f) & LK_SLEEPFAIL) == 0)
134
135 #define lockmgr_disowned(lk) \
136 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
137
138 #define lockmgr_xlocked(lk) \
139 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
140
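/*
 * Editor's sketch (not part of the original file): lk_lock packs the whole
 * lock state into a single word.  When held exclusively it stores the owning
 * thread pointer plus the low-order flag bits in LK_FLAGMASK; when shared,
 * LK_SHARE is set and the sharer count lives in the upper bits.  Using only
 * the accessors already used in this file:
 *
 *	uintptr_t x = lk->lk_lock;
 *	struct thread *owner;
 *	uintptr_t nshared;
 *
 *	if ((x & LK_SHARE) == 0)
 *		owner = (struct thread *)LK_HOLDER(x);
 *	else
 *		nshared = LK_SHARERS(x);
 */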
141 static void assert_lockmgr(const struct lock_object *lock, int how);
142 #ifdef DDB
143 static void db_show_lockmgr(const struct lock_object *lock);
144 #endif
145 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
146 #ifdef KDTRACE_HOOKS
147 static int owner_lockmgr(const struct lock_object *lock,
148 struct thread **owner);
149 #endif
150 static uintptr_t unlock_lockmgr(struct lock_object *lock);
151
152 struct lock_class lock_class_lockmgr = {
153 .lc_name = "lockmgr",
154 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
155 .lc_assert = assert_lockmgr,
156 #ifdef DDB
157 .lc_ddb_show = db_show_lockmgr,
158 #endif
159 .lc_lock = lock_lockmgr,
160 .lc_unlock = unlock_lockmgr,
161 #ifdef KDTRACE_HOOKS
162 .lc_owner = owner_lockmgr,
163 #endif
164 };
165
166 #ifdef ADAPTIVE_LOCKMGRS
167 static u_int alk_retries = 10;
168 static u_int alk_loops = 10000;
169 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
170 "lockmgr debugging");
171 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
172 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
173 #endif
174
175 static __inline struct thread *
176 lockmgr_xholder(const struct lock *lk)
177 {
178 uintptr_t x;
179
180 x = lk->lk_lock;
181 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
182 }
183
184 /*
 185  * Assumes the sleepqueue chain lock is held on entry and returns with it
 186  * released.  It also assumes the generic interlock is sane and has been
 187  * previously checked.  If LK_INTERLOCK is specified, the interlock is not
 188  * reacquired after the sleep.
189 */
190 static __inline int
191 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
192 const char *wmesg, int pri, int timo, int queue)
193 {
194 GIANT_DECLARE;
195 struct lock_class *class;
196 int catch, error;
197
198 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
199 catch = pri & PCATCH;
200 pri &= PRIMASK;
201 error = 0;
202
203 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
204 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
205
206 if (flags & LK_INTERLOCK)
207 class->lc_unlock(ilk);
208 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
209 lk->lk_exslpfail++;
210 GIANT_SAVE();
211 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
212 SLEEPQ_INTERRUPTIBLE : 0), queue);
213 if ((flags & LK_TIMELOCK) && timo)
214 sleepq_set_timeout(&lk->lock_object, timo);
215
216 /*
 217 	 * Decide which sleepqueue wait primitive to use.
218 */
219 if ((flags & LK_TIMELOCK) && timo && catch)
220 error = sleepq_timedwait_sig(&lk->lock_object, pri);
221 else if ((flags & LK_TIMELOCK) && timo)
222 error = sleepq_timedwait(&lk->lock_object, pri);
223 else if (catch)
224 error = sleepq_wait_sig(&lk->lock_object, pri);
225 else
226 sleepq_wait(&lk->lock_object, pri);
227 GIANT_RESTORE();
228 if ((flags & LK_SLEEPFAIL) && error == 0)
229 error = ENOLCK;
230
231 return (error);
232 }
233
234 static __inline int
235 wakeupshlk(struct lock *lk, const char *file, int line)
236 {
237 uintptr_t v, x;
238 u_int realexslp;
239 int queue, wakeup_swapper;
240
241 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
242 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
243
244 wakeup_swapper = 0;
245 for (;;) {
246 x = lk->lk_lock;
247
248 /*
249 * If there is more than one shared lock held, just drop one
250 * and return.
251 */
252 if (LK_SHARERS(x) > 1) {
253 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
254 x - LK_ONE_SHARER))
255 break;
256 continue;
257 }
258
259 /*
 260 		 * If there are no waiters on the exclusive queue, drop the
261 * lock quickly.
262 */
263 if ((x & LK_ALL_WAITERS) == 0) {
264 MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
265 LK_SHARERS_LOCK(1));
266 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
267 break;
268 continue;
269 }
270
271 /*
272 * We should have a sharer with waiters, so enter the hard
273 * path in order to handle wakeups correctly.
274 */
275 sleepq_lock(&lk->lock_object);
276 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
277 v = LK_UNLOCKED;
278
279 /*
 280 		 * If the lock has exclusive waiters, give them preference in
 281 		 * order to avoid deadlock with shared runners-up.
 282 		 * If interruptible sleeps left the exclusive queue empty,
 283 		 * avoid starvation of the threads sleeping on the shared
 284 		 * queue by giving them precedence and cleaning up the
 285 		 * exclusive waiters bit anyway.
 286 		 * Please note that the lk_exslpfail count may be lying about
 287 		 * the real number of waiters with the LK_SLEEPFAIL flag on,
 288 		 * because they may be used in conjunction with interruptible
 289 		 * sleeps, so lk_exslpfail should be considered an 'upper
 290 		 * limit' bound, including the edge cases.
291 */
292 realexslp = sleepq_sleepcnt(&lk->lock_object,
293 SQ_EXCLUSIVE_QUEUE);
294 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
295 if (lk->lk_exslpfail < realexslp) {
296 lk->lk_exslpfail = 0;
297 queue = SQ_EXCLUSIVE_QUEUE;
298 v |= (x & LK_SHARED_WAITERS);
299 } else {
300 lk->lk_exslpfail = 0;
301 LOCK_LOG2(lk,
302 "%s: %p has only LK_SLEEPFAIL sleepers",
303 __func__, lk);
304 LOCK_LOG2(lk,
305 "%s: %p waking up threads on the exclusive queue",
306 __func__, lk);
307 wakeup_swapper =
308 sleepq_broadcast(&lk->lock_object,
309 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
310 queue = SQ_SHARED_QUEUE;
311 }
312
313 } else {
314
315 /*
316 * Exclusive waiters sleeping with LK_SLEEPFAIL on
317 * and using interruptible sleeps/timeout may have
 318 			 * left spurious lk_exslpfail counts behind, so
 319 			 * clean them up anyway.
320 */
321 lk->lk_exslpfail = 0;
322 queue = SQ_SHARED_QUEUE;
323 }
324
325 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
326 v)) {
327 sleepq_release(&lk->lock_object);
328 continue;
329 }
330 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
331 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
332 "exclusive");
333 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
334 0, queue);
335 sleepq_release(&lk->lock_object);
336 break;
337 }
338
339 lock_profile_release_lock(&lk->lock_object);
340 TD_LOCKS_DEC(curthread);
341 TD_SLOCKS_DEC(curthread);
342 return (wakeup_swapper);
343 }
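/*
 * Editor's note (not part of the original file): wakeupshlk() returns the
 * "wakeup swapper" indication from sleepq_broadcast() instead of acting on
 * it; callers such as __lockmgr_args() defer the kick_proc0() call until
 * all sleepqueue locks have been dropped.
 */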
344
345 static void
346 assert_lockmgr(const struct lock_object *lock, int what)
347 {
348
349 panic("lockmgr locks do not support assertions");
350 }
351
352 static void
353 lock_lockmgr(struct lock_object *lock, uintptr_t how)
354 {
355
356 panic("lockmgr locks do not support sleep interlocking");
357 }
358
359 static uintptr_t
360 unlock_lockmgr(struct lock_object *lock)
361 {
362
363 panic("lockmgr locks do not support sleep interlocking");
364 }
365
366 #ifdef KDTRACE_HOOKS
367 static int
368 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
369 {
370
371 panic("lockmgr locks do not support owner inquiring");
372 }
373 #endif
374
375 void
376 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
377 {
378 int iflags;
379
380 MPASS((flags & ~LK_INIT_MASK) == 0);
381 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
382 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
383 &lk->lk_lock));
384
385 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
386 if (flags & LK_CANRECURSE)
387 iflags |= LO_RECURSABLE;
388 if ((flags & LK_NODUP) == 0)
389 iflags |= LO_DUPOK;
390 if (flags & LK_NOPROFILE)
391 iflags |= LO_NOPROFILE;
392 if ((flags & LK_NOWITNESS) == 0)
393 iflags |= LO_WITNESS;
394 if (flags & LK_QUIET)
395 iflags |= LO_QUIET;
396 if (flags & LK_IS_VNODE)
397 iflags |= LO_IS_VNODE;
398 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
399
400 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
401 lk->lk_lock = LK_UNLOCKED;
402 lk->lk_recurse = 0;
403 lk->lk_exslpfail = 0;
404 lk->lk_timo = timo;
405 lk->lk_pri = pri;
406 STACK_ZERO(lk);
407 }
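/*
 * Editor's sketch (not part of the original file): a typical life cycle of
 * a lockmgr lock, assuming the lockmgr(9) convenience macro from
 * <sys/lockmgr.h> that supplies the default wmesg/priority/timeout:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 *	if (lockmgr(&lk, LK_EXCLUSIVE, NULL) == 0) {
 *		... access the protected data ...
 *		lockmgr(&lk, LK_RELEASE, NULL);
 *	}
 *	lockdestroy(&lk);
 */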
408
409 /*
410 * XXX: Gross hacks to manipulate external lock flags after
411 * initialization. Used for certain vnode and buf locks.
412 */
413 void
414 lockallowshare(struct lock *lk)
415 {
416
417 lockmgr_assert(lk, KA_XLOCKED);
418 lk->lock_object.lo_flags &= ~LK_NOSHARE;
419 }
420
421 void
422 lockdisableshare(struct lock *lk)
423 {
424
425 lockmgr_assert(lk, KA_XLOCKED);
426 lk->lock_object.lo_flags |= LK_NOSHARE;
427 }
428
429 void
430 lockallowrecurse(struct lock *lk)
431 {
432
433 lockmgr_assert(lk, KA_XLOCKED);
434 lk->lock_object.lo_flags |= LO_RECURSABLE;
435 }
436
437 void
438 lockdisablerecurse(struct lock *lk)
439 {
440
441 lockmgr_assert(lk, KA_XLOCKED);
442 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
443 }
444
445 void
446 lockdestroy(struct lock *lk)
447 {
448
449 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
450 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
451 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
452 lock_destroy(&lk->lock_object);
453 }
454
455 int
456 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
457 const char *wmesg, int pri, int timo, const char *file, int line)
458 {
459 GIANT_DECLARE;
460 struct lock_class *class;
461 const char *iwmesg;
462 uintptr_t tid, v, x;
463 u_int op, realexslp;
464 int error, ipri, itimo, queue, wakeup_swapper;
465 #ifdef LOCK_PROFILING
466 uint64_t waittime = 0;
467 int contested = 0;
468 #endif
469 #ifdef ADAPTIVE_LOCKMGRS
470 volatile struct thread *owner;
471 u_int i, spintries = 0;
472 #endif
473
474 error = 0;
475 tid = (uintptr_t)curthread;
476 op = (flags & LK_TYPE_MASK);
477 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
478 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
479 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
480
481 MPASS((flags & ~LK_TOTAL_MASK) == 0);
482 KASSERT((op & (op - 1)) == 0,
483 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
484 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
485 (op != LK_DOWNGRADE && op != LK_RELEASE),
486 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
487 __func__, file, line));
488 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
489 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
490 __func__, file, line));
491 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
492 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
493 lk->lock_object.lo_name, file, line));
494
495 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
496 if (panicstr != NULL) {
497 if (flags & LK_INTERLOCK)
498 class->lc_unlock(ilk);
499 return (0);
500 }
501
502 if (lk->lock_object.lo_flags & LK_NOSHARE) {
503 switch (op) {
504 case LK_SHARED:
505 op = LK_EXCLUSIVE;
506 break;
507 case LK_UPGRADE:
508 case LK_TRYUPGRADE:
509 case LK_DOWNGRADE:
510 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
511 file, line);
512 if (flags & LK_INTERLOCK)
513 class->lc_unlock(ilk);
514 return (0);
515 }
516 }
517
518 wakeup_swapper = 0;
519 switch (op) {
520 case LK_SHARED:
521 if (LK_CAN_WITNESS(flags))
522 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
523 file, line, flags & LK_INTERLOCK ? ilk : NULL);
524 for (;;) {
525 x = lk->lk_lock;
526
527 /*
528 * If no other thread has an exclusive lock, or
529 * no exclusive waiter is present, bump the count of
530 * sharers. Since we have to preserve the state of
 531 			 * waiters, if we fail to acquire the shared lock,
 532 			 * we loop back and retry.
533 */
534 if (LK_CAN_SHARE(x)) {
535 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
536 x + LK_ONE_SHARER))
537 break;
538 continue;
539 }
540 #ifdef HWPMC_HOOKS
541 PMC_SOFT_CALL( , , lock, failed);
542 #endif
543 lock_profile_obtain_lock_failed(&lk->lock_object,
544 &contested, &waittime);
545
546 /*
 547 			 * If the lock is already held by curthread in
 548 			 * exclusive mode, fail in order to avoid a deadlock.
549 */
550 if (LK_HOLDER(x) == tid) {
551 LOCK_LOG2(lk,
552 "%s: %p already held in exclusive mode",
553 __func__, lk);
554 error = EDEADLK;
555 break;
556 }
557
558 /*
 559 			 * If the lock operation must not sleep, just give up
560 * and return.
561 */
562 if (LK_TRYOP(flags)) {
563 LOCK_LOG2(lk, "%s: %p fails the try operation",
564 __func__, lk);
565 error = EBUSY;
566 break;
567 }
568
569 #ifdef ADAPTIVE_LOCKMGRS
570 /*
571 * If the owner is running on another CPU, spin until
572 * the owner stops running or the state of the lock
 573 			 * changes.  We must handle two states here because,
 574 			 * after a failed acquisition, the lock can be held
 575 			 * either in exclusive mode or in shared mode (due to
 576 			 * the writer starvation avoidance technique).
577 */
578 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
579 LK_HOLDER(x) != LK_KERNPROC) {
580 owner = (struct thread *)LK_HOLDER(x);
581 if (LOCK_LOG_TEST(&lk->lock_object, 0))
582 CTR3(KTR_LOCK,
583 "%s: spinning on %p held by %p",
584 __func__, lk, owner);
585
586 /*
 587 				 * If we are also holding an interlock, drop it
588 * in order to avoid a deadlock if the lockmgr
589 * owner is adaptively spinning on the
590 * interlock itself.
591 */
592 if (flags & LK_INTERLOCK) {
593 class->lc_unlock(ilk);
594 flags &= ~LK_INTERLOCK;
595 }
596 GIANT_SAVE();
597 while (LK_HOLDER(lk->lk_lock) ==
598 (uintptr_t)owner && TD_IS_RUNNING(owner))
599 cpu_spinwait();
600 GIANT_RESTORE();
601 continue;
602 } else if (LK_CAN_ADAPT(lk, flags) &&
603 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
604 spintries < alk_retries) {
605 if (flags & LK_INTERLOCK) {
606 class->lc_unlock(ilk);
607 flags &= ~LK_INTERLOCK;
608 }
609 GIANT_SAVE();
610 spintries++;
611 for (i = 0; i < alk_loops; i++) {
612 if (LOCK_LOG_TEST(&lk->lock_object, 0))
613 CTR4(KTR_LOCK,
614 "%s: shared spinning on %p with %u and %u",
615 __func__, lk, spintries, i);
616 x = lk->lk_lock;
617 if ((x & LK_SHARE) == 0 ||
618 LK_CAN_SHARE(x) != 0)
619 break;
620 cpu_spinwait();
621 }
622 GIANT_RESTORE();
623 if (i != alk_loops)
624 continue;
625 }
626 #endif
627
628 /*
629 * Acquire the sleepqueue chain lock because we
 630 			 * probably will need to manipulate waiters flags.
631 */
632 sleepq_lock(&lk->lock_object);
633 x = lk->lk_lock;
634
635 /*
 636 			 * If the lock can be acquired in shared mode, try
637 * again.
638 */
639 if (LK_CAN_SHARE(x)) {
640 sleepq_release(&lk->lock_object);
641 continue;
642 }
643
644 #ifdef ADAPTIVE_LOCKMGRS
645 /*
646 * The current lock owner might have started executing
647 * on another CPU (or the lock could have changed
 648 			 * owner) while we were waiting on the sleepqueue
 649 			 * chain lock.  If so, drop the sleepqueue lock and try
650 * again.
651 */
652 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
653 LK_HOLDER(x) != LK_KERNPROC) {
654 owner = (struct thread *)LK_HOLDER(x);
655 if (TD_IS_RUNNING(owner)) {
656 sleepq_release(&lk->lock_object);
657 continue;
658 }
659 }
660 #endif
661
662 /*
663 * Try to set the LK_SHARED_WAITERS flag. If we fail,
664 * loop back and retry.
665 */
666 if ((x & LK_SHARED_WAITERS) == 0) {
667 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
668 x | LK_SHARED_WAITERS)) {
669 sleepq_release(&lk->lock_object);
670 continue;
671 }
672 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
673 __func__, lk);
674 }
675
676 /*
 677 			 * Since we have been unable to acquire the
678 * shared lock and the shared waiters flag is set,
679 * we will sleep.
680 */
681 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
682 SQ_SHARED_QUEUE);
683 flags &= ~LK_INTERLOCK;
684 if (error) {
685 LOCK_LOG3(lk,
686 "%s: interrupted sleep for %p with %d",
687 __func__, lk, error);
688 break;
689 }
690 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
691 __func__, lk);
692 }
693 if (error == 0) {
694 lock_profile_obtain_lock_success(&lk->lock_object,
695 contested, waittime, file, line);
696 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
697 line);
698 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
699 line);
700 TD_LOCKS_INC(curthread);
701 TD_SLOCKS_INC(curthread);
702 STACK_SAVE(lk);
703 }
704 break;
705 case LK_UPGRADE:
706 case LK_TRYUPGRADE:
707 _lockmgr_assert(lk, KA_SLOCKED, file, line);
708 v = lk->lk_lock;
709 x = v & LK_ALL_WAITERS;
710 v &= LK_EXCLUSIVE_SPINNERS;
711
712 /*
713 * Try to switch from one shared lock to an exclusive one.
714 * We need to preserve waiters flags during the operation.
715 */
716 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
717 tid | x)) {
718 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
719 line);
720 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
721 LK_TRYWIT(flags), file, line);
722 TD_SLOCKS_DEC(curthread);
723 break;
724 }
725
726 /*
727 * In LK_TRYUPGRADE mode, do not drop the lock,
728 * returning EBUSY instead.
729 */
730 if (op == LK_TRYUPGRADE) {
731 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
732 __func__, lk);
733 error = EBUSY;
734 break;
735 }
736
737 /*
 738 		 * We have been unable to upgrade, so just
739 * give up the shared lock.
740 */
741 wakeup_swapper |= wakeupshlk(lk, file, line);
742
743 /* FALLTHROUGH */
744 case LK_EXCLUSIVE:
745 if (LK_CAN_WITNESS(flags))
746 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
747 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
748 ilk : NULL);
749
750 /*
751 * If curthread already holds the lock and this one is
752 * allowed to recurse, simply recurse on it.
753 */
754 if (lockmgr_xlocked(lk)) {
755 if ((flags & LK_CANRECURSE) == 0 &&
756 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
757
758 /*
 759 				 * If this is a try operation, just give up
 760 				 * and return instead of panicking.
761 */
762 if (LK_TRYOP(flags)) {
763 LOCK_LOG2(lk,
764 "%s: %p fails the try operation",
765 __func__, lk);
766 error = EBUSY;
767 break;
768 }
769 if (flags & LK_INTERLOCK)
770 class->lc_unlock(ilk);
771 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
772 __func__, iwmesg, file, line);
773 }
774 lk->lk_recurse++;
775 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
776 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
777 lk->lk_recurse, file, line);
778 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
779 LK_TRYWIT(flags), file, line);
780 TD_LOCKS_INC(curthread);
781 break;
782 }
783
784 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
785 tid)) {
786 #ifdef HWPMC_HOOKS
787 PMC_SOFT_CALL( , , lock, failed);
788 #endif
789 lock_profile_obtain_lock_failed(&lk->lock_object,
790 &contested, &waittime);
791
792 /*
 793 			 * If the lock operation must not sleep, just give up
794 * and return.
795 */
796 if (LK_TRYOP(flags)) {
797 LOCK_LOG2(lk, "%s: %p fails the try operation",
798 __func__, lk);
799 error = EBUSY;
800 break;
801 }
802
803 #ifdef ADAPTIVE_LOCKMGRS
804 /*
805 * If the owner is running on another CPU, spin until
806 * the owner stops running or the state of the lock
807 * changes.
808 */
809 x = lk->lk_lock;
810 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
811 LK_HOLDER(x) != LK_KERNPROC) {
812 owner = (struct thread *)LK_HOLDER(x);
813 if (LOCK_LOG_TEST(&lk->lock_object, 0))
814 CTR3(KTR_LOCK,
815 "%s: spinning on %p held by %p",
816 __func__, lk, owner);
817
818 /*
 819 				 * If we are also holding an interlock, drop it
820 * in order to avoid a deadlock if the lockmgr
821 * owner is adaptively spinning on the
822 * interlock itself.
823 */
824 if (flags & LK_INTERLOCK) {
825 class->lc_unlock(ilk);
826 flags &= ~LK_INTERLOCK;
827 }
828 GIANT_SAVE();
829 while (LK_HOLDER(lk->lk_lock) ==
830 (uintptr_t)owner && TD_IS_RUNNING(owner))
831 cpu_spinwait();
832 GIANT_RESTORE();
833 continue;
834 } else if (LK_CAN_ADAPT(lk, flags) &&
835 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
836 spintries < alk_retries) {
837 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
838 !atomic_cmpset_ptr(&lk->lk_lock, x,
839 x | LK_EXCLUSIVE_SPINNERS))
840 continue;
841 if (flags & LK_INTERLOCK) {
842 class->lc_unlock(ilk);
843 flags &= ~LK_INTERLOCK;
844 }
845 GIANT_SAVE();
846 spintries++;
847 for (i = 0; i < alk_loops; i++) {
848 if (LOCK_LOG_TEST(&lk->lock_object, 0))
849 CTR4(KTR_LOCK,
850 "%s: shared spinning on %p with %u and %u",
851 __func__, lk, spintries, i);
852 if ((lk->lk_lock &
853 LK_EXCLUSIVE_SPINNERS) == 0)
854 break;
855 cpu_spinwait();
856 }
857 GIANT_RESTORE();
858 if (i != alk_loops)
859 continue;
860 }
861 #endif
862
863 /*
864 * Acquire the sleepqueue chain lock because we
 865 			 * probably will need to manipulate waiters flags.
866 */
867 sleepq_lock(&lk->lock_object);
868 x = lk->lk_lock;
869
870 /*
 871 			 * If the lock has been released while we spun on
 872 			 * the sleepqueue chain lock, just try again.
873 */
874 if (x == LK_UNLOCKED) {
875 sleepq_release(&lk->lock_object);
876 continue;
877 }
878
879 #ifdef ADAPTIVE_LOCKMGRS
880 /*
881 * The current lock owner might have started executing
882 * on another CPU (or the lock could have changed
 883 			 * owner) while we were waiting on the sleepqueue
 884 			 * chain lock.  If so, drop the sleepqueue lock and try
885 * again.
886 */
887 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
888 LK_HOLDER(x) != LK_KERNPROC) {
889 owner = (struct thread *)LK_HOLDER(x);
890 if (TD_IS_RUNNING(owner)) {
891 sleepq_release(&lk->lock_object);
892 continue;
893 }
894 }
895 #endif
896
897 /*
898 * The lock can be in the state where there is a
899 * pending queue of waiters, but still no owner.
900 * This happens when the lock is contested and an
901 * owner is going to claim the lock.
 902 			 * If curthread is the one that successfully acquires
 903 			 * it, claim lock ownership and return, preserving the
 904 			 * waiters flags.
905 */
906 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
907 if ((x & ~v) == LK_UNLOCKED) {
908 v &= ~LK_EXCLUSIVE_SPINNERS;
909 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
910 tid | v)) {
911 sleepq_release(&lk->lock_object);
912 LOCK_LOG2(lk,
913 "%s: %p claimed by a new writer",
914 __func__, lk);
915 break;
916 }
917 sleepq_release(&lk->lock_object);
918 continue;
919 }
920
921 /*
922 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
923 * fail, loop back and retry.
924 */
925 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
926 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
927 x | LK_EXCLUSIVE_WAITERS)) {
928 sleepq_release(&lk->lock_object);
929 continue;
930 }
931 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
932 __func__, lk);
933 }
934
935 /*
 936 			 * Since we have been unable to acquire the
937 * exclusive lock and the exclusive waiters flag
938 * is set, we will sleep.
939 */
940 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
941 SQ_EXCLUSIVE_QUEUE);
942 flags &= ~LK_INTERLOCK;
943 if (error) {
944 LOCK_LOG3(lk,
945 "%s: interrupted sleep for %p with %d",
946 __func__, lk, error);
947 break;
948 }
949 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
950 __func__, lk);
951 }
952 if (error == 0) {
953 lock_profile_obtain_lock_success(&lk->lock_object,
954 contested, waittime, file, line);
955 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
956 lk->lk_recurse, file, line);
957 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
958 LK_TRYWIT(flags), file, line);
959 TD_LOCKS_INC(curthread);
960 STACK_SAVE(lk);
961 }
962 break;
963 case LK_DOWNGRADE:
964 _lockmgr_assert(lk, KA_XLOCKED, file, line);
965 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
966 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
967
968 /*
969 * Panic if the lock is recursed.
970 */
971 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
972 if (flags & LK_INTERLOCK)
973 class->lc_unlock(ilk);
974 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
975 __func__, iwmesg, file, line);
976 }
977 TD_SLOCKS_INC(curthread);
978
979 /*
980 * In order to preserve waiters flags, just spin.
981 */
982 for (;;) {
983 x = lk->lk_lock;
984 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
985 x &= LK_ALL_WAITERS;
986 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
987 LK_SHARERS_LOCK(1) | x))
988 break;
989 cpu_spinwait();
990 }
991 break;
992 case LK_RELEASE:
993 _lockmgr_assert(lk, KA_LOCKED, file, line);
994 x = lk->lk_lock;
995
996 if ((x & LK_SHARE) == 0) {
997
998 /*
 999 			 * As a first option, treat the lock as if it has no
1000 			 * waiters.
1001 			 * Fix up the tid variable if the lock has been disowned.
1002 */
1003 if (LK_HOLDER(x) == LK_KERNPROC)
1004 tid = LK_KERNPROC;
1005 else {
1006 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
1007 file, line);
1008 TD_LOCKS_DEC(curthread);
1009 }
1010 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1011 lk->lk_recurse, file, line);
1012
1013 /*
1014 * The lock is held in exclusive mode.
1015 			 * If the lock is also recursed, then unrecurse it.
1016 */
1017 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1018 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1019 lk);
1020 lk->lk_recurse--;
1021 break;
1022 }
1023 if (tid != LK_KERNPROC)
1024 lock_profile_release_lock(&lk->lock_object);
1025
1026 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1027 LK_UNLOCKED))
1028 break;
1029
1030 sleepq_lock(&lk->lock_object);
1031 x = lk->lk_lock;
1032 v = LK_UNLOCKED;
1033
1034 /*
1035 			 * If the lock has exclusive waiters, give them
1036 			 * preference in order to avoid deadlock with
1037 			 * shared runners-up.
1038 			 * If interruptible sleeps left the exclusive queue
1039 			 * empty, avoid starvation of the threads sleeping
1040 			 * on the shared queue by giving them precedence
1041 			 * and cleaning up the exclusive waiters bit anyway.
1042 			 * Please note that the lk_exslpfail count may be
1043 			 * lying about the real number of waiters with the
1044 			 * LK_SLEEPFAIL flag on, because they may be used in
1045 			 * conjunction with interruptible sleeps, so
1046 			 * lk_exslpfail should be considered an 'upper limit'
1047 			 * bound, including the edge cases.
1048 */
1049 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1050 realexslp = sleepq_sleepcnt(&lk->lock_object,
1051 SQ_EXCLUSIVE_QUEUE);
1052 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1053 if (lk->lk_exslpfail < realexslp) {
1054 lk->lk_exslpfail = 0;
1055 queue = SQ_EXCLUSIVE_QUEUE;
1056 v |= (x & LK_SHARED_WAITERS);
1057 } else {
1058 lk->lk_exslpfail = 0;
1059 LOCK_LOG2(lk,
1060 "%s: %p has only LK_SLEEPFAIL sleepers",
1061 __func__, lk);
1062 LOCK_LOG2(lk,
1063 "%s: %p waking up threads on the exclusive queue",
1064 __func__, lk);
1065 wakeup_swapper =
1066 sleepq_broadcast(&lk->lock_object,
1067 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1068 queue = SQ_SHARED_QUEUE;
1069 }
1070 } else {
1071
1072 /*
1073 * Exclusive waiters sleeping with LK_SLEEPFAIL
1074 * on and using interruptible sleeps/timeout
1075 				 * may have left spurious lk_exslpfail counts
1076 				 * behind, so clean them up anyway.
1077 */
1078 lk->lk_exslpfail = 0;
1079 queue = SQ_SHARED_QUEUE;
1080 }
1081
1082 LOCK_LOG3(lk,
1083 "%s: %p waking up threads on the %s queue",
1084 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1085 "exclusive");
1086 atomic_store_rel_ptr(&lk->lk_lock, v);
1087 wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1088 SLEEPQ_LK, 0, queue);
1089 sleepq_release(&lk->lock_object);
1090 break;
1091 } else
1092 wakeup_swapper = wakeupshlk(lk, file, line);
1093 break;
1094 case LK_DRAIN:
1095 if (LK_CAN_WITNESS(flags))
1096 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1097 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1098 ilk : NULL);
1099
1100 /*
1101 * Trying to drain a lock we already own will result in a
1102 * deadlock.
1103 */
1104 if (lockmgr_xlocked(lk)) {
1105 if (flags & LK_INTERLOCK)
1106 class->lc_unlock(ilk);
1107 panic("%s: draining %s with the lock held @ %s:%d\n",
1108 __func__, iwmesg, file, line);
1109 }
1110
1111 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1112 #ifdef HWPMC_HOOKS
1113 PMC_SOFT_CALL( , , lock, failed);
1114 #endif
1115 lock_profile_obtain_lock_failed(&lk->lock_object,
1116 &contested, &waittime);
1117
1118 /*
1119 			 * If the lock operation must not sleep, just give up
1120 * and return.
1121 */
1122 if (LK_TRYOP(flags)) {
1123 LOCK_LOG2(lk, "%s: %p fails the try operation",
1124 __func__, lk);
1125 error = EBUSY;
1126 break;
1127 }
1128
1129 /*
1130 * Acquire the sleepqueue chain lock because we
1131 			 * probably will need to manipulate waiters flags.
1132 */
1133 sleepq_lock(&lk->lock_object);
1134 x = lk->lk_lock;
1135
1136 /*
1137 			 * If the lock has been released while we spun on
1138 			 * the sleepqueue chain lock, just try again.
1139 */
1140 if (x == LK_UNLOCKED) {
1141 sleepq_release(&lk->lock_object);
1142 continue;
1143 }
1144
1145 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1146 if ((x & ~v) == LK_UNLOCKED) {
1147 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1148
1149 /*
1150 				 * If interruptible sleeps left the exclusive
1151 				 * queue empty, avoid starvation of the
1152 				 * threads sleeping on the shared queue by
1153 				 * giving them precedence and cleaning up the
1154 				 * exclusive waiters bit anyway.
1155 				 * Please note that the lk_exslpfail count may
1156 				 * be lying about the real number of waiters
1157 				 * with the LK_SLEEPFAIL flag on, because they
1158 				 * may be used in conjunction with
1159 				 * interruptible sleeps, so lk_exslpfail
1160 				 * should be considered an 'upper limit'
1161 				 * bound, including the edge cases.
1162 */
1163 if (v & LK_EXCLUSIVE_WAITERS) {
1164 queue = SQ_EXCLUSIVE_QUEUE;
1165 v &= ~LK_EXCLUSIVE_WAITERS;
1166 } else {
1167
1168 /*
1169 * Exclusive waiters sleeping with
1170 * LK_SLEEPFAIL on and using
1171 * interruptible sleeps/timeout may
1172 					 * have left spurious lk_exslpfail
1173 					 * counts behind, so clean them up anyway.
1174 */
1175 MPASS(v & LK_SHARED_WAITERS);
1176 lk->lk_exslpfail = 0;
1177 queue = SQ_SHARED_QUEUE;
1178 v &= ~LK_SHARED_WAITERS;
1179 }
1180 if (queue == SQ_EXCLUSIVE_QUEUE) {
1181 realexslp =
1182 sleepq_sleepcnt(&lk->lock_object,
1183 SQ_EXCLUSIVE_QUEUE);
1184 if (lk->lk_exslpfail >= realexslp) {
1185 lk->lk_exslpfail = 0;
1186 queue = SQ_SHARED_QUEUE;
1187 v &= ~LK_SHARED_WAITERS;
1188 if (realexslp != 0) {
1189 LOCK_LOG2(lk,
1190 "%s: %p has only LK_SLEEPFAIL sleepers",
1191 __func__, lk);
1192 LOCK_LOG2(lk,
1193 "%s: %p waking up threads on the exclusive queue",
1194 __func__, lk);
1195 wakeup_swapper =
1196 sleepq_broadcast(
1197 &lk->lock_object,
1198 SLEEPQ_LK, 0,
1199 SQ_EXCLUSIVE_QUEUE);
1200 }
1201 } else
1202 lk->lk_exslpfail = 0;
1203 }
1204 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1205 sleepq_release(&lk->lock_object);
1206 continue;
1207 }
1208 LOCK_LOG3(lk,
1209 "%s: %p waking up all threads on the %s queue",
1210 __func__, lk, queue == SQ_SHARED_QUEUE ?
1211 "shared" : "exclusive");
1212 wakeup_swapper |= sleepq_broadcast(
1213 &lk->lock_object, SLEEPQ_LK, 0, queue);
1214
1215 /*
1216 				 * If shared waiters have been woken up, we
1217 				 * need to wait for one of them to acquire the
1218 				 * lock before setting the exclusive waiters
1219 				 * flag in order to avoid a deadlock.
1220 */
1221 if (queue == SQ_SHARED_QUEUE) {
1222 for (v = lk->lk_lock;
1223 (v & LK_SHARE) && !LK_SHARERS(v);
1224 v = lk->lk_lock)
1225 cpu_spinwait();
1226 }
1227 }
1228
1229 /*
1230 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1231 * fail, loop back and retry.
1232 */
1233 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1234 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1235 x | LK_EXCLUSIVE_WAITERS)) {
1236 sleepq_release(&lk->lock_object);
1237 continue;
1238 }
1239 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1240 __func__, lk);
1241 }
1242
1243 /*
1244 			 * Since we have been unable to acquire the
1245 * exclusive lock and the exclusive waiters flag
1246 * is set, we will sleep.
1247 */
1248 if (flags & LK_INTERLOCK) {
1249 class->lc_unlock(ilk);
1250 flags &= ~LK_INTERLOCK;
1251 }
1252 GIANT_SAVE();
1253 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1254 SQ_EXCLUSIVE_QUEUE);
1255 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1256 GIANT_RESTORE();
1257 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1258 __func__, lk);
1259 }
1260
1261 if (error == 0) {
1262 lock_profile_obtain_lock_success(&lk->lock_object,
1263 contested, waittime, file, line);
1264 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1265 lk->lk_recurse, file, line);
1266 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1267 LK_TRYWIT(flags), file, line);
1268 TD_LOCKS_INC(curthread);
1269 STACK_SAVE(lk);
1270 }
1271 break;
1272 default:
1273 if (flags & LK_INTERLOCK)
1274 class->lc_unlock(ilk);
1275 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1276 }
1277
1278 if (flags & LK_INTERLOCK)
1279 class->lc_unlock(ilk);
1280 if (wakeup_swapper)
1281 kick_proc0();
1282
1283 return (error);
1284 }
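/*
 * Editor's sketch (not part of the original file): the LK_INTERLOCK
 * convention implemented above.  The caller enters with the interlock held;
 * __lockmgr_args() drops it via class->lc_unlock() before any sleep and
 * never reacquires it, so on return the interlock is unlocked whether or
 * not the operation succeeded.  An illustrative caller, assuming the
 * lockmgr(9) macro, which takes the interlock as a struct mtx * (all names
 * hypothetical):
 *
 *	mtx_lock(&foo_interlock);
 *	... examine state protected by foo_interlock ...
 *	error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &foo_interlock);
 *	... foo_interlock is unlocked here, regardless of error ...
 */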
1285
1286 void
1287 _lockmgr_disown(struct lock *lk, const char *file, int line)
1288 {
1289 uintptr_t tid, x;
1290
1291 if (SCHEDULER_STOPPED())
1292 return;
1293
1294 tid = (uintptr_t)curthread;
1295 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1296
1297 /*
1298 * Panic if the lock is recursed.
1299 */
1300 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1301 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1302 __func__, file, line);
1303
1304 /*
1305 * If the owner is already LK_KERNPROC just skip the whole operation.
1306 */
1307 if (LK_HOLDER(lk->lk_lock) != tid)
1308 return;
1309 lock_profile_release_lock(&lk->lock_object);
1310 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1311 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1312 TD_LOCKS_DEC(curthread);
1313 STACK_SAVE(lk);
1314
1315 /*
1316 * In order to preserve waiters flags, just spin.
1317 */
1318 for (;;) {
1319 x = lk->lk_lock;
1320 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1321 x &= LK_ALL_WAITERS;
1322 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1323 LK_KERNPROC | x))
1324 return;
1325 cpu_spinwait();
1326 }
1327 }
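/*
 * Editor's note (not part of the original file): disowning stores
 * LK_KERNPROC in place of the owner tid, so a thread other than the one
 * that acquired the lock may later perform the LK_RELEASE.  The buffer
 * cache uses this, for instance, to start I/O on a buffer locked by one
 * thread and let the I/O completion path unlock it.
 */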
1328
1329 void
1330 lockmgr_printinfo(const struct lock *lk)
1331 {
1332 struct thread *td;
1333 uintptr_t x;
1334
1335 if (lk->lk_lock == LK_UNLOCKED)
1336 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1337 else if (lk->lk_lock & LK_SHARE)
1338 printf("lock type %s: SHARED (count %ju)\n",
1339 lk->lock_object.lo_name,
1340 (uintmax_t)LK_SHARERS(lk->lk_lock));
1341 else {
1342 td = lockmgr_xholder(lk);
1343 printf("lock type %s: EXCL by thread %p "
1344 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1345 td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1346 }
1347
1348 x = lk->lk_lock;
1349 if (x & LK_EXCLUSIVE_WAITERS)
1350 printf(" with exclusive waiters pending\n");
1351 if (x & LK_SHARED_WAITERS)
1352 printf(" with shared waiters pending\n");
1353 if (x & LK_EXCLUSIVE_SPINNERS)
1354 printf(" with exclusive spinners pending\n");
1355
1356 STACK_PRINT(lk);
1357 }
1358
1359 int
1360 lockstatus(const struct lock *lk)
1361 {
1362 uintptr_t v, x;
1363 int ret;
1364
1365 ret = LK_SHARED;
1366 x = lk->lk_lock;
1367 v = LK_HOLDER(x);
1368
1369 if ((x & LK_SHARE) == 0) {
1370 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1371 ret = LK_EXCLUSIVE;
1372 else
1373 ret = LK_EXCLOTHER;
1374 } else if (x == LK_UNLOCKED)
1375 ret = 0;
1376
1377 return (ret);
1378 }
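/*
 * Editor's note (not part of the original file): lockstatus() return
 * values, as computed above:
 *
 *	LK_EXCLUSIVE	held exclusively by curthread, or disowned
 *			(LK_KERNPROC)
 *	LK_EXCLOTHER	held exclusively by some other thread
 *	LK_SHARED	held in shared mode (by any thread)
 *	0		unlocked
 */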
1379
1380 #ifdef INVARIANT_SUPPORT
1381
1382 FEATURE(invariant_support,
1383 "Support for modules compiled with INVARIANTS option");
1384
1385 #ifndef INVARIANTS
1386 #undef _lockmgr_assert
1387 #endif
1388
1389 void
1390 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1391 {
1392 int slocked = 0;
1393
1394 if (panicstr != NULL)
1395 return;
1396 switch (what) {
1397 case KA_SLOCKED:
1398 case KA_SLOCKED | KA_NOTRECURSED:
1399 case KA_SLOCKED | KA_RECURSED:
1400 slocked = 1;
1401 case KA_LOCKED:
1402 case KA_LOCKED | KA_NOTRECURSED:
1403 case KA_LOCKED | KA_RECURSED:
1404 #ifdef WITNESS
1405
1406 /*
1407 * We cannot trust WITNESS if the lock is held in exclusive
1408 * mode and a call to lockmgr_disown() happened.
1409 * Workaround this skipping the check if the lock is held in
1410 	 * Work around this by skipping the check if the lock is held in
1411 */
1412 if (slocked || (lk->lk_lock & LK_SHARE)) {
1413 witness_assert(&lk->lock_object, what, file, line);
1414 break;
1415 }
1416 #endif
1417 if (lk->lk_lock == LK_UNLOCKED ||
1418 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1419 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1420 panic("Lock %s not %slocked @ %s:%d\n",
1421 lk->lock_object.lo_name, slocked ? "share" : "",
1422 file, line);
1423
1424 if ((lk->lk_lock & LK_SHARE) == 0) {
1425 if (lockmgr_recursed(lk)) {
1426 if (what & KA_NOTRECURSED)
1427 panic("Lock %s recursed @ %s:%d\n",
1428 lk->lock_object.lo_name, file,
1429 line);
1430 } else if (what & KA_RECURSED)
1431 panic("Lock %s not recursed @ %s:%d\n",
1432 lk->lock_object.lo_name, file, line);
1433 }
1434 break;
1435 case KA_XLOCKED:
1436 case KA_XLOCKED | KA_NOTRECURSED:
1437 case KA_XLOCKED | KA_RECURSED:
1438 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1439 panic("Lock %s not exclusively locked @ %s:%d\n",
1440 lk->lock_object.lo_name, file, line);
1441 if (lockmgr_recursed(lk)) {
1442 if (what & KA_NOTRECURSED)
1443 panic("Lock %s recursed @ %s:%d\n",
1444 lk->lock_object.lo_name, file, line);
1445 } else if (what & KA_RECURSED)
1446 panic("Lock %s not recursed @ %s:%d\n",
1447 lk->lock_object.lo_name, file, line);
1448 break;
1449 case KA_UNLOCKED:
1450 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1451 panic("Lock %s exclusively locked @ %s:%d\n",
1452 lk->lock_object.lo_name, file, line);
1453 break;
1454 default:
1455 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1456 line);
1457 }
1458 }
1459 #endif
1460
1461 #ifdef DDB
1462 int
1463 lockmgr_chain(struct thread *td, struct thread **ownerp)
1464 {
1465 struct lock *lk;
1466
1467 lk = td->td_wchan;
1468
1469 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1470 return (0);
1471 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1472 if (lk->lk_lock & LK_SHARE)
1473 db_printf("SHARED (count %ju)\n",
1474 (uintmax_t)LK_SHARERS(lk->lk_lock));
1475 else
1476 db_printf("EXCL\n");
1477 *ownerp = lockmgr_xholder(lk);
1478
1479 return (1);
1480 }
1481
1482 static void
1483 db_show_lockmgr(const struct lock_object *lock)
1484 {
1485 struct thread *td;
1486 const struct lock *lk;
1487
1488 lk = (const struct lock *)lock;
1489
1490 db_printf(" state: ");
1491 if (lk->lk_lock == LK_UNLOCKED)
1492 db_printf("UNLOCKED\n");
1493 else if (lk->lk_lock & LK_SHARE)
1494 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1495 else {
1496 td = lockmgr_xholder(lk);
1497 if (td == (struct thread *)LK_KERNPROC)
1498 db_printf("XLOCK: LK_KERNPROC\n");
1499 else
1500 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1501 td->td_tid, td->td_proc->p_pid,
1502 td->td_proc->p_comm);
1503 if (lockmgr_recursed(lk))
1504 db_printf(" recursed: %d\n", lk->lk_recurse);
1505 }
1506 db_printf(" waiters: ");
1507 switch (lk->lk_lock & LK_ALL_WAITERS) {
1508 case LK_SHARED_WAITERS:
1509 db_printf("shared\n");
1510 break;
1511 case LK_EXCLUSIVE_WAITERS:
1512 db_printf("exclusive\n");
1513 break;
1514 case LK_ALL_WAITERS:
1515 db_printf("shared and exclusive\n");
1516 break;
1517 default:
1518 db_printf("none\n");
1519 }
1520 db_printf(" spinners: ");
1521 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1522 db_printf("exclusive\n");
1523 else
1524 db_printf("none\n");
1525 }
1526 #endif