FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: releng/5.0/sys/kern/kern_lock.c 107414 2002-11-30 19:00:51Z mckusick $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define	LOCK_WAIT_TIME 100
#define	LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define	LOCK_INLINE
#else
#define	LOCK_INLINE __inline
#endif

#define	LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
int lock_mtx_valid;
static struct mtx lock_mtx;

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	/*
	 * Initialize the lockmgr protection mutex if it hasn't already been
	 * done.  Unless something changes about kernel startup order, VM
	 * initialization will always cause this mutex to already be
	 * initialized in a call to lockinit().
	 */
	if (lock_mtx_valid == 0) {
		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
		lock_mtx_valid = 1;
	}
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * The waitloop optimization: on SMP, spin briefly in the hope that the
 * bits in "flags" clear before we commit to a full sleep.  Returns 0
 * once the flags are clear, or 1 if the caller should go to sleep.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

/*
 * Sleep until none of the flag bits in "wanted" remain set on the lock.
 * The interlock is held on entry and on return.  If the lock is handed
 * off to a new lock while we sleep (lk_newlock, see transferlockers()),
 * follow it and update the caller's lock pointer.
 */
static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int s, error;

	CTR3(KTR_LOCKMGR,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, td)
#else
debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags, lockflags;

	CTR5(KTR_LOCKMGR,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, td == %p", lkp, lkp->lk_wmesg, flags, interlkp, td);

	error = 0;
	if (td == NULL)
		pid = LK_KERNPROC;
	else
		pid = td->td_proc->p_pid;

	mtx_lock(lkp->lk_interlock);
	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != pid) {
			lockflags = LK_HAVE_EXCL;
			mtx_lock_spin(&sched_lock);
			if (td != NULL && !(td->td_flags & TDF_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			mtx_unlock_spin(&sched_lock);
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(lkp, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_slockholder = pid;
			lkp->lk_sfilename = file;
			lkp->lk_slineno = line;
			lkp->lk_slockername = name;
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == pid && lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner pid (%d) != pid (%d), exclcnt (%d) != 0)",
		    lkp->lk_lockholder, pid, lkp->lk_exclusivecount));
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always have been released.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != pid &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
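
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller sequence for lockmgr(), assuming a lock "lk" already set up with
 * lockinit() (see below).  The use of curthread here is an assumption for
 * the example, not something this file prescribes.
 *
 *	struct thread *td = curthread;
 *
 *	(void) lockmgr(&lk, LK_SHARED, NULL, td);	// take a shared lock
 *	(void) lockmgr(&lk, LK_RELEASE, NULL, td);	// release it
 *
 *	(void) lockmgr(&lk, LK_SHARED, NULL, td);
 *	if (lockmgr(&lk, LK_UPGRADE, NULL, td) == 0)	// try for exclusive
 *		(void) lockmgr(&lk, LK_RELEASE, NULL, td);
 *	// On upgrade failure the shared lock has already been released,
 *	// so the caller must not release again.
 */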

/*
 * Wait until there is no activity of any kind on the lock, for LK_DRAIN
 * requests.
 */
static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0,
	    ("transfer draining lock"));
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, NULL, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
}
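
/*
 * Illustrative sketch (not part of the original file): transferlockers()
 * supports changing a lock's identity out from under its sleepers, e.g.
 * switching a vnode from its embedded lock to an external one.  The vnode
 * field names below are an assumption for the example:
 *
 *	transferlockers(&vp->v_lock, vp->v_vnlock);
 */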


/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags == 0x%x\n", lkp, prio, wmesg, timo, flags);

	if (lock_mtx_valid == 0) {
		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
		lock_mtx_valid = 1;
	}
	/*
	 * XXX cleanup - make sure mtxpool is always initialized before
	 * this is ever called.
	 */
	if (mtx_pool_valid) {
		mtx_lock(&lock_mtx);
		lkp->lk_interlock = mtx_pool_alloc();
		mtx_unlock(&lock_mtx);
	} else {
		lkp->lk_interlock = &lock_mtx;
	}
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	lkp->lk_filename = "none";
	lkp->lk_lockername = "never exclusive locked";
	lkp->lk_lineno = 0;
	lkp->lk_slockholder = LK_NOPROC;
	lkp->lk_sfilename = "none";
	lkp->lk_slockername = "never share locked";
	lkp->lk_slineno = 0;
#endif
}
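
/*
 * Illustrative sketch (not part of the original file): setting up and
 * tearing down a lock.  The PVFS priority and the wait message are
 * assumptions for the example.
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	...
 *	lockdestroy(&lk);
 */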

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td->td_proc->p_pid)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
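
/*
 * Illustrative sketch (not part of the original file): interpreting the
 * lockstatus() return value.
 *
 *	switch (lockstatus(&lk, curthread)) {
 *	case LK_EXCLUSIVE:	// exclusively locked by this process
 *		break;
 *	case LK_EXCLOTHER:	// exclusively locked by someone else
 *		break;
 *	case LK_SHARED:		// share locked
 *		break;
 *	case 0:			// unlocked
 *		break;
 *	}
 */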

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}