FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c
1 /*
2 * Copyright (c) 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Copyright (C) 1997
6 * John S. Dyson. All rights reserved.
7 *
8 * This code contains ideas from software contributed to Berkeley by
9 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
10 * System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
41 * $FreeBSD$
42 */
43
44 #include "opt_lint.h"
45
46 #include <sys/param.h>
47 #include <sys/proc.h>
48 #include <sys/lock.h>
49 #include <sys/systm.h>
50
51 /*
52 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
54 */
55
56 #ifdef SIMPLELOCK_DEBUG
57 #define COUNT(p, x) if (p) (p)->p_locks += (x)
58 #else
59 #define COUNT(p, x)
60 #endif
61
62 #define LOCK_WAIT_TIME 100
63 #define LOCK_SAMPLE_WAIT 7
64
65 #if defined(DIAGNOSTIC)
66 #define LOCK_INLINE
67 #else
68 #define LOCK_INLINE __inline
69 #endif
70
71 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
72 LK_SHARE_NONZERO | LK_WAIT_NONZERO)
73
74 static int acquire(struct lock *lkp, int extflags, int wanted);
75 static int apause(struct lock *lkp, int flags);
76 static int acquiredrain(struct lock *lkp, int extflags) ;
77
78 static LOCK_INLINE void
79 sharelock(struct lock *lkp, int incr) {
80 lkp->lk_flags |= LK_SHARE_NONZERO;
81 lkp->lk_sharecount += incr;
82 }
83
84 static LOCK_INLINE void
85 shareunlock(struct lock *lkp, int decr) {
86
87 KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
88
89 if (lkp->lk_sharecount == decr) {
90 lkp->lk_flags &= ~LK_SHARE_NONZERO;
91 if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
92 wakeup(lkp);
93 }
94 lkp->lk_sharecount = 0;
95 } else {
96 lkp->lk_sharecount -= decr;
97 }
98 }
99
100 /*
101 * This is the waitloop optimization, and note for this to work
102 * simple_lock and simple_unlock should be subroutines to avoid
103 * optimization troubles.
104 */
105 static int
106 apause(struct lock *lkp, int flags) {
107 int lock_wait;
108 lock_wait = LOCK_WAIT_TIME;
109 for (; lock_wait > 0; lock_wait--) {
110 int i;
111 if ((lkp->lk_flags & flags) == 0)
112 return 0;
113 simple_unlock(&lkp->lk_interlock);
114 for (i = LOCK_SAMPLE_WAIT; i > 0; i--) {
115 if ((lkp->lk_flags & flags) == 0) {
116 simple_lock(&lkp->lk_interlock);
117 if ((lkp->lk_flags & flags) == 0)
118 return 0;
119 break;
120 }
121 }
122 }
123 return 1;
124 }
125
/*
 * Sleep until none of the bits in "wanted" are set in lkp->lk_flags.
 *
 * Called with the interlock held; returns with it held.  Returns 0 on
 * success, EBUSY for a failed LK_NOWAIT poll, ENOLCK when LK_SLEEPFAIL
 * is set and we actually had to sleep, or the tsleep() error (e.g. from
 * a timeout or caught signal).
 */
static int
acquire(struct lock *lkp, int extflags, int wanted) {
	int s, error;

	/* Polling mode: fail immediately rather than blocking. */
	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	/* Unless pausing is disabled, try the spin-wait optimization first. */
	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		/*
		 * Register as a waiter before dropping the interlock so a
		 * releaser is guaranteed to see LK_WAIT_NONZERO and wake us.
		 */
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		/* Deregister; the last waiter out clears LK_WAIT_NONZERO. */
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		/* LK_SLEEPFAIL: caller wants ENOLCK after any sleep at all. */
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
	}
	splx(s);
	return 0;
}
165
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * The request type comes from the LK_TYPE_MASK bits of "flags".  The
 * lock's own interlock is held across the whole state change; when
 * LK_INTERLOCK is set, "interlkp" is released once that interlock is
 * taken.  Returns 0 on success or an errno (EBUSY, ENOLCK, or whatever
 * tsleep() reported).  With DEBUG_LOCKS the function is compiled as
 * debuglockmgr() and records the call site in the lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
#ifdef DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	/* A NULL process means a kernel-internal (LK_KERNPROC) requester. */
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);

	/* Sticky external flags on the lock combine with this request's. */
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * Already shared and the caller allows recursion:
			 * grant another shared reference immediately, even
			 * past pending exclusive requests.
			 */
			if ((lkp->lk_flags & LK_SHARE_NONZERO) != 0 &&
			    (flags & LK_CANRECURSE) != 0) {
				sharelock(lkp, 1);
				COUNT(p, 1);
				break;
			}
			/* Wait out exclusive holders/requesters first. */
			error = acquire(lkp, extflags,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
#if !defined(MAX_PERF)
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
#endif
		/* Each exclusive recursion becomes one shared reference. */
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
#if !defined(MAX_PERF)
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
#endif
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
#if !defined(MAX_PERF)
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
#endif
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
#if !defined(MAX_PERF)
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
#endif
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
#if !defined(MAX_PERF)
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
#endif
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
#if !defined(MAX_PERF)
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
#endif
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
#if !defined(MAX_PERF)
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
#endif

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	default:
#if !defined(MAX_PERF)
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
#endif
		/* NOTREACHED */
	}
	/*
	 * If a drainer is waiting and the lock just became completely
	 * free, wake it; drainers sleep on &lkp->lk_flags, not lkp.
	 */
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
436
/*
 * Wait until the lock is completely free (no holders or waiters of any
 * kind), as required by LK_DRAIN.  Called and returns with the
 * interlock held.  Note the sleep channel is &lkp->lk_flags rather than
 * lkp itself, so drain wakeups are distinct from ordinary lock wakeups.
 */
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	/* Polling mode: report EBUSY rather than sleeping. */
	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	/* Spin briefly first in the hope the lock frees up. */
	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(&lkp->lk_flags, lkp->lk_prio,
			lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		if (error)
			return error;
		/* LK_SLEEPFAIL: any sleep at all means fail with ENOLCK. */
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}
463
464 /*
465 * Initialize a lock; required before use.
466 */
467 void
468 lockinit(lkp, prio, wmesg, timo, flags)
469 struct lock *lkp;
470 int prio;
471 char *wmesg;
472 int timo;
473 int flags;
474 {
475
476 simple_lock_init(&lkp->lk_interlock);
477 lkp->lk_flags = (flags & LK_EXTFLG_MASK);
478 lkp->lk_sharecount = 0;
479 lkp->lk_waitcount = 0;
480 lkp->lk_exclusivecount = 0;
481 lkp->lk_prio = prio;
482 lkp->lk_wmesg = wmesg;
483 lkp->lk_timo = timo;
484 lkp->lk_lockholder = LK_NOPROC;
485 }
486
487 /*
488 * Determine the status of a lock.
489 */
490 int
491 lockstatus(lkp)
492 struct lock *lkp;
493 {
494 int lock_type = 0;
495
496 simple_lock(&lkp->lk_interlock);
497 if (lkp->lk_exclusivecount != 0)
498 lock_type = LK_EXCLUSIVE;
499 else if (lkp->lk_sharecount != 0)
500 lock_type = LK_SHARED;
501 simple_unlock(&lkp->lk_interlock);
502 return (lock_type);
503 }
504
505 /*
506 * Print out information about state of a lock. Used by VOP_PRINT
507 * routines to display status about contained locks.
508 */
509 void
510 lockmgr_printinfo(lkp)
511 struct lock *lkp;
512 {
513
514 if (lkp->lk_sharecount)
515 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
516 lkp->lk_sharecount);
517 else if (lkp->lk_flags & LK_HAVE_EXCL)
518 printf(" lock type %s: EXCL (count %d) by pid %d",
519 lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
520 if (lkp->lk_waitcount > 0)
521 printf(" with %d pending", lkp->lk_waitcount);
522 }
523
524 #if defined(SIMPLELOCK_DEBUG) && (NCPUS == 1 || defined(COMPILING_LINT))
525 #include <sys/kernel.h>
526 #include <sys/sysctl.h>
527
528 static int lockpausetime = 0;
529 SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");
530
531 static int simplelockrecurse;
532
533 /*
534 * Simple lock functions so that the debugger can see from whence
535 * they are being called.
536 */
537 void
538 simple_lock_init(alp)
539 struct simplelock *alp;
540 {
541
542 alp->lock_data = 0;
543 }
544
545 void
546 _simple_lock(alp, id, l)
547 struct simplelock *alp;
548 const char *id;
549 int l;
550 {
551
552 if (simplelockrecurse)
553 return;
554 if (alp->lock_data == 1) {
555 if (lockpausetime == -1)
556 panic("%s:%d: simple_lock: lock held", id, l);
557 printf("%s:%d: simple_lock: lock held\n", id, l);
558 if (lockpausetime == 1) {
559 Debugger("simple_lock");
560 /*BACKTRACE(curproc); */
561 } else if (lockpausetime > 1) {
562 printf("%s:%d: simple_lock: lock held...", id, l);
563 tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
564 lockpausetime * hz);
565 printf(" continuing\n");
566 }
567 }
568 alp->lock_data = 1;
569 if (curproc)
570 curproc->p_simple_locks++;
571 }
572
573 int
574 _simple_lock_try(alp, id, l)
575 struct simplelock *alp;
576 const char *id;
577 int l;
578 {
579
580 if (alp->lock_data)
581 return (0);
582 if (simplelockrecurse)
583 return (1);
584 alp->lock_data = 1;
585 if (curproc)
586 curproc->p_simple_locks++;
587 return (1);
588 }
589
590 void
591 _simple_unlock(alp, id, l)
592 struct simplelock *alp;
593 const char *id;
594 int l;
595 {
596
597 if (simplelockrecurse)
598 return;
599 if (alp->lock_data == 0) {
600 if (lockpausetime == -1)
601 panic("%s:%d: simple_unlock: lock not held", id, l);
602 printf("%s:%d: simple_unlock: lock not held\n", id, l);
603 if (lockpausetime == 1) {
604 Debugger("simple_unlock");
605 /* BACKTRACE(curproc); */
606 } else if (lockpausetime > 1) {
607 printf("%s:%d: simple_unlock: lock not held...", id, l);
608 tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
609 lockpausetime * hz);
610 printf(" continuing\n");
611 }
612 }
613 alp->lock_data = 0;
614 if (curproc)
615 curproc->p_simple_locks--;
616 }
617 #elif defined(SIMPLELOCK_DEBUG)
618 #error "SIMPLELOCK_DEBUG is not compatible with SMP!"
619 #endif /* SIMPLELOCK_DEBUG && NCPUS == 1 */
Cache object: fdcabcf6110f4134044474f40ce177e8
|