FreeBSD/Linux Kernel Cross Reference
sys/kern/lock.c
1 /*
2 * Mach Operating System
3  * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: lock.c,v $
29 * Revision 2.11 93/01/14 17:34:55 danner
30 * Added ANSI function prototypes.
31 * [92/12/30 dbg]
32 * 64bit cleanup.
33 * [92/12/01 af]
34 *
35 * Revision 2.10 92/08/03 17:38:01 jfriedl
36 * removed silly prototypes
37 * [92/08/02 jfriedl]
38 *
39 * Revision 2.9 92/05/21 17:14:27 jfriedl
40 * Added void to functions that yet needed it.
41 * [92/05/16 jfriedl]
42 *
43 * Revision 2.8 92/01/23 15:20:56 rpd
44 * Fixed lock_done and lock_read_to_write to only wakeup
45 * if the number of readers is zero (from Sanjay Nadkarni).
46 * Fixed db_show_all_slocks.
47 * [92/01/20 rpd]
48 *
49 * Revision 2.7 91/07/01 08:25:03 jsb
50 * Implemented db_show_all_slocks.
51 * [91/06/29 16:52:53 jsb]
52 *
53 * Revision 2.6 91/05/18 14:32:07 rpd
54 * Added simple_locks_info, to keep track of which locks are held.
55 * [91/05/02 rpd]
56 * Added check_simple_locks.
57 * [91/03/31 rpd]
58 *
59 * Revision 2.5 91/05/14 16:43:40 mrt
60 * Correcting copyright
61 *
62 * Revision 2.4 91/02/05 17:27:31 mrt
63 * Changed to new Mach copyright
64 * [91/02/01 16:14:31 mrt]
65 *
66 * Revision 2.3 90/06/02 14:54:56 rpd
67 * Allow the lock to be taken in simple_lock_try.
68 * [90/03/26 22:09:13 rpd]
69 *
70 * Revision 2.2 90/01/11 11:43:18 dbg
71 * Upgrade to mainline: use simple_lock_addr when calling
72 * thread_sleep.
73 * [89/12/11 dbg]
74 *
75 * Revision 2.1 89/08/03 15:45:34 rwd
76 * Created.
77 *
78 * Revision 2.3 88/09/25 22:14:45 rpd
79 * Changed explicit panics in sanity-checking simple locking calls
80 * to assertions.
81 * [88/09/12 23:03:22 rpd]
82 *
83 * Revision 2.2 88/07/20 16:35:47 rpd
84 * Add simple-locking sanity-checking code.
85 *
86 * 23-Jan-88 Richard Sanzi (sanzi) at Carnegie-Mellon University
87 * On a UNIPROCESSOR, set lock_wait_time = 0. There is no reason
88 * for a uni to spin on a complex lock.
89 *
90 * 18-Nov-87 Avadis Tevanian (avie) at Carnegie-Mellon University
91 * Eliminated previous history.
92 */
93 /*
94 * File: kern/lock.c
95 * Author: Avadis Tevanian, Jr., Michael Wayne Young
96 * Date: 1985
97 *
98 * Locking primitives implementation
99 */
100
101 #include <cpus.h>
102 #include <mach_kdb.h>
103
104 #include <kern/lock.h>
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #if MACH_KDB
108 #include <machine/db_machdep.h>
109 #include <ddb/db_sym.h>
110 #endif
111
112
113 #if NCPUS > 1
114
115 /*
116 * Module: lock
117 * Function:
118  *	Provide reader/writer synchronization.
119 * Implementation:
120 * Simple interlock on a bit. Readers first interlock,
121 * increment the reader count, then let go. Writers hold
122 * the interlock (thus preventing further readers), and
123 * wait for already-accepted readers to go away.
124 */
125
126 /*
127 * The simple-lock routines are the primitives out of which
128 * the lock package is built. The implementation is left
129 * to the machine-dependent code.
130 */
131
#ifdef	notdef
/*
 *	A sample implementation of simple locks.
 *	assumes:
 *		boolean_t test_and_set(boolean_t *)
 *			indivisibly sets the boolean to TRUE
 *			and returns its old value
 *	and that setting a boolean to FALSE is indivisible.
 */
/*
 *	simple_lock_init initializes a simple lock.  A simple lock
 *	may only be used for exclusive locks.
 */

/* Reset the lock word so the lock starts out released. */
void simple_lock_init(simple_lock_t l)
{
	*(boolean_t *)l = FALSE;
}

/* Spin until the atomic test-and-set observes the lock free. */
void simple_lock(simple_lock_t l)
{
	while (test_and_set((boolean_t *)l))
		continue;
}

/* Release: a plain store suffices given the indivisibility assumption. */
void simple_unlock(simple_lock_t l)
{
	*(boolean_t *)l = FALSE;
}

/* One attempt, no spinning: TRUE if the lock was free and is now held. */
boolean_t simple_lock_try(simple_lock_t l)
{
	return (!test_and_set((boolean_t *)l));
}
#endif	/* notdef */
167 #endif /* NCPUS > 1 */
168
#if	NCPUS > 1
/*
 *	Number of iterations to busy-wait on a contended complex
 *	lock (with the interlock dropped) before sleeping, on a
 *	multiprocessor.
 */
int lock_wait_time = 100;
#else	/* NCPUS > 1 */

/*
 *	It is silly to spin on a uni-processor as if we
 *	thought something magical would happen to the
 *	want_write bit while we are executing.
 */
int lock_wait_time = 0;
#endif	/* NCPUS > 1 */
180
181 #if MACH_SLOCKS && NCPUS == 1
182 /*
183 * This code does not protect simple_locks_taken and simple_locks_info.
184 * It works despite the fact that interrupt code does use simple locks.
185 * This is because interrupts use locks in a stack-like manner.
186 * Each interrupt releases all the locks it acquires, so the data
187 * structures end up in the same state after the interrupt as before.
188 * The only precaution necessary is that simple_locks_taken be
189 * incremented first and decremented last, so that interrupt handlers
190 * don't over-write active slots in simple_locks_info.
191 */
192
/* Number of simple locks currently held; also the count of live
   entries in simple_locks_info (incremented first, decremented last —
   see the comment above). */
unsigned int simple_locks_taken = 0;

#define NSLINFO 1000		/* maximum number of locks held */

/* Debugging record for each held simple lock. */
struct simple_locks_info {
	simple_lock_t l;	/* the lock itself */
	unsigned int ra;	/* taker's return address (i386 only) */
} simple_locks_info[NSLINFO];
201
/*
 *	Sanity check, called before operations that may block:
 *	assert that no simple locks are currently held.
 */
void check_simple_locks(void)
{
	assert(simple_locks_taken == 0);
}
206
/* Need simple lock sanity checking code if simple locks are being
   compiled in, and we are compiling for a uniprocessor. */

/*
 *	Initialize a simple lock to the released state.
 */
void simple_lock_init(
	simple_lock_t l)
{
	l->lock_data = 0;
}
215
/*
 *	Acquire a simple lock.  On a uniprocessor the lock must be
 *	free (holding it already would be a self-deadlock), so this
 *	just asserts, marks it held, and records the acquisition.
 */
void simple_lock(
	simple_lock_t l)
{
	struct simple_locks_info *info;

	assert(l->lock_data == 0);

	l->lock_data = 1;

	/* Record the acquisition in the debug table (stack-like). */
	info = &simple_locks_info[simple_locks_taken++];
	info->l = l;
	/* XXX we want our return address, if possible */
#ifdef	i386
	/* Fragile: reads the word just below the argument, which is
	   the caller's return address in the i386 calling convention. */
	info->ra = *((unsigned int *)&l - 1);
#endif	/* i386 */
}
232
/*
 *	Try to acquire a simple lock without blocking.
 *	Returns TRUE if the lock was free and is now held,
 *	FALSE if it was already held.
 */
boolean_t simple_lock_try(
	simple_lock_t l)
{
	struct simple_locks_info *info;

	if (l->lock_data != 0)
		return FALSE;

	l->lock_data = 1;

	/* Record the acquisition, exactly as simple_lock does. */
	info = &simple_locks_info[simple_locks_taken++];
	info->l = l;
	/* XXX we want our return address, if possible */
#ifdef	i386
	info->ra = *((unsigned int *)&l - 1);
#endif	/* i386 */

	return TRUE;
}
252
/*
 *	Release a simple lock and remove its entry from the debug
 *	table.  Locks are normally released in LIFO order; for an
 *	out-of-order release the entry is found by scanning backwards
 *	and its slot is refilled with the topmost entry so the table
 *	stays dense.  Panics if the lock is not recorded as held.
 */
void simple_unlock(
	simple_lock_t l)
{
	assert(l->lock_data != 0);

	l->lock_data = 0;

	if (simple_locks_info[simple_locks_taken-1].l != l) {
		unsigned int i = simple_locks_taken;

		/* out-of-order unlocking */

		do
			if (i == 0)
				panic("simple_unlock");
		while (simple_locks_info[--i].l != l);

		/* Plug the hole with the top entry. */
		simple_locks_info[i] = simple_locks_info[simple_locks_taken-1];
	}
	/* Decrement last, so an interrupt handler taking locks right
	   now never overwrites a slot that is still live (see the
	   comment at the top of this section). */
	simple_locks_taken--;
}
274
275 #endif /* MACH_SLOCKS && NCPUS == 1 */
276
277 /*
278 * Routine: lock_init
279 * Function:
280 * Initialize a lock; required before use.
281 * Note that clients declare the "struct lock"
282 * variables and then initialize them, rather
283 * than getting a new one from this module.
284 */
void lock_init(
	lock_t		l,
	boolean_t	can_sleep)
{
	/* Zero everything first, then set fields explicitly for clarity. */
	bzero((char *)l, sizeof(lock_data_t));
	simple_lock_init(&l->interlock);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;	/* may waiters sleep, or must they spin? */
	/* -1 is the "no recursive owner" sentinel (see lock_set_recursive). */
	l->thread = (struct thread *)-1;	/* XXX */
	l->recursion_depth = 0;
}
298
/*
 *	Change whether a lock may sleep when contended.  Done under
 *	the interlock so the change is atomic with respect to other
 *	operations on the lock.
 */
void lock_sleepable(
	lock_t		l,
	boolean_t	can_sleep)
{
	simple_lock(&l->interlock);
	l->can_sleep = can_sleep;
	simple_unlock(&l->interlock);
}
307
308
309 /*
310 * Sleep locks. These use the same data structure and algorithm
311 * as the spin locks, but the process sleeps while it is waiting
312 * for the lock. These work on uniprocessor systems.
313 */
314
/*
 *	Acquire the lock for exclusive (write) access.  Spins up to
 *	lock_wait_time iterations on contention, then sleeps if the
 *	lock is sleepable.  If the calling thread is the registered
 *	recursive owner (lock_set_recursive), just bumps the
 *	recursion depth and returns.
 */
void lock_write(
	register lock_t	l)
{
	register int	i;

	check_simple_locks();
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock.
		 */
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return;
	}

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (l->want_write) {
		if ((i = lock_wait_time) > 0) {
			/* Spin briefly with the interlock dropped. */
			simple_unlock(&l->interlock);
			while (--i > 0 && l->want_write)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->want_write) {
			l->waiting = TRUE;
			/* The interlock address is handed to thread_sleep
			   so it can be dropped atomically with the sleep;
			   it is re-taken below. */
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}
	/* want_write is ours: no new readers or writers may enter. */
	l->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

	while ((l->read_count != 0) || l->want_upgrade) {
		if ((i = lock_wait_time) > 0) {
			simple_unlock(&l->interlock);
			while (--i > 0 && (l->read_count != 0 ||
					l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}
	simple_unlock(&l->interlock);
}
372
/*
 *	Release one level of the lock: a read lock if any readers,
 *	else a recursion level, else an upgrade, else the write lock.
 */
void lock_done(
	register lock_t	l)
{
	simple_lock(&l->interlock);

	if (l->read_count != 0)
		l->read_count--;
	else
	if (l->recursion_depth != 0)
		l->recursion_depth--;
	else
	if (l->want_upgrade)
		l->want_upgrade = FALSE;
	else
		l->want_write = FALSE;

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (l->waiting && (l->read_count == 0)) {
		l->waiting = FALSE;
		thread_wakeup(l);
	}

	simple_unlock(&l->interlock);
}
404
/*
 *	Acquire the lock for shared (read) access.  Spins and/or
 *	sleeps on contention like lock_write.  A registered recursive
 *	owner just gains another read reference.
 */
void lock_read(
	register lock_t	l)
{
	register int	i;

	check_simple_locks();
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock.
		 */
		l->read_count++;
		simple_unlock(&l->interlock);
		return;
	}

	/* Writers and pending upgrades have priority over new readers. */
	while (l->want_write || l->want_upgrade) {
		if ((i = lock_wait_time) > 0) {
			simple_unlock(&l->interlock);
			while (--i > 0 && (l->want_write || l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->want_write || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}

	l->read_count++;
	simple_unlock(&l->interlock);
}
441
442 /*
443 * Routine: lock_read_to_write
444 * Function:
445 * Improves a read-only lock to one with
446 * write permission. If another reader has
447 * already requested an upgrade to a write lock,
448 * no lock is held upon return.
449 *
450 * Returns TRUE if the upgrade *failed*.
451 */
boolean_t lock_read_to_write(
	register lock_t	l)
{
	register int	i;

	check_simple_locks();
	simple_lock(&l->interlock);

	/* Give up our read reference before anything else. */
	l->read_count--;

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock.
		 */
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return(FALSE);
	}

	if (l->want_upgrade) {
		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (l->waiting && (l->read_count == 0)) {
			l->waiting = FALSE;
			thread_wakeup(l);
		}

		/* Upgrade failed: per the contract above, no lock is
		   held on return. */
		simple_unlock(&l->interlock);
		return TRUE;
	}

	l->want_upgrade = TRUE;

	/* Wait for the remaining readers to drain. */
	while (l->read_count != 0) {
		if ((i = lock_wait_time) > 0) {
			simple_unlock(&l->interlock);
			while (--i > 0 && l->read_count != 0)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->read_count != 0) {
			l->waiting = TRUE;
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}

	simple_unlock(&l->interlock);
	return FALSE;
}
507
/*
 *	Downgrade a write (or upgrade, or recursion level) to a read
 *	lock without ever releasing the lock.  Wakes waiters
 *	unconditionally, since blocked readers may now proceed.
 */
void lock_write_to_read(
	register lock_t	l)
{
	simple_lock(&l->interlock);

	l->read_count++;
	if (l->recursion_depth != 0)
		l->recursion_depth--;
	else
	if (l->want_upgrade)
		l->want_upgrade = FALSE;
	else
		l->want_write = FALSE;

	if (l->waiting) {
		l->waiting = FALSE;
		thread_wakeup(l);
	}

	simple_unlock(&l->interlock);
}
529
530
531 /*
532 * Routine: lock_try_write
533 * Function:
534 * Tries to get a write lock.
535 *
536 * Returns FALSE if the lock is not held on return.
537 */
538
/*
 *	Never blocks: returns TRUE iff the write lock was obtained
 *	(or the caller is the recursive owner).
 */
boolean_t lock_try_write(
	register lock_t	l)
{
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock
		 */
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return TRUE;
	}

	if (l->want_write || l->want_upgrade || l->read_count) {
		/*
		 *	Can't get lock.
		 */
		simple_unlock(&l->interlock);
		return FALSE;
	}

	/*
	 *	Have lock.
	 */

	l->want_write = TRUE;
	simple_unlock(&l->interlock);
	return TRUE;
}
569
570 /*
571 * Routine: lock_try_read
572 * Function:
573 * Tries to get a read lock.
574 *
575 * Returns FALSE if the lock is not held on return.
576 */
577
/*
 *	Never blocks: returns TRUE iff a read lock was obtained
 *	(or the caller is the recursive owner).
 */
boolean_t lock_try_read(
	register lock_t	l)
{
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock
		 */
		l->read_count++;
		simple_unlock(&l->interlock);
		return TRUE;
	}

	/* A writer or pending upgrade blocks new readers. */
	if (l->want_write || l->want_upgrade) {
		simple_unlock(&l->interlock);
		return FALSE;
	}

	l->read_count++;
	simple_unlock(&l->interlock);
	return TRUE;
}
601
602 /*
603 * Routine: lock_try_read_to_write
604 * Function:
605 * Improves a read-only lock to one with
606 * write permission. If another reader has
607 * already requested an upgrade to a write lock,
608 * the read lock is still held upon return.
609 *
610 * Returns FALSE if the upgrade *failed*.
611 */
boolean_t lock_try_read_to_write(
	register lock_t	l)
{
	check_simple_locks();
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock
		 */
		l->read_count--;
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return TRUE;
	}

	if (l->want_upgrade) {
		/* Someone else already claimed the upgrade; per the
		   contract above, our read lock is retained. */
		simple_unlock(&l->interlock);
		return FALSE;
	}
	l->want_upgrade = TRUE;
	l->read_count--;

	/*
	 * NOTE(review): unlike lock_read_to_write, this drain loop
	 * sleeps without spinning and without checking l->can_sleep —
	 * it appears to assume the lock is sleepable.  Confirm callers
	 * never use this on spin-only locks.
	 */
	while (l->read_count != 0) {
		l->waiting = TRUE;
		thread_sleep(l,
			simple_lock_addr(l->interlock), FALSE);
		simple_lock(&l->interlock);
	}

	simple_unlock(&l->interlock);
	return TRUE;
}
645
646 /*
647 * Allow a process that has a lock for write to acquire it
648 * recursively (for read, write, or update).
649 */
void lock_set_recursive(
	lock_t		l)
{
	simple_lock(&l->interlock);
	/* Caller must already hold the lock for write. */
	if (!l->want_write) {
		panic("lock_set_recursive: don't have write lock");
	}
	/* Record the owner; its future acquisitions will recurse. */
	l->thread = current_thread();
	simple_unlock(&l->interlock);
}
660
661 /*
662 * Prevent a lock from being re-acquired.
663 */
void lock_clear_recursive(
	lock_t		l)
{
	simple_lock(&l->interlock);
	/* Only the registered recursive owner may clear recursion. */
	if (l->thread != current_thread()) {
		panic("lock_clear_recursive: wrong thread");
	}
	if (l->recursion_depth == 0)
		/* Restore the "no recursive owner" sentinel (see lock_init). */
		l->thread = (struct thread *)-1;	/* XXX */
	simple_unlock(&l->interlock);
}
675
676 #if MACH_KDB
677 #if MACH_SLOCKS && NCPUS == 1
678 void db_show_all_slocks(void)
679 {
680 int i;
681 struct simple_locks_info *info;
682 simple_lock_t l;
683
684 for (i = 0; i < simple_locks_taken; i++) {
685 info = &simple_locks_info[i];
686 db_printf("%d: ", i);
687 db_printsym(info->l, DB_STGY_ANY);
688 #if i386
689 db_printf(" locked by ");
690 db_printsym(info->ra, DB_STGY_PROC);
691 #endif
692 db_printf("\n");
693 }
694 }
695 #else /* MACH_SLOCKS && NCPUS == 1 */
/* Stub used when simple-lock debugging info is not compiled in. */
void db_show_all_slocks(void)
{
	db_printf("simple lock info not available\n");
}
700 #endif /* MACH_SLOCKS && NCPUS == 1 */
701 #endif /* MACH_KDB */
Cache object: 913c0693e2b108001e678c5b3cfd6407
|