FreeBSD/Linux Kernel Cross Reference
sys/kern/lock.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: lock.c,v $
29 * Revision 2.12 93/11/17 17:13:54 dbg
30 * Added includes for missing functions.
31 * [93/10/27 dbg]
32 *
33 * Revision 2.11 93/01/14 17:34:55 danner
34 * Added ANSI function prototypes.
35 * [92/12/30 dbg]
36 * 64bit cleanup.
37 * [92/12/01 af]
38 *
39 * Revision 2.10 92/08/03 17:38:01 jfriedl
40 * removed silly prototypes
41 * [92/08/02 jfriedl]
42 *
43 * Revision 2.9 92/05/21 17:14:27 jfriedl
44 * Added void to functions that yet needed it.
45 * [92/05/16 jfriedl]
46 *
47 * Revision 2.8 92/01/23 15:20:56 rpd
48 * Fixed lock_done and lock_read_to_write to only wakeup
49 * if the number of readers is zero (from Sanjay Nadkarni).
50 * Fixed db_show_all_slocks.
51 * [92/01/20 rpd]
52 *
53 * Revision 2.7 91/07/01 08:25:03 jsb
54 * Implemented db_show_all_slocks.
55 * [91/06/29 16:52:53 jsb]
56 *
57 * Revision 2.6 91/05/18 14:32:07 rpd
58 * Added simple_locks_info, to keep track of which locks are held.
59 * [91/05/02 rpd]
60 * Added check_simple_locks.
61 * [91/03/31 rpd]
62 *
63 * Revision 2.5 91/05/14 16:43:40 mrt
64 * Correcting copyright
65 *
66 * Revision 2.4 91/02/05 17:27:31 mrt
67 * Changed to new Mach copyright
68 * [91/02/01 16:14:31 mrt]
69 *
70 * Revision 2.3 90/06/02 14:54:56 rpd
71 * Allow the lock to be taken in simple_lock_try.
72 * [90/03/26 22:09:13 rpd]
73 *
74 * Revision 2.2 90/01/11 11:43:18 dbg
75 * Upgrade to mainline: use simple_lock_addr when calling
76 * thread_sleep.
77 * [89/12/11 dbg]
78 *
79 * Revision 2.1 89/08/03 15:45:34 rwd
80 * Created.
81 *
82 * Revision 2.3 88/09/25 22:14:45 rpd
83 * Changed explicit panics in sanity-checking simple locking calls
84 * to assertions.
85 * [88/09/12 23:03:22 rpd]
86 *
87 * Revision 2.2 88/07/20 16:35:47 rpd
88 * Add simple-locking sanity-checking code.
89 *
90 * 23-Jan-88 Richard Sanzi (sanzi) at Carnegie-Mellon University
91 * On a UNIPROCESSOR, set lock_wait_time = 0. There is no reason
92 * for a uni to spin on a complex lock.
93 *
94 * 18-Nov-87 Avadis Tevanian (avie) at Carnegie-Mellon University
95 * Eliminated previous history.
96 */
97 /*
98 * File: kern/lock.c
99 * Author: Avadis Tevanian, Jr., Michael Wayne Young
100 * Date: 1985
101 *
102 * Locking primitives implementation
103 */
104
105 #include <cpus.h>
106 #include <mach_kdb.h>
107
108 #include <kern/lock.h>
109 #include <kern/memory.h>
110 #include <kern/thread.h>
111 #include <kern/sched_prim.h>
112 #if MACH_KDB
113 #include <machine/db_machdep.h>
114 #include <ddb/db_output.h>
115 #include <ddb/db_sym.h>
116 #endif
117
118
119 #if NCPUS > 1
120
121 /*
122 * Module: lock
123 * Function:
124 * Provide reader/writer synchronization.
125 * Implementation:
126 * Simple interlock on a bit. Readers first interlock,
127 * increment the reader count, then let go. Writers hold
128 * the interlock (thus preventing further readers), and
129 * wait for already-accepted readers to go away.
130 */
131
132 /*
133 * The simple-lock routines are the primitives out of which
134 * the lock package is built. The implementation is left
135 * to the machine-dependent code.
136 */
137
#ifdef notdef
/*
 * Sample (machine-independent) implementation of simple locks.
 * Normally compiled out: the real primitives are supplied by
 * machine-dependent code.
 *
 * Assumes:
 *	boolean_t test_and_set(boolean_t *)
 *		atomically sets the boolean to TRUE and returns
 *		the value it previously held,
 * and that storing FALSE into a boolean_t is itself atomic.
 */

/*
 * Prepare a simple lock for use.  A simple lock may only be
 * used for exclusive access.
 */
void simple_lock_init(simple_lock_t l)
{
	boolean_t *flag = (boolean_t *)l;

	*flag = FALSE;
}

/* Spin until the lock is acquired. */
void simple_lock(simple_lock_t l)
{
	for (;;) {
		if (!test_and_set((boolean_t *)l))
			break;
	}
}

/* Release the lock. */
void simple_unlock(simple_lock_t l)
{
	boolean_t *flag = (boolean_t *)l;

	*flag = FALSE;
}

/* Single attempt to take the lock; TRUE on success. */
boolean_t simple_lock_try(simple_lock_t l)
{
	boolean_t was_held = test_and_set((boolean_t *)l);

	return !was_held;
}
#endif	/* notdef */
173 #endif /* NCPUS > 1 */
174
#if	NCPUS > 1
/*
 * How many times to spin on a busy complex lock before
 * (possibly) going to sleep.
 */
int	lock_wait_time = 100;
#else	/* NCPUS > 1 */
/*
 * On a uniprocessor, spinning is pointless: nothing can
 * change the want_write bit while this processor is busy
 * looping on it, so go straight to sleeping.
 */
int	lock_wait_time = 0;
#endif	/* NCPUS > 1 */
186
187 #if MACH_SLOCKS && NCPUS == 1
188 /*
189 * This code does not protect simple_locks_taken and simple_locks_info.
190 * It works despite the fact that interrupt code does use simple locks.
191 * This is because interrupts use locks in a stack-like manner.
192 * Each interrupt releases all the locks it acquires, so the data
193 * structures end up in the same state after the interrupt as before.
194 * The only precaution necessary is that simple_locks_taken be
195 * incremented first and decremented last, so that interrupt handlers
196 * don't over-write active slots in simple_locks_info.
197 */
198
199 unsigned int simple_locks_taken = 0;
200
201 #define NSLINFO 1000 /* maximum number of locks held */
202
203 struct simple_locks_info {
204 simple_lock_t l;
205 unsigned int ra;
206 } simple_locks_info[NSLINFO];
207
208 void check_simple_locks(void)
209 {
210 assert(simple_locks_taken == 0);
211 }
212
213 /* Need simple lock sanity checking code if simple locks are being
214 compiled in, and we are compiling for a uniprocessor. */
215
216 void simple_lock_init(
217 simple_lock_t l)
218 {
219 l->lock_data = 0;
220 }
221
222 void simple_lock(
223 simple_lock_t l)
224 {
225 struct simple_locks_info *info;
226
227 assert(l->lock_data == 0);
228
229 l->lock_data = 1;
230
231 info = &simple_locks_info[simple_locks_taken++];
232 info->l = l;
233 /* XXX we want our return address, if possible */
234 #ifdef i386
235 info->ra = *((unsigned int *)&l - 1);
236 #endif /* i386 */
237 }
238
239 boolean_t simple_lock_try(
240 simple_lock_t l)
241 {
242 struct simple_locks_info *info;
243
244 if (l->lock_data != 0)
245 return FALSE;
246
247 l->lock_data = 1;
248
249 info = &simple_locks_info[simple_locks_taken++];
250 info->l = l;
251 /* XXX we want our return address, if possible */
252 #ifdef i386
253 info->ra = *((unsigned int *)&l - 1);
254 #endif /* i386 */
255
256 return TRUE;
257 }
258
259 void simple_unlock(
260 simple_lock_t l)
261 {
262 assert(l->lock_data != 0);
263
264 l->lock_data = 0;
265
266 if (simple_locks_info[simple_locks_taken-1].l != l) {
267 unsigned int i = simple_locks_taken;
268
269 /* out-of-order unlocking */
270
271 do
272 if (i == 0)
273 panic("simple_unlock");
274 while (simple_locks_info[--i].l != l);
275
276 simple_locks_info[i] = simple_locks_info[simple_locks_taken-1];
277 }
278 simple_locks_taken--;
279 }
280
281 #endif /* MACH_SLOCKS && NCPUS == 1 */
282
283 /*
284 * Routine: lock_init
285 * Function:
286 * Initialize a lock; required before use.
287 * Note that clients declare the "struct lock"
288 * variables and then initialize them, rather
289 * than getting a new one from this module.
290 */
291 void lock_init(
292 lock_t l,
293 boolean_t can_sleep)
294 {
295 bzero(l, sizeof(lock_data_t));
296 simple_lock_init(&l->interlock);
297 l->want_write = FALSE;
298 l->want_upgrade = FALSE;
299 l->read_count = 0;
300 l->can_sleep = can_sleep;
301 l->thread = (struct thread *)-1; /* XXX */
302 l->recursion_depth = 0;
303 }
304
305 void lock_sleepable(
306 lock_t l,
307 boolean_t can_sleep)
308 {
309 simple_lock(&l->interlock);
310 l->can_sleep = can_sleep;
311 simple_unlock(&l->interlock);
312 }
313
314
315 /*
316 * Sleep locks. These use the same data structure and algorithm
317 * as the spin locks, but the process sleeps while it is waiting
318 * for the lock. These work on uniprocessor systems.
319 */
320
321 void lock_write(
322 register lock_t l)
323 {
324 register int i;
325
326 check_simple_locks();
327 simple_lock(&l->interlock);
328
329 if (l->thread == current_thread()) {
330 /*
331 * Recursive lock.
332 */
333 l->recursion_depth++;
334 simple_unlock(&l->interlock);
335 return;
336 }
337
338 /*
339 * Try to acquire the want_write bit.
340 */
341 while (l->want_write) {
342 if ((i = lock_wait_time) > 0) {
343 simple_unlock(&l->interlock);
344 while (--i > 0 && l->want_write)
345 continue;
346 simple_lock(&l->interlock);
347 }
348
349 if (l->can_sleep && l->want_write) {
350 l->waiting = TRUE;
351 thread_sleep(l,
352 simple_lock_addr(l->interlock), FALSE);
353 simple_lock(&l->interlock);
354 }
355 }
356 l->want_write = TRUE;
357
358 /* Wait for readers (and upgrades) to finish */
359
360 while ((l->read_count != 0) || l->want_upgrade) {
361 if ((i = lock_wait_time) > 0) {
362 simple_unlock(&l->interlock);
363 while (--i > 0 && (l->read_count != 0 ||
364 l->want_upgrade))
365 continue;
366 simple_lock(&l->interlock);
367 }
368
369 if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
370 l->waiting = TRUE;
371 thread_sleep(l,
372 simple_lock_addr(l->interlock), FALSE);
373 simple_lock(&l->interlock);
374 }
375 }
376 simple_unlock(&l->interlock);
377 }
378
379 void lock_done(
380 register lock_t l)
381 {
382 simple_lock(&l->interlock);
383
384 if (l->read_count != 0)
385 l->read_count--;
386 else
387 if (l->recursion_depth != 0)
388 l->recursion_depth--;
389 else
390 if (l->want_upgrade)
391 l->want_upgrade = FALSE;
392 else
393 l->want_write = FALSE;
394
395 /*
396 * There is no reason to wakeup a waiting thread
397 * if the read-count is non-zero. Consider:
398 * we must be dropping a read lock
399 * threads are waiting only if one wants a write lock
400 * if there are still readers, they can't proceed
401 */
402
403 if (l->waiting && (l->read_count == 0)) {
404 l->waiting = FALSE;
405 thread_wakeup(l);
406 }
407
408 simple_unlock(&l->interlock);
409 }
410
411 void lock_read(
412 register lock_t l)
413 {
414 register int i;
415
416 check_simple_locks();
417 simple_lock(&l->interlock);
418
419 if (l->thread == current_thread()) {
420 /*
421 * Recursive lock.
422 */
423 l->read_count++;
424 simple_unlock(&l->interlock);
425 return;
426 }
427
428 while (l->want_write || l->want_upgrade) {
429 if ((i = lock_wait_time) > 0) {
430 simple_unlock(&l->interlock);
431 while (--i > 0 && (l->want_write || l->want_upgrade))
432 continue;
433 simple_lock(&l->interlock);
434 }
435
436 if (l->can_sleep && (l->want_write || l->want_upgrade)) {
437 l->waiting = TRUE;
438 thread_sleep(l,
439 simple_lock_addr(l->interlock), FALSE);
440 simple_lock(&l->interlock);
441 }
442 }
443
444 l->read_count++;
445 simple_unlock(&l->interlock);
446 }
447
448 /*
449 * Routine: lock_read_to_write
450 * Function:
451 * Improves a read-only lock to one with
452 * write permission. If another reader has
453 * already requested an upgrade to a write lock,
454 * no lock is held upon return.
455 *
456 * Returns TRUE if the upgrade *failed*.
457 */
458 boolean_t lock_read_to_write(
459 register lock_t l)
460 {
461 register int i;
462
463 check_simple_locks();
464 simple_lock(&l->interlock);
465
466 l->read_count--;
467
468 if (l->thread == current_thread()) {
469 /*
470 * Recursive lock.
471 */
472 l->recursion_depth++;
473 simple_unlock(&l->interlock);
474 return(FALSE);
475 }
476
477 if (l->want_upgrade) {
478 /*
479 * Someone else has requested upgrade.
480 * Since we've released a read lock, wake
481 * him up.
482 */
483 if (l->waiting && (l->read_count == 0)) {
484 l->waiting = FALSE;
485 thread_wakeup(l);
486 }
487
488 simple_unlock(&l->interlock);
489 return TRUE;
490 }
491
492 l->want_upgrade = TRUE;
493
494 while (l->read_count != 0) {
495 if ((i = lock_wait_time) > 0) {
496 simple_unlock(&l->interlock);
497 while (--i > 0 && l->read_count != 0)
498 continue;
499 simple_lock(&l->interlock);
500 }
501
502 if (l->can_sleep && l->read_count != 0) {
503 l->waiting = TRUE;
504 thread_sleep(l,
505 simple_lock_addr(l->interlock), FALSE);
506 simple_lock(&l->interlock);
507 }
508 }
509
510 simple_unlock(&l->interlock);
511 return FALSE;
512 }
513
514 void lock_write_to_read(
515 register lock_t l)
516 {
517 simple_lock(&l->interlock);
518
519 l->read_count++;
520 if (l->recursion_depth != 0)
521 l->recursion_depth--;
522 else
523 if (l->want_upgrade)
524 l->want_upgrade = FALSE;
525 else
526 l->want_write = FALSE;
527
528 if (l->waiting) {
529 l->waiting = FALSE;
530 thread_wakeup(l);
531 }
532
533 simple_unlock(&l->interlock);
534 }
535
536
537 /*
538 * Routine: lock_try_write
539 * Function:
540 * Tries to get a write lock.
541 *
542 * Returns FALSE if the lock is not held on return.
543 */
544
545 boolean_t lock_try_write(
546 register lock_t l)
547 {
548 simple_lock(&l->interlock);
549
550 if (l->thread == current_thread()) {
551 /*
552 * Recursive lock
553 */
554 l->recursion_depth++;
555 simple_unlock(&l->interlock);
556 return TRUE;
557 }
558
559 if (l->want_write || l->want_upgrade || l->read_count) {
560 /*
561 * Can't get lock.
562 */
563 simple_unlock(&l->interlock);
564 return FALSE;
565 }
566
567 /*
568 * Have lock.
569 */
570
571 l->want_write = TRUE;
572 simple_unlock(&l->interlock);
573 return TRUE;
574 }
575
576 /*
577 * Routine: lock_try_read
578 * Function:
579 * Tries to get a read lock.
580 *
581 * Returns FALSE if the lock is not held on return.
582 */
583
584 boolean_t lock_try_read(
585 register lock_t l)
586 {
587 simple_lock(&l->interlock);
588
589 if (l->thread == current_thread()) {
590 /*
591 * Recursive lock
592 */
593 l->read_count++;
594 simple_unlock(&l->interlock);
595 return TRUE;
596 }
597
598 if (l->want_write || l->want_upgrade) {
599 simple_unlock(&l->interlock);
600 return FALSE;
601 }
602
603 l->read_count++;
604 simple_unlock(&l->interlock);
605 return TRUE;
606 }
607
608 /*
609 * Routine: lock_try_read_to_write
610 * Function:
611 * Improves a read-only lock to one with
612 * write permission. If another reader has
613 * already requested an upgrade to a write lock,
614 * the read lock is still held upon return.
615 *
616 * Returns FALSE if the upgrade *failed*.
617 */
618 boolean_t lock_try_read_to_write(
619 register lock_t l)
620 {
621 check_simple_locks();
622 simple_lock(&l->interlock);
623
624 if (l->thread == current_thread()) {
625 /*
626 * Recursive lock
627 */
628 l->read_count--;
629 l->recursion_depth++;
630 simple_unlock(&l->interlock);
631 return TRUE;
632 }
633
634 if (l->want_upgrade) {
635 simple_unlock(&l->interlock);
636 return FALSE;
637 }
638 l->want_upgrade = TRUE;
639 l->read_count--;
640
641 while (l->read_count != 0) {
642 l->waiting = TRUE;
643 thread_sleep(l,
644 simple_lock_addr(l->interlock), FALSE);
645 simple_lock(&l->interlock);
646 }
647
648 simple_unlock(&l->interlock);
649 return TRUE;
650 }
651
652 /*
653 * Allow a process that has a lock for write to acquire it
654 * recursively (for read, write, or update).
655 */
656 void lock_set_recursive(
657 lock_t l)
658 {
659 simple_lock(&l->interlock);
660 if (!l->want_write) {
661 panic("lock_set_recursive: don't have write lock");
662 }
663 l->thread = current_thread();
664 simple_unlock(&l->interlock);
665 }
666
667 /*
668 * Prevent a lock from being re-acquired.
669 */
670 void lock_clear_recursive(
671 lock_t l)
672 {
673 simple_lock(&l->interlock);
674 if (l->thread != current_thread()) {
675 panic("lock_clear_recursive: wrong thread");
676 }
677 if (l->recursion_depth == 0)
678 l->thread = (struct thread *)-1; /* XXX */
679 simple_unlock(&l->interlock);
680 }
681
682 #if MACH_KDB
683 #if MACH_SLOCKS && NCPUS == 1
684 void db_show_all_slocks(void)
685 {
686 int i;
687 struct simple_locks_info *info;
688 simple_lock_t l;
689
690 for (i = 0; i < simple_locks_taken; i++) {
691 info = &simple_locks_info[i];
692 db_printf("%d: ", i);
693 db_printsym(info->l, DB_STGY_ANY);
694 #if i386
695 db_printf(" locked by ");
696 db_printsym(info->ra, DB_STGY_PROC);
697 #endif
698 db_printf("\n");
699 }
700 }
701 #else /* MACH_SLOCKS && NCPUS == 1 */
/* Without lock records there is nothing to display. */
void db_show_all_slocks(void)
{
	db_printf("simple lock info not available\n");
}
706 #endif /* MACH_SLOCKS && NCPUS == 1 */
707 #endif /* MACH_KDB */
Cache object: 10d5661ab94a82e55851d996f5ab5faf
|