sys/kern/kern_lockf.c
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/kern/kern_lockf.c 144278 2005-03-29 08:13:01Z phk $");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock(struct lockf *);
static int	 lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *
	 lf_getblock(struct lockf *);
static int	 lf_getlock(struct lockf *, struct flock *);
static int	 lf_setlock(struct lockf *);
static void	 lf_split(struct lockf *, struct lockf *);
static void	 lf_wakelock(struct lockf *);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf *);
static void	 lf_printlist(char *, struct lockf *);
#endif

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end, oadd;
	int error;

	mtx_lock(&Giant);
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) {
			error = EOVERFLOW;
			goto out;
		}
		start = size + fl->l_start;
		break;

	default:
		error = EINVAL;
		goto out;
	}
	if (start < 0) {
		error = EINVAL;
		goto out;
	}
	if (fl->l_len < 0) {
		if (start == 0) {
			error = EINVAL;
			goto out;
		}
		end = start - 1;
		start += fl->l_len;
		if (start < 0) {
			error = EINVAL;
			goto out;
		}
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start) {
			error = EOVERFLOW;
			goto out;
		}
		end = start + oadd;
	}
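	/*
	 * Editorial note, not in the original source: a worked example of
	 * the conversion above.  With l_whence = SEEK_SET, l_start = 100
	 * and l_len = 50, oadd = 49 and the lock covers [100, 149].  With
	 * l_len = 0 the lock runs to end of file, encoded as end = -1.
	 * With a negative length, say l_start = 100 and l_len = -50, the
	 * region lies before l_start: end = 99 and start = 100 - 50 = 50,
	 * so the lock covers [50, 99].
	 */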
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto out;
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all FS's other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock);
		goto out;

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		goto out;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		goto out;

	default:
		free(lock, M_LOCKF);
		error = EINVAL;
		goto out;
	}
	/* NOTREACHED */
out:
	mtx_unlock(&Giant);
	return (error);
}
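
/*
 * Editorial sketch, not part of the original file: lf_advlock() is
 * reached through VOP_ADVLOCK() when a process issues byte-range
 * locking requests with fcntl(2).  The fragment below (userland code
 * with a hypothetical file name, disabled so the listing stays
 * buildable) shows how the F_SETLK and F_GETLK operations handled
 * above are driven from user space.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl;
	int fd = open("/tmp/demo.lock", O_RDWR | O_CREAT, 0644);

	if (fd == -1)
		return (1);
	/* Ask for an exclusive lock on bytes [100, 149]. */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 100;
	fl.l_len = 50;
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("F_SETLK");	/* EAGAIN if someone else holds it */
	/* Ask who would block a read lock on the same range. */
	fl.l_type = F_RDLCK;
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("blocked by pid %d\n", (int)fl.l_pid);
	close(fd);
	return (0);
}
#endif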

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
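		/*
		 * Editorial example, not in the original source: if
		 * process A holds [0, 9] and sleeps waiting for B's
		 * lock on [10, 19] while B sleeps waiting for A's
		 * [0, 9], chasing B's wait channel below arrives back
		 * at a lock owned by A itself, and the request fails
		 * with EDEADLK instead of sleeping forever.
		 */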
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			struct thread *td;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			/* XXXKSE this is not complete under threads */
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(wproc, td) {
				while (td->td_wchan &&
				    (td->td_wmesg == lockstr) &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)td->td_wchan;
					/* Get the owner of the blocking lock */
					waitblock = waitblock->lf_next;
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;
					wproc = (struct proc *)waitblock->lf_id;
					if (wproc == (struct proc *)lock->lf_id) {
						mtx_unlock_spin(&sched_lock);
						free(lock, M_LOCKF);
						return (EDEADLK);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep(lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
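
/*
 * Editorial note, not in the original source: the F_WRLCK test above
 * encodes the usual compatibility matrix -- two shared locks never
 * conflict, while an exclusive lock on either side does:
 *
 *			held F_RDLCK	held F_WRLCK
 *	want F_RDLCK	ok		blocks
 *	want F_WRLCK	blocks		blocks
 */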

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
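		/*
		 * Editorial diagram, not in the original source, of the
		 * overlap cases ("lock" is the request, "lf" the entry
		 * being checked):
		 *
		 *	1: lock |------|	2: lock   |--|
		 *	   lf   |------|	   lf   |------|
		 *
		 *	3: lock |------|	4: lock   |----|
		 *	   lf     |--|		   lf   |----|
		 *
		 *	5: lock |----|
		 *	   lf     |----|
		 */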
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
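
/*
 * Editorial example, not in the original source: splitting lock1
 * covering [0, 99] around lock2 covering [30, 59] leaves three
 * pieces linked in order:
 *
 *	lock1 [0, 29] -> lock2 [30, 59] -> splitlock [60, 99]
 *
 * When the two regions share a start or an end, the early returns
 * above simply shrink lock1 and link lock2 in, so only two pieces
 * remain.
 */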

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>, %s, start %jd, end %jd",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev),
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	else
		printf(" %s, start %jd, end %jd",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */