/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 * Copyright (c) 2006 Matthew Dillon <dillon@backplane.com>. All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.37 2007/11/01 22:48:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */
#include <machine/stdarg.h>

#include <sys/spinlock2.h>

#ifdef INVARIANTS
int lf_global_counter = 0;
#endif

#ifdef LOCKF_DEBUG
int lf_print_ranges = 0;

static void _lf_print_lock(const struct lockf *);
static void _lf_printf(const char *, ...);

#define lf_print_lock(lock) if (lf_print_ranges) _lf_print_lock(lock)
#define lf_printf(ctl, args...) if (lf_print_ranges) _lf_printf(ctl, args)
#else
#define lf_print_lock(lock)
#define lf_printf(ctl, args...)
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

static void	lf_wakeup(struct lockf *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t);
static void	lf_insert(struct lockf_range_list *list,
			  struct lockf_range *elm,
			  struct lockf_range *insert_point);
static void	lf_destroy_range(struct lockf_range *);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
			   off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
			   int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);

/*
 * Return TRUE (non-zero) if the type and posix flags match.
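 * That is, the range's lf_type equals type and its F_POSIX flag agrees
 * with flags; this is the precondition used when merging ranges.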
 */
static __inline
int
lf_match(struct lockf_range *range, int type, int flags)
{
	if (range->lf_type != type)
		return(0);
	if ((range->lf_flags ^ flags) & F_POSIX)
		return(0);
	return(1);
}

/*
 * Check whether range and [start, end] overlap.
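 * e.g. [5,9] overlaps [8,12] and [0,6], but not [10,12] or [0,3].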
 */
static __inline
int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_start <= end)
		return(1);
	else if (start >= range->lf_start && start <= range->lf_end)
		return(1);
	else
		return(0);
}


/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
	struct uidinfo *uip;

	KKASSERT(p != NULL);

	uip = p->p_ucred->cr_uidinfo;
	spin_lock(&uip->ui_lock);

	if (increase)
		uip->ui_posixlocks += p->p_numposixlocks;
	else
		uip->ui_posixlocks -= p->p_numposixlocks;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by %s user: %d.",
		 increase ? "new" : "old", uip->ui_posixlocks));
	spin_unlock(&uip->ui_lock);
}

static int
lf_count_change(struct proc *owner, int diff)
{
	struct uidinfo *uip;
	int max, ret;

	/* we might actually not have a process context */
	if (owner == NULL)
		return(0);

	uip = owner->p_ucred->cr_uidinfo;

	max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
		  maxposixlocksperuid);

	spin_lock(&uip->ui_lock);
	if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
	    uip->ui_posixlocks >= max ) {
		ret = 1;
	} else {
		uip->ui_posixlocks += diff;
		owner->p_numposixlocks += diff;
		KASSERT(uip->ui_posixlocks >= 0,
			("Negative number of POSIX locks held by user: %d.",
			 uip->ui_posixlocks));
		KASSERT(owner->p_numposixlocks >= 0,
			("Negative number of POSIX locks held by proc: %d.",
			 owner->p_numposixlocks));
		ret = 0;
	}
	spin_unlock(&uip->ui_lock);
	return ret;
}

/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
	struct flock *fl = ap->a_fl;
	struct proc *owner;
	off_t start, end;
	int type, flags, error;
	lwkt_token_t token;

	/*
	 * Convert the flock structure into a start and end.
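	 * e.g. l_whence = SEEK_SET, l_start = 100, l_len = 50 yields the
	 * byte range [100, 149]; l_len = 0 means "through EOF" and yields
	 * [start, LLONG_MAX] with F_NOEND set.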
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return(EINVAL);
	}

	flags = ap->a_flags;
	if (start < 0)
		return(EINVAL);
	if (fl->l_len == 0) {
		flags |= F_NOEND;
		end = LLONG_MAX;
	} else if (fl->l_len < 0) {
		return(EINVAL);
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return(EINVAL);
	}

	type = fl->l_type;
	/*
	 * This isn't really correct for flock-style locks,
	 * but the current handling is somewhat broken anyway.
	 */
	owner = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
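	 * The pool token serializes all manipulation of this lockf
	 * structure's range lists while we hold it.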
	 */
	token = lwkt_getpooltoken(lock);

	if (lock->init_done == 0) {
		TAILQ_INIT(&lock->lf_range);
		TAILQ_INIT(&lock->lf_blocked);
		lock->init_done = 1;
	}

	switch(ap->a_op) {
	case F_SETLK:
		/*
		 * NOTE: It is possible for both lf_range and lf_blocked to
		 * be empty if we block and get woken up, but another process
		 * then gets in and issues an unlock. So VMAYHAVELOCKS must
		 * be set after the lf_setlock() operation completes rather
		 * than before.
		 */
		error = lf_setlock(lock, owner, type, flags, start, end);
		vsetflags(ap->a_vp, VMAYHAVELOCKS);
		break;

	case F_UNLCK:
		error = lf_setlock(lock, owner, type, flags, start, end);
		if (TAILQ_EMPTY(&lock->lf_range) &&
		    TAILQ_EMPTY(&lock->lf_blocked)) {
			vclrflags(ap->a_vp, VMAYHAVELOCKS);
		}
		break;

	case F_GETLK:
		error = lf_getlock(fl, lock, owner, type, flags, start, end);
		break;

	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(token);
	return(error);
}

static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
	   off_t start, off_t end)
{
	struct lockf_range *range;
	struct lockf_range *brange;
	struct lockf_range *next;
	struct lockf_range *first_match;
	struct lockf_range *last_match;
	struct lockf_range *insert_point;
	struct lockf_range *new_range1;
	struct lockf_range *new_range2;
	int wakeup_needed;
	int double_clip;
	int unlock_override;
	int error = 0;
	int count;
	struct lockf_range_list deadlist;

	new_range1 = NULL;
	new_range2 = NULL;
	count = 0;

restart:
	/*
	 * Preallocate two ranges so we don't have to worry about blocking
	 * in the middle of the lock code.
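	 * lf_alloc_range() allocates with M_WAITOK and may sleep, so we
	 * do it up front rather than while the range list is being
	 * modified.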
	 */
	if (new_range1 == NULL)
		new_range1 = lf_alloc_range();
	if (new_range2 == NULL)
		new_range2 = lf_alloc_range();
	first_match = NULL;
	last_match = NULL;
	insert_point = NULL;
	wakeup_needed = 0;

	lf_print_lock(lock);

	/*
	 * Locate the insertion point for the new lock (the first range
	 * with an lf_start >= start).
	 *
	 * Locate the first and last ranges owned by us that overlap
	 * the requested range.
	 */
	TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
		if (insert_point == NULL && range->lf_start >= start)
			insert_point = range;

		/*
		 * Skip non-overlapping locks. Locks are sorted by lf_start,
		 * so we can terminate the search when lf_start exceeds the
		 * requested range (insert_point is still guaranteed to be
		 * set properly).
		 */
		if (range->lf_end < start)
			continue;
		if (range->lf_start > end) {
			range = NULL;
			break;
		}

		/*
		 * Overlapping lock. Set first_match and last_match if we
		 * are the owner.
		 */
		if (range->lf_owner == owner) {
			if (first_match == NULL)
				first_match = range;
			last_match = range;
			continue;
		}

		/*
		 * If we aren't the owner check for a conflicting lock. Only
		 * if not unlocking.
		 */
		if (type != F_UNLCK) {
			if (type == F_WRLCK || range->lf_type == F_WRLCK)
				break;
		}
	}

	/*
	 * If a conflicting lock was observed, block or fail as appropriate.
	 * (this code is skipped when unlocking)
	 */
	if (range != NULL) {
		if ((flags & F_WAIT) == 0) {
			error = EAGAIN;
			goto do_cleanup;
		}

		/*
		 * We are blocked. For POSIX locks we have to check
		 * for deadlocks and return with EDEADLK. This is done
		 * by checking whether range->lf_owner is already
		 * blocked.
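		 * e.g. if the owner of the conflicting range is itself
		 * asleep on this file's blocked list, sleeping on that
		 * range could close a wait cycle, so we fail with EDEADLK
		 * instead.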
		 *
		 * Since flock-style locks cover the whole file, a
		 * deadlock between those is nearly impossible.
		 * This can only occur if a process tries to lock the
		 * same inode exclusively while holding a shared lock
		 * with another descriptor.
		 * XXX How can we cleanly detect this?
		 * XXX The current mixing of flock & fcntl/lockf is evil.
		 *
		 * Handle existing locks of flock-style like POSIX locks.
		 */
		if (flags & F_POSIX) {
			TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link) {
				if (brange->lf_owner == range->lf_owner) {
					error = EDEADLK;
					goto do_cleanup;
				}
			}
		}

		/*
		 * For flock-style locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((flags & F_POSIX) == 0 && type == F_WRLCK)
			lf_setlock(lock, owner, F_UNLCK, 0, start, end);

		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, 0, start, end);
		TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
		error = tsleep(brange, PCATCH, "lockf", 0);

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which case we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing/downgrading a lock (in which case
		 * we have already been removed from the blocked list
		 * and our lf_flags field is 1).
		 *
		 * Sleep if it looks like we might be livelocking.
		 */
		if (brange->lf_flags == 0)
			TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
		if (count == 2)
			tsleep(brange, 0, "lockfz", 2);
		else
			++count;
		lf_destroy_range(brange);

		if (error)
			goto do_cleanup;
		goto restart;
	}

	/*
	 * If there are no overlapping locks owned by us then creating
	 * the new lock is easy. This is the most common case.
	 */
	if (first_match == NULL) {
		if (type == F_UNLCK)
			goto do_wakeup;
		if (flags & F_POSIX) {
			if (lf_count_change(owner, 1)) {
				error = ENOLCK;
				goto do_cleanup;
			}
		}
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, range, insert_point);
		goto do_wakeup;
	}

	/*
	 * double_clip - Calculate a special case where TWO locks may have
	 *		 to be added due to the new lock breaking up an
	 *		 existing incompatible lock in the middle.
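	 *		 e.g. holding exclusive [0,99], a new shared lock
	 *		 [40,59] clips the old lock to [0,39] and adds both
	 *		 [40,59] and the remainder [60,99].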
	 *
	 * unlock_override - Calculate a special case where NO locks
	 *		 need to be created. This occurs when an unlock
	 *		 does not clip any locks at the front and rear.
	 *
	 * WARNING! closef() and fdrop() assume that an F_UNLCK of the
	 *	    entire range will always succeed so the unlock_override
	 *	    case is mandatory.
	 */
	double_clip = 0;
	unlock_override = 0;
	if (first_match->lf_start < start) {
		if (first_match == last_match && last_match->lf_end > end)
			double_clip = 1;
	} else if (type == F_UNLCK && last_match->lf_end <= end) {
		unlock_override = 1;
	}

	/*
	 * Figure out the worst case net increase in POSIX locks and account
	 * for it now before we start modifying things. If neither the
	 * first nor the last lock matches we have an issue. If there is only
	 * one overlapping range which needs to be clipped on both ends
	 * we wind up having to create up to two new locks, else only one.
	 *
	 * When unlocking the worst case is always 1 new lock if our
	 * unlock request cuts the middle out of an existing lock range.
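	 * (e.g. unlocking [40,59] out of a held [0,99] leaves [0,39] and
	 * [60,99]: one range becomes two, a net gain of one.)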
	 *
	 * count represents the 'cleanup' adjustment needed. It starts
	 * negative, is incremented whenever we create a new POSIX lock,
	 * and decremented whenever we delete an existing one. At the
	 * end of the day it had better be <= 0 or we didn't calculate the
	 * worst case properly here.
	 */
	count = 0;
	if ((flags & F_POSIX) && !unlock_override) {
		if (!lf_match(first_match, type, flags) &&
		    !lf_match(last_match, type, flags)
		) {
			if (double_clip && type != F_UNLCK)
				count = -2;
			else
				count = -1;
		}
		if (count && lf_count_change(owner, -count)) {
			error = ENOLCK;
			goto do_cleanup;
		}
	}
	/* else flock style lock which encompasses entire range */

	/*
	 * Create and insert the lock representing the requested range.
	 * Adjust the net POSIX lock count. We have to move our insertion
	 * point since brange now represents the first record >= start.
	 *
	 * When unlocking, no new lock is inserted but we still clip.
	 */
	if (type != F_UNLCK) {
		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, brange, insert_point);
		insert_point = brange;
		if (flags & F_POSIX)
			++count;
	} else {
		brange = NULL;
	}

	/*
	 * Handle the double_clip case. This is the only case where
	 * we wind up having to add TWO locks.
	 */
	if (double_clip) {
		KKASSERT(first_match == last_match);
		last_match = new_range2;
		new_range2 = NULL;
		lf_create_range(last_match, first_match->lf_owner,
				first_match->lf_type, first_match->lf_flags,
				end + 1, first_match->lf_end);
		first_match->lf_end = start - 1;
		first_match->lf_flags &= ~F_NOEND;

		/*
		 * Figure out where to insert the right side clip.
		 */
		lf_insert(&lock->lf_range, last_match, first_match);
		if (last_match->lf_flags & F_POSIX)
			++count;
	}

	/*
	 * Clip or destroy the locks between first_match and last_match,
	 * inclusive. Ignore the primary lock we created (brange). Note
	 * that if double-clipped, first_match and last_match will be
	 * outside our clipping range. Otherwise first_match and last_match
	 * will be deleted.
	 *
	 * We have already taken care of any double clipping.
	 *
	 * The insert_point may become invalid as we delete records, do not
	 * use that pointer any more. Also, when removing something other
	 * than 'range' we have to check to see if the item we are removing
	 * is 'next' and adjust 'next' properly.
	 *
	 * NOTE: brange will be NULL if F_UNLCKing.
	 */
	TAILQ_INIT(&deadlist);
	next = first_match;

	while ((range = next) != NULL) {
		next = TAILQ_NEXT(range, lf_link);

		/*
		 * Ignore elements that we do not own and ignore the
		 * primary request range which we just created.
		 */
		if (range->lf_owner != owner || range == brange)
			continue;

		/*
		 * We may have to wakeup a waiter when downgrading a lock.
		 */
		if (type == F_UNLCK)
			wakeup_needed = 1;
		if (type == F_RDLCK && range->lf_type == F_WRLCK)
			wakeup_needed = 1;

		/*
		 * Clip left. This can only occur on first_match.
		 *
		 * Merge the left clip with brange if possible. This must
		 * be done specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation above.
		 */
		if (range->lf_start < start) {
			KKASSERT(range == first_match);
			if (brange &&
			    range->lf_end >= start - 1 &&
			    lf_match(range, type, flags)) {
				range->lf_end = brange->lf_end;
				range->lf_flags |= brange->lf_flags & F_NOEND;
				/*
				 * Removing something other than 'range',
				 * adjust 'next' if necessary.
				 */
				if (next == brange)
					next = TAILQ_NEXT(next, lf_link);
				TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
				if (brange->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
				brange = range;
			} else if (range->lf_end >= start) {
				range->lf_end = start - 1;
				if (type != F_UNLCK)
					range->lf_flags &= ~F_NOEND;
			}
			if (range == last_match)
				break;
			continue;
		}

		/*
		 * Clip right. This can only occur on last_match.
		 *
		 * Merge the right clip if possible. This must be done
		 * specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation.
		 *
		 * Since we are adjusting lf_start, we have to move the
		 * record to maintain the sorted list. Since lf_start is
		 * only getting larger we can use the next element as the
		 * insert point (we don't have to backtrack).
		 */
		if (range->lf_end > end) {
			KKASSERT(range == last_match);
			if (brange &&
			    range->lf_start <= end + 1 &&
			    lf_match(range, type, flags)) {
				brange->lf_end = range->lf_end;
				brange->lf_flags |= range->lf_flags & F_NOEND;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				if (range->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
			} else if (range->lf_start <= end) {
				range->lf_start = end + 1;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				lf_insert(&lock->lf_range, range, next);
			}
			/* range == last_match, we are done */
			break;
		}

		/*
		 * The record must be entirely enclosed. Note that the
		 * record could be first_match or last_match, and will be
		 * deleted.
		 */
		KKASSERT(range->lf_start >= start && range->lf_end <= end);
		TAILQ_REMOVE(&lock->lf_range, range, lf_link);
		if (range->lf_flags & F_POSIX)
			--count;
		TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		if (range == last_match)
			break;
	}

	/*
	 * Attempt to merge locks adjacent to brange. For example, we may
	 * have had to clip first_match and/or last_match, and they might
	 * be adjacent. Or there might simply have been an adjacent lock
	 * already there.
	 *
	 * Don't get fancy, just check adjacent elements in the list if they
	 * happen to be owned by us.
	 *
	 * This case only gets hit if we have a situation where a shared
	 * and exclusive lock are adjacent, and the exclusive lock is
	 * downgraded to shared or the shared lock is upgraded to exclusive.
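	 * e.g. if we hold shared [0,49] and exclusive [50,99], downgrading
	 * [50,99] to shared leaves two compatible adjacent ranges which
	 * merge into a single shared [0,99] here.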
	 */
	if (brange) {
		range = TAILQ_PREV(brange, lockf_range_list, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_end == brange->lf_start - 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend range to cover brange and scrap brange.
			 */
			range->lf_end = brange->lf_end;
			range->lf_flags |= brange->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
			if (brange->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
			brange = range;
		}
		range = TAILQ_NEXT(brange, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_start == brange->lf_end + 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend brange to cover range and scrap range.
			 */
			brange->lf_end = range->lf_end;
			brange->lf_flags |= range->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			if (range->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		}
	}

	/*
	 * Destroy deleted elements. We didn't want to do this in the loop
	 * because the free() might block.
	 *
	 * Adjust the count for any posix locks we thought we might create
	 * but didn't.
	 */
	while ((range = TAILQ_FIRST(&deadlist)) != NULL) {
		TAILQ_REMOVE(&deadlist, range, lf_link);
		lf_destroy_range(range);
	}

	KKASSERT(count <= 0);
	if (count < 0)
		lf_count_change(owner, count);
do_wakeup:
	lf_print_lock(lock);
	if (wakeup_needed)
		lf_wakeup(lock, start, end);
	error = 0;
do_cleanup:
	if (new_range1 != NULL)
		lf_destroy_range(new_range1);
	if (new_range2 != NULL)
		lf_destroy_range(new_range2);
	return(error);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
	   int type, int flags, off_t start, off_t end)
{
	struct lockf_range *range;

	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		if (range->lf_owner != owner &&
		    lf_overlap(range, start, end) &&
		    (type == F_WRLCK || range->lf_type == F_WRLCK))
			break;
	if (range == NULL) {
		fl->l_type = F_UNLCK;
		return(0);
	}
	fl->l_type = range->lf_type;
	fl->l_whence = SEEK_SET;
	fl->l_start = range->lf_start;
	if (range->lf_flags & F_NOEND)
		fl->l_len = 0;
	else
		fl->l_len = range->lf_end - range->lf_start + 1;
	if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
		fl->l_pid = range->lf_owner->p_pid;
	else
		fl->l_pid = -1;
	return(0);
}

/*
 * Wakeup pending lock attempts. Theoretically we can stop as soon as
 * we encounter an exclusive request that covers the whole range (at least
 * insofar as the sleep code above calls lf_wakeup() if it would otherwise
 * exit instead of loop), but for now just wakeup all overlapping
 * requests. XXX
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
	struct lockf_range *range, *nrange;

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
		if (lf_overlap(range, start, end) == 0)
			continue;
		TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
		range->lf_flags = 1;
		wakeup(range);
	}
}

/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
	struct lockf_range *range;

#ifdef INVARIANTS
	atomic_add_int(&lf_global_counter, 1);
#endif
	range = kmalloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
	range->lf_owner = NULL;
	return(range);
}

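/*
 * Insert elm into the sorted range list. insert_point is a hint: the
 * first element with lf_start >= the search start, or NULL to append
 * at the tail. We scan forward from the hint to keep the list sorted
 * by lf_start.
 */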
static void
lf_insert(struct lockf_range_list *list, struct lockf_range *elm,
	  struct lockf_range *insert_point)
{
	while (insert_point && insert_point->lf_start < elm->lf_start)
		insert_point = TAILQ_NEXT(insert_point, lf_link);
	if (insert_point != NULL)
		TAILQ_INSERT_BEFORE(insert_point, elm, lf_link);
	else
		TAILQ_INSERT_TAIL(list, elm, lf_link);
}

static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
		int flags, off_t start, off_t end)
{
	KKASSERT(start <= end);
	range->lf_type = type;
	range->lf_flags = flags;
	range->lf_start = start;
	range->lf_end = end;
	range->lf_owner = owner;

	lf_printf("lf_create_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
}

static void
lf_destroy_range(struct lockf_range *range)
{
	lf_printf("lf_destroy_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
	kfree(range, M_LOCKF);
#ifdef INVARIANTS
	atomic_add_int(&lf_global_counter, -1);
	KKASSERT(lf_global_counter >= 0);
#endif
}

#ifdef LOCKF_DEBUG

static void
_lf_printf(const char *ctl, ...)
{
	struct proc *p;
	__va_list va;

	if (lf_print_ranges) {
		if ((p = curproc) != NULL)
			kprintf("pid %d (%s): ", p->p_pid, p->p_comm);
	}
	__va_start(va, ctl);
	kvprintf(ctl, va);
	__va_end(va);
}

static void
_lf_print_lock(const struct lockf *lock)
{
	struct lockf_range *range;

	if (lf_print_ranges == 0)
		return;

	if (TAILQ_EMPTY(&lock->lf_range)) {
		lf_printf("lockf %p: no ranges locked\n", lock);
	} else {
		lf_printf("lockf %p:\n", lock);
	}
	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		kprintf("\t%lld..%lld type %s owned by %d\n",
			range->lf_start, range->lf_end,
			range->lf_type == F_RDLCK ? "shared" : "exclusive",
			range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
	if (TAILQ_EMPTY(&lock->lf_blocked))
		kprintf("no process waiting for range\n");
	else
895 kprintf("blocked locks:");
	TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
		kprintf("\t%lld..%lld type %s waiting on %p\n",
			range->lf_start, range->lf_end,
			range->lf_type == F_RDLCK ? "shared" : "exclusive",
			range);
}
#endif /* LOCKF_DEBUG */