FreeBSD/Linux Kernel Cross Reference
sys/fs/locks.c
1 #define MSNFS /* HACK HACK */
2 /*
3 * linux/fs/locks.c
4 *
5 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
6 * Doug Evans (dje@spiff.uucp), August 07, 1992
7 *
8 * Deadlock detection added.
9 * FIXME: one thing isn't handled yet:
10 * - mandatory locks (requires lots of changes elsewhere)
11 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
12 *
13 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
14 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
15 *
16 * Converted file_lock_table to a linked list from an array, which eliminates
17 * the limits on how many active file locks are open.
18 * Chad Page (pageone@netcom.com), November 27, 1994
19 *
20 * Removed dependency on file descriptors. dup()'ed file descriptors now
21 * get the same locks as the original file descriptors, and a close() on
22 * any file descriptor removes ALL the locks on the file for the current
23 * process. Since locks still depend on the process id, locks are inherited
24 * after an exec() but not after a fork(). This agrees with POSIX, and both
25 * BSD and SVR4 practice.
26 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
27 *
28 * Scrapped free list which is redundant now that we allocate locks
29 * dynamically with kmalloc()/kfree().
30 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
31 *
32 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
33 *
34 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
35 * fcntl() system call. They have the semantics described above.
36 *
37 * FL_FLOCK locks are created with calls to flock(), through the flock()
38 * system call, which is new. Old C libraries implement flock() via fcntl()
39 * and will continue to use the old, broken implementation.
40 *
41 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
42 * with a file pointer (filp). As a result they can be shared by a parent
43 * process and its children after a fork(). They are removed when the last
44 * file descriptor referring to the file pointer is closed (unless explicitly
45 * unlocked).
46 *
47 * FL_FLOCK locks never deadlock, an existing lock is always removed before
48 * upgrading from shared to exclusive (or vice versa). When this happens
49 * any processes blocked by the current lock are woken up and allowed to
50 * run before the new lock is applied.
51 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
52 *
53 * Removed some race conditions in flock_lock_file(), marked other possible
54 * races. Just grep for FIXME to see them.
55 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
56 *
57 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
58 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
59 * once we've checked for blocking and deadlocking.
60 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
61 *
62 * Initial implementation of mandatory locks. SunOS turned out to be
63 * a rotten model, so I implemented the "obvious" semantics.
64 * See 'linux/Documentation/mandatory.txt' for details.
65 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
66 *
67 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
68 * check if a file has mandatory locks, used by mmap(), open() and creat() to
69 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
70 * Manual, Section 2.
71 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
72 *
73 * Tidied up block list handling. Added '/proc/locks' interface.
74 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
75 *
76 * Fixed deadlock condition for pathological code that mixes calls to
77 * flock() and fcntl().
78 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
79 *
80 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
81 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
82 * guarantee sensible behaviour in the case where file system modules might
83 * be compiled with different options than the kernel itself.
84 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
85 *
86 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
87 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
88 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
89 *
90 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
91 * locks. Changed process synchronisation to avoid dereferencing locks that
92 * have already been freed.
93 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
94 *
95 * Made the block list a circular list to minimise searching in the list.
96 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
97 *
98 * Made mandatory locking a mount option. Default is not to allow mandatory
99 * locking.
100 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
101 *
102 * Some adaptations for NFS support.
103 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
104 *
105 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
106 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
107 *
108 * Use slab allocator instead of kmalloc/kfree.
109 * Use generic list implementation from <linux/list.h>.
110 * Sped up posix_locks_deadlock by only considering blocked locks.
111 * Matthew Wilcox <willy@thepuffingroup.com>, March, 2000.
112 *
113 * Leases and LOCK_MAND
114 * Matthew Wilcox <willy@linuxcare.com>, June, 2000.
115 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
116 */
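/*
 * A minimal userspace sketch of the FL_POSIX personality described in the
 * header above: fcntl() byte-range locks that are owned by the process and
 * dropped on any close() of the file.  This is an illustrative sketch, not
 * part of this kernel source; the path name is a placeholder and errors are
 * only reported with perror().  The FL_FLOCK personality is sketched near
 * sys_flock() further down.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/posixlock-demo", O_RDWR | O_CREAT, 0644);
	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive lock ... */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,	/* ... on bytes 0..99 */
	};

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fcntl(fd, F_SETLKW, &fl) < 0)	/* wait if another owner holds it */
		perror("fcntl(F_SETLKW)");

	/* ... work on the locked region ... */

	fl.l_type = F_UNLCK;			/* explicit unlock ... */
	if (fcntl(fd, F_SETLK, &fl) < 0)
		perror("fcntl(F_SETLK, F_UNLCK)");
	close(fd);				/* ... or close() drops it too */
	return 0;
}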
117
118 #include <linux/slab.h>
119 #include <linux/file.h>
120 #include <linux/smp_lock.h>
121 #include <linux/init.h>
122 #include <linux/capability.h>
123 #include <linux/sched.h>
124 #include <linux/timer.h>
125
126 #include <asm/semaphore.h>
127 #include <asm/uaccess.h>
128
129 int leases_enable = 1;
130 int lease_break_time = 45;
131
132 LIST_HEAD(file_lock_list);
133 static LIST_HEAD(blocked_list);
134
135 static kmem_cache_t *filelock_cache;
136
137 /* Allocate an empty lock structure. */
138 static struct file_lock *locks_alloc_lock(int account)
139 {
140 struct file_lock *fl;
141 if (account && current->locks >= current->rlim[RLIMIT_LOCKS].rlim_cur)
142 return NULL;
143 fl = kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
144 if (fl)
145 current->locks++;
146 return fl;
147 }
148
149 /* Free a lock which is not in use. */
150 static inline void locks_free_lock(struct file_lock *fl)
151 {
152 if (fl == NULL) {
153 BUG();
154 return;
155 }
156 current->locks--;
157 if (waitqueue_active(&fl->fl_wait))
158 panic("Attempting to free lock with active wait queue");
159
160 if (!list_empty(&fl->fl_block))
161 panic("Attempting to free lock with active block list");
162
163 if (!list_empty(&fl->fl_link))
164 panic("Attempting to free lock on active lock list");
165
166 kmem_cache_free(filelock_cache, fl);
167 }
168
169 void locks_init_lock(struct file_lock *fl)
170 {
171 INIT_LIST_HEAD(&fl->fl_link);
172 INIT_LIST_HEAD(&fl->fl_block);
173 init_waitqueue_head(&fl->fl_wait);
174 fl->fl_next = NULL;
175 fl->fl_fasync = NULL;
176 fl->fl_owner = 0;
177 fl->fl_pid = 0;
178 fl->fl_file = NULL;
179 fl->fl_flags = 0;
180 fl->fl_type = 0;
181 fl->fl_start = fl->fl_end = 0;
182 fl->fl_notify = NULL;
183 fl->fl_insert = NULL;
184 fl->fl_remove = NULL;
185 }
186
187 /*
188 * Initialises the fields of the file lock which are invariant for
189 * free file_locks.
190 */
191 static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
192 {
193 struct file_lock *lock = (struct file_lock *) foo;
194
195 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
196 SLAB_CTOR_CONSTRUCTOR)
197 return;
198
199 locks_init_lock(lock);
200 }
201
202 /*
203 * Initialize a new lock from an existing file_lock structure.
204 */
205 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
206 {
207 new->fl_owner = fl->fl_owner;
208 new->fl_pid = fl->fl_pid;
209 new->fl_file = fl->fl_file;
210 new->fl_flags = fl->fl_flags;
211 new->fl_type = fl->fl_type;
212 new->fl_start = fl->fl_start;
213 new->fl_end = fl->fl_end;
214 new->fl_notify = fl->fl_notify;
215 new->fl_insert = fl->fl_insert;
216 new->fl_remove = fl->fl_remove;
217 new->fl_u = fl->fl_u;
218 }
219
220 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
221 static struct file_lock *flock_make_lock(struct file *filp, unsigned int type)
222 {
223 struct file_lock *fl = locks_alloc_lock(1);
224 if (fl == NULL)
225 return NULL;
226
227 fl->fl_owner = NULL;
228 fl->fl_file = filp;
229 fl->fl_pid = current->pid;
230 fl->fl_flags = FL_FLOCK;
231 fl->fl_type = type;
232 fl->fl_start = 0;
233 fl->fl_end = OFFSET_MAX;
234 fl->fl_notify = NULL;
235 fl->fl_insert = NULL;
236 fl->fl_remove = NULL;
237
238 return fl;
239 }
240
241 static int assign_type(struct file_lock *fl, int type)
242 {
243 switch (type) {
244 case F_RDLCK:
245 case F_WRLCK:
246 case F_UNLCK:
247 fl->fl_type = type;
248 break;
249 default:
250 return -EINVAL;
251 }
252 return 0;
253 }
254
255 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
256 * style lock.
257 */
258 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
259 struct flock *l)
260 {
261 off_t start, end;
262
263 switch (l->l_whence) {
264 case 0: /*SEEK_SET*/
265 start = 0;
266 break;
267 case 1: /*SEEK_CUR*/
268 start = filp->f_pos;
269 break;
270 case 2: /*SEEK_END*/
271 start = filp->f_dentry->d_inode->i_size;
272 break;
273 default:
274 return -EINVAL;
275 }
276
277 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
278 POSIX-2001 defines it. */
279 start += l->l_start;
280 if (l->l_len < 0) {
281 end = start - 1;
282 start += l->l_len;
283 } else {
284 end = start + l->l_len - 1;
285 }
286
287 if (start < 0)
288 return -EINVAL;
289 if (l->l_len > 0 && end < 0)
290 return -EOVERFLOW;
291 fl->fl_start = start; /* we record the absolute position */
292 fl->fl_end = end;
293 if (l->l_len == 0)
294 fl->fl_end = OFFSET_MAX;
295
296 fl->fl_owner = current->files;
297 fl->fl_pid = current->pid;
298 fl->fl_file = filp;
299 fl->fl_flags = FL_POSIX;
300 fl->fl_notify = NULL;
301 fl->fl_insert = NULL;
302 fl->fl_remove = NULL;
303
304 return assign_type(fl, l->l_type);
305 }
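/*
 * A standalone sketch of the range arithmetic performed by
 * flock_to_posix_lock() above: the (l_whence, l_start, l_len) triple from
 * userspace becomes an absolute, inclusive [start, end] range.  l_len == 0
 * means "to end of file" and a negative l_len (POSIX-2001) describes the
 * bytes just before the computed start.  The base offsets are passed in
 * explicitly here instead of being read from the file, the EOVERFLOW check
 * is omitted for brevity, and the names are local to this sketch.
 */
#include <sys/types.h>

struct posix_range {
	off_t start;	/* first byte covered, inclusive */
	off_t end;	/* last byte covered, or -1 for "to EOF" */
};

/* whence: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END */
static int flock_range(int whence, off_t l_start, off_t l_len,
		       off_t f_pos, off_t i_size, struct posix_range *out)
{
	off_t start, end;

	switch (whence) {
	case 0: start = 0;      break;
	case 1: start = f_pos;  break;
	case 2: start = i_size; break;
	default: return -1;			/* -EINVAL in the kernel */
	}

	start += l_start;
	if (l_len < 0) {			/* lock the bytes before start */
		end = start - 1;
		start += l_len;
	} else {
		end = start + l_len - 1;
	}
	if (start < 0)
		return -1;

	out->start = start;
	out->end = (l_len == 0) ? -1 : end;	/* -1 stands in for OFFSET_MAX */
	return 0;
}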
306
307 #if BITS_PER_LONG == 32
308 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
309 struct flock64 *l)
310 {
311 loff_t start;
312
313 switch (l->l_whence) {
314 case 0: /*SEEK_SET*/
315 start = 0;
316 break;
317 case 1: /*SEEK_CUR*/
318 start = filp->f_pos;
319 break;
320 case 2: /*SEEK_END*/
321 start = filp->f_dentry->d_inode->i_size;
322 break;
323 default:
324 return -EINVAL;
325 }
326
327 if (((start += l->l_start) < 0) || (l->l_len < 0))
328 return -EINVAL;
329 fl->fl_end = start + l->l_len - 1;
330 if (l->l_len > 0 && fl->fl_end < 0)
331 return -EOVERFLOW;
332 fl->fl_start = start; /* we record the absolute position */
333 if (l->l_len == 0)
334 fl->fl_end = OFFSET_MAX;
335
336 fl->fl_owner = current->files;
337 fl->fl_pid = current->pid;
338 fl->fl_file = filp;
339 fl->fl_flags = FL_POSIX;
340 fl->fl_notify = NULL;
341 fl->fl_insert = NULL;
342 fl->fl_remove = NULL;
343
344 switch (l->l_type) {
345 case F_RDLCK:
346 case F_WRLCK:
347 case F_UNLCK:
348 fl->fl_type = l->l_type;
349 break;
350 default:
351 return -EINVAL;
352 }
353
354 return (0);
355 }
356 #endif
357
358 /* Allocate a file_lock initialised to this type of lease */
359 static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
360 {
361 struct file_lock *fl = locks_alloc_lock(1);
362 if (fl == NULL)
363 return -ENOMEM;
364
365 fl->fl_owner = current->files;
366 fl->fl_pid = current->pid;
367
368 fl->fl_file = filp;
369 fl->fl_flags = FL_LEASE;
370 if (assign_type(fl, type) != 0) {
371 locks_free_lock(fl);
372 return -EINVAL;
373 }
374 fl->fl_start = 0;
375 fl->fl_end = OFFSET_MAX;
376 fl->fl_notify = NULL;
377 fl->fl_insert = NULL;
378 fl->fl_remove = NULL;
379
380 *flp = fl;
381 return 0;
382 }
383
384 /* Check if two locks overlap each other.
385 */
386 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
387 {
388 return ((fl1->fl_end >= fl2->fl_start) &&
389 (fl2->fl_end >= fl1->fl_start));
390 }
391
392 /*
393 * Check whether two locks have the same owner
394 * N.B. Do we need the test on PID as well as owner?
395 * (Clone tasks should be considered as one "owner".)
396 */
397 static inline int
398 locks_same_owner(struct file_lock *fl1, struct file_lock *fl2)
399 {
400 return (fl1->fl_owner == fl2->fl_owner) &&
401 (fl1->fl_pid == fl2->fl_pid);
402 }
403
404 /* Remove waiter from blocker's block list.
405 * When blocker ends up pointing to itself then the list is empty.
406 */
407 static void locks_delete_block(struct file_lock *waiter)
408 {
409 list_del(&waiter->fl_block);
410 INIT_LIST_HEAD(&waiter->fl_block);
411 list_del(&waiter->fl_link);
412 INIT_LIST_HEAD(&waiter->fl_link);
413 waiter->fl_next = NULL;
414 }
415
416 /* Insert waiter into blocker's block list.
417 * We use a circular list so that processes can be easily woken up in
418 * the order they blocked. The documentation doesn't require this but
419 * it seems like the reasonable thing to do.
420 */
421 static void locks_insert_block(struct file_lock *blocker,
422 struct file_lock *waiter)
423 {
424 if (!list_empty(&waiter->fl_block)) {
425 printk(KERN_ERR "locks_insert_block: removing duplicated lock "
426 "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
427 waiter->fl_start, waiter->fl_end, waiter->fl_type);
428 locks_delete_block(waiter);
429 }
430 list_add_tail(&waiter->fl_block, &blocker->fl_block);
431 waiter->fl_next = blocker;
432 list_add(&waiter->fl_link, &blocked_list);
433 }
434
435 static inline
436 void locks_notify_blocked(struct file_lock *waiter)
437 {
438 if (waiter->fl_notify)
439 waiter->fl_notify(waiter);
440 else
441 wake_up(&waiter->fl_wait);
442 }
443
444 /* Wake up processes blocked waiting for blocker.
445 * If told to wait then schedule the processes until the block list
446 * is empty, otherwise empty the block list ourselves.
447 */
448 static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait)
449 {
450 while (!list_empty(&blocker->fl_block)) {
451 struct file_lock *waiter = list_entry(blocker->fl_block.next, struct file_lock, fl_block);
452
453 if (wait) {
454 locks_notify_blocked(waiter);
455 /* Let the blocked process remove waiter from the
456 * block list when it gets scheduled.
457 */
458 yield();
459 } else {
460 /* Remove waiter from the block list, because by the
461 * time it wakes up blocker won't exist any more.
462 */
463 locks_delete_block(waiter);
464 locks_notify_blocked(waiter);
465 }
466 }
467 }
468
469 /* Insert file lock fl into an inode's lock list at the position indicated
470 * by pos. At the same time add the lock to the global file lock list.
471 */
472 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
473 {
474 list_add(&fl->fl_link, &file_lock_list);
475
476 /* insert into file's list */
477 fl->fl_next = *pos;
478 *pos = fl;
479
480 if (fl->fl_insert)
481 fl->fl_insert(fl);
482 }
483
484 /*
485 * Remove lock from the lock lists
486 */
487 static inline void _unhash_lock(struct file_lock **thisfl_p)
488 {
489 struct file_lock *fl = *thisfl_p;
490
491 *thisfl_p = fl->fl_next;
492 fl->fl_next = NULL;
493
494 list_del_init(&fl->fl_link);
495 }
496
497 /*
498 * Wake up processes that are blocked waiting for this lock,
499 * notify the FS that the lock has been cleared and
500 * finally free the lock.
501 */
502 static inline void _delete_lock(struct file_lock *fl, unsigned int wait)
503 {
504 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
505 if (fl->fl_fasync != NULL){
506 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
507 fl->fl_fasync = NULL;
508 }
509
510 if (fl->fl_remove)
511 fl->fl_remove(fl);
512
513 locks_wake_up_blocks(fl, wait);
514 locks_free_lock(fl);
515 }
516
517 /*
518 * Delete a lock and then free it.
519 */
520 static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait)
521 {
522 struct file_lock *fl = *thisfl_p;
523
524 _unhash_lock(thisfl_p);
525 _delete_lock(fl, wait);
526 }
527
528 /*
529 * Call back client filesystem in order to get it to unregister a lock,
530 * then delete lock. Essentially useful only in locks_remove_*().
531 * Note: this must be called with the semaphore already held!
532 */
533 static inline void locks_unlock_delete(struct file_lock **thisfl_p)
534 {
535 struct file_lock *fl = *thisfl_p;
536 int (*lock)(struct file *, int, struct file_lock *);
537
538 _unhash_lock(thisfl_p);
539 if (fl->fl_file->f_op &&
540 (lock = fl->fl_file->f_op->lock) != NULL) {
541 fl->fl_type = F_UNLCK;
542 lock(fl->fl_file, F_SETLK, fl);
543 }
544 _delete_lock(fl, 0);
545 }
546
547 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
548 * checks for shared/exclusive status of overlapping locks.
549 */
550 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
551 {
552 switch (caller_fl->fl_type) {
553 case F_RDLCK:
554 return (sys_fl->fl_type == F_WRLCK);
555
556 case F_WRLCK:
557 return (1);
558
559 default:
560 printk(KERN_ERR "locks_conflict(): impossible lock type - %d\n",
561 caller_fl->fl_type);
562 break;
563 }
564 return (0); /* This should never happen */
565 }
566
567 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
568 * checking before calling the locks_conflict().
569 */
570 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
571 {
572 /* POSIX locks owned by the same process do not conflict with
573 * each other.
574 */
575 if (!(sys_fl->fl_flags & FL_POSIX) ||
576 locks_same_owner(caller_fl, sys_fl))
577 return (0);
578
579 /* Check whether they overlap */
580 if (!locks_overlap(caller_fl, sys_fl))
581 return 0;
582
583 return (locks_conflict(caller_fl, sys_fl));
584 }
585
586 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
587 * checking before calling the locks_conflict().
588 */
589 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
590 {
591 /* FLOCK locks referring to the same filp do not conflict with
592 * each other.
593 */
594 if (!(sys_fl->fl_flags & FL_FLOCK) ||
595 (caller_fl->fl_file == sys_fl->fl_file))
596 return (0);
597 #ifdef MSNFS
598 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
599 return 0;
600 #endif
601
602 return (locks_conflict(caller_fl, sys_fl));
603 }
604
605 static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
606 {
607 int result = 0;
608 DECLARE_WAITQUEUE(wait, current);
609
610 current->state = TASK_INTERRUPTIBLE;
611 add_wait_queue(fl_wait, &wait);
612 if (timeout == 0)
613 schedule();
614 else
615 result = schedule_timeout(timeout);
616 if (signal_pending(current))
617 result = -ERESTARTSYS;
618 remove_wait_queue(fl_wait, &wait);
619 current->state = TASK_RUNNING;
620 return result;
621 }
622
623 static int locks_block_on(struct file_lock *blocker, struct file_lock *waiter)
624 {
625 int result;
626 locks_insert_block(blocker, waiter);
627 result = interruptible_sleep_on_locked(&waiter->fl_wait, 0);
628 locks_delete_block(waiter);
629 return result;
630 }
631
632 static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
633 {
634 int result;
635 locks_insert_block(blocker, waiter);
636 result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
637 locks_delete_block(waiter);
638 return result;
639 }
640
641 struct file_lock *
642 posix_test_lock(struct file *filp, struct file_lock *fl)
643 {
644 struct file_lock *cfl;
645
646 lock_kernel();
647 for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
648 if (!(cfl->fl_flags & FL_POSIX))
649 continue;
650 if (posix_locks_conflict(cfl, fl))
651 break;
652 }
653 unlock_kernel();
654
655 return (cfl);
656 }
657
658 /* This function tests for deadlock condition before putting a process to
659 * sleep. The detection scheme is no longer recursive. Recursive was neat,
660 * but dangerous - we risked stack corruption if the lock data was bad, or
661 * if the recursion was too deep for any other reason.
662 *
663 * We rely on the fact that a task can only be on one lock's wait queue
664 * at a time. When we find blocked_task on a wait queue we can re-search
665 * with blocked_task equal to that queue's owner, until either blocked_task
666 * isn't found, or blocked_task is found on a queue owned by my_task.
667 *
668 * Note: the above assumption may not be true when handling lock requests
669 * from a broken NFS client. But broken NFS clients have a lot more to
670 * worry about than proper deadlock detection anyway... --okir
671 */
672 int posix_locks_deadlock(struct file_lock *caller_fl,
673 struct file_lock *block_fl)
674 {
675 struct list_head *tmp;
676 fl_owner_t caller_owner, blocked_owner;
677 unsigned int caller_pid, blocked_pid;
678
679 caller_owner = caller_fl->fl_owner;
680 caller_pid = caller_fl->fl_pid;
681 blocked_owner = block_fl->fl_owner;
682 blocked_pid = block_fl->fl_pid;
683
684 next_task:
685 if (caller_owner == blocked_owner && caller_pid == blocked_pid)
686 return 1;
687 list_for_each(tmp, &blocked_list) {
688 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
689 if ((fl->fl_owner == blocked_owner)
690 && (fl->fl_pid == blocked_pid)) {
691 fl = fl->fl_next;
692 blocked_owner = fl->fl_owner;
693 blocked_pid = fl->fl_pid;
694 goto next_task;
695 }
696 }
697 return 0;
698 }
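/*
 * A compacted sketch of the owner-chasing walk that posix_locks_deadlock()
 * performs above.  Starting from the owner we are about to wait behind, we
 * repeatedly hop to "the owner that owner is itself waiting behind" until the
 * chain ends (no deadlock) or comes back to us (deadlock).  The edge array
 * below stands in for the kernel's global blocked_list, and the owner and pid
 * pair is collapsed into a single int; all names are invented for the sketch.
 */
#include <stddef.h>

struct wait_edge {
	int waiter_owner;	/* owner that is asleep */
	int blocker_owner;	/* owner it is waiting behind */
};

static int would_deadlock(int caller_owner, int blocked_owner,
			  const struct wait_edge *edges, size_t n)
{
next_owner:
	if (blocked_owner == caller_owner)
		return 1;			/* the chain cycles back to us */
	for (size_t i = 0; i < n; i++) {
		if (edges[i].waiter_owner == blocked_owner) {
			/* that owner is itself asleep; follow the chain */
			blocked_owner = edges[i].blocker_owner;
			goto next_owner;
		}
	}
	return 0;				/* chain ended: safe to sleep */
}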
699
700 int locks_mandatory_locked(struct inode *inode)
701 {
702 fl_owner_t owner = current->files;
703 struct file_lock *fl;
704
705 /*
706 * Search the lock list for this inode for any POSIX locks.
707 */
708 lock_kernel();
709 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
710 if (!(fl->fl_flags & FL_POSIX))
711 continue;
712 if (fl->fl_owner != owner)
713 break;
714 }
715 unlock_kernel();
716 return fl ? -EAGAIN : 0;
717 }
718
719 int locks_mandatory_area(int read_write, struct inode *inode,
720 struct file *filp, loff_t offset,
721 size_t count)
722 {
723 struct file_lock *fl;
724 struct file_lock *new_fl = locks_alloc_lock(0);
725 int error;
726
727 if (new_fl == NULL)
728 return -ENOMEM;
729
730 new_fl->fl_owner = current->files;
731 new_fl->fl_pid = current->pid;
732 new_fl->fl_file = filp;
733 new_fl->fl_flags = FL_POSIX | FL_ACCESS;
734 new_fl->fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
735 new_fl->fl_start = offset;
736 new_fl->fl_end = offset + count - 1;
737
738 error = 0;
739 lock_kernel();
740
741 repeat:
742 /* Search the lock list for this inode for locks that conflict with
743 * the proposed read/write.
744 */
745 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
746 if (!(fl->fl_flags & FL_POSIX))
747 continue;
748 if (fl->fl_start > new_fl->fl_end)
749 break;
750 if (posix_locks_conflict(new_fl, fl)) {
751 error = -EAGAIN;
752 if (filp && (filp->f_flags & O_NONBLOCK))
753 break;
754 error = -EDEADLK;
755 if (posix_locks_deadlock(new_fl, fl))
756 break;
757
758 error = locks_block_on(fl, new_fl);
759 if (error != 0)
760 break;
761
762 /*
763 * If we've been sleeping someone might have
764 * changed the permissions behind our back.
765 */
766 if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
767 break;
768 goto repeat;
769 }
770 }
771 locks_free_lock(new_fl);
772 unlock_kernel();
773 return error;
774 }
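/*
 * locks_mandatory_area() above (and the fcntl_setlk*() paths further down)
 * only treat POSIX locks as mandatory when the file's setgid bit is set with
 * group execute cleared, on a filesystem mounted with the mandatory-locking
 * option (see the IS_MANDLOCK() tests below and the mount-option note in the
 * header).  A small sketch of the mode-bit test, usable from userspace
 * against st_mode from stat(); the helper name is invented here.
 */
#include <sys/stat.h>

int mode_marks_mandatory(mode_t mode)
{
	/* setgid set, group-execute clear: e.g. chmod 2644 <file> */
	return (mode & (S_ISGID | S_IXGRP)) == S_ISGID;
}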
775
776 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
777 * at the head of the list, but that's secret knowledge known only to
778 * flock_lock_file and posix_lock_file.
779 */
780 static int flock_lock_file(struct file *filp, unsigned int lock_type,
781 unsigned int wait)
782 {
783 struct file_lock *fl;
784 struct file_lock *new_fl = NULL;
785 struct file_lock **before;
786 struct inode * inode = filp->f_dentry->d_inode;
787 int error, change;
788 int unlock = (lock_type == F_UNLCK);
789
790 /*
791 * If we need a new lock, get it in advance to avoid races.
792 */
793 if (!unlock) {
794 error = -ENOLCK;
795 new_fl = flock_make_lock(filp, lock_type);
796 if (!new_fl)
797 return error;
798 }
799
800 error = 0;
801 search:
802 change = 0;
803 before = &inode->i_flock;
804 while (((fl = *before) != NULL) && (fl->fl_flags & FL_FLOCK)) {
805 if (filp == fl->fl_file) {
806 if (lock_type == fl->fl_type)
807 goto out;
808 change = 1;
809 break;
810 }
811 before = &fl->fl_next;
812 }
813 /* change means that we are changing the type of an existing lock,
814 * or else unlocking it.
815 */
816 if (change) {
817 /* N.B. What if the wait argument is false? */
818 locks_delete_lock(before, !unlock);
819 /*
820 * If we waited, another lock may have been added ...
821 */
822 if (!unlock)
823 goto search;
824 }
825 if (unlock)
826 goto out;
827
828 repeat:
829 for (fl = inode->i_flock; (fl != NULL) && (fl->fl_flags & FL_FLOCK);
830 fl = fl->fl_next) {
831 if (!flock_locks_conflict(new_fl, fl))
832 continue;
833 error = -EAGAIN;
834 if (!wait)
835 goto out;
836 error = locks_block_on(fl, new_fl);
837 if (error != 0)
838 goto out;
839 goto repeat;
840 }
841 locks_insert_lock(&inode->i_flock, new_fl);
842 new_fl = NULL;
843 error = 0;
844
845 out:
846 if (new_fl)
847 locks_free_lock(new_fl);
848 return error;
849 }
850
851 /**
852 * posix_lock_file:
853 * @filp: The file to apply the lock to
854 * @caller: The lock to be applied
855 * @wait: 1 to retry automatically, 0 to return -EAGAIN
856 *
857 * Add a POSIX style lock to a file.
858 * We merge adjacent locks whenever possible. POSIX locks are sorted by owner
859 * task, then by starting address
860 *
861 * Kai Petzke writes:
862 * To make freeing a lock much faster, we keep a pointer to the lock before the
863 * actual one. But the real gain of the new coding was, that lock_it() and
864 * unlock_it() became one function.
865 *
866 * To all purists: Yes, I use a few goto's. Just pass on to the next function.
867 */
868
869 int posix_lock_file(struct file *filp, struct file_lock *caller,
870 unsigned int wait)
871 {
872 struct file_lock *fl;
873 struct file_lock *new_fl, *new_fl2;
874 struct file_lock *left = NULL;
875 struct file_lock *right = NULL;
876 struct file_lock **before;
877 struct inode * inode = filp->f_dentry->d_inode;
878 int error, added = 0;
879
880 /*
881 * We may need two file_lock structures for this operation,
882 * so we get them in advance to avoid races.
883 */
884 new_fl = locks_alloc_lock(0);
885 new_fl2 = locks_alloc_lock(0);
886 error = -ENOLCK; /* "no luck" */
887 if (!(new_fl && new_fl2))
888 goto out_nolock;
889
890 lock_kernel();
891 if (caller->fl_type != F_UNLCK) {
892 repeat:
893 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
894 if (!(fl->fl_flags & FL_POSIX))
895 continue;
896 if (!posix_locks_conflict(caller, fl))
897 continue;
898 error = -EAGAIN;
899 if (!wait)
900 goto out;
901 error = -EDEADLK;
902 if (posix_locks_deadlock(caller, fl))
903 goto out;
904
905 error = locks_block_on(fl, caller);
906 if (error != 0)
907 goto out;
908 goto repeat;
909 }
910 }
911
912 /*
913 * We've allocated the new locks in advance, so there are no
914 * errors possible (and no blocking operations) from here on.
915 *
916 * Find the first old lock with the same owner as the new lock.
917 */
918
919 before = &inode->i_flock;
920
921 /* First skip locks owned by other processes.
922 */
923 while ((fl = *before) && (!(fl->fl_flags & FL_POSIX) ||
924 !locks_same_owner(caller, fl))) {
925 before = &fl->fl_next;
926 }
927
928 /* Process locks with this owner.
929 */
930 while ((fl = *before) && locks_same_owner(caller, fl)) {
931 /* Detect adjacent or overlapping regions (if same lock type)
932 */
933 if (caller->fl_type == fl->fl_type) {
934 if (fl->fl_end < caller->fl_start - 1)
935 goto next_lock;
936 /* If the next lock in the list has entirely bigger
937 * addresses than the new one, insert the lock here.
938 */
939 if (fl->fl_start > caller->fl_end + 1)
940 break;
941
942 /* If we come here, the new and old lock are of the
943 * same type and adjacent or overlapping. Make one
944 * lock yielding from the lower start address of both
945 * locks to the higher end address.
946 */
947 if (fl->fl_start > caller->fl_start)
948 fl->fl_start = caller->fl_start;
949 else
950 caller->fl_start = fl->fl_start;
951 if (fl->fl_end < caller->fl_end)
952 fl->fl_end = caller->fl_end;
953 else
954 caller->fl_end = fl->fl_end;
955 if (added) {
956 locks_delete_lock(before, 0);
957 continue;
958 }
959 caller = fl;
960 added = 1;
961 }
962 else {
963 /* Processing for different lock types is a bit
964 * more complex.
965 */
966 if (fl->fl_end < caller->fl_start)
967 goto next_lock;
968 if (fl->fl_start > caller->fl_end)
969 break;
970 if (caller->fl_type == F_UNLCK)
971 added = 1;
972 if (fl->fl_start < caller->fl_start)
973 left = fl;
974 /* If the next lock in the list has a higher end
975 * address than the new one, insert the new one here.
976 */
977 if (fl->fl_end > caller->fl_end) {
978 right = fl;
979 break;
980 }
981 if (fl->fl_start >= caller->fl_start) {
982 /* The new lock completely replaces an old
983 * one (This may happen several times).
984 */
985 if (added) {
986 locks_delete_lock(before, 0);
987 continue;
988 }
989 /* Replace the old lock with the new one.
990 * Wake up anybody waiting for the old one,
991 * as the change in lock type might satisfy
992 * their needs.
993 */
994 locks_wake_up_blocks(fl, 0); /* This cannot schedule()! */
995 fl->fl_start = caller->fl_start;
996 fl->fl_end = caller->fl_end;
997 fl->fl_type = caller->fl_type;
998 fl->fl_u = caller->fl_u;
999 caller = fl;
1000 added = 1;
1001 }
1002 }
1003 /* Go on to next lock.
1004 */
1005 next_lock:
1006 before = &fl->fl_next;
1007 }
1008
1009 error = 0;
1010 if (!added) {
1011 if (caller->fl_type == F_UNLCK)
1012 goto out;
1013 locks_copy_lock(new_fl, caller);
1014 locks_insert_lock(before, new_fl);
1015 new_fl = NULL;
1016 }
1017 if (right) {
1018 if (left == right) {
1019 /* The new lock breaks the old one in two pieces,
1020 * so we have to use the second new lock.
1021 */
1022 left = new_fl2;
1023 new_fl2 = NULL;
1024 locks_copy_lock(left, right);
1025 locks_insert_lock(before, left);
1026 }
1027 right->fl_start = caller->fl_end + 1;
1028 locks_wake_up_blocks(right, 0);
1029 }
1030 if (left) {
1031 left->fl_end = caller->fl_start - 1;
1032 locks_wake_up_blocks(left, 0);
1033 }
1034 out:
1035 unlock_kernel();
1036 out_nolock:
1037 /*
1038 * Free any unused locks.
1039 */
1040 if (new_fl)
1041 locks_free_lock(new_fl);
1042 if (new_fl2)
1043 locks_free_lock(new_fl2);
1044 return error;
1045 }
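/*
 * The left/right bookkeeping at the end of posix_lock_file() above is plain
 * interval arithmetic: when the new range lands strictly inside an existing
 * lock of a different type (or an unlock), the old lock is split into a piece
 * ending just before the new range and a piece starting just after it.  A
 * standalone sketch with invented names, using the same inclusive
 * [start, end] convention as fl_start/fl_end; it assumes the ranges overlap.
 */
struct byte_range { long long start, end; };	/* inclusive bounds */

/* Split "existing" around "req"; returns how many pieces survive (0..2). */
static int split_around(struct byte_range existing, struct byte_range req,
			struct byte_range out[2])
{
	int n = 0;

	if (existing.start < req.start) {	/* piece left of the request */
		out[n].start = existing.start;
		out[n].end = req.start - 1;	/* left->fl_end = caller->fl_start - 1 */
		n++;
	}
	if (existing.end > req.end) {		/* piece right of the request */
		out[n].start = req.end + 1;	/* right->fl_start = caller->fl_end + 1 */
		out[n].end = existing.end;
		n++;
	}
	return n;
}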
1046
1047 static inline int flock_translate_cmd(int cmd) {
1048 #ifdef MSNFS
1049 if (cmd & LOCK_MAND)
1050 return cmd & (LOCK_MAND | LOCK_RW);
1051 #endif
1052 switch (cmd &~ LOCK_NB) {
1053 case LOCK_SH:
1054 return F_RDLCK;
1055 case LOCK_EX:
1056 return F_WRLCK;
1057 case LOCK_UN:
1058 return F_UNLCK;
1059 }
1060 return -EINVAL;
1061 }
1062
1063 /* We already had a lease on this file; just change its type */
1064 static int lease_modify(struct file_lock **before, int arg)
1065 {
1066 struct file_lock *fl = *before;
1067 int error = assign_type(fl, arg);
1068
1069 if (error)
1070 return error;
1071 locks_wake_up_blocks(fl, 0);
1072 if (arg == F_UNLCK) {
1073 struct file *filp = fl->fl_file;
1074
1075 filp->f_owner.pid = 0;
1076 filp->f_owner.uid = 0;
1077 filp->f_owner.euid = 0;
1078 filp->f_owner.signum = 0;
1079 locks_delete_lock(before, 0);
1080 }
1081 return 0;
1082 }
1083
1084 static void time_out_leases(struct inode *inode)
1085 {
1086 struct file_lock **before;
1087 struct file_lock *fl;
1088
1089 before = &inode->i_flock;
1090 while ((fl = *before) && (fl->fl_flags & FL_LEASE)
1091 && (fl->fl_type & F_INPROGRESS)) {
1092 if ((fl->fl_break_time == 0)
1093 || time_before(jiffies, fl->fl_break_time)) {
1094 before = &fl->fl_next;
1095 continue;
1096 }
1097 printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid);
1098 lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1099 if (fl == *before) /* lease_modify may have freed fl */
1100 before = &fl->fl_next;
1101 }
1102 }
1103
1104 /**
1105 * __get_lease - revoke all outstanding leases on file
1106 * @inode: the inode of the file to return
1107 * @mode: the open mode (read or write)
1108 *
1109 * get_lease (inlined for speed) has checked there already
1110 * is a lease on this file. Leases are broken on a call to open()
1111 * or truncate(). This function can sleep unless you
1112  * specified %O_NONBLOCK in your open().
1113 */
1114 int __get_lease(struct inode *inode, unsigned int mode)
1115 {
1116 int error = 0, future;
1117 struct file_lock *new_fl, *flock;
1118 struct file_lock *fl;
1119 int alloc_err;
1120 unsigned long break_time;
1121 int i_have_this_lease = 0;
1122
1123 alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
1124 &new_fl);
1125
1126 lock_kernel();
1127
1128 time_out_leases(inode);
1129
1130 flock = inode->i_flock;
1131 if ((flock == NULL) || (flock->fl_flags & FL_LEASE) == 0)
1132 goto out;
1133
1134 for (fl = flock; fl && (fl->fl_flags & FL_LEASE); fl = fl->fl_next)
1135 if (fl->fl_owner == current->files)
1136 i_have_this_lease = 1;
1137
1138 if (mode & FMODE_WRITE) {
1139 /* If we want write access, we have to revoke any lease. */
1140 future = F_UNLCK | F_INPROGRESS;
1141 } else if (flock->fl_type & F_INPROGRESS) {
1142 /* If the lease is already being broken, we just leave it */
1143 future = flock->fl_type;
1144 } else if (flock->fl_type & F_WRLCK) {
1145 /* Downgrade the exclusive lease to a read-only lease. */
1146 future = F_RDLCK | F_INPROGRESS;
1147 } else {
1148 /* the existing lease was read-only, so we can read too. */
1149 goto out;
1150 }
1151
1152 if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
1153 error = alloc_err;
1154 goto out;
1155 }
1156
1157 break_time = 0;
1158 if (lease_break_time > 0) {
1159 break_time = jiffies + lease_break_time * HZ;
1160 if (break_time == 0)
1161 break_time++; /* so that 0 means no break time */
1162 }
1163
1164 for (fl = flock; fl && (fl->fl_flags & FL_LEASE); fl = fl->fl_next) {
1165 if (fl->fl_type != future) {
1166 fl->fl_type = future;
1167 fl->fl_break_time = break_time;
1168 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
1169 }
1170 }
1171
1172 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1173 error = -EWOULDBLOCK;
1174 goto out;
1175 }
1176
1177 restart:
1178 break_time = flock->fl_break_time;
1179 if (break_time != 0) {
1180 break_time -= jiffies;
1181 if (break_time == 0)
1182 break_time++;
1183 }
1184 error = locks_block_on_timeout(flock, new_fl, break_time);
1185 if (error >= 0) {
1186 if (error == 0)
1187 time_out_leases(inode);
1188 /* Wait for the next lease that has not been broken yet */
1189 for (flock = inode->i_flock;
1190 flock && (flock->fl_flags & FL_LEASE);
1191 flock = flock->fl_next) {
1192 if (flock->fl_type & F_INPROGRESS)
1193 goto restart;
1194 }
1195 error = 0;
1196 }
1197
1198 out:
1199 unlock_kernel();
1200 if (!alloc_err)
1201 locks_free_lock(new_fl);
1202 return error;
1203 }
1204
1205 /**
1206 * lease_get_mtime
1207 * @inode: the inode
1208 *
1209 * This is to force NFS clients to flush their caches for files with
1210 * exclusive leases. The justification is that if someone has an
1211  * exclusive lease, then they could be modifying it.
1212 */
1213 time_t lease_get_mtime(struct inode *inode)
1214 {
1215 struct file_lock *flock = inode->i_flock;
1216 if (flock && (flock->fl_flags & FL_LEASE) && (flock->fl_type & F_WRLCK))
1217 return CURRENT_TIME;
1218 return inode->i_mtime;
1219 }
1220
1221 /**
1222 * fcntl_getlease - Enquire what lease is currently active
1223 * @filp: the file
1224 *
1225 * The value returned by this function will be one of
1226 * (if no lease break is pending):
1227 *
1228 * %F_RDLCK to indicate a shared lease is held.
1229 *
1230 * %F_WRLCK to indicate an exclusive lease is held.
1231 *
1232 * %F_UNLCK to indicate no lease is held.
1233 *
1234 * (if a lease break is pending):
1235 *
1236 * %F_RDLCK to indicate an exclusive lease needs to be
1237 * changed to a shared lease (or removed).
1238 *
1239 * %F_UNLCK to indicate the lease needs to be removed.
1240 *
1241 * XXX: sfr & willy disagree over whether F_INPROGRESS
1242 * should be returned to userspace.
1243 */
1244 int fcntl_getlease(struct file *filp)
1245 {
1246 struct file_lock *fl;
1247 int type = F_UNLCK;
1248
1249 lock_kernel();
1250 time_out_leases(filp->f_dentry->d_inode);
1251 for (fl = filp->f_dentry->d_inode->i_flock;
1252 fl && (fl->fl_flags & FL_LEASE);
1253 fl = fl->fl_next) {
1254 if (fl->fl_file == filp) {
1255 type = fl->fl_type & ~F_INPROGRESS;
1256 break;
1257 }
1258 }
1259 unlock_kernel();
1260 return type;
1261 }
1262
1263 /**
1264 * fcntl_setlease - sets a lease on an open file
1265 * @fd: open file descriptor
1266 * @filp: file pointer
1267 * @arg: type of lease to obtain
1268 *
1269 * Call this fcntl to establish a lease on the file.
1270 * Note that you also need to call %F_SETSIG to
1271 * receive a signal when the lease is broken.
1272 */
1273 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1274 {
1275 struct file_lock *fl, **before, **my_before = NULL;
1276 struct dentry *dentry;
1277 struct inode *inode;
1278 int error, rdlease_count = 0, wrlease_count = 0;
1279
1280 dentry = filp->f_dentry;
1281 inode = dentry->d_inode;
1282
1283 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
1284 return -EACCES;
1285 if (!S_ISREG(inode->i_mode))
1286 return -EINVAL;
1287
1288 lock_kernel();
1289
1290 time_out_leases(inode);
1291
1292 /*
1293 * FIXME: What about F_RDLCK and files open for writing?
1294 */
1295 error = -EAGAIN;
1296 if ((arg == F_WRLCK)
1297 && ((atomic_read(&dentry->d_count) > 1)
1298 || (atomic_read(&inode->i_count) > 1)))
1299 goto out_unlock;
1300
1301 /*
1302 * At this point, we know that if there is an exclusive
1303 * lease on this file, then we hold it on this filp
1304 * (otherwise our open of this file would have blocked).
1305 * And if we are trying to acquire an exclusive lease,
1306 * then the file is not open by anyone (including us)
1307 * except for this filp.
1308 */
1309 for (before = &inode->i_flock;
1310 ((fl = *before) != NULL) && (fl->fl_flags & FL_LEASE);
1311 before = &fl->fl_next) {
1312 if (fl->fl_file == filp)
1313 my_before = before;
1314 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1315 /*
1316 * Someone is in the process of opening this
1317 * file for writing so we may not take an
1318 * exclusive lease on it.
1319 */
1320 wrlease_count++;
1321 else
1322 rdlease_count++;
1323 }
1324
1325 if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1326 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1327 goto out_unlock;
1328
1329 if (my_before != NULL) {
1330 error = lease_modify(my_before, arg);
1331 goto out_unlock;
1332 }
1333
1334 error = 0;
1335 if (arg == F_UNLCK)
1336 goto out_unlock;
1337
1338 error = -EINVAL;
1339 if (!leases_enable)
1340 goto out_unlock;
1341
1342 error = lease_alloc(filp, arg, &fl);
1343 if (error)
1344 goto out_unlock;
1345
1346 error = fasync_helper(fd, filp, 1, &fl->fl_fasync);
1347 if (error < 0) {
1348 locks_free_lock(fl);
1349 goto out_unlock;
1350 }
1351 fl->fl_next = *before;
1352 *before = fl;
1353 list_add(&fl->fl_link, &file_lock_list);
1354 filp->f_owner.pid = current->pid;
1355 filp->f_owner.uid = current->uid;
1356 filp->f_owner.euid = current->euid;
1357 out_unlock:
1358 unlock_kernel();
1359 return error;
1360 }
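/*
 * A minimal userspace sketch of taking a lease through this fcntl.  As the
 * comment above notes, F_SETSIG should also be used so a signal is delivered
 * when the lease is being broken; with a realtime signal and SA_SIGINFO the
 * handler can see which descriptor is affected.  Illustrative only: the path
 * and signal choice are placeholders and errors are merely printed.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void lease_break(int sig, siginfo_t *info, void *ctx)
{
	/* a real handler would flush state for info->si_fd and then
	 * downgrade or drop the lease with F_SETLEASE */
	(void)sig; (void)info; (void)ctx;
}

int main(void)
{
	struct sigaction sa;
	int fd;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = lease_break;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMIN, &sa, NULL);

	fd = open("/tmp/leasedemo", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fcntl(fd, F_SETSIG, SIGRTMIN) < 0)
		perror("fcntl(F_SETSIG)");
	if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
		perror("fcntl(F_SETLEASE)");

	pause();	/* wait for a lease-break (or any other) signal */
	return 0;
}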
1361
1362 /**
1363 * sys_flock: - flock() system call.
1364 * @fd: the file descriptor to lock.
1365 * @cmd: the type of lock to apply.
1366 *
1367 * Apply a %FL_FLOCK style lock to an open file descriptor.
1368 * The @cmd can be one of
1369 *
1370 * %LOCK_SH -- a shared lock.
1371 *
1372 * %LOCK_EX -- an exclusive lock.
1373 *
1374 * %LOCK_UN -- remove an existing lock.
1375 *
1376 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
1377 *
1378 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1379 * processes read and write access respectively.
1380 */
1381 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
1382 {
1383 struct file *filp;
1384 int error, type;
1385
1386 error = -EBADF;
1387 filp = fget(fd);
1388 if (!filp)
1389 goto out;
1390
1391 error = flock_translate_cmd(cmd);
1392 if (error < 0)
1393 goto out_putf;
1394 type = error;
1395
1396 error = -EBADF;
1397 if ((type != F_UNLCK)
1398 #ifdef MSNFS
1399 && !(type & LOCK_MAND)
1400 #endif
1401 && !(filp->f_mode & 3))
1402 goto out_putf;
1403
1404 lock_kernel();
1405 error = flock_lock_file(filp, type,
1406 (cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1);
1407 unlock_kernel();
1408
1409 out_putf:
1410 fput(filp);
1411 out:
1412 return error;
1413 }
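/*
 * A small userspace sketch of the flock() call documented above: try an
 * exclusive FL_FLOCK lock without blocking, fall back to waiting for it, and
 * drop it again.  Illustrative only; the path name is a placeholder.
 */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>

int main(void)
{
	int fd = open("/tmp/flockdemo", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
		if (errno == EWOULDBLOCK) {
			/* someone else holds it; this time wait for it */
			if (flock(fd, LOCK_EX) < 0)
				perror("flock(LOCK_EX)");
		} else {
			perror("flock(LOCK_EX | LOCK_NB)");
		}
	}

	/* ... critical section ... */

	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}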
1414
1415 /* Report the first existing lock that would conflict with l.
1416 * This implements the F_GETLK command of fcntl().
1417 */
1418 int fcntl_getlk(unsigned int fd, struct flock *l)
1419 {
1420 struct file *filp;
1421 struct file_lock *fl, file_lock;
1422 struct flock flock;
1423 int error;
1424
1425 error = -EFAULT;
1426 if (copy_from_user(&flock, l, sizeof(flock)))
1427 goto out;
1428 error = -EINVAL;
1429 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1430 goto out;
1431
1432 error = -EBADF;
1433 filp = fget(fd);
1434 if (!filp)
1435 goto out;
1436
1437 error = flock_to_posix_lock(filp, &file_lock, &flock);
1438 if (error)
1439 goto out_putf;
1440
1441 if (filp->f_op && filp->f_op->lock) {
1442 error = filp->f_op->lock(filp, F_GETLK, &file_lock);
1443 if (error < 0)
1444 goto out_putf;
1445 else if (error == LOCK_USE_CLNT)
1446 /* Bypass for NFS with no locking - 2.0.36 compat */
1447 fl = posix_test_lock(filp, &file_lock);
1448 else
1449 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
1450 } else {
1451 fl = posix_test_lock(filp, &file_lock);
1452 }
1453
1454 flock.l_type = F_UNLCK;
1455 if (fl != NULL) {
1456 flock.l_pid = fl->fl_pid;
1457 #if BITS_PER_LONG == 32
1458 /*
1459 * Make sure we can represent the posix lock via
1460 * legacy 32bit flock.
1461 */
1462 error = -EOVERFLOW;
1463 if (fl->fl_start > OFFT_OFFSET_MAX)
1464 goto out_putf;
1465 if ((fl->fl_end != OFFSET_MAX)
1466 && (fl->fl_end > OFFT_OFFSET_MAX))
1467 goto out_putf;
1468 #endif
1469 flock.l_start = fl->fl_start;
1470 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
1471 fl->fl_end - fl->fl_start + 1;
1472 flock.l_whence = 0;
1473 flock.l_type = fl->fl_type;
1474 }
1475 error = -EFAULT;
1476 if (!copy_to_user(l, &flock, sizeof(flock)))
1477 error = 0;
1478
1479 out_putf:
1480 fput(filp);
1481 out:
1482 return error;
1483 }
1484
1485 /* Apply the lock described by l to an open file descriptor.
1486 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1487 */
1488 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
1489 {
1490 struct file *filp;
1491 struct file_lock *file_lock = locks_alloc_lock(0);
1492 struct flock flock;
1493 struct inode *inode;
1494 int error;
1495
1496 if (file_lock == NULL)
1497 return -ENOLCK;
1498
1499 /*
1500 * This might block, so we do it before checking the inode.
1501 */
1502 error = -EFAULT;
1503 if (copy_from_user(&flock, l, sizeof(flock)))
1504 goto out;
1505
1506 /* Get arguments and validate them ...
1507 */
1508
1509 error = -EBADF;
1510 filp = fget(fd);
1511 if (!filp)
1512 goto out;
1513
1514 error = -EINVAL;
1515 inode = filp->f_dentry->d_inode;
1516
1517 /* Don't allow mandatory locks on files that may be memory mapped
1518 * and shared.
1519 */
1520 if (IS_MANDLOCK(inode) &&
1521 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
1522 struct address_space *mapping = inode->i_mapping;
1523
1524 if (mapping->i_mmap_shared != NULL) {
1525 error = -EAGAIN;
1526 goto out_putf;
1527 }
1528 }
1529
1530 error = flock_to_posix_lock(filp, file_lock, &flock);
1531 if (error)
1532 goto out_putf;
1533
1534 error = -EBADF;
1535 switch (flock.l_type) {
1536 case F_RDLCK:
1537 if (!(filp->f_mode & FMODE_READ))
1538 goto out_putf;
1539 break;
1540 case F_WRLCK:
1541 if (!(filp->f_mode & FMODE_WRITE))
1542 goto out_putf;
1543 break;
1544 case F_UNLCK:
1545 break;
1546 case F_SHLCK:
1547 case F_EXLCK:
1548 #ifdef __sparc__
1549 /* warn a bit for now, but don't overdo it */
1550 {
1551 static int count = 0;
1552 if (!count) {
1553 count=1;
1554 printk(KERN_WARNING
1555 "fcntl_setlk() called by process %d (%s) with broken flock() emulation\n",
1556 current->pid, current->comm);
1557 }
1558 }
1559 if (!(filp->f_mode & 3))
1560 goto out_putf;
1561 break;
1562 #endif
1563 default:
1564 error = -EINVAL;
1565 goto out_putf;
1566 }
1567
1568 if (filp->f_op && filp->f_op->lock != NULL) {
1569 error = filp->f_op->lock(filp, cmd, file_lock);
1570 if (error < 0)
1571 goto out_putf;
1572 }
1573 error = posix_lock_file(filp, file_lock, cmd == F_SETLKW);
1574
1575 out_putf:
1576 fput(filp);
1577 out:
1578 locks_free_lock(file_lock);
1579 return error;
1580 }
1581
1582 #if BITS_PER_LONG == 32
1583 /* Report the first existing lock that would conflict with l.
1584 * This implements the F_GETLK command of fcntl().
1585 */
1586 int fcntl_getlk64(unsigned int fd, struct flock64 *l)
1587 {
1588 struct file *filp;
1589 struct file_lock *fl, file_lock;
1590 struct flock64 flock;
1591 int error;
1592
1593 error = -EFAULT;
1594 if (copy_from_user(&flock, l, sizeof(flock)))
1595 goto out;
1596 error = -EINVAL;
1597 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1598 goto out;
1599
1600 error = -EBADF;
1601 filp = fget(fd);
1602 if (!filp)
1603 goto out;
1604
1605 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1606 if (error)
1607 goto out_putf;
1608
1609 if (filp->f_op && filp->f_op->lock) {
1610 error = filp->f_op->lock(filp, F_GETLK, &file_lock);
1611 if (error < 0)
1612 goto out_putf;
1613 else if (error == LOCK_USE_CLNT)
1614 /* Bypass for NFS with no locking - 2.0.36 compat */
1615 fl = posix_test_lock(filp, &file_lock);
1616 else
1617 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
1618 } else {
1619 fl = posix_test_lock(filp, &file_lock);
1620 }
1621
1622 flock.l_type = F_UNLCK;
1623 if (fl != NULL) {
1624 flock.l_pid = fl->fl_pid;
1625 flock.l_start = fl->fl_start;
1626 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
1627 fl->fl_end - fl->fl_start + 1;
1628 flock.l_whence = 0;
1629 flock.l_type = fl->fl_type;
1630 }
1631 error = -EFAULT;
1632 if (!copy_to_user(l, &flock, sizeof(flock)))
1633 error = 0;
1634
1635 out_putf:
1636 fput(filp);
1637 out:
1638 return error;
1639 }
1640
1641 /* Apply the lock described by l to an open file descriptor.
1642 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1643 */
1644 int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l)
1645 {
1646 struct file *filp;
1647 struct file_lock *file_lock = locks_alloc_lock(0);
1648 struct flock64 flock;
1649 struct inode *inode;
1650 int error;
1651
1652 if (file_lock == NULL)
1653 return -ENOLCK;
1654
1655 /*
1656 * This might block, so we do it before checking the inode.
1657 */
1658 error = -EFAULT;
1659 if (copy_from_user(&flock, l, sizeof(flock)))
1660 goto out;
1661
1662 /* Get arguments and validate them ...
1663 */
1664
1665 error = -EBADF;
1666 filp = fget(fd);
1667 if (!filp)
1668 goto out;
1669
1670 error = -EINVAL;
1671 inode = filp->f_dentry->d_inode;
1672
1673 /* Don't allow mandatory locks on files that may be memory mapped
1674 * and shared.
1675 */
1676 if (IS_MANDLOCK(inode) &&
1677 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
1678 struct address_space *mapping = inode->i_mapping;
1679
1680 if (mapping->i_mmap_shared != NULL) {
1681 error = -EAGAIN;
1682 goto out_putf;
1683 }
1684 }
1685
1686 error = flock64_to_posix_lock(filp, file_lock, &flock);
1687 if (error)
1688 goto out_putf;
1689
1690 error = -EBADF;
1691 switch (flock.l_type) {
1692 case F_RDLCK:
1693 if (!(filp->f_mode & FMODE_READ))
1694 goto out_putf;
1695 break;
1696 case F_WRLCK:
1697 if (!(filp->f_mode & FMODE_WRITE))
1698 goto out_putf;
1699 break;
1700 case F_UNLCK:
1701 break;
1702 case F_SHLCK:
1703 case F_EXLCK:
1704 default:
1705 error = -EINVAL;
1706 goto out_putf;
1707 }
1708
1709 if (filp->f_op && filp->f_op->lock != NULL) {
1710 error = filp->f_op->lock(filp, cmd, file_lock);
1711 if (error < 0)
1712 goto out_putf;
1713 }
1714 error = posix_lock_file(filp, file_lock, cmd == F_SETLKW64);
1715
1716 out_putf:
1717 fput(filp);
1718 out:
1719 locks_free_lock(file_lock);
1720 return error;
1721 }
1722 #endif /* BITS_PER_LONG == 32 */
1723
1724 /*
1725 * This function is called when the file is being removed
1726 * from the task's fd array.
1727 */
1728 void locks_remove_posix(struct file *filp, fl_owner_t owner)
1729 {
1730 struct inode * inode = filp->f_dentry->d_inode;
1731 struct file_lock *fl;
1732 struct file_lock **before;
1733
1734 /*
1735 * For POSIX locks we free all locks on this file for the given task.
1736 */
1737 if (!inode->i_flock) {
1738 /*
1739 * Notice that something might be grabbing a lock right now.
1740  * Consider it as a race won by us - the event is async, so even if
1741  * we miss the lock being added we can trivially consider it as added
1742 * after we went through this call.
1743 */
1744 return;
1745 }
1746 lock_kernel();
1747 before = &inode->i_flock;
1748 while ((fl = *before) != NULL) {
1749 if ((fl->fl_flags & FL_POSIX) && fl->fl_owner == owner) {
1750 locks_unlock_delete(before);
1751 before = &inode->i_flock;
1752 continue;
1753 }
1754 before = &fl->fl_next;
1755 }
1756 unlock_kernel();
1757 }
1758
1759 /*
1760 * This function is called on the last close of an open file.
1761 */
1762 void locks_remove_flock(struct file *filp)
1763 {
1764 struct inode * inode = filp->f_dentry->d_inode;
1765 struct file_lock *fl;
1766 struct file_lock **before;
1767
1768 if (!inode->i_flock)
1769 return;
1770
1771 lock_kernel();
1772 before = &inode->i_flock;
1773
1774 while ((fl = *before) != NULL) {
1775 if (fl->fl_file == filp) {
1776 if (fl->fl_flags & FL_FLOCK) {
1777 locks_delete_lock(before, 0);
1778 continue;
1779 }
1780 if (fl->fl_flags & FL_LEASE) {
1781 lease_modify(before, F_UNLCK);
1782 continue;
1783 }
1784 }
1785 before = &fl->fl_next;
1786 }
1787 unlock_kernel();
1788 }
1789
1790 /**
1791 * posix_block_lock - blocks waiting for a file lock
1792 * @blocker: the lock which is blocking
1793 * @waiter: the lock which conflicts and has to wait
1794 *
1795 * lockd needs to block waiting for locks.
1796 */
1797 void
1798 posix_block_lock(struct file_lock *blocker, struct file_lock *waiter)
1799 {
1800 locks_insert_block(blocker, waiter);
1801 }
1802
1803 /**
1804 * posix_unblock_lock - stop waiting for a file lock
1805 * @waiter: the lock which was waiting
1806 *
1807 * lockd needs to block waiting for locks.
1808 */
1809 void
1810 posix_unblock_lock(struct file_lock *waiter)
1811 {
1812 if (!list_empty(&waiter->fl_block))
1813 locks_delete_block(waiter);
1814 }
1815
1816 static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
1817 {
1818 struct inode *inode = NULL;
1819
1820 if (fl->fl_file != NULL)
1821 inode = fl->fl_file->f_dentry->d_inode;
1822
1823 out += sprintf(out, "%d:%s ", id, pfx);
1824 if (fl->fl_flags & FL_POSIX) {
1825 out += sprintf(out, "%6s %s ",
1826 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
1827 (inode == NULL) ? "*NOINODE*" :
1828 (IS_MANDLOCK(inode) &&
1829 (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
1830 "MANDATORY" : "ADVISORY ");
1831 } else if (fl->fl_flags & FL_FLOCK) {
1832 #ifdef MSNFS
1833 if (fl->fl_type & LOCK_MAND) {
1834 out += sprintf(out, "FLOCK MSNFS ");
1835 } else
1836 #endif
1837 out += sprintf(out, "FLOCK ADVISORY ");
1838 } else if (fl->fl_flags & FL_LEASE) {
1839 out += sprintf(out, "LEASE ");
1840 if (fl->fl_type & F_INPROGRESS)
1841 out += sprintf(out, "BREAKING ");
1842 else if (fl->fl_file)
1843 out += sprintf(out, "ACTIVE ");
1844 else
1845 out += sprintf(out, "BREAKER ");
1846 } else {
1847 out += sprintf(out, "UNKNOWN UNKNOWN ");
1848 }
1849 #ifdef MSNFS
1850 if (fl->fl_type & LOCK_MAND) {
1851 out += sprintf(out, "%s ",
1852 (fl->fl_type & LOCK_READ)
1853 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
1854 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
1855 } else
1856 #endif
1857 out += sprintf(out, "%s ",
1858 (fl->fl_type & F_INPROGRESS)
1859 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
1860 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
1861 out += sprintf(out, "%d %s:%ld ",
1862 fl->fl_pid,
1863 inode ? kdevname(inode->i_dev) : "<none>",
1864 inode ? inode->i_ino : 0);
1865 out += sprintf(out, "%Ld ", fl->fl_start);
1866 if (fl->fl_end == OFFSET_MAX)
1867 out += sprintf(out, "EOF ");
1868 else
1869 out += sprintf(out, "%Ld ", fl->fl_end);
1870 sprintf(out, "%08lx %08lx %08lx %08lx %08lx\n",
1871 (long)fl, (long)fl->fl_link.prev, (long)fl->fl_link.next,
1872 (long)fl->fl_next, (long)fl->fl_block.next);
1873 }
1874
1875 static void move_lock_status(char **p, off_t* pos, off_t offset)
1876 {
1877 int len;
1878 len = strlen(*p);
1879 if(*pos >= offset) {
1880 /* the complete line is valid */
1881 *p += len;
1882 *pos += len;
1883 return;
1884 }
1885 if(*pos+len > offset) {
1886 /* use the second part of the line */
1887 int i = offset-*pos;
1888 memmove(*p,*p+i,len-i);
1889 *p += len-i;
1890 *pos += len;
1891 return;
1892 }
1893 /* discard the complete line */
1894 *pos += len;
1895 }
1896
1897 /**
1898 * get_locks_status - reports lock usage in /proc/locks
1899 * @buffer: address in userspace to write into
1900 * @start: ?
1901 * @offset: how far we are through the buffer
1902 * @length: how much to read
1903 */
1904
1905 int get_locks_status(char *buffer, char **start, off_t offset, int length)
1906 {
1907 struct list_head *tmp;
1908 char *q = buffer;
1909 off_t pos = 0;
1910 int i = 0;
1911
1912 lock_kernel();
1913 list_for_each(tmp, &file_lock_list) {
1914 struct list_head *btmp;
1915 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
1916 lock_get_status(q, fl, ++i, "");
1917 move_lock_status(&q, &pos, offset);
1918
1919 if(pos >= offset+length)
1920 goto done;
1921
1922 list_for_each(btmp, &fl->fl_block) {
1923 struct file_lock *bfl = list_entry(btmp,
1924 struct file_lock, fl_block);
1925 lock_get_status(q, bfl, i, " ->");
1926 move_lock_status(&q, &pos, offset);
1927
1928 if(pos >= offset+length)
1929 goto done;
1930 }
1931 }
1932 done:
1933 unlock_kernel();
1934 *start = buffer;
1935 if(q-buffer < length)
1936 return (q-buffer);
1937 return length;
1938 }
1939
1940 void steal_locks(fl_owner_t from)
1941 {
1942 struct list_head *tmp;
1943
1944 if (from == current->files)
1945 return;
1946
1947 lock_kernel();
1948 list_for_each(tmp, &file_lock_list) {
1949 struct file_lock *fl = list_entry(tmp, struct file_lock,
1950 fl_link);
1951 if (fl->fl_owner == from)
1952 fl->fl_owner = current->files;
1953 }
1954 unlock_kernel();
1955 }
1956
1957 #ifdef MSNFS
1958 /**
1959 * lock_may_read - checks that the region is free of locks
1960 * @inode: the inode that is being read
1961 * @start: the first byte to read
1962 * @len: the number of bytes to read
1963 *
1964 * Emulates Windows locking requirements. Whole-file
1965 * mandatory locks (share modes) can prohibit a read and
1966 * byte-range POSIX locks can prohibit a read if they overlap.
1967 *
1968 * N.B. this function is only ever called
1969 * from knfsd and ownership of locks is never checked.
1970 */
1971 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
1972 {
1973 struct file_lock *fl;
1974 int result = 1;
1975 lock_kernel();
1976 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1977 if (fl->fl_flags == FL_POSIX) {
1978 if (fl->fl_type == F_RDLCK)
1979 continue;
1980 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
1981 continue;
1982 } else if (fl->fl_flags == FL_FLOCK) {
1983 if (!(fl->fl_type & LOCK_MAND))
1984 continue;
1985 if (fl->fl_type & LOCK_READ)
1986 continue;
1987 } else
1988 continue;
1989 result = 0;
1990 break;
1991 }
1992 unlock_kernel();
1993 return result;
1994 }
1995
1996 /**
1997 * lock_may_write - checks that the region is free of locks
1998 * @inode: the inode that is being written
1999 * @start: the first byte to write
2000 * @len: the number of bytes to write
2001 *
2002 * Emulates Windows locking requirements. Whole-file
2003 * mandatory locks (share modes) can prohibit a write and
2004 * byte-range POSIX locks can prohibit a write if they overlap.
2005 *
2006 * N.B. this function is only ever called
2007 * from knfsd and ownership of locks is never checked.
2008 */
2009 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2010 {
2011 struct file_lock *fl;
2012 int result = 1;
2013 lock_kernel();
2014 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2015 if (fl->fl_flags == FL_POSIX) {
2016 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2017 continue;
2018 } else if (fl->fl_flags == FL_FLOCK) {
2019 if (!(fl->fl_type & LOCK_MAND))
2020 continue;
2021 if (fl->fl_type & LOCK_WRITE)
2022 continue;
2023 } else
2024 continue;
2025 result = 0;
2026 break;
2027 }
2028 unlock_kernel();
2029 return result;
2030 }
2031 #endif
2032
2033 static int __init filelock_init(void)
2034 {
2035 filelock_cache = kmem_cache_create("file_lock_cache",
2036 sizeof(struct file_lock), 0, 0, init_once, NULL);
2037 if (!filelock_cache)
2038 panic("cannot create file lock slab cache");
2039 return 0;
2040 }
2041
2042 module_init(filelock_init)