FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lockf.c
1 /*-
2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3 * Authors: Doug Rabson <dfr@rabson.org>
4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27 /*-
28 * Copyright (c) 1982, 1986, 1989, 1993
29 * The Regents of the University of California. All rights reserved.
30 *
31 * This code is derived from software contributed to Berkeley by
32 * Scooter Morris at Genentech Inc.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
59 */
60
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_lockf.c 302216 2016-06-26 20:08:42Z kib $");
63
64 #include "opt_debug_lockf.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/hash.h>
69 #include <sys/kernel.h>
70 #include <sys/limits.h>
71 #include <sys/lock.h>
72 #include <sys/mount.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/sx.h>
76 #include <sys/unistd.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/fcntl.h>
80 #include <sys/lockf.h>
81 #include <sys/taskqueue.h>
82
83 #ifdef LOCKF_DEBUG
84 #include <sys/sysctl.h>
85
86 #include <ufs/ufs/quota.h>
87 #include <ufs/ufs/inode.h>
88
89 static int lockf_debug = 0; /* control debug output */
90 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
91 #endif
92
93 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
94
95 struct owner_edge;
96 struct owner_vertex;
97 struct owner_vertex_list;
98 struct owner_graph;
99
100 #define NOLOCKF (struct lockf_entry *)0
101 #define SELF 0x1
102 #define OTHERS 0x2
103 static void lf_init(void *);
104 static int lf_hash_owner(caddr_t, struct flock *, int);
105 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
106 int);
107 static struct lockf_entry *
108 lf_alloc_lock(struct lock_owner *);
109 static int lf_free_lock(struct lockf_entry *);
110 static int lf_clearlock(struct lockf *, struct lockf_entry *);
111 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
112 static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
113 static void lf_free_edge(struct lockf_edge *);
114 static struct lockf_edge *
115 lf_alloc_edge(void);
116 static void lf_alloc_vertex(struct lockf_entry *);
117 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
118 static void lf_remove_edge(struct lockf_edge *);
119 static void lf_remove_outgoing(struct lockf_entry *);
120 static void lf_remove_incoming(struct lockf_entry *);
121 static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
122 static int lf_add_incoming(struct lockf *, struct lockf_entry *);
123 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
124 int);
125 static struct lockf_entry *
126 lf_getblock(struct lockf *, struct lockf_entry *);
127 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
128 static void lf_insert_lock(struct lockf *, struct lockf_entry *);
129 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
130 static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
131 int all, struct lockf_entry_list *);
132 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
133 struct lockf_entry_list*);
134 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
135 struct lockf_entry_list*);
136 static int lf_setlock(struct lockf *, struct lockf_entry *,
137 struct vnode *, void **cookiep);
138 static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
139 static void lf_split(struct lockf *, struct lockf_entry *,
140 struct lockf_entry *, struct lockf_entry_list *);
141 #ifdef LOCKF_DEBUG
142 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
143 struct owner_vertex_list *path);
144 static void graph_check(struct owner_graph *g, int checkorder);
145 static void graph_print_vertices(struct owner_vertex_list *set);
146 #endif
147 static int graph_delta_forward(struct owner_graph *g,
148 struct owner_vertex *x, struct owner_vertex *y,
149 struct owner_vertex_list *delta);
150 static int graph_delta_backward(struct owner_graph *g,
151 struct owner_vertex *x, struct owner_vertex *y,
152 struct owner_vertex_list *delta);
153 static int graph_add_indices(int *indices, int n,
154 struct owner_vertex_list *set);
155 static int graph_assign_indices(struct owner_graph *g, int *indices,
156 int nextunused, struct owner_vertex_list *set);
157 static int graph_add_edge(struct owner_graph *g,
158 struct owner_vertex *x, struct owner_vertex *y);
159 static void graph_remove_edge(struct owner_graph *g,
160 struct owner_vertex *x, struct owner_vertex *y);
161 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
162 struct lock_owner *lo);
163 static void graph_free_vertex(struct owner_graph *g,
164 struct owner_vertex *v);
165 static struct owner_graph * graph_init(struct owner_graph *g);
166 #ifdef LOCKF_DEBUG
167 static void lf_print(char *, struct lockf_entry *);
168 static void lf_printlist(char *, struct lockf_entry *);
169 static void lf_print_owner(struct lock_owner *);
170 #endif
171
172 /*
173 * This structure is used to keep track of both local and remote lock
174 * owners. The lf_owner field of the struct lockf_entry points back at
175 * the lock owner structure. Each possible lock owner (local proc for
176 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
177 * pair for remote locks) is represented by a unique instance of
178 * struct lock_owner.
179 *
180 * If a lock owner has a lock that blocks some other lock or a lock
181 * that is waiting for some other lock, it also has a vertex in the
182 * owner_graph below.
183 *
184 * Locks:
185 * (s) locked by state->ls_lock
186 * (S) locked by lf_lock_states_lock
187 * (l) locked by lf_lock_owners_lock
188 * (g) locked by lf_owner_graph_lock
189 * (c) const until freeing
190 */
191 #define LOCK_OWNER_HASH_SIZE 256
192
193 struct lock_owner {
194 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
195 int lo_refs; /* (l) Number of locks referring to this */
196 	int	lo_flags;	/* (c) Flags passed to lf_advlock */
197 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
198 pid_t lo_pid; /* (c) Process Id of the lock owner */
199 int lo_sysid; /* (c) System Id of the lock owner */
200 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
201 };
202
203 LIST_HEAD(lock_owner_list, lock_owner);
204
205 static struct sx lf_lock_states_lock;
206 static struct lockf_list lf_lock_states; /* (S) */
207 static struct sx lf_lock_owners_lock;
208 static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
209
210 /*
211 * Structures for deadlock detection.
212 *
213 * We have two types of directed graph, the first is the set of locks,
214 * both active and pending on a vnode. Within this graph, active locks
215 * are terminal nodes in the graph (i.e. have no out-going
216 * edges). Pending locks have out-going edges to each blocking active
217 * lock that prevents the lock from being granted and also to each
218 * older pending lock that would block them if it was active. The
219 * graph for each vnode is naturally acyclic; new edges are only ever
220 * added to or from new nodes (either new pending locks which only add
221 * out-going edges or new active locks which only add in-coming edges)
222 * therefore they cannot create loops in the lock graph.
223 *
224 * The second graph is a global graph of lock owners. Each lock owner
225 * is a vertex in that graph and an edge is added to the graph
226 * whenever an edge is added to a vnode graph, with end points
227 * corresponding to owner of the new pending lock and the owner of the
228 * lock upon which it waits. In order to prevent deadlock, we only add
229 * an edge to this graph if the new edge would not create a cycle.
230 *
231 * The lock owner graph is topologically sorted, i.e. if a node has
232 * any outgoing edges, then it has an order strictly less than any
233 * node to which it has an outgoing edge. We preserve this ordering
234 * (and detect cycles) on edge insertion using Algorithm PK from the
235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
237 * No. 1.7)
238 */
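/*
 * Worked example of the cycle check (hypothetical owners A, B and C;
 * a sketch only, not taken from any particular workload): if one of
 * A's pending locks waits on a lock held by B, and one of B's pending
 * locks waits on a lock held by C, the owner graph contains the edges
 * A->B and B->C and a valid topological order is A, B, C.  If C now
 * requests a lock that is blocked by one of A's locks, graph_add_edge()
 * is asked to add C->A.  Since A's order is smaller than C's, the
 * algorithm searches forward from A and finds C again via A->B->C,
 * i.e. the new edge would close the cycle C->A->B->C.  graph_add_edge()
 * therefore returns EDEADLK, lf_add_edge() propagates it, and the
 * requesting thread's lf_setlock() fails instead of deadlocking.
 */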
239 struct owner_vertex;
240
241 struct owner_edge {
242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
244 int e_refs; /* (g) number of times added */
245 struct owner_vertex *e_from; /* (c) out-going from here */
246 struct owner_vertex *e_to; /* (c) in-coming to here */
247 };
248 LIST_HEAD(owner_edge_list, owner_edge);
249
250 struct owner_vertex {
251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
252 uint32_t v_gen; /* (g) workspace for edge insertion */
253 int v_order; /* (g) order of vertex in graph */
254 struct owner_edge_list v_outedges;/* (g) list of out-edges */
255 struct owner_edge_list v_inedges; /* (g) list of in-edges */
256 struct lock_owner *v_owner; /* (c) corresponding lock owner */
257 };
258 TAILQ_HEAD(owner_vertex_list, owner_vertex);
259
260 struct owner_graph {
261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
262 int g_size; /* (g) number of vertices */
263 int g_space; /* (g) space allocated for vertices */
264 int *g_indexbuf; /* (g) workspace for loop detection */
265 uint32_t g_gen; /* (g) increment when re-ordering */
266 };
267
268 static struct sx lf_owner_graph_lock;
269 static struct owner_graph lf_owner_graph;
270
271 /*
272 * Initialise various structures and locks.
273 */
274 static void
275 lf_init(void *dummy)
276 {
277 int i;
278
279 sx_init(&lf_lock_states_lock, "lock states lock");
280 LIST_INIT(&lf_lock_states);
281
282 sx_init(&lf_lock_owners_lock, "lock owners lock");
283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
284 LIST_INIT(&lf_lock_owners[i]);
285
286 sx_init(&lf_owner_graph_lock, "owner graph lock");
287 graph_init(&lf_owner_graph);
288 }
289 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
290
291 /*
292 * Generate a hash value for a lock owner.
293 */
294 static int
295 lf_hash_owner(caddr_t id, struct flock *fl, int flags)
296 {
297 uint32_t h;
298
299 if (flags & F_REMOTE) {
300 h = HASHSTEP(0, fl->l_pid);
301 h = HASHSTEP(h, fl->l_sysid);
302 } else if (flags & F_FLOCK) {
303 h = ((uintptr_t) id) >> 7;
304 } else {
305 struct proc *p = (struct proc *) id;
306 h = HASHSTEP(0, p->p_pid);
307 h = HASHSTEP(h, 0);
308 }
309
310 return (h % LOCK_OWNER_HASH_SIZE);
311 }
312
313 /*
314 * Return true if a lock owner matches the details passed to
315 * lf_advlock.
316 */
317 static int
318 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
319 int flags)
320 {
321 if (flags & F_REMOTE) {
322 return lo->lo_pid == fl->l_pid
323 && lo->lo_sysid == fl->l_sysid;
324 } else {
325 return lo->lo_id == id;
326 }
327 }
328
329 static struct lockf_entry *
330 lf_alloc_lock(struct lock_owner *lo)
331 {
332 struct lockf_entry *lf;
333
334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
335
336 #ifdef LOCKF_DEBUG
337 if (lockf_debug & 4)
338 printf("Allocated lock %p\n", lf);
339 #endif
340 if (lo) {
341 sx_xlock(&lf_lock_owners_lock);
342 lo->lo_refs++;
343 sx_xunlock(&lf_lock_owners_lock);
344 lf->lf_owner = lo;
345 }
346
347 return (lf);
348 }
349
350 static int
351 lf_free_lock(struct lockf_entry *lock)
352 {
353
354 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
355 if (--lock->lf_refs > 0)
356 return (0);
357 /*
358 * Adjust the lock_owner reference count and
359 * reclaim the entry if this is the last lock
360 * for that owner.
361 */
362 struct lock_owner *lo = lock->lf_owner;
363 if (lo) {
364 KASSERT(LIST_EMPTY(&lock->lf_outedges),
365 ("freeing lock with dependencies"));
366 KASSERT(LIST_EMPTY(&lock->lf_inedges),
367 ("freeing lock with dependants"));
368 sx_xlock(&lf_lock_owners_lock);
369 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
370 lo->lo_refs--;
371 if (lo->lo_refs == 0) {
372 #ifdef LOCKF_DEBUG
373 if (lockf_debug & 1)
374 printf("lf_free_lock: freeing lock owner %p\n",
375 lo);
376 #endif
377 if (lo->lo_vertex) {
378 sx_xlock(&lf_owner_graph_lock);
379 graph_free_vertex(&lf_owner_graph,
380 lo->lo_vertex);
381 sx_xunlock(&lf_owner_graph_lock);
382 }
383 LIST_REMOVE(lo, lo_link);
384 free(lo, M_LOCKF);
385 #ifdef LOCKF_DEBUG
386 if (lockf_debug & 4)
387 printf("Freed lock owner %p\n", lo);
388 #endif
389 }
390 sx_unlock(&lf_lock_owners_lock);
391 }
392 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
393 vrele(lock->lf_vnode);
394 lock->lf_vnode = NULL;
395 }
396 #ifdef LOCKF_DEBUG
397 if (lockf_debug & 4)
398 printf("Freed lock %p\n", lock);
399 #endif
400 free(lock, M_LOCKF);
401 return (1);
402 }
403
404 /*
405 * Advisory record locking support
406 */
407 int
408 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
409 u_quad_t size)
410 {
411 struct lockf *state, *freestate = NULL;
412 struct flock *fl = ap->a_fl;
413 struct lockf_entry *lock;
414 struct vnode *vp = ap->a_vp;
415 caddr_t id = ap->a_id;
416 int flags = ap->a_flags;
417 int hash;
418 struct lock_owner *lo;
419 off_t start, end, oadd;
420 int error;
421
422 /*
423 	 * Handle the F_UNLCKSYS case first - no need to mess about
424 * creating a lock owner for this one.
425 */
426 if (ap->a_op == F_UNLCKSYS) {
427 lf_clearremotesys(fl->l_sysid);
428 return (0);
429 }
430
431 /*
432 * Convert the flock structure into a start and end.
433 */
434 switch (fl->l_whence) {
435
436 case SEEK_SET:
437 case SEEK_CUR:
438 /*
439 * Caller is responsible for adding any necessary offset
440 * when SEEK_CUR is used.
441 */
442 start = fl->l_start;
443 break;
444
445 case SEEK_END:
446 if (size > OFF_MAX ||
447 (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
448 return (EOVERFLOW);
449 start = size + fl->l_start;
450 break;
451
452 default:
453 return (EINVAL);
454 }
455 if (start < 0)
456 return (EINVAL);
457 if (fl->l_len < 0) {
458 if (start == 0)
459 return (EINVAL);
460 end = start - 1;
461 start += fl->l_len;
462 if (start < 0)
463 return (EINVAL);
464 } else if (fl->l_len == 0) {
465 end = OFF_MAX;
466 } else {
467 oadd = fl->l_len - 1;
468 if (oadd > OFF_MAX - start)
469 return (EOVERFLOW);
470 end = start + oadd;
471 }
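	/*
	 * Worked example of the conversion above (the values are
	 * illustrative only): with l_whence = SEEK_SET, l_start = 100
	 * and l_len = 50 the request covers the inclusive byte range
	 * [100..149]; with l_len = 0 it covers [100..OFF_MAX], i.e.
	 * everything from l_start onwards; with l_len = -10 it covers
	 * the 10 bytes before l_start, i.e. [90..99].
	 */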
472
473 retry_setlock:
474
475 /*
476 * Avoid the common case of unlocking when inode has no locks.
477 */
478 VI_LOCK(vp);
479 if ((*statep) == NULL) {
480 if (ap->a_op != F_SETLK) {
481 fl->l_type = F_UNLCK;
482 VI_UNLOCK(vp);
483 return (0);
484 }
485 }
486 VI_UNLOCK(vp);
487
488 /*
489 * Map our arguments to an existing lock owner or create one
490 * if this is the first time we have seen this owner.
491 */
492 hash = lf_hash_owner(id, fl, flags);
493 sx_xlock(&lf_lock_owners_lock);
494 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
495 if (lf_owner_matches(lo, id, fl, flags))
496 break;
497 if (!lo) {
498 /*
499 		 * We initialise the lock owner with a reference
500 * count which matches the new lockf_entry
501 * structure created below.
502 */
503 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
504 M_WAITOK|M_ZERO);
505 #ifdef LOCKF_DEBUG
506 if (lockf_debug & 4)
507 printf("Allocated lock owner %p\n", lo);
508 #endif
509
510 lo->lo_refs = 1;
511 lo->lo_flags = flags;
512 lo->lo_id = id;
513 if (flags & F_REMOTE) {
514 lo->lo_pid = fl->l_pid;
515 lo->lo_sysid = fl->l_sysid;
516 } else if (flags & F_FLOCK) {
517 lo->lo_pid = -1;
518 lo->lo_sysid = 0;
519 } else {
520 struct proc *p = (struct proc *) id;
521 lo->lo_pid = p->p_pid;
522 lo->lo_sysid = 0;
523 }
524 lo->lo_vertex = NULL;
525
526 #ifdef LOCKF_DEBUG
527 if (lockf_debug & 1) {
528 printf("lf_advlockasync: new lock owner %p ", lo);
529 lf_print_owner(lo);
530 printf("\n");
531 }
532 #endif
533
534 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
535 } else {
536 /*
537 * We have seen this lock owner before, increase its
538 * reference count to account for the new lockf_entry
539 * structure we create below.
540 */
541 lo->lo_refs++;
542 }
543 sx_xunlock(&lf_lock_owners_lock);
544
545 /*
546 * Create the lockf structure. We initialise the lf_owner
547 * field here instead of in lf_alloc_lock() to avoid paying
548 * the lf_lock_owners_lock tax twice.
549 */
550 lock = lf_alloc_lock(NULL);
551 lock->lf_refs = 1;
552 lock->lf_start = start;
553 lock->lf_end = end;
554 lock->lf_owner = lo;
555 lock->lf_vnode = vp;
556 if (flags & F_REMOTE) {
557 /*
558 * For remote locks, the caller may release its ref to
559 * the vnode at any time - we have to ref it here to
560 * prevent it from being recycled unexpectedly.
561 */
562 vref(vp);
563 }
564
565 /*
566 * XXX The problem is that VTOI is ufs specific, so it will
567 * break LOCKF_DEBUG for all other FS's other than UFS because
568 * it casts the vnode->data ptr to struct inode *.
569 */
570 /* lock->lf_inode = VTOI(ap->a_vp); */
571 lock->lf_inode = (struct inode *)0;
572 lock->lf_type = fl->l_type;
573 LIST_INIT(&lock->lf_outedges);
574 LIST_INIT(&lock->lf_inedges);
575 lock->lf_async_task = ap->a_task;
576 lock->lf_flags = ap->a_flags;
577
578 /*
579 * Do the requested operation. First find our state structure
580 * and create a new one if necessary - the caller's *statep
581 * variable and the state's ls_threads count is protected by
582 * the vnode interlock.
583 */
584 VI_LOCK(vp);
585 if (vp->v_iflag & VI_DOOMED) {
586 VI_UNLOCK(vp);
587 lf_free_lock(lock);
588 return (ENOENT);
589 }
590
591 /*
592 * Allocate a state structure if necessary.
593 */
594 state = *statep;
595 if (state == NULL) {
596 struct lockf *ls;
597
598 VI_UNLOCK(vp);
599
600 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
601 sx_init(&ls->ls_lock, "ls_lock");
602 LIST_INIT(&ls->ls_active);
603 LIST_INIT(&ls->ls_pending);
604 ls->ls_threads = 1;
605
606 sx_xlock(&lf_lock_states_lock);
607 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
608 sx_xunlock(&lf_lock_states_lock);
609
610 /*
611 * Cope if we lost a race with some other thread while
612 * trying to allocate memory.
613 */
614 VI_LOCK(vp);
615 if (vp->v_iflag & VI_DOOMED) {
616 VI_UNLOCK(vp);
617 sx_xlock(&lf_lock_states_lock);
618 LIST_REMOVE(ls, ls_link);
619 sx_xunlock(&lf_lock_states_lock);
620 sx_destroy(&ls->ls_lock);
621 free(ls, M_LOCKF);
622 lf_free_lock(lock);
623 return (ENOENT);
624 }
625 if ((*statep) == NULL) {
626 state = *statep = ls;
627 VI_UNLOCK(vp);
628 } else {
629 state = *statep;
630 state->ls_threads++;
631 VI_UNLOCK(vp);
632
633 sx_xlock(&lf_lock_states_lock);
634 LIST_REMOVE(ls, ls_link);
635 sx_xunlock(&lf_lock_states_lock);
636 sx_destroy(&ls->ls_lock);
637 free(ls, M_LOCKF);
638 }
639 } else {
640 state->ls_threads++;
641 VI_UNLOCK(vp);
642 }
643
644 sx_xlock(&state->ls_lock);
645 /*
646 * Recheck the doomed vnode after state->ls_lock is
647 * locked. lf_purgelocks() requires that no new threads add
648 * pending locks when vnode is marked by VI_DOOMED flag.
649 */
650 VI_LOCK(vp);
651 if (vp->v_iflag & VI_DOOMED) {
652 state->ls_threads--;
653 wakeup(state);
654 VI_UNLOCK(vp);
655 sx_xunlock(&state->ls_lock);
656 lf_free_lock(lock);
657 return (ENOENT);
658 }
659 VI_UNLOCK(vp);
660
661 switch (ap->a_op) {
662 case F_SETLK:
663 error = lf_setlock(state, lock, vp, ap->a_cookiep);
664 break;
665
666 case F_UNLCK:
667 error = lf_clearlock(state, lock);
668 lf_free_lock(lock);
669 break;
670
671 case F_GETLK:
672 error = lf_getlock(state, lock, fl);
673 lf_free_lock(lock);
674 break;
675
676 case F_CANCEL:
677 if (ap->a_cookiep)
678 error = lf_cancel(state, lock, *ap->a_cookiep);
679 else
680 error = EINVAL;
681 lf_free_lock(lock);
682 break;
683
684 default:
685 lf_free_lock(lock);
686 error = EINVAL;
687 break;
688 }
689
690 #ifdef INVARIANTS
691 /*
692 * Check for some can't happen stuff. In this case, the active
693 * lock list becoming disordered or containing mutually
694 * blocking locks. We also check the pending list for locks
695 * which should be active (i.e. have no out-going edges).
696 */
697 LIST_FOREACH(lock, &state->ls_active, lf_link) {
698 struct lockf_entry *lf;
699 if (LIST_NEXT(lock, lf_link))
700 KASSERT((lock->lf_start
701 <= LIST_NEXT(lock, lf_link)->lf_start),
702 ("locks disordered"));
703 LIST_FOREACH(lf, &state->ls_active, lf_link) {
704 if (lock == lf)
705 break;
706 KASSERT(!lf_blocks(lock, lf),
707 ("two conflicting active locks"));
708 if (lock->lf_owner == lf->lf_owner)
709 KASSERT(!lf_overlaps(lock, lf),
710 ("two overlapping locks from same owner"));
711 }
712 }
713 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
714 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
715 ("pending lock which should be active"));
716 }
717 #endif
718 sx_xunlock(&state->ls_lock);
719
720 /*
721 * If we have removed the last active lock on the vnode and
722 * this is the last thread that was in-progress, we can free
723 * the state structure. We update the caller's pointer inside
724 * the vnode interlock but call free outside.
725 *
726 * XXX alternatively, keep the state structure around until
727 * the filesystem recycles - requires a callback from the
728 * filesystem.
729 */
730 VI_LOCK(vp);
731
732 state->ls_threads--;
733 wakeup(state);
734 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
735 KASSERT(LIST_EMPTY(&state->ls_pending),
736 ("freeing state with pending locks"));
737 freestate = state;
738 *statep = NULL;
739 }
740
741 VI_UNLOCK(vp);
742
743 if (freestate != NULL) {
744 sx_xlock(&lf_lock_states_lock);
745 LIST_REMOVE(freestate, ls_link);
746 sx_xunlock(&lf_lock_states_lock);
747 sx_destroy(&freestate->ls_lock);
748 free(freestate, M_LOCKF);
749 freestate = NULL;
750 }
751
752 if (error == EDOOFUS) {
753 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
754 goto retry_setlock;
755 }
756 return (error);
757 }
758
759 int
760 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
761 {
762 struct vop_advlockasync_args a;
763
764 a.a_vp = ap->a_vp;
765 a.a_id = ap->a_id;
766 a.a_op = ap->a_op;
767 a.a_fl = ap->a_fl;
768 a.a_flags = ap->a_flags;
769 a.a_task = NULL;
770 a.a_cookiep = NULL;
771
772 return (lf_advlockasync(&a, statep, size));
773 }
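/*
 * A minimal sketch of how a filesystem might route its advisory
 * locking VOP through the code above (hypothetical; 'foofs', the
 * foofs_node fields and the way the file size is obtained are
 * illustrative assumptions, not taken from any real filesystem):
 *
 *	static int
 *	foofs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct foofs_node *np = VTOFOOFS(ap->a_vp);
 *
 *		return (lf_advlock(ap, &np->fn_lockf, np->fn_size));
 *	}
 *
 * The second argument is the per-vnode 'struct lockf *' pointer that
 * lf_advlockasync() manages under the vnode interlock; the third is
 * the current file size, used to resolve SEEK_END relative requests.
 */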
774
775 void
776 lf_purgelocks(struct vnode *vp, struct lockf **statep)
777 {
778 struct lockf *state;
779 struct lockf_entry *lock, *nlock;
780
781 /*
782 * For this to work correctly, the caller must ensure that no
783 * other threads enter the locking system for this vnode,
784 * e.g. by checking VI_DOOMED. We wake up any threads that are
785 * sleeping waiting for locks on this vnode and then free all
786 * the remaining locks.
787 */
788 VI_LOCK(vp);
789 KASSERT(vp->v_iflag & VI_DOOMED,
790 ("lf_purgelocks: vp %p has not vgone yet", vp));
791 state = *statep;
792 if (state) {
793 *statep = NULL;
794 state->ls_threads++;
795 VI_UNLOCK(vp);
796
797 sx_xlock(&state->ls_lock);
798 sx_xlock(&lf_owner_graph_lock);
799 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
800 LIST_REMOVE(lock, lf_link);
801 lf_remove_outgoing(lock);
802 lf_remove_incoming(lock);
803
804 /*
805 			 * If it's an async lock, we can just free it
806 * here, otherwise we let the sleeping thread
807 * free it.
808 */
809 if (lock->lf_async_task) {
810 lf_free_lock(lock);
811 } else {
812 lock->lf_flags |= F_INTR;
813 wakeup(lock);
814 }
815 }
816 sx_xunlock(&lf_owner_graph_lock);
817 sx_xunlock(&state->ls_lock);
818
819 /*
820 * Wait for all other threads, sleeping and otherwise
821 * to leave.
822 */
823 VI_LOCK(vp);
824 while (state->ls_threads > 1)
825 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
826 VI_UNLOCK(vp);
827
828 /*
829 * We can just free all the active locks since they
830 * will have no dependencies (we removed them all
831 * above). We don't need to bother locking since we
832 * are the last thread using this state structure.
833 */
834 KASSERT(LIST_EMPTY(&state->ls_pending),
835 ("lock pending for %p", state));
836 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
837 LIST_REMOVE(lock, lf_link);
838 lf_free_lock(lock);
839 }
840 sx_xlock(&lf_lock_states_lock);
841 LIST_REMOVE(state, ls_link);
842 sx_xunlock(&lf_lock_states_lock);
843 sx_destroy(&state->ls_lock);
844 free(state, M_LOCKF);
845 } else {
846 VI_UNLOCK(vp);
847 }
848 }
849
850 /*
851 * Return non-zero if locks 'x' and 'y' overlap.
852 */
853 static int
854 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
855 {
856
857 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
858 }
859
860 /*
861 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
862 */
863 static int
864 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
865 {
866
867 return x->lf_owner != y->lf_owner
868 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
869 && lf_overlaps(x, y);
870 }
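/*
 * For example (illustrative ranges only): entries covering [0..4] and
 * [4..9] overlap because lf_start and lf_end are inclusive, but they
 * only block each other if they belong to different owners and at
 * least one of them is a write lock; two F_RDLCK locks never block
 * each other however much they overlap.
 */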
871
872 /*
873  * Allocate a lock edge structure.
874 */
875 static struct lockf_edge *
876 lf_alloc_edge(void)
877 {
878
879 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
880 }
881
882 /*
883 * Free a lock edge.
884 */
885 static void
886 lf_free_edge(struct lockf_edge *e)
887 {
888
889 free(e, M_LOCKF);
890 }
891
892
893 /*
894 * Ensure that the lock's owner has a corresponding vertex in the
895 * owner graph.
896 */
897 static void
898 lf_alloc_vertex(struct lockf_entry *lock)
899 {
900 struct owner_graph *g = &lf_owner_graph;
901
902 if (!lock->lf_owner->lo_vertex)
903 lock->lf_owner->lo_vertex =
904 graph_alloc_vertex(g, lock->lf_owner);
905 }
906
907 /*
908 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
909 * the new edge would cause a cycle in the owner graph.
910 */
911 static int
912 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
913 {
914 struct owner_graph *g = &lf_owner_graph;
915 struct lockf_edge *e;
916 int error;
917
918 #ifdef INVARIANTS
919 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
920 KASSERT(e->le_to != y, ("adding lock edge twice"));
921 #endif
922
923 /*
924 * Make sure the two owners have entries in the owner graph.
925 */
926 lf_alloc_vertex(x);
927 lf_alloc_vertex(y);
928
929 error = graph_add_edge(g, x->lf_owner->lo_vertex,
930 y->lf_owner->lo_vertex);
931 if (error)
932 return (error);
933
934 e = lf_alloc_edge();
935 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
936 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
937 e->le_from = x;
938 e->le_to = y;
939
940 return (0);
941 }
942
943 /*
944 * Remove an edge from the lock graph.
945 */
946 static void
947 lf_remove_edge(struct lockf_edge *e)
948 {
949 struct owner_graph *g = &lf_owner_graph;
950 struct lockf_entry *x = e->le_from;
951 struct lockf_entry *y = e->le_to;
952
953 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
954 LIST_REMOVE(e, le_outlink);
955 LIST_REMOVE(e, le_inlink);
956 e->le_from = NULL;
957 e->le_to = NULL;
958 lf_free_edge(e);
959 }
960
961 /*
962 * Remove all out-going edges from lock x.
963 */
964 static void
965 lf_remove_outgoing(struct lockf_entry *x)
966 {
967 struct lockf_edge *e;
968
969 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
970 lf_remove_edge(e);
971 }
972 }
973
974 /*
975 * Remove all in-coming edges from lock x.
976 */
977 static void
978 lf_remove_incoming(struct lockf_entry *x)
979 {
980 struct lockf_edge *e;
981
982 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
983 lf_remove_edge(e);
984 }
985 }
986
987 /*
988 * Walk the list of locks for the file and create an out-going edge
989 * from lock to each blocking lock.
990 */
991 static int
992 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
993 {
994 struct lockf_entry *overlap;
995 int error;
996
997 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
998 /*
999 * We may assume that the active list is sorted by
1000 * lf_start.
1001 */
1002 if (overlap->lf_start > lock->lf_end)
1003 break;
1004 if (!lf_blocks(lock, overlap))
1005 continue;
1006
1007 /*
1008 * We've found a blocking lock. Add the corresponding
1009 * edge to the graphs and see if it would cause a
1010 * deadlock.
1011 */
1012 error = lf_add_edge(lock, overlap);
1013
1014 /*
1015 * The only error that lf_add_edge returns is EDEADLK.
1016 * Remove any edges we added and return the error.
1017 */
1018 if (error) {
1019 lf_remove_outgoing(lock);
1020 return (error);
1021 }
1022 }
1023
1024 /*
1025 * We also need to add edges to sleeping locks that block
1026 * us. This ensures that lf_wakeup_lock cannot grant two
1027 * mutually blocking locks simultaneously and also enforces a
1028 * 'first come, first served' fairness model. Note that this
1029 * only happens if we are blocked by at least one active lock
1030 * due to the call to lf_getblock in lf_setlock below.
1031 */
1032 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1033 if (!lf_blocks(lock, overlap))
1034 continue;
1035 /*
1036 * We've found a blocking lock. Add the corresponding
1037 * edge to the graphs and see if it would cause a
1038 * deadlock.
1039 */
1040 error = lf_add_edge(lock, overlap);
1041
1042 /*
1043 * The only error that lf_add_edge returns is EDEADLK.
1044 * Remove any edges we added and return the error.
1045 */
1046 if (error) {
1047 lf_remove_outgoing(lock);
1048 return (error);
1049 }
1050 }
1051
1052 return (0);
1053 }
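/*
 * Worked example of the second loop above (hypothetical owners):
 * owner A holds an active write lock on [0..9].  Owner B then asks
 * for a write lock on [0..9] and sleeps with the single edge B->A.
 * If owner C now asks for a read lock on [0..9], lf_add_outgoing()
 * gives it both C->A (the blocking active lock) and C->B (the older
 * pending lock), so when A unlocks only B is granted and C continues
 * to wait its turn behind B rather than jumping the queue.
 */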
1054
1055 /*
1056  * Walk the list of pending locks for the file and create an in-coming
1057  * edge to 'lock' from each pending lock that it blocks.
1058 */
1059 static int
1060 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1061 {
1062 struct lockf_entry *overlap;
1063 int error;
1064
1065 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1066 if (!lf_blocks(lock, overlap))
1067 continue;
1068
1069 /*
1070 * We've found a blocking lock. Add the corresponding
1071 * edge to the graphs and see if it would cause a
1072 * deadlock.
1073 */
1074 error = lf_add_edge(overlap, lock);
1075
1076 /*
1077 * The only error that lf_add_edge returns is EDEADLK.
1078 * Remove any edges we added and return the error.
1079 */
1080 if (error) {
1081 lf_remove_incoming(lock);
1082 return (error);
1083 }
1084 }
1085 return (0);
1086 }
1087
1088 /*
1089 * Insert lock into the active list, keeping list entries ordered by
1090 * increasing values of lf_start.
1091 */
1092 static void
1093 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1094 {
1095 struct lockf_entry *lf, *lfprev;
1096
1097 if (LIST_EMPTY(&state->ls_active)) {
1098 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1099 return;
1100 }
1101
1102 lfprev = NULL;
1103 LIST_FOREACH(lf, &state->ls_active, lf_link) {
1104 if (lf->lf_start > lock->lf_start) {
1105 LIST_INSERT_BEFORE(lf, lock, lf_link);
1106 return;
1107 }
1108 lfprev = lf;
1109 }
1110 LIST_INSERT_AFTER(lfprev, lock, lf_link);
1111 }
1112
1113 /*
1114 * Wake up a sleeping lock and remove it from the pending list now
1115 * that all its dependencies have been resolved. The caller should
1116 * arrange for the lock to be added to the active list, adjusting any
1117 * existing locks for the same owner as needed.
1118 */
1119 static void
1120 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1121 {
1122
1123 /*
1124 * Remove from ls_pending list and wake up the caller
1125 * or start the async notification, as appropriate.
1126 */
1127 LIST_REMOVE(wakelock, lf_link);
1128 #ifdef LOCKF_DEBUG
1129 if (lockf_debug & 1)
1130 lf_print("lf_wakeup_lock: awakening", wakelock);
1131 #endif /* LOCKF_DEBUG */
1132 if (wakelock->lf_async_task) {
1133 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1134 } else {
1135 wakeup(wakelock);
1136 }
1137 }
1138
1139 /*
1140 * Re-check all dependent locks and remove edges to locks that we no
1141 * longer block. If 'all' is non-zero, the lock has been removed and
1142 * we must remove all the dependencies, otherwise it has simply been
1143  * reduced but remains active. Any pending locks which have been
1144  * unblocked are added to 'granted'.
1145 */
1146 static void
1147 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1148 struct lockf_entry_list *granted)
1149 {
1150 struct lockf_edge *e, *ne;
1151 struct lockf_entry *deplock;
1152
1153 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1154 deplock = e->le_from;
1155 if (all || !lf_blocks(lock, deplock)) {
1156 sx_xlock(&lf_owner_graph_lock);
1157 lf_remove_edge(e);
1158 sx_xunlock(&lf_owner_graph_lock);
1159 if (LIST_EMPTY(&deplock->lf_outedges)) {
1160 lf_wakeup_lock(state, deplock);
1161 LIST_INSERT_HEAD(granted, deplock, lf_link);
1162 }
1163 }
1164 }
1165 }
1166
1167 /*
1168 * Set the start of an existing active lock, updating dependencies and
1169 * adding any newly woken locks to 'granted'.
1170 */
1171 static void
1172 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1173 struct lockf_entry_list *granted)
1174 {
1175
1176 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1177 lock->lf_start = new_start;
1178 LIST_REMOVE(lock, lf_link);
1179 lf_insert_lock(state, lock);
1180 lf_update_dependancies(state, lock, FALSE, granted);
1181 }
1182
1183 /*
1184 * Set the end of an existing active lock, updating dependencies and
1185 * adding any newly woken locks to 'granted'.
1186 */
1187 static void
1188 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1189 struct lockf_entry_list *granted)
1190 {
1191
1192 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1193 lock->lf_end = new_end;
1194 lf_update_dependancies(state, lock, FALSE, granted);
1195 }
1196
1197 /*
1198 * Add a lock to the active list, updating or removing any current
1199 * locks owned by the same owner and processing any pending locks that
1200 * become unblocked as a result. This code is also used for unlock
1201 * since the logic for updating existing locks is identical.
1202 *
1203 * As a result of processing the new lock, we may unblock existing
1204 * pending locks as a result of downgrading/unlocking. We simply
1205 * activate the newly granted locks by looping.
1206 *
1207 * Since the new lock already has its dependencies set up, we always
1208  * add it to the list (unless it's an unlock request). This may
1209  * fragment the lock list in some pathological cases but it's probably
1210 * not a real problem.
1211 */
1212 static void
1213 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1214 {
1215 struct lockf_entry *overlap, *lf;
1216 struct lockf_entry_list granted;
1217 int ovcase;
1218
1219 LIST_INIT(&granted);
1220 LIST_INSERT_HEAD(&granted, lock, lf_link);
1221
1222 while (!LIST_EMPTY(&granted)) {
1223 lock = LIST_FIRST(&granted);
1224 LIST_REMOVE(lock, lf_link);
1225
1226 /*
1227 * Skip over locks owned by other processes. Handle
1228 * any locks that overlap and are owned by ourselves.
1229 */
1230 overlap = LIST_FIRST(&state->ls_active);
1231 for (;;) {
1232 ovcase = lf_findoverlap(&overlap, lock, SELF);
1233
1234 #ifdef LOCKF_DEBUG
1235 if (ovcase && (lockf_debug & 2)) {
1236 printf("lf_setlock: overlap %d", ovcase);
1237 lf_print("", overlap);
1238 }
1239 #endif
1240 /*
1241 * Six cases:
1242 * 0) no overlap
1243 * 1) overlap == lock
1244 * 2) overlap contains lock
1245 * 3) lock contains overlap
1246 * 4) overlap starts before lock
1247 * 5) overlap ends after lock
1248 */
1249 switch (ovcase) {
1250 case 0: /* no overlap */
1251 break;
1252
1253 case 1: /* overlap == lock */
1254 /*
1255 * We have already setup the
1256 * dependants for the new lock, taking
1257 * into account a possible downgrade
1258 * or unlock. Remove the old lock.
1259 */
1260 LIST_REMOVE(overlap, lf_link);
1261 lf_update_dependancies(state, overlap, TRUE,
1262 &granted);
1263 lf_free_lock(overlap);
1264 break;
1265
1266 case 2: /* overlap contains lock */
1267 /*
1268 * Just split the existing lock.
1269 */
1270 lf_split(state, overlap, lock, &granted);
1271 break;
1272
1273 case 3: /* lock contains overlap */
1274 /*
1275 * Delete the overlap and advance to
1276 * the next entry in the list.
1277 */
1278 lf = LIST_NEXT(overlap, lf_link);
1279 LIST_REMOVE(overlap, lf_link);
1280 lf_update_dependancies(state, overlap, TRUE,
1281 &granted);
1282 lf_free_lock(overlap);
1283 overlap = lf;
1284 continue;
1285
1286 case 4: /* overlap starts before lock */
1287 /*
1288 * Just update the overlap end and
1289 * move on.
1290 */
1291 lf_set_end(state, overlap, lock->lf_start - 1,
1292 &granted);
1293 overlap = LIST_NEXT(overlap, lf_link);
1294 continue;
1295
1296 case 5: /* overlap ends after lock */
1297 /*
1298 * Change the start of overlap and
1299 * re-insert.
1300 */
1301 lf_set_start(state, overlap, lock->lf_end + 1,
1302 &granted);
1303 break;
1304 }
1305 break;
1306 }
1307 #ifdef LOCKF_DEBUG
1308 if (lockf_debug & 1) {
1309 if (lock->lf_type != F_UNLCK)
1310 lf_print("lf_activate_lock: activated", lock);
1311 else
1312 lf_print("lf_activate_lock: unlocked", lock);
1313 lf_printlist("lf_activate_lock", lock);
1314 }
1315 #endif /* LOCKF_DEBUG */
1316 if (lock->lf_type != F_UNLCK)
1317 lf_insert_lock(state, lock);
1318 }
1319 }
1320
1321 /*
1322 * Cancel a pending lock request, either as a result of a signal or a
1323 * cancel request for an async lock.
1324 */
1325 static void
1326 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1327 {
1328 struct lockf_entry_list granted;
1329
1330 /*
1331 * Note it is theoretically possible that cancelling this lock
1332 * may allow some other pending lock to become
1333 * active. Consider this case:
1334 *
1335 * Owner Action Result Dependencies
1336 *
1337 * A: lock [0..0] succeeds
1338 * B: lock [2..2] succeeds
1339 * C: lock [1..2] blocked C->B
1340 * D: lock [0..1] blocked C->B,D->A,D->C
1341 * A: unlock [0..0] C->B,D->C
1342 * C: cancel [1..2]
1343 */
1344
1345 LIST_REMOVE(lock, lf_link);
1346
1347 /*
1348 * Removing out-going edges is simple.
1349 */
1350 sx_xlock(&lf_owner_graph_lock);
1351 lf_remove_outgoing(lock);
1352 sx_xunlock(&lf_owner_graph_lock);
1353
1354 /*
1355 * Removing in-coming edges may allow some other lock to
1356 * become active - we use lf_update_dependancies to figure
1357 * this out.
1358 */
1359 LIST_INIT(&granted);
1360 lf_update_dependancies(state, lock, TRUE, &granted);
1361 lf_free_lock(lock);
1362
1363 /*
1364 * Feed any newly active locks to lf_activate_lock.
1365 */
1366 while (!LIST_EMPTY(&granted)) {
1367 lock = LIST_FIRST(&granted);
1368 LIST_REMOVE(lock, lf_link);
1369 lf_activate_lock(state, lock);
1370 }
1371 }
1372
1373 /*
1374 * Set a byte-range lock.
1375 */
1376 static int
1377 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1378 void **cookiep)
1379 {
1380 static char lockstr[] = "lockf";
1381 int error, priority, stops_deferred;
1382
1383 #ifdef LOCKF_DEBUG
1384 if (lockf_debug & 1)
1385 lf_print("lf_setlock", lock);
1386 #endif /* LOCKF_DEBUG */
1387
1388 /*
1389 * Set the priority
1390 */
1391 priority = PLOCK;
1392 if (lock->lf_type == F_WRLCK)
1393 priority += 4;
1394 if (!(lock->lf_flags & F_NOINTR))
1395 priority |= PCATCH;
1396 /*
1397 * Scan lock list for this file looking for locks that would block us.
1398 */
1399 if (lf_getblock(state, lock)) {
1400 /*
1401 * Free the structure and return if nonblocking.
1402 */
1403 if ((lock->lf_flags & F_WAIT) == 0
1404 && lock->lf_async_task == NULL) {
1405 lf_free_lock(lock);
1406 error = EAGAIN;
1407 goto out;
1408 }
1409
1410 /*
1411 * For flock type locks, we must first remove
1412 * any shared locks that we hold before we sleep
1413 * waiting for an exclusive lock.
1414 */
1415 if ((lock->lf_flags & F_FLOCK) &&
1416 lock->lf_type == F_WRLCK) {
1417 lock->lf_type = F_UNLCK;
1418 lf_activate_lock(state, lock);
1419 lock->lf_type = F_WRLCK;
1420 }
1421
1422 /*
1423 * We are blocked. Create edges to each blocking lock,
1424 * checking for deadlock using the owner graph. For
1425 * simplicity, we run deadlock detection for all
1426 * locks, posix and otherwise.
1427 */
1428 sx_xlock(&lf_owner_graph_lock);
1429 error = lf_add_outgoing(state, lock);
1430 sx_xunlock(&lf_owner_graph_lock);
1431
1432 if (error) {
1433 #ifdef LOCKF_DEBUG
1434 if (lockf_debug & 1)
1435 lf_print("lf_setlock: deadlock", lock);
1436 #endif
1437 lf_free_lock(lock);
1438 goto out;
1439 }
1440
1441 /*
1442 * We have added edges to everything that blocks
1443 * us. Sleep until they all go away.
1444 */
1445 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1446 #ifdef LOCKF_DEBUG
1447 if (lockf_debug & 1) {
1448 struct lockf_edge *e;
1449 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1450 lf_print("lf_setlock: blocking on", e->le_to);
1451 lf_printlist("lf_setlock", e->le_to);
1452 }
1453 }
1454 #endif /* LOCKF_DEBUG */
1455
1456 if ((lock->lf_flags & F_WAIT) == 0) {
1457 /*
1458 * The caller requested async notification -
1459 * this callback happens when the blocking
1460 * lock is released, allowing the caller to
1461 * make another attempt to take the lock.
1462 */
1463 *cookiep = (void *) lock;
1464 error = EINPROGRESS;
1465 goto out;
1466 }
1467
1468 lock->lf_refs++;
1469 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
1470 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1471 sigallowstop(stops_deferred);
1472 if (lf_free_lock(lock)) {
1473 error = EDOOFUS;
1474 goto out;
1475 }
1476
1477 /*
1478 * We may have been awakened by a signal and/or by a
1479 * debugger continuing us (in which cases we must
1480 * remove our lock graph edges) and/or by another
1481 * process releasing a lock (in which case our edges
1482 * have already been removed and we have been moved to
1483 * the active list). We may also have been woken by
1484 * lf_purgelocks which we report to the caller as
1485 * EINTR. In that case, lf_purgelocks will have
1486 * removed our lock graph edges.
1487 *
1488 * Note that it is possible to receive a signal after
1489 * we were successfully woken (and moved to the active
1490 * list) but before we resumed execution. In this
1491 * case, our lf_outedges list will be clear. We
1492 * pretend there was no error.
1493 *
1494 * Note also, if we have been sleeping long enough, we
1495 * may now have incoming edges from some newer lock
1496 * which is waiting behind us in the queue.
1497 */
1498 if (lock->lf_flags & F_INTR) {
1499 error = EINTR;
1500 lf_free_lock(lock);
1501 goto out;
1502 }
1503 if (LIST_EMPTY(&lock->lf_outedges)) {
1504 error = 0;
1505 } else {
1506 lf_cancel_lock(state, lock);
1507 goto out;
1508 }
1509 #ifdef LOCKF_DEBUG
1510 if (lockf_debug & 1) {
1511 lf_print("lf_setlock: granted", lock);
1512 }
1513 #endif
1514 goto out;
1515 }
1516 /*
1517 * It looks like we are going to grant the lock. First add
1518 * edges from any currently pending lock that the new lock
1519 * would block.
1520 */
1521 sx_xlock(&lf_owner_graph_lock);
1522 error = lf_add_incoming(state, lock);
1523 sx_xunlock(&lf_owner_graph_lock);
1524 if (error) {
1525 #ifdef LOCKF_DEBUG
1526 if (lockf_debug & 1)
1527 lf_print("lf_setlock: deadlock", lock);
1528 #endif
1529 lf_free_lock(lock);
1530 goto out;
1531 }
1532
1533 /*
1534 * No blocks!! Add the lock. Note that we will
1535 * downgrade or upgrade any overlapping locks this
1536 * process already owns.
1537 */
1538 lf_activate_lock(state, lock);
1539 error = 0;
1540 out:
1541 return (error);
1542 }
1543
1544 /*
1545 * Remove a byte-range lock on an inode.
1546 *
1547 * Generally, find the lock (or an overlap to that lock)
1548 * and remove it (or shrink it), then wakeup anyone we can.
1549 */
1550 static int
1551 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1552 {
1553 struct lockf_entry *overlap;
1554
1555 overlap = LIST_FIRST(&state->ls_active);
1556
1557 if (overlap == NOLOCKF)
1558 return (0);
1559 #ifdef LOCKF_DEBUG
1560 if (unlock->lf_type != F_UNLCK)
1561 panic("lf_clearlock: bad type");
1562 if (lockf_debug & 1)
1563 lf_print("lf_clearlock", unlock);
1564 #endif /* LOCKF_DEBUG */
1565
1566 lf_activate_lock(state, unlock);
1567
1568 return (0);
1569 }
1570
1571 /*
1572 * Check whether there is a blocking lock, and if so return its
1573 * details in '*fl'.
1574 */
1575 static int
1576 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1577 {
1578 struct lockf_entry *block;
1579
1580 #ifdef LOCKF_DEBUG
1581 if (lockf_debug & 1)
1582 lf_print("lf_getlock", lock);
1583 #endif /* LOCKF_DEBUG */
1584
1585 if ((block = lf_getblock(state, lock))) {
1586 fl->l_type = block->lf_type;
1587 fl->l_whence = SEEK_SET;
1588 fl->l_start = block->lf_start;
1589 if (block->lf_end == OFF_MAX)
1590 fl->l_len = 0;
1591 else
1592 fl->l_len = block->lf_end - block->lf_start + 1;
1593 fl->l_pid = block->lf_owner->lo_pid;
1594 fl->l_sysid = block->lf_owner->lo_sysid;
1595 } else {
1596 fl->l_type = F_UNLCK;
1597 }
1598 return (0);
1599 }
1600
1601 /*
1602 * Cancel an async lock request.
1603 */
1604 static int
1605 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1606 {
1607 struct lockf_entry *reallock;
1608
1609 /*
1610 * We need to match this request with an existing lock
1611 * request.
1612 */
1613 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1614 if ((void *) reallock == cookie) {
1615 /*
1616 * Double-check that this lock looks right
1617 * (maybe use a rolling ID for the cancel
1618 * cookie instead?)
1619 */
1620 if (!(reallock->lf_vnode == lock->lf_vnode
1621 && reallock->lf_start == lock->lf_start
1622 && reallock->lf_end == lock->lf_end)) {
1623 return (ENOENT);
1624 }
1625
1626 /*
1627 * Make sure this lock was async and then just
1628 * remove it from its wait lists.
1629 */
1630 if (!reallock->lf_async_task) {
1631 return (ENOENT);
1632 }
1633
1634 /*
1635 * Note that since any other thread must take
1636 * state->ls_lock before it can possibly
1637 * trigger the async callback, we are safe
1638 * from a race with lf_wakeup_lock, i.e. we
1639 * can free the lock (actually our caller does
1640 * this).
1641 */
1642 lf_cancel_lock(state, reallock);
1643 return (0);
1644 }
1645 }
1646
1647 /*
1648 * We didn't find a matching lock - not much we can do here.
1649 */
1650 return (ENOENT);
1651 }
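/*
 * A minimal sketch of the asynchronous protocol implemented by
 * lf_setlock() and lf_cancel() (hypothetical caller, error handling
 * trimmed; the VOP_ADVLOCKASYNC() argument order is assumed to mirror
 * struct vop_advlockasync_args):
 *
 *	void *cookie = NULL;
 *
 *	error = VOP_ADVLOCKASYNC(vp, id, F_SETLK, &fl, flags,
 *	    &task, &cookie);
 *	if (error == EINPROGRESS) {
 *		...
 *		error = VOP_ADVLOCKASYNC(vp, id, F_CANCEL, &fl, flags,
 *		    NULL, &cookie);
 *	}
 *
 * EINPROGRESS means the request was queued; 'task' is enqueued on
 * taskqueue_thread once the lock is granted.  Until then the caller
 * may abandon the request with F_CANCEL, passing back the cookie it
 * received from the F_SETLK call.
 */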
1652
1653 /*
1654 * Walk the list of locks for an inode and
1655 * return the first blocking lock.
1656 */
1657 static struct lockf_entry *
1658 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1659 {
1660 struct lockf_entry *overlap;
1661
1662 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1663 /*
1664 * We may assume that the active list is sorted by
1665 * lf_start.
1666 */
1667 if (overlap->lf_start > lock->lf_end)
1668 break;
1669 if (!lf_blocks(lock, overlap))
1670 continue;
1671 return (overlap);
1672 }
1673 return (NOLOCKF);
1674 }
1675
1676 /*
1677 * Walk the list of locks for an inode to find an overlapping lock (if
1678 * any) and return a classification of that overlap.
1679 *
1680 * Arguments:
1681 * *overlap The place in the lock list to start looking
1682 * lock The lock which is being tested
1683 * type Pass 'SELF' to test only locks with the same
1684  *			owner as lock, or 'OTHERS' to test only locks
1685 * with a different owner
1686 *
1687 * Returns one of six values:
1688 * 0) no overlap
1689 * 1) overlap == lock
1690 * 2) overlap contains lock
1691 * 3) lock contains overlap
1692 * 4) overlap starts before lock
1693 * 5) overlap ends after lock
1694 *
1695 * If there is an overlapping lock, '*overlap' is set to point at the
1696 * overlapping lock.
1697 *
1698 * NOTE: this returns only the FIRST overlapping lock. There
1699 * may be more than one.
1700 */
1701 static int
1702 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1703 {
1704 struct lockf_entry *lf;
1705 off_t start, end;
1706 int res;
1707
1708 if ((*overlap) == NOLOCKF) {
1709 return (0);
1710 }
1711 #ifdef LOCKF_DEBUG
1712 if (lockf_debug & 2)
1713 lf_print("lf_findoverlap: looking for overlap in", lock);
1714 #endif /* LOCKF_DEBUG */
1715 start = lock->lf_start;
1716 end = lock->lf_end;
1717 res = 0;
1718 while (*overlap) {
1719 lf = *overlap;
1720 if (lf->lf_start > end)
1721 break;
1722 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1723 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1724 *overlap = LIST_NEXT(lf, lf_link);
1725 continue;
1726 }
1727 #ifdef LOCKF_DEBUG
1728 if (lockf_debug & 2)
1729 lf_print("\tchecking", lf);
1730 #endif /* LOCKF_DEBUG */
1731 /*
1732 * OK, check for overlap
1733 *
1734 * Six cases:
1735 * 0) no overlap
1736 * 1) overlap == lock
1737 * 2) overlap contains lock
1738 * 3) lock contains overlap
1739 * 4) overlap starts before lock
1740 * 5) overlap ends after lock
1741 */
1742 if (start > lf->lf_end) {
1743 /* Case 0 */
1744 #ifdef LOCKF_DEBUG
1745 if (lockf_debug & 2)
1746 printf("no overlap\n");
1747 #endif /* LOCKF_DEBUG */
1748 *overlap = LIST_NEXT(lf, lf_link);
1749 continue;
1750 }
1751 if (lf->lf_start == start && lf->lf_end == end) {
1752 /* Case 1 */
1753 #ifdef LOCKF_DEBUG
1754 if (lockf_debug & 2)
1755 printf("overlap == lock\n");
1756 #endif /* LOCKF_DEBUG */
1757 res = 1;
1758 break;
1759 }
1760 if (lf->lf_start <= start && lf->lf_end >= end) {
1761 /* Case 2 */
1762 #ifdef LOCKF_DEBUG
1763 if (lockf_debug & 2)
1764 printf("overlap contains lock\n");
1765 #endif /* LOCKF_DEBUG */
1766 res = 2;
1767 break;
1768 }
1769 if (start <= lf->lf_start && end >= lf->lf_end) {
1770 /* Case 3 */
1771 #ifdef LOCKF_DEBUG
1772 if (lockf_debug & 2)
1773 printf("lock contains overlap\n");
1774 #endif /* LOCKF_DEBUG */
1775 res = 3;
1776 break;
1777 }
1778 if (lf->lf_start < start && lf->lf_end >= start) {
1779 /* Case 4 */
1780 #ifdef LOCKF_DEBUG
1781 if (lockf_debug & 2)
1782 printf("overlap starts before lock\n");
1783 #endif /* LOCKF_DEBUG */
1784 res = 4;
1785 break;
1786 }
1787 if (lf->lf_start > start && lf->lf_end > end) {
1788 /* Case 5 */
1789 #ifdef LOCKF_DEBUG
1790 if (lockf_debug & 2)
1791 printf("overlap ends after lock\n");
1792 #endif /* LOCKF_DEBUG */
1793 res = 5;
1794 break;
1795 }
1796 panic("lf_findoverlap: default");
1797 }
1798 return (res);
1799 }
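/*
 * Worked example of the six cases (illustrative ranges only), testing
 * a lock covering [10..19] against possible existing entries:
 *
 *	existing [0..5] or [30..40]	-> 0, no overlap
 *	existing [10..19]		-> 1, overlap == lock
 *	existing [5..25]		-> 2, overlap contains lock
 *	existing [12..15]		-> 3, lock contains overlap
 *	existing [5..15]		-> 4, overlap starts before lock
 *	existing [15..25]		-> 5, overlap ends after lock
 */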
1800
1801 /*
1802  * Split the existing 'lock1', based on the extent of the lock
1803 * described by 'lock2'. The existing lock should cover 'lock2'
1804 * entirely.
1805 *
1806  * Any pending locks which have been unblocked are added to
1807  * 'granted'.
1808 */
1809 static void
1810 lf_split(struct lockf *state, struct lockf_entry *lock1,
1811 struct lockf_entry *lock2, struct lockf_entry_list *granted)
1812 {
1813 struct lockf_entry *splitlock;
1814
1815 #ifdef LOCKF_DEBUG
1816 if (lockf_debug & 2) {
1817 lf_print("lf_split", lock1);
1818 lf_print("splitting from", lock2);
1819 }
1820 #endif /* LOCKF_DEBUG */
1821 /*
1822 * Check to see if we don't need to split at all.
1823 */
1824 if (lock1->lf_start == lock2->lf_start) {
1825 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1826 return;
1827 }
1828 if (lock1->lf_end == lock2->lf_end) {
1829 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1830 return;
1831 }
1832 /*
1833 * Make a new lock consisting of the last part of
1834 * the encompassing lock.
1835 */
1836 splitlock = lf_alloc_lock(lock1->lf_owner);
1837 memcpy(splitlock, lock1, sizeof *splitlock);
1838 splitlock->lf_refs = 1;
1839 if (splitlock->lf_flags & F_REMOTE)
1840 vref(splitlock->lf_vnode);
1841
1842 /*
1843 * This cannot cause a deadlock since any edges we would add
1844 * to splitlock already exist in lock1. We must be sure to add
1845 * necessary dependencies to splitlock before we reduce lock1
1846 * otherwise we may accidentally grant a pending lock that
1847 * was blocked by the tail end of lock1.
1848 */
1849 splitlock->lf_start = lock2->lf_end + 1;
1850 LIST_INIT(&splitlock->lf_outedges);
1851 LIST_INIT(&splitlock->lf_inedges);
1852 sx_xlock(&lf_owner_graph_lock);
1853 lf_add_incoming(state, splitlock);
1854 sx_xunlock(&lf_owner_graph_lock);
1855
1856 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1857
1858 /*
1859 * OK, now link it in
1860 */
1861 lf_insert_lock(state, splitlock);
1862 }
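/*
 * Worked example (illustrative ranges only): if lock1 covers [0..99]
 * and lock2 unlocks [40..59], lf_split() creates 'splitlock' for
 * [60..99], trims lock1 to [0..39] with lf_set_end() and inserts
 * splitlock into the active list, leaving the owner with two active
 * locks.  Had lock2 shared a boundary with lock1, say [0..59], no
 * split would be needed and lock1 would simply be trimmed to [60..99]
 * with lf_set_start().
 */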
1863
1864 struct lockdesc {
1865 STAILQ_ENTRY(lockdesc) link;
1866 struct vnode *vp;
1867 struct flock fl;
1868 };
1869 STAILQ_HEAD(lockdesclist, lockdesc);
1870
1871 int
1872 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1873 {
1874 struct lockf *ls;
1875 struct lockf_entry *lf;
1876 struct lockdesc *ldesc;
1877 struct lockdesclist locks;
1878 int error;
1879
1880 /*
1881 * In order to keep the locking simple, we iterate over the
1882 * active lock lists to build a list of locks that need
1883 * releasing. We then call the iterator for each one in turn.
1884 *
1885 * We take an extra reference to the vnode for the duration to
1886 * make sure it doesn't go away before we are finished.
1887 */
1888 STAILQ_INIT(&locks);
1889 sx_xlock(&lf_lock_states_lock);
1890 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1891 sx_xlock(&ls->ls_lock);
1892 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1893 if (lf->lf_owner->lo_sysid != sysid)
1894 continue;
1895
1896 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1897 M_WAITOK);
1898 ldesc->vp = lf->lf_vnode;
1899 vref(ldesc->vp);
1900 ldesc->fl.l_start = lf->lf_start;
1901 if (lf->lf_end == OFF_MAX)
1902 ldesc->fl.l_len = 0;
1903 else
1904 ldesc->fl.l_len =
1905 lf->lf_end - lf->lf_start + 1;
1906 ldesc->fl.l_whence = SEEK_SET;
1907 ldesc->fl.l_type = F_UNLCK;
1908 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1909 ldesc->fl.l_sysid = sysid;
1910 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1911 }
1912 sx_xunlock(&ls->ls_lock);
1913 }
1914 sx_xunlock(&lf_lock_states_lock);
1915
1916 /*
1917 * Call the iterator function for each lock in turn. If the
1918 * iterator returns an error code, just free the rest of the
1919 * lockdesc structures.
1920 */
1921 error = 0;
1922 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1923 STAILQ_REMOVE_HEAD(&locks, link);
1924 if (!error)
1925 error = fn(ldesc->vp, &ldesc->fl, arg);
1926 vrele(ldesc->vp);
1927 free(ldesc, M_LOCKF);
1928 }
1929
1930 return (error);
1931 }
1932
1933 int
1934 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1935 {
1936 struct lockf *ls;
1937 struct lockf_entry *lf;
1938 struct lockdesc *ldesc;
1939 struct lockdesclist locks;
1940 int error;
1941
1942 /*
1943 * In order to keep the locking simple, we iterate over the
1944 * active lock lists to build a list of locks that need
1945 * releasing. We then call the iterator for each one in turn.
1946 *
1947 * We take an extra reference to the vnode for the duration to
1948 * make sure it doesn't go away before we are finished.
1949 */
1950 STAILQ_INIT(&locks);
1951 VI_LOCK(vp);
1952 ls = vp->v_lockf;
1953 if (!ls) {
1954 VI_UNLOCK(vp);
1955 return (0);
1956 }
1957 ls->ls_threads++;
1958 VI_UNLOCK(vp);
1959
1960 sx_xlock(&ls->ls_lock);
1961 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1962 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1963 M_WAITOK);
1964 ldesc->vp = lf->lf_vnode;
1965 vref(ldesc->vp);
1966 ldesc->fl.l_start = lf->lf_start;
1967 if (lf->lf_end == OFF_MAX)
1968 ldesc->fl.l_len = 0;
1969 else
1970 ldesc->fl.l_len =
1971 lf->lf_end - lf->lf_start + 1;
1972 ldesc->fl.l_whence = SEEK_SET;
1973 ldesc->fl.l_type = F_UNLCK;
1974 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1975 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1976 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1977 }
1978 sx_xunlock(&ls->ls_lock);
1979 VI_LOCK(vp);
1980 ls->ls_threads--;
1981 wakeup(ls);
1982 VI_UNLOCK(vp);
1983
1984 /*
1985 * Call the iterator function for each lock in turn. If the
1986 * iterator returns an error code, just free the rest of the
1987 * lockdesc structures.
1988 */
1989 error = 0;
1990 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1991 STAILQ_REMOVE_HEAD(&locks, link);
1992 if (!error)
1993 error = fn(ldesc->vp, &ldesc->fl, arg);
1994 vrele(ldesc->vp);
1995 free(ldesc, M_LOCKF);
1996 }
1997
1998 return (error);
1999 }
2000
2001 static int
2002 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
2003 {
2004
2005 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
2006 return (0);
2007 }
2008
2009 void
2010 lf_clearremotesys(int sysid)
2011 {
2012
2013 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2014 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2015 }
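
lf_clearremotesys() above is the in-tree consumer of lf_iteratelocks_sysid(); any other caller follows the same pattern of pairing an lf_iterator callback with an opaque argument. A hedged sketch (hypothetical helper names, not part of this file) that counts, rather than clears, a remote system's active lock records:

/* Hypothetical illustration only -- not from kern_lockf.c. */
static int
lf_countremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
{
	int *countp = arg;

	(*countp)++;		/* one active lock record seen */
	return (0);		/* zero keeps the iteration going */
}

static int
lf_countremotesys(int sysid)
{
	int count = 0;

	(void)lf_iteratelocks_sysid(sysid, lf_countremotesys_iterator, &count);
	return (count);
}
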
2016
2017 int
2018 lf_countlocks(int sysid)
2019 {
2020 int i;
2021 struct lock_owner *lo;
2022 int count;
2023
2024 count = 0;
2025 sx_xlock(&lf_lock_owners_lock);
2026 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
2027 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
2028 if (lo->lo_sysid == sysid)
2029 count += lo->lo_refs;
2030 sx_xunlock(&lf_lock_owners_lock);
2031
2032 return (count);
2033 }
2034
2035 #ifdef LOCKF_DEBUG
2036
2037 /*
2038 * Return non-zero if y is reachable from x using a brute force
2039 * search. If reachable and path is non-null, return the route taken
2040 * in path.
2041 */
2042 static int
2043 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2044 struct owner_vertex_list *path)
2045 {
2046 struct owner_edge *e;
2047
2048 if (x == y) {
2049 if (path)
2050 TAILQ_INSERT_HEAD(path, x, v_link);
2051 return 1;
2052 }
2053
2054 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2055 if (graph_reaches(e->e_to, y, path)) {
2056 if (path)
2057 TAILQ_INSERT_HEAD(path, x, v_link);
2058 return 1;
2059 }
2060 }
2061 return 0;
2062 }
2063
2064 /*
2065 * Perform consistency checks on the graph. Make sure the values of
2066 * v_order are correct. If checkorder is non-zero, check no vertex can
2067 * reach any other vertex with a smaller order.
2068 */
2069 static void
2070 graph_check(struct owner_graph *g, int checkorder)
2071 {
2072 int i, j;
2073
2074 for (i = 0; i < g->g_size; i++) {
2075 if (!g->g_vertices[i]->v_owner)
2076 continue;
2077 KASSERT(g->g_vertices[i]->v_order == i,
2078 ("lock graph vertices disordered"));
2079 if (checkorder) {
2080 for (j = 0; j < i; j++) {
2081 if (!g->g_vertices[j]->v_owner)
2082 continue;
2083 KASSERT(!graph_reaches(g->g_vertices[i],
2084 g->g_vertices[j], NULL),
2085 ("lock graph vertices disordered"));
2086 }
2087 }
2088 }
2089 }
2090
2091 static void
2092 graph_print_vertices(struct owner_vertex_list *set)
2093 {
2094 struct owner_vertex *v;
2095
2096 printf("{ ");
2097 TAILQ_FOREACH(v, set, v_link) {
2098 printf("%d:", v->v_order);
2099 lf_print_owner(v->v_owner);
2100 if (TAILQ_NEXT(v, v_link))
2101 printf(", ");
2102 }
2103 printf(" }\n");
2104 }
2105
2106 #endif
2107
2108 /*
2109 * Calculate the sub-set of vertices v from the affected region [y..x]
2110 * where v is reachable from y. Return -1 if a loop was detected
2111  * (i.e. x is reachable from y), otherwise the number of vertices in
2112 * this subset.
2113 */
2114 static int
2115 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2116 struct owner_vertex *y, struct owner_vertex_list *delta)
2117 {
2118 uint32_t gen;
2119 struct owner_vertex *v;
2120 struct owner_edge *e;
2121 int n;
2122
2123 /*
2124 * We start with a set containing just y. Then for each vertex
2125 * v in the set so far unprocessed, we add each vertex that v
2126 * has an out-edge to and that is within the affected region
2127 * [y..x]. If we see the vertex x on our travels, stop
2128 * immediately.
2129 */
2130 TAILQ_INIT(delta);
2131 TAILQ_INSERT_TAIL(delta, y, v_link);
2132 v = y;
2133 n = 1;
2134 gen = g->g_gen;
2135 while (v) {
2136 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2137 if (e->e_to == x)
2138 return -1;
2139 if (e->e_to->v_order < x->v_order
2140 && e->e_to->v_gen != gen) {
2141 e->e_to->v_gen = gen;
2142 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2143 n++;
2144 }
2145 }
2146 v = TAILQ_NEXT(v, v_link);
2147 }
2148
2149 return (n);
2150 }
2151
2152 /*
2153 * Calculate the sub-set of vertices v from the affected region [y..x]
2154 * where v reaches x. Return the number of vertices in this subset.
2155 */
2156 static int
2157 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2158 struct owner_vertex *y, struct owner_vertex_list *delta)
2159 {
2160 uint32_t gen;
2161 struct owner_vertex *v;
2162 struct owner_edge *e;
2163 int n;
2164
2165 /*
2166 * We start with a set containing just x. Then for each vertex
2167 * v in the set so far unprocessed, we add each vertex that v
2168 * has an in-edge from and that is within the affected region
2169 * [y..x].
2170 */
2171 TAILQ_INIT(delta);
2172 TAILQ_INSERT_TAIL(delta, x, v_link);
2173 v = x;
2174 n = 1;
2175 gen = g->g_gen;
2176 while (v) {
2177 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2178 if (e->e_from->v_order > y->v_order
2179 && e->e_from->v_gen != gen) {
2180 e->e_from->v_gen = gen;
2181 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2182 n++;
2183 }
2184 }
2185 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2186 }
2187
2188 return (n);
2189 }
2190
2191 static int
2192 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2193 {
2194 struct owner_vertex *v;
2195 int i, j;
2196
2197 TAILQ_FOREACH(v, set, v_link) {
2198 for (i = n;
2199 i > 0 && indices[i - 1] > v->v_order; i--)
2200 ;
2201 for (j = n - 1; j >= i; j--)
2202 indices[j + 1] = indices[j];
2203 indices[i] = v->v_order;
2204 n++;
2205 }
2206
2207 return (n);
2208 }
2209
2210 static int
2211 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2212 struct owner_vertex_list *set)
2213 {
2214 struct owner_vertex *v, *vlowest;
2215
2216 while (!TAILQ_EMPTY(set)) {
2217 vlowest = NULL;
2218 TAILQ_FOREACH(v, set, v_link) {
2219 if (!vlowest || v->v_order < vlowest->v_order)
2220 vlowest = v;
2221 }
2222 TAILQ_REMOVE(set, vlowest, v_link);
2223 vlowest->v_order = indices[nextunused];
2224 g->g_vertices[vlowest->v_order] = vlowest;
2225 nextunused++;
2226 }
2227
2228 return (nextunused);
2229 }
2230
2231 static int
2232 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2233 struct owner_vertex *y)
2234 {
2235 struct owner_edge *e;
2236 struct owner_vertex_list deltaF, deltaB;
2237 int nF, nB, n, vi, i;
2238 int *indices;
2239
2240 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2241
2242 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2243 if (e->e_to == y) {
2244 e->e_refs++;
2245 return (0);
2246 }
2247 }
2248
2249 #ifdef LOCKF_DEBUG
2250 if (lockf_debug & 8) {
2251 printf("adding edge %d:", x->v_order);
2252 lf_print_owner(x->v_owner);
2253 printf(" -> %d:", y->v_order);
2254 lf_print_owner(y->v_owner);
2255 printf("\n");
2256 }
2257 #endif
2258 if (y->v_order < x->v_order) {
2259 /*
2260 * The new edge violates the order. First find the set
2261 * of affected vertices reachable from y (deltaF) and
2262 		 * the set of affected vertices that reach x
2263 * (deltaB), using the graph generation number to
2264 * detect whether we have visited a given vertex
2265 * already. We re-order the graph so that each vertex
2266 * in deltaB appears before each vertex in deltaF.
2267 *
2268 * If x is a member of deltaF, then the new edge would
2269 * create a cycle. Otherwise, we may assume that
2270 * deltaF and deltaB are disjoint.
2271 */
2272 g->g_gen++;
2273 if (g->g_gen == 0) {
2274 /*
2275 * Generation wrap.
2276 */
2277 for (vi = 0; vi < g->g_size; vi++) {
2278 g->g_vertices[vi]->v_gen = 0;
2279 }
2280 g->g_gen++;
2281 }
2282 nF = graph_delta_forward(g, x, y, &deltaF);
2283 if (nF < 0) {
2284 #ifdef LOCKF_DEBUG
2285 if (lockf_debug & 8) {
2286 struct owner_vertex_list path;
2287 printf("deadlock: ");
2288 TAILQ_INIT(&path);
2289 graph_reaches(y, x, &path);
2290 graph_print_vertices(&path);
2291 }
2292 #endif
2293 return (EDEADLK);
2294 }
2295
2296 #ifdef LOCKF_DEBUG
2297 if (lockf_debug & 8) {
2298 printf("re-ordering graph vertices\n");
2299 printf("deltaF = ");
2300 graph_print_vertices(&deltaF);
2301 }
2302 #endif
2303
2304 nB = graph_delta_backward(g, x, y, &deltaB);
2305
2306 #ifdef LOCKF_DEBUG
2307 if (lockf_debug & 8) {
2308 printf("deltaB = ");
2309 graph_print_vertices(&deltaB);
2310 }
2311 #endif
2312
2313 /*
2314 * We first build a set of vertex indices (vertex
2315 * order values) that we may use, then we re-assign
2316 * orders first to those vertices in deltaB, then to
2317 * deltaF. Note that the contents of deltaF and deltaB
2318 * may be partially disordered - we perform an
2319 * insertion sort while building our index set.
2320 */
2321 indices = g->g_indexbuf;
2322 n = graph_add_indices(indices, 0, &deltaF);
2323 graph_add_indices(indices, n, &deltaB);
2324
2325 /*
2326 * We must also be sure to maintain the relative
2327 * ordering of deltaF and deltaB when re-assigning
2328 * vertices. We do this by iteratively removing the
2329 * lowest ordered element from the set and assigning
2330 * it the next value from our new ordering.
2331 */
2332 i = graph_assign_indices(g, indices, 0, &deltaB);
2333 graph_assign_indices(g, indices, i, &deltaF);
2334
2335 #ifdef LOCKF_DEBUG
2336 if (lockf_debug & 8) {
2337 struct owner_vertex_list set;
2338 TAILQ_INIT(&set);
2339 for (i = 0; i < nB + nF; i++)
2340 TAILQ_INSERT_TAIL(&set,
2341 g->g_vertices[indices[i]], v_link);
2342 printf("new ordering = ");
2343 graph_print_vertices(&set);
2344 }
2345 #endif
2346 }
2347
2348 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2349
2350 #ifdef LOCKF_DEBUG
2351 if (lockf_debug & 8) {
2352 graph_check(g, TRUE);
2353 }
2354 #endif
2355
2356 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2357
2358 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2359 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2360 e->e_refs = 1;
2361 e->e_from = x;
2362 e->e_to = y;
2363
2364 return (0);
2365 }
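
To make the re-ordering concrete, here is a short worked trace of graph_add_edge() on a four-owner graph (editorial annotation, not part of the source):

/*
 * Worked example (annotation only): vertices A, B, C, D with orders
 * A=0, B=1, C=2, D=3 and existing edges A->B and B->C. Adding the
 * edge D->B (x = D, y = B) violates the order because
 * y->v_order (1) < x->v_order (3).
 *
 *   deltaF = vertices reachable from y inside [y..x] = { B(1), C(2) }
 *   deltaB = vertices that reach x inside [y..x]     = { D(3) }
 *
 * x is not reached from y, so there is no cycle. The index set is the
 * sorted collection of the old orders, {1, 2, 3}. deltaB is renumbered
 * first, then deltaF, each in ascending old order:
 *
 *   D -> 1, B -> 2, C -> 3        (A keeps order 0)
 *
 * Afterwards x->v_order (1) < y->v_order (2), so the KASSERT above
 * holds and the edge D->B is inserted. Had there already been a path
 * B -> ... -> D, graph_delta_forward() would have returned -1 and the
 * caller would see EDEADLK.
 */
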
2366
2367 /*
2368 * Remove an edge x->y from the graph.
2369 */
2370 static void
2371 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2372 struct owner_vertex *y)
2373 {
2374 struct owner_edge *e;
2375
2376 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2377
2378 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2379 if (e->e_to == y)
2380 break;
2381 }
2382 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2383
2384 e->e_refs--;
2385 if (e->e_refs == 0) {
2386 #ifdef LOCKF_DEBUG
2387 if (lockf_debug & 8) {
2388 printf("removing edge %d:", x->v_order);
2389 lf_print_owner(x->v_owner);
2390 printf(" -> %d:", y->v_order);
2391 lf_print_owner(y->v_owner);
2392 printf("\n");
2393 }
2394 #endif
2395 LIST_REMOVE(e, e_outlink);
2396 LIST_REMOVE(e, e_inlink);
2397 free(e, M_LOCKF);
2398 }
2399 }
2400
2401 /*
2402  * Allocate a new vertex for a lock owner, growing the graph's vertex
2403  * array and index buffer if necessary.
2404 */
2405 static struct owner_vertex *
2406 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2407 {
2408 struct owner_vertex *v;
2409
2410 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2411
2412 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2413 if (g->g_size == g->g_space) {
2414 g->g_vertices = realloc(g->g_vertices,
2415 2 * g->g_space * sizeof(struct owner_vertex *),
2416 M_LOCKF, M_WAITOK);
2417 free(g->g_indexbuf, M_LOCKF);
2418 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2419 M_LOCKF, M_WAITOK);
2420 g->g_space = 2 * g->g_space;
2421 }
2422 v->v_order = g->g_size;
2423 v->v_gen = g->g_gen;
2424 g->g_vertices[g->g_size] = v;
2425 g->g_size++;
2426
2427 LIST_INIT(&v->v_outedges);
2428 LIST_INIT(&v->v_inedges);
2429 v->v_owner = lo;
2430
2431 return (v);
2432 }
2433
2434 static void
2435 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2436 {
2437 struct owner_vertex *w;
2438 int i;
2439
2440 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2441
2442 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2443 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2444
2445 /*
2446 * Remove from the graph's array and close up the gap,
2447 * renumbering the other vertices.
2448 */
2449 for (i = v->v_order + 1; i < g->g_size; i++) {
2450 w = g->g_vertices[i];
2451 w->v_order--;
2452 g->g_vertices[i - 1] = w;
2453 }
2454 g->g_size--;
2455
2456 free(v, M_LOCKF);
2457 }
2458
2459 static struct owner_graph *
2460 graph_init(struct owner_graph *g)
2461 {
2462
2463 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2464 M_LOCKF, M_WAITOK);
2465 g->g_size = 0;
2466 g->g_space = 10;
2467 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2468 g->g_gen = 0;
2469
2470 return (g);
2471 }
2472
2473 #ifdef LOCKF_DEBUG
2474 /*
2475 * Print description of a lock owner
2476 */
2477 static void
2478 lf_print_owner(struct lock_owner *lo)
2479 {
2480
2481 if (lo->lo_flags & F_REMOTE) {
2482 printf("remote pid %d, system %d",
2483 lo->lo_pid, lo->lo_sysid);
2484 } else if (lo->lo_flags & F_FLOCK) {
2485 printf("file %p", lo->lo_id);
2486 } else {
2487 printf("local pid %d", lo->lo_pid);
2488 }
2489 }
2490
2491 /*
2492 * Print out a lock.
2493 */
2494 static void
2495 lf_print(char *tag, struct lockf_entry *lock)
2496 {
2497
2498 printf("%s: lock %p for ", tag, (void *)lock);
2499 lf_print_owner(lock->lf_owner);
2500 if (lock->lf_inode != (struct inode *)0)
2501 printf(" in ino %ju on dev <%s>,",
2502 (uintmax_t)lock->lf_inode->i_number,
2503 devtoname(lock->lf_inode->i_dev));
2504 printf(" %s, start %jd, end ",
2505 lock->lf_type == F_RDLCK ? "shared" :
2506 lock->lf_type == F_WRLCK ? "exclusive" :
2507 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2508 (intmax_t)lock->lf_start);
2509 if (lock->lf_end == OFF_MAX)
2510 printf("EOF");
2511 else
2512 printf("%jd", (intmax_t)lock->lf_end);
2513 if (!LIST_EMPTY(&lock->lf_outedges))
2514 printf(" block %p\n",
2515 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2516 else
2517 printf("\n");
2518 }
2519
2520 static void
2521 lf_printlist(char *tag, struct lockf_entry *lock)
2522 {
2523 struct lockf_entry *lf, *blk;
2524 struct lockf_edge *e;
2525
2526 if (lock->lf_inode == (struct inode *)0)
2527 return;
2528
2529 printf("%s: Lock list for ino %ju on dev <%s>:\n",
2530 tag, (uintmax_t)lock->lf_inode->i_number,
2531 devtoname(lock->lf_inode->i_dev));
2532 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2533 		printf("\tlock %p for ", (void *)lf);
2534 		lf_print_owner(lf->lf_owner);
2535 printf(", %s, start %jd, end %jd",
2536 lf->lf_type == F_RDLCK ? "shared" :
2537 lf->lf_type == F_WRLCK ? "exclusive" :
2538 lf->lf_type == F_UNLCK ? "unlock" :
2539 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2540 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2541 blk = e->le_to;
2542 printf("\n\t\tlock request %p for ", (void *)blk);
2543 lf_print_owner(blk->lf_owner);
2544 printf(", %s, start %jd, end %jd",
2545 blk->lf_type == F_RDLCK ? "shared" :
2546 blk->lf_type == F_WRLCK ? "exclusive" :
2547 blk->lf_type == F_UNLCK ? "unlock" :
2548 "unknown", (intmax_t)blk->lf_start,
2549 (intmax_t)blk->lf_end);
2550 if (!LIST_EMPTY(&blk->lf_inedges))
2551 panic("lf_printlist: bad list");
2552 }
2553 printf("\n");
2554 }
2555 }
2556 #endif /* LOCKF_DEBUG */