sys/kern/kern_lockf.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
5 * Authors: Doug Rabson <dfr@rabson.org>
6 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Scooter Morris at Genentech Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
61 */
62
63 #include <sys/cdefs.h>
64 __FBSDID("$FreeBSD$");
65
66 #include "opt_debug_lockf.h"
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/hash.h>
71 #include <sys/kernel.h>
72 #include <sys/limits.h>
73 #include <sys/lock.h>
74 #include <sys/mount.h>
75 #include <sys/mutex.h>
76 #include <sys/proc.h>
77 #include <sys/sx.h>
78 #include <sys/unistd.h>
79 #include <sys/vnode.h>
80 #include <sys/malloc.h>
81 #include <sys/fcntl.h>
82 #include <sys/lockf.h>
83 #include <sys/taskqueue.h>
84
85 #ifdef LOCKF_DEBUG
86 #include <sys/sysctl.h>
87
88 #include <ufs/ufs/extattr.h>
89 #include <ufs/ufs/quota.h>
90 #include <ufs/ufs/ufsmount.h>
91 #include <ufs/ufs/inode.h>
92
93 static int lockf_debug = 0; /* control debug output */
94 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
95 #endif
96
97 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
98
99 struct owner_edge;
100 struct owner_vertex;
101 struct owner_vertex_list;
102 struct owner_graph;
103
104 #define NOLOCKF (struct lockf_entry *)0
105 #define SELF 0x1
106 #define OTHERS 0x2
107 static void lf_init(void *);
108 static int lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
109 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
110 int);
111 static struct lockf_entry *
112 lf_alloc_lock(struct lock_owner *);
113 static int lf_free_lock(struct lockf_entry *);
114 static int lf_clearlock(struct lockf *, struct lockf_entry *);
115 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
116 static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
117 static void lf_free_edge(struct lockf_edge *);
118 static struct lockf_edge *
119 lf_alloc_edge(void);
120 static void lf_alloc_vertex(struct lockf_entry *);
121 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
122 static void lf_remove_edge(struct lockf_edge *);
123 static void lf_remove_outgoing(struct lockf_entry *);
124 static void lf_remove_incoming(struct lockf_entry *);
125 static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
126 static int lf_add_incoming(struct lockf *, struct lockf_entry *);
127 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
128 int);
129 static struct lockf_entry *
130 lf_getblock(struct lockf *, struct lockf_entry *);
131 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
132 static void lf_insert_lock(struct lockf *, struct lockf_entry *);
133 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
134 static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
135 int all, struct lockf_entry_list *);
136 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
137 struct lockf_entry_list*);
138 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
139 struct lockf_entry_list*);
140 static int lf_setlock(struct lockf *, struct lockf_entry *,
141 struct vnode *, void **cookiep);
142 static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
143 static void lf_split(struct lockf *, struct lockf_entry *,
144 struct lockf_entry *, struct lockf_entry_list *);
145 #ifdef LOCKF_DEBUG
146 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
147 struct owner_vertex_list *path);
148 static void graph_check(struct owner_graph *g, int checkorder);
149 static void graph_print_vertices(struct owner_vertex_list *set);
150 #endif
151 static int graph_delta_forward(struct owner_graph *g,
152 struct owner_vertex *x, struct owner_vertex *y,
153 struct owner_vertex_list *delta);
154 static int graph_delta_backward(struct owner_graph *g,
155 struct owner_vertex *x, struct owner_vertex *y,
156 struct owner_vertex_list *delta);
157 static int graph_add_indices(int *indices, int n,
158 struct owner_vertex_list *set);
159 static int graph_assign_indices(struct owner_graph *g, int *indices,
160 int nextunused, struct owner_vertex_list *set);
161 static int graph_add_edge(struct owner_graph *g,
162 struct owner_vertex *x, struct owner_vertex *y);
163 static void graph_remove_edge(struct owner_graph *g,
164 struct owner_vertex *x, struct owner_vertex *y);
165 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
166 struct lock_owner *lo);
167 static void graph_free_vertex(struct owner_graph *g,
168 struct owner_vertex *v);
169 static struct owner_graph * graph_init(struct owner_graph *g);
170 #ifdef LOCKF_DEBUG
171 static void lf_print(char *, struct lockf_entry *);
172 static void lf_printlist(char *, struct lockf_entry *);
173 static void lf_print_owner(struct lock_owner *);
174 #endif
175
176 /*
177 * This structure is used to keep track of both local and remote lock
178 * owners. The lf_owner field of the struct lockf_entry points back at
179 * the lock owner structure. Each possible lock owner (local proc for
180 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
181 * pair for remote locks) is represented by a unique instance of
182 * struct lock_owner.
183 *
184 * If a lock owner has a lock that blocks some other lock or a lock
185 * that is waiting for some other lock, it also has a vertex in the
186 * owner_graph below.
187 *
188 * Locks:
189 * (s) locked by state->ls_lock
190 * (S) locked by lf_lock_states_lock
191 * (g) locked by lf_owner_graph_lock
192  * (l)  locked by the lock_owner hash chain lock, lf_lock_owners[].lock
     * (c)  const until freeing
193 */
194 #define LOCK_OWNER_HASH_SIZE 256
195
196 struct lock_owner {
197 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
198 int lo_refs; /* (l) Number of locks referring to this */
199 	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
200 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
201 pid_t lo_pid; /* (c) Process Id of the lock owner */
202 int lo_sysid; /* (c) System Id of the lock owner */
203 int lo_hash; /* (c) Used to lock the appropriate chain */
204 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
205 };
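/*
 * For example: a POSIX fcntl() lock passes the locking process's
 * struct proc pointer as lo_id; a flock() lock passes the struct
 * file pointer (so dup'd descriptors share one owner); a remote
 * (F_REMOTE) lock is matched by the <lo_pid, lo_sysid> pair rather
 * than by lo_id.
 */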
206
207 LIST_HEAD(lock_owner_list, lock_owner);
208
209 struct lock_owner_chain {
210 struct sx lock;
211 struct lock_owner_list list;
212 };
213
214 static struct sx lf_lock_states_lock;
215 static struct lockf_list lf_lock_states; /* (S) */
216 static struct lock_owner_chain lf_lock_owners[LOCK_OWNER_HASH_SIZE];
217
218 /*
219 * Structures for deadlock detection.
220 *
221 * We have two types of directed graph, the first is the set of locks,
222 * both active and pending on a vnode. Within this graph, active locks
223 * are terminal nodes in the graph (i.e. have no out-going
224 * edges). Pending locks have out-going edges to each blocking active
225 * lock that prevents the lock from being granted and also to each
226 * older pending lock that would block them if it was active. The
227 * graph for each vnode is naturally acyclic; new edges are only ever
228 * added to or from new nodes (either new pending locks which only add
229 * out-going edges or new active locks which only add in-coming edges)
230 * therefore they cannot create loops in the lock graph.
231 *
232 * The second graph is a global graph of lock owners. Each lock owner
233 * is a vertex in that graph and an edge is added to the graph
234 * whenever an edge is added to a vnode graph, with end points
235  * corresponding to the owner of the new pending lock and the owner of the
236 * lock upon which it waits. In order to prevent deadlock, we only add
237 * an edge to this graph if the new edge would not create a cycle.
238 *
239 * The lock owner graph is topologically sorted, i.e. if a node has
240 * any outgoing edges, then it has an order strictly less than any
241 * node to which it has an outgoing edge. We preserve this ordering
242 * (and detect cycles) on edge insertion using Algorithm PK from the
243 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
244 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
245  * No. 1.7).
246 */
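/*
 * A sketch of how the two graphs interact: suppose owner A holds a
 * write lock on vnode V1 and owner B holds a write lock on vnode V2.
 * If A requests an overlapping lock on V2, the V2 lock graph gains an
 * edge from A's pending entry to B's active entry, and the owner
 * graph gains A->B. If B then requests an overlapping lock on V1,
 * adding B->A to the owner graph would close the cycle A->B->A, so
 * lf_add_edge() fails with EDEADLK instead of letting both owners
 * sleep on each other forever.
 */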
247 struct owner_vertex;
248
249 struct owner_edge {
250 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
251 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
252 int e_refs; /* (g) number of times added */
253 struct owner_vertex *e_from; /* (c) out-going from here */
254 struct owner_vertex *e_to; /* (c) in-coming to here */
255 };
256 LIST_HEAD(owner_edge_list, owner_edge);
257
258 struct owner_vertex {
259 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
260 uint32_t v_gen; /* (g) workspace for edge insertion */
261 int v_order; /* (g) order of vertex in graph */
262 struct owner_edge_list v_outedges;/* (g) list of out-edges */
263 struct owner_edge_list v_inedges; /* (g) list of in-edges */
264 struct lock_owner *v_owner; /* (c) corresponding lock owner */
265 };
266 TAILQ_HEAD(owner_vertex_list, owner_vertex);
267
268 struct owner_graph {
269 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
270 int g_size; /* (g) number of vertices */
271 int g_space; /* (g) space allocated for vertices */
272 int *g_indexbuf; /* (g) workspace for loop detection */
273 uint32_t g_gen; /* (g) increment when re-ordering */
274 };
275
276 static struct sx lf_owner_graph_lock;
277 static struct owner_graph lf_owner_graph;
278
279 /*
280 * Initialise various structures and locks.
281 */
282 static void
283 lf_init(void *dummy)
284 {
285 int i;
286
287 sx_init(&lf_lock_states_lock, "lock states lock");
288 LIST_INIT(&lf_lock_states);
289
290 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
291 sx_init(&lf_lock_owners[i].lock, "lock owners lock");
292 LIST_INIT(&lf_lock_owners[i].list);
293 }
294
295 sx_init(&lf_owner_graph_lock, "owner graph lock");
296 graph_init(&lf_owner_graph);
297 }
298 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
299
300 /*
301 * Generate a hash value for a lock owner.
302 */
303 static int
304 lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
305 {
306 uint32_t h;
307
308 if (flags & F_REMOTE) {
309 h = HASHSTEP(0, fl->l_pid);
310 h = HASHSTEP(h, fl->l_sysid);
311 } else if (flags & F_FLOCK) {
312 h = ((uintptr_t) id) >> 7;
313 } else {
314 h = ((uintptr_t) vp) >> 7;
315 }
316
317 return (h % LOCK_OWNER_HASH_SIZE);
318 }
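/*
 * Note: for F_FLOCK locks the id is a struct file pointer and is
 * hashed directly; for local POSIX locks the vnode pointer is hashed
 * instead, so all owners contending for one file land on the same
 * chain. The right shift by 7 is presumably there to discard the
 * low-order pointer bits, which vary little due to allocator
 * alignment.
 */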
319
320 /*
321 * Return true if a lock owner matches the details passed to
322 * lf_advlock.
323 */
324 static int
325 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
326 int flags)
327 {
328 if (flags & F_REMOTE) {
329 return lo->lo_pid == fl->l_pid
330 && lo->lo_sysid == fl->l_sysid;
331 } else {
332 return lo->lo_id == id;
333 }
334 }
335
336 static struct lockf_entry *
337 lf_alloc_lock(struct lock_owner *lo)
338 {
339 struct lockf_entry *lf;
340
341 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
342
343 #ifdef LOCKF_DEBUG
344 if (lockf_debug & 4)
345 printf("Allocated lock %p\n", lf);
346 #endif
347 if (lo) {
348 sx_xlock(&lf_lock_owners[lo->lo_hash].lock);
349 lo->lo_refs++;
350 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock);
351 lf->lf_owner = lo;
352 }
353
354 return (lf);
355 }
356
357 static int
358 lf_free_lock(struct lockf_entry *lock)
359 {
360 struct sx *chainlock;
361
362 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
363 if (--lock->lf_refs > 0)
364 return (0);
365 /*
366 * Adjust the lock_owner reference count and
367 * reclaim the entry if this is the last lock
368 * for that owner.
369 */
370 struct lock_owner *lo = lock->lf_owner;
371 if (lo) {
372 KASSERT(LIST_EMPTY(&lock->lf_outedges),
373 ("freeing lock with dependencies"));
374 KASSERT(LIST_EMPTY(&lock->lf_inedges),
375 ("freeing lock with dependants"));
376 chainlock = &lf_lock_owners[lo->lo_hash].lock;
377 sx_xlock(chainlock);
378 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
379 lo->lo_refs--;
380 if (lo->lo_refs == 0) {
381 #ifdef LOCKF_DEBUG
382 if (lockf_debug & 1)
383 printf("lf_free_lock: freeing lock owner %p\n",
384 lo);
385 #endif
386 if (lo->lo_vertex) {
387 sx_xlock(&lf_owner_graph_lock);
388 graph_free_vertex(&lf_owner_graph,
389 lo->lo_vertex);
390 sx_xunlock(&lf_owner_graph_lock);
391 }
392 LIST_REMOVE(lo, lo_link);
393 free(lo, M_LOCKF);
394 #ifdef LOCKF_DEBUG
395 if (lockf_debug & 4)
396 printf("Freed lock owner %p\n", lo);
397 #endif
398 }
399 sx_unlock(chainlock);
400 }
401 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
402 vrele(lock->lf_vnode);
403 lock->lf_vnode = NULL;
404 }
405 #ifdef LOCKF_DEBUG
406 if (lockf_debug & 4)
407 printf("Freed lock %p\n", lock);
408 #endif
409 free(lock, M_LOCKF);
410 return (1);
411 }
412
413 /*
414 * Advisory record locking support
415 */
416 int
417 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
418 u_quad_t size)
419 {
420 struct lockf *state;
421 struct flock *fl = ap->a_fl;
422 struct lockf_entry *lock;
423 struct vnode *vp = ap->a_vp;
424 caddr_t id = ap->a_id;
425 int flags = ap->a_flags;
426 int hash;
427 struct lock_owner *lo;
428 off_t start, end, oadd;
429 int error;
430
431 /*
432 	 * Handle the F_UNLCKSYS case first - no need to mess about
433 * creating a lock owner for this one.
434 */
435 if (ap->a_op == F_UNLCKSYS) {
436 lf_clearremotesys(fl->l_sysid);
437 return (0);
438 }
439
440 /*
441 * Convert the flock structure into a start and end.
442 */
443 switch (fl->l_whence) {
444
445 case SEEK_SET:
446 case SEEK_CUR:
447 /*
448 * Caller is responsible for adding any necessary offset
449 * when SEEK_CUR is used.
450 */
451 start = fl->l_start;
452 break;
453
454 case SEEK_END:
455 if (size > OFF_MAX ||
456 (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
457 return (EOVERFLOW);
458 start = size + fl->l_start;
459 break;
460
461 default:
462 return (EINVAL);
463 }
464 if (start < 0)
465 return (EINVAL);
466 if (fl->l_len < 0) {
467 if (start == 0)
468 return (EINVAL);
469 end = start - 1;
470 start += fl->l_len;
471 if (start < 0)
472 return (EINVAL);
473 } else if (fl->l_len == 0) {
474 end = OFF_MAX;
475 } else {
476 oadd = fl->l_len - 1;
477 if (oadd > OFF_MAX - start)
478 return (EOVERFLOW);
479 end = start + oadd;
480 }
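/*
 * Worked examples of the conversion above (with l_whence == SEEK_SET):
 *
 *	l_start = 100, l_len = 10	=>  [100 .. 109]
 *	l_start = 100, l_len = 0	=>  [100 .. OFF_MAX]	(to EOF)
 *	l_start = 100, l_len = -10	=>  [90 .. 99]
 *
 * A negative l_len locks the |l_len| bytes ending just before
 * l_start, which is why 'end' is computed first (start - 1) and
 * 'start' is then moved back by l_len.
 */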
481
482 retry_setlock:
483
484 /*
485 * Avoid the common case of unlocking when inode has no locks.
486 */
487 if (ap->a_op != F_SETLK && (*statep) == NULL) {
488 VI_LOCK(vp);
489 if ((*statep) == NULL) {
490 fl->l_type = F_UNLCK;
491 VI_UNLOCK(vp);
492 return (0);
493 }
494 VI_UNLOCK(vp);
495 }
496
497 /*
498 * Map our arguments to an existing lock owner or create one
499 * if this is the first time we have seen this owner.
500 */
501 hash = lf_hash_owner(id, vp, fl, flags);
502 sx_xlock(&lf_lock_owners[hash].lock);
503 LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
504 if (lf_owner_matches(lo, id, fl, flags))
505 break;
506 if (!lo) {
507 /*
508 		 * We initialise the lock owner with a reference
509 * count which matches the new lockf_entry
510 * structure created below.
511 */
512 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
513 M_WAITOK|M_ZERO);
514 #ifdef LOCKF_DEBUG
515 if (lockf_debug & 4)
516 printf("Allocated lock owner %p\n", lo);
517 #endif
518
519 lo->lo_refs = 1;
520 lo->lo_flags = flags;
521 lo->lo_id = id;
522 lo->lo_hash = hash;
523 if (flags & F_REMOTE) {
524 lo->lo_pid = fl->l_pid;
525 lo->lo_sysid = fl->l_sysid;
526 } else if (flags & F_FLOCK) {
527 lo->lo_pid = -1;
528 lo->lo_sysid = 0;
529 } else {
530 struct proc *p = (struct proc *) id;
531 lo->lo_pid = p->p_pid;
532 lo->lo_sysid = 0;
533 }
534 lo->lo_vertex = NULL;
535
536 #ifdef LOCKF_DEBUG
537 if (lockf_debug & 1) {
538 printf("lf_advlockasync: new lock owner %p ", lo);
539 lf_print_owner(lo);
540 printf("\n");
541 }
542 #endif
543
544 LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link);
545 } else {
546 /*
547 * We have seen this lock owner before, increase its
548 * reference count to account for the new lockf_entry
549 * structure we create below.
550 */
551 lo->lo_refs++;
552 }
553 sx_xunlock(&lf_lock_owners[hash].lock);
554
555 /*
556 * Create the lockf structure. We initialise the lf_owner
557 * field here instead of in lf_alloc_lock() to avoid paying
558 	 * the lock owner hash chain lock tax twice.
559 */
560 lock = lf_alloc_lock(NULL);
561 lock->lf_refs = 1;
562 lock->lf_start = start;
563 lock->lf_end = end;
564 lock->lf_owner = lo;
565 lock->lf_vnode = vp;
566 if (flags & F_REMOTE) {
567 /*
568 * For remote locks, the caller may release its ref to
569 * the vnode at any time - we have to ref it here to
570 * prevent it from being recycled unexpectedly.
571 */
572 vref(vp);
573 }
574
575 /*
576 * XXX The problem is that VTOI is ufs specific, so it will
577 * break LOCKF_DEBUG for all other FS's other than UFS because
578 * it casts the vnode->data ptr to struct inode *.
579 */
580 /* lock->lf_inode = VTOI(ap->a_vp); */
581 lock->lf_inode = (struct inode *)0;
582 lock->lf_type = fl->l_type;
583 LIST_INIT(&lock->lf_outedges);
584 LIST_INIT(&lock->lf_inedges);
585 lock->lf_async_task = ap->a_task;
586 lock->lf_flags = ap->a_flags;
587
588 /*
589 * Do the requested operation. First find our state structure
590 * and create a new one if necessary - the caller's *statep
591 * variable and the state's ls_threads count is protected by
592 * the vnode interlock.
593 */
594 VI_LOCK(vp);
595 if (vp->v_iflag & VI_DOOMED) {
596 VI_UNLOCK(vp);
597 lf_free_lock(lock);
598 return (ENOENT);
599 }
600
601 /*
602 * Allocate a state structure if necessary.
603 */
604 state = *statep;
605 if (state == NULL) {
606 struct lockf *ls;
607
608 VI_UNLOCK(vp);
609
610 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
611 sx_init(&ls->ls_lock, "ls_lock");
612 LIST_INIT(&ls->ls_active);
613 LIST_INIT(&ls->ls_pending);
614 ls->ls_threads = 1;
615
616 sx_xlock(&lf_lock_states_lock);
617 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
618 sx_xunlock(&lf_lock_states_lock);
619
620 /*
621 * Cope if we lost a race with some other thread while
622 * trying to allocate memory.
623 */
624 VI_LOCK(vp);
625 if (vp->v_iflag & VI_DOOMED) {
626 VI_UNLOCK(vp);
627 sx_xlock(&lf_lock_states_lock);
628 LIST_REMOVE(ls, ls_link);
629 sx_xunlock(&lf_lock_states_lock);
630 sx_destroy(&ls->ls_lock);
631 free(ls, M_LOCKF);
632 lf_free_lock(lock);
633 return (ENOENT);
634 }
635 if ((*statep) == NULL) {
636 state = *statep = ls;
637 VI_UNLOCK(vp);
638 } else {
639 state = *statep;
640 state->ls_threads++;
641 VI_UNLOCK(vp);
642
643 sx_xlock(&lf_lock_states_lock);
644 LIST_REMOVE(ls, ls_link);
645 sx_xunlock(&lf_lock_states_lock);
646 sx_destroy(&ls->ls_lock);
647 free(ls, M_LOCKF);
648 }
649 } else {
650 state->ls_threads++;
651 VI_UNLOCK(vp);
652 }
653
654 sx_xlock(&state->ls_lock);
655 /*
656 * Recheck the doomed vnode after state->ls_lock is
657 * locked. lf_purgelocks() requires that no new threads add
658 * pending locks when vnode is marked by VI_DOOMED flag.
659 */
660 VI_LOCK(vp);
661 if (vp->v_iflag & VI_DOOMED) {
662 state->ls_threads--;
663 wakeup(state);
664 VI_UNLOCK(vp);
665 sx_xunlock(&state->ls_lock);
666 lf_free_lock(lock);
667 return (ENOENT);
668 }
669 VI_UNLOCK(vp);
670
671 switch (ap->a_op) {
672 case F_SETLK:
673 error = lf_setlock(state, lock, vp, ap->a_cookiep);
674 break;
675
676 case F_UNLCK:
677 error = lf_clearlock(state, lock);
678 lf_free_lock(lock);
679 break;
680
681 case F_GETLK:
682 error = lf_getlock(state, lock, fl);
683 lf_free_lock(lock);
684 break;
685
686 case F_CANCEL:
687 if (ap->a_cookiep)
688 error = lf_cancel(state, lock, *ap->a_cookiep);
689 else
690 error = EINVAL;
691 lf_free_lock(lock);
692 break;
693
694 default:
695 lf_free_lock(lock);
696 error = EINVAL;
697 break;
698 }
699
700 #ifdef DIAGNOSTIC
701 /*
702 	 * Check for some "can't happen" conditions. In this case, the active
703 * lock list becoming disordered or containing mutually
704 * blocking locks. We also check the pending list for locks
705 * which should be active (i.e. have no out-going edges).
706 */
707 LIST_FOREACH(lock, &state->ls_active, lf_link) {
708 struct lockf_entry *lf;
709 if (LIST_NEXT(lock, lf_link))
710 KASSERT((lock->lf_start
711 <= LIST_NEXT(lock, lf_link)->lf_start),
712 ("locks disordered"));
713 LIST_FOREACH(lf, &state->ls_active, lf_link) {
714 if (lock == lf)
715 break;
716 KASSERT(!lf_blocks(lock, lf),
717 ("two conflicting active locks"));
718 if (lock->lf_owner == lf->lf_owner)
719 KASSERT(!lf_overlaps(lock, lf),
720 ("two overlapping locks from same owner"));
721 }
722 }
723 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
724 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
725 ("pending lock which should be active"));
726 }
727 #endif
728 sx_xunlock(&state->ls_lock);
729
730 VI_LOCK(vp);
731
732 state->ls_threads--;
733 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
734 KASSERT(LIST_EMPTY(&state->ls_pending),
735 ("freeable state with pending locks"));
736 } else {
737 wakeup(state);
738 }
739
740 VI_UNLOCK(vp);
741
742 if (error == EDOOFUS) {
743 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
744 goto retry_setlock;
745 }
746 return (error);
747 }
748
749 int
750 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
751 {
752 struct vop_advlockasync_args a;
753
754 a.a_vp = ap->a_vp;
755 a.a_id = ap->a_id;
756 a.a_op = ap->a_op;
757 a.a_fl = ap->a_fl;
758 a.a_flags = ap->a_flags;
759 a.a_task = NULL;
760 a.a_cookiep = NULL;
761
762 return (lf_advlockasync(&a, statep, size));
763 }
764
765 void
766 lf_purgelocks(struct vnode *vp, struct lockf **statep)
767 {
768 struct lockf *state;
769 struct lockf_entry *lock, *nlock;
770
771 /*
772 * For this to work correctly, the caller must ensure that no
773 * other threads enter the locking system for this vnode,
774 * e.g. by checking VI_DOOMED. We wake up any threads that are
775 * sleeping waiting for locks on this vnode and then free all
776 * the remaining locks.
777 */
778 VI_LOCK(vp);
779 KASSERT(vp->v_iflag & VI_DOOMED,
780 ("lf_purgelocks: vp %p has not vgone yet", vp));
781 state = *statep;
782 if (state == NULL) {
783 VI_UNLOCK(vp);
784 return;
785 }
786 *statep = NULL;
787 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
788 KASSERT(LIST_EMPTY(&state->ls_pending),
789 ("freeing state with pending locks"));
790 VI_UNLOCK(vp);
791 goto out_free;
792 }
793 state->ls_threads++;
794 VI_UNLOCK(vp);
795
796 sx_xlock(&state->ls_lock);
797 sx_xlock(&lf_owner_graph_lock);
798 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
799 LIST_REMOVE(lock, lf_link);
800 lf_remove_outgoing(lock);
801 lf_remove_incoming(lock);
802
803 /*
804 		 * If it's an async lock, we can just free it
805 * here, otherwise we let the sleeping thread
806 * free it.
807 */
808 if (lock->lf_async_task) {
809 lf_free_lock(lock);
810 } else {
811 lock->lf_flags |= F_INTR;
812 wakeup(lock);
813 }
814 }
815 sx_xunlock(&lf_owner_graph_lock);
816 sx_xunlock(&state->ls_lock);
817
818 /*
819 	 * Wait for all other threads, sleeping and otherwise,
820 	 * to leave.
821 */
822 VI_LOCK(vp);
823 while (state->ls_threads > 1)
824 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
825 VI_UNLOCK(vp);
826
827 /*
828 * We can just free all the active locks since they
829 * will have no dependencies (we removed them all
830 * above). We don't need to bother locking since we
831 * are the last thread using this state structure.
832 */
833 KASSERT(LIST_EMPTY(&state->ls_pending),
834 ("lock pending for %p", state));
835 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
836 LIST_REMOVE(lock, lf_link);
837 lf_free_lock(lock);
838 }
839 out_free:
840 sx_xlock(&lf_lock_states_lock);
841 LIST_REMOVE(state, ls_link);
842 sx_xunlock(&lf_lock_states_lock);
843 sx_destroy(&state->ls_lock);
844 free(state, M_LOCKF);
845 }
846
847 /*
848 * Return non-zero if locks 'x' and 'y' overlap.
849 */
850 static int
851 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
852 {
853
854 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
855 }
856
857 /*
858 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
859 */
860 static int
861 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
862 {
863
864 return x->lf_owner != y->lf_owner
865 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
866 && lf_overlaps(x, y);
867 }
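/*
 * For example, x = [0..4] and y = [4..8] overlap in the single byte
 * at offset 4, so lf_overlaps() is true for them; they block each
 * other only if they also have different owners and at least one of
 * the two is a write lock.
 */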
868
869 /*
870 * Allocate a lock edge from the free list
871 */
872 static struct lockf_edge *
873 lf_alloc_edge(void)
874 {
875
876 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
877 }
878
879 /*
880 * Free a lock edge.
881 */
882 static void
883 lf_free_edge(struct lockf_edge *e)
884 {
885
886 free(e, M_LOCKF);
887 }
888
889
890 /*
891 * Ensure that the lock's owner has a corresponding vertex in the
892 * owner graph.
893 */
894 static void
895 lf_alloc_vertex(struct lockf_entry *lock)
896 {
897 struct owner_graph *g = &lf_owner_graph;
898
899 if (!lock->lf_owner->lo_vertex)
900 lock->lf_owner->lo_vertex =
901 graph_alloc_vertex(g, lock->lf_owner);
902 }
903
904 /*
905 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
906 * the new edge would cause a cycle in the owner graph.
907 */
908 static int
909 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
910 {
911 struct owner_graph *g = &lf_owner_graph;
912 struct lockf_edge *e;
913 int error;
914
915 #ifdef DIAGNOSTIC
916 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
917 KASSERT(e->le_to != y, ("adding lock edge twice"));
918 #endif
919
920 /*
921 * Make sure the two owners have entries in the owner graph.
922 */
923 lf_alloc_vertex(x);
924 lf_alloc_vertex(y);
925
926 error = graph_add_edge(g, x->lf_owner->lo_vertex,
927 y->lf_owner->lo_vertex);
928 if (error)
929 return (error);
930
931 e = lf_alloc_edge();
932 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
933 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
934 e->le_from = x;
935 e->le_to = y;
936
937 return (0);
938 }
939
940 /*
941 * Remove an edge from the lock graph.
942 */
943 static void
944 lf_remove_edge(struct lockf_edge *e)
945 {
946 struct owner_graph *g = &lf_owner_graph;
947 struct lockf_entry *x = e->le_from;
948 struct lockf_entry *y = e->le_to;
949
950 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
951 LIST_REMOVE(e, le_outlink);
952 LIST_REMOVE(e, le_inlink);
953 e->le_from = NULL;
954 e->le_to = NULL;
955 lf_free_edge(e);
956 }
957
958 /*
959 * Remove all out-going edges from lock x.
960 */
961 static void
962 lf_remove_outgoing(struct lockf_entry *x)
963 {
964 struct lockf_edge *e;
965
966 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
967 lf_remove_edge(e);
968 }
969 }
970
971 /*
972 * Remove all in-coming edges from lock x.
973 */
974 static void
975 lf_remove_incoming(struct lockf_entry *x)
976 {
977 struct lockf_edge *e;
978
979 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
980 lf_remove_edge(e);
981 }
982 }
983
984 /*
985 * Walk the list of locks for the file and create an out-going edge
986 * from lock to each blocking lock.
987 */
988 static int
989 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
990 {
991 struct lockf_entry *overlap;
992 int error;
993
994 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
995 /*
996 * We may assume that the active list is sorted by
997 * lf_start.
998 */
999 if (overlap->lf_start > lock->lf_end)
1000 break;
1001 if (!lf_blocks(lock, overlap))
1002 continue;
1003
1004 /*
1005 * We've found a blocking lock. Add the corresponding
1006 * edge to the graphs and see if it would cause a
1007 * deadlock.
1008 */
1009 error = lf_add_edge(lock, overlap);
1010
1011 /*
1012 * The only error that lf_add_edge returns is EDEADLK.
1013 * Remove any edges we added and return the error.
1014 */
1015 if (error) {
1016 lf_remove_outgoing(lock);
1017 return (error);
1018 }
1019 }
1020
1021 /*
1022 * We also need to add edges to sleeping locks that block
1023 * us. This ensures that lf_wakeup_lock cannot grant two
1024 * mutually blocking locks simultaneously and also enforces a
1025 * 'first come, first served' fairness model. Note that this
1026 * only happens if we are blocked by at least one active lock
1027 * due to the call to lf_getblock in lf_setlock below.
1028 */
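	/*
	 * Concretely: if A holds a write lock on [0..10], B's request
	 * for [0..10] is pending with the edge B->A, and a new request
	 * C for [0..10] arrives, then C is blocked by the active lock A
	 * and so takes edges to both A and B. When A unlocks, only B is
	 * left with no out-going edges and is granted; C keeps waiting
	 * on B instead of jumping the queue.
	 */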
1029 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1030 if (!lf_blocks(lock, overlap))
1031 continue;
1032 /*
1033 * We've found a blocking lock. Add the corresponding
1034 * edge to the graphs and see if it would cause a
1035 * deadlock.
1036 */
1037 error = lf_add_edge(lock, overlap);
1038
1039 /*
1040 * The only error that lf_add_edge returns is EDEADLK.
1041 * Remove any edges we added and return the error.
1042 */
1043 if (error) {
1044 lf_remove_outgoing(lock);
1045 return (error);
1046 }
1047 }
1048
1049 return (0);
1050 }
1051
1052 /*
1053 * Walk the list of pending locks for the file and create an in-coming
1054 * edge from lock to each blocking lock.
1055 */
1056 static int
1057 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1058 {
1059 struct lockf_entry *overlap;
1060 int error;
1061
1062 sx_assert(&state->ls_lock, SX_XLOCKED);
1063 if (LIST_EMPTY(&state->ls_pending))
1064 return (0);
1065
1066 error = 0;
1067 sx_xlock(&lf_owner_graph_lock);
1068 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1069 if (!lf_blocks(lock, overlap))
1070 continue;
1071
1072 /*
1073 * We've found a blocking lock. Add the corresponding
1074 * edge to the graphs and see if it would cause a
1075 * deadlock.
1076 */
1077 error = lf_add_edge(overlap, lock);
1078
1079 /*
1080 * The only error that lf_add_edge returns is EDEADLK.
1081 * Remove any edges we added and return the error.
1082 */
1083 if (error) {
1084 lf_remove_incoming(lock);
1085 break;
1086 }
1087 }
1088 sx_xunlock(&lf_owner_graph_lock);
1089 return (error);
1090 }
1091
1092 /*
1093 * Insert lock into the active list, keeping list entries ordered by
1094 * increasing values of lf_start.
1095 */
1096 static void
1097 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1098 {
1099 struct lockf_entry *lf, *lfprev;
1100
1101 if (LIST_EMPTY(&state->ls_active)) {
1102 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1103 return;
1104 }
1105
1106 lfprev = NULL;
1107 LIST_FOREACH(lf, &state->ls_active, lf_link) {
1108 if (lf->lf_start > lock->lf_start) {
1109 LIST_INSERT_BEFORE(lf, lock, lf_link);
1110 return;
1111 }
1112 lfprev = lf;
1113 }
1114 LIST_INSERT_AFTER(lfprev, lock, lf_link);
1115 }
1116
1117 /*
1118 * Wake up a sleeping lock and remove it from the pending list now
1119 * that all its dependencies have been resolved. The caller should
1120 * arrange for the lock to be added to the active list, adjusting any
1121 * existing locks for the same owner as needed.
1122 */
1123 static void
1124 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1125 {
1126
1127 /*
1128 * Remove from ls_pending list and wake up the caller
1129 * or start the async notification, as appropriate.
1130 */
1131 LIST_REMOVE(wakelock, lf_link);
1132 #ifdef LOCKF_DEBUG
1133 if (lockf_debug & 1)
1134 lf_print("lf_wakeup_lock: awakening", wakelock);
1135 #endif /* LOCKF_DEBUG */
1136 if (wakelock->lf_async_task) {
1137 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1138 } else {
1139 wakeup(wakelock);
1140 }
1141 }
1142
1143 /*
1144 * Re-check all dependent locks and remove edges to locks that we no
1145 * longer block. If 'all' is non-zero, the lock has been removed and
1146 * we must remove all the dependencies, otherwise it has simply been
1147  * reduced but remains active. Any pending locks which have been
1148  * unblocked are added to 'granted'.
1149 */
1150 static void
1151 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1152 struct lockf_entry_list *granted)
1153 {
1154 struct lockf_edge *e, *ne;
1155 struct lockf_entry *deplock;
1156
1157 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1158 deplock = e->le_from;
1159 if (all || !lf_blocks(lock, deplock)) {
1160 sx_xlock(&lf_owner_graph_lock);
1161 lf_remove_edge(e);
1162 sx_xunlock(&lf_owner_graph_lock);
1163 if (LIST_EMPTY(&deplock->lf_outedges)) {
1164 lf_wakeup_lock(state, deplock);
1165 LIST_INSERT_HEAD(granted, deplock, lf_link);
1166 }
1167 }
1168 }
1169 }
1170
1171 /*
1172 * Set the start of an existing active lock, updating dependencies and
1173 * adding any newly woken locks to 'granted'.
1174 */
1175 static void
1176 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1177 struct lockf_entry_list *granted)
1178 {
1179
1180 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1181 lock->lf_start = new_start;
1182 LIST_REMOVE(lock, lf_link);
1183 lf_insert_lock(state, lock);
1184 lf_update_dependancies(state, lock, FALSE, granted);
1185 }
1186
1187 /*
1188 * Set the end of an existing active lock, updating dependencies and
1189 * adding any newly woken locks to 'granted'.
1190 */
1191 static void
1192 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1193 struct lockf_entry_list *granted)
1194 {
1195
1196 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1197 lock->lf_end = new_end;
1198 lf_update_dependancies(state, lock, FALSE, granted);
1199 }
1200
1201 /*
1202 * Add a lock to the active list, updating or removing any current
1203 * locks owned by the same owner and processing any pending locks that
1204 * become unblocked as a result. This code is also used for unlock
1205 * since the logic for updating existing locks is identical.
1206 *
1207 * As a result of processing the new lock, we may unblock existing
1208 * pending locks as a result of downgrading/unlocking. We simply
1209 * activate the newly granted locks by looping.
1210 *
1211 * Since the new lock already has its dependencies set up, we always
1212  * add it to the list (unless it's an unlock request). This may
1213  * fragment the lock list in some pathological cases but it's probably
1214 * not a real problem.
1215 */
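/*
 * For instance, when an owner downgrades a write lock [0..100] to a
 * read lock over the same range, case 1 below discards the old entry
 * and any pending readers whose only out-going edge pointed at the
 * old write lock are collected on 'granted' and activated by this
 * same loop.
 */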
1216 static void
1217 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1218 {
1219 struct lockf_entry *overlap, *lf;
1220 struct lockf_entry_list granted;
1221 int ovcase;
1222
1223 LIST_INIT(&granted);
1224 LIST_INSERT_HEAD(&granted, lock, lf_link);
1225
1226 while (!LIST_EMPTY(&granted)) {
1227 lock = LIST_FIRST(&granted);
1228 LIST_REMOVE(lock, lf_link);
1229
1230 /*
1231 * Skip over locks owned by other processes. Handle
1232 * any locks that overlap and are owned by ourselves.
1233 */
1234 overlap = LIST_FIRST(&state->ls_active);
1235 for (;;) {
1236 ovcase = lf_findoverlap(&overlap, lock, SELF);
1237
1238 #ifdef LOCKF_DEBUG
1239 if (ovcase && (lockf_debug & 2)) {
1240 printf("lf_setlock: overlap %d", ovcase);
1241 lf_print("", overlap);
1242 }
1243 #endif
1244 /*
1245 * Six cases:
1246 * 0) no overlap
1247 * 1) overlap == lock
1248 * 2) overlap contains lock
1249 * 3) lock contains overlap
1250 * 4) overlap starts before lock
1251 * 5) overlap ends after lock
1252 */
1253 switch (ovcase) {
1254 case 0: /* no overlap */
1255 break;
1256
1257 case 1: /* overlap == lock */
1258 /*
1259 			 * We have already set up the
1260 * dependants for the new lock, taking
1261 * into account a possible downgrade
1262 * or unlock. Remove the old lock.
1263 */
1264 LIST_REMOVE(overlap, lf_link);
1265 lf_update_dependancies(state, overlap, TRUE,
1266 &granted);
1267 lf_free_lock(overlap);
1268 break;
1269
1270 case 2: /* overlap contains lock */
1271 /*
1272 * Just split the existing lock.
1273 */
1274 lf_split(state, overlap, lock, &granted);
1275 break;
1276
1277 case 3: /* lock contains overlap */
1278 /*
1279 * Delete the overlap and advance to
1280 * the next entry in the list.
1281 */
1282 lf = LIST_NEXT(overlap, lf_link);
1283 LIST_REMOVE(overlap, lf_link);
1284 lf_update_dependancies(state, overlap, TRUE,
1285 &granted);
1286 lf_free_lock(overlap);
1287 overlap = lf;
1288 continue;
1289
1290 case 4: /* overlap starts before lock */
1291 /*
1292 * Just update the overlap end and
1293 * move on.
1294 */
1295 lf_set_end(state, overlap, lock->lf_start - 1,
1296 &granted);
1297 overlap = LIST_NEXT(overlap, lf_link);
1298 continue;
1299
1300 case 5: /* overlap ends after lock */
1301 /*
1302 * Change the start of overlap and
1303 * re-insert.
1304 */
1305 lf_set_start(state, overlap, lock->lf_end + 1,
1306 &granted);
1307 break;
1308 }
1309 break;
1310 }
1311 #ifdef LOCKF_DEBUG
1312 if (lockf_debug & 1) {
1313 if (lock->lf_type != F_UNLCK)
1314 lf_print("lf_activate_lock: activated", lock);
1315 else
1316 lf_print("lf_activate_lock: unlocked", lock);
1317 lf_printlist("lf_activate_lock", lock);
1318 }
1319 #endif /* LOCKF_DEBUG */
1320 if (lock->lf_type != F_UNLCK)
1321 lf_insert_lock(state, lock);
1322 }
1323 }
1324
1325 /*
1326 * Cancel a pending lock request, either as a result of a signal or a
1327 * cancel request for an async lock.
1328 */
1329 static void
1330 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1331 {
1332 struct lockf_entry_list granted;
1333
1334 /*
1335 * Note it is theoretically possible that cancelling this lock
1336 * may allow some other pending lock to become
1337 * active. Consider this case:
1338 *
1339 	 *	Owner	Action		Result		Dependencies
1340 	 *
1341 	 *	A:	lock [0..0]	succeeds
1342 	 *	B:	lock [2..2]	succeeds
1343 	 *	C:	lock [1..2]	blocked		C->B
1344 	 *	D:	lock [0..1]	blocked		C->B,D->A,D->C
1345 	 *	A:	unlock [0..0]			C->B,D->C
1346 	 *	C:	cancel [1..2]			(D->C removed; D may now be granted)
1347 */
1348
1349 LIST_REMOVE(lock, lf_link);
1350
1351 /*
1352 * Removing out-going edges is simple.
1353 */
1354 sx_xlock(&lf_owner_graph_lock);
1355 lf_remove_outgoing(lock);
1356 sx_xunlock(&lf_owner_graph_lock);
1357
1358 /*
1359 * Removing in-coming edges may allow some other lock to
1360 * become active - we use lf_update_dependancies to figure
1361 * this out.
1362 */
1363 LIST_INIT(&granted);
1364 lf_update_dependancies(state, lock, TRUE, &granted);
1365 lf_free_lock(lock);
1366
1367 /*
1368 * Feed any newly active locks to lf_activate_lock.
1369 */
1370 while (!LIST_EMPTY(&granted)) {
1371 lock = LIST_FIRST(&granted);
1372 LIST_REMOVE(lock, lf_link);
1373 lf_activate_lock(state, lock);
1374 }
1375 }
1376
1377 /*
1378 * Set a byte-range lock.
1379 */
1380 static int
1381 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1382 void **cookiep)
1383 {
1384 static char lockstr[] = "lockf";
1385 int error, priority, stops_deferred;
1386
1387 #ifdef LOCKF_DEBUG
1388 if (lockf_debug & 1)
1389 lf_print("lf_setlock", lock);
1390 #endif /* LOCKF_DEBUG */
1391
1392 /*
1393 * Set the priority
1394 */
1395 priority = PLOCK;
1396 if (lock->lf_type == F_WRLCK)
1397 priority += 4;
1398 if (!(lock->lf_flags & F_NOINTR))
1399 priority |= PCATCH;
1400 /*
1401 * Scan lock list for this file looking for locks that would block us.
1402 */
1403 if (lf_getblock(state, lock)) {
1404 /*
1405 * Free the structure and return if nonblocking.
1406 */
1407 if ((lock->lf_flags & F_WAIT) == 0
1408 && lock->lf_async_task == NULL) {
1409 lf_free_lock(lock);
1410 error = EAGAIN;
1411 goto out;
1412 }
1413
1414 /*
1415 * For flock type locks, we must first remove
1416 * any shared locks that we hold before we sleep
1417 * waiting for an exclusive lock.
1418 */
1419 if ((lock->lf_flags & F_FLOCK) &&
1420 lock->lf_type == F_WRLCK) {
1421 lock->lf_type = F_UNLCK;
1422 lf_activate_lock(state, lock);
1423 lock->lf_type = F_WRLCK;
1424 }
1425
1426 /*
1427 * We are blocked. Create edges to each blocking lock,
1428 * checking for deadlock using the owner graph. For
1429 * simplicity, we run deadlock detection for all
1430 * locks, posix and otherwise.
1431 */
1432 sx_xlock(&lf_owner_graph_lock);
1433 error = lf_add_outgoing(state, lock);
1434 sx_xunlock(&lf_owner_graph_lock);
1435
1436 if (error) {
1437 #ifdef LOCKF_DEBUG
1438 if (lockf_debug & 1)
1439 lf_print("lf_setlock: deadlock", lock);
1440 #endif
1441 lf_free_lock(lock);
1442 goto out;
1443 }
1444
1445 /*
1446 * We have added edges to everything that blocks
1447 * us. Sleep until they all go away.
1448 */
1449 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1450 #ifdef LOCKF_DEBUG
1451 if (lockf_debug & 1) {
1452 struct lockf_edge *e;
1453 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1454 lf_print("lf_setlock: blocking on", e->le_to);
1455 lf_printlist("lf_setlock", e->le_to);
1456 }
1457 }
1458 #endif /* LOCKF_DEBUG */
1459
1460 if ((lock->lf_flags & F_WAIT) == 0) {
1461 /*
1462 * The caller requested async notification -
1463 * this callback happens when the blocking
1464 * lock is released, allowing the caller to
1465 * make another attempt to take the lock.
1466 */
1467 *cookiep = (void *) lock;
1468 error = EINPROGRESS;
1469 goto out;
1470 }
1471
1472 lock->lf_refs++;
1473 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
1474 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1475 sigallowstop(stops_deferred);
1476 if (lf_free_lock(lock)) {
1477 error = EDOOFUS;
1478 goto out;
1479 }
1480
1481 /*
1482 * We may have been awakened by a signal and/or by a
1483 * debugger continuing us (in which cases we must
1484 * remove our lock graph edges) and/or by another
1485 * process releasing a lock (in which case our edges
1486 * have already been removed and we have been moved to
1487 * the active list). We may also have been woken by
1488 * lf_purgelocks which we report to the caller as
1489 * EINTR. In that case, lf_purgelocks will have
1490 * removed our lock graph edges.
1491 *
1492 * Note that it is possible to receive a signal after
1493 * we were successfully woken (and moved to the active
1494 * list) but before we resumed execution. In this
1495 * case, our lf_outedges list will be clear. We
1496 * pretend there was no error.
1497 *
1498 * Note also, if we have been sleeping long enough, we
1499 * may now have incoming edges from some newer lock
1500 * which is waiting behind us in the queue.
1501 */
1502 if (lock->lf_flags & F_INTR) {
1503 error = EINTR;
1504 lf_free_lock(lock);
1505 goto out;
1506 }
1507 if (LIST_EMPTY(&lock->lf_outedges)) {
1508 error = 0;
1509 } else {
1510 lf_cancel_lock(state, lock);
1511 goto out;
1512 }
1513 #ifdef LOCKF_DEBUG
1514 if (lockf_debug & 1) {
1515 lf_print("lf_setlock: granted", lock);
1516 }
1517 #endif
1518 goto out;
1519 }
1520 /*
1521 * It looks like we are going to grant the lock. First add
1522 * edges from any currently pending lock that the new lock
1523 * would block.
1524 */
1525 error = lf_add_incoming(state, lock);
1526 if (error) {
1527 #ifdef LOCKF_DEBUG
1528 if (lockf_debug & 1)
1529 lf_print("lf_setlock: deadlock", lock);
1530 #endif
1531 lf_free_lock(lock);
1532 goto out;
1533 }
1534
1535 /*
1536 * No blocks!! Add the lock. Note that we will
1537 * downgrade or upgrade any overlapping locks this
1538 * process already owns.
1539 */
1540 lf_activate_lock(state, lock);
1541 error = 0;
1542 out:
1543 return (error);
1544 }
1545
1546 /*
1547 * Remove a byte-range lock on an inode.
1548 *
1549 * Generally, find the lock (or an overlap to that lock)
1550 * and remove it (or shrink it), then wakeup anyone we can.
1551 */
1552 static int
1553 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1554 {
1555 struct lockf_entry *overlap;
1556
1557 overlap = LIST_FIRST(&state->ls_active);
1558
1559 if (overlap == NOLOCKF)
1560 return (0);
1561 #ifdef LOCKF_DEBUG
1562 if (unlock->lf_type != F_UNLCK)
1563 panic("lf_clearlock: bad type");
1564 if (lockf_debug & 1)
1565 lf_print("lf_clearlock", unlock);
1566 #endif /* LOCKF_DEBUG */
1567
1568 lf_activate_lock(state, unlock);
1569
1570 return (0);
1571 }
1572
1573 /*
1574 * Check whether there is a blocking lock, and if so return its
1575 * details in '*fl'.
1576 */
1577 static int
1578 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1579 {
1580 struct lockf_entry *block;
1581
1582 #ifdef LOCKF_DEBUG
1583 if (lockf_debug & 1)
1584 lf_print("lf_getlock", lock);
1585 #endif /* LOCKF_DEBUG */
1586
1587 if ((block = lf_getblock(state, lock))) {
1588 fl->l_type = block->lf_type;
1589 fl->l_whence = SEEK_SET;
1590 fl->l_start = block->lf_start;
1591 if (block->lf_end == OFF_MAX)
1592 fl->l_len = 0;
1593 else
1594 fl->l_len = block->lf_end - block->lf_start + 1;
1595 fl->l_pid = block->lf_owner->lo_pid;
1596 fl->l_sysid = block->lf_owner->lo_sysid;
1597 } else {
1598 fl->l_type = F_UNLCK;
1599 }
1600 return (0);
1601 }
1602
1603 /*
1604 * Cancel an async lock request.
1605 */
1606 static int
1607 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1608 {
1609 struct lockf_entry *reallock;
1610
1611 /*
1612 * We need to match this request with an existing lock
1613 * request.
1614 */
1615 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1616 if ((void *) reallock == cookie) {
1617 /*
1618 * Double-check that this lock looks right
1619 * (maybe use a rolling ID for the cancel
1620 * cookie instead?)
1621 */
1622 if (!(reallock->lf_vnode == lock->lf_vnode
1623 && reallock->lf_start == lock->lf_start
1624 && reallock->lf_end == lock->lf_end)) {
1625 return (ENOENT);
1626 }
1627
1628 /*
1629 * Make sure this lock was async and then just
1630 * remove it from its wait lists.
1631 */
1632 if (!reallock->lf_async_task) {
1633 return (ENOENT);
1634 }
1635
1636 /*
1637 * Note that since any other thread must take
1638 * state->ls_lock before it can possibly
1639 * trigger the async callback, we are safe
1640 * from a race with lf_wakeup_lock, i.e. we
1641 * can free the lock (actually our caller does
1642 * this).
1643 */
1644 lf_cancel_lock(state, reallock);
1645 return (0);
1646 }
1647 }
1648
1649 /*
1650 * We didn't find a matching lock - not much we can do here.
1651 */
1652 return (ENOENT);
1653 }
1654
1655 /*
1656 * Walk the list of locks for an inode and
1657 * return the first blocking lock.
1658 */
1659 static struct lockf_entry *
1660 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1661 {
1662 struct lockf_entry *overlap;
1663
1664 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1665 /*
1666 * We may assume that the active list is sorted by
1667 * lf_start.
1668 */
1669 if (overlap->lf_start > lock->lf_end)
1670 break;
1671 if (!lf_blocks(lock, overlap))
1672 continue;
1673 return (overlap);
1674 }
1675 return (NOLOCKF);
1676 }
1677
1678 /*
1679 * Walk the list of locks for an inode to find an overlapping lock (if
1680 * any) and return a classification of that overlap.
1681 *
1682 * Arguments:
1683 * *overlap The place in the lock list to start looking
1684 * lock The lock which is being tested
1685 * type Pass 'SELF' to test only locks with the same
1686  *	owner as lock, or 'OTHERS' to test only locks
1687 * with a different owner
1688 *
1689 * Returns one of six values:
1690 * 0) no overlap
1691 * 1) overlap == lock
1692 * 2) overlap contains lock
1693 * 3) lock contains overlap
1694 * 4) overlap starts before lock
1695 * 5) overlap ends after lock
1696 *
1697 * If there is an overlapping lock, '*overlap' is set to point at the
1698 * overlapping lock.
1699 *
1700 * NOTE: this returns only the FIRST overlapping lock. There
1701 * may be more than one.
1702 */
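/*
 * Pictorially, with the lock under test drawn as ==== and the
 * overlap drawn as ----:
 *
 *	          lock:       ========
 *	1:     overlap:       --------		(identical range)
 *	2:     overlap:     ------------	(contains lock)
 *	3:     overlap:         ----		(inside lock)
 *	4:     overlap:     --------		(starts before lock)
 *	5:     overlap:          ---------	(ends after lock)
 */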
1703 static int
1704 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1705 {
1706 struct lockf_entry *lf;
1707 off_t start, end;
1708 int res;
1709
1710 if ((*overlap) == NOLOCKF) {
1711 return (0);
1712 }
1713 #ifdef LOCKF_DEBUG
1714 if (lockf_debug & 2)
1715 lf_print("lf_findoverlap: looking for overlap in", lock);
1716 #endif /* LOCKF_DEBUG */
1717 start = lock->lf_start;
1718 end = lock->lf_end;
1719 res = 0;
1720 while (*overlap) {
1721 lf = *overlap;
1722 if (lf->lf_start > end)
1723 break;
1724 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1725 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1726 *overlap = LIST_NEXT(lf, lf_link);
1727 continue;
1728 }
1729 #ifdef LOCKF_DEBUG
1730 if (lockf_debug & 2)
1731 lf_print("\tchecking", lf);
1732 #endif /* LOCKF_DEBUG */
1733 /*
1734 * OK, check for overlap
1735 *
1736 * Six cases:
1737 * 0) no overlap
1738 * 1) overlap == lock
1739 * 2) overlap contains lock
1740 * 3) lock contains overlap
1741 * 4) overlap starts before lock
1742 * 5) overlap ends after lock
1743 */
1744 if (start > lf->lf_end) {
1745 /* Case 0 */
1746 #ifdef LOCKF_DEBUG
1747 if (lockf_debug & 2)
1748 printf("no overlap\n");
1749 #endif /* LOCKF_DEBUG */
1750 *overlap = LIST_NEXT(lf, lf_link);
1751 continue;
1752 }
1753 if (lf->lf_start == start && lf->lf_end == end) {
1754 /* Case 1 */
1755 #ifdef LOCKF_DEBUG
1756 if (lockf_debug & 2)
1757 printf("overlap == lock\n");
1758 #endif /* LOCKF_DEBUG */
1759 res = 1;
1760 break;
1761 }
1762 if (lf->lf_start <= start && lf->lf_end >= end) {
1763 /* Case 2 */
1764 #ifdef LOCKF_DEBUG
1765 if (lockf_debug & 2)
1766 printf("overlap contains lock\n");
1767 #endif /* LOCKF_DEBUG */
1768 res = 2;
1769 break;
1770 }
1771 if (start <= lf->lf_start && end >= lf->lf_end) {
1772 /* Case 3 */
1773 #ifdef LOCKF_DEBUG
1774 if (lockf_debug & 2)
1775 printf("lock contains overlap\n");
1776 #endif /* LOCKF_DEBUG */
1777 res = 3;
1778 break;
1779 }
1780 if (lf->lf_start < start && lf->lf_end >= start) {
1781 /* Case 4 */
1782 #ifdef LOCKF_DEBUG
1783 if (lockf_debug & 2)
1784 printf("overlap starts before lock\n");
1785 #endif /* LOCKF_DEBUG */
1786 res = 4;
1787 break;
1788 }
1789 if (lf->lf_start > start && lf->lf_end > end) {
1790 /* Case 5 */
1791 #ifdef LOCKF_DEBUG
1792 if (lockf_debug & 2)
1793 printf("overlap ends after lock\n");
1794 #endif /* LOCKF_DEBUG */
1795 res = 5;
1796 break;
1797 }
1798 panic("lf_findoverlap: default");
1799 }
1800 return (res);
1801 }
1802
1803 /*
1804  * Split the existing 'lock1', based on the extent of the lock
1805 * described by 'lock2'. The existing lock should cover 'lock2'
1806 * entirely.
1807 *
1808  * Any pending locks which have been unblocked are added to
1809  * 'granted'.
1810 */
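/*
 * For example, splitting lock1 = [0..100] around lock2 = [40..60]
 * trims lock1 to [0..39] and creates a new entry for [61..100].
 * When the two ranges share a boundary (say lock2 = [0..60]), lock1
 * is simply trimmed to [61..100] and no new entry is needed.
 */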
1811 static void
1812 lf_split(struct lockf *state, struct lockf_entry *lock1,
1813 struct lockf_entry *lock2, struct lockf_entry_list *granted)
1814 {
1815 struct lockf_entry *splitlock;
1816
1817 #ifdef LOCKF_DEBUG
1818 if (lockf_debug & 2) {
1819 lf_print("lf_split", lock1);
1820 lf_print("splitting from", lock2);
1821 }
1822 #endif /* LOCKF_DEBUG */
1823 /*
1824 * Check to see if we don't need to split at all.
1825 */
1826 if (lock1->lf_start == lock2->lf_start) {
1827 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1828 return;
1829 }
1830 if (lock1->lf_end == lock2->lf_end) {
1831 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1832 return;
1833 }
1834 /*
1835 * Make a new lock consisting of the last part of
1836 * the encompassing lock.
1837 */
1838 splitlock = lf_alloc_lock(lock1->lf_owner);
1839 memcpy(splitlock, lock1, sizeof *splitlock);
1840 splitlock->lf_refs = 1;
1841 if (splitlock->lf_flags & F_REMOTE)
1842 vref(splitlock->lf_vnode);
1843
1844 /*
1845 * This cannot cause a deadlock since any edges we would add
1846 * to splitlock already exist in lock1. We must be sure to add
1847 * necessary dependencies to splitlock before we reduce lock1
1848 * otherwise we may accidentally grant a pending lock that
1849 * was blocked by the tail end of lock1.
1850 */
1851 splitlock->lf_start = lock2->lf_end + 1;
1852 LIST_INIT(&splitlock->lf_outedges);
1853 LIST_INIT(&splitlock->lf_inedges);
1854 lf_add_incoming(state, splitlock);
1855
1856 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1857
1858 /*
1859 * OK, now link it in
1860 */
1861 lf_insert_lock(state, splitlock);
1862 }
1863
1864 struct lockdesc {
1865 STAILQ_ENTRY(lockdesc) link;
1866 struct vnode *vp;
1867 struct flock fl;
1868 };
1869 STAILQ_HEAD(lockdesclist, lockdesc);
1870
1871 int
1872 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1873 {
1874 struct lockf *ls;
1875 struct lockf_entry *lf;
1876 struct lockdesc *ldesc;
1877 struct lockdesclist locks;
1878 int error;
1879
1880 /*
1881 * In order to keep the locking simple, we iterate over the
1882 * active lock lists to build a list of locks that need
1883 * releasing. We then call the iterator for each one in turn.
1884 *
1885 * We take an extra reference to the vnode for the duration to
1886 * make sure it doesn't go away before we are finished.
1887 */
1888 STAILQ_INIT(&locks);
1889 sx_xlock(&lf_lock_states_lock);
1890 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1891 sx_xlock(&ls->ls_lock);
1892 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1893 if (lf->lf_owner->lo_sysid != sysid)
1894 continue;
1895
1896 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1897 M_WAITOK);
1898 ldesc->vp = lf->lf_vnode;
1899 vref(ldesc->vp);
1900 ldesc->fl.l_start = lf->lf_start;
1901 if (lf->lf_end == OFF_MAX)
1902 ldesc->fl.l_len = 0;
1903 else
1904 ldesc->fl.l_len =
1905 lf->lf_end - lf->lf_start + 1;
1906 ldesc->fl.l_whence = SEEK_SET;
1907 ldesc->fl.l_type = F_UNLCK;
1908 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1909 ldesc->fl.l_sysid = sysid;
1910 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1911 }
1912 sx_xunlock(&ls->ls_lock);
1913 }
1914 sx_xunlock(&lf_lock_states_lock);
1915
1916 /*
1917 * Call the iterator function for each lock in turn. If the
1918 * iterator returns an error code, just free the rest of the
1919 * lockdesc structures.
1920 */
1921 error = 0;
1922 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1923 STAILQ_REMOVE_HEAD(&locks, link);
1924 if (!error)
1925 error = fn(ldesc->vp, &ldesc->fl, arg);
1926 vrele(ldesc->vp);
1927 free(ldesc, M_LOCKF);
1928 }
1929
1930 return (error);
1931 }
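/*
 * A minimal caller sketch (the callback matches the lf_iterator
 * typedef from sys/lockf.h; 'count_iterator' is a made-up name).
 * Returning non-zero from the callback stops further callbacks, but
 * the remaining lockdesc entries are still freed:
 *
 *	static int
 *	count_iterator(struct vnode *vp, struct flock *fl, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int n = 0;
 *	error = lf_iteratelocks_sysid(sysid, count_iterator, &n);
 */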
1932
1933 int
1934 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1935 {
1936 struct lockf *ls;
1937 struct lockf_entry *lf;
1938 struct lockdesc *ldesc;
1939 struct lockdesclist locks;
1940 int error;
1941
1942 /*
1943 * In order to keep the locking simple, we iterate over the
1944 * active lock lists to build a list of locks that need
1945 * releasing. We then call the iterator for each one in turn.
1946 *
1947 * We take an extra reference to the vnode for the duration to
1948 * make sure it doesn't go away before we are finished.
1949 */
1950 STAILQ_INIT(&locks);
1951 VI_LOCK(vp);
1952 ls = vp->v_lockf;
1953 if (!ls) {
1954 VI_UNLOCK(vp);
1955 return (0);
1956 }
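	/*
	 * Bump ls_threads so the lock state cannot be freed out from
	 * under us once the interlock is dropped.
	 */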
1957 ls->ls_threads++;
1958 VI_UNLOCK(vp);
1959
1960 sx_xlock(&ls->ls_lock);
1961 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1962 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1963 M_WAITOK);
1964 ldesc->vp = lf->lf_vnode;
1965 vref(ldesc->vp);
1966 ldesc->fl.l_start = lf->lf_start;
1967 if (lf->lf_end == OFF_MAX)
1968 ldesc->fl.l_len = 0;
1969 else
1970 ldesc->fl.l_len =
1971 lf->lf_end - lf->lf_start + 1;
1972 ldesc->fl.l_whence = SEEK_SET;
1973 ldesc->fl.l_type = F_UNLCK;
1974 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1975 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1976 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1977 }
1978 sx_xunlock(&ls->ls_lock);
1979 VI_LOCK(vp);
1980 ls->ls_threads--;
1981 wakeup(ls);
1982 VI_UNLOCK(vp);
1983
1984 /*
1985 * Call the iterator function for each lock in turn. If the
1986 * iterator returns an error code, just free the rest of the
1987 * lockdesc structures.
1988 */
1989 error = 0;
1990 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1991 STAILQ_REMOVE_HEAD(&locks, link);
1992 if (!error)
1993 error = fn(ldesc->vp, &ldesc->fl, arg);
1994 vrele(ldesc->vp);
1995 free(ldesc, M_LOCKF);
1996 }
1997
1998 return (error);
1999 }
2000
2001 static int
2002 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
2003 {
2004
2005 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
2006 return (0);
2007 }
2008
2009 void
2010 lf_clearremotesys(int sysid)
2011 {
2012
2013 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2014 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2015 }
2016
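/*
 * Count the lock-owner references belonging to the given system ID.
 * Each lock entry holds a reference on its owner, so this effectively
 * counts the locks held on behalf of that remote system.
 */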
2017 int
2018 lf_countlocks(int sysid)
2019 {
2020 int i;
2021 struct lock_owner *lo;
2022 int count;
2023
2024 count = 0;
2025 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
2026 sx_xlock(&lf_lock_owners[i].lock);
2027 LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link)
2028 if (lo->lo_sysid == sysid)
2029 count += lo->lo_refs;
2030 sx_xunlock(&lf_lock_owners[i].lock);
2031 }
2032
2033 return (count);
2034 }
2035
2036 #ifdef LOCKF_DEBUG
2037
2038 /*
2039 * Return non-zero if y is reachable from x using a brute force
2040 * search. If reachable and path is non-null, return the route taken
2041 * in path.
2042 */
2043 static int
2044 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2045 struct owner_vertex_list *path)
2046 {
2047 struct owner_edge *e;
2048
2049 if (x == y) {
2050 if (path)
2051 TAILQ_INSERT_HEAD(path, x, v_link);
2052 		return (1);
2053 }
2054
2055 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2056 if (graph_reaches(e->e_to, y, path)) {
2057 if (path)
2058 TAILQ_INSERT_HEAD(path, x, v_link);
2059 			return (1);
2060 		}
2061 	}
2062 	return (0);
2063 }
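/*
 * Note that graph_reaches() does no visited-marking, so it may revisit
 * vertices; that is tolerable for this LOCKF_DEBUG-only checker.
 */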
2064
2065 /*
2066 * Perform consistency checks on the graph. Make sure the values of
2067 * v_order are correct. If checkorder is non-zero, check no vertex can
2068 * reach any other vertex with a smaller order.
2069 */
2070 static void
2071 graph_check(struct owner_graph *g, int checkorder)
2072 {
2073 int i, j;
2074
2075 for (i = 0; i < g->g_size; i++) {
2076 if (!g->g_vertices[i]->v_owner)
2077 continue;
2078 KASSERT(g->g_vertices[i]->v_order == i,
2079 ("lock graph vertices disordered"));
2080 if (checkorder) {
2081 for (j = 0; j < i; j++) {
2082 if (!g->g_vertices[j]->v_owner)
2083 continue;
2084 KASSERT(!graph_reaches(g->g_vertices[i],
2085 g->g_vertices[j], NULL),
2086 ("lock graph vertices disordered"));
2087 }
2088 }
2089 }
2090 }
2091
2092 static void
2093 graph_print_vertices(struct owner_vertex_list *set)
2094 {
2095 struct owner_vertex *v;
2096
2097 printf("{ ");
2098 TAILQ_FOREACH(v, set, v_link) {
2099 printf("%d:", v->v_order);
2100 lf_print_owner(v->v_owner);
2101 if (TAILQ_NEXT(v, v_link))
2102 printf(", ");
2103 }
2104 printf(" }\n");
2105 }
2106
2107 #endif
2108
2109 /*
2110 * Calculate the subset of vertices v from the affected region [y..x]
2111 * where v is reachable from y. Return -1 if a loop was detected
2112 * (i.e. x is reachable from y), otherwise the number of vertices in
2113 * this subset.
2114 */
2115 static int
2116 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2117 struct owner_vertex *y, struct owner_vertex_list *delta)
2118 {
2119 uint32_t gen;
2120 struct owner_vertex *v;
2121 struct owner_edge *e;
2122 int n;
2123
2124 /*
2125 	 * We start with a set containing just y. Then for each vertex
2126 	 * v in the set not yet processed, we add each vertex that v
2127 * has an out-edge to and that is within the affected region
2128 * [y..x]. If we see the vertex x on our travels, stop
2129 * immediately.
2130 */
2131 TAILQ_INIT(delta);
2132 TAILQ_INSERT_TAIL(delta, y, v_link);
2133 v = y;
2134 n = 1;
2135 gen = g->g_gen;
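	/*
	 * The delta list doubles as the work queue: v scans the list
	 * while newly reached vertices are appended behind it.
	 */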
2136 while (v) {
2137 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2138 if (e->e_to == x)
2139 				return (-1);
2140 if (e->e_to->v_order < x->v_order
2141 && e->e_to->v_gen != gen) {
2142 e->e_to->v_gen = gen;
2143 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2144 n++;
2145 }
2146 }
2147 v = TAILQ_NEXT(v, v_link);
2148 }
2149
2150 return (n);
2151 }
2152
2153 /*
2154 * Calculate the subset of vertices v from the affected region [y..x]
2155 * where v reaches x. Return the number of vertices in this subset.
2156 */
2157 static int
2158 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2159 struct owner_vertex *y, struct owner_vertex_list *delta)
2160 {
2161 uint32_t gen;
2162 struct owner_vertex *v;
2163 struct owner_edge *e;
2164 int n;
2165
2166 /*
2167 	 * We start with a set containing just x. Then for each vertex
2168 	 * v in the set not yet processed, we add each vertex that v
2169 * has an in-edge from and that is within the affected region
2170 * [y..x].
2171 */
2172 TAILQ_INIT(delta);
2173 TAILQ_INSERT_TAIL(delta, x, v_link);
2174 v = x;
2175 n = 1;
2176 gen = g->g_gen;
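	/*
	 * As in graph_delta_forward(), delta doubles as the work queue,
	 * but new vertices are inserted at the head and v walks backwards
	 * to reach them.
	 */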
2177 while (v) {
2178 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2179 if (e->e_from->v_order > y->v_order
2180 && e->e_from->v_gen != gen) {
2181 e->e_from->v_gen = gen;
2182 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2183 n++;
2184 }
2185 }
2186 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2187 }
2188
2189 return (n);
2190 }
2191
2192 static int
2193 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2194 {
2195 struct owner_vertex *v;
2196 int i, j;
2197
2198 TAILQ_FOREACH(v, set, v_link) {
2199 for (i = n;
2200 i > 0 && indices[i - 1] > v->v_order; i--)
2201 ;
2202 for (j = n - 1; j >= i; j--)
2203 indices[j + 1] = indices[j];
2204 indices[i] = v->v_order;
2205 n++;
2206 }
2207
2208 return (n);
2209 }
2210
2211 static int
2212 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2213 struct owner_vertex_list *set)
2214 {
2215 struct owner_vertex *v, *vlowest;
2216
2217 while (!TAILQ_EMPTY(set)) {
2218 vlowest = NULL;
2219 TAILQ_FOREACH(v, set, v_link) {
2220 if (!vlowest || v->v_order < vlowest->v_order)
2221 vlowest = v;
2222 }
2223 TAILQ_REMOVE(set, vlowest, v_link);
2224 vlowest->v_order = indices[nextunused];
2225 g->g_vertices[vlowest->v_order] = vlowest;
2226 nextunused++;
2227 }
2228
2229 return (nextunused);
2230 }
2231
2232 static int
2233 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2234 struct owner_vertex *y)
2235 {
2236 struct owner_edge *e;
2237 struct owner_vertex_list deltaF, deltaB;
2238 int nF, n, vi, i;
2239 int *indices;
2240 int nB __unused;
2241
2242 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2243
2244 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2245 if (e->e_to == y) {
2246 e->e_refs++;
2247 return (0);
2248 }
2249 }
2250
2251 #ifdef LOCKF_DEBUG
2252 if (lockf_debug & 8) {
2253 printf("adding edge %d:", x->v_order);
2254 lf_print_owner(x->v_owner);
2255 printf(" -> %d:", y->v_order);
2256 lf_print_owner(y->v_owner);
2257 printf("\n");
2258 }
2259 #endif
2260 if (y->v_order < x->v_order) {
2261 /*
2262 * The new edge violates the order. First find the set
2263 * of affected vertices reachable from y (deltaF) and
2264 		 * the set of affected vertices that reach x
2265 * (deltaB), using the graph generation number to
2266 * detect whether we have visited a given vertex
2267 * already. We re-order the graph so that each vertex
2268 * in deltaB appears before each vertex in deltaF.
2269 *
2270 * If x is a member of deltaF, then the new edge would
2271 * create a cycle. Otherwise, we may assume that
2272 * deltaF and deltaB are disjoint.
2273 */
2274 g->g_gen++;
2275 if (g->g_gen == 0) {
2276 			/*
2277 			 * Generation wrap: clear every v_gen to avoid aliasing.
2278 			 */
2279 for (vi = 0; vi < g->g_size; vi++) {
2280 g->g_vertices[vi]->v_gen = 0;
2281 }
2282 g->g_gen++;
2283 }
2284 nF = graph_delta_forward(g, x, y, &deltaF);
2285 if (nF < 0) {
2286 #ifdef LOCKF_DEBUG
2287 if (lockf_debug & 8) {
2288 struct owner_vertex_list path;
2289 printf("deadlock: ");
2290 TAILQ_INIT(&path);
2291 graph_reaches(y, x, &path);
2292 graph_print_vertices(&path);
2293 }
2294 #endif
2295 return (EDEADLK);
2296 }
2297
2298 #ifdef LOCKF_DEBUG
2299 if (lockf_debug & 8) {
2300 printf("re-ordering graph vertices\n");
2301 printf("deltaF = ");
2302 graph_print_vertices(&deltaF);
2303 }
2304 #endif
2305
2306 nB = graph_delta_backward(g, x, y, &deltaB);
2307
2308 #ifdef LOCKF_DEBUG
2309 if (lockf_debug & 8) {
2310 printf("deltaB = ");
2311 graph_print_vertices(&deltaB);
2312 }
2313 #endif
2314
2315 /*
2316 * We first build a set of vertex indices (vertex
2317 * order values) that we may use, then we re-assign
2318 * orders first to those vertices in deltaB, then to
2319 * deltaF. Note that the contents of deltaF and deltaB
2320 * may be partially disordered - we perform an
2321 * insertion sort while building our index set.
2322 */
2323 indices = g->g_indexbuf;
2324 n = graph_add_indices(indices, 0, &deltaF);
2325 graph_add_indices(indices, n, &deltaB);
2326
2327 /*
2328 * We must also be sure to maintain the relative
2329 * ordering of deltaF and deltaB when re-assigning
2330 * vertices. We do this by iteratively removing the
2331 * lowest ordered element from the set and assigning
2332 * it the next value from our new ordering.
2333 */
2334 i = graph_assign_indices(g, indices, 0, &deltaB);
2335 graph_assign_indices(g, indices, i, &deltaF);
2336
2337 #ifdef LOCKF_DEBUG
2338 if (lockf_debug & 8) {
2339 struct owner_vertex_list set;
2340 TAILQ_INIT(&set);
2341 for (i = 0; i < nB + nF; i++)
2342 TAILQ_INSERT_TAIL(&set,
2343 g->g_vertices[indices[i]], v_link);
2344 printf("new ordering = ");
2345 graph_print_vertices(&set);
2346 }
2347 #endif
2348 }
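	/*
	 * A hypothetical example of the re-ordering: with vertices A, B, C
	 * at orders 0, 1, 2 and no prior edges, adding C -> A gives
	 * deltaF = {A} and deltaB = {C}. The freed indices {0, 2} are then
	 * handed out to deltaB followed by deltaF, yielding the new order
	 * C(0), B(1), A(2), which restores the topological invariant.
	 */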
2349
2350 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2351
2352 #ifdef LOCKF_DEBUG
2353 if (lockf_debug & 8) {
2354 graph_check(g, TRUE);
2355 }
2356 #endif
2357
2358 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2359
2360 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2361 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2362 e->e_refs = 1;
2363 e->e_from = x;
2364 e->e_to = y;
2365
2366 return (0);
2367 }
2368
2369 /*
2370 * Remove an edge x->y from the graph.
2371 */
2372 static void
2373 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2374 struct owner_vertex *y)
2375 {
2376 struct owner_edge *e;
2377
2378 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2379
2380 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2381 if (e->e_to == y)
2382 break;
2383 }
2384 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2385
2386 e->e_refs--;
2387 if (e->e_refs == 0) {
2388 #ifdef LOCKF_DEBUG
2389 if (lockf_debug & 8) {
2390 printf("removing edge %d:", x->v_order);
2391 lf_print_owner(x->v_owner);
2392 printf(" -> %d:", y->v_order);
2393 lf_print_owner(y->v_owner);
2394 printf("\n");
2395 }
2396 #endif
2397 LIST_REMOVE(e, e_outlink);
2398 LIST_REMOVE(e, e_inlink);
2399 free(e, M_LOCKF);
2400 }
2401 }
2402
2403 /*
2404 * Allocate a new vertex for a lock owner, growing the graph's vertex
2405 * array and scratch index buffer if the graph is full.
2406 */
2407 static struct owner_vertex *
2408 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2409 {
2410 struct owner_vertex *v;
2411
2412 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2413
2414 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
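	/*
	 * Grow the vertex array and the scratch index buffer by doubling
	 * whenever the graph is full (amortized constant cost per vertex).
	 */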
2415 if (g->g_size == g->g_space) {
2416 g->g_vertices = realloc(g->g_vertices,
2417 2 * g->g_space * sizeof(struct owner_vertex *),
2418 M_LOCKF, M_WAITOK);
2419 free(g->g_indexbuf, M_LOCKF);
2420 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2421 M_LOCKF, M_WAITOK);
2422 g->g_space = 2 * g->g_space;
2423 }
2424 v->v_order = g->g_size;
2425 v->v_gen = g->g_gen;
2426 g->g_vertices[g->g_size] = v;
2427 g->g_size++;
2428
2429 LIST_INIT(&v->v_outedges);
2430 LIST_INIT(&v->v_inedges);
2431 v->v_owner = lo;
2432
2433 return (v);
2434 }
2435
2436 static void
2437 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2438 {
2439 struct owner_vertex *w;
2440 int i;
2441
2442 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2443
2444 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2445 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2446
2447 /*
2448 * Remove from the graph's array and close up the gap,
2449 * renumbering the other vertices.
2450 */
2451 for (i = v->v_order + 1; i < g->g_size; i++) {
2452 w = g->g_vertices[i];
2453 w->v_order--;
2454 g->g_vertices[i - 1] = w;
2455 }
2456 g->g_size--;
2457
2458 free(v, M_LOCKF);
2459 }
2460
2461 static struct owner_graph *
2462 graph_init(struct owner_graph *g)
2463 {
2464
2465 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2466 M_LOCKF, M_WAITOK);
2467 g->g_size = 0;
2468 g->g_space = 10;
2469 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2470 g->g_gen = 0;
2471
2472 return (g);
2473 }
2474
2475 #ifdef LOCKF_DEBUG
2476 /*
2477 * Print description of a lock owner
2478 */
2479 static void
2480 lf_print_owner(struct lock_owner *lo)
2481 {
2482
2483 if (lo->lo_flags & F_REMOTE) {
2484 printf("remote pid %d, system %d",
2485 lo->lo_pid, lo->lo_sysid);
2486 } else if (lo->lo_flags & F_FLOCK) {
2487 printf("file %p", lo->lo_id);
2488 } else {
2489 printf("local pid %d", lo->lo_pid);
2490 }
2491 }
2492
2493 /*
2494 * Print out a lock.
2495 */
2496 static void
2497 lf_print(char *tag, struct lockf_entry *lock)
2498 {
2499
2500 printf("%s: lock %p for ", tag, (void *)lock);
2501 lf_print_owner(lock->lf_owner);
2502 	if (lock->lf_inode != NULL)
2503 printf(" in ino %ju on dev <%s>,",
2504 (uintmax_t)lock->lf_inode->i_number,
2505 devtoname(ITODEV(lock->lf_inode)));
2506 printf(" %s, start %jd, end ",
2507 lock->lf_type == F_RDLCK ? "shared" :
2508 lock->lf_type == F_WRLCK ? "exclusive" :
2509 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2510 (intmax_t)lock->lf_start);
2511 if (lock->lf_end == OFF_MAX)
2512 printf("EOF");
2513 else
2514 printf("%jd", (intmax_t)lock->lf_end);
2515 if (!LIST_EMPTY(&lock->lf_outedges))
2516 printf(" block %p\n",
2517 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2518 else
2519 printf("\n");
2520 }
2521
2522 static void
2523 lf_printlist(char *tag, struct lockf_entry *lock)
2524 {
2525 struct lockf_entry *lf, *blk;
2526 struct lockf_edge *e;
2527
2528 	if (lock->lf_inode == NULL)
2529 return;
2530
2531 printf("%s: Lock list for ino %ju on dev <%s>:\n",
2532 tag, (uintmax_t)lock->lf_inode->i_number,
2533 devtoname(ITODEV(lock->lf_inode)));
2534 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2535 		printf("\tlock %p for ", (void *)lf);
2536 		lf_print_owner(lf->lf_owner);
2537 printf(", %s, start %jd, end %jd",
2538 lf->lf_type == F_RDLCK ? "shared" :
2539 lf->lf_type == F_WRLCK ? "exclusive" :
2540 lf->lf_type == F_UNLCK ? "unlock" :
2541 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2542 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2543 blk = e->le_to;
2544 printf("\n\t\tlock request %p for ", (void *)blk);
2545 lf_print_owner(blk->lf_owner);
2546 printf(", %s, start %jd, end %jd",
2547 blk->lf_type == F_RDLCK ? "shared" :
2548 blk->lf_type == F_WRLCK ? "exclusive" :
2549 blk->lf_type == F_UNLCK ? "unlock" :
2550 "unknown", (intmax_t)blk->lf_start,
2551 (intmax_t)blk->lf_end);
2552 if (!LIST_EMPTY(&blk->lf_inedges))
2553 panic("lf_printlist: bad list");
2554 }
2555 printf("\n");
2556 }
2557 }
2558 #endif /* LOCKF_DEBUG */