FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lockf.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
    5  * Authors: Doug Rabson <dfr@rabson.org>
    6  * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 /*-
   30  * Copyright (c) 1982, 1986, 1989, 1993
   31  *      The Regents of the University of California.  All rights reserved.
   32  *
   33  * This code is derived from software contributed to Berkeley by
   34  * Scooter Morris at Genentech Inc.
   35  *
   36  * Redistribution and use in source and binary forms, with or without
   37  * modification, are permitted provided that the following conditions
   38  * are met:
   39  * 1. Redistributions of source code must retain the above copyright
   40  *    notice, this list of conditions and the following disclaimer.
   41  * 2. Redistributions in binary form must reproduce the above copyright
   42  *    notice, this list of conditions and the following disclaimer in the
   43  *    documentation and/or other materials provided with the distribution.
   44  * 3. Neither the name of the University nor the names of its contributors
   45  *    may be used to endorse or promote products derived from this software
   46  *    without specific prior written permission.
   47  *
   48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   58  * SUCH DAMAGE.
   59  *
   60  *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
   61  */
   62 
   63 #include <sys/cdefs.h>
   64 __FBSDID("$FreeBSD$");
   65 
   66 #include "opt_debug_lockf.h"
   67 
   68 #include <sys/param.h>
   69 #include <sys/systm.h>
   70 #include <sys/hash.h>
   71 #include <sys/jail.h>
   72 #include <sys/kernel.h>
   73 #include <sys/limits.h>
   74 #include <sys/lock.h>
   75 #include <sys/mount.h>
   76 #include <sys/mutex.h>
   77 #include <sys/proc.h>
   78 #include <sys/sbuf.h>
   79 #include <sys/stat.h>
   80 #include <sys/sx.h>
   81 #include <sys/unistd.h>
   82 #include <sys/user.h>
   83 #include <sys/vnode.h>
   84 #include <sys/malloc.h>
   85 #include <sys/fcntl.h>
   86 #include <sys/lockf.h>
   87 #include <sys/taskqueue.h>
   88 
   89 #ifdef LOCKF_DEBUG
   90 #include <sys/sysctl.h>
   91 
   92 static int      lockf_debug = 0; /* control debug output */
   93 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
   94 #endif
   95 
   96 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
   97 
   98 struct owner_edge;
   99 struct owner_vertex;
  100 struct owner_vertex_list;
  101 struct owner_graph;
  102 
  103 #define NOLOCKF (struct lockf_entry *)0
  104 #define SELF    0x1
  105 #define OTHERS  0x2
  106 static void      lf_init(void *);
  107 static int       lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
  108 static int       lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
  109     int);
  110 static struct lockf_entry *
  111                  lf_alloc_lock(struct lock_owner *);
  112 static int       lf_free_lock(struct lockf_entry *);
  113 static int       lf_clearlock(struct lockf *, struct lockf_entry *);
  114 static int       lf_overlaps(struct lockf_entry *, struct lockf_entry *);
  115 static int       lf_blocks(struct lockf_entry *, struct lockf_entry *);
  116 static void      lf_free_edge(struct lockf_edge *);
  117 static struct lockf_edge *
  118                  lf_alloc_edge(void);
  119 static void      lf_alloc_vertex(struct lockf_entry *);
  120 static int       lf_add_edge(struct lockf_entry *, struct lockf_entry *);
  121 static void      lf_remove_edge(struct lockf_edge *);
  122 static void      lf_remove_outgoing(struct lockf_entry *);
  123 static void      lf_remove_incoming(struct lockf_entry *);
  124 static int       lf_add_outgoing(struct lockf *, struct lockf_entry *);
  125 static int       lf_add_incoming(struct lockf *, struct lockf_entry *);
  126 static int       lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
  127     int);
  128 static struct lockf_entry *
  129                  lf_getblock(struct lockf *, struct lockf_entry *);
  130 static int       lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
  131 static void      lf_insert_lock(struct lockf *, struct lockf_entry *);
  132 static void      lf_wakeup_lock(struct lockf *, struct lockf_entry *);
  133 static void      lf_update_dependancies(struct lockf *, struct lockf_entry *,
  134     int all, struct lockf_entry_list *);
  135 static void      lf_set_start(struct lockf *, struct lockf_entry *, off_t,
  136         struct lockf_entry_list*);
  137 static void      lf_set_end(struct lockf *, struct lockf_entry *, off_t,
  138         struct lockf_entry_list*);
  139 static int       lf_setlock(struct lockf *, struct lockf_entry *,
  140     struct vnode *, void **cookiep);
  141 static int       lf_cancel(struct lockf *, struct lockf_entry *, void *);
  142 static void      lf_split(struct lockf *, struct lockf_entry *,
  143     struct lockf_entry *, struct lockf_entry_list *);
  144 #ifdef LOCKF_DEBUG
  145 static int       graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
  146     struct owner_vertex_list *path);
  147 static void      graph_check(struct owner_graph *g, int checkorder);
  148 static void      graph_print_vertices(struct owner_vertex_list *set);
  149 #endif
  150 static int       graph_delta_forward(struct owner_graph *g,
  151     struct owner_vertex *x, struct owner_vertex *y,
  152     struct owner_vertex_list *delta);
  153 static int       graph_delta_backward(struct owner_graph *g,
  154     struct owner_vertex *x, struct owner_vertex *y,
  155     struct owner_vertex_list *delta);
  156 static int       graph_add_indices(int *indices, int n,
  157     struct owner_vertex_list *set);
  158 static int       graph_assign_indices(struct owner_graph *g, int *indices,
  159     int nextunused, struct owner_vertex_list *set);
  160 static int       graph_add_edge(struct owner_graph *g,
  161     struct owner_vertex *x, struct owner_vertex *y);
  162 static void      graph_remove_edge(struct owner_graph *g,
  163     struct owner_vertex *x, struct owner_vertex *y);
  164 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
  165     struct lock_owner *lo);
  166 static void      graph_free_vertex(struct owner_graph *g,
  167     struct owner_vertex *v);
  168 static struct owner_graph * graph_init(struct owner_graph *g);
  169 #ifdef LOCKF_DEBUG
  170 static void      lf_print(char *, struct lockf_entry *);
  171 static void      lf_printlist(char *, struct lockf_entry *);
  172 static void      lf_print_owner(struct lock_owner *);
  173 #endif
  174 
  175 /*
  176  * This structure is used to keep track of both local and remote lock
  177  * owners. The lf_owner field of the struct lockf_entry points back at
  178  * the lock owner structure. Each possible lock owner (local proc for
  179  * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
  180  * pair for remote locks) is represented by a unique instance of
  181  * struct lock_owner.
  182  *
  183  * If a lock owner has a lock that blocks some other lock or a lock
  184  * that is waiting for some other lock, it also has a vertex in the
  185  * owner_graph below.
  186  *
  187  * Locks:
  188  * (s)          locked by state->ls_lock
  189  * (S)          locked by lf_lock_states_lock
  190  * (g)          locked by lf_owner_graph_lock
   191  * (c)          const until freeing
        * (l)          locked by lf_lock_owners[lo_hash].lock
   192  */
  193 #define LOCK_OWNER_HASH_SIZE    256
  194 
  195 struct lock_owner {
  196         LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
  197         int     lo_refs;            /* (l) Number of locks referring to this */
   198         int     lo_flags;           /* (c) Flags passed to lf_advlock */
  199         caddr_t lo_id;              /* (c) Id value passed to lf_advlock */
  200         pid_t   lo_pid;             /* (c) Process Id of the lock owner */
  201         int     lo_sysid;           /* (c) System Id of the lock owner */
  202         int     lo_hash;            /* (c) Used to lock the appropriate chain */
  203         struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
  204 };
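
       /*
        * Illustrative examples (not from the original source) of the
        * owner identities described above:
        *
        *   POSIX fcntl(2) lock taken by a local process p:
        *           lo_id = (caddr_t)p, lo_pid = p->p_pid, lo_sysid = 0
        *   BSD flock(2) lock, owned by the open file fp:
        *           lo_id = (caddr_t)fp, lo_pid = -1, lo_sysid = 0
        *   Remote (F_REMOTE) lock, e.g. on behalf of an NLM client:
        *           lo_pid = fl->l_pid, lo_sysid = fl->l_sysid
        */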
  205 
  206 LIST_HEAD(lock_owner_list, lock_owner);
  207 
  208 struct lock_owner_chain {
  209         struct sx               lock;
  210         struct lock_owner_list  list;
  211 };
  212 
  213 static struct sx                lf_lock_states_lock;
  214 static struct lockf_list        lf_lock_states; /* (S) */
  215 static struct lock_owner_chain  lf_lock_owners[LOCK_OWNER_HASH_SIZE];
  216 
  217 /*
  218  * Structures for deadlock detection.
  219  *
  220  * We have two types of directed graph, the first is the set of locks,
  221  * both active and pending on a vnode. Within this graph, active locks
  222  * are terminal nodes in the graph (i.e. have no out-going
  223  * edges). Pending locks have out-going edges to each blocking active
  224  * lock that prevents the lock from being granted and also to each
  225  * older pending lock that would block them if it was active. The
  226  * graph for each vnode is naturally acyclic; new edges are only ever
  227  * added to or from new nodes (either new pending locks which only add
   228  * out-going edges or new active locks which only add in-coming edges),
   229  * so they cannot create loops in the lock graph.
  230  *
  231  * The second graph is a global graph of lock owners. Each lock owner
  232  * is a vertex in that graph and an edge is added to the graph
  233  * whenever an edge is added to a vnode graph, with end points
  234  * corresponding to owner of the new pending lock and the owner of the
  235  * lock upon which it waits. In order to prevent deadlock, we only add
  236  * an edge to this graph if the new edge would not create a cycle.
  237  * 
  238  * The lock owner graph is topologically sorted, i.e. if a node has
  239  * any outgoing edges, then it has an order strictly less than any
  240  * node to which it has an outgoing edge. We preserve this ordering
  241  * (and detect cycles) on edge insertion using Algorithm PK from the
  242  * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
   243  * Graphs" (ACM Journal of Experimental Algorithmics, Vol. 11, Article
   244  * No. 1.7).
  245  */
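
       /*
        * Illustrative example (not from the original source): suppose
        * pending locks have produced the owner edges A->B and B->C
        * (A waits on B, B waits on C), giving the topological order
        * A < B < C.  A new request by C that would add the edge C->A
        * is refused with EDEADLK, since A is already ordered before C
        * and the edge would close the cycle A->B->C->A.
        */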
  246 struct owner_vertex;
  247 
  248 struct owner_edge {
  249         LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
  250         LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
  251         int             e_refs;           /* (g) number of times added */
  252         struct owner_vertex *e_from;      /* (c) out-going from here */
  253         struct owner_vertex *e_to;        /* (c) in-coming to here */
  254 };
  255 LIST_HEAD(owner_edge_list, owner_edge);
  256 
  257 struct owner_vertex {
  258         TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
  259         uint32_t        v_gen;            /* (g) workspace for edge insertion */
  260         int             v_order;          /* (g) order of vertex in graph */
  261         struct owner_edge_list v_outedges;/* (g) list of out-edges */
  262         struct owner_edge_list v_inedges; /* (g) list of in-edges */
  263         struct lock_owner *v_owner;       /* (c) corresponding lock owner */
  264 };
  265 TAILQ_HEAD(owner_vertex_list, owner_vertex);
  266 
  267 struct owner_graph {
  268         struct owner_vertex** g_vertices; /* (g) pointers to vertices */
  269         int             g_size;           /* (g) number of vertices */
  270         int             g_space;          /* (g) space allocated for vertices */
  271         int             *g_indexbuf;      /* (g) workspace for loop detection */
  272         uint32_t        g_gen;            /* (g) increment when re-ordering */
  273 };
  274 
  275 static struct sx                lf_owner_graph_lock;
  276 static struct owner_graph       lf_owner_graph;
  277 
  278 /*
  279  * Initialise various structures and locks.
  280  */
  281 static void
  282 lf_init(void *dummy)
  283 {
  284         int i;
  285 
  286         sx_init(&lf_lock_states_lock, "lock states lock");
  287         LIST_INIT(&lf_lock_states);
  288 
  289         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
  290                 sx_init(&lf_lock_owners[i].lock, "lock owners lock");
  291                 LIST_INIT(&lf_lock_owners[i].list);
  292         }
  293 
  294         sx_init(&lf_owner_graph_lock, "owner graph lock");
  295         graph_init(&lf_owner_graph);
  296 }
  297 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
  298 
  299 /*
  300  * Generate a hash value for a lock owner.
  301  */
  302 static int
  303 lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
  304 {
  305         uint32_t h;
  306 
  307         if (flags & F_REMOTE) {
  308                 h = HASHSTEP(0, fl->l_pid);
  309                 h = HASHSTEP(h, fl->l_sysid);
  310         } else if (flags & F_FLOCK) {
  311                 h = ((uintptr_t) id) >> 7;
  312         } else {
  313                 h = ((uintptr_t) vp) >> 7;
  314         }
  315 
  316         return (h % LOCK_OWNER_HASH_SIZE);
  317 }
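
       /*
        * Example (illustrative): local POSIX owners hash on the vnode
        * pointer, so all owners contending for one file share a hash
        * chain; flock(2) owners hash on the file pointer passed as
        * 'id'; remote owners hash on the <l_pid, l_sysid> pair.  The
        * >> 7 presumably discards the low pointer bits, which are
        * mostly constant due to allocator alignment.
        */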
  318 
  319 /*
  320  * Return true if a lock owner matches the details passed to
  321  * lf_advlock.
  322  */
  323 static int
  324 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
  325     int flags)
  326 {
  327         if (flags & F_REMOTE) {
  328                 return lo->lo_pid == fl->l_pid
  329                         && lo->lo_sysid == fl->l_sysid;
  330         } else {
  331                 return lo->lo_id == id;
  332         }
  333 }
  334 
  335 static struct lockf_entry *
  336 lf_alloc_lock(struct lock_owner *lo)
  337 {
  338         struct lockf_entry *lf;
  339 
  340         lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
  341 
  342 #ifdef LOCKF_DEBUG
  343         if (lockf_debug & 4)
  344                 printf("Allocated lock %p\n", lf);
  345 #endif
  346         if (lo) {
  347                 sx_xlock(&lf_lock_owners[lo->lo_hash].lock);
  348                 lo->lo_refs++;
  349                 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock);
  350                 lf->lf_owner = lo;
  351         }
  352 
  353         return (lf);
  354 }
  355 
  356 static int
  357 lf_free_lock(struct lockf_entry *lock)
  358 {
  359         struct sx *chainlock;
  360 
  361         KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
  362         if (--lock->lf_refs > 0)
  363                 return (0);
  364         /*
  365          * Adjust the lock_owner reference count and
  366          * reclaim the entry if this is the last lock
  367          * for that owner.
  368          */
  369         struct lock_owner *lo = lock->lf_owner;
  370         if (lo) {
  371                 KASSERT(LIST_EMPTY(&lock->lf_outedges),
  372                     ("freeing lock with dependencies"));
  373                 KASSERT(LIST_EMPTY(&lock->lf_inedges),
  374                     ("freeing lock with dependants"));
  375                 chainlock = &lf_lock_owners[lo->lo_hash].lock;
  376                 sx_xlock(chainlock);
  377                 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
  378                 lo->lo_refs--;
  379                 if (lo->lo_refs == 0) {
  380 #ifdef LOCKF_DEBUG
  381                         if (lockf_debug & 1)
  382                                 printf("lf_free_lock: freeing lock owner %p\n",
  383                                     lo);
  384 #endif
  385                         if (lo->lo_vertex) {
  386                                 sx_xlock(&lf_owner_graph_lock);
  387                                 graph_free_vertex(&lf_owner_graph,
  388                                     lo->lo_vertex);
  389                                 sx_xunlock(&lf_owner_graph_lock);
  390                         }
  391                         LIST_REMOVE(lo, lo_link);
  392                         free(lo, M_LOCKF);
  393 #ifdef LOCKF_DEBUG
  394                         if (lockf_debug & 4)
  395                                 printf("Freed lock owner %p\n", lo);
  396 #endif
  397                 }
  398                 sx_unlock(chainlock);
  399         }
  400         if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
  401                 vrele(lock->lf_vnode);
  402                 lock->lf_vnode = NULL;
  403         }
  404 #ifdef LOCKF_DEBUG
  405         if (lockf_debug & 4)
  406                 printf("Freed lock %p\n", lock);
  407 #endif
  408         free(lock, M_LOCKF);
  409         return (1);
  410 }
  411 
  412 /*
  413  * Advisory record locking support
  414  */
  415 int
  416 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
  417     u_quad_t size)
  418 {
  419         struct lockf *state;
  420         struct flock *fl = ap->a_fl;
  421         struct lockf_entry *lock;
  422         struct vnode *vp = ap->a_vp;
  423         caddr_t id = ap->a_id;
  424         int flags = ap->a_flags;
  425         int hash;
  426         struct lock_owner *lo;
  427         off_t start, end, oadd;
  428         int error;
  429 
  430         /*
   431          * Handle the F_UNLCKSYS case first - no need to mess about
  432          * creating a lock owner for this one.
  433          */
  434         if (ap->a_op == F_UNLCKSYS) {
  435                 lf_clearremotesys(fl->l_sysid);
  436                 return (0);
  437         }
  438 
  439         /*
  440          * Convert the flock structure into a start and end.
  441          */
  442         switch (fl->l_whence) {
  443         case SEEK_SET:
  444         case SEEK_CUR:
  445                 /*
  446                  * Caller is responsible for adding any necessary offset
  447                  * when SEEK_CUR is used.
  448                  */
  449                 start = fl->l_start;
  450                 break;
  451 
  452         case SEEK_END:
  453                 if (size > OFF_MAX ||
  454                     (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
  455                         return (EOVERFLOW);
  456                 start = size + fl->l_start;
  457                 break;
  458 
  459         default:
  460                 return (EINVAL);
  461         }
  462         if (start < 0)
  463                 return (EINVAL);
  464         if (fl->l_len < 0) {
  465                 if (start == 0)
  466                         return (EINVAL);
  467                 end = start - 1;
  468                 start += fl->l_len;
  469                 if (start < 0)
  470                         return (EINVAL);
  471         } else if (fl->l_len == 0) {
  472                 end = OFF_MAX;
  473         } else {
  474                 oadd = fl->l_len - 1;
  475                 if (oadd > OFF_MAX - start)
  476                         return (EOVERFLOW);
  477                 end = start + oadd;
  478         }
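
               /*
                * Worked example (illustrative): with l_whence = SEEK_SET,
                * l_start = 100 and l_len = -10, the code above computes
                * end = 100 - 1 = 99 and start = 100 + (-10) = 90, i.e. a
                * negative length locks the range [90..99] ending just
                * before l_start.  With l_len = 0 the lock covers
                * [start..OFF_MAX], i.e. to end-of-file and beyond.
                */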
  479 
  480 retry_setlock:
  481 
  482         /*
   483          * Avoid the common case of unlocking when the inode has no locks.
  484          */
  485         if (ap->a_op != F_SETLK && (*statep) == NULL) {
  486                 VI_LOCK(vp);
  487                 if ((*statep) == NULL) {
  488                         fl->l_type = F_UNLCK;
  489                         VI_UNLOCK(vp);
  490                         return (0);
  491                 }
  492                 VI_UNLOCK(vp);
  493         }
  494 
  495         /*
  496          * Map our arguments to an existing lock owner or create one
  497          * if this is the first time we have seen this owner.
  498          */
  499         hash = lf_hash_owner(id, vp, fl, flags);
  500         sx_xlock(&lf_lock_owners[hash].lock);
  501         LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
  502                 if (lf_owner_matches(lo, id, fl, flags))
  503                         break;
  504         if (!lo) {
  505                 /*
  506                  * We initialise the lock with a reference
  507                  * count which matches the new lockf_entry
  508                  * structure created below.
  509                  */
  510                 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
  511                     M_WAITOK|M_ZERO);
  512 #ifdef LOCKF_DEBUG
  513                 if (lockf_debug & 4)
  514                         printf("Allocated lock owner %p\n", lo);
  515 #endif
  516 
  517                 lo->lo_refs = 1;
  518                 lo->lo_flags = flags;
  519                 lo->lo_id = id;
  520                 lo->lo_hash = hash;
  521                 if (flags & F_REMOTE) {
  522                         lo->lo_pid = fl->l_pid;
  523                         lo->lo_sysid = fl->l_sysid;
  524                 } else if (flags & F_FLOCK) {
  525                         lo->lo_pid = -1;
  526                         lo->lo_sysid = 0;
  527                 } else {
  528                         struct proc *p = (struct proc *) id;
  529                         lo->lo_pid = p->p_pid;
  530                         lo->lo_sysid = 0;
  531                 }
  532                 lo->lo_vertex = NULL;
  533 
  534 #ifdef LOCKF_DEBUG
  535                 if (lockf_debug & 1) {
  536                         printf("lf_advlockasync: new lock owner %p ", lo);
  537                         lf_print_owner(lo);
  538                         printf("\n");
  539                 }
  540 #endif
  541 
  542                 LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link);
  543         } else {
  544                 /*
  545                  * We have seen this lock owner before, increase its
  546                  * reference count to account for the new lockf_entry
  547                  * structure we create below.
  548                  */
  549                 lo->lo_refs++;
  550         }
  551         sx_xunlock(&lf_lock_owners[hash].lock);
  552 
  553         /*
  554          * Create the lockf structure. We initialise the lf_owner
  555          * field here instead of in lf_alloc_lock() to avoid paying
   556          * the lock owner hash chain lock tax twice.
  557          */
  558         lock = lf_alloc_lock(NULL);
  559         lock->lf_refs = 1;
  560         lock->lf_start = start;
  561         lock->lf_end = end;
  562         lock->lf_owner = lo;
  563         lock->lf_vnode = vp;
  564         if (flags & F_REMOTE) {
  565                 /*
  566                  * For remote locks, the caller may release its ref to
  567                  * the vnode at any time - we have to ref it here to
  568                  * prevent it from being recycled unexpectedly.
  569                  */
  570                 vref(vp);
  571         }
  572 
  573         lock->lf_type = fl->l_type;
  574         LIST_INIT(&lock->lf_outedges);
  575         LIST_INIT(&lock->lf_inedges);
  576         lock->lf_async_task = ap->a_task;
  577         lock->lf_flags = ap->a_flags;
  578 
  579         /*
  580          * Do the requested operation. First find our state structure
  581          * and create a new one if necessary - the caller's *statep
  582          * variable and the state's ls_threads count is protected by
  583          * the vnode interlock.
  584          */
  585         VI_LOCK(vp);
  586         if (VN_IS_DOOMED(vp)) {
  587                 VI_UNLOCK(vp);
  588                 lf_free_lock(lock);
  589                 return (ENOENT);
  590         }
  591 
  592         /*
  593          * Allocate a state structure if necessary.
  594          */
  595         state = *statep;
  596         if (state == NULL) {
  597                 struct lockf *ls;
  598 
  599                 VI_UNLOCK(vp);
  600 
  601                 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
  602                 sx_init(&ls->ls_lock, "ls_lock");
  603                 LIST_INIT(&ls->ls_active);
  604                 LIST_INIT(&ls->ls_pending);
  605                 ls->ls_threads = 1;
  606 
  607                 sx_xlock(&lf_lock_states_lock);
  608                 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
  609                 sx_xunlock(&lf_lock_states_lock);
  610 
  611                 /*
  612                  * Cope if we lost a race with some other thread while
  613                  * trying to allocate memory.
  614                  */
  615                 VI_LOCK(vp);
  616                 if (VN_IS_DOOMED(vp)) {
  617                         VI_UNLOCK(vp);
  618                         sx_xlock(&lf_lock_states_lock);
  619                         LIST_REMOVE(ls, ls_link);
  620                         sx_xunlock(&lf_lock_states_lock);
  621                         sx_destroy(&ls->ls_lock);
  622                         free(ls, M_LOCKF);
  623                         lf_free_lock(lock);
  624                         return (ENOENT);
  625                 }
  626                 if ((*statep) == NULL) {
  627                         state = *statep = ls;
  628                         VI_UNLOCK(vp);
  629                 } else {
  630                         state = *statep;
  631                         MPASS(state->ls_threads >= 0);
  632                         state->ls_threads++;
  633                         VI_UNLOCK(vp);
  634 
  635                         sx_xlock(&lf_lock_states_lock);
  636                         LIST_REMOVE(ls, ls_link);
  637                         sx_xunlock(&lf_lock_states_lock);
  638                         sx_destroy(&ls->ls_lock);
  639                         free(ls, M_LOCKF);
  640                 }
  641         } else {
  642                 MPASS(state->ls_threads >= 0);
  643                 state->ls_threads++;
  644                 VI_UNLOCK(vp);
  645         }
  646 
  647         sx_xlock(&state->ls_lock);
  648         /*
  649          * Recheck the doomed vnode after state->ls_lock is
  650          * locked. lf_purgelocks() requires that no new threads add
  651          * pending locks when vnode is marked by VIRF_DOOMED flag.
  652          */
  653         if (VN_IS_DOOMED(vp)) {
  654                 VI_LOCK(vp);
  655                 MPASS(state->ls_threads > 0);
  656                 state->ls_threads--;
  657                 wakeup(state);
  658                 VI_UNLOCK(vp);
  659                 sx_xunlock(&state->ls_lock);
  660                 lf_free_lock(lock);
  661                 return (ENOENT);
  662         }
  663 
  664         switch (ap->a_op) {
  665         case F_SETLK:
  666                 error = lf_setlock(state, lock, vp, ap->a_cookiep);
  667                 break;
  668 
  669         case F_UNLCK:
  670                 error = lf_clearlock(state, lock);
  671                 lf_free_lock(lock);
  672                 break;
  673 
  674         case F_GETLK:
  675                 error = lf_getlock(state, lock, fl);
  676                 lf_free_lock(lock);
  677                 break;
  678 
  679         case F_CANCEL:
  680                 if (ap->a_cookiep)
  681                         error = lf_cancel(state, lock, *ap->a_cookiep);
  682                 else
  683                         error = EINVAL;
  684                 lf_free_lock(lock);
  685                 break;
  686 
  687         default:
  688                 lf_free_lock(lock);
  689                 error = EINVAL;
  690                 break;
  691         }
  692 
  693 #ifdef DIAGNOSTIC
  694         /*
  695          * Check for some can't happen stuff. In this case, the active
  696          * lock list becoming disordered or containing mutually
  697          * blocking locks. We also check the pending list for locks
  698          * which should be active (i.e. have no out-going edges).
  699          */
  700         LIST_FOREACH(lock, &state->ls_active, lf_link) {
  701                 struct lockf_entry *lf;
  702                 if (LIST_NEXT(lock, lf_link))
  703                         KASSERT((lock->lf_start
  704                                 <= LIST_NEXT(lock, lf_link)->lf_start),
  705                             ("locks disordered"));
  706                 LIST_FOREACH(lf, &state->ls_active, lf_link) {
  707                         if (lock == lf)
  708                                 break;
  709                         KASSERT(!lf_blocks(lock, lf),
  710                             ("two conflicting active locks"));
  711                         if (lock->lf_owner == lf->lf_owner)
  712                                 KASSERT(!lf_overlaps(lock, lf),
  713                                     ("two overlapping locks from same owner"));
  714                 }
  715         }
  716         LIST_FOREACH(lock, &state->ls_pending, lf_link) {
  717                 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
  718                     ("pending lock which should be active"));
  719         }
  720 #endif
  721         sx_xunlock(&state->ls_lock);
  722 
  723         VI_LOCK(vp);
  724         MPASS(state->ls_threads > 0);
  725         state->ls_threads--;
  726         if (state->ls_threads != 0) {
  727                 wakeup(state);
  728         }
  729         VI_UNLOCK(vp);
  730 
  731         if (error == EDOOFUS) {
  732                 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
  733                 goto retry_setlock;
  734         }
  735         return (error);
  736 }
  737 
  738 int
  739 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
  740 {
  741         struct vop_advlockasync_args a;
  742 
  743         a.a_vp = ap->a_vp;
  744         a.a_id = ap->a_id;
  745         a.a_op = ap->a_op;
  746         a.a_fl = ap->a_fl;
  747         a.a_flags = ap->a_flags;
  748         a.a_task = NULL;
  749         a.a_cookiep = NULL;
  750 
  751         return (lf_advlockasync(&a, statep, size));
  752 }
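
       /*
        * Illustrative usage (a sketch; the real callers are the file
        * systems): a file system implements VOP_ADVLOCK() by passing
        * its per-file lock state pointer and the current file size,
        * e.g. UFS does roughly:
        *
        *      struct inode *ip = VTOI(ap->a_vp);
        *
        *      return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
        */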
  753 
  754 void
  755 lf_purgelocks(struct vnode *vp, struct lockf **statep)
  756 {
  757         struct lockf *state;
  758         struct lockf_entry *lock, *nlock;
  759 
  760         /*
  761          * For this to work correctly, the caller must ensure that no
  762          * other threads enter the locking system for this vnode,
  763          * e.g. by checking VIRF_DOOMED. We wake up any threads that are
  764          * sleeping waiting for locks on this vnode and then free all
  765          * the remaining locks.
  766          */
  767         KASSERT(VN_IS_DOOMED(vp),
  768             ("lf_purgelocks: vp %p has not vgone yet", vp));
  769         state = *statep;
  770         if (state == NULL) {
  771                 return;
  772         }
  773         VI_LOCK(vp);
  774         *statep = NULL;
  775         if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
  776                 KASSERT(LIST_EMPTY(&state->ls_pending),
  777                     ("freeing state with pending locks"));
  778                 VI_UNLOCK(vp);
  779                 goto out_free;
  780         }
  781         MPASS(state->ls_threads >= 0);
  782         state->ls_threads++;
  783         VI_UNLOCK(vp);
  784 
  785         sx_xlock(&state->ls_lock);
  786         sx_xlock(&lf_owner_graph_lock);
  787         LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
  788                 LIST_REMOVE(lock, lf_link);
  789                 lf_remove_outgoing(lock);
  790                 lf_remove_incoming(lock);
  791 
  792                 /*
   793                  * If it's an async lock, we can just free it
  794                  * here, otherwise we let the sleeping thread
  795                  * free it.
  796                  */
  797                 if (lock->lf_async_task) {
  798                         lf_free_lock(lock);
  799                 } else {
  800                         lock->lf_flags |= F_INTR;
  801                         wakeup(lock);
  802                 }
  803         }
  804         sx_xunlock(&lf_owner_graph_lock);
  805         sx_xunlock(&state->ls_lock);
  806 
  807         /*
   808          * Wait for all other threads, sleeping and otherwise,
   809          * to leave.
  810          */
  811         VI_LOCK(vp);
  812         while (state->ls_threads > 1)
  813                 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
  814         VI_UNLOCK(vp);
  815 
  816         /*
  817          * We can just free all the active locks since they
  818          * will have no dependencies (we removed them all
  819          * above). We don't need to bother locking since we
  820          * are the last thread using this state structure.
  821          */
  822         KASSERT(LIST_EMPTY(&state->ls_pending),
  823             ("lock pending for %p", state));
  824         LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
  825                 LIST_REMOVE(lock, lf_link);
  826                 lf_free_lock(lock);
  827         }
  828 out_free:
  829         sx_xlock(&lf_lock_states_lock);
  830         LIST_REMOVE(state, ls_link);
  831         sx_xunlock(&lf_lock_states_lock);
  832         sx_destroy(&state->ls_lock);
  833         free(state, M_LOCKF);
  834 }
  835 
  836 /*
  837  * Return non-zero if locks 'x' and 'y' overlap.
  838  */
  839 static int
  840 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
  841 {
  842 
  843         return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
  844 }
  845 
  846 /*
  847  * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
  848  */
  849 static int
  850 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
  851 {
  852 
  853         return x->lf_owner != y->lf_owner
  854                 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
  855                 && lf_overlaps(x, y);
  856 }
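
       /*
        * Example (illustrative): a read lock [0..99] held by owner A
        * overlaps a read lock [50..149] requested by owner B but does
        * not block it; a write lock over either range would block the
        * other owner's lock, while locks from the same owner never
        * block each other, however they overlap.
        */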
  857 
  858 /*
  859  * Allocate a lock edge from the free list
  860  */
  861 static struct lockf_edge *
  862 lf_alloc_edge(void)
  863 {
  864 
  865         return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
  866 }
  867 
  868 /*
  869  * Free a lock edge.
  870  */
  871 static void
  872 lf_free_edge(struct lockf_edge *e)
  873 {
  874 
  875         free(e, M_LOCKF);
  876 }
  877 
  878 /*
  879  * Ensure that the lock's owner has a corresponding vertex in the
  880  * owner graph.
  881  */
  882 static void
  883 lf_alloc_vertex(struct lockf_entry *lock)
  884 {
  885         struct owner_graph *g = &lf_owner_graph;
  886 
  887         if (!lock->lf_owner->lo_vertex)
  888                 lock->lf_owner->lo_vertex =
  889                         graph_alloc_vertex(g, lock->lf_owner);
  890 }
  891 
  892 /*
  893  * Attempt to record an edge from lock x to lock y. Return EDEADLK if
  894  * the new edge would cause a cycle in the owner graph.
  895  */
  896 static int
  897 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
  898 {
  899         struct owner_graph *g = &lf_owner_graph;
  900         struct lockf_edge *e;
  901         int error;
  902 
  903 #ifdef DIAGNOSTIC
  904         LIST_FOREACH(e, &x->lf_outedges, le_outlink)
  905                 KASSERT(e->le_to != y, ("adding lock edge twice"));
  906 #endif
  907 
  908         /*
  909          * Make sure the two owners have entries in the owner graph.
  910          */
  911         lf_alloc_vertex(x);
  912         lf_alloc_vertex(y);
  913 
  914         error = graph_add_edge(g, x->lf_owner->lo_vertex,
  915             y->lf_owner->lo_vertex);
  916         if (error)
  917                 return (error);
  918 
  919         e = lf_alloc_edge();
  920         LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
  921         LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
  922         e->le_from = x;
  923         e->le_to = y;
  924 
  925         return (0);
  926 }
  927 
  928 /*
  929  * Remove an edge from the lock graph.
  930  */
  931 static void
  932 lf_remove_edge(struct lockf_edge *e)
  933 {
  934         struct owner_graph *g = &lf_owner_graph;
  935         struct lockf_entry *x = e->le_from;
  936         struct lockf_entry *y = e->le_to;
  937 
  938         graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
  939         LIST_REMOVE(e, le_outlink);
  940         LIST_REMOVE(e, le_inlink);
  941         e->le_from = NULL;
  942         e->le_to = NULL;
  943         lf_free_edge(e);
  944 }
  945 
  946 /*
  947  * Remove all out-going edges from lock x.
  948  */
  949 static void
  950 lf_remove_outgoing(struct lockf_entry *x)
  951 {
  952         struct lockf_edge *e;
  953 
  954         while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
  955                 lf_remove_edge(e);
  956         }
  957 }
  958 
  959 /*
  960  * Remove all in-coming edges from lock x.
  961  */
  962 static void
  963 lf_remove_incoming(struct lockf_entry *x)
  964 {
  965         struct lockf_edge *e;
  966 
  967         while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
  968                 lf_remove_edge(e);
  969         }
  970 }
  971 
  972 /*
  973  * Walk the list of locks for the file and create an out-going edge
  974  * from lock to each blocking lock.
  975  */
  976 static int
  977 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
  978 {
  979         struct lockf_entry *overlap;
  980         int error;
  981 
  982         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
  983                 /*
  984                  * We may assume that the active list is sorted by
  985                  * lf_start.
  986                  */
  987                 if (overlap->lf_start > lock->lf_end)
  988                         break;
  989                 if (!lf_blocks(lock, overlap))
  990                         continue;
  991 
  992                 /*
  993                  * We've found a blocking lock. Add the corresponding
  994                  * edge to the graphs and see if it would cause a
  995                  * deadlock.
  996                  */
  997                 error = lf_add_edge(lock, overlap);
  998 
  999                 /*
 1000                  * The only error that lf_add_edge returns is EDEADLK.
 1001                  * Remove any edges we added and return the error.
 1002                  */
 1003                 if (error) {
 1004                         lf_remove_outgoing(lock);
 1005                         return (error);
 1006                 }
 1007         }
 1008 
 1009         /*
 1010          * We also need to add edges to sleeping locks that block
 1011          * us. This ensures that lf_wakeup_lock cannot grant two
 1012          * mutually blocking locks simultaneously and also enforces a
 1013          * 'first come, first served' fairness model. Note that this
 1014          * only happens if we are blocked by at least one active lock
 1015          * due to the call to lf_getblock in lf_setlock below.
 1016          */
 1017         LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
 1018                 if (!lf_blocks(lock, overlap))
 1019                         continue;
 1020                 /*
 1021                  * We've found a blocking lock. Add the corresponding
 1022                  * edge to the graphs and see if it would cause a
 1023                  * deadlock.
 1024                  */
 1025                 error = lf_add_edge(lock, overlap);
 1026 
 1027                 /*
 1028                  * The only error that lf_add_edge returns is EDEADLK.
 1029                  * Remove any edges we added and return the error.
 1030                  */
 1031                 if (error) {
 1032                         lf_remove_outgoing(lock);
 1033                         return (error);
 1034                 }
 1035         }
 1036 
 1037         return (0);
 1038 }
 1039 
 1040 /*
  1041  * Walk the list of pending locks for the file and create an in-coming
  1042  * edge to 'lock' from each pending lock that the new lock blocks.
 1043  */
 1044 static int
 1045 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
 1046 {
 1047         struct lockf_entry *overlap;
 1048         int error;
 1049 
 1050         sx_assert(&state->ls_lock, SX_XLOCKED);
 1051         if (LIST_EMPTY(&state->ls_pending))
 1052                 return (0);
 1053 
 1054         error = 0;
 1055         sx_xlock(&lf_owner_graph_lock);
 1056         LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
 1057                 if (!lf_blocks(lock, overlap))
 1058                         continue;
 1059 
 1060                 /*
 1061                  * We've found a blocking lock. Add the corresponding
 1062                  * edge to the graphs and see if it would cause a
 1063                  * deadlock.
 1064                  */
 1065                 error = lf_add_edge(overlap, lock);
 1066 
 1067                 /*
 1068                  * The only error that lf_add_edge returns is EDEADLK.
 1069                  * Remove any edges we added and return the error.
 1070                  */
 1071                 if (error) {
 1072                         lf_remove_incoming(lock);
 1073                         break;
 1074                 }
 1075         }
 1076         sx_xunlock(&lf_owner_graph_lock);
 1077         return (error);
 1078 }
 1079 
 1080 /*
 1081  * Insert lock into the active list, keeping list entries ordered by
 1082  * increasing values of lf_start.
 1083  */
 1084 static void
 1085 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
 1086 {
 1087         struct lockf_entry *lf, *lfprev;
 1088 
 1089         if (LIST_EMPTY(&state->ls_active)) {
 1090                 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
 1091                 return;
 1092         }
 1093 
 1094         lfprev = NULL;
 1095         LIST_FOREACH(lf, &state->ls_active, lf_link) {
 1096                 if (lf->lf_start > lock->lf_start) {
 1097                         LIST_INSERT_BEFORE(lf, lock, lf_link);
 1098                         return;
 1099                 }
 1100                 lfprev = lf;
 1101         }
 1102         LIST_INSERT_AFTER(lfprev, lock, lf_link);
 1103 }
 1104 
 1105 /*
 1106  * Wake up a sleeping lock and remove it from the pending list now
 1107  * that all its dependencies have been resolved. The caller should
 1108  * arrange for the lock to be added to the active list, adjusting any
 1109  * existing locks for the same owner as needed.
 1110  */
 1111 static void
 1112 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
 1113 {
 1114 
 1115         /*
 1116          * Remove from ls_pending list and wake up the caller
 1117          * or start the async notification, as appropriate.
 1118          */
 1119         LIST_REMOVE(wakelock, lf_link);
 1120 #ifdef LOCKF_DEBUG
 1121         if (lockf_debug & 1)
 1122                 lf_print("lf_wakeup_lock: awakening", wakelock);
 1123 #endif /* LOCKF_DEBUG */
 1124         if (wakelock->lf_async_task) {
 1125                 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
 1126         } else {
 1127                 wakeup(wakelock);
 1128         }
 1129 }
 1130 
 1131 /*
 1132  * Re-check all dependent locks and remove edges to locks that we no
 1133  * longer block. If 'all' is non-zero, the lock has been removed and
 1134  * we must remove all the dependencies, otherwise it has simply been
  1135  * reduced but remains active. Any pending locks which have been
  1136  * unblocked are added to 'granted'.
 1137  */
 1138 static void
 1139 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
 1140         struct lockf_entry_list *granted)
 1141 {
 1142         struct lockf_edge *e, *ne;
 1143         struct lockf_entry *deplock;
 1144 
 1145         LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
 1146                 deplock = e->le_from;
 1147                 if (all || !lf_blocks(lock, deplock)) {
 1148                         sx_xlock(&lf_owner_graph_lock);
 1149                         lf_remove_edge(e);
 1150                         sx_xunlock(&lf_owner_graph_lock);
 1151                         if (LIST_EMPTY(&deplock->lf_outedges)) {
 1152                                 lf_wakeup_lock(state, deplock);
 1153                                 LIST_INSERT_HEAD(granted, deplock, lf_link);
 1154                         }
 1155                 }
 1156         }
 1157 }
 1158 
 1159 /*
 1160  * Set the start of an existing active lock, updating dependencies and
 1161  * adding any newly woken locks to 'granted'.
 1162  */
 1163 static void
 1164 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
 1165         struct lockf_entry_list *granted)
 1166 {
 1167 
 1168         KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
 1169         lock->lf_start = new_start;
 1170         LIST_REMOVE(lock, lf_link);
 1171         lf_insert_lock(state, lock);
 1172         lf_update_dependancies(state, lock, FALSE, granted);
 1173 }
 1174 
 1175 /*
 1176  * Set the end of an existing active lock, updating dependencies and
 1177  * adding any newly woken locks to 'granted'.
 1178  */
 1179 static void
 1180 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
 1181         struct lockf_entry_list *granted)
 1182 {
 1183 
 1184         KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
 1185         lock->lf_end = new_end;
 1186         lf_update_dependancies(state, lock, FALSE, granted);
 1187 }
 1188 
 1189 /*
 1190  * Add a lock to the active list, updating or removing any current
 1191  * locks owned by the same owner and processing any pending locks that
 1192  * become unblocked as a result. This code is also used for unlock
 1193  * since the logic for updating existing locks is identical.
 1194  *
 1195  * As a result of processing the new lock, we may unblock existing
 1196  * pending locks as a result of downgrading/unlocking. We simply
 1197  * activate the newly granted locks by looping.
 1198  *
 1199  * Since the new lock already has its dependencies set up, we always
  1200  * add it to the list (unless it's an unlock request). This may
  1201  * fragment the lock list in some pathological cases, but it's probably
 1202  * not a real problem.
 1203  */
 1204 static void
 1205 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
 1206 {
 1207         struct lockf_entry *overlap, *lf;
 1208         struct lockf_entry_list granted;
 1209         int ovcase;
 1210 
 1211         LIST_INIT(&granted);
 1212         LIST_INSERT_HEAD(&granted, lock, lf_link);
 1213 
 1214         while (!LIST_EMPTY(&granted)) {
 1215                 lock = LIST_FIRST(&granted);
 1216                 LIST_REMOVE(lock, lf_link);
 1217 
 1218                 /*
 1219                  * Skip over locks owned by other processes.  Handle
 1220                  * any locks that overlap and are owned by ourselves.
 1221                  */
 1222                 overlap = LIST_FIRST(&state->ls_active);
 1223                 for (;;) {
 1224                         ovcase = lf_findoverlap(&overlap, lock, SELF);
 1225 
 1226 #ifdef LOCKF_DEBUG
 1227                         if (ovcase && (lockf_debug & 2)) {
 1228                                 printf("lf_setlock: overlap %d", ovcase);
 1229                                 lf_print("", overlap);
 1230                         }
 1231 #endif
 1232                         /*
 1233                          * Six cases:
 1234                          *      0) no overlap
 1235                          *      1) overlap == lock
 1236                          *      2) overlap contains lock
 1237                          *      3) lock contains overlap
 1238                          *      4) overlap starts before lock
 1239                          *      5) overlap ends after lock
 1240                          */
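                        /*
                         * Pictorially (an illustrative sketch; the new
                         * lock is =====, the existing overlap -----):
                         *
                         *      1)  =====           (overlap == lock)
                         *          -----
                         *      2)   ===            (overlap contains lock)
                         *          -------
                         *      3)  =========       (lock contains overlap)
                         *            -----
                         *      4)      =====       (overlap starts before)
                         *          -----
                         *      5)  =====           (overlap ends after)
                         *              -----
                         */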
 1241                         switch (ovcase) {
 1242                         case 0: /* no overlap */
 1243                                 break;
 1244 
 1245                         case 1: /* overlap == lock */
 1246                                 /*
 1247                                  * We have already setup the
 1248                                  * dependants for the new lock, taking
 1249                                  * into account a possible downgrade
 1250                                  * or unlock. Remove the old lock.
 1251                                  */
 1252                                 LIST_REMOVE(overlap, lf_link);
 1253                                 lf_update_dependancies(state, overlap, TRUE,
 1254                                         &granted);
 1255                                 lf_free_lock(overlap);
 1256                                 break;
 1257 
 1258                         case 2: /* overlap contains lock */
 1259                                 /*
 1260                                  * Just split the existing lock.
 1261                                  */
 1262                                 lf_split(state, overlap, lock, &granted);
 1263                                 break;
 1264 
 1265                         case 3: /* lock contains overlap */
 1266                                 /*
 1267                                  * Delete the overlap and advance to
 1268                                  * the next entry in the list.
 1269                                  */
 1270                                 lf = LIST_NEXT(overlap, lf_link);
 1271                                 LIST_REMOVE(overlap, lf_link);
 1272                                 lf_update_dependancies(state, overlap, TRUE,
 1273                                         &granted);
 1274                                 lf_free_lock(overlap);
 1275                                 overlap = lf;
 1276                                 continue;
 1277 
 1278                         case 4: /* overlap starts before lock */
 1279                                 /*
 1280                                  * Just update the overlap end and
 1281                                  * move on.
 1282                                  */
 1283                                 lf_set_end(state, overlap, lock->lf_start - 1,
 1284                                     &granted);
 1285                                 overlap = LIST_NEXT(overlap, lf_link);
 1286                                 continue;
 1287 
 1288                         case 5: /* overlap ends after lock */
 1289                                 /*
 1290                                  * Change the start of overlap and
 1291                                  * re-insert.
 1292                                  */
 1293                                 lf_set_start(state, overlap, lock->lf_end + 1,
 1294                                     &granted);
 1295                                 break;
 1296                         }
 1297                         break;
 1298                 }
 1299 #ifdef LOCKF_DEBUG
 1300                 if (lockf_debug & 1) {
 1301                         if (lock->lf_type != F_UNLCK)
 1302                                 lf_print("lf_activate_lock: activated", lock);
 1303                         else
 1304                                 lf_print("lf_activate_lock: unlocked", lock);
 1305                         lf_printlist("lf_activate_lock", lock);
 1306                 }
 1307 #endif /* LOCKF_DEBUG */
 1308                 if (lock->lf_type != F_UNLCK)
 1309                         lf_insert_lock(state, lock);
 1310         }
 1311 }
 1312 
 1313 /*
 1314  * Cancel a pending lock request, either as a result of a signal or a
 1315  * cancel request for an async lock.
 1316  */
 1317 static void
 1318 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
 1319 {
 1320         struct lockf_entry_list granted;
 1321 
 1322         /*
 1323          * Note it is theoretically possible that cancelling this lock
 1324          * may allow some other pending lock to become
 1325          * active. Consider this case:
 1326          *
 1327          * Owner        Action          Result          Dependencies
 1328          * 
 1329          * A:           lock [0..0]     succeeds        
 1330          * B:           lock [2..2]     succeeds        
 1331          * C:           lock [1..2]     blocked         C->B
 1332          * D:           lock [0..1]     blocked         C->B,D->A,D->C
 1333          * A:           unlock [0..0]                   C->B,D->C
 1334          * C:           cancel [1..2]   
 1335          */
 1336 
 1337         LIST_REMOVE(lock, lf_link);
 1338 
 1339         /*
 1340          * Removing out-going edges is simple.
 1341          */
 1342         sx_xlock(&lf_owner_graph_lock);
 1343         lf_remove_outgoing(lock);
 1344         sx_xunlock(&lf_owner_graph_lock);
 1345 
 1346         /*
 1347          * Removing in-coming edges may allow some other lock to
 1348          * become active - we use lf_update_dependancies to figure
 1349          * this out.
 1350          */
 1351         LIST_INIT(&granted);
 1352         lf_update_dependancies(state, lock, TRUE, &granted);
 1353         lf_free_lock(lock);
 1354 
 1355         /*
 1356          * Feed any newly active locks to lf_activate_lock.
 1357          */
 1358         while (!LIST_EMPTY(&granted)) {
 1359                 lock = LIST_FIRST(&granted);
 1360                 LIST_REMOVE(lock, lf_link);
 1361                 lf_activate_lock(state, lock);
 1362         }
 1363 }
 1364 
 1365 /*
 1366  * Set a byte-range lock.
 1367  */
 1368 static int
 1369 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
 1370     void **cookiep)
 1371 {
 1372         static char lockstr[] = "lockf";
 1373         int error, priority, stops_deferred;
 1374 
 1375 #ifdef LOCKF_DEBUG
 1376         if (lockf_debug & 1)
 1377                 lf_print("lf_setlock", lock);
 1378 #endif /* LOCKF_DEBUG */
 1379 
 1380         /*
 1381          * Set the priority
 1382          */
 1383         priority = PLOCK;
 1384         if (lock->lf_type == F_WRLCK)
 1385                 priority += 4;
 1386         if (!(lock->lf_flags & F_NOINTR))
 1387                 priority |= PCATCH;
 1388         /*
 1389          * Scan lock list for this file looking for locks that would block us.
 1390          */
 1391         if (lf_getblock(state, lock)) {
 1392                 /*
 1393                  * Free the structure and return if nonblocking.
 1394                  */
 1395                 if ((lock->lf_flags & F_WAIT) == 0
 1396                     && lock->lf_async_task == NULL) {
 1397                         lf_free_lock(lock);
 1398                         error = EAGAIN;
 1399                         goto out;
 1400                 }
 1401 
 1402                 /*
 1403                  * For flock type locks, we must first remove
 1404                  * any shared locks that we hold before we sleep
 1405                  * waiting for an exclusive lock.
 1406                  */
 1407                 if ((lock->lf_flags & F_FLOCK) &&
 1408                     lock->lf_type == F_WRLCK) {
 1409                         lock->lf_type = F_UNLCK;
 1410                         lf_activate_lock(state, lock);
 1411                         lock->lf_type = F_WRLCK;
 1412                 }
 1413 
 1414                 /*
 1415                  * We are blocked. Create edges to each blocking lock,
 1416                  * checking for deadlock using the owner graph. For
 1417                  * simplicity, we run deadlock detection for all
 1418                  * locks, posix and otherwise.
 1419                  */
 1420                 sx_xlock(&lf_owner_graph_lock);
 1421                 error = lf_add_outgoing(state, lock);
 1422                 sx_xunlock(&lf_owner_graph_lock);
 1423 
 1424                 if (error) {
 1425 #ifdef LOCKF_DEBUG
 1426                         if (lockf_debug & 1)
 1427                                 lf_print("lf_setlock: deadlock", lock);
 1428 #endif
 1429                         lf_free_lock(lock);
 1430                         goto out;
 1431                 }
 1432 
 1433                 /*
 1434                  * We have added edges to everything that blocks
 1435                  * us. Sleep until they all go away.
 1436                  */
 1437                 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
 1438 #ifdef LOCKF_DEBUG
 1439                 if (lockf_debug & 1) {
 1440                         struct lockf_edge *e;
 1441                         LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
 1442                                 lf_print("lf_setlock: blocking on", e->le_to);
 1443                                 lf_printlist("lf_setlock", e->le_to);
 1444                         }
 1445                 }
 1446 #endif /* LOCKF_DEBUG */
 1447 
 1448                 if ((lock->lf_flags & F_WAIT) == 0) {
 1449                         /*
 1450                          * The caller requested async notification -
 1451                          * this callback happens when the blocking
 1452                          * lock is released, allowing the caller to
 1453                          * make another attempt to take the lock.
 1454                          */
 1455                         *cookiep = (void *) lock;
 1456                         error = EINPROGRESS;
 1457                         goto out;
 1458                 }
 1459 
 1460                 lock->lf_refs++;
 1461                 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
 1462                 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
 1463                 sigallowstop(stops_deferred);
 1464                 if (lf_free_lock(lock)) {
 1465                         error = EDOOFUS;
 1466                         goto out;
 1467                 }
 1468 
 1469                 /*
 1470                  * We may have been awakened by a signal and/or by a
 1471                  * debugger continuing us (in which cases we must
 1472                  * remove our lock graph edges) and/or by another
 1473                  * process releasing a lock (in which case our edges
 1474                  * have already been removed and we have been moved to
 1475                  * the active list). We may also have been woken by
 1476                  * lf_purgelocks which we report to the caller as
 1477                  * EINTR. In that case, lf_purgelocks will have
 1478                  * removed our lock graph edges.
 1479                  *
 1480                  * Note that it is possible to receive a signal after
 1481                  * we were successfully woken (and moved to the active
 1482                  * list) but before we resumed execution. In this
 1483                  * case, our lf_outedges list will be empty. We
 1484                  * pretend there was no error.
 1485                  *
 1486                  * Note also, if we have been sleeping long enough, we
 1487                  * may now have incoming edges from some newer lock
 1488                  * which is waiting behind us in the queue.
 1489                  */
 1490                 if (lock->lf_flags & F_INTR) {
 1491                         error = EINTR;
 1492                         lf_free_lock(lock);
 1493                         goto out;
 1494                 }
 1495                 if (LIST_EMPTY(&lock->lf_outedges)) {
 1496                         error = 0;
 1497                 } else {
 1498                         lf_cancel_lock(state, lock);
 1499                         goto out;
 1500                 }
 1501 #ifdef LOCKF_DEBUG
 1502                 if (lockf_debug & 1) {
 1503                         lf_print("lf_setlock: granted", lock);
 1504                 }
 1505 #endif
 1506                 goto out;
 1507         }
 1508         /*
 1509          * It looks like we are going to grant the lock. First add
 1510          * edges from any currently pending lock that the new lock
 1511          * would block.
 1512          */
 1513         error = lf_add_incoming(state, lock);
 1514         if (error) {
 1515 #ifdef LOCKF_DEBUG
 1516                 if (lockf_debug & 1)
 1517                         lf_print("lf_setlock: deadlock", lock);
 1518 #endif
 1519                 lf_free_lock(lock);
 1520                 goto out;
 1521         }
 1522 
 1523         /*
 1524          * No blocks!!  Add the lock.  Note that we will
 1525          * downgrade or upgrade any overlapping locks this
 1526          * process already owns.
 1527          */
 1528         lf_activate_lock(state, lock);
 1529         error = 0;
 1530 out:
 1531         return (error);
 1532 }
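/*
 * Illustrative example (a hedged userland sketch, not part of this
 * kernel file; the helper name is invented): how the lf_setlock()
 * paths above are reached through fcntl(2). F_SETLK takes the
 * nonblocking path (EAGAIN when lf_getblock() finds a conflict);
 * F_SETLKW sets F_WAIT and may sleep in sx_sleep(), returning EINTR
 * on signals or EDEADLK when the owner graph detects a deadlock.
 */
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

static int
lock_first_100_bytes(int fd, int wait)
{
        struct flock fl;

        fl.l_type = F_WRLCK;            /* exclusive byte-range lock */
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 100;                 /* bytes [0..99]; 0 would mean "to EOF" */
        if (fcntl(fd, wait ? F_SETLKW : F_SETLK, &fl) == -1) {
                if (errno == EAGAIN)
                        printf("conflicting lock held elsewhere\n");
                else if (errno == EDEADLK)
                        printf("deadlock detected\n");
                return (-1);
        }
        return (0);
}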
 1533 
 1534 /*
 1535  * Remove a byte-range lock on an inode.
 1536  *
 1537  * Generally, find the lock (or an overlap to that lock)
 1538  * and remove it (or shrink it), then wakeup anyone we can.
 1539  */
 1540 static int
 1541 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
 1542 {
 1543         struct lockf_entry *overlap;
 1544 
 1545         overlap = LIST_FIRST(&state->ls_active);
 1546 
 1547         if (overlap == NOLOCKF)
 1548                 return (0);
 1549 #ifdef LOCKF_DEBUG
 1550         if (unlock->lf_type != F_UNLCK)
 1551                 panic("lf_clearlock: bad type");
 1552         if (lockf_debug & 1)
 1553                 lf_print("lf_clearlock", unlock);
 1554 #endif /* LOCKF_DEBUG */
 1555 
 1556         lf_activate_lock(state, unlock);
 1557 
 1558         return (0);
 1559 }
 1560 
 1561 /*
 1562  * Check whether there is a blocking lock, and if so return its
 1563  * details in '*fl'.
 1564  */
 1565 static int
 1566 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
 1567 {
 1568         struct lockf_entry *block;
 1569 
 1570 #ifdef LOCKF_DEBUG
 1571         if (lockf_debug & 1)
 1572                 lf_print("lf_getlock", lock);
 1573 #endif /* LOCKF_DEBUG */
 1574 
 1575         if ((block = lf_getblock(state, lock))) {
 1576                 fl->l_type = block->lf_type;
 1577                 fl->l_whence = SEEK_SET;
 1578                 fl->l_start = block->lf_start;
 1579                 if (block->lf_end == OFF_MAX)
 1580                         fl->l_len = 0;
 1581                 else
 1582                         fl->l_len = block->lf_end - block->lf_start + 1;
 1583                 fl->l_pid = block->lf_owner->lo_pid;
 1584                 fl->l_sysid = block->lf_owner->lo_sysid;
 1585         } else {
 1586                 fl->l_type = F_UNLCK;
 1587         }
 1588         return (0);
 1589 }
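/*
 * Illustrative example (userland sketch, same headers as the fcntl
 * sketch above plus <stdint.h>; the helper name is invented): an
 * F_GETLK query serviced by lf_getlock(). Note how "to EOF"
 * (lf_end == OFF_MAX) is reported as l_len == 0.
 */
static void
show_blocker(int fd)
{
        struct flock fl = {
                .l_type = F_WRLCK,      /* would an exclusive lock conflict? */
                .l_whence = SEEK_SET,
                .l_start = 0,
                .l_len = 0,             /* test the whole file */
        };

        if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
                printf("blocked by pid %d, start %jd, len %jd\n",
                    (int)fl.l_pid, (intmax_t)fl.l_start, (intmax_t)fl.l_len);
}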
 1590 
 1591 /*
 1592  * Cancel an async lock request.
 1593  */
 1594 static int
 1595 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
 1596 {
 1597         struct lockf_entry *reallock;
 1598 
 1599         /*
 1600          * We need to match this request with an existing lock
 1601          * request.
 1602          */
 1603         LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
 1604                 if ((void *) reallock == cookie) {
 1605                         /*
 1606                          * Double-check that this lock looks right
 1607                          * (maybe use a rolling ID for the cancel
 1608                          * cookie instead?)
 1609                          */
 1610                         if (!(reallock->lf_vnode == lock->lf_vnode
 1611                                 && reallock->lf_start == lock->lf_start
 1612                                 && reallock->lf_end == lock->lf_end)) {
 1613                                 return (ENOENT);
 1614                         }
 1615 
 1616                         /*
 1617                          * Make sure this lock was async and then just
 1618                          * remove it from its wait lists.
 1619                          */
 1620                         if (!reallock->lf_async_task) {
 1621                                 return (ENOENT);
 1622                         }
 1623 
 1624                         /*
 1625                          * Note that since any other thread must take
 1626                          * state->ls_lock before it can possibly
 1627                          * trigger the async callback, we are safe
 1628                          * from a race with lf_wakeup_lock, i.e. we
 1629                          * can free the lock (actually our caller does
 1630                          * this).
 1631                          */
 1632                         lf_cancel_lock(state, reallock);
 1633                         return (0);
 1634                 }
 1635         }
 1636 
 1637         /*
 1638          * We didn't find a matching lock - not much we can do here.
 1639          */
 1640         return (ENOENT);
 1641 }
 1642 
 1643 /*
 1644  * Walk the list of locks for an inode and
 1645  * return the first blocking lock.
 1646  */
 1647 static struct lockf_entry *
 1648 lf_getblock(struct lockf *state, struct lockf_entry *lock)
 1649 {
 1650         struct lockf_entry *overlap;
 1651 
 1652         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
 1653                 /*
 1654                  * We may assume that the active list is sorted by
 1655                  * lf_start.
 1656                  */
 1657                 if (overlap->lf_start > lock->lf_end)
 1658                         break;
 1659                 if (!lf_blocks(lock, overlap))
 1660                         continue;
 1661                 return (overlap);
 1662         }
 1663         return (NOLOCKF);
 1664 }
 1665 
 1666 /*
 1667  * Walk the list of locks for an inode to find an overlapping lock (if
 1668  * any) and return a classification of that overlap.
 1669  *
 1670  * Arguments:
 1671  *      *overlap        The place in the lock list to start looking
 1672  *      lock            The lock which is being tested
 1673  *      type            Pass 'SELF' to test only locks with the same
 1674  *                      owner as lock, or 'OTHERS' to test only locks
 1675  *                      with a different owner
 1676  *
 1677  * Returns one of six values:
 1678  *      0) no overlap
 1679  *      1) overlap == lock
 1680  *      2) overlap contains lock
 1681  *      3) lock contains overlap
 1682  *      4) overlap starts before lock
 1683  *      5) overlap ends after lock
 1684  *
 1685  * If there is an overlapping lock, '*overlap' is set to point at the
 1686  * overlapping lock.
 1687  *
 1688  * NOTE: this returns only the FIRST overlapping lock.  There
 1689  *       may be more than one.
 1690  */
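/*
 * Illustrative diagram of the six cases, with the lock under test
 * drawn as |=====| and the candidate overlap as |-----|:
 *
 *      0) no overlap:                   |=====|
 *               |-----|
 *      1) overlap == lock:              |=====|
 *                                       |-----|
 *      2) overlap contains lock:        |=====|
 *                                     |---------|
 *      3) lock contains overlap:        |=====|
 *                                         |--|
 *      4) overlap starts before lock:   |=====|
 *                                    |-----|
 *      5) overlap ends after lock:      |=====|
 *                                           |-----|
 */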
 1691 static int
 1692 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
 1693 {
 1694         struct lockf_entry *lf;
 1695         off_t start, end;
 1696         int res;
 1697 
 1698         if ((*overlap) == NOLOCKF) {
 1699                 return (0);
 1700         }
 1701 #ifdef LOCKF_DEBUG
 1702         if (lockf_debug & 2)
 1703                 lf_print("lf_findoverlap: looking for overlap in", lock);
 1704 #endif /* LOCKF_DEBUG */
 1705         start = lock->lf_start;
 1706         end = lock->lf_end;
 1707         res = 0;
 1708         while (*overlap) {
 1709                 lf = *overlap;
 1710                 if (lf->lf_start > end)
 1711                         break;
 1712                 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
 1713                     ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
 1714                         *overlap = LIST_NEXT(lf, lf_link);
 1715                         continue;
 1716                 }
 1717 #ifdef LOCKF_DEBUG
 1718                 if (lockf_debug & 2)
 1719                         lf_print("\tchecking", lf);
 1720 #endif /* LOCKF_DEBUG */
 1721                 /*
 1722                  * OK, check for overlap
 1723                  *
 1724                  * Six cases:
 1725                  *      0) no overlap
 1726                  *      1) overlap == lock
 1727                  *      2) overlap contains lock
 1728                  *      3) lock contains overlap
 1729                  *      4) overlap starts before lock
 1730                  *      5) overlap ends after lock
 1731                  */
 1732                 if (start > lf->lf_end) {
 1733                         /* Case 0 */
 1734 #ifdef LOCKF_DEBUG
 1735                         if (lockf_debug & 2)
 1736                                 printf("no overlap\n");
 1737 #endif /* LOCKF_DEBUG */
 1738                         *overlap = LIST_NEXT(lf, lf_link);
 1739                         continue;
 1740                 }
 1741                 if (lf->lf_start == start && lf->lf_end == end) {
 1742                         /* Case 1 */
 1743 #ifdef LOCKF_DEBUG
 1744                         if (lockf_debug & 2)
 1745                                 printf("overlap == lock\n");
 1746 #endif /* LOCKF_DEBUG */
 1747                         res = 1;
 1748                         break;
 1749                 }
 1750                 if (lf->lf_start <= start && lf->lf_end >= end) {
 1751                         /* Case 2 */
 1752 #ifdef LOCKF_DEBUG
 1753                         if (lockf_debug & 2)
 1754                                 printf("overlap contains lock\n");
 1755 #endif /* LOCKF_DEBUG */
 1756                         res = 2;
 1757                         break;
 1758                 }
 1759                 if (start <= lf->lf_start && end >= lf->lf_end) {
 1760                         /* Case 3 */
 1761 #ifdef LOCKF_DEBUG
 1762                         if (lockf_debug & 2)
 1763                                 printf("lock contains overlap\n");
 1764 #endif /* LOCKF_DEBUG */
 1765                         res = 3;
 1766                         break;
 1767                 }
 1768                 if (lf->lf_start < start && lf->lf_end >= start) {
 1769                         /* Case 4 */
 1770 #ifdef LOCKF_DEBUG
 1771                         if (lockf_debug & 2)
 1772                                 printf("overlap starts before lock\n");
 1773 #endif /* LOCKF_DEBUG */
 1774                         res = 4;
 1775                         break;
 1776                 }
 1777                 if (lf->lf_start > start && lf->lf_end > end) {
 1778                         /* Case 5 */
 1779 #ifdef LOCKF_DEBUG
 1780                         if (lockf_debug & 2)
 1781                                 printf("overlap ends after lock\n");
 1782 #endif /* LOCKF_DEBUG */
 1783                         res = 5;
 1784                         break;
 1785                 }
 1786                 panic("lf_findoverlap: default");
 1787         }
 1788         return (res);
 1789 }
 1790 
 1791 /*
 1792  * Split the existing 'lock1', based on the extent of the lock
 1793  * described by 'lock2'. The existing lock should cover 'lock2'
 1794  * entirely.
 1795  *
 1796  * Any pending locks which have been unblocked are added to
 1797  * 'granted'.
 1798  */
 1799 static void
 1800 lf_split(struct lockf *state, struct lockf_entry *lock1,
 1801     struct lockf_entry *lock2, struct lockf_entry_list *granted)
 1802 {
 1803         struct lockf_entry *splitlock;
 1804 
 1805 #ifdef LOCKF_DEBUG
 1806         if (lockf_debug & 2) {
 1807                 lf_print("lf_split", lock1);
 1808                 lf_print("splitting from", lock2);
 1809         }
 1810 #endif /* LOCKF_DEBUG */
 1811         /*
 1812          * Check whether trimming one end suffices instead of a full split.
 1813          */
 1814         if (lock1->lf_start == lock2->lf_start) {
 1815                 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
 1816                 return;
 1817         }
 1818         if (lock1->lf_end == lock2->lf_end) {
 1819                 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
 1820                 return;
 1821         }
 1822         /*
 1823          * Make a new lock consisting of the last part of
 1824          * the encompassing lock.
 1825          */
 1826         splitlock = lf_alloc_lock(lock1->lf_owner);
 1827         memcpy(splitlock, lock1, sizeof *splitlock);
 1828         splitlock->lf_refs = 1;
 1829         if (splitlock->lf_flags & F_REMOTE)
 1830                 vref(splitlock->lf_vnode);
 1831 
 1832         /*
 1833          * This cannot cause a deadlock since any edges we would add
 1834          * to splitlock already exist in lock1. We must be sure to add
 1835          * necessary dependencies to splitlock before we reduce lock1;
 1836          * otherwise we may accidentally grant a pending lock that
 1837          * was blocked by the tail end of lock1.
 1838          */
 1839         splitlock->lf_start = lock2->lf_end + 1;
 1840         LIST_INIT(&splitlock->lf_outedges);
 1841         LIST_INIT(&splitlock->lf_inedges);
 1842         lf_add_incoming(state, splitlock);
 1843 
 1844         lf_set_end(state, lock1, lock2->lf_start - 1, granted);
 1845 
 1846         /*
 1847          * OK, now link it in
 1848          */
 1849         lf_insert_lock(state, splitlock);
 1850 }
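/*
 * Illustrative note: if lock1 covers [0..9] and lock2 describes
 * [3..5], the result is lock1 trimmed to [0..2] plus a new splitlock
 * inserted for [6..9]. If instead the two ranges share a start or an
 * end, no new entry is needed and a single lf_set_start() or
 * lf_set_end() call suffices.
 */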
 1851 
 1852 struct lockdesc {
 1853         STAILQ_ENTRY(lockdesc) link;
 1854         struct vnode *vp;
 1855         struct flock fl;
 1856 };
 1857 STAILQ_HEAD(lockdesclist, lockdesc);
 1858 
 1859 int
 1860 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
 1861 {
 1862         struct lockf *ls;
 1863         struct lockf_entry *lf;
 1864         struct lockdesc *ldesc;
 1865         struct lockdesclist locks;
 1866         int error;
 1867 
 1868         /*
 1869          * In order to keep the locking simple, we iterate over the
 1870          * active lock lists to build a list of locks that need
 1871          * releasing. We then call the iterator for each one in turn.
 1872          *
 1873          * We take an extra reference to the vnode for the duration to
 1874          * make sure it doesn't go away before we are finished.
 1875          */
 1876         STAILQ_INIT(&locks);
 1877         sx_xlock(&lf_lock_states_lock);
 1878         LIST_FOREACH(ls, &lf_lock_states, ls_link) {
 1879                 sx_xlock(&ls->ls_lock);
 1880                 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
 1881                         if (lf->lf_owner->lo_sysid != sysid)
 1882                                 continue;
 1883 
 1884                         ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
 1885                             M_WAITOK);
 1886                         ldesc->vp = lf->lf_vnode;
 1887                         vref(ldesc->vp);
 1888                         ldesc->fl.l_start = lf->lf_start;
 1889                         if (lf->lf_end == OFF_MAX)
 1890                                 ldesc->fl.l_len = 0;
 1891                         else
 1892                                 ldesc->fl.l_len =
 1893                                         lf->lf_end - lf->lf_start + 1;
 1894                         ldesc->fl.l_whence = SEEK_SET;
 1895                         ldesc->fl.l_type = F_UNLCK;
 1896                         ldesc->fl.l_pid = lf->lf_owner->lo_pid;
 1897                         ldesc->fl.l_sysid = sysid;
 1898                         STAILQ_INSERT_TAIL(&locks, ldesc, link);
 1899                 }
 1900                 sx_xunlock(&ls->ls_lock);
 1901         }
 1902         sx_xunlock(&lf_lock_states_lock);
 1903 
 1904         /*
 1905          * Call the iterator function for each lock in turn. If the
 1906          * iterator returns an error code, just free the rest of the
 1907          * lockdesc structures.
 1908          */
 1909         error = 0;
 1910         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
 1911                 STAILQ_REMOVE_HEAD(&locks, link);
 1912                 if (!error)
 1913                         error = fn(ldesc->vp, &ldesc->fl, arg);
 1914                 vrele(ldesc->vp);
 1915                 free(ldesc, M_LOCKF);
 1916         }
 1917 
 1918         return (error);
 1919 }
 1920 
 1921 int
 1922 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
 1923 {
 1924         struct lockf *ls;
 1925         struct lockf_entry *lf;
 1926         struct lockdesc *ldesc;
 1927         struct lockdesclist locks;
 1928         int error;
 1929 
 1930         /*
 1931          * In order to keep the locking simple, we iterate over the
 1932          * active lock lists to build a list of locks that need
 1933          * releasing. We then call the iterator for each one in turn.
 1934          *
 1935          * We take an extra reference to the vnode for the duration to
 1936          * make sure it doesn't go away before we are finished.
 1937          */
 1938         STAILQ_INIT(&locks);
 1939         VI_LOCK(vp);
 1940         ls = vp->v_lockf;
 1941         if (!ls) {
 1942                 VI_UNLOCK(vp);
 1943                 return (0);
 1944         }
 1945         MPASS(ls->ls_threads >= 0);
 1946         ls->ls_threads++;
 1947         VI_UNLOCK(vp);
 1948 
 1949         sx_xlock(&ls->ls_lock);
 1950         LIST_FOREACH(lf, &ls->ls_active, lf_link) {
 1951                 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
 1952                     M_WAITOK);
 1953                 ldesc->vp = lf->lf_vnode;
 1954                 vref(ldesc->vp);
 1955                 ldesc->fl.l_start = lf->lf_start;
 1956                 if (lf->lf_end == OFF_MAX)
 1957                         ldesc->fl.l_len = 0;
 1958                 else
 1959                         ldesc->fl.l_len =
 1960                                 lf->lf_end - lf->lf_start + 1;
 1961                 ldesc->fl.l_whence = SEEK_SET;
 1962                 ldesc->fl.l_type = F_UNLCK;
 1963                 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
 1964                 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
 1965                 STAILQ_INSERT_TAIL(&locks, ldesc, link);
 1966         }
 1967         sx_xunlock(&ls->ls_lock);
 1968         VI_LOCK(vp);
 1969         MPASS(ls->ls_threads > 0);
 1970         ls->ls_threads--;
 1971         wakeup(ls);
 1972         VI_UNLOCK(vp);
 1973 
 1974         /*
 1975          * Call the iterator function for each lock in turn. If the
 1976          * iterator returns an error code, just free the rest of the
 1977          * lockdesc structures.
 1978          */
 1979         error = 0;
 1980         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
 1981                 STAILQ_REMOVE_HEAD(&locks, link);
 1982                 if (!error)
 1983                         error = fn(ldesc->vp, &ldesc->fl, arg);
 1984                 vrele(ldesc->vp);
 1985                 free(ldesc, M_LOCKF);
 1986         }
 1987 
 1988         return (error);
 1989 }
 1990 
 1991 static int
 1992 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
 1993 {
 1994 
 1995         VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
 1996         return (0);
 1997 }
 1998 
 1999 void
 2000 lf_clearremotesys(int sysid)
 2001 {
 2002 
 2003         KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
 2004         lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
 2005 }
 2006 
 2007 int
 2008 lf_countlocks(int sysid)
 2009 {
 2010         int i;
 2011         struct lock_owner *lo;
 2012         int count;
 2013 
 2014         count = 0;
 2015         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
 2016                 sx_xlock(&lf_lock_owners[i].lock);
 2017                 LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link)
 2018                         if (lo->lo_sysid == sysid)
 2019                                 count += lo->lo_refs;
 2020                 sx_xunlock(&lf_lock_owners[i].lock);
 2021         }
 2022 
 2023         return (count);
 2024 }
 2025 
 2026 #ifdef LOCKF_DEBUG
 2027 
 2028 /*
 2029  * Return non-zero if y is reachable from x using a brute force
 2030  * search. If reachable and path is non-null, return the route taken
 2031  * in path.
 2032  */
 2033 static int
 2034 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
 2035     struct owner_vertex_list *path)
 2036 {
 2037         struct owner_edge *e;
 2038 
 2039         if (x == y) {
 2040                 if (path)
 2041                         TAILQ_INSERT_HEAD(path, x, v_link);
 2042                 return 1;
 2043         }
 2044 
 2045         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
 2046                 if (graph_reaches(e->e_to, y, path)) {
 2047                         if (path)
 2048                                 TAILQ_INSERT_HEAD(path, x, v_link);
 2049                         return 1;
 2050                 }
 2051         }
 2052         return 0;
 2053 }
 2054 
 2055 /*
 2056  * Perform consistency checks on the graph. Make sure the values of
 2057  * v_order are correct. If checkorder is non-zero, check no vertex can
 2058  * reach any other vertex with a smaller order.
 2059  */
 2060 static void
 2061 graph_check(struct owner_graph *g, int checkorder)
 2062 {
 2063         int i, j;
 2064 
 2065         for (i = 0; i < g->g_size; i++) {
 2066                 if (!g->g_vertices[i]->v_owner)
 2067                         continue;
 2068                 KASSERT(g->g_vertices[i]->v_order == i,
 2069                     ("lock graph vertices disordered"));
 2070                 if (checkorder) {
 2071                         for (j = 0; j < i; j++) {
 2072                                 if (!g->g_vertices[j]->v_owner)
 2073                                         continue;
 2074                                 KASSERT(!graph_reaches(g->g_vertices[i],
 2075                                         g->g_vertices[j], NULL),
 2076                                     ("lock graph vertices disordered"));
 2077                         }
 2078                 }
 2079         }
 2080 }
 2081 
 2082 static void
 2083 graph_print_vertices(struct owner_vertex_list *set)
 2084 {
 2085         struct owner_vertex *v;
 2086 
 2087         printf("{ ");
 2088         TAILQ_FOREACH(v, set, v_link) {
 2089                 printf("%d:", v->v_order);
 2090                 lf_print_owner(v->v_owner);
 2091                 if (TAILQ_NEXT(v, v_link))
 2092                         printf(", ");
 2093         }
 2094         printf(" }\n");
 2095 }
 2096 
 2097 #endif
 2098 
 2099 /*
 2100  * Calculate the sub-set of vertices v from the affected region [y..x]
 2101  * where v is reachable from y. Return -1 if a loop was detected
 2102  * (i.e. x is reachable from y); otherwise return the number of
 2103  * vertices in this subset.
 2104  */
 2105 static int
 2106 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
 2107     struct owner_vertex *y, struct owner_vertex_list *delta)
 2108 {
 2109         uint32_t gen;
 2110         struct owner_vertex *v;
 2111         struct owner_edge *e;
 2112         int n;
 2113 
 2114         /*
 2115          * We start with a set containing just y. Then for each vertex
 2116          * v in the set so far unprocessed, we add each vertex that v
 2117          * has an out-edge to and that is within the affected region
 2118          * [y..x]. If we see the vertex x on our travels, stop
 2119          * immediately.
 2120          */
 2121         TAILQ_INIT(delta);
 2122         TAILQ_INSERT_TAIL(delta, y, v_link);
 2123         v = y;
 2124         n = 1;
 2125         gen = g->g_gen;
 2126         while (v) {
 2127                 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
 2128                         if (e->e_to == x)
 2129                                 return -1;
 2130                         if (e->e_to->v_order < x->v_order
 2131                             && e->e_to->v_gen != gen) {
 2132                                 e->e_to->v_gen = gen;
 2133                                 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
 2134                                 n++;
 2135                         }
 2136                 }
 2137                 v = TAILQ_NEXT(v, v_link);
 2138         }
 2139 
 2140         return (n);
 2141 }
 2142 
 2143 /*
 2144  * Calculate the sub-set of vertices v from the affected region [y..x]
 2145  * where v reaches x. Return the number of vertices in this subset.
 2146  */
 2147 static int
 2148 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
 2149     struct owner_vertex *y, struct owner_vertex_list *delta)
 2150 {
 2151         uint32_t gen;
 2152         struct owner_vertex *v;
 2153         struct owner_edge *e;
 2154         int n;
 2155 
 2156         /*
 2157          * We start with a set containing just x. Then for each vertex
 2158          * v in the set so far unprocessed, we add each vertex that v
 2159          * has an in-edge from and that is within the affected region
 2160          * [y..x].
 2161          */
 2162         TAILQ_INIT(delta);
 2163         TAILQ_INSERT_TAIL(delta, x, v_link);
 2164         v = x;
 2165         n = 1;
 2166         gen = g->g_gen;
 2167         while (v) {
 2168                 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
 2169                         if (e->e_from->v_order > y->v_order
 2170                             && e->e_from->v_gen != gen) {
 2171                                 e->e_from->v_gen = gen;
 2172                                 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
 2173                                 n++;
 2174                         }
 2175                 }
 2176                 v = TAILQ_PREV(v, owner_vertex_list, v_link);
 2177         }
 2178 
 2179         return (n);
 2180 }
 2181 
 2182 static int
 2183 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
 2184 {
 2185         struct owner_vertex *v;
 2186         int i, j;
 2187 
 2188         TAILQ_FOREACH(v, set, v_link) {
 2189                 for (i = n;
 2190                      i > 0 && indices[i - 1] > v->v_order; i--)
 2191                         ;
 2192                 for (j = n - 1; j >= i; j--)
 2193                         indices[j + 1] = indices[j];
 2194                 indices[i] = v->v_order;
 2195                 n++;
 2196         }
 2197 
 2198         return (n);
 2199 }
 2200 
 2201 static int
 2202 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
 2203     struct owner_vertex_list *set)
 2204 {
 2205         struct owner_vertex *v, *vlowest;
 2206 
 2207         while (!TAILQ_EMPTY(set)) {
 2208                 vlowest = NULL;
 2209                 TAILQ_FOREACH(v, set, v_link) {
 2210                         if (!vlowest || v->v_order < vlowest->v_order)
 2211                                 vlowest = v;
 2212                 }
 2213                 TAILQ_REMOVE(set, vlowest, v_link);
 2214                 vlowest->v_order = indices[nextunused];
 2215                 g->g_vertices[vlowest->v_order] = vlowest;
 2216                 nextunused++;
 2217         }
 2218 
 2219         return (nextunused);
 2220 }
 2221 
 2222 static int
 2223 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
 2224     struct owner_vertex *y)
 2225 {
 2226         struct owner_edge *e;
 2227         struct owner_vertex_list deltaF, deltaB;
 2228         int nF, n, vi, i;
 2229         int *indices;
 2230         int nB __unused;
 2231 
 2232         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2233 
 2234         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
 2235                 if (e->e_to == y) {
 2236                         e->e_refs++;
 2237                         return (0);
 2238                 }
 2239         }
 2240 
 2241 #ifdef LOCKF_DEBUG
 2242         if (lockf_debug & 8) {
 2243                 printf("adding edge %d:", x->v_order);
 2244                 lf_print_owner(x->v_owner);
 2245                 printf(" -> %d:", y->v_order);
 2246                 lf_print_owner(y->v_owner);
 2247                 printf("\n");
 2248         }
 2249 #endif
 2250         if (y->v_order < x->v_order) {
 2251                 /*
 2252                  * The new edge violates the order. First find the set
 2253                  * of affected vertices reachable from y (deltaF) and
 2254                  * the set of affected vertices that reach x
 2255                  * (deltaB), using the graph generation number to
 2256                  * detect whether we have visited a given vertex
 2257                  * already. We re-order the graph so that each vertex
 2258                  * in deltaB appears before each vertex in deltaF.
 2259                  *
 2260                  * If x is a member of deltaF, then the new edge would
 2261                  * create a cycle. Otherwise, we may assume that
 2262                  * deltaF and deltaB are disjoint.
 2263                  */
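                /*
                 * Illustrative run (assuming an existing edge y->b):
                 * with order a(0) y(1) b(2) x(3) c(4) and a new edge
                 * x->y, deltaF = {y, b} and deltaB = {x}. The sorted
                 * index set is {1, 2, 3}; deltaB is re-numbered
                 * first, then deltaF, giving x(1) y(2) b(3), which
                 * restores a valid topological order.
                 */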
 2264                 g->g_gen++;
 2265                 if (g->g_gen == 0) {
 2266                         /*
 2267                          * Generation wrap.
 2268                          */
 2269                         for (vi = 0; vi < g->g_size; vi++) {
 2270                                 g->g_vertices[vi]->v_gen = 0;
 2271                         }
 2272                         g->g_gen++;
 2273                 }
 2274                 nF = graph_delta_forward(g, x, y, &deltaF);
 2275                 if (nF < 0) {
 2276 #ifdef LOCKF_DEBUG
 2277                         if (lockf_debug & 8) {
 2278                                 struct owner_vertex_list path;
 2279                                 printf("deadlock: ");
 2280                                 TAILQ_INIT(&path);
 2281                                 graph_reaches(y, x, &path);
 2282                                 graph_print_vertices(&path);
 2283                         }
 2284 #endif
 2285                         return (EDEADLK);
 2286                 }
 2287 
 2288 #ifdef LOCKF_DEBUG
 2289                 if (lockf_debug & 8) {
 2290                         printf("re-ordering graph vertices\n");
 2291                         printf("deltaF = ");
 2292                         graph_print_vertices(&deltaF);
 2293                 }
 2294 #endif
 2295 
 2296                 nB = graph_delta_backward(g, x, y, &deltaB);
 2297 
 2298 #ifdef LOCKF_DEBUG
 2299                 if (lockf_debug & 8) {
 2300                         printf("deltaB = ");
 2301                         graph_print_vertices(&deltaB);
 2302                 }
 2303 #endif
 2304 
 2305                 /*
 2306                  * We first build a set of vertex indices (vertex
 2307                  * order values) that we may use, then we re-assign
 2308                  * orders first to those vertices in deltaB, then to
 2309                  * deltaF. Note that the contents of deltaF and deltaB
 2310                  * may be partially disordered - we perform an
 2311                  * insertion sort while building our index set.
 2312                  */
 2313                 indices = g->g_indexbuf;
 2314                 n = graph_add_indices(indices, 0, &deltaF);
 2315                 graph_add_indices(indices, n, &deltaB);
 2316 
 2317                 /*
 2318                  * We must also be sure to maintain the relative
 2319                  * ordering of deltaF and deltaB when re-assigning
 2320                  * vertices. We do this by iteratively removing the
 2321                  * lowest ordered element from the set and assigning
 2322                  * it the next value from our new ordering.
 2323                  */
 2324                 i = graph_assign_indices(g, indices, 0, &deltaB);
 2325                 graph_assign_indices(g, indices, i, &deltaF);
 2326 
 2327 #ifdef LOCKF_DEBUG
 2328                 if (lockf_debug & 8) {
 2329                         struct owner_vertex_list set;
 2330                         TAILQ_INIT(&set);
 2331                         for (i = 0; i < nB + nF; i++)
 2332                                 TAILQ_INSERT_TAIL(&set,
 2333                                     g->g_vertices[indices[i]], v_link);
 2334                         printf("new ordering = ");
 2335                         graph_print_vertices(&set);
 2336                 }
 2337 #endif
 2338         }
 2339 
 2340         KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
 2341 
 2342 #ifdef LOCKF_DEBUG
 2343         if (lockf_debug & 8) {
 2344                 graph_check(g, TRUE);
 2345         }
 2346 #endif
 2347 
 2348         e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
 2349 
 2350         LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
 2351         LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
 2352         e->e_refs = 1;
 2353         e->e_from = x;
 2354         e->e_to = y;
 2355 
 2356         return (0);
 2357 }
 2358 
 2359 /*
 2360  * Remove an edge x->y from the graph.
 2361  */
 2362 static void
 2363 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
 2364     struct owner_vertex *y)
 2365 {
 2366         struct owner_edge *e;
 2367 
 2368         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2369 
 2370         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
 2371                 if (e->e_to == y)
 2372                         break;
 2373         }
 2374         KASSERT(e, ("Removing non-existent edge from deadlock graph"));
 2375 
 2376         e->e_refs--;
 2377         if (e->e_refs == 0) {
 2378 #ifdef LOCKF_DEBUG
 2379                 if (lockf_debug & 8) {
 2380                         printf("removing edge %d:", x->v_order);
 2381                         lf_print_owner(x->v_owner);
 2382                         printf(" -> %d:", y->v_order);
 2383                         lf_print_owner(y->v_owner);
 2384                         printf("\n");
 2385                 }
 2386 #endif
 2387                 LIST_REMOVE(e, e_outlink);
 2388                 LIST_REMOVE(e, e_inlink);
 2389                 free(e, M_LOCKF);
 2390         }
 2391 }
 2392 
 2393 /*
 2394  * Allocate a new vertex for the given lock owner, growing the
 2395  * graph's vertex array when it is full.
 2396  */
 2397 static struct owner_vertex *
 2398 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
 2399 {
 2400         struct owner_vertex *v;
 2401 
 2402         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2403 
 2404         v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
 2405         if (g->g_size == g->g_space) {
 2406                 g->g_vertices = realloc(g->g_vertices,
 2407                     2 * g->g_space * sizeof(struct owner_vertex *),
 2408                     M_LOCKF, M_WAITOK);
 2409                 free(g->g_indexbuf, M_LOCKF);
 2410                 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
 2411                     M_LOCKF, M_WAITOK);
 2412                 g->g_space = 2 * g->g_space;
 2413         }
 2414         v->v_order = g->g_size;
 2415         v->v_gen = g->g_gen;
 2416         g->g_vertices[g->g_size] = v;
 2417         g->g_size++;
 2418 
 2419         LIST_INIT(&v->v_outedges);
 2420         LIST_INIT(&v->v_inedges);
 2421         v->v_owner = lo;
 2422 
 2423         return (v);
 2424 }
 2425 
 2426 static void
 2427 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
 2428 {
 2429         struct owner_vertex *w;
 2430         int i;
 2431 
 2432         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2433 
 2434         KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
 2435         KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
 2436 
 2437         /*
 2438          * Remove from the graph's array and close up the gap,
 2439          * renumbering the other vertices.
 2440          */
 2441         for (i = v->v_order + 1; i < g->g_size; i++) {
 2442                 w = g->g_vertices[i];
 2443                 w->v_order--;
 2444                 g->g_vertices[i - 1] = w;
 2445         }
 2446         g->g_size--;
 2447 
 2448         free(v, M_LOCKF);
 2449 }
 2450 
 2451 static struct owner_graph *
 2452 graph_init(struct owner_graph *g)
 2453 {
 2454 
 2455         g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
 2456             M_LOCKF, M_WAITOK);
 2457         g->g_size = 0;
 2458         g->g_space = 10;
 2459         g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
 2460         g->g_gen = 0;
 2461 
 2462         return (g);
 2463 }
 2464 
 2465 struct kinfo_lockf_linked {
 2466         struct kinfo_lockf kl;
 2467         struct vnode *vp;
 2468         STAILQ_ENTRY(kinfo_lockf_linked) link;
 2469 };
 2470 
 2471 int
 2472 vfs_report_lockf(struct mount *mp, struct sbuf *sb)
 2473 {
 2474         struct lockf *ls;
 2475         struct lockf_entry *lf;
 2476         struct kinfo_lockf_linked *klf;
 2477         struct vnode *vp;
 2478         struct ucred *ucred;
 2479         char *fullpath, *freepath;
 2480         struct stat stt;
 2481         STAILQ_HEAD(, kinfo_lockf_linked) locks;
 2482         int error, gerror;
 2483 
 2484         STAILQ_INIT(&locks);
 2485         sx_slock(&lf_lock_states_lock);
 2486         LIST_FOREACH(ls, &lf_lock_states, ls_link) {
 2487                 sx_slock(&ls->ls_lock);
 2488                 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
 2489                         vp = lf->lf_vnode;
 2490                         if (VN_IS_DOOMED(vp) || vp->v_mount != mp)
 2491                                 continue;
 2492                         vhold(vp);
 2493                         klf = malloc(sizeof(struct kinfo_lockf_linked),
 2494                             M_LOCKF, M_WAITOK | M_ZERO);
 2495                         klf->vp = vp;
 2496                         klf->kl.kl_structsize = sizeof(struct kinfo_lockf);
 2497                         klf->kl.kl_start = lf->lf_start;
 2498                         klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 :
 2499                             lf->lf_end - lf->lf_start + 1;
 2500                         klf->kl.kl_rw = lf->lf_type == F_RDLCK ?
 2501                             KLOCKF_RW_READ : KLOCKF_RW_WRITE;
 2502                         if (lf->lf_owner->lo_sysid != 0) {
 2503                                 klf->kl.kl_pid = lf->lf_owner->lo_pid;
 2504                                 klf->kl.kl_sysid = lf->lf_owner->lo_sysid;
 2505                                 klf->kl.kl_type = KLOCKF_TYPE_REMOTE;
 2506                         } else if (lf->lf_owner->lo_pid == -1) {
 2507                                 klf->kl.kl_pid = -1;
 2508                                 klf->kl.kl_sysid = 0;
 2509                                 klf->kl.kl_type = KLOCKF_TYPE_FLOCK;
 2510                         } else {
 2511                                 klf->kl.kl_pid = lf->lf_owner->lo_pid;
 2512                                 klf->kl.kl_sysid = 0;
 2513                                 klf->kl.kl_type = KLOCKF_TYPE_PID;
 2514                         }
 2515                         STAILQ_INSERT_TAIL(&locks, klf, link);
 2516                 }
 2517                 sx_sunlock(&ls->ls_lock);
 2518         }
 2519         sx_sunlock(&lf_lock_states_lock);
 2520 
 2521         gerror = 0;
 2522         ucred = curthread->td_ucred;
 2523         while ((klf = STAILQ_FIRST(&locks)) != NULL) {
 2524                 STAILQ_REMOVE_HEAD(&locks, link);
 2525                 vp = klf->vp;
 2526                 if (gerror == 0 && vn_lock(vp, LK_SHARED) == 0) {
 2527                         error = prison_canseemount(ucred, vp->v_mount);
 2528                         if (error == 0)
 2529                                 error = VOP_STAT(vp, &stt, ucred, NOCRED);
 2530                         VOP_UNLOCK(vp);
 2531                         if (error == 0) {
 2532                                 klf->kl.kl_file_fsid = stt.st_dev;
 2533                                 klf->kl.kl_file_rdev = stt.st_rdev;
 2534                                 klf->kl.kl_file_fileid = stt.st_ino;
 2535                                 freepath = NULL;
 2536                                 fullpath = "-";
 2537                                 error = vn_fullpath(vp, &fullpath, &freepath);
 2538                                 if (error == 0)
 2539                                         strlcpy(klf->kl.kl_path, fullpath,
 2540                                             sizeof(klf->kl.kl_path));
 2541                                 free(freepath, M_TEMP);
 2542                                 if (sbuf_bcat(sb, &klf->kl,
 2543                                     klf->kl.kl_structsize) != 0) {
 2544                                         gerror = sbuf_error(sb);
 2545                                 }
 2546                         }
 2547                 }
 2548                 vdrop(vp);
 2549                 free(klf, M_LOCKF);
 2550         }
 2551 
 2552         return (gerror);
 2553 }
 2554 
 2555 static int
 2556 sysctl_kern_lockf_run(struct sbuf *sb)
 2557 {
 2558         struct mount *mp;
 2559         int error;
 2560 
 2561         error = 0;
 2562         mtx_lock(&mountlist_mtx);
 2563         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 2564                 error = vfs_busy(mp, MBF_MNTLSTLOCK);
 2565                 if (error != 0)
 2566                         continue;
 2567                 error = mp->mnt_op->vfs_report_lockf(mp, sb);
 2568                 mtx_lock(&mountlist_mtx);
 2569                 vfs_unbusy(mp);
 2570                 if (error != 0)
 2571                         break;
 2572         }
 2573         mtx_unlock(&mountlist_mtx);
 2574         return (error);
 2575 }
 2576 
 2577 static int
 2578 sysctl_kern_lockf(SYSCTL_HANDLER_ARGS)
 2579 {
 2580         struct sbuf sb;
 2581         int error, error2;
 2582 
 2583         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_lockf) * 5, req);
 2584         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
 2585         error = sysctl_kern_lockf_run(&sb);
 2586         error2 = sbuf_finish(&sb);
 2587         sbuf_delete(&sb);
 2588         return (error != 0 ? error : error2);
 2589 }
 2590 SYSCTL_PROC(_kern, KERN_LOCKF, lockf,
 2591     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
 2592     0, 0, sysctl_kern_lockf, "S,lockf",
 2593     "Advisory locks table");
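/*
 * Illustrative example (a hedged userland sketch, not part of this
 * kernel file, assuming a release that ships the kern.lockf sysctl
 * above; the helper name is invented): walking the returned array of
 * variable-length kinfo_lockf records.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void
dump_advisory_locks(void)
{
        struct kinfo_lockf *kl;
        char *buf, *p;
        size_t len;

        /* First call sizes the buffer, second call fills it. */
        if (sysctlbyname("kern.lockf", NULL, &len, NULL, 0) == -1)
                return;
        buf = malloc(len);
        if (buf == NULL ||
            sysctlbyname("kern.lockf", buf, &len, NULL, 0) == -1) {
                free(buf);
                return;
        }
        for (p = buf; p < buf + len; p += kl->kl_structsize) {
                kl = (struct kinfo_lockf *)(void *)p;
                printf("pid %d: %s lock, start %jd, len %jd, path %s\n",
                    (int)kl->kl_pid,
                    kl->kl_rw == KLOCKF_RW_READ ? "read" : "write",
                    (intmax_t)kl->kl_start, (intmax_t)kl->kl_len,
                    kl->kl_path);
        }
        free(buf);
}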
 2594 
 2595 #ifdef LOCKF_DEBUG
 2596 /*
 2597  * Print description of a lock owner
 2598  */
 2599 static void
 2600 lf_print_owner(struct lock_owner *lo)
 2601 {
 2602 
 2603         if (lo->lo_flags & F_REMOTE) {
 2604                 printf("remote pid %d, system %d",
 2605                     lo->lo_pid, lo->lo_sysid);
 2606         } else if (lo->lo_flags & F_FLOCK) {
 2607                 printf("file %p", lo->lo_id);
 2608         } else {
 2609                 printf("local pid %d", lo->lo_pid);
 2610         }
 2611 }
 2612 
 2613 /*
 2614  * Print out a lock.
 2615  */
 2616 static void
 2617 lf_print(char *tag, struct lockf_entry *lock)
 2618 {
 2619 
 2620         printf("%s: lock %p for ", tag, (void *)lock);
 2621         lf_print_owner(lock->lf_owner);
 2622         printf("\nvnode %p", lock->lf_vnode);
 2623         VOP_PRINT(lock->lf_vnode);
 2624         printf(" %s, start %jd, end ",
 2625             lock->lf_type == F_RDLCK ? "shared" :
 2626             lock->lf_type == F_WRLCK ? "exclusive" :
 2627             lock->lf_type == F_UNLCK ? "unlock" : "unknown",
 2628             (intmax_t)lock->lf_start);
 2629         if (lock->lf_end == OFF_MAX)
 2630                 printf("EOF");
 2631         else
 2632                 printf("%jd", (intmax_t)lock->lf_end);
 2633         if (!LIST_EMPTY(&lock->lf_outedges))
 2634                 printf(" block %p\n",
 2635                     (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
 2636         else
 2637                 printf("\n");
 2638 }
 2639 
 2640 static void
 2641 lf_printlist(char *tag, struct lockf_entry *lock)
 2642 {
 2643         struct lockf_entry *lf, *blk;
 2644         struct lockf_edge *e;
 2645 
 2646         printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode);
 2647         LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
 2648                 printf("\tlock %p for ",(void *)lf);
 2649                 lf_print_owner(lock->lf_owner);
 2650                 printf(", %s, start %jd, end %jd",
 2651                     lf->lf_type == F_RDLCK ? "shared" :
 2652                     lf->lf_type == F_WRLCK ? "exclusive" :
 2653                     lf->lf_type == F_UNLCK ? "unlock" :
 2654                     "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
 2655                 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
 2656                         blk = e->le_to;
 2657                         printf("\n\t\tlock request %p for ", (void *)blk);
 2658                         lf_print_owner(blk->lf_owner);
 2659                         printf(", %s, start %jd, end %jd",
 2660                             blk->lf_type == F_RDLCK ? "shared" :
 2661                             blk->lf_type == F_WRLCK ? "exclusive" :
 2662                             blk->lf_type == F_UNLCK ? "unlock" :
 2663                             "unknown", (intmax_t)blk->lf_start,
 2664                             (intmax_t)blk->lf_end);
 2665                         if (!LIST_EMPTY(&blk->lf_inedges))
 2666                                 panic("lf_printlist: bad list");
 2667                 }
 2668                 printf("\n");
 2669         }
 2670 }
 2671 #endif /* LOCKF_DEBUG */
