FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lockf.c

    1 /*-
    2  * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
    3  * Authors: Doug Rabson <dfr@rabson.org>
    4  * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 /*-
   28  * Copyright (c) 1982, 1986, 1989, 1993
   29  *      The Regents of the University of California.  All rights reserved.
   30  *
   31  * This code is derived from software contributed to Berkeley by
   32  * Scooter Morris at Genentech Inc.
   33  *
   34  * Redistribution and use in source and binary forms, with or without
   35  * modification, are permitted provided that the following conditions
   36  * are met:
   37  * 1. Redistributions of source code must retain the above copyright
   38  *    notice, this list of conditions and the following disclaimer.
   39  * 2. Redistributions in binary form must reproduce the above copyright
   40  *    notice, this list of conditions and the following disclaimer in the
   41  *    documentation and/or other materials provided with the distribution.
   42  * 4. Neither the name of the University nor the names of its contributors
   43  *    may be used to endorse or promote products derived from this software
   44  *    without specific prior written permission.
   45  *
   46  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   56  * SUCH DAMAGE.
   57  *
   58  *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
   59  */
   60 
   61 #include <sys/cdefs.h>
   62 __FBSDID("$FreeBSD$");
   63 
   64 #include "opt_debug_lockf.h"
   65 
   66 #include <sys/param.h>
   67 #include <sys/systm.h>
   68 #include <sys/hash.h>
   69 #include <sys/kernel.h>
   70 #include <sys/limits.h>
   71 #include <sys/lock.h>
   72 #include <sys/mount.h>
   73 #include <sys/mutex.h>
   74 #include <sys/proc.h>
   75 #include <sys/sx.h>
   76 #include <sys/unistd.h>
   77 #include <sys/vnode.h>
   78 #include <sys/malloc.h>
   79 #include <sys/fcntl.h>
   80 #include <sys/lockf.h>
   81 #include <sys/taskqueue.h>
   82 
   83 #ifdef LOCKF_DEBUG
   84 #include <sys/sysctl.h>
   85 
   86 #include <ufs/ufs/quota.h>
   87 #include <ufs/ufs/inode.h>
   88 
   89 static int      lockf_debug = 0; /* control debug output */
   90 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
   91 #endif
   92 
   93 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
   94 
   95 struct owner_edge;
   96 struct owner_vertex;
   97 struct owner_vertex_list;
   98 struct owner_graph;
   99 
  100 #define NOLOCKF (struct lockf_entry *)0
  101 #define SELF    0x1
  102 #define OTHERS  0x2
  103 static void      lf_init(void *);
  104 static int       lf_hash_owner(caddr_t, struct flock *, int);
  105 static int       lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
  106     int);
  107 static struct lockf_entry *
  108                  lf_alloc_lock(struct lock_owner *);
  109 static int       lf_free_lock(struct lockf_entry *);
  110 static int       lf_clearlock(struct lockf *, struct lockf_entry *);
  111 static int       lf_overlaps(struct lockf_entry *, struct lockf_entry *);
  112 static int       lf_blocks(struct lockf_entry *, struct lockf_entry *);
  113 static void      lf_free_edge(struct lockf_edge *);
  114 static struct lockf_edge *
  115                  lf_alloc_edge(void);
  116 static void      lf_alloc_vertex(struct lockf_entry *);
  117 static int       lf_add_edge(struct lockf_entry *, struct lockf_entry *);
  118 static void      lf_remove_edge(struct lockf_edge *);
  119 static void      lf_remove_outgoing(struct lockf_entry *);
  120 static void      lf_remove_incoming(struct lockf_entry *);
  121 static int       lf_add_outgoing(struct lockf *, struct lockf_entry *);
  122 static int       lf_add_incoming(struct lockf *, struct lockf_entry *);
  123 static int       lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
  124     int);
  125 static struct lockf_entry *
  126                  lf_getblock(struct lockf *, struct lockf_entry *);
  127 static int       lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
  128 static void      lf_insert_lock(struct lockf *, struct lockf_entry *);
  129 static void      lf_wakeup_lock(struct lockf *, struct lockf_entry *);
  130 static void      lf_update_dependancies(struct lockf *, struct lockf_entry *,
  131     int all, struct lockf_entry_list *);
  132 static void      lf_set_start(struct lockf *, struct lockf_entry *, off_t,
  133         struct lockf_entry_list*);
  134 static void      lf_set_end(struct lockf *, struct lockf_entry *, off_t,
  135         struct lockf_entry_list*);
  136 static int       lf_setlock(struct lockf *, struct lockf_entry *,
  137     struct vnode *, void **cookiep);
  138 static int       lf_cancel(struct lockf *, struct lockf_entry *, void *);
  139 static void      lf_split(struct lockf *, struct lockf_entry *,
  140     struct lockf_entry *, struct lockf_entry_list *);
  141 #ifdef LOCKF_DEBUG
  142 static int       graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
  143     struct owner_vertex_list *path);
  144 static void      graph_check(struct owner_graph *g, int checkorder);
  145 static void      graph_print_vertices(struct owner_vertex_list *set);
  146 #endif
  147 static int       graph_delta_forward(struct owner_graph *g,
  148     struct owner_vertex *x, struct owner_vertex *y,
  149     struct owner_vertex_list *delta);
  150 static int       graph_delta_backward(struct owner_graph *g,
  151     struct owner_vertex *x, struct owner_vertex *y,
  152     struct owner_vertex_list *delta);
  153 static int       graph_add_indices(int *indices, int n,
  154     struct owner_vertex_list *set);
  155 static int       graph_assign_indices(struct owner_graph *g, int *indices,
  156     int nextunused, struct owner_vertex_list *set);
  157 static int       graph_add_edge(struct owner_graph *g,
  158     struct owner_vertex *x, struct owner_vertex *y);
  159 static void      graph_remove_edge(struct owner_graph *g,
  160     struct owner_vertex *x, struct owner_vertex *y);
  161 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
  162     struct lock_owner *lo);
  163 static void      graph_free_vertex(struct owner_graph *g,
  164     struct owner_vertex *v);
  165 static struct owner_graph * graph_init(struct owner_graph *g);
  166 #ifdef LOCKF_DEBUG
  167 static void      lf_print(char *, struct lockf_entry *);
  168 static void      lf_printlist(char *, struct lockf_entry *);
  169 static void      lf_print_owner(struct lock_owner *);
  170 #endif
  171 
  172 /*
  173  * This structure is used to keep track of both local and remote lock
  174  * owners. The lf_owner field of the struct lockf_entry points back at
  175  * the lock owner structure. Each possible lock owner (local proc for
  176  * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
  177  * pair for remote locks) is represented by a unique instance of
  178  * struct lock_owner.
  179  *
  180  * If a lock owner has a lock that blocks some other lock or a lock
  181  * that is waiting for some other lock, it also has a vertex in the
  182  * owner_graph below.
  183  *
  184  * Locks:
  185  * (s)          locked by state->ls_lock
  186  * (S)          locked by lf_lock_states_lock
  187  * (l)          locked by lf_lock_owners_lock
  188  * (g)          locked by lf_owner_graph_lock
  189  * (c)          const until freeing
  190  */
  191 #define LOCK_OWNER_HASH_SIZE    256
  192 
  193 struct lock_owner {
  194         LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
  195         int     lo_refs;            /* (l) Number of locks referring to this */
  196         int     lo_flags;           /* (c) Flags passed to lf_advlock */
  197         caddr_t lo_id;              /* (c) Id value passed to lf_advlock */
  198         pid_t   lo_pid;             /* (c) Process Id of the lock owner */
  199         int     lo_sysid;           /* (c) System Id of the lock owner */
  200         struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
  201 };
  202 
  203 LIST_HEAD(lock_owner_list, lock_owner);
  204 
  205 static struct sx                lf_lock_states_lock;
  206 static struct lockf_list        lf_lock_states; /* (S) */
  207 static struct sx                lf_lock_owners_lock;
  208 static struct lock_owner_list   lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
  209 
  210 /*
  211  * Structures for deadlock detection.
  212  *
  213  * We have two types of directed graph: the first is the set of locks,
  214  * both active and pending, on a vnode. Within this graph, active locks
  215  * are terminal nodes in the graph (i.e. have no out-going
  216  * edges). Pending locks have out-going edges to each blocking active
  217  * lock that prevents the lock from being granted and also to each
  218  * older pending lock that would block them if it were active. The
  219  * graph for each vnode is naturally acyclic; new edges are only ever
  220  * added to or from new nodes (either new pending locks, which only add
  221  * out-going edges, or new active locks, which only add in-coming edges),
  222  * so they cannot create loops in the lock graph.
  223  *
  224  * The second graph is a global graph of lock owners. Each lock owner
  225  * is a vertex in that graph and an edge is added to the graph
  226  * whenever an edge is added to a vnode graph, with end points
  227  * corresponding to owner of the new pending lock and the owner of the
  228  * lock upon which it waits. In order to prevent deadlock, we only add
  229  * an edge to this graph if the new edge would not create a cycle.
  230  * 
  231  * The lock owner graph is topologically sorted, i.e. if a node has
  232  * any outgoing edges, then it has an order strictly less than any
  233  * node to which it has an outgoing edge. We preserve this ordering
  234  * (and detect cycles) on edge insertion using Algorithm PK from the
  235  * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
  236  * Graphs" (ACM Journal of Experimental Algorithmics, Vol. 11, Article
  237  * No. 1.7).
  238  */
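      /*
       * Editor's sketch, not part of the kernel source: a minimal
       * user-space model of the invariant above. An edge x->y can
       * only close a cycle when order[x] >= order[y]; in that case a
       * forward search from y, pruned to vertices ordered <= order[x],
       * either finds x (a cycle, so EDEADLK) or proves the edge safe.
       * The kernel then re-orders only the affected window (Algorithm
       * PK); for brevity this sketch recomputes the order naively.
       */
#if 0
#include <stdio.h>

#define NV 4                            /* fixed vertex count for the demo */
static int edge[NV][NV];                /* adjacency matrix */
static int order[NV] = { 0, 1, 2, 3 };  /* current topological order */

/* Is "to" reachable from "from" using only vertices ordered <= limit? */
static int
reaches(int from, int to, int limit)
{
        int i;

        if (from == to)
                return (1);
        for (i = 0; i < NV; i++)
                if (edge[from][i] && order[i] <= limit &&
                    reaches(i, to, limit))
                        return (1);
        return (0);
}

/* Recompute order[] from scratch (Kahn); the graph is known acyclic. */
static void
reorder(void)
{
        int indeg[NV] = { 0 }, done[NV] = { 0 }, i, j, next;

        for (i = 0; i < NV; i++)
                for (j = 0; j < NV; j++)
                        indeg[j] += edge[i][j];
        for (next = 0; next < NV; next++) {
                for (i = 0; done[i] || indeg[i] != 0; i++)
                        ;               /* always terminates: acyclic */
                done[i] = 1;
                order[i] = next;
                for (j = 0; j < NV; j++)
                        if (edge[i][j])
                                indeg[j]--;
        }
}

/* Add owner-graph edge x->y, refusing any edge that would close a cycle. */
static int
add_edge(int x, int y)
{
        if (order[x] >= order[y] && reaches(y, x, order[x]))
                return (-1);            /* graph_add_edge() returns EDEADLK */
        edge[x][y] = 1;
        reorder();
        return (0);
}

int
main(void)
{
        add_edge(0, 1);                 /* owner A waits on owner B */
        add_edge(1, 2);                 /* owner B waits on owner C */
        printf("C->A: %s\n", add_edge(2, 0) == 0 ? "ok" : "deadlock");
        return (0);
}
#endif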
  239 struct owner_vertex;
  240 
  241 struct owner_edge {
  242         LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
  243         LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
  244         int             e_refs;           /* (g) number of times added */
  245         struct owner_vertex *e_from;      /* (c) out-going from here */
  246         struct owner_vertex *e_to;        /* (c) in-coming to here */
  247 };
  248 LIST_HEAD(owner_edge_list, owner_edge);
  249 
  250 struct owner_vertex {
  251         TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
  252         uint32_t        v_gen;            /* (g) workspace for edge insertion */
  253         int             v_order;          /* (g) order of vertex in graph */
  254         struct owner_edge_list v_outedges;/* (g) list of out-edges */
  255         struct owner_edge_list v_inedges; /* (g) list of in-edges */
  256         struct lock_owner *v_owner;       /* (c) corresponding lock owner */
  257 };
  258 TAILQ_HEAD(owner_vertex_list, owner_vertex);
  259 
  260 struct owner_graph {
  261         struct owner_vertex** g_vertices; /* (g) pointers to vertices */
  262         int             g_size;           /* (g) number of vertices */
  263         int             g_space;          /* (g) space allocated for vertices */
  264         int             *g_indexbuf;      /* (g) workspace for loop detection */
  265         uint32_t        g_gen;            /* (g) increment when re-ordering */
  266 };
  267 
  268 static struct sx                lf_owner_graph_lock;
  269 static struct owner_graph       lf_owner_graph;
  270 
  271 /*
  272  * Initialise various structures and locks.
  273  */
  274 static void
  275 lf_init(void *dummy)
  276 {
  277         int i;
  278 
  279         sx_init(&lf_lock_states_lock, "lock states lock");
  280         LIST_INIT(&lf_lock_states);
  281 
  282         sx_init(&lf_lock_owners_lock, "lock owners lock");
  283         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
  284                 LIST_INIT(&lf_lock_owners[i]);
  285 
  286         sx_init(&lf_owner_graph_lock, "owner graph lock");
  287         graph_init(&lf_owner_graph);
  288 }
  289 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
  290 
  291 /*
  292  * Generate a hash value for a lock owner.
  293  */
  294 static int
  295 lf_hash_owner(caddr_t id, struct flock *fl, int flags)
  296 {
  297         uint32_t h;
  298 
  299         if (flags & F_REMOTE) {
  300                 h = HASHSTEP(0, fl->l_pid);
  301                 h = HASHSTEP(h, fl->l_sysid);
  302         } else if (flags & F_FLOCK) {
  303                 h = ((uintptr_t) id) >> 7;
  304         } else {
  305                 struct proc *p = (struct proc *) id;
  306                 h = HASHSTEP(0, p->p_pid);
  307                 h = HASHSTEP(h, 0);
  308         }
  309 
  310         return (h % LOCK_OWNER_HASH_SIZE);
  311 }
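      /*
       * Editor's sketch, not part of the source: bucket selection for
       * a local POSIX owner.  HASHSTEP() is assumed to be the
       * <sys/hash.h> step macro replicated below.
       */
#if 0
#include <stdio.h>

#define HASHSTEP(x, c)          ((((x) << 5) + (x)) + (c))      /* assumed */
#define LOCK_OWNER_HASH_SIZE    256

int
main(void)
{
        unsigned int h, pid = 1234;     /* hypothetical owner pid */

        h = HASHSTEP(0U, pid);          /* same two steps as above */
        h = HASHSTEP(h, 0);
        printf("pid %u -> bucket %u\n", pid, h % LOCK_OWNER_HASH_SIZE);
        return (0);
}
#endif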
  312 
  313 /*
  314  * Return true if a lock owner matches the details passed to
  315  * lf_advlock.
  316  */
  317 static int
  318 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
  319     int flags)
  320 {
  321         if (flags & F_REMOTE) {
  322                 return lo->lo_pid == fl->l_pid
  323                         && lo->lo_sysid == fl->l_sysid;
  324         } else {
  325                 return lo->lo_id == id;
  326         }
  327 }
  328 
  329 static struct lockf_entry *
  330 lf_alloc_lock(struct lock_owner *lo)
  331 {
  332         struct lockf_entry *lf;
  333 
  334         lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
  335 
  336 #ifdef LOCKF_DEBUG
  337         if (lockf_debug & 4)
  338                 printf("Allocated lock %p\n", lf);
  339 #endif
  340         if (lo) {
  341                 sx_xlock(&lf_lock_owners_lock);
  342                 lo->lo_refs++;
  343                 sx_xunlock(&lf_lock_owners_lock);
  344                 lf->lf_owner = lo;
  345         }
  346 
  347         return (lf);
  348 }
  349 
  350 static int
  351 lf_free_lock(struct lockf_entry *lock)
  352 {
  353 
  354         KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
  355         if (--lock->lf_refs > 0)
  356                 return (0);
  357         /*
  358          * Adjust the lock_owner reference count and
  359          * reclaim the entry if this is the last lock
  360          * for that owner.
  361          */
  362         struct lock_owner *lo = lock->lf_owner;
  363         if (lo) {
  364                 KASSERT(LIST_EMPTY(&lock->lf_outedges),
  365                     ("freeing lock with dependencies"));
  366                 KASSERT(LIST_EMPTY(&lock->lf_inedges),
  367                     ("freeing lock with dependents"));
  368                 sx_xlock(&lf_lock_owners_lock);
  369                 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
  370                 lo->lo_refs--;
  371                 if (lo->lo_refs == 0) {
  372 #ifdef LOCKF_DEBUG
  373                         if (lockf_debug & 1)
  374                                 printf("lf_free_lock: freeing lock owner %p\n",
  375                                     lo);
  376 #endif
  377                         if (lo->lo_vertex) {
  378                                 sx_xlock(&lf_owner_graph_lock);
  379                                 graph_free_vertex(&lf_owner_graph,
  380                                     lo->lo_vertex);
  381                                 sx_xunlock(&lf_owner_graph_lock);
  382                         }
  383                         LIST_REMOVE(lo, lo_link);
  384                         free(lo, M_LOCKF);
  385 #ifdef LOCKF_DEBUG
  386                         if (lockf_debug & 4)
  387                                 printf("Freed lock owner %p\n", lo);
  388 #endif
  389                 }
  390                 sx_unlock(&lf_lock_owners_lock);
  391         }
  392         if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
  393                 vrele(lock->lf_vnode);
  394                 lock->lf_vnode = NULL;
  395         }
  396 #ifdef LOCKF_DEBUG
  397         if (lockf_debug & 4)
  398                 printf("Freed lock %p\n", lock);
  399 #endif
  400         free(lock, M_LOCKF);
  401         return (1);
  402 }
  403 
  404 /*
  405  * Advisory record locking support
  406  */
  407 int
  408 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
  409     u_quad_t size)
  410 {
  411         struct lockf *state, *freestate = NULL;
  412         struct flock *fl = ap->a_fl;
  413         struct lockf_entry *lock;
  414         struct vnode *vp = ap->a_vp;
  415         caddr_t id = ap->a_id;
  416         int flags = ap->a_flags;
  417         int hash;
  418         struct lock_owner *lo;
  419         off_t start, end, oadd;
  420         int error;
  421 
  422         /*
  423          * Handle the F_UNLCKSYS case first - no need to mess about
  424          * creating a lock owner for this one.
  425          */
  426         if (ap->a_op == F_UNLCKSYS) {
  427                 lf_clearremotesys(fl->l_sysid);
  428                 return (0);
  429         }
  430 
  431         /*
  432          * Convert the flock structure into a start and end.
  433          */
  434         switch (fl->l_whence) {
  435 
  436         case SEEK_SET:
  437         case SEEK_CUR:
  438                 /*
  439                  * Caller is responsible for adding any necessary offset
  440                  * when SEEK_CUR is used.
  441                  */
  442                 start = fl->l_start;
  443                 break;
  444 
  445         case SEEK_END:
  446                 if (size > OFF_MAX ||
  447                     (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
  448                         return (EOVERFLOW);
  449                 start = size + fl->l_start;
  450                 break;
  451 
  452         default:
  453                 return (EINVAL);
  454         }
  455         if (start < 0)
  456                 return (EINVAL);
  457         if (fl->l_len < 0) {
  458                 if (start == 0)
  459                         return (EINVAL);
  460                 end = start - 1;
  461                 start += fl->l_len;
  462                 if (start < 0)
  463                         return (EINVAL);
  464         } else if (fl->l_len == 0) {
  465                 end = OFF_MAX;
  466         } else {
  467                 oadd = fl->l_len - 1;
  468                 if (oadd > OFF_MAX - start)
  469                         return (EOVERFLOW);
  470                 end = start + oadd;
  471         }
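              /*
               * Editor's illustration: with l_whence = SEEK_SET,
               * l_start = 100 and l_len = -50 the branch above yields
               * the closed range [50..99]; l_len = 50 yields
               * [100..149]; l_len = 0 locks to end-of-file, i.e.
               * [100..OFF_MAX].
               */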
  472 
  473 retry_setlock:
  474 
  475         /*
  476          * Avoid the common case of unlocking when the inode has no locks.
  477          */
  478         VI_LOCK(vp);
  479         if ((*statep) == NULL) {
  480                 if (ap->a_op != F_SETLK) {
  481                         fl->l_type = F_UNLCK;
  482                         VI_UNLOCK(vp);
  483                         return (0);
  484                 }
  485         }
  486         VI_UNLOCK(vp);
  487 
  488         /*
  489          * Map our arguments to an existing lock owner or create one
  490          * if this is the first time we have seen this owner.
  491          */
  492         hash = lf_hash_owner(id, fl, flags);
  493         sx_xlock(&lf_lock_owners_lock);
  494         LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
  495                 if (lf_owner_matches(lo, id, fl, flags))
  496                         break;
  497         if (!lo) {
  498                 /*
  499                  * We initialise the lock owner with a
  500                  * reference count matching the new lockf_entry
  501                  * structure created below.
  502                  */
  503                 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
  504                     M_WAITOK|M_ZERO);
  505 #ifdef LOCKF_DEBUG
  506                 if (lockf_debug & 4)
  507                         printf("Allocated lock owner %p\n", lo);
  508 #endif
  509 
  510                 lo->lo_refs = 1;
  511                 lo->lo_flags = flags;
  512                 lo->lo_id = id;
  513                 if (flags & F_REMOTE) {
  514                         lo->lo_pid = fl->l_pid;
  515                         lo->lo_sysid = fl->l_sysid;
  516                 } else if (flags & F_FLOCK) {
  517                         lo->lo_pid = -1;
  518                         lo->lo_sysid = 0;
  519                 } else {
  520                         struct proc *p = (struct proc *) id;
  521                         lo->lo_pid = p->p_pid;
  522                         lo->lo_sysid = 0;
  523                 }
  524                 lo->lo_vertex = NULL;
  525 
  526 #ifdef LOCKF_DEBUG
  527                 if (lockf_debug & 1) {
  528                         printf("lf_advlockasync: new lock owner %p ", lo);
  529                         lf_print_owner(lo);
  530                         printf("\n");
  531                 }
  532 #endif
  533 
  534                 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
  535         } else {
  536                 /*
  537                  * We have seen this lock owner before; increase its
  538                  * reference count to account for the new lockf_entry
  539                  * structure we create below.
  540                  */
  541                 lo->lo_refs++;
  542         }
  543         sx_xunlock(&lf_lock_owners_lock);
  544 
  545         /*
  546          * Create the lockf structure. We initialise the lf_owner
  547          * field here instead of in lf_alloc_lock() to avoid paying
  548          * the lf_lock_owners_lock tax twice.
  549          */
  550         lock = lf_alloc_lock(NULL);
  551         lock->lf_refs = 1;
  552         lock->lf_start = start;
  553         lock->lf_end = end;
  554         lock->lf_owner = lo;
  555         lock->lf_vnode = vp;
  556         if (flags & F_REMOTE) {
  557                 /*
  558                  * For remote locks, the caller may release its ref to
  559                  * the vnode at any time - we have to ref it here to
  560                  * prevent it from being recycled unexpectedly.
  561                  */
  562                 vref(vp);
  563         }
  564 
  565         /*
  566          * XXX The problem is that VTOI is UFS specific, so it will
  567          * break LOCKF_DEBUG for all filesystems other than UFS because
  568          * it casts the vnode's v_data pointer to struct inode *.
  569          */
  570 /*      lock->lf_inode = VTOI(ap->a_vp); */
  571         lock->lf_inode = (struct inode *)0;
  572         lock->lf_type = fl->l_type;
  573         LIST_INIT(&lock->lf_outedges);
  574         LIST_INIT(&lock->lf_inedges);
  575         lock->lf_async_task = ap->a_task;
  576         lock->lf_flags = ap->a_flags;
  577 
  578         /*
  579          * Do the requested operation. First find our state structure
  580          * and create a new one if necessary - the caller's *statep
  581          * variable and the state's ls_threads count are protected by
  582          * the vnode interlock (the allocation race is sketched below).
  583          */
  584         VI_LOCK(vp);
  585         if (vp->v_iflag & VI_DOOMED) {
  586                 VI_UNLOCK(vp);
  587                 lf_free_lock(lock);
  588                 return (ENOENT);
  589         }
  590 
  591         /*
  592          * Allocate a state structure if necessary.
  593          */
  594         state = *statep;
  595         if (state == NULL) {
  596                 struct lockf *ls;
  597 
  598                 VI_UNLOCK(vp);
  599 
  600                 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
  601                 sx_init(&ls->ls_lock, "ls_lock");
  602                 LIST_INIT(&ls->ls_active);
  603                 LIST_INIT(&ls->ls_pending);
  604                 ls->ls_threads = 1;
  605 
  606                 sx_xlock(&lf_lock_states_lock);
  607                 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
  608                 sx_xunlock(&lf_lock_states_lock);
  609 
  610                 /*
  611                  * Cope if we lost a race with some other thread while
  612                  * trying to allocate memory.
  613                  */
  614                 VI_LOCK(vp);
  615                 if (vp->v_iflag & VI_DOOMED) {
  616                         VI_UNLOCK(vp);
  617                         sx_xlock(&lf_lock_states_lock);
  618                         LIST_REMOVE(ls, ls_link);
  619                         sx_xunlock(&lf_lock_states_lock);
  620                         sx_destroy(&ls->ls_lock);
  621                         free(ls, M_LOCKF);
  622                         lf_free_lock(lock);
  623                         return (ENOENT);
  624                 }
  625                 if ((*statep) == NULL) {
  626                         state = *statep = ls;
  627                         VI_UNLOCK(vp);
  628                 } else {
  629                         state = *statep;
  630                         state->ls_threads++;
  631                         VI_UNLOCK(vp);
  632 
  633                         sx_xlock(&lf_lock_states_lock);
  634                         LIST_REMOVE(ls, ls_link);
  635                         sx_xunlock(&lf_lock_states_lock);
  636                         sx_destroy(&ls->ls_lock);
  637                         free(ls, M_LOCKF);
  638                 }
  639         } else {
  640                 state->ls_threads++;
  641                 VI_UNLOCK(vp);
  642         }
  643 
  644         sx_xlock(&state->ls_lock);
  645         /*
  646          * Recheck the doomed vnode after state->ls_lock is
  647          * locked. lf_purgelocks() requires that no new threads add
  648          * pending locks when vnode is marked by VI_DOOMED flag.
  649          */
  650         VI_LOCK(vp);
  651         if (vp->v_iflag & VI_DOOMED) {
  652                 state->ls_threads--;
  653                 wakeup(state);
  654                 VI_UNLOCK(vp);
  655                 sx_xunlock(&state->ls_lock);
  656                 lf_free_lock(lock);
  657                 return (ENOENT);
  658         }
  659         VI_UNLOCK(vp);
  660 
  661         switch (ap->a_op) {
  662         case F_SETLK:
  663                 error = lf_setlock(state, lock, vp, ap->a_cookiep);
  664                 break;
  665 
  666         case F_UNLCK:
  667                 error = lf_clearlock(state, lock);
  668                 lf_free_lock(lock);
  669                 break;
  670 
  671         case F_GETLK:
  672                 error = lf_getlock(state, lock, fl);
  673                 lf_free_lock(lock);
  674                 break;
  675 
  676         case F_CANCEL:
  677                 if (ap->a_cookiep)
  678                         error = lf_cancel(state, lock, *ap->a_cookiep);
  679                 else
  680                         error = EINVAL;
  681                 lf_free_lock(lock);
  682                 break;
  683 
  684         default:
  685                 lf_free_lock(lock);
  686                 error = EINVAL;
  687                 break;
  688         }
  689 
  690 #ifdef INVARIANTS
  691         /*
  692          * Check for some "can't happen" conditions: the active
  693          * lock list becoming disordered or containing mutually
  694          * blocking locks. We also check the pending list for locks
  695          * which should be active (i.e. have no out-going edges).
  696          */
  697         LIST_FOREACH(lock, &state->ls_active, lf_link) {
  698                 struct lockf_entry *lf;
  699                 if (LIST_NEXT(lock, lf_link))
  700                         KASSERT((lock->lf_start
  701                                 <= LIST_NEXT(lock, lf_link)->lf_start),
  702                             ("locks disordered"));
  703                 LIST_FOREACH(lf, &state->ls_active, lf_link) {
  704                         if (lock == lf)
  705                                 break;
  706                         KASSERT(!lf_blocks(lock, lf),
  707                             ("two conflicting active locks"));
  708                         if (lock->lf_owner == lf->lf_owner)
  709                                 KASSERT(!lf_overlaps(lock, lf),
  710                                     ("two overlapping locks from same owner"));
  711                 }
  712         }
  713         LIST_FOREACH(lock, &state->ls_pending, lf_link) {
  714                 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
  715                     ("pending lock which should be active"));
  716         }
  717 #endif
  718         sx_xunlock(&state->ls_lock);
  719 
  720         /*
  721          * If we have removed the last active lock on the vnode and
  722          * this is the last thread that was in-progress, we can free
  723          * the state structure. We update the caller's pointer inside
  724          * the vnode interlock but call free outside.
  725          *
  726          * XXX alternatively, keep the state structure around until
  727          * the filesystem recycles - requires a callback from the
  728          * filesystem.
  729          */
  730         VI_LOCK(vp);
  731 
  732         state->ls_threads--;
  733         wakeup(state);
  734         if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
  735                 KASSERT(LIST_EMPTY(&state->ls_pending),
  736                     ("freeing state with pending locks"));
  737                 freestate = state;
  738                 *statep = NULL;
  739         }
  740 
  741         VI_UNLOCK(vp);
  742 
  743         if (freestate) {
  744                 sx_xlock(&lf_lock_states_lock);
  745                 LIST_REMOVE(freestate, ls_link);
  746                 sx_xunlock(&lf_lock_states_lock);
  747                 sx_destroy(&freestate->ls_lock);
  748                 free(freestate, M_LOCKF);
  749         }
  750 
  751         if (error == EDOOFUS) {
  752                 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
  753                 goto retry_setlock;
  754         }
  755         return (error);
  756 }
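      /*
       * Editor's sketch, not part of the kernel source: the state
       * allocation race above in user-space form.  Because allocation
       * may sleep, the interlock is dropped around it and the result
       * is installed only if no other thread got there first; the
       * loser frees its copy.  The kernel additionally rechecks
       * VI_DOOMED after reacquiring the interlock.
       */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct state {
        int     ls_threads;             /* stand-in for struct lockf */
};

static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;

static struct state *
get_state(struct state **statep)
{
        struct state *s, *ns;

        pthread_mutex_lock(&interlock);
        if ((s = *statep) == NULL) {
                /* Cannot sleep in the allocator while holding the lock. */
                pthread_mutex_unlock(&interlock);
                ns = calloc(1, sizeof(*ns));    /* error handling omitted */
                pthread_mutex_lock(&interlock);
                if (*statep == NULL) {
                        s = *statep = ns;       /* we won the race */
                } else {
                        s = *statep;            /* somebody beat us to it */
                        free(ns);
                }
        }
        s->ls_threads++;
        pthread_mutex_unlock(&interlock);
        return (s);
}
#endif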
  757 
  758 int
  759 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
  760 {
  761         struct vop_advlockasync_args a;
  762 
  763         a.a_vp = ap->a_vp;
  764         a.a_id = ap->a_id;
  765         a.a_op = ap->a_op;
  766         a.a_fl = ap->a_fl;
  767         a.a_flags = ap->a_flags;
  768         a.a_task = NULL;
  769         a.a_cookiep = NULL;
  770 
  771         return (lf_advlockasync(&a, statep, size));
  772 }
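      /*
       * Editor's sketch, not part of this file: a typical filesystem
       * consumer, modeled on the UFS VOP_ADVLOCK implementation.  It
       * forwards to lf_advlock() with the per-inode lock list head and
       * the current file size (details vary between FreeBSD versions).
       */
#if 0
static int
ufs_advlock(struct vop_advlock_args *ap)
{
        struct inode *ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
}
#endif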
  773 
  774 void
  775 lf_purgelocks(struct vnode *vp, struct lockf **statep)
  776 {
  777         struct lockf *state;
  778         struct lockf_entry *lock, *nlock;
  779 
  780         /*
  781          * For this to work correctly, the caller must ensure that no
  782          * other threads enter the locking system for this vnode,
  783          * e.g. by checking VI_DOOMED. We wake up any threads that are
  784          * sleeping, waiting for locks on this vnode, and then free all
  785          * the remaining locks.
  786          */
  787         VI_LOCK(vp);
  788         KASSERT(vp->v_iflag & VI_DOOMED,
  789             ("lf_purgelocks: vp %p has not vgone yet", vp));
  790         state = *statep;
  791         if (state) {
  792                 *statep = NULL;
  793                 state->ls_threads++;
  794                 VI_UNLOCK(vp);
  795 
  796                 sx_xlock(&state->ls_lock);
  797                 sx_xlock(&lf_owner_graph_lock);
  798                 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
  799                         LIST_REMOVE(lock, lf_link);
  800                         lf_remove_outgoing(lock);
  801                         lf_remove_incoming(lock);
  802 
  803                         /*
  804                          * If it's an async lock, we can just free it
  805                          * here; otherwise we let the sleeping thread
  806                          * free it.
  807                          */
  808                         if (lock->lf_async_task) {
  809                                 lf_free_lock(lock);
  810                         } else {
  811                                 lock->lf_flags |= F_INTR;
  812                                 wakeup(lock);
  813                         }
  814                 }
  815                 sx_xunlock(&lf_owner_graph_lock);
  816                 sx_xunlock(&state->ls_lock);
  817 
  818                 /*
  819                  * Wait for all other threads, sleeping and otherwise,
  820                  * to leave.
  821                  */
  822                 VI_LOCK(vp);
  823                 while (state->ls_threads > 1)
  824                         msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
  825                 VI_UNLOCK(vp);
  826 
  827                 /*
  828                  * We can just free all the active locks since they
  829          * will have no dependencies (we removed them all
  830                  * above). We don't need to bother locking since we
  831                  * are the last thread using this state structure.
  832                  */
  833                 KASSERT(LIST_EMPTY(&state->ls_pending),
  834                     ("lock pending for %p", state));
  835                 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
  836                         LIST_REMOVE(lock, lf_link);
  837                         lf_free_lock(lock);
  838                 }
  839                 sx_xlock(&lf_lock_states_lock);
  840                 LIST_REMOVE(state, ls_link);
  841                 sx_xunlock(&lf_lock_states_lock);
  842                 sx_destroy(&state->ls_lock);
  843                 free(state, M_LOCKF);
  844         } else {
  845                 VI_UNLOCK(vp);
  846         }
  847 }
  848 
  849 /*
  850  * Return non-zero if locks 'x' and 'y' overlap.
  851  */
  852 static int
  853 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
  854 {
  855 
  856         return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
  857 }
  858 
  859 /*
  860  * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
  861  */
  862 static int
  863 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
  864 {
  865 
  866         return x->lf_owner != y->lf_owner
  867                 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
  868                 && lf_overlaps(x, y);
  869 }
  870 
  871 /*
  872  * Allocate a lock edge from the free list
  873  */
  874 static struct lockf_edge *
  875 lf_alloc_edge(void)
  876 {
  877 
  878         return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
  879 }
  880 
  881 /*
  882  * Free a lock edge.
  883  */
  884 static void
  885 lf_free_edge(struct lockf_edge *e)
  886 {
  887 
  888         free(e, M_LOCKF);
  889 }
  890 
  891 
  892 /*
  893  * Ensure that the lock's owner has a corresponding vertex in the
  894  * owner graph.
  895  */
  896 static void
  897 lf_alloc_vertex(struct lockf_entry *lock)
  898 {
  899         struct owner_graph *g = &lf_owner_graph;
  900 
  901         if (!lock->lf_owner->lo_vertex)
  902                 lock->lf_owner->lo_vertex =
  903                         graph_alloc_vertex(g, lock->lf_owner);
  904 }
  905 
  906 /*
  907  * Attempt to record an edge from lock x to lock y. Return EDEADLK if
  908  * the new edge would cause a cycle in the owner graph.
  909  */
  910 static int
  911 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
  912 {
  913         struct owner_graph *g = &lf_owner_graph;
  914         struct lockf_edge *e;
  915         int error;
  916 
  917 #ifdef INVARIANTS
  918         LIST_FOREACH(e, &x->lf_outedges, le_outlink)
  919                 KASSERT(e->le_to != y, ("adding lock edge twice"));
  920 #endif
  921 
  922         /*
  923          * Make sure the two owners have entries in the owner graph.
  924          */
  925         lf_alloc_vertex(x);
  926         lf_alloc_vertex(y);
  927 
  928         error = graph_add_edge(g, x->lf_owner->lo_vertex,
  929             y->lf_owner->lo_vertex);
  930         if (error)
  931                 return (error);
  932 
  933         e = lf_alloc_edge();
  934         LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
  935         LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
  936         e->le_from = x;
  937         e->le_to = y;
  938 
  939         return (0);
  940 }
  941 
  942 /*
  943  * Remove an edge from the lock graph.
  944  */
  945 static void
  946 lf_remove_edge(struct lockf_edge *e)
  947 {
  948         struct owner_graph *g = &lf_owner_graph;
  949         struct lockf_entry *x = e->le_from;
  950         struct lockf_entry *y = e->le_to;
  951 
  952         graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
  953         LIST_REMOVE(e, le_outlink);
  954         LIST_REMOVE(e, le_inlink);
  955         e->le_from = NULL;
  956         e->le_to = NULL;
  957         lf_free_edge(e);
  958 }
  959 
  960 /*
  961  * Remove all out-going edges from lock x.
  962  */
  963 static void
  964 lf_remove_outgoing(struct lockf_entry *x)
  965 {
  966         struct lockf_edge *e;
  967 
  968         while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
  969                 lf_remove_edge(e);
  970         }
  971 }
  972 
  973 /*
  974  * Remove all in-coming edges from lock x.
  975  */
  976 static void
  977 lf_remove_incoming(struct lockf_entry *x)
  978 {
  979         struct lockf_edge *e;
  980 
  981         while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
  982                 lf_remove_edge(e);
  983         }
  984 }
  985 
  986 /*
  987  * Walk the list of locks for the file and create an out-going edge
  988  * from lock to each blocking lock.
  989  */
  990 static int
  991 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
  992 {
  993         struct lockf_entry *overlap;
  994         int error;
  995 
  996         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
  997                 /*
  998                  * We may assume that the active list is sorted by
  999                  * lf_start.
 1000                  */
 1001                 if (overlap->lf_start > lock->lf_end)
 1002                         break;
 1003                 if (!lf_blocks(lock, overlap))
 1004                         continue;
 1005 
 1006                 /*
 1007                  * We've found a blocking lock. Add the corresponding
 1008                  * edge to the graphs and see if it would cause a
 1009                  * deadlock.
 1010                  */
 1011                 error = lf_add_edge(lock, overlap);
 1012 
 1013                 /*
 1014                  * The only error that lf_add_edge returns is EDEADLK.
 1015                  * Remove any edges we added and return the error.
 1016                  */
 1017                 if (error) {
 1018                         lf_remove_outgoing(lock);
 1019                         return (error);
 1020                 }
 1021         }
 1022 
 1023         /*
 1024          * We also need to add edges to sleeping locks that block
 1025          * us. This ensures that lf_wakeup_lock cannot grant two
 1026          * mutually blocking locks simultaneously and also enforces a
 1027          * 'first come, first served' fairness model. Note that this
 1028          * only happens if we are blocked by at least one active lock
 1029          * due to the call to lf_getblock in lf_setlock below.
 1030          */
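              /*
               * Editor's illustration of the fairness rule, in the
               * notation of the table in lf_cancel_lock below:
               *
               * Owner        Action          Result          Dependencies
               *
               * A:           write [0..9]    succeeds
               * B:           write [0..9]    blocked         B->A
               * C:           read  [0..9]    blocked         C->A,C->B
               *
               * Without the edge C->B, A's unlock could grant C ahead
               * of the older waiter B.
               */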
 1031         LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
 1032                 if (!lf_blocks(lock, overlap))
 1033                         continue;
 1034                 /*
 1035                  * We've found a blocking lock. Add the corresponding
 1036                  * edge to the graphs and see if it would cause a
 1037                  * deadlock.
 1038                  */
 1039                 error = lf_add_edge(lock, overlap);
 1040 
 1041                 /*
 1042                  * The only error that lf_add_edge returns is EDEADLK.
 1043                  * Remove any edges we added and return the error.
 1044                  */
 1045                 if (error) {
 1046                         lf_remove_outgoing(lock);
 1047                         return (error);
 1048                 }
 1049         }
 1050 
 1051         return (0);
 1052 }
 1053 
 1054 /*
 1055  * Walk the list of pending locks for the file and create an in-coming
 1056  * edge to 'lock' from each pending lock that it blocks.
 1057  */
 1058 static int
 1059 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
 1060 {
 1061         struct lockf_entry *overlap;
 1062         int error;
 1063 
 1064         LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
 1065                 if (!lf_blocks(lock, overlap))
 1066                         continue;
 1067 
 1068                 /*
 1069                  * We've found a blocking lock. Add the corresponding
 1070                  * edge to the graphs and see if it would cause a
 1071                  * deadlock.
 1072                  */
 1073                 error = lf_add_edge(overlap, lock);
 1074 
 1075                 /*
 1076                  * The only error that lf_add_edge returns is EDEADLK.
 1077                  * Remove any edges we added and return the error.
 1078                  */
 1079                 if (error) {
 1080                         lf_remove_incoming(lock);
 1081                         return (error);
 1082                 }
 1083         }
 1084         return (0);
 1085 }
 1086 
 1087 /*
 1088  * Insert lock into the active list, keeping list entries ordered by
 1089  * increasing values of lf_start.
 1090  */
 1091 static void
 1092 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
 1093 {
 1094         struct lockf_entry *lf, *lfprev;
 1095 
 1096         if (LIST_EMPTY(&state->ls_active)) {
 1097                 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
 1098                 return;
 1099         }
 1100 
 1101         lfprev = NULL;
 1102         LIST_FOREACH(lf, &state->ls_active, lf_link) {
 1103                 if (lf->lf_start > lock->lf_start) {
 1104                         LIST_INSERT_BEFORE(lf, lock, lf_link);
 1105                         return;
 1106                 }
 1107                 lfprev = lf;
 1108         }
 1109         LIST_INSERT_AFTER(lfprev, lock, lf_link);
 1110 }
 1111 
 1112 /*
 1113  * Wake up a sleeping lock and remove it from the pending list now
 1114  * that all its dependencies have been resolved. The caller should
 1115  * arrange for the lock to be added to the active list, adjusting any
 1116  * existing locks for the same owner as needed.
 1117  */
 1118 static void
 1119 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
 1120 {
 1121 
 1122         /*
 1123          * Remove from ls_pending list and wake up the caller
 1124          * or start the async notification, as appropriate.
 1125          */
 1126         LIST_REMOVE(wakelock, lf_link);
 1127 #ifdef LOCKF_DEBUG
 1128         if (lockf_debug & 1)
 1129                 lf_print("lf_wakeup_lock: awakening", wakelock);
 1130 #endif /* LOCKF_DEBUG */
 1131         if (wakelock->lf_async_task) {
 1132                 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
 1133         } else {
 1134                 wakeup(wakelock);
 1135         }
 1136 }
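      /*
       * Editor's sketch, not part of the kernel source: the shape of
       * an asynchronous caller.  All names here are hypothetical; the
       * task and cookie must outlive the request.  A request that
       * blocks is expected to return EINPROGRESS, the task runs once
       * the lock is granted, and F_CANCEL with the saved cookie
       * abandons a queued request.
       */
#if 0
struct async_req {
        struct task     ar_task;        /* runs when the lock is granted */
        void            *ar_cookie;     /* for a later F_CANCEL */
};

static void
granted_cb(void *ctx, int pending)
{
        /* The byte range is now locked; resume the stalled request. */
}

static int
queue_lock(struct vnode *vp, caddr_t id, struct flock *fl,
    struct async_req *ar)
{
        struct vop_advlockasync_args a;

        TASK_INIT(&ar->ar_task, 0, granted_cb, ar);
        a.a_vp = vp;
        a.a_id = id;                    /* local POSIX owner: struct proc * */
        a.a_op = F_SETLK;
        a.a_fl = fl;
        a.a_flags = 0;
        a.a_task = &ar->ar_task;
        a.a_cookiep = &ar->ar_cookie;
        return (lf_advlockasync(&a, &vp->v_lockf, 0));
}
#endif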
 1137 
 1138 /*
 1139  * Re-check all dependent locks and remove edges to locks that we no
 1140  * longer block. If 'all' is non-zero, the lock has been removed and
 1141  * we must remove all the dependencies; otherwise it has simply been
 1142  * reduced but remains active. Any pending locks which have been
 1143  * unblocked are added to 'granted'.
 1144  */
 1145 static void
 1146 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
 1147         struct lockf_entry_list *granted)
 1148 {
 1149         struct lockf_edge *e, *ne;
 1150         struct lockf_entry *deplock;
 1151 
 1152         LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
 1153                 deplock = e->le_from;
 1154                 if (all || !lf_blocks(lock, deplock)) {
 1155                         sx_xlock(&lf_owner_graph_lock);
 1156                         lf_remove_edge(e);
 1157                         sx_xunlock(&lf_owner_graph_lock);
 1158                         if (LIST_EMPTY(&deplock->lf_outedges)) {
 1159                                 lf_wakeup_lock(state, deplock);
 1160                                 LIST_INSERT_HEAD(granted, deplock, lf_link);
 1161                         }
 1162                 }
 1163         }
 1164 }
 1165 
 1166 /*
 1167  * Set the start of an existing active lock, updating dependencies and
 1168  * adding any newly woken locks to 'granted'.
 1169  */
 1170 static void
 1171 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
 1172         struct lockf_entry_list *granted)
 1173 {
 1174 
 1175         KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
 1176         lock->lf_start = new_start;
 1177         LIST_REMOVE(lock, lf_link);
 1178         lf_insert_lock(state, lock);
 1179         lf_update_dependancies(state, lock, FALSE, granted);
 1180 }
 1181 
 1182 /*
 1183  * Set the end of an existing active lock, updating dependencies and
 1184  * adding any newly woken locks to 'granted'.
 1185  */
 1186 static void
 1187 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
 1188         struct lockf_entry_list *granted)
 1189 {
 1190 
 1191         KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
 1192         lock->lf_end = new_end;
 1193         lf_update_dependancies(state, lock, FALSE, granted);
 1194 }
 1195 
 1196 /*
 1197  * Add a lock to the active list, updating or removing any current
 1198  * locks owned by the same owner and processing any pending locks that
 1199  * become unblocked as a result. This code is also used for unlock
 1200  * since the logic for updating existing locks is identical.
 1201  *
 1202  * Processing the new lock may unblock existing pending locks as a
 1203  * result of downgrading/unlocking. We simply activate the newly
 1204  * granted locks by looping.
 1205  *
 1206  * Since the new lock already has its dependencies set up, we always
 1207  * add it to the list (unless it's an unlock request). This may
 1208  * fragment the lock list in some pathological cases, but it's probably
 1209  * not a real problem.
 1210  */
 1211 static void
 1212 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
 1213 {
 1214         struct lockf_entry *overlap, *lf;
 1215         struct lockf_entry_list granted;
 1216         int ovcase;
 1217 
 1218         LIST_INIT(&granted);
 1219         LIST_INSERT_HEAD(&granted, lock, lf_link);
 1220 
 1221         while (!LIST_EMPTY(&granted)) {
 1222                 lock = LIST_FIRST(&granted);
 1223                 LIST_REMOVE(lock, lf_link);
 1224 
 1225                 /*
 1226                  * Skip over locks owned by other processes.  Handle
 1227                  * any locks that overlap and are owned by ourselves.
 1228                  */
 1229                 overlap = LIST_FIRST(&state->ls_active);
 1230                 for (;;) {
 1231                         ovcase = lf_findoverlap(&overlap, lock, SELF);
 1232 
 1233 #ifdef LOCKF_DEBUG
 1234                         if (ovcase && (lockf_debug & 2)) {
 1235                                 printf("lf_setlock: overlap %d", ovcase);
 1236                                 lf_print("", overlap);
 1237                         }
 1238 #endif
 1239                         /*
 1240                          * Six cases:
 1241                          *      0) no overlap
 1242                          *      1) overlap == lock
 1243                          *      2) overlap contains lock
 1244                          *      3) lock contains overlap
 1245                          *      4) overlap starts before lock
 1246                          *      5) overlap ends after lock
 1247                          */
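                              /*
                               * Editor's illustration: with lock =
                               * [10..19], the cases correspond to
                               * overlap = [30..40] (0), [10..19] (1),
                               * [5..25] (2), [12..15] (3), [5..15] (4)
                               * and [15..25] (5).
                               */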
 1248                         switch (ovcase) {
 1249                         case 0: /* no overlap */
 1250                                 break;
 1251 
 1252                         case 1: /* overlap == lock */
 1253                                 /*
 1254                                  * We have already set up the
 1255                                  * dependents for the new lock, taking
 1256                                  * into account a possible downgrade
 1257                                  * or unlock. Remove the old lock.
 1258                                  */
 1259                                 LIST_REMOVE(overlap, lf_link);
 1260                                 lf_update_dependancies(state, overlap, TRUE,
 1261                                         &granted);
 1262                                 lf_free_lock(overlap);
 1263                                 break;
 1264 
 1265                         case 2: /* overlap contains lock */
 1266                                 /*
 1267                                  * Just split the existing lock.
 1268                                  */
 1269                                 lf_split(state, overlap, lock, &granted);
 1270                                 break;
 1271 
 1272                         case 3: /* lock contains overlap */
 1273                                 /*
 1274                                  * Delete the overlap and advance to
 1275                                  * the next entry in the list.
 1276                                  */
 1277                                 lf = LIST_NEXT(overlap, lf_link);
 1278                                 LIST_REMOVE(overlap, lf_link);
 1279                                 lf_update_dependancies(state, overlap, TRUE,
 1280                                         &granted);
 1281                                 lf_free_lock(overlap);
 1282                                 overlap = lf;
 1283                                 continue;
 1284 
 1285                         case 4: /* overlap starts before lock */
 1286                                 /*
 1287                                  * Just update the overlap end and
 1288                                  * move on.
 1289                                  */
 1290                                 lf_set_end(state, overlap, lock->lf_start - 1,
 1291                                     &granted);
 1292                                 overlap = LIST_NEXT(overlap, lf_link);
 1293                                 continue;
 1294 
 1295                         case 5: /* overlap ends after lock */
 1296                                 /*
 1297                                  * Change the start of overlap and
 1298                                  * re-insert.
 1299                                  */
 1300                                 lf_set_start(state, overlap, lock->lf_end + 1,
 1301                                     &granted);
 1302                                 break;
 1303                         }
 1304                         break;
 1305                 }
 1306 #ifdef LOCKF_DEBUG
 1307                 if (lockf_debug & 1) {
 1308                         if (lock->lf_type != F_UNLCK)
 1309                                 lf_print("lf_activate_lock: activated", lock);
 1310                         else
 1311                                 lf_print("lf_activate_lock: unlocked", lock);
 1312                         lf_printlist("lf_activate_lock", lock);
 1313                 }
 1314 #endif /* LOCKF_DEBUG */
 1315                 if (lock->lf_type != F_UNLCK)
 1316                         lf_insert_lock(state, lock);
 1317         }
 1318 }
 1319 
 1320 /*
 1321  * Cancel a pending lock request, either as a result of a signal or a
 1322  * cancel request for an async lock.
 1323  */
 1324 static void
 1325 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
 1326 {
 1327         struct lockf_entry_list granted;
 1328 
 1329         /*
 1330          * Note it is theoretically possible that cancelling this lock
 1331          * may allow some other pending lock to become
 1332          * active. Consider this case:
 1333          *
 1334          * Owner        Action          Result          Dependencies
 1335          * 
 1336          * A:           lock [0..0]     succeeds        
 1337          * B:           lock [2..2]     succeeds        
 1338          * C:           lock [1..2]     blocked         C->B
 1339          * D:           lock [0..1]     blocked         C->B,D->A,D->C
 1340          * A:           unlock [0..0]                   C->B,D->C
 1341          * C:           cancel [1..2]   
 1342          */
 1343 
 1344         LIST_REMOVE(lock, lf_link);
 1345 
 1346         /*
 1347          * Removing out-going edges is simple.
 1348          */
 1349         sx_xlock(&lf_owner_graph_lock);
 1350         lf_remove_outgoing(lock);
 1351         sx_xunlock(&lf_owner_graph_lock);
 1352 
 1353         /*
 1354          * Removing in-coming edges may allow some other lock to
 1355          * become active - we use lf_update_dependancies to figure
 1356          * this out.
 1357          */
 1358         LIST_INIT(&granted);
 1359         lf_update_dependancies(state, lock, TRUE, &granted);
 1360         lf_free_lock(lock);
 1361 
 1362         /*
 1363          * Feed any newly active locks to lf_activate_lock.
 1364          */
 1365         while (!LIST_EMPTY(&granted)) {
 1366                 lock = LIST_FIRST(&granted);
 1367                 LIST_REMOVE(lock, lf_link);
 1368                 lf_activate_lock(state, lock);
 1369         }
 1370 }
 1371 
 1372 /*
 1373  * Set a byte-range lock.
 1374  */
 1375 static int
 1376 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
 1377     void **cookiep)
 1378 {
 1379         static char lockstr[] = "lockf";
 1380         int priority, error;
 1381 
 1382 #ifdef LOCKF_DEBUG
 1383         if (lockf_debug & 1)
 1384                 lf_print("lf_setlock", lock);
 1385 #endif /* LOCKF_DEBUG */
 1386 
 1387         /*
 1388          * Set the priority
 1389          */
 1390         priority = PLOCK;
 1391         if (lock->lf_type == F_WRLCK)
 1392                 priority += 4;
 1393         if (!(lock->lf_flags & F_NOINTR))
 1394                 priority |= PCATCH;
 1395         /*
 1396          * Scan lock list for this file looking for locks that would block us.
 1397          */
 1398         if (lf_getblock(state, lock)) {
 1399                 /*
 1400                  * Free the structure and return if nonblocking.
 1401                  */
 1402                 if ((lock->lf_flags & F_WAIT) == 0
 1403                     && lock->lf_async_task == NULL) {
 1404                         lf_free_lock(lock);
 1405                         error = EAGAIN;
 1406                         goto out;
 1407                 }
 1408 
 1409                 /*
 1410                  * For flock type locks, we must first remove
 1411                  * any shared locks that we hold before we sleep
 1412                  * waiting for an exclusive lock.
 1413                  */
 1414                 if ((lock->lf_flags & F_FLOCK) &&
 1415                     lock->lf_type == F_WRLCK) {
 1416                         lock->lf_type = F_UNLCK;
 1417                         lf_activate_lock(state, lock);
 1418                         lock->lf_type = F_WRLCK;
 1419                 }
 1420 
 1421                 /*
 1422                  * We are blocked. Create edges to each blocking lock,
 1423                  * checking for deadlock using the owner graph. For
 1424                  * simplicity, we run deadlock detection for all
 1425                  * locks, posix and otherwise.
 1426                  */
 1427                 sx_xlock(&lf_owner_graph_lock);
 1428                 error = lf_add_outgoing(state, lock);
 1429                 sx_xunlock(&lf_owner_graph_lock);
 1430 
 1431                 if (error) {
 1432 #ifdef LOCKF_DEBUG
 1433                         if (lockf_debug & 1)
 1434                                 lf_print("lf_setlock: deadlock", lock);
 1435 #endif
 1436                         lf_free_lock(lock);
 1437                         goto out;
 1438                 }
 1439 
 1440                 /*
 1441                  * We have added edges to everything that blocks
 1442                  * us. Sleep until they all go away.
 1443                  */
 1444                 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
 1445 #ifdef LOCKF_DEBUG
 1446                 if (lockf_debug & 1) {
 1447                         struct lockf_edge *e;
 1448                         LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
 1449                                 lf_print("lf_setlock: blocking on", e->le_to);
 1450                                 lf_printlist("lf_setlock", e->le_to);
 1451                         }
 1452                 }
 1453 #endif /* LOCKF_DEBUG */
 1454 
 1455                 if ((lock->lf_flags & F_WAIT) == 0) {
 1456                         /*
 1457                          * The caller requested async notification -
 1458                          * this callback happens when the blocking
 1459                          * lock is released, allowing the caller to
 1460                          * make another attempt to take the lock.
 1461                          */
 1462                         *cookiep = (void *) lock;
 1463                         error = EINPROGRESS;
 1464                         goto out;
 1465                 }
 1466 
 1467                 lock->lf_refs++;
 1468                 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
 1469                 if (lf_free_lock(lock)) {
 1470                         error = EDOOFUS;
 1471                         goto out;
 1472                 }
 1473 
 1474                 /*
 1475                  * We may have been awakened by a signal and/or by a
 1476                  * debugger continuing us (in which cases we must
 1477                  * remove our lock graph edges) and/or by another
 1478                  * process releasing a lock (in which case our edges
 1479                  * have already been removed and we have been moved to
 1480                  * the active list). We may also have been woken by
 1481                  * lf_purgelocks which we report to the caller as
 1482                  * EINTR. In that case, lf_purgelocks will have
 1483                  * removed our lock graph edges.
 1484                  *
 1485                  * Note that it is possible to receive a signal after
 1486                  * we were successfully woken (and moved to the active
 1487                  * list) but before we resumed execution. In this
 1488                  * case, our lf_outedges list will be clear. We
 1489                  * pretend there was no error.
 1490                  *
 1491                  * Note also, if we have been sleeping long enough, we
 1492                  * may now have incoming edges from some newer lock
 1493                  * which is waiting behind us in the queue.
 1494                  */
 1495                 if (lock->lf_flags & F_INTR) {
 1496                         error = EINTR;
 1497                         lf_free_lock(lock);
 1498                         goto out;
 1499                 }
 1500                 if (LIST_EMPTY(&lock->lf_outedges)) {
 1501                         error = 0;
 1502                 } else {
 1503                         lf_cancel_lock(state, lock);
 1504                         goto out;
 1505                 }
 1506 #ifdef LOCKF_DEBUG
 1507                 if (lockf_debug & 1) {
 1508                         lf_print("lf_setlock: granted", lock);
 1509                 }
 1510 #endif
 1511                 goto out;
 1512         }
 1513         /*
 1514          * It looks like we are going to grant the lock. First add
 1515          * edges from any currently pending lock that the new lock
 1516          * would block.
 1517          */
 1518         sx_xlock(&lf_owner_graph_lock);
 1519         error = lf_add_incoming(state, lock);
 1520         sx_xunlock(&lf_owner_graph_lock);
 1521         if (error) {
 1522 #ifdef LOCKF_DEBUG
 1523                 if (lockf_debug & 1)
 1524                         lf_print("lf_setlock: deadlock", lock);
 1525 #endif
 1526                 lf_free_lock(lock);
 1527                 goto out;
 1528         }
 1529 
 1530         /*
 1531          * No blocks!!  Add the lock.  Note that we will
 1532          * downgrade or upgrade any overlapping locks this
 1533          * process already owns.
 1534          */
 1535         lf_activate_lock(state, lock);
 1536         error = 0;
 1537 out:
 1538         return (error);
 1539 }
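      /*
       * A note on callers (a sketch, not from this file): fcntl(F_SETLK)
       * reaches lf_setlock with F_WAIT clear and fails with EAGAIN on
       * conflict; fcntl(F_SETLKW) sets F_WAIT and sleeps; asynchronous
       * callers (e.g. the NLM server) supply lf_async_task and receive
       * EINPROGRESS together with a cancel cookie for lf_cancel().
       */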
 1540 
 1541 /*
 1542  * Remove a byte-range lock on an inode.
 1543  *
 1544  * Generally, find the lock (or an overlap of that lock)
 1545  * and remove it (or shrink it), then wake up anyone we can.
 1546  */
 1547 static int
 1548 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
 1549 {
 1550         struct lockf_entry *overlap;
 1551 
 1552         overlap = LIST_FIRST(&state->ls_active);
 1553 
 1554         if (overlap == NOLOCKF)
 1555                 return (0);
 1556 #ifdef LOCKF_DEBUG
 1557         if (unlock->lf_type != F_UNLCK)
 1558                 panic("lf_clearlock: bad type");
 1559         if (lockf_debug & 1)
 1560                 lf_print("lf_clearlock", unlock);
 1561 #endif /* LOCKF_DEBUG */
 1562 
 1563         lf_activate_lock(state, unlock);
 1564 
 1565         return (0);
 1566 }
 1567 
 1568 /*
 1569  * Check whether there is a blocking lock, and if so return its
 1570  * details in '*fl'.
 1571  */
 1572 static int
 1573 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
 1574 {
 1575         struct lockf_entry *block;
 1576 
 1577 #ifdef LOCKF_DEBUG
 1578         if (lockf_debug & 1)
 1579                 lf_print("lf_getlock", lock);
 1580 #endif /* LOCKF_DEBUG */
 1581 
 1582         if ((block = lf_getblock(state, lock))) {
 1583                 fl->l_type = block->lf_type;
 1584                 fl->l_whence = SEEK_SET;
 1585                 fl->l_start = block->lf_start;
 1586                 if (block->lf_end == OFF_MAX)
 1587                         fl->l_len = 0;
 1588                 else
 1589                         fl->l_len = block->lf_end - block->lf_start + 1;
 1590                 fl->l_pid = block->lf_owner->lo_pid;
 1591                 fl->l_sysid = block->lf_owner->lo_sysid;
 1592         } else {
 1593                 fl->l_type = F_UNLCK;
 1594         }
 1595         return (0);
 1596 }
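      /*
       * lf_getlock backs the fcntl(F_GETLK) query. A minimal userland
       * sketch (illustrative only, not part of this file):
       *
       *      struct flock fl;
       *
       *      memset(&fl, 0, sizeof(fl));
       *      fl.l_type = F_WRLCK;
       *      fl.l_whence = SEEK_SET;
       *      fl.l_start = 0;
       *      fl.l_len = 0;
       *      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
       *              printf("range held by pid %d\n", (int)fl.l_pid);
       */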
 1597 
 1598 /*
 1599  * Cancel an async lock request.
 1600  */
 1601 static int
 1602 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
 1603 {
 1604         struct lockf_entry *reallock;
 1605 
 1606         /*
 1607          * We need to match this request with an existing lock
 1608          * request.
 1609          */
 1610         LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
 1611                 if ((void *) reallock == cookie) {
 1612                         /*
 1613                          * Double-check that this lock looks right
 1614                          * (maybe use a rolling ID for the cancel
 1615                          * cookie instead?)
 1616                          */
 1617                         if (!(reallock->lf_vnode == lock->lf_vnode
 1618                                 && reallock->lf_start == lock->lf_start
 1619                                 && reallock->lf_end == lock->lf_end)) {
 1620                                 return (ENOENT);
 1621                         }
 1622 
 1623                         /*
 1624                          * Make sure this lock was async and then just
 1625                          * remove it from its wait lists.
 1626                          */
 1627                         if (!reallock->lf_async_task) {
 1628                                 return (ENOENT);
 1629                         }
 1630 
 1631                         /*
 1632                          * Note that since any other thread must take
 1633                          * state->ls_lock before it can possibly
 1634                          * trigger the async callback, we are safe
 1635                          * from a race with lf_wakeup_lock, i.e. we
 1636                          * can free the lock (actually our caller does
 1637                          * this).
 1638                          */
 1639                         lf_cancel_lock(state, reallock);
 1640                         return (0);
 1641                 }
 1642         }
 1643 
 1644         /*
 1645          * We didn't find a matching lock - not much we can do here.
 1646          */
 1647         return (ENOENT);
 1648 }
 1649 
 1650 /*
 1651  * Walk the list of locks for an inode and
 1652  * return the first blocking lock.
 1653  */
 1654 static struct lockf_entry *
 1655 lf_getblock(struct lockf *state, struct lockf_entry *lock)
 1656 {
 1657         struct lockf_entry *overlap;
 1658 
 1659         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
 1660                 /*
 1661                  * We may assume that the active list is sorted by
 1662                  * lf_start.
 1663                  */
 1664                 if (overlap->lf_start > lock->lf_end)
 1665                         break;
 1666                 if (!lf_blocks(lock, overlap))
 1667                         continue;
 1668                 return (overlap);
 1669         }
 1670         return (NOLOCKF);
 1671 }
 1672 
 1673 /*
 1674  * Walk the list of locks for an inode to find an overlapping lock (if
 1675  * any) and return a classification of that overlap.
 1676  *
 1677  * Arguments:
 1678  *      *overlap        The place in the lock list to start looking
 1679  *      lock            The lock which is being tested
 1680  *      type            Pass 'SELF' to test only locks with the same
 1681  *                      owner as lock, or 'OTHERS' to test only locks
 1682  *                      with a different owner
 1683  *
 1684  * Returns one of six values:
 1685  *      0) no overlap
 1686  *      1) overlap == lock
 1687  *      2) overlap contains lock
 1688  *      3) lock contains overlap
 1689  *      4) overlap starts before lock
 1690  *      5) overlap ends after lock
 1691  *
 1692  * If there is an overlapping lock, '*overlap' is set to point at the
 1693  * overlapping lock.
 1694  *
 1695  * NOTE: this returns only the FIRST overlapping lock.  There
 1696  *       may be more than one.
 1697  */
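      /*
       * Pictorially (an illustrative sketch of the cases above, with
       * 'lock' drawn as ===== and the candidate overlap as -----):
       *
       *      Case 1:  lock      |=====|
       *               overlap   |-----|
       *
       *      Case 2:  lock        |==|
       *               overlap   |------|
       *
       *      Case 3:  lock      |======|
       *               overlap     |--|
       *
       *      Case 4:  lock        |====|
       *               overlap   |----|
       *
       *      Case 5:  lock      |====|
       *               overlap     |----|
       */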
 1698 static int
 1699 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
 1700 {
 1701         struct lockf_entry *lf;
 1702         off_t start, end;
 1703         int res;
 1704 
 1705         if ((*overlap) == NOLOCKF) {
 1706                 return (0);
 1707         }
 1708 #ifdef LOCKF_DEBUG
 1709         if (lockf_debug & 2)
 1710                 lf_print("lf_findoverlap: looking for overlap in", lock);
 1711 #endif /* LOCKF_DEBUG */
 1712         start = lock->lf_start;
 1713         end = lock->lf_end;
 1714         res = 0;
 1715         while (*overlap) {
 1716                 lf = *overlap;
 1717                 if (lf->lf_start > end)
 1718                         break;
 1719                 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
 1720                     ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
 1721                         *overlap = LIST_NEXT(lf, lf_link);
 1722                         continue;
 1723                 }
 1724 #ifdef LOCKF_DEBUG
 1725                 if (lockf_debug & 2)
 1726                         lf_print("\tchecking", lf);
 1727 #endif /* LOCKF_DEBUG */
 1728                 /*
 1729                  * OK, check for overlap
 1730                  *
 1731                  * Six cases:
 1732                  *      0) no overlap
 1733                  *      1) overlap == lock
 1734                  *      2) overlap contains lock
 1735                  *      3) lock contains overlap
 1736                  *      4) overlap starts before lock
 1737                  *      5) overlap ends after lock
 1738                  */
 1739                 if (start > lf->lf_end) {
 1740                         /* Case 0 */
 1741 #ifdef LOCKF_DEBUG
 1742                         if (lockf_debug & 2)
 1743                                 printf("no overlap\n");
 1744 #endif /* LOCKF_DEBUG */
 1745                         *overlap = LIST_NEXT(lf, lf_link);
 1746                         continue;
 1747                 }
 1748                 if (lf->lf_start == start && lf->lf_end == end) {
 1749                         /* Case 1 */
 1750 #ifdef LOCKF_DEBUG
 1751                         if (lockf_debug & 2)
 1752                                 printf("overlap == lock\n");
 1753 #endif /* LOCKF_DEBUG */
 1754                         res = 1;
 1755                         break;
 1756                 }
 1757                 if (lf->lf_start <= start && lf->lf_end >= end) {
 1758                         /* Case 2 */
 1759 #ifdef LOCKF_DEBUG
 1760                         if (lockf_debug & 2)
 1761                                 printf("overlap contains lock\n");
 1762 #endif /* LOCKF_DEBUG */
 1763                         res = 2;
 1764                         break;
 1765                 }
 1766                 if (start <= lf->lf_start && end >= lf->lf_end) {
 1767                         /* Case 3 */
 1768 #ifdef LOCKF_DEBUG
 1769                         if (lockf_debug & 2)
 1770                                 printf("lock contains overlap\n");
 1771 #endif /* LOCKF_DEBUG */
 1772                         res = 3;
 1773                         break;
 1774                 }
 1775                 if (lf->lf_start < start && lf->lf_end >= start) {
 1776                         /* Case 4 */
 1777 #ifdef LOCKF_DEBUG
 1778                         if (lockf_debug & 2)
 1779                                 printf("overlap starts before lock\n");
 1780 #endif /* LOCKF_DEBUG */
 1781                         res = 4;
 1782                         break;
 1783                 }
 1784                 if (lf->lf_start > start && lf->lf_end > end) {
 1785                         /* Case 5 */
 1786 #ifdef LOCKF_DEBUG
 1787                         if (lockf_debug & 2)
 1788                                 printf("overlap ends after lock\n");
 1789 #endif /* LOCKF_DEBUG */
 1790                         res = 5;
 1791                         break;
 1792                 }
 1793                 panic("lf_findoverlap: default");
 1794         }
 1795         return (res);
 1796 }
 1797 
 1798 /*
 1799  * Split the existing 'lock1', based on the extent of the lock
 1800  * described by 'lock2'. The existing lock should cover 'lock2'
 1801  * entirely.
 1802  *
 1803  * Any pending locks which have been unblocked are added to
 1804  * 'granted'.
 1805  */
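      /*
       * For example (an illustrative sketch): if lock1 covers [0..9]
       * and lock2 covers [3..5], lock1 is trimmed to [0..2] and a new
       * entry covering [6..9] is created and re-inserted. When lock2
       * shares a start or an end with lock1, a single trim suffices
       * and no new entry is allocated.
       */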
 1806 static void
 1807 lf_split(struct lockf *state, struct lockf_entry *lock1,
 1808     struct lockf_entry *lock2, struct lockf_entry_list *granted)
 1809 {
 1810         struct lockf_entry *splitlock;
 1811 
 1812 #ifdef LOCKF_DEBUG
 1813         if (lockf_debug & 2) {
 1814                 lf_print("lf_split", lock1);
 1815                 lf_print("splitting from", lock2);
 1816         }
 1817 #endif /* LOCKF_DEBUG */
 1818         /*
 1819          * Check whether we need a full split: if lock2 shares a
               * start or an end with lock1, a single trim suffices.
 1820          */
 1821         if (lock1->lf_start == lock2->lf_start) {
 1822                 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
 1823                 return;
 1824         }
 1825         if (lock1->lf_end == lock2->lf_end) {
 1826                 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
 1827                 return;
 1828         }
 1829         /*
 1830          * Make a new lock consisting of the last part of
 1831          * the encompassing lock.
 1832          */
 1833         splitlock = lf_alloc_lock(lock1->lf_owner);
 1834         memcpy(splitlock, lock1, sizeof *splitlock);
 1835         splitlock->lf_refs = 1;
 1836         if (splitlock->lf_flags & F_REMOTE)
 1837                 vref(splitlock->lf_vnode);
 1838 
 1839         /*
 1840          * This cannot cause a deadlock since any edges we would add
 1841          * to splitlock already exist in lock1. We must be sure to add
 1842          * necessary dependencies to splitlock before we reduce lock1
 1843          * otherwise we may accidentally grant a pending lock that
 1844          * was blocked by the tail end of lock1.
 1845          */
 1846         splitlock->lf_start = lock2->lf_end + 1;
 1847         LIST_INIT(&splitlock->lf_outedges);
 1848         LIST_INIT(&splitlock->lf_inedges);
 1849         sx_xlock(&lf_owner_graph_lock);
 1850         lf_add_incoming(state, splitlock);
 1851         sx_xunlock(&lf_owner_graph_lock);
 1852 
 1853         lf_set_end(state, lock1, lock2->lf_start - 1, granted);
 1854 
 1855         /*
 1856          * OK, now link it in
 1857          */
 1858         lf_insert_lock(state, splitlock);
 1859 }
 1860 
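      /*
       * A lockdesc holds a snapshot of one active lock (the vnode and
       * an equivalent struct flock) so that the iterator callbacks can
       * run after every lock-state lock has been dropped.
       */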
 1861 struct lockdesc {
 1862         STAILQ_ENTRY(lockdesc) link;
 1863         struct vnode *vp;
 1864         struct flock fl;
 1865 };
 1866 STAILQ_HEAD(lockdesclist, lockdesc);
 1867 
 1868 int
 1869 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
 1870 {
 1871         struct lockf *ls;
 1872         struct lockf_entry *lf;
 1873         struct lockdesc *ldesc;
 1874         struct lockdesclist locks;
 1875         int error;
 1876 
 1877         /*
 1878          * In order to keep the locking simple, we iterate over the
 1879          * active lock lists to build a list of locks that need
 1880          * releasing. We then call the iterator for each one in turn.
 1881          *
 1882          * We take an extra reference to the vnode for the duration to
 1883          * make sure it doesn't go away before we are finished.
 1884          */
 1885         STAILQ_INIT(&locks);
 1886         sx_xlock(&lf_lock_states_lock);
 1887         LIST_FOREACH(ls, &lf_lock_states, ls_link) {
 1888                 sx_xlock(&ls->ls_lock);
 1889                 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
 1890                         if (lf->lf_owner->lo_sysid != sysid)
 1891                                 continue;
 1892 
 1893                         ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
 1894                             M_WAITOK);
 1895                         ldesc->vp = lf->lf_vnode;
 1896                         vref(ldesc->vp);
 1897                         ldesc->fl.l_start = lf->lf_start;
 1898                         if (lf->lf_end == OFF_MAX)
 1899                                 ldesc->fl.l_len = 0;
 1900                         else
 1901                                 ldesc->fl.l_len =
 1902                                         lf->lf_end - lf->lf_start + 1;
 1903                         ldesc->fl.l_whence = SEEK_SET;
 1904                         ldesc->fl.l_type = F_UNLCK;
 1905                         ldesc->fl.l_pid = lf->lf_owner->lo_pid;
 1906                         ldesc->fl.l_sysid = sysid;
 1907                         STAILQ_INSERT_TAIL(&locks, ldesc, link);
 1908                 }
 1909                 sx_xunlock(&ls->ls_lock);
 1910         }
 1911         sx_xunlock(&lf_lock_states_lock);
 1912 
 1913         /*
 1914          * Call the iterator function for each lock in turn. If the
 1915          * iterator returns an error code, just free the rest of the
 1916          * lockdesc structures.
 1917          */
 1918         error = 0;
 1919         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
 1920                 STAILQ_REMOVE_HEAD(&locks, link);
 1921                 if (!error)
 1922                         error = fn(ldesc->vp, &ldesc->fl, arg);
 1923                 vrele(ldesc->vp);
 1924                 free(ldesc, M_LOCKF);
 1925         }
 1926 
 1927         return (error);
 1928 }
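      /*
       * An lf_iterator callback receives each snapshot in turn; a
       * hypothetical example (lf_clearremotesys_iterator below is the
       * real in-tree user):
       *
       *      static int
       *      count_iterator(struct vnode *vp, struct flock *fl, void *arg)
       *      {
       *
       *              (*(int *)arg)++;
       *              return (0);
       *      }
       *
       * Returning non-zero stops further callbacks, although the
       * remaining lockdesc structures are still freed.
       */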
 1929 
 1930 int
 1931 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
 1932 {
 1933         struct lockf *ls;
 1934         struct lockf_entry *lf;
 1935         struct lockdesc *ldesc;
 1936         struct lockdesclist locks;
 1937         int error;
 1938 
 1939         /*
 1940          * In order to keep the locking simple, we iterate over the
 1941          * active lock lists to build a list of locks that need
 1942          * releasing. We then call the iterator for each one in turn.
 1943          *
 1944          * We take an extra reference to the vnode for the duration to
 1945          * make sure it doesn't go away before we are finished.
 1946          */
 1947         STAILQ_INIT(&locks);
 1948         VI_LOCK(vp);
 1949         ls = vp->v_lockf;
 1950         if (!ls) {
 1951                 VI_UNLOCK(vp);
 1952                 return (0);
 1953         }
 1954         ls->ls_threads++;
 1955         VI_UNLOCK(vp);
 1956 
 1957         sx_xlock(&ls->ls_lock);
 1958         LIST_FOREACH(lf, &ls->ls_active, lf_link) {
 1959                 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
 1960                     M_WAITOK);
 1961                 ldesc->vp = lf->lf_vnode;
 1962                 vref(ldesc->vp);
 1963                 ldesc->fl.l_start = lf->lf_start;
 1964                 if (lf->lf_end == OFF_MAX)
 1965                         ldesc->fl.l_len = 0;
 1966                 else
 1967                         ldesc->fl.l_len =
 1968                                 lf->lf_end - lf->lf_start + 1;
 1969                 ldesc->fl.l_whence = SEEK_SET;
 1970                 ldesc->fl.l_type = F_UNLCK;
 1971                 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
 1972                 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
 1973                 STAILQ_INSERT_TAIL(&locks, ldesc, link);
 1974         }
 1975         sx_xunlock(&ls->ls_lock);
 1976         VI_LOCK(vp);
 1977         ls->ls_threads--;
 1978         wakeup(ls);
 1979         VI_UNLOCK(vp);
 1980 
 1981         /*
 1982          * Call the iterator function for each lock in turn. If the
 1983          * iterator returns an error code, just free the rest of the
 1984          * lockdesc structures.
 1985          */
 1986         error = 0;
 1987         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
 1988                 STAILQ_REMOVE_HEAD(&locks, link);
 1989                 if (!error)
 1990                         error = fn(ldesc->vp, &ldesc->fl, arg);
 1991                 vrele(ldesc->vp);
 1992                 free(ldesc, M_LOCKF);
 1993         }
 1994 
 1995         return (error);
 1996 }
 1997 
 1998 static int
 1999 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
 2000 {
 2001 
 2002         VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
 2003         return (0);
 2004 }
 2005 
 2006 void
 2007 lf_clearremotesys(int sysid)
 2008 {
 2009 
 2010         KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
 2011         lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
 2012 }
 2013 
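      /*
       * Count the outstanding lock-owner references for the given
       * sysid, so that a caller can tell whether that system still
       * holds any locks here.
       */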
 2014 int
 2015 lf_countlocks(int sysid)
 2016 {
 2017         int i;
 2018         struct lock_owner *lo;
 2019         int count;
 2020 
 2021         count = 0;
 2022         sx_xlock(&lf_lock_owners_lock);
 2023         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
 2024                 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
 2025                         if (lo->lo_sysid == sysid)
 2026                                 count += lo->lo_refs;
 2027         sx_xunlock(&lf_lock_owners_lock);
 2028 
 2029         return (count);
 2030 }
 2031 
 2032 #ifdef LOCKF_DEBUG
 2033 
 2034 /*
 2035  * Return non-zero if y is reachable from x using a brute force
 2036  * search. If reachable and path is non-null, return the route taken
 2037  * in path.
 2038  */
 2039 static int
 2040 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
 2041     struct owner_vertex_list *path)
 2042 {
 2043         struct owner_edge *e;
 2044 
 2045         if (x == y) {
 2046                 if (path)
 2047                         TAILQ_INSERT_HEAD(path, x, v_link);
 2048                 return (1);
 2049         }
 2050 
 2051         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
 2052                 if (graph_reaches(e->e_to, y, path)) {
 2053                         if (path)
 2054                                 TAILQ_INSERT_HEAD(path, x, v_link);
 2055                         return (1);
 2056                 }
 2057         }
 2058         return (0);
 2059 }
 2060 
 2061 /*
 2062  * Perform consistency checks on the graph. Make sure the values of
 2063  * v_order are correct. If checkorder is non-zero, check no vertex can
 2064  * reach any other vertex with a smaller order.
 2065  */
 2066 static void
 2067 graph_check(struct owner_graph *g, int checkorder)
 2068 {
 2069         int i, j;
 2070 
 2071         for (i = 0; i < g->g_size; i++) {
 2072                 if (!g->g_vertices[i]->v_owner)
 2073                         continue;
 2074                 KASSERT(g->g_vertices[i]->v_order == i,
 2075                     ("lock graph vertices disordered"));
 2076                 if (checkorder) {
 2077                         for (j = 0; j < i; j++) {
 2078                                 if (!g->g_vertices[j]->v_owner)
 2079                                         continue;
 2080                                 KASSERT(!graph_reaches(g->g_vertices[i],
 2081                                         g->g_vertices[j], NULL),
 2082                                     ("lock graph vertices disordered"));
 2083                         }
 2084                 }
 2085         }
 2086 }
 2087 
 2088 static void
 2089 graph_print_vertices(struct owner_vertex_list *set)
 2090 {
 2091         struct owner_vertex *v;
 2092 
 2093         printf("{ ");
 2094         TAILQ_FOREACH(v, set, v_link) {
 2095                 printf("%d:", v->v_order);
 2096                 lf_print_owner(v->v_owner);
 2097                 if (TAILQ_NEXT(v, v_link))
 2098                         printf(", ");
 2099         }
 2100         printf(" }\n");
 2101 }
 2102 
 2103 #endif
 2104 
 2105 /*
 2106  * Calculate the sub-set of vertices v from the affected region [y..x]
 2107  * where v is reachable from y. Return -1 if a loop was detected
 2108  * (i.e. x is reachable from y); otherwise return the number of
 2109  * vertices in this subset.
 2110  */
 2111 static int
 2112 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
 2113     struct owner_vertex *y, struct owner_vertex_list *delta)
 2114 {
 2115         uint32_t gen;
 2116         struct owner_vertex *v;
 2117         struct owner_edge *e;
 2118         int n;
 2119 
 2120         /*
 2121          * We start with a set containing just y. Then for each vertex
 2122          * v in the set so far unprocessed, we add each vertex that v
 2123          * has an out-edge to and that is within the affected region
 2124          * [y..x]. If we see the vertex x on our travels, stop
 2125          * immediately.
 2126          */
 2127         TAILQ_INIT(delta);
 2128         TAILQ_INSERT_TAIL(delta, y, v_link);
 2129         v = y;
 2130         n = 1;
 2131         gen = g->g_gen;
 2132         while (v) {
 2133                 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
 2134                         if (e->e_to == x)
 2135                                 return (-1);
 2136                         if (e->e_to->v_order < x->v_order
 2137                             && e->e_to->v_gen != gen) {
 2138                                 e->e_to->v_gen = gen;
 2139                                 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
 2140                                 n++;
 2141                         }
 2142                 }
 2143                 v = TAILQ_NEXT(v, v_link);
 2144         }
 2145 
 2146         return (n);
 2147 }
 2148 
 2149 /*
 2150  * Calculate the sub-set of vertices v from the affected region [y..x]
 2151  * where v reaches x. Return the number of vertices in this subset.
 2152  */
 2153 static int
 2154 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
 2155     struct owner_vertex *y, struct owner_vertex_list *delta)
 2156 {
 2157         uint32_t gen;
 2158         struct owner_vertex *v;
 2159         struct owner_edge *e;
 2160         int n;
 2161 
 2162         /*
 2163          * We start with a set containing just x. Then for each vertex
 2164          * v in the set so far unprocessed, we add each vertex that v
 2165          * has an in-edge from and that is within the affected region
 2166          * [y..x].
 2167          */
 2168         TAILQ_INIT(delta);
 2169         TAILQ_INSERT_TAIL(delta, x, v_link);
 2170         v = x;
 2171         n = 1;
 2172         gen = g->g_gen;
 2173         while (v) {
 2174                 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
 2175                         if (e->e_from->v_order > y->v_order
 2176                             && e->e_from->v_gen != gen) {
 2177                                 e->e_from->v_gen = gen;
 2178                                 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
 2179                                 n++;
 2180                         }
 2181                 }
 2182                 v = TAILQ_PREV(v, owner_vertex_list, v_link);
 2183         }
 2184 
 2185         return (n);
 2186 }
 2187 
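      /*
       * Insertion-sort the v_order value of each vertex in 'set' into
       * the sorted array 'indices', which already holds 'n' entries.
       * Return the new entry count.
       */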
 2188 static int
 2189 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
 2190 {
 2191         struct owner_vertex *v;
 2192         int i, j;
 2193 
 2194         TAILQ_FOREACH(v, set, v_link) {
 2195                 for (i = n;
 2196                      i > 0 && indices[i - 1] > v->v_order; i--)
 2197                         ;
 2198                 for (j = n - 1; j >= i; j--)
 2199                         indices[j + 1] = indices[j];
 2200                 indices[i] = v->v_order;
 2201                 n++;
 2202         }
 2203 
 2204         return (n);
 2205 }
 2206 
 2207 static int
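      /*
       * Repeatedly remove the lowest-ordered vertex from 'set' and
       * assign it the next unused index, preserving the vertices'
       * relative order. Return the next unused index slot.
       */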
 2208 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
 2209     struct owner_vertex_list *set)
 2210 {
 2211         struct owner_vertex *v, *vlowest;
 2212 
 2213         while (!TAILQ_EMPTY(set)) {
 2214                 vlowest = NULL;
 2215                 TAILQ_FOREACH(v, set, v_link) {
 2216                         if (!vlowest || v->v_order < vlowest->v_order)
 2217                                 vlowest = v;
 2218                 }
 2219                 TAILQ_REMOVE(set, vlowest, v_link);
 2220                 vlowest->v_order = indices[nextunused];
 2221                 g->g_vertices[vlowest->v_order] = vlowest;
 2222                 nextunused++;
 2223         }
 2224 
 2225         return (nextunused);
 2226 }
 2227 
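      /*
       * Add the edge x->y to the owner graph, maintaining a topological
       * order over the vertices so that cycles (deadlocks) are detected
       * incrementally; return EDEADLK if the edge would create a cycle.
       *
       * A worked example (illustrative): let v0..v3 have orders 0..3
       * with one existing edge v1->v2, and add v3->v1. The new edge
       * violates the order (v1 is numbered 1, below v3's 3), so we
       * compute deltaF = {v1, v2} (reachable from v1 within [1..3]) and
       * deltaB = {v3} (reaching v3 within [1..3]). The affected indices,
       * sorted, are {1, 2, 3}; deltaB is renumbered first, so v3 takes
       * order 1, then v1 and v2 take 2 and 3. Both v3->v1 (1 < 2) and
       * v1->v2 (2 < 3) now respect the order.
       */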
 2228 static int
 2229 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
 2230     struct owner_vertex *y)
 2231 {
 2232         struct owner_edge *e;
 2233         struct owner_vertex_list deltaF, deltaB;
 2234         int nF, nB, n, vi, i;
 2235         int *indices;
 2236 
 2237         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2238 
 2239         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
 2240                 if (e->e_to == y) {
 2241                         e->e_refs++;
 2242                         return (0);
 2243                 }
 2244         }
 2245 
 2246 #ifdef LOCKF_DEBUG
 2247         if (lockf_debug & 8) {
 2248                 printf("adding edge %d:", x->v_order);
 2249                 lf_print_owner(x->v_owner);
 2250                 printf(" -> %d:", y->v_order);
 2251                 lf_print_owner(y->v_owner);
 2252                 printf("\n");
 2253         }
 2254 #endif
 2255         if (y->v_order < x->v_order) {
 2256                 /*
 2257                  * The new edge violates the order. First find the set
 2258                  * of affected vertices reachable from y (deltaF) and
 2259                  * the set of affected vertices that reach x
 2260                  * (deltaB), using the graph generation number to
 2261                  * detect whether we have visited a given vertex
 2262                  * already. We re-order the graph so that each vertex
 2263                  * in deltaB appears before each vertex in deltaF.
 2264                  *
 2265                  * If x is a member of deltaF, then the new edge would
 2266                  * create a cycle. Otherwise, we may assume that
 2267                  * deltaF and deltaB are disjoint.
 2268                  */
 2269                 g->g_gen++;
 2270                 if (g->g_gen == 0) {
 2271                         /*
 2272                          * Generation wrap.
 2273                          */
 2274                         for (vi = 0; vi < g->g_size; vi++) {
 2275                                 g->g_vertices[vi]->v_gen = 0;
 2276                         }
 2277                         g->g_gen++;
 2278                 }
 2279                 nF = graph_delta_forward(g, x, y, &deltaF);
 2280                 if (nF < 0) {
 2281 #ifdef LOCKF_DEBUG
 2282                         if (lockf_debug & 8) {
 2283                                 struct owner_vertex_list path;
 2284                                 printf("deadlock: ");
 2285                                 TAILQ_INIT(&path);
 2286                                 graph_reaches(y, x, &path);
 2287                                 graph_print_vertices(&path);
 2288                         }
 2289 #endif
 2290                         return (EDEADLK);
 2291                 }
 2292 
 2293 #ifdef LOCKF_DEBUG
 2294                 if (lockf_debug & 8) {
 2295                         printf("re-ordering graph vertices\n");
 2296                         printf("deltaF = ");
 2297                         graph_print_vertices(&deltaF);
 2298                 }
 2299 #endif
 2300 
 2301                 nB = graph_delta_backward(g, x, y, &deltaB);
 2302 
 2303 #ifdef LOCKF_DEBUG
 2304                 if (lockf_debug & 8) {
 2305                         printf("deltaB = ");
 2306                         graph_print_vertices(&deltaB);
 2307                 }
 2308 #endif
 2309 
 2310                 /*
 2311                  * We first build a set of vertex indices (vertex
 2312                  * order values) that we may use, then we re-assign
 2313                  * orders first to those vertices in deltaB, then to
 2314                  * deltaF. Note that the contents of deltaF and deltaB
 2315                  * may be partially disordered - we perform an
 2316                  * insertion sort while building our index set.
 2317                  */
 2318                 indices = g->g_indexbuf;
 2319                 n = graph_add_indices(indices, 0, &deltaF);
 2320                 graph_add_indices(indices, n, &deltaB);
 2321 
 2322                 /*
 2323                  * We must also be sure to maintain the relative
 2324                  * ordering of deltaF and deltaB when re-assigning
 2325                  * vertices. We do this by iteratively removing the
 2326                  * lowest ordered element from the set and assigning
 2327                  * it the next value from our new ordering.
 2328                  */
 2329                 i = graph_assign_indices(g, indices, 0, &deltaB);
 2330                 graph_assign_indices(g, indices, i, &deltaF);
 2331 
 2332 #ifdef LOCKF_DEBUG
 2333                 if (lockf_debug & 8) {
 2334                         struct owner_vertex_list set;
 2335                         TAILQ_INIT(&set);
 2336                         for (i = 0; i < nB + nF; i++)
 2337                                 TAILQ_INSERT_TAIL(&set,
 2338                                     g->g_vertices[indices[i]], v_link);
 2339                         printf("new ordering = ");
 2340                         graph_print_vertices(&set);
 2341                 }
 2342 #endif
 2343         }
 2344 
 2345         KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
 2346 
 2347 #ifdef LOCKF_DEBUG
 2348         if (lockf_debug & 8) {
 2349                 graph_check(g, TRUE);
 2350         }
 2351 #endif
 2352 
 2353         e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
 2354 
 2355         LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
 2356         LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
 2357         e->e_refs = 1;
 2358         e->e_from = x;
 2359         e->e_to = y;
 2360 
 2361         return (0);
 2362 }
 2363 
 2364 /*
 2365  * Remove an edge x->y from the graph.
 2366  */
 2367 static void
 2368 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
 2369     struct owner_vertex *y)
 2370 {
 2371         struct owner_edge *e;
 2372 
 2373         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2374 
 2375         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
 2376                 if (e->e_to == y)
 2377                         break;
 2378         }
 2379         KASSERT(e, ("Removing non-existent edge from deadlock graph"));
 2380 
 2381         e->e_refs--;
 2382         if (e->e_refs == 0) {
 2383 #ifdef LOCKF_DEBUG
 2384                 if (lockf_debug & 8) {
 2385                         printf("removing edge %d:", x->v_order);
 2386                         lf_print_owner(x->v_owner);
 2387                         printf(" -> %d:", y->v_order);
 2388                         lf_print_owner(y->v_owner);
 2389                         printf("\n");
 2390                 }
 2391 #endif
 2392                 LIST_REMOVE(e, e_outlink);
 2393                 LIST_REMOVE(e, e_inlink);
 2394                 free(e, M_LOCKF);
 2395         }
 2396 }
 2397 
 2398 /*
 2399  * Allocate a new vertex and add it to the graph, doubling the
 2400  * vertex array and index buffer if they are full.
 2401  */
 2402 static struct owner_vertex *
 2403 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
 2404 {
 2405         struct owner_vertex *v;
 2406 
 2407         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2408 
 2409         v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
 2410         if (g->g_size == g->g_space) {
 2411                 g->g_vertices = realloc(g->g_vertices,
 2412                     2 * g->g_space * sizeof(struct owner_vertex *),
 2413                     M_LOCKF, M_WAITOK);
 2414                 free(g->g_indexbuf, M_LOCKF);
 2415                 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
 2416                     M_LOCKF, M_WAITOK);
 2417                 g->g_space = 2 * g->g_space;
 2418         }
 2419         v->v_order = g->g_size;
 2420         v->v_gen = g->g_gen;
 2421         g->g_vertices[g->g_size] = v;
 2422         g->g_size++;
 2423 
 2424         LIST_INIT(&v->v_outedges);
 2425         LIST_INIT(&v->v_inedges);
 2426         v->v_owner = lo;
 2427 
 2428         return (v);
 2429 }
 2430 
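      /*
       * Release a vertex that no longer has any edges, closing the gap
       * in the graph's vertex array and renumbering the vertices that
       * followed it.
       */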
 2431 static void
 2432 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
 2433 {
 2434         struct owner_vertex *w;
 2435         int i;
 2436 
 2437         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 2438         
 2439         KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
 2440         KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
 2441 
 2442         /*
 2443          * Remove from the graph's array and close up the gap,
 2444          * renumbering the other vertices.
 2445          */
 2446         for (i = v->v_order + 1; i < g->g_size; i++) {
 2447                 w = g->g_vertices[i];
 2448                 w->v_order--;
 2449                 g->g_vertices[i - 1] = w;
 2450         }
 2451         g->g_size--;
 2452 
 2453         free(v, M_LOCKF);
 2454 }
 2455 
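      /*
       * Initialise an owner graph with a small initial vertex array;
       * graph_alloc_vertex() doubles the arrays on demand.
       */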
 2456 static struct owner_graph *
 2457 graph_init(struct owner_graph *g)
 2458 {
 2459 
 2460         g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
 2461             M_LOCKF, M_WAITOK);
 2462         g->g_size = 0;
 2463         g->g_space = 10;
 2464         g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
 2465         g->g_gen = 0;
 2466 
 2467         return (g);
 2468 }
 2469 
 2470 #ifdef LOCKF_DEBUG
 2471 /*
 2472  * Print description of a lock owner
 2473  */
 2474 static void
 2475 lf_print_owner(struct lock_owner *lo)
 2476 {
 2477 
 2478         if (lo->lo_flags & F_REMOTE) {
 2479                 printf("remote pid %d, system %d",
 2480                     lo->lo_pid, lo->lo_sysid);
 2481         } else if (lo->lo_flags & F_FLOCK) {
 2482                 printf("file %p", lo->lo_id);
 2483         } else {
 2484                 printf("local pid %d", lo->lo_pid);
 2485         }
 2486 }
 2487 
 2488 /*
 2489  * Print out a lock.
 2490  */
 2491 static void
 2492 lf_print(char *tag, struct lockf_entry *lock)
 2493 {
 2494 
 2495         printf("%s: lock %p for ", tag, (void *)lock);
 2496         lf_print_owner(lock->lf_owner);
 2497         if (lock->lf_inode != (struct inode *)0)
 2498                 printf(" in ino %ju on dev <%s>,",
 2499                     (uintmax_t)lock->lf_inode->i_number,
 2500                     devtoname(lock->lf_inode->i_dev));
 2501         printf(" %s, start %jd, end ",
 2502             lock->lf_type == F_RDLCK ? "shared" :
 2503             lock->lf_type == F_WRLCK ? "exclusive" :
 2504             lock->lf_type == F_UNLCK ? "unlock" : "unknown",
 2505             (intmax_t)lock->lf_start);
 2506         if (lock->lf_end == OFF_MAX)
 2507                 printf("EOF");
 2508         else
 2509                 printf("%jd", (intmax_t)lock->lf_end);
 2510         if (!LIST_EMPTY(&lock->lf_outedges))
 2511                 printf(" block %p\n",
 2512                     (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
 2513         else
 2514                 printf("\n");
 2515 }
 2516 
 2517 static void
 2518 lf_printlist(char *tag, struct lockf_entry *lock)
 2519 {
 2520         struct lockf_entry *lf, *blk;
 2521         struct lockf_edge *e;
 2522 
 2523         if (lock->lf_inode == (struct inode *)0)
 2524                 return;
 2525 
 2526         printf("%s: Lock list for ino %ju on dev <%s>:\n",
 2527             tag, (uintmax_t)lock->lf_inode->i_number,
 2528             devtoname(lock->lf_inode->i_dev));
 2529         LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
 2530                 printf("\tlock %p for ", (void *)lf);
 2531                 lf_print_owner(lf->lf_owner);
 2532                 printf(", %s, start %jd, end %jd",
 2533                     lf->lf_type == F_RDLCK ? "shared" :
 2534                     lf->lf_type == F_WRLCK ? "exclusive" :
 2535                     lf->lf_type == F_UNLCK ? "unlock" :
 2536                     "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
 2537                 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
 2538                         blk = e->le_to;
 2539                         printf("\n\t\tlock request %p for ", (void *)blk);
 2540                         lf_print_owner(blk->lf_owner);
 2541                         printf(", %s, start %jd, end %jd",
 2542                             blk->lf_type == F_RDLCK ? "shared" :
 2543                             blk->lf_type == F_WRLCK ? "exclusive" :
 2544                             blk->lf_type == F_UNLCK ? "unlock" :
 2545                             "unknown", (intmax_t)blk->lf_start,
 2546                             (intmax_t)blk->lf_end);
 2547                         if (!LIST_EMPTY(&blk->lf_inedges))
 2548                                 panic("lf_printlist: bad list");
 2549                 }
 2550                 printf("\n");
 2551         }
 2552 }
 2553 #endif /* LOCKF_DEBUG */
