The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_descrip.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * (c) UNIX System Laboratories, Inc.
    7  * All or some portions of this file are derived from material licensed
    8  * to the University of California by American Telephone and Telegraph
    9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   10  * the permission of UNIX System Laboratories, Inc.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      @(#)kern_descrip.c      8.6 (Berkeley) 4/19/94
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include "opt_capsicum.h"
   43 #include "opt_ddb.h"
   44 #include "opt_ktrace.h"
   45 
   46 #include <sys/param.h>
   47 #include <sys/systm.h>
   48 
   49 #include <sys/capsicum.h>
   50 #include <sys/conf.h>
   51 #include <sys/fcntl.h>
   52 #include <sys/file.h>
   53 #include <sys/filedesc.h>
   54 #include <sys/filio.h>
   55 #include <sys/jail.h>
   56 #include <sys/kernel.h>
   57 #include <sys/limits.h>
   58 #include <sys/lock.h>
   59 #include <sys/malloc.h>
   60 #include <sys/mount.h>
   61 #include <sys/mutex.h>
   62 #include <sys/namei.h>
   63 #include <sys/selinfo.h>
   64 #include <sys/poll.h>
   65 #include <sys/priv.h>
   66 #include <sys/proc.h>
   67 #include <sys/protosw.h>
   68 #include <sys/racct.h>
   69 #include <sys/resourcevar.h>
   70 #include <sys/sbuf.h>
   71 #include <sys/signalvar.h>
   72 #include <sys/kdb.h>
   73 #include <sys/smr.h>
   74 #include <sys/stat.h>
   75 #include <sys/sx.h>
   76 #include <sys/syscallsubr.h>
   77 #include <sys/sysctl.h>
   78 #include <sys/sysproto.h>
   79 #include <sys/unistd.h>
   80 #include <sys/user.h>
   81 #include <sys/vnode.h>
   82 #include <sys/ktrace.h>
   83 
   84 #include <net/vnet.h>
   85 
   86 #include <security/audit/audit.h>
   87 
   88 #include <vm/uma.h>
   89 #include <vm/vm.h>
   90 
   91 #include <ddb/ddb.h>
   92 
   93 static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
   94 static MALLOC_DEFINE(M_PWD, "pwd", "Descriptor table vnodes");
   95 static MALLOC_DEFINE(M_PWDDESC, "pwddesc", "Pwd descriptors");
   96 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
   97     "file desc to leader structures");
   98 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
   99 MALLOC_DEFINE(M_FILECAPS, "filecaps", "descriptor capabilities");
  100 
  101 MALLOC_DECLARE(M_FADVISE);
  102 
  103 static __read_mostly uma_zone_t file_zone;
  104 static __read_mostly uma_zone_t filedesc0_zone;
  105 __read_mostly uma_zone_t pwd_zone;
  106 VFS_SMR_DECLARE;
  107 
  108 static int      closefp(struct filedesc *fdp, int fd, struct file *fp,
  109                     struct thread *td, bool holdleaders, bool audit);
  110 static void     export_file_to_kinfo(struct file *fp, int fd,
  111                     cap_rights_t *rightsp, struct kinfo_file *kif,
  112                     struct filedesc *fdp, int flags);
  113 static int      fd_first_free(struct filedesc *fdp, int low, int size);
  114 static void     fdgrowtable(struct filedesc *fdp, int nfd);
  115 static void     fdgrowtable_exp(struct filedesc *fdp, int nfd);
  116 static void     fdunused(struct filedesc *fdp, int fd);
  117 static void     fdused(struct filedesc *fdp, int fd);
  118 static int      getmaxfd(struct thread *td);
  119 static u_long   *filecaps_copy_prep(const struct filecaps *src);
  120 static void     filecaps_copy_finish(const struct filecaps *src,
  121                     struct filecaps *dst, u_long *ioctls);
  122 static u_long   *filecaps_free_prep(struct filecaps *fcaps);
  123 static void     filecaps_free_finish(u_long *ioctls);
  124 
  125 static struct pwd *pwd_alloc(void);
  126 
  127 /*
  128  * Each process has:
  129  *
  130  * - An array of open file descriptors (fd_ofiles)
  131  * - An array of file flags (fd_ofileflags)
  132  * - A bitmap recording which descriptors are in use (fd_map)
  133  *
  134  * A process starts out with NDFILE descriptors.  The value of NDFILE has
  135  * been selected based the historical limit of 20 open files, and an
  136  * assumption that the majority of processes, especially short-lived
  137  * processes like shells, will never need more.
  138  *
  139  * If this initial allocation is exhausted, a larger descriptor table and
  140  * map are allocated dynamically, and the pointers in the process's struct
  141  * filedesc are updated to point to those.  This is repeated every time
  142  * the process runs out of file descriptors (provided it hasn't hit its
  143  * resource limit).
  144  *
  145  * Since threads may hold references to individual descriptor table
  146  * entries, the tables are never freed.  Instead, they are placed on a
  147  * linked list and freed only when the struct filedesc is released.
  148  */
  149 #define NDFILE          20
  150 #define NDSLOTSIZE      sizeof(NDSLOTTYPE)
  151 #define NDENTRIES       (NDSLOTSIZE * __CHAR_BIT)
  152 #define NDSLOT(x)       ((x) / NDENTRIES)
  153 #define NDBIT(x)        ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
  154 #define NDSLOTS(x)      (((x) + NDENTRIES - 1) / NDENTRIES)
  155 
  156 /*
  157  * SLIST entry used to keep track of ofiles which must be reclaimed when
  158  * the process exits.
  159  */
  160 struct freetable {
  161         struct fdescenttbl *ft_table;
  162         SLIST_ENTRY(freetable) ft_next;
  163 };
  164 
  165 /*
  166  * Initial allocation: a filedesc structure + the head of SLIST used to
  167  * keep track of old ofiles + enough space for NDFILE descriptors.
  168  */
  169 
  170 struct fdescenttbl0 {
  171         int     fdt_nfiles;
  172         struct  filedescent fdt_ofiles[NDFILE];
  173 };
  174 
  175 struct filedesc0 {
  176         struct filedesc fd_fd;
  177         SLIST_HEAD(, freetable) fd_free;
  178         struct  fdescenttbl0 fd_dfiles;
  179         NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
  180 };
  181 
  182 /*
  183  * Descriptor management.
  184  */
  185 static int __exclusive_cache_line openfiles; /* actual number of open files */
  186 struct mtx sigio_lock;          /* mtx to protect pointers to sigio */
  187 void __read_mostly (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
  188 
  189 /*
  190  * If low >= size, just return low. Otherwise find the first zero bit in the
  191  * given bitmap, starting at low and not exceeding size - 1. Return size if
  192  * not found.
  193  */
  194 static int
  195 fd_first_free(struct filedesc *fdp, int low, int size)
  196 {
  197         NDSLOTTYPE *map = fdp->fd_map;
  198         NDSLOTTYPE mask;
  199         int off, maxoff;
  200 
  201         if (low >= size)
  202                 return (low);
  203 
  204         off = NDSLOT(low);
  205         if (low % NDENTRIES) {
  206                 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
  207                 if ((mask &= ~map[off]) != 0UL)
  208                         return (off * NDENTRIES + ffsl(mask) - 1);
  209                 ++off;
  210         }
  211         for (maxoff = NDSLOTS(size); off < maxoff; ++off)
  212                 if (map[off] != ~0UL)
  213                         return (off * NDENTRIES + ffsl(~map[off]) - 1);
  214         return (size);
  215 }
  216 
  217 /*
  218  * Find the last used fd.
  219  *
  220  * Call this variant if fdp can't be modified by anyone else (e.g, during exec).
  221  * Otherwise use fdlastfile.
  222  */
  223 int
  224 fdlastfile_single(struct filedesc *fdp)
  225 {
  226         NDSLOTTYPE *map = fdp->fd_map;
  227         int off, minoff;
  228 
  229         off = NDSLOT(fdp->fd_nfiles - 1);
  230         for (minoff = NDSLOT(0); off >= minoff; --off)
  231                 if (map[off] != 0)
  232                         return (off * NDENTRIES + flsl(map[off]) - 1);
  233         return (-1);
  234 }
  235 
  236 int
  237 fdlastfile(struct filedesc *fdp)
  238 {
  239 
  240         FILEDESC_LOCK_ASSERT(fdp);
  241         return (fdlastfile_single(fdp));
  242 }
  243 
  244 static int
  245 fdisused(struct filedesc *fdp, int fd)
  246 {
  247 
  248         KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
  249             ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
  250 
  251         return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
  252 }
  253 
  254 /*
  255  * Mark a file descriptor as used.
  256  */
  257 static void
  258 fdused_init(struct filedesc *fdp, int fd)
  259 {
  260 
  261         KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
  262 
  263         fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
  264 }
  265 
  266 static void
  267 fdused(struct filedesc *fdp, int fd)
  268 {
  269 
  270         FILEDESC_XLOCK_ASSERT(fdp);
  271 
  272         fdused_init(fdp, fd);
  273         if (fd == fdp->fd_freefile)
  274                 fdp->fd_freefile++;
  275 }
  276 
  277 /*
  278  * Mark a file descriptor as unused.
  279  */
  280 static void
  281 fdunused(struct filedesc *fdp, int fd)
  282 {
  283 
  284         FILEDESC_XLOCK_ASSERT(fdp);
  285 
  286         KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
  287         KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
  288             ("fd=%d is still in use", fd));
  289 
  290         fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
  291         if (fd < fdp->fd_freefile)
  292                 fdp->fd_freefile = fd;
  293 }
  294 
  295 /*
  296  * Free a file descriptor.
  297  *
  298  * Avoid some work if fdp is about to be destroyed.
  299  */
  300 static inline void
  301 fdefree_last(struct filedescent *fde)
  302 {
  303 
  304         filecaps_free(&fde->fde_caps);
  305 }
  306 
  307 static inline void
  308 fdfree(struct filedesc *fdp, int fd)
  309 {
  310         struct filedescent *fde;
  311 
  312         FILEDESC_XLOCK_ASSERT(fdp);
  313         fde = &fdp->fd_ofiles[fd];
  314 #ifdef CAPABILITIES
  315         seqc_write_begin(&fde->fde_seqc);
  316 #endif
  317         fde->fde_file = NULL;
  318 #ifdef CAPABILITIES
  319         seqc_write_end(&fde->fde_seqc);
  320 #endif
  321         fdefree_last(fde);
  322         fdunused(fdp, fd);
  323 }
  324 
  325 /*
  326  * System calls on descriptors.
  327  */
  328 #ifndef _SYS_SYSPROTO_H_
  329 struct getdtablesize_args {
  330         int     dummy;
  331 };
  332 #endif
  333 /* ARGSUSED */
  334 int
  335 sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
  336 {
  337 #ifdef  RACCT
  338         uint64_t lim;
  339 #endif
  340 
  341         td->td_retval[0] = getmaxfd(td);
  342 #ifdef  RACCT
  343         PROC_LOCK(td->td_proc);
  344         lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
  345         PROC_UNLOCK(td->td_proc);
  346         if (lim < td->td_retval[0])
  347                 td->td_retval[0] = lim;
  348 #endif
  349         return (0);
  350 }
  351 
  352 /*
  353  * Duplicate a file descriptor to a particular value.
  354  *
  355  * Note: keep in mind that a potential race condition exists when closing
  356  * descriptors from a shared descriptor table (via rfork).
  357  */
  358 #ifndef _SYS_SYSPROTO_H_
  359 struct dup2_args {
  360         u_int   from;
  361         u_int   to;
  362 };
  363 #endif
  364 /* ARGSUSED */
  365 int
  366 sys_dup2(struct thread *td, struct dup2_args *uap)
  367 {
  368 
  369         return (kern_dup(td, FDDUP_FIXED, 0, (int)uap->from, (int)uap->to));
  370 }
  371 
  372 /*
  373  * Duplicate a file descriptor.
  374  */
  375 #ifndef _SYS_SYSPROTO_H_
  376 struct dup_args {
  377         u_int   fd;
  378 };
  379 #endif
  380 /* ARGSUSED */
  381 int
  382 sys_dup(struct thread *td, struct dup_args *uap)
  383 {
  384 
  385         return (kern_dup(td, FDDUP_NORMAL, 0, (int)uap->fd, 0));
  386 }
  387 
  388 /*
  389  * The file control system call.
  390  */
  391 #ifndef _SYS_SYSPROTO_H_
  392 struct fcntl_args {
  393         int     fd;
  394         int     cmd;
  395         long    arg;
  396 };
  397 #endif
  398 /* ARGSUSED */
  399 int
  400 sys_fcntl(struct thread *td, struct fcntl_args *uap)
  401 {
  402 
  403         return (kern_fcntl_freebsd(td, uap->fd, uap->cmd, uap->arg));
  404 }
  405 
  406 int
  407 kern_fcntl_freebsd(struct thread *td, int fd, int cmd, long arg)
  408 {
  409         struct flock fl;
  410         struct __oflock ofl;
  411         intptr_t arg1;
  412         int error, newcmd;
  413 
  414         error = 0;
  415         newcmd = cmd;
  416         switch (cmd) {
  417         case F_OGETLK:
  418         case F_OSETLK:
  419         case F_OSETLKW:
  420                 /*
  421                  * Convert old flock structure to new.
  422                  */
  423                 error = copyin((void *)(intptr_t)arg, &ofl, sizeof(ofl));
  424                 fl.l_start = ofl.l_start;
  425                 fl.l_len = ofl.l_len;
  426                 fl.l_pid = ofl.l_pid;
  427                 fl.l_type = ofl.l_type;
  428                 fl.l_whence = ofl.l_whence;
  429                 fl.l_sysid = 0;
  430 
  431                 switch (cmd) {
  432                 case F_OGETLK:
  433                         newcmd = F_GETLK;
  434                         break;
  435                 case F_OSETLK:
  436                         newcmd = F_SETLK;
  437                         break;
  438                 case F_OSETLKW:
  439                         newcmd = F_SETLKW;
  440                         break;
  441                 }
  442                 arg1 = (intptr_t)&fl;
  443                 break;
  444         case F_GETLK:
  445         case F_SETLK:
  446         case F_SETLKW:
  447         case F_SETLK_REMOTE:
  448                 error = copyin((void *)(intptr_t)arg, &fl, sizeof(fl));
  449                 arg1 = (intptr_t)&fl;
  450                 break;
  451         default:
  452                 arg1 = arg;
  453                 break;
  454         }
  455         if (error)
  456                 return (error);
  457         error = kern_fcntl(td, fd, newcmd, arg1);
  458         if (error)
  459                 return (error);
  460         if (cmd == F_OGETLK) {
  461                 ofl.l_start = fl.l_start;
  462                 ofl.l_len = fl.l_len;
  463                 ofl.l_pid = fl.l_pid;
  464                 ofl.l_type = fl.l_type;
  465                 ofl.l_whence = fl.l_whence;
  466                 error = copyout(&ofl, (void *)(intptr_t)arg, sizeof(ofl));
  467         } else if (cmd == F_GETLK) {
  468                 error = copyout(&fl, (void *)(intptr_t)arg, sizeof(fl));
  469         }
  470         return (error);
  471 }
  472 
  473 int
  474 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
  475 {
  476         struct filedesc *fdp;
  477         struct flock *flp;
  478         struct file *fp, *fp2;
  479         struct filedescent *fde;
  480         struct proc *p;
  481         struct vnode *vp;
  482         struct mount *mp;
  483         struct kinfo_file *kif;
  484         int error, flg, kif_sz, seals, tmp;
  485         uint64_t bsize;
  486         off_t foffset;
  487 
  488         error = 0;
  489         flg = F_POSIX;
  490         p = td->td_proc;
  491         fdp = p->p_fd;
  492 
  493         AUDIT_ARG_FD(cmd);
  494         AUDIT_ARG_CMD(cmd);
  495         switch (cmd) {
  496         case F_DUPFD:
  497                 tmp = arg;
  498                 error = kern_dup(td, FDDUP_FCNTL, 0, fd, tmp);
  499                 break;
  500 
  501         case F_DUPFD_CLOEXEC:
  502                 tmp = arg;
  503                 error = kern_dup(td, FDDUP_FCNTL, FDDUP_FLAG_CLOEXEC, fd, tmp);
  504                 break;
  505 
  506         case F_DUP2FD:
  507                 tmp = arg;
  508                 error = kern_dup(td, FDDUP_FIXED, 0, fd, tmp);
  509                 break;
  510 
  511         case F_DUP2FD_CLOEXEC:
  512                 tmp = arg;
  513                 error = kern_dup(td, FDDUP_FIXED, FDDUP_FLAG_CLOEXEC, fd, tmp);
  514                 break;
  515 
  516         case F_GETFD:
  517                 error = EBADF;
  518                 FILEDESC_SLOCK(fdp);
  519                 fde = fdeget_locked(fdp, fd);
  520                 if (fde != NULL) {
  521                         td->td_retval[0] =
  522                             (fde->fde_flags & UF_EXCLOSE) ? FD_CLOEXEC : 0;
  523                         error = 0;
  524                 }
  525                 FILEDESC_SUNLOCK(fdp);
  526                 break;
  527 
  528         case F_SETFD:
  529                 error = EBADF;
  530                 FILEDESC_XLOCK(fdp);
  531                 fde = fdeget_locked(fdp, fd);
  532                 if (fde != NULL) {
  533                         fde->fde_flags = (fde->fde_flags & ~UF_EXCLOSE) |
  534                             (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
  535                         error = 0;
  536                 }
  537                 FILEDESC_XUNLOCK(fdp);
  538                 break;
  539 
  540         case F_GETFL:
  541                 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_GETFL, &fp);
  542                 if (error != 0)
  543                         break;
  544                 td->td_retval[0] = OFLAGS(fp->f_flag);
  545                 fdrop(fp, td);
  546                 break;
  547 
  548         case F_SETFL:
  549                 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_SETFL, &fp);
  550                 if (error != 0)
  551                         break;
  552                 if (fp->f_ops == &path_fileops) {
  553                         fdrop(fp, td);
  554                         error = EBADF;
  555                         break;
  556                 }
  557                 do {
  558                         tmp = flg = fp->f_flag;
  559                         tmp &= ~FCNTLFLAGS;
  560                         tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
  561                 } while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
  562                 tmp = fp->f_flag & FNONBLOCK;
  563                 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
  564                 if (error != 0) {
  565                         fdrop(fp, td);
  566                         break;
  567                 }
  568                 tmp = fp->f_flag & FASYNC;
  569                 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
  570                 if (error == 0) {
  571                         fdrop(fp, td);
  572                         break;
  573                 }
  574                 atomic_clear_int(&fp->f_flag, FNONBLOCK);
  575                 tmp = 0;
  576                 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
  577                 fdrop(fp, td);
  578                 break;
  579 
  580         case F_GETOWN:
  581                 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_GETOWN, &fp);
  582                 if (error != 0)
  583                         break;
  584                 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
  585                 if (error == 0)
  586                         td->td_retval[0] = tmp;
  587                 fdrop(fp, td);
  588                 break;
  589 
  590         case F_SETOWN:
  591                 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_SETOWN, &fp);
  592                 if (error != 0)
  593                         break;
  594                 tmp = arg;
  595                 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
  596                 fdrop(fp, td);
  597                 break;
  598 
  599         case F_SETLK_REMOTE:
  600                 error = priv_check(td, PRIV_NFS_LOCKD);
  601                 if (error != 0)
  602                         return (error);
  603                 flg = F_REMOTE;
  604                 goto do_setlk;
  605 
  606         case F_SETLKW:
  607                 flg |= F_WAIT;
  608                 /* FALLTHROUGH F_SETLK */
  609 
  610         case F_SETLK:
  611         do_setlk:
  612                 flp = (struct flock *)arg;
  613                 if ((flg & F_REMOTE) != 0 && flp->l_sysid == 0) {
  614                         error = EINVAL;
  615                         break;
  616                 }
  617 
  618                 error = fget_unlocked(fdp, fd, &cap_flock_rights, &fp);
  619                 if (error != 0)
  620                         break;
  621                 if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
  622                         error = EBADF;
  623                         fdrop(fp, td);
  624                         break;
  625                 }
  626 
  627                 if (flp->l_whence == SEEK_CUR) {
  628                         foffset = foffset_get(fp);
  629                         if (foffset < 0 ||
  630                             (flp->l_start > 0 &&
  631                              foffset > OFF_MAX - flp->l_start)) {
  632                                 error = EOVERFLOW;
  633                                 fdrop(fp, td);
  634                                 break;
  635                         }
  636                         flp->l_start += foffset;
  637                 }
  638 
  639                 vp = fp->f_vnode;
  640                 switch (flp->l_type) {
  641                 case F_RDLCK:
  642                         if ((fp->f_flag & FREAD) == 0) {
  643                                 error = EBADF;
  644                                 break;
  645                         }
  646                         if ((p->p_leader->p_flag & P_ADVLOCK) == 0) {
  647                                 PROC_LOCK(p->p_leader);
  648                                 p->p_leader->p_flag |= P_ADVLOCK;
  649                                 PROC_UNLOCK(p->p_leader);
  650                         }
  651                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
  652                             flp, flg);
  653                         break;
  654                 case F_WRLCK:
  655                         if ((fp->f_flag & FWRITE) == 0) {
  656                                 error = EBADF;
  657                                 break;
  658                         }
  659                         if ((p->p_leader->p_flag & P_ADVLOCK) == 0) {
  660                                 PROC_LOCK(p->p_leader);
  661                                 p->p_leader->p_flag |= P_ADVLOCK;
  662                                 PROC_UNLOCK(p->p_leader);
  663                         }
  664                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
  665                             flp, flg);
  666                         break;
  667                 case F_UNLCK:
  668                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
  669                             flp, flg);
  670                         break;
  671                 case F_UNLCKSYS:
  672                         if (flg != F_REMOTE) {
  673                                 error = EINVAL;
  674                                 break;
  675                         }
  676                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
  677                             F_UNLCKSYS, flp, flg);
  678                         break;
  679                 default:
  680                         error = EINVAL;
  681                         break;
  682                 }
  683                 if (error != 0 || flp->l_type == F_UNLCK ||
  684                     flp->l_type == F_UNLCKSYS) {
  685                         fdrop(fp, td);
  686                         break;
  687                 }
  688 
  689                 /*
  690                  * Check for a race with close.
  691                  *
  692                  * The vnode is now advisory locked (or unlocked, but this case
  693                  * is not really important) as the caller requested.
  694                  * We had to drop the filedesc lock, so we need to recheck if
  695                  * the descriptor is still valid, because if it was closed
  696                  * in the meantime we need to remove advisory lock from the
  697                  * vnode - close on any descriptor leading to an advisory
  698                  * locked vnode, removes that lock.
  699                  * We will return 0 on purpose in that case, as the result of
  700                  * successful advisory lock might have been externally visible
  701                  * already. This is fine - effectively we pretend to the caller
  702                  * that the closing thread was a bit slower and that the
  703                  * advisory lock succeeded before the close.
  704                  */
  705                 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp2);
  706                 if (error != 0) {
  707                         fdrop(fp, td);
  708                         break;
  709                 }
  710                 if (fp != fp2) {
  711                         flp->l_whence = SEEK_SET;
  712                         flp->l_start = 0;
  713                         flp->l_len = 0;
  714                         flp->l_type = F_UNLCK;
  715                         (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
  716                             F_UNLCK, flp, F_POSIX);
  717                 }
  718                 fdrop(fp, td);
  719                 fdrop(fp2, td);
  720                 break;
  721 
  722         case F_GETLK:
  723                 error = fget_unlocked(fdp, fd, &cap_flock_rights, &fp);
  724                 if (error != 0)
  725                         break;
  726                 if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
  727                         error = EBADF;
  728                         fdrop(fp, td);
  729                         break;
  730                 }
  731                 flp = (struct flock *)arg;
  732                 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
  733                     flp->l_type != F_UNLCK) {
  734                         error = EINVAL;
  735                         fdrop(fp, td);
  736                         break;
  737                 }
  738                 if (flp->l_whence == SEEK_CUR) {
  739                         foffset = foffset_get(fp);
  740                         if ((flp->l_start > 0 &&
  741                             foffset > OFF_MAX - flp->l_start) ||
  742                             (flp->l_start < 0 &&
  743                             foffset < OFF_MIN - flp->l_start)) {
  744                                 error = EOVERFLOW;
  745                                 fdrop(fp, td);
  746                                 break;
  747                         }
  748                         flp->l_start += foffset;
  749                 }
  750                 vp = fp->f_vnode;
  751                 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
  752                     F_POSIX);
  753                 fdrop(fp, td);
  754                 break;
  755 
  756         case F_ADD_SEALS:
  757                 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
  758                 if (error != 0)
  759                         break;
  760                 error = fo_add_seals(fp, arg);
  761                 fdrop(fp, td);
  762                 break;
  763 
  764         case F_GET_SEALS:
  765                 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
  766                 if (error != 0)
  767                         break;
  768                 if (fo_get_seals(fp, &seals) == 0)
  769                         td->td_retval[0] = seals;
  770                 else
  771                         error = EINVAL;
  772                 fdrop(fp, td);
  773                 break;
  774 
  775         case F_RDAHEAD:
  776                 arg = arg ? 128 * 1024: 0;
  777                 /* FALLTHROUGH */
  778         case F_READAHEAD:
  779                 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
  780                 if (error != 0)
  781                         break;
  782                 if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
  783                         fdrop(fp, td);
  784                         error = EBADF;
  785                         break;
  786                 }
  787                 vp = fp->f_vnode;
  788                 if (vp->v_type != VREG) {
  789                         fdrop(fp, td);
  790                         error = ENOTTY;
  791                         break;
  792                 }
  793 
  794                 /*
  795                  * Exclusive lock synchronizes against f_seqcount reads and
  796                  * writes in sequential_heuristic().
  797                  */
  798                 error = vn_lock(vp, LK_EXCLUSIVE);
  799                 if (error != 0) {
  800                         fdrop(fp, td);
  801                         break;
  802                 }
  803                 if (arg >= 0) {
  804                         bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
  805                         arg = MIN(arg, INT_MAX - bsize + 1);
  806                         fp->f_seqcount[UIO_READ] = MIN(IO_SEQMAX,
  807                             (arg + bsize - 1) / bsize);
  808                         atomic_set_int(&fp->f_flag, FRDAHEAD);
  809                 } else {
  810                         atomic_clear_int(&fp->f_flag, FRDAHEAD);
  811                 }
  812                 VOP_UNLOCK(vp);
  813                 fdrop(fp, td);
  814                 break;
  815 
  816         case F_ISUNIONSTACK:
  817                 /*
  818                  * Check if the vnode is part of a union stack (either the
  819                  * "union" flag from mount(2) or unionfs).
  820                  *
  821                  * Prior to introduction of this op libc's readdir would call
  822                  * fstatfs(2), in effect unnecessarily copying kilobytes of
  823                  * data just to check fs name and a mount flag.
  824                  *
  825                  * Fixing the code to handle everything in the kernel instead
  826                  * is a non-trivial endeavor and has low priority, thus this
  827                  * horrible kludge facilitates the current behavior in a much
  828                  * cheaper manner until someone(tm) sorts this out.
  829                  */
  830                 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
  831                 if (error != 0)
  832                         break;
  833                 if (fp->f_type != DTYPE_VNODE) {
  834                         fdrop(fp, td);
  835                         error = EBADF;
  836                         break;
  837                 }
  838                 vp = fp->f_vnode;
  839                 /*
  840                  * Since we don't prevent dooming the vnode even non-null mp
  841                  * found can become immediately stale. This is tolerable since
  842                  * mount points are type-stable (providing safe memory access)
  843                  * and any vfs op on this vnode going forward will return an
  844                  * error (meaning return value in this case is meaningless).
  845                  */
  846                 mp = atomic_load_ptr(&vp->v_mount);
  847                 if (__predict_false(mp == NULL)) {
  848                         fdrop(fp, td);
  849                         error = EBADF;
  850                         break;
  851                 }
  852                 td->td_retval[0] = 0;
  853                 if (mp->mnt_kern_flag & MNTK_UNIONFS ||
  854                     mp->mnt_flag & MNT_UNION)
  855                         td->td_retval[0] = 1;
  856                 fdrop(fp, td);
  857                 break;
  858 
  859         case F_KINFO:
  860 #ifdef CAPABILITY_MODE
  861                 if (IN_CAPABILITY_MODE(td)) {
  862                         error = ECAPMODE;
  863                         break;
  864                 }
  865 #endif
  866                 error = copyin((void *)arg, &kif_sz, sizeof(kif_sz));
  867                 if (error != 0)
  868                         break;
  869                 if (kif_sz != sizeof(*kif)) {
  870                         error = EINVAL;
  871                         break;
  872                 }
  873                 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK | M_ZERO);
  874                 FILEDESC_SLOCK(fdp);
  875                 error = fget_cap_locked(fdp, fd, &cap_fcntl_rights, &fp, NULL);
  876                 if (error == 0 && fhold(fp)) {
  877                         export_file_to_kinfo(fp, fd, NULL, kif, fdp, 0);
  878                         FILEDESC_SUNLOCK(fdp);
  879                         fdrop(fp, td);
  880                         if ((kif->kf_status & KF_ATTR_VALID) != 0) {
  881                                 kif->kf_structsize = sizeof(*kif);
  882                                 error = copyout(kif, (void *)arg, sizeof(*kif));
  883                         } else {
  884                                 error = EBADF;
  885                         }
  886                 } else {
  887                         FILEDESC_SUNLOCK(fdp);
  888                         if (error == 0)
  889                                 error = EBADF;
  890                 }
  891                 free(kif, M_TEMP);
  892                 break;
  893 
  894         default:
  895                 error = EINVAL;
  896                 break;
  897         }
  898         return (error);
  899 }
  900 
  901 static int
  902 getmaxfd(struct thread *td)
  903 {
  904 
  905         return (min((int)lim_cur(td, RLIMIT_NOFILE), maxfilesperproc));
  906 }
  907 
  908 /*
  909  * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
  910  */
  911 int
  912 kern_dup(struct thread *td, u_int mode, int flags, int old, int new)
  913 {
  914         struct filedesc *fdp;
  915         struct filedescent *oldfde, *newfde;
  916         struct proc *p;
  917         struct file *delfp, *oldfp;
  918         u_long *oioctls, *nioctls;
  919         int error, maxfd;
  920 
  921         p = td->td_proc;
  922         fdp = p->p_fd;
  923         oioctls = NULL;
  924 
  925         MPASS((flags & ~(FDDUP_FLAG_CLOEXEC)) == 0);
  926         MPASS(mode < FDDUP_LASTMODE);
  927 
  928         AUDIT_ARG_FD(old);
  929         /* XXXRW: if (flags & FDDUP_FIXED) AUDIT_ARG_FD2(new); */
  930 
  931         /*
  932          * Verify we have a valid descriptor to dup from and possibly to
  933          * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
  934          * return EINVAL when the new descriptor is out of bounds.
  935          */
  936         if (old < 0)
  937                 return (EBADF);
  938         if (new < 0)
  939                 return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
  940         maxfd = getmaxfd(td);
  941         if (new >= maxfd)
  942                 return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
  943 
  944         error = EBADF;
  945         FILEDESC_XLOCK(fdp);
  946         if (fget_locked(fdp, old) == NULL)
  947                 goto unlock;
  948         if ((mode == FDDUP_FIXED || mode == FDDUP_MUSTREPLACE) && old == new) {
  949                 td->td_retval[0] = new;
  950                 if (flags & FDDUP_FLAG_CLOEXEC)
  951                         fdp->fd_ofiles[new].fde_flags |= UF_EXCLOSE;
  952                 error = 0;
  953                 goto unlock;
  954         }
  955 
  956         oldfde = &fdp->fd_ofiles[old];
  957         oldfp = oldfde->fde_file;
  958         if (!fhold(oldfp))
  959                 goto unlock;
  960 
  961         /*
  962          * If the caller specified a file descriptor, make sure the file
  963          * table is large enough to hold it, and grab it.  Otherwise, just
  964          * allocate a new descriptor the usual way.
  965          */
  966         switch (mode) {
  967         case FDDUP_NORMAL:
  968         case FDDUP_FCNTL:
  969                 if ((error = fdalloc(td, new, &new)) != 0) {
  970                         fdrop(oldfp, td);
  971                         goto unlock;
  972                 }
  973                 break;
  974         case FDDUP_MUSTREPLACE:
  975                 /* Target file descriptor must exist. */
  976                 if (fget_locked(fdp, new) == NULL) {
  977                         fdrop(oldfp, td);
  978                         goto unlock;
  979                 }
  980                 break;
  981         case FDDUP_FIXED:
  982                 if (new >= fdp->fd_nfiles) {
  983                         /*
  984                          * The resource limits are here instead of e.g.
  985                          * fdalloc(), because the file descriptor table may be
  986                          * shared between processes, so we can't really use
  987                          * racct_add()/racct_sub().  Instead of counting the
  988                          * number of actually allocated descriptors, just put
  989                          * the limit on the size of the file descriptor table.
  990                          */
  991 #ifdef RACCT
  992                         if (RACCT_ENABLED()) {
  993                                 error = racct_set_unlocked(p, RACCT_NOFILE, new + 1);
  994                                 if (error != 0) {
  995                                         error = EMFILE;
  996                                         fdrop(oldfp, td);
  997                                         goto unlock;
  998                                 }
  999                         }
 1000 #endif
 1001                         fdgrowtable_exp(fdp, new + 1);
 1002                 }
 1003                 if (!fdisused(fdp, new))
 1004                         fdused(fdp, new);
 1005                 break;
 1006         default:
 1007                 KASSERT(0, ("%s unsupported mode %d", __func__, mode));
 1008         }
 1009 
 1010         KASSERT(old != new, ("new fd is same as old"));
 1011 
 1012         /* Refetch oldfde because the table may have grown and old one freed. */
 1013         oldfde = &fdp->fd_ofiles[old];
 1014         KASSERT(oldfp == oldfde->fde_file,
 1015             ("fdt_ofiles shift from growth observed at fd %d",
 1016             old));
 1017 
 1018         newfde = &fdp->fd_ofiles[new];
 1019         delfp = newfde->fde_file;
 1020 
 1021         nioctls = filecaps_copy_prep(&oldfde->fde_caps);
 1022 
 1023         /*
 1024          * Duplicate the source descriptor.
 1025          */
 1026 #ifdef CAPABILITIES
 1027         seqc_write_begin(&newfde->fde_seqc);
 1028 #endif
 1029         oioctls = filecaps_free_prep(&newfde->fde_caps);
 1030         memcpy(newfde, oldfde, fde_change_size);
 1031         filecaps_copy_finish(&oldfde->fde_caps, &newfde->fde_caps,
 1032             nioctls);
 1033         if ((flags & FDDUP_FLAG_CLOEXEC) != 0)
 1034                 newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
 1035         else
 1036                 newfde->fde_flags = oldfde->fde_flags & ~UF_EXCLOSE;
 1037 #ifdef CAPABILITIES
 1038         seqc_write_end(&newfde->fde_seqc);
 1039 #endif
 1040         td->td_retval[0] = new;
 1041 
 1042         error = 0;
 1043 
 1044         if (delfp != NULL) {
 1045                 (void) closefp(fdp, new, delfp, td, true, false);
 1046                 FILEDESC_UNLOCK_ASSERT(fdp);
 1047         } else {
 1048 unlock:
 1049                 FILEDESC_XUNLOCK(fdp);
 1050         }
 1051 
 1052         filecaps_free_finish(oioctls);
 1053         return (error);
 1054 }
 1055 
 1056 static void
 1057 sigiofree(struct sigio *sigio)
 1058 {
 1059         crfree(sigio->sio_ucred);
 1060         free(sigio, M_SIGIO);
 1061 }
 1062 
 1063 static struct sigio *
 1064 funsetown_locked(struct sigio *sigio)
 1065 {
 1066         struct proc *p;
 1067         struct pgrp *pg;
 1068 
 1069         SIGIO_ASSERT_LOCKED();
 1070 
 1071         if (sigio == NULL)
 1072                 return (NULL);
 1073         *sigio->sio_myref = NULL;
 1074         if (sigio->sio_pgid < 0) {
 1075                 pg = sigio->sio_pgrp;
 1076                 PGRP_LOCK(pg);
 1077                 SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, sio_pgsigio);
 1078                 PGRP_UNLOCK(pg);
 1079         } else {
 1080                 p = sigio->sio_proc;
 1081                 PROC_LOCK(p);
 1082                 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
 1083                 PROC_UNLOCK(p);
 1084         }
 1085         return (sigio);
 1086 }
 1087 
 1088 /*
 1089  * If sigio is on the list associated with a process or process group,
 1090  * disable signalling from the device, remove sigio from the list and
 1091  * free sigio.
 1092  */
 1093 void
 1094 funsetown(struct sigio **sigiop)
 1095 {
 1096         struct sigio *sigio;
 1097 
 1098         /* Racy check, consumers must provide synchronization. */
 1099         if (*sigiop == NULL)
 1100                 return;
 1101 
 1102         SIGIO_LOCK();
 1103         sigio = funsetown_locked(*sigiop);
 1104         SIGIO_UNLOCK();
 1105         if (sigio != NULL)
 1106                 sigiofree(sigio);
 1107 }
 1108 
 1109 /*
 1110  * Free a list of sigio structures.  The caller must ensure that new sigio
 1111  * structures cannot be added after this point.  For process groups this is
 1112  * guaranteed using the proctree lock; for processes, the P_WEXIT flag serves
 1113  * as an interlock.
 1114  */
 1115 void
 1116 funsetownlst(struct sigiolst *sigiolst)
 1117 {
 1118         struct proc *p;
 1119         struct pgrp *pg;
 1120         struct sigio *sigio, *tmp;
 1121 
 1122         /* Racy check. */
 1123         sigio = SLIST_FIRST(sigiolst);
 1124         if (sigio == NULL)
 1125                 return;
 1126 
 1127         p = NULL;
 1128         pg = NULL;
 1129 
 1130         SIGIO_LOCK();
 1131         sigio = SLIST_FIRST(sigiolst);
 1132         if (sigio == NULL) {
 1133                 SIGIO_UNLOCK();
 1134                 return;
 1135         }
 1136 
 1137         /*
 1138          * Every entry of the list should belong to a single proc or pgrp.
 1139          */
 1140         if (sigio->sio_pgid < 0) {
 1141                 pg = sigio->sio_pgrp;
 1142                 sx_assert(&proctree_lock, SX_XLOCKED);
 1143                 PGRP_LOCK(pg);
 1144         } else /* if (sigio->sio_pgid > 0) */ {
 1145                 p = sigio->sio_proc;
 1146                 PROC_LOCK(p);
 1147                 KASSERT((p->p_flag & P_WEXIT) != 0,
 1148                     ("%s: process %p is not exiting", __func__, p));
 1149         }
 1150 
 1151         SLIST_FOREACH(sigio, sigiolst, sio_pgsigio) {
 1152                 *sigio->sio_myref = NULL;
 1153                 if (pg != NULL) {
 1154                         KASSERT(sigio->sio_pgid < 0,
 1155                             ("Proc sigio in pgrp sigio list"));
 1156                         KASSERT(sigio->sio_pgrp == pg,
 1157                             ("Bogus pgrp in sigio list"));
 1158                 } else /* if (p != NULL) */ {
 1159                         KASSERT(sigio->sio_pgid > 0,
 1160                             ("Pgrp sigio in proc sigio list"));
 1161                         KASSERT(sigio->sio_proc == p,
 1162                             ("Bogus proc in sigio list"));
 1163                 }
 1164         }
 1165 
 1166         if (pg != NULL)
 1167                 PGRP_UNLOCK(pg);
 1168         else
 1169                 PROC_UNLOCK(p);
 1170         SIGIO_UNLOCK();
 1171 
 1172         SLIST_FOREACH_SAFE(sigio, sigiolst, sio_pgsigio, tmp)
 1173                 sigiofree(sigio);
 1174 }
 1175 
 1176 /*
 1177  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 1178  *
 1179  * After permission checking, add a sigio structure to the sigio list for
 1180  * the process or process group.
 1181  */
 1182 int
 1183 fsetown(pid_t pgid, struct sigio **sigiop)
 1184 {
 1185         struct proc *proc;
 1186         struct pgrp *pgrp;
 1187         struct sigio *osigio, *sigio;
 1188         int ret;
 1189 
 1190         if (pgid == 0) {
 1191                 funsetown(sigiop);
 1192                 return (0);
 1193         }
 1194 
 1195         sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
 1196         sigio->sio_pgid = pgid;
 1197         sigio->sio_ucred = crhold(curthread->td_ucred);
 1198         sigio->sio_myref = sigiop;
 1199 
 1200         ret = 0;
 1201         if (pgid > 0) {
 1202                 ret = pget(pgid, PGET_NOTWEXIT | PGET_NOTID | PGET_HOLD, &proc);
 1203                 SIGIO_LOCK();
 1204                 osigio = funsetown_locked(*sigiop);
 1205                 if (ret == 0) {
 1206                         PROC_LOCK(proc);
 1207                         _PRELE(proc);
 1208                         if ((proc->p_flag & P_WEXIT) != 0) {
 1209                                 ret = ESRCH;
 1210                         } else if (proc->p_session !=
 1211                             curthread->td_proc->p_session) {
 1212                                 /*
 1213                                  * Policy - Don't allow a process to FSETOWN a
 1214                                  * process in another session.
 1215                                  *
 1216                                  * Remove this test to allow maximum flexibility
 1217                                  * or restrict FSETOWN to the current process or
 1218                                  * process group for maximum safety.
 1219                                  */
 1220                                 ret = EPERM;
 1221                         } else {
 1222                                 sigio->sio_proc = proc;
 1223                                 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio,
 1224                                     sio_pgsigio);
 1225                         }
 1226                         PROC_UNLOCK(proc);
 1227                 }
 1228         } else /* if (pgid < 0) */ {
 1229                 sx_slock(&proctree_lock);
 1230                 SIGIO_LOCK();
 1231                 osigio = funsetown_locked(*sigiop);
 1232                 pgrp = pgfind(-pgid);
 1233                 if (pgrp == NULL) {
 1234                         ret = ESRCH;
 1235                 } else {
 1236                         if (pgrp->pg_session != curthread->td_proc->p_session) {
 1237                                 /*
 1238                                  * Policy - Don't allow a process to FSETOWN a
 1239                                  * process in another session.
 1240                                  *
 1241                                  * Remove this test to allow maximum flexibility
 1242                                  * or restrict FSETOWN to the current process or
 1243                                  * process group for maximum safety.
 1244                                  */
 1245                                 ret = EPERM;
 1246                         } else {
 1247                                 sigio->sio_pgrp = pgrp;
 1248                                 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio,
 1249                                     sio_pgsigio);
 1250                         }
 1251                         PGRP_UNLOCK(pgrp);
 1252                 }
 1253                 sx_sunlock(&proctree_lock);
 1254         }
 1255         if (ret == 0)
 1256                 *sigiop = sigio;
 1257         SIGIO_UNLOCK();
 1258         if (osigio != NULL)
 1259                 sigiofree(osigio);
 1260         return (ret);
 1261 }
 1262 
 1263 /*
 1264  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 1265  */
 1266 pid_t
 1267 fgetown(struct sigio **sigiop)
 1268 {
 1269         pid_t pgid;
 1270 
 1271         SIGIO_LOCK();
 1272         pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
 1273         SIGIO_UNLOCK();
 1274         return (pgid);
 1275 }
 1276 
 1277 static int
 1278 closefp_impl(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
 1279     bool audit)
 1280 {
 1281         int error;
 1282 
 1283         FILEDESC_XLOCK_ASSERT(fdp);
 1284 
 1285         /*
 1286          * We now hold the fp reference that used to be owned by the
 1287          * descriptor array.  We have to unlock the FILEDESC *AFTER*
 1288          * knote_fdclose to prevent a race of the fd getting opened, a knote
 1289          * added, and deleteing a knote for the new fd.
 1290          */
 1291         if (__predict_false(!TAILQ_EMPTY(&fdp->fd_kqlist)))
 1292                 knote_fdclose(td, fd);
 1293 
 1294         /*
 1295          * We need to notify mqueue if the object is of type mqueue.
 1296          */
 1297         if (__predict_false(fp->f_type == DTYPE_MQUEUE))
 1298                 mq_fdclose(td, fd, fp);
 1299         FILEDESC_XUNLOCK(fdp);
 1300 
 1301 #ifdef AUDIT
 1302         if (AUDITING_TD(td) && audit)
 1303                 audit_sysclose(td, fd, fp);
 1304 #endif
 1305         error = closef(fp, td);
 1306 
 1307         /*
 1308          * All paths leading up to closefp() will have already removed or
 1309          * replaced the fd in the filedesc table, so a restart would not
 1310          * operate on the same file.
 1311          */
 1312         if (error == ERESTART)
 1313                 error = EINTR;
 1314 
 1315         return (error);
 1316 }
 1317 
 1318 static int
 1319 closefp_hl(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
 1320     bool holdleaders, bool audit)
 1321 {
 1322         int error;
 1323 
 1324         FILEDESC_XLOCK_ASSERT(fdp);
 1325 
 1326         if (holdleaders) {
 1327                 if (td->td_proc->p_fdtol != NULL) {
 1328                         /*
 1329                          * Ask fdfree() to sleep to ensure that all relevant
 1330                          * process leaders can be traversed in closef().
 1331                          */
 1332                         fdp->fd_holdleaderscount++;
 1333                 } else {
 1334                         holdleaders = false;
 1335                 }
 1336         }
 1337 
 1338         error = closefp_impl(fdp, fd, fp, td, audit);
 1339         if (holdleaders) {
 1340                 FILEDESC_XLOCK(fdp);
 1341                 fdp->fd_holdleaderscount--;
 1342                 if (fdp->fd_holdleaderscount == 0 &&
 1343                     fdp->fd_holdleaderswakeup != 0) {
 1344                         fdp->fd_holdleaderswakeup = 0;
 1345                         wakeup(&fdp->fd_holdleaderscount);
 1346                 }
 1347                 FILEDESC_XUNLOCK(fdp);
 1348         }
 1349         return (error);
 1350 }
 1351 
 1352 static int
 1353 closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
 1354     bool holdleaders, bool audit)
 1355 {
 1356 
 1357         FILEDESC_XLOCK_ASSERT(fdp);
 1358 
 1359         if (__predict_false(td->td_proc->p_fdtol != NULL)) {
 1360                 return (closefp_hl(fdp, fd, fp, td, holdleaders, audit));
 1361         } else {
 1362                 return (closefp_impl(fdp, fd, fp, td, audit));
 1363         }
 1364 }
 1365 
 1366 /*
 1367  * Close a file descriptor.
 1368  */
 1369 #ifndef _SYS_SYSPROTO_H_
 1370 struct close_args {
 1371         int     fd;
 1372 };
 1373 #endif
 1374 /* ARGSUSED */
 1375 int
 1376 sys_close(struct thread *td, struct close_args *uap)
 1377 {
 1378 
 1379         return (kern_close(td, uap->fd));
 1380 }
 1381 
 1382 int
 1383 kern_close(struct thread *td, int fd)
 1384 {
 1385         struct filedesc *fdp;
 1386         struct file *fp;
 1387 
 1388         fdp = td->td_proc->p_fd;
 1389 
 1390         FILEDESC_XLOCK(fdp);
 1391         if ((fp = fget_locked(fdp, fd)) == NULL) {
 1392                 FILEDESC_XUNLOCK(fdp);
 1393                 return (EBADF);
 1394         }
 1395         fdfree(fdp, fd);
 1396 
 1397         /* closefp() drops the FILEDESC lock for us. */
 1398         return (closefp(fdp, fd, fp, td, true, true));
 1399 }
 1400 
 1401 int
 1402 kern_close_range(struct thread *td, u_int lowfd, u_int highfd)
 1403 {
 1404         struct filedesc *fdp;
 1405         const struct fdescenttbl *fdt;
 1406         struct file *fp;
 1407         int fd;
 1408 
 1409         /*
 1410          * Check this prior to clamping; closefrom(3) with only fd 0, 1, and 2
 1411          * open should not be a usage error.  From a close_range() perspective,
 1412          * close_range(3, ~0U, 0) in the same scenario should also likely not
 1413          * be a usage error as all fd above 3 are in-fact already closed.
 1414          */
 1415         if (highfd < lowfd) {
 1416                 return (EINVAL);
 1417         }
 1418 
 1419         fdp = td->td_proc->p_fd;
 1420         FILEDESC_XLOCK(fdp);
 1421         fdt = atomic_load_ptr(&fdp->fd_files);
 1422         highfd = MIN(highfd, fdt->fdt_nfiles - 1);
 1423         fd = lowfd;
 1424         if (__predict_false(fd > highfd)) {
 1425                 goto out_locked;
 1426         }
 1427         for (;;) {
 1428                 fp = fdt->fdt_ofiles[fd].fde_file;
 1429                 if (fp == NULL) {
 1430                         if (fd == highfd)
 1431                                 goto out_locked;
 1432                 } else {
 1433                         fdfree(fdp, fd);
 1434                         (void) closefp(fdp, fd, fp, td, true, true);
 1435                         if (fd == highfd)
 1436                                 goto out_unlocked;
 1437                         FILEDESC_XLOCK(fdp);
 1438                         fdt = atomic_load_ptr(&fdp->fd_files);
 1439                 }
 1440                 fd++;
 1441         }
 1442 out_locked:
 1443         FILEDESC_XUNLOCK(fdp);
 1444 out_unlocked:
 1445         return (0);
 1446 }
 1447 
 1448 #ifndef _SYS_SYSPROTO_H_
 1449 struct close_range_args {
 1450         u_int   lowfd;
 1451         u_int   highfd;
 1452         int     flags;
 1453 };
 1454 #endif
 1455 int
 1456 sys_close_range(struct thread *td, struct close_range_args *uap)
 1457 {
 1458 
 1459         AUDIT_ARG_FD(uap->lowfd);
 1460         AUDIT_ARG_CMD(uap->highfd);
 1461         AUDIT_ARG_FFLAGS(uap->flags);
 1462 
 1463         /* No flags currently defined */
 1464         if (uap->flags != 0)
 1465                 return (EINVAL);
 1466         return (kern_close_range(td, uap->lowfd, uap->highfd));
 1467 }
 1468 
 1469 #ifdef COMPAT_FREEBSD12
 1470 /*
 1471  * Close open file descriptors.
 1472  */
 1473 #ifndef _SYS_SYSPROTO_H_
 1474 struct freebsd12_closefrom_args {
 1475         int     lowfd;
 1476 };
 1477 #endif
 1478 /* ARGSUSED */
 1479 int
 1480 freebsd12_closefrom(struct thread *td, struct freebsd12_closefrom_args *uap)
 1481 {
 1482         u_int lowfd;
 1483 
 1484         AUDIT_ARG_FD(uap->lowfd);
 1485 
 1486         /*
 1487          * Treat negative starting file descriptor values identical to
 1488          * closefrom(0) which closes all files.
 1489          */
 1490         lowfd = MAX(0, uap->lowfd);
 1491         return (kern_close_range(td, lowfd, ~0U));
 1492 }
 1493 #endif  /* COMPAT_FREEBSD12 */
 1494 
 1495 #if defined(COMPAT_43)
 1496 /*
 1497  * Return status information about a file descriptor.
 1498  */
 1499 #ifndef _SYS_SYSPROTO_H_
 1500 struct ofstat_args {
 1501         int     fd;
 1502         struct  ostat *sb;
 1503 };
 1504 #endif
 1505 /* ARGSUSED */
 1506 int
 1507 ofstat(struct thread *td, struct ofstat_args *uap)
 1508 {
 1509         struct ostat oub;
 1510         struct stat ub;
 1511         int error;
 1512 
 1513         error = kern_fstat(td, uap->fd, &ub);
 1514         if (error == 0) {
 1515                 cvtstat(&ub, &oub);
 1516                 error = copyout(&oub, uap->sb, sizeof(oub));
 1517         }
 1518         return (error);
 1519 }
 1520 #endif /* COMPAT_43 */
 1521 
 1522 #if defined(COMPAT_FREEBSD11)
 1523 int
 1524 freebsd11_fstat(struct thread *td, struct freebsd11_fstat_args *uap)
 1525 {
 1526         struct stat sb;
 1527         struct freebsd11_stat osb;
 1528         int error;
 1529 
 1530         error = kern_fstat(td, uap->fd, &sb);
 1531         if (error != 0)
 1532                 return (error);
 1533         error = freebsd11_cvtstat(&sb, &osb);
 1534         if (error == 0)
 1535                 error = copyout(&osb, uap->sb, sizeof(osb));
 1536         return (error);
 1537 }
 1538 #endif  /* COMPAT_FREEBSD11 */
 1539 
 1540 /*
 1541  * Return status information about a file descriptor.
 1542  */
 1543 #ifndef _SYS_SYSPROTO_H_
 1544 struct fstat_args {
 1545         int     fd;
 1546         struct  stat *sb;
 1547 };
 1548 #endif
 1549 /* ARGSUSED */
 1550 int
 1551 sys_fstat(struct thread *td, struct fstat_args *uap)
 1552 {
 1553         struct stat ub;
 1554         int error;
 1555 
 1556         error = kern_fstat(td, uap->fd, &ub);
 1557         if (error == 0)
 1558                 error = copyout(&ub, uap->sb, sizeof(ub));
 1559         return (error);
 1560 }
 1561 
 1562 int
 1563 kern_fstat(struct thread *td, int fd, struct stat *sbp)
 1564 {
 1565         struct file *fp;
 1566         int error;
 1567 
 1568         AUDIT_ARG_FD(fd);
 1569 
 1570         error = fget(td, fd, &cap_fstat_rights, &fp);
 1571         if (__predict_false(error != 0))
 1572                 return (error);
 1573 
 1574         AUDIT_ARG_FILE(td->td_proc, fp);
 1575 
 1576         error = fo_stat(fp, sbp, td->td_ucred, td);
 1577         fdrop(fp, td);
 1578 #ifdef __STAT_TIME_T_EXT
 1579         sbp->st_atim_ext = 0;
 1580         sbp->st_mtim_ext = 0;
 1581         sbp->st_ctim_ext = 0;
 1582         sbp->st_btim_ext = 0;
 1583 #endif
 1584 #ifdef KTRACE
 1585         if (KTRPOINT(td, KTR_STRUCT))
 1586                 ktrstat_error(sbp, error);
 1587 #endif
 1588         return (error);
 1589 }
 1590 
 1591 #if defined(COMPAT_FREEBSD11)
 1592 /*
 1593  * Return status information about a file descriptor.
 1594  */
 1595 #ifndef _SYS_SYSPROTO_H_
 1596 struct freebsd11_nfstat_args {
 1597         int     fd;
 1598         struct  nstat *sb;
 1599 };
 1600 #endif
 1601 /* ARGSUSED */
 1602 int
 1603 freebsd11_nfstat(struct thread *td, struct freebsd11_nfstat_args *uap)
 1604 {
 1605         struct nstat nub;
 1606         struct stat ub;
 1607         int error;
 1608 
 1609         error = kern_fstat(td, uap->fd, &ub);
 1610         if (error == 0) {
 1611                 freebsd11_cvtnstat(&ub, &nub);
 1612                 error = copyout(&nub, uap->sb, sizeof(nub));
 1613         }
 1614         return (error);
 1615 }
 1616 #endif /* COMPAT_FREEBSD11 */
 1617 
 1618 /*
 1619  * Return pathconf information about a file descriptor.
 1620  */
 1621 #ifndef _SYS_SYSPROTO_H_
 1622 struct fpathconf_args {
 1623         int     fd;
 1624         int     name;
 1625 };
 1626 #endif
 1627 /* ARGSUSED */
 1628 int
 1629 sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
 1630 {
 1631         long value;
 1632         int error;
 1633 
 1634         error = kern_fpathconf(td, uap->fd, uap->name, &value);
 1635         if (error == 0)
 1636                 td->td_retval[0] = value;
 1637         return (error);
 1638 }
 1639 
 1640 int
 1641 kern_fpathconf(struct thread *td, int fd, int name, long *valuep)
 1642 {
 1643         struct file *fp;
 1644         struct vnode *vp;
 1645         int error;
 1646 
 1647         error = fget(td, fd, &cap_fpathconf_rights, &fp);
 1648         if (error != 0)
 1649                 return (error);
 1650 
 1651         if (name == _PC_ASYNC_IO) {
 1652                 *valuep = _POSIX_ASYNCHRONOUS_IO;
 1653                 goto out;
 1654         }
 1655         vp = fp->f_vnode;
 1656         if (vp != NULL) {
 1657                 vn_lock(vp, LK_SHARED | LK_RETRY);
 1658                 error = VOP_PATHCONF(vp, name, valuep);
 1659                 VOP_UNLOCK(vp);
 1660         } else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
 1661                 if (name != _PC_PIPE_BUF) {
 1662                         error = EINVAL;
 1663                 } else {
 1664                         *valuep = PIPE_BUF;
 1665                         error = 0;
 1666                 }
 1667         } else {
 1668                 error = EOPNOTSUPP;
 1669         }
 1670 out:
 1671         fdrop(fp, td);
 1672         return (error);
 1673 }
 1674 
 1675 /*
 1676  * Copy filecaps structure allocating memory for ioctls array if needed.
 1677  *
 1678  * The last parameter indicates whether the fdtable is locked. If it is not and
 1679  * ioctls are encountered, copying fails and the caller must lock the table.
 1680  *
 1681  * Note that if the table was not locked, the caller has to check the relevant
 1682  * sequence counter to determine whether the operation was successful.
 1683  */
 1684 bool
 1685 filecaps_copy(const struct filecaps *src, struct filecaps *dst, bool locked)
 1686 {
 1687         size_t size;
 1688 
 1689         if (src->fc_ioctls != NULL && !locked)
 1690                 return (false);
 1691         memcpy(dst, src, sizeof(*src));
 1692         if (src->fc_ioctls == NULL)
 1693                 return (true);
 1694 
 1695         KASSERT(src->fc_nioctls > 0,
 1696             ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
 1697 
 1698         size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
 1699         dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
 1700         memcpy(dst->fc_ioctls, src->fc_ioctls, size);
 1701         return (true);
 1702 }
 1703 
 1704 static u_long *
 1705 filecaps_copy_prep(const struct filecaps *src)
 1706 {
 1707         u_long *ioctls;
 1708         size_t size;
 1709 
 1710         if (__predict_true(src->fc_ioctls == NULL))
 1711                 return (NULL);
 1712 
 1713         KASSERT(src->fc_nioctls > 0,
 1714             ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
 1715 
 1716         size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
 1717         ioctls = malloc(size, M_FILECAPS, M_WAITOK);
 1718         return (ioctls);
 1719 }
 1720 
 1721 static void
 1722 filecaps_copy_finish(const struct filecaps *src, struct filecaps *dst,
 1723     u_long *ioctls)
 1724 {
 1725         size_t size;
 1726 
 1727         *dst = *src;
 1728         if (__predict_true(src->fc_ioctls == NULL)) {
 1729                 MPASS(ioctls == NULL);
 1730                 return;
 1731         }
 1732 
 1733         size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
 1734         dst->fc_ioctls = ioctls;
 1735         bcopy(src->fc_ioctls, dst->fc_ioctls, size);
 1736 }
 1737 
 1738 /*
 1739  * Move filecaps structure to the new place and clear the old place.
 1740  */
 1741 void
 1742 filecaps_move(struct filecaps *src, struct filecaps *dst)
 1743 {
 1744 
 1745         *dst = *src;
 1746         bzero(src, sizeof(*src));
 1747 }
 1748 
 1749 /*
 1750  * Fill the given filecaps structure with full rights.
 1751  */
 1752 static void
 1753 filecaps_fill(struct filecaps *fcaps)
 1754 {
 1755 
 1756         CAP_ALL(&fcaps->fc_rights);
 1757         fcaps->fc_ioctls = NULL;
 1758         fcaps->fc_nioctls = -1;
 1759         fcaps->fc_fcntls = CAP_FCNTL_ALL;
 1760 }
 1761 
 1762 /*
 1763  * Free memory allocated within filecaps structure.
 1764  */
 1765 void
 1766 filecaps_free(struct filecaps *fcaps)
 1767 {
 1768 
 1769         free(fcaps->fc_ioctls, M_FILECAPS);
 1770         bzero(fcaps, sizeof(*fcaps));
 1771 }
 1772 
 1773 static u_long *
 1774 filecaps_free_prep(struct filecaps *fcaps)
 1775 {
 1776         u_long *ioctls;
 1777 
 1778         ioctls = fcaps->fc_ioctls;
 1779         bzero(fcaps, sizeof(*fcaps));
 1780         return (ioctls);
 1781 }
 1782 
 1783 static void
 1784 filecaps_free_finish(u_long *ioctls)
 1785 {
 1786 
 1787         free(ioctls, M_FILECAPS);
 1788 }
 1789 
 1790 /*
 1791  * Validate the given filecaps structure.
 1792  */
 1793 static void
 1794 filecaps_validate(const struct filecaps *fcaps, const char *func)
 1795 {
 1796 
 1797         KASSERT(cap_rights_is_valid(&fcaps->fc_rights),
 1798             ("%s: invalid rights", func));
 1799         KASSERT((fcaps->fc_fcntls & ~CAP_FCNTL_ALL) == 0,
 1800             ("%s: invalid fcntls", func));
 1801         KASSERT(fcaps->fc_fcntls == 0 ||
 1802             cap_rights_is_set(&fcaps->fc_rights, CAP_FCNTL),
 1803             ("%s: fcntls without CAP_FCNTL", func));
 1804         KASSERT(fcaps->fc_ioctls != NULL ? fcaps->fc_nioctls > 0 :
 1805             (fcaps->fc_nioctls == -1 || fcaps->fc_nioctls == 0),
 1806             ("%s: invalid ioctls", func));
 1807         KASSERT(fcaps->fc_nioctls == 0 ||
 1808             cap_rights_is_set(&fcaps->fc_rights, CAP_IOCTL),
 1809             ("%s: ioctls without CAP_IOCTL", func));
 1810 }
 1811 
 1812 static void
 1813 fdgrowtable_exp(struct filedesc *fdp, int nfd)
 1814 {
 1815         int nfd1;
 1816 
 1817         FILEDESC_XLOCK_ASSERT(fdp);
 1818 
 1819         nfd1 = fdp->fd_nfiles * 2;
 1820         if (nfd1 < nfd)
 1821                 nfd1 = nfd;
 1822         fdgrowtable(fdp, nfd1);
 1823 }
 1824 
 1825 /*
 1826  * Grow the file table to accommodate (at least) nfd descriptors.
 1827  */
 1828 static void
 1829 fdgrowtable(struct filedesc *fdp, int nfd)
 1830 {
 1831         struct filedesc0 *fdp0;
 1832         struct freetable *ft;
 1833         struct fdescenttbl *ntable;
 1834         struct fdescenttbl *otable;
 1835         int nnfiles, onfiles;
 1836         NDSLOTTYPE *nmap, *omap;
 1837 
 1838         KASSERT(fdp->fd_nfiles > 0, ("zero-length file table"));
 1839 
 1840         /* save old values */
 1841         onfiles = fdp->fd_nfiles;
 1842         otable = fdp->fd_files;
 1843         omap = fdp->fd_map;
 1844 
 1845         /* compute the size of the new table */
 1846         nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
 1847         if (nnfiles <= onfiles)
 1848                 /* the table is already large enough */
 1849                 return;
 1850 
 1851         /*
 1852          * Allocate a new table.  We need enough space for the number of
 1853          * entries, file entries themselves and the struct freetable we will use
 1854          * when we decommission the table and place it on the freelist.
 1855          * We place the struct freetable in the middle so we don't have
 1856          * to worry about padding.
 1857          */
 1858         ntable = malloc(offsetof(struct fdescenttbl, fdt_ofiles) +
 1859             nnfiles * sizeof(ntable->fdt_ofiles[0]) +
 1860             sizeof(struct freetable),
 1861             M_FILEDESC, M_ZERO | M_WAITOK);
 1862         /* copy the old data */
 1863         ntable->fdt_nfiles = nnfiles;
 1864         memcpy(ntable->fdt_ofiles, otable->fdt_ofiles,
 1865             onfiles * sizeof(ntable->fdt_ofiles[0]));
 1866 
 1867         /*
 1868          * Allocate a new map only if the old is not large enough.  It will
 1869          * grow at a slower rate than the table as it can map more
 1870          * entries than the table can hold.
 1871          */
 1872         if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
 1873                 nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE, M_FILEDESC,
 1874                     M_ZERO | M_WAITOK);
 1875                 /* copy over the old data and update the pointer */
 1876                 memcpy(nmap, omap, NDSLOTS(onfiles) * sizeof(*omap));
 1877                 fdp->fd_map = nmap;
 1878         }
 1879 
 1880         /*
 1881          * Make sure that ntable is correctly initialized before we replace
 1882          * fd_files poiner. Otherwise fget_unlocked() may see inconsistent
 1883          * data.
 1884          */
 1885         atomic_store_rel_ptr((volatile void *)&fdp->fd_files, (uintptr_t)ntable);
 1886 
 1887         /*
 1888          * Free the old file table when not shared by other threads or processes.
 1889          * The old file table is considered to be shared when either are true:
 1890          * - The process has more than one thread.
 1891          * - The file descriptor table has been shared via fdshare().
 1892          *
 1893          * When shared, the old file table will be placed on a freelist
 1894          * which will be processed when the struct filedesc is released.
 1895          *
 1896          * Note that if onfiles == NDFILE, we're dealing with the original
 1897          * static allocation contained within (struct filedesc0 *)fdp,
 1898          * which must not be freed.
 1899          */
 1900         if (onfiles > NDFILE) {
 1901                 /*
 1902                  * Note we may be called here from fdinit while allocating a
 1903                  * table for a new process in which case ->p_fd points
 1904                  * elsewhere.
 1905                  */
 1906                 if (curproc->p_fd != fdp || FILEDESC_IS_ONLY_USER(fdp)) {
 1907                         free(otable, M_FILEDESC);
 1908                 } else {
 1909                         ft = (struct freetable *)&otable->fdt_ofiles[onfiles];
 1910                         fdp0 = (struct filedesc0 *)fdp;
 1911                         ft->ft_table = otable;
 1912                         SLIST_INSERT_HEAD(&fdp0->fd_free, ft, ft_next);
 1913                 }
 1914         }
 1915         /*
 1916          * The map does not have the same possibility of threads still
 1917          * holding references to it.  So always free it as long as it
 1918          * does not reference the original static allocation.
 1919          */
 1920         if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
 1921                 free(omap, M_FILEDESC);
 1922 }
 1923 
 1924 /*
 1925  * Allocate a file descriptor for the process.
 1926  */
 1927 int
 1928 fdalloc(struct thread *td, int minfd, int *result)
 1929 {
 1930         struct proc *p = td->td_proc;
 1931         struct filedesc *fdp = p->p_fd;
 1932         int fd, maxfd, allocfd;
 1933 #ifdef RACCT
 1934         int error;
 1935 #endif
 1936 
 1937         FILEDESC_XLOCK_ASSERT(fdp);
 1938 
 1939         if (fdp->fd_freefile > minfd)
 1940                 minfd = fdp->fd_freefile;
 1941 
 1942         maxfd = getmaxfd(td);
 1943 
 1944         /*
 1945          * Search the bitmap for a free descriptor starting at minfd.
 1946          * If none is found, grow the file table.
 1947          */
 1948         fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
 1949         if (__predict_false(fd >= maxfd))
 1950                 return (EMFILE);
 1951         if (__predict_false(fd >= fdp->fd_nfiles)) {
 1952                 allocfd = min(fd * 2, maxfd);
 1953 #ifdef RACCT
 1954                 if (RACCT_ENABLED()) {
 1955                         error = racct_set_unlocked(p, RACCT_NOFILE, allocfd);
 1956                         if (error != 0)
 1957                                 return (EMFILE);
 1958                 }
 1959 #endif
 1960                 /*
 1961                  * fd is already equal to first free descriptor >= minfd, so
 1962                  * we only need to grow the table and we are done.
 1963                  */
 1964                 fdgrowtable_exp(fdp, allocfd);
 1965         }
 1966 
 1967         /*
 1968          * Perform some sanity checks, then mark the file descriptor as
 1969          * used and return it to the caller.
 1970          */
 1971         KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),
 1972             ("invalid descriptor %d", fd));
 1973         KASSERT(!fdisused(fdp, fd),
 1974             ("fd_first_free() returned non-free descriptor"));
 1975         KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
 1976             ("file descriptor isn't free"));
 1977         fdused(fdp, fd);
 1978         *result = fd;
 1979         return (0);
 1980 }
 1981 
 1982 /*
 1983  * Allocate n file descriptors for the process.
 1984  */
 1985 int
 1986 fdallocn(struct thread *td, int minfd, int *fds, int n)
 1987 {
 1988         struct proc *p = td->td_proc;
 1989         struct filedesc *fdp = p->p_fd;
 1990         int i;
 1991 
 1992         FILEDESC_XLOCK_ASSERT(fdp);
 1993 
 1994         for (i = 0; i < n; i++)
 1995                 if (fdalloc(td, 0, &fds[i]) != 0)
 1996                         break;
 1997 
 1998         if (i < n) {
 1999                 for (i--; i >= 0; i--)
 2000                         fdunused(fdp, fds[i]);
 2001                 return (EMFILE);
 2002         }
 2003 
 2004         return (0);
 2005 }
 2006 
 2007 /*
 2008  * Create a new open file structure and allocate a file descriptor for the
 2009  * process that refers to it.  We add one reference to the file for the
 2010  * descriptor table and one reference for resultfp. This is to prevent us
 2011  * being preempted and the entry in the descriptor table closed after we
 2012  * release the FILEDESC lock.
 2013  */
 2014 int
 2015 falloc_caps(struct thread *td, struct file **resultfp, int *resultfd, int flags,
 2016     struct filecaps *fcaps)
 2017 {
 2018         struct file *fp;
 2019         int error, fd;
 2020 
 2021         MPASS(resultfp != NULL);
 2022         MPASS(resultfd != NULL);
 2023 
 2024         error = _falloc_noinstall(td, &fp, 2);
 2025         if (__predict_false(error != 0)) {
 2026                 return (error);
 2027         }
 2028 
 2029         error = finstall_refed(td, fp, &fd, flags, fcaps);
 2030         if (__predict_false(error != 0)) {
 2031                 falloc_abort(td, fp);
 2032                 return (error);
 2033         }
 2034 
 2035         *resultfp = fp;
 2036         *resultfd = fd;
 2037 
 2038         return (0);
 2039 }
 2040 
 2041 /*
 2042  * Create a new open file structure without allocating a file descriptor.
 2043  */
 2044 int
 2045 _falloc_noinstall(struct thread *td, struct file **resultfp, u_int n)
 2046 {
 2047         struct file *fp;
 2048         int maxuserfiles = maxfiles - (maxfiles / 20);
 2049         int openfiles_new;
 2050         static struct timeval lastfail;
 2051         static int curfail;
 2052 
 2053         KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
 2054         MPASS(n > 0);
 2055 
 2056         openfiles_new = atomic_fetchadd_int(&openfiles, 1) + 1;
 2057         if ((openfiles_new >= maxuserfiles &&
 2058             priv_check(td, PRIV_MAXFILES) != 0) ||
 2059             openfiles_new >= maxfiles) {
 2060                 atomic_subtract_int(&openfiles, 1);
 2061                 if (ppsratecheck(&lastfail, &curfail, 1)) {
 2062                         printf("kern.maxfiles limit exceeded by uid %i, (%s) "
 2063                             "please see tuning(7).\n", td->td_ucred->cr_ruid, td->td_proc->p_comm);
 2064                 }
 2065                 return (ENFILE);
 2066         }
 2067         fp = uma_zalloc(file_zone, M_WAITOK);
 2068         bzero(fp, sizeof(*fp));
 2069         refcount_init(&fp->f_count, n);
 2070         fp->f_cred = crhold(td->td_ucred);
 2071         fp->f_ops = &badfileops;
 2072         *resultfp = fp;
 2073         return (0);
 2074 }
 2075 
 2076 void
 2077 falloc_abort(struct thread *td, struct file *fp)
 2078 {
 2079 
 2080         /*
 2081          * For assertion purposes.
 2082          */
 2083         refcount_init(&fp->f_count, 0);
 2084         _fdrop(fp, td);
 2085 }
 2086 
 2087 /*
 2088  * Install a file in a file descriptor table.
 2089  */
 2090 void
 2091 _finstall(struct filedesc *fdp, struct file *fp, int fd, int flags,
 2092     struct filecaps *fcaps)
 2093 {
 2094         struct filedescent *fde;
 2095 
 2096         MPASS(fp != NULL);
 2097         if (fcaps != NULL)
 2098                 filecaps_validate(fcaps, __func__);
 2099         FILEDESC_XLOCK_ASSERT(fdp);
 2100 
 2101         fde = &fdp->fd_ofiles[fd];
 2102 #ifdef CAPABILITIES
 2103         seqc_write_begin(&fde->fde_seqc);
 2104 #endif
 2105         fde->fde_file = fp;
 2106         fde->fde_flags = (flags & O_CLOEXEC) != 0 ? UF_EXCLOSE : 0;
 2107         if (fcaps != NULL)
 2108                 filecaps_move(fcaps, &fde->fde_caps);
 2109         else
 2110                 filecaps_fill(&fde->fde_caps);
 2111 #ifdef CAPABILITIES
 2112         seqc_write_end(&fde->fde_seqc);
 2113 #endif
 2114 }
 2115 
 2116 int
 2117 finstall_refed(struct thread *td, struct file *fp, int *fd, int flags,
 2118     struct filecaps *fcaps)
 2119 {
 2120         struct filedesc *fdp = td->td_proc->p_fd;
 2121         int error;
 2122 
 2123         MPASS(fd != NULL);
 2124 
 2125         FILEDESC_XLOCK(fdp);
 2126         error = fdalloc(td, 0, fd);
 2127         if (__predict_true(error == 0)) {
 2128                 _finstall(fdp, fp, *fd, flags, fcaps);
 2129         }
 2130         FILEDESC_XUNLOCK(fdp);
 2131         return (error);
 2132 }
 2133 
 2134 int
 2135 finstall(struct thread *td, struct file *fp, int *fd, int flags,
 2136     struct filecaps *fcaps)
 2137 {
 2138         int error;
 2139 
 2140         MPASS(fd != NULL);
 2141 
 2142         if (!fhold(fp))
 2143                 return (EBADF);
 2144         error = finstall_refed(td, fp, fd, flags, fcaps);
 2145         if (__predict_false(error != 0)) {
 2146                 fdrop(fp, td);
 2147         }
 2148         return (error);
 2149 }
 2150 
 2151 /*
 2152  * Build a new filedesc structure from another.
 2153  *
 2154  * If fdp is not NULL, return with it shared locked.
 2155  */
 2156 struct filedesc *
 2157 fdinit(struct filedesc *fdp, bool prepfiles, int *lastfile)
 2158 {
 2159         struct filedesc0 *newfdp0;
 2160         struct filedesc *newfdp;
 2161 
 2162         if (prepfiles)
 2163                 MPASS(lastfile != NULL);
 2164         else
 2165                 MPASS(lastfile == NULL);
 2166 
 2167         newfdp0 = uma_zalloc(filedesc0_zone, M_WAITOK | M_ZERO);
 2168         newfdp = &newfdp0->fd_fd;
 2169 
 2170         /* Create the file descriptor table. */
 2171         FILEDESC_LOCK_INIT(newfdp);
 2172         refcount_init(&newfdp->fd_refcnt, 1);
 2173         refcount_init(&newfdp->fd_holdcnt, 1);
 2174         newfdp->fd_map = newfdp0->fd_dmap;
 2175         newfdp->fd_files = (struct fdescenttbl *)&newfdp0->fd_dfiles;
 2176         newfdp->fd_files->fdt_nfiles = NDFILE;
 2177 
 2178         if (fdp == NULL)
 2179                 return (newfdp);
 2180 
 2181         FILEDESC_SLOCK(fdp);
 2182         if (!prepfiles) {
 2183                 FILEDESC_SUNLOCK(fdp);
 2184                 return (newfdp);
 2185         }
 2186 
 2187         for (;;) {
 2188                 *lastfile = fdlastfile(fdp);
 2189                 if (*lastfile < newfdp->fd_nfiles)
 2190                         break;
 2191                 FILEDESC_SUNLOCK(fdp);
 2192                 fdgrowtable(newfdp, *lastfile + 1);
 2193                 FILEDESC_SLOCK(fdp);
 2194         }
 2195 
 2196         return (newfdp);
 2197 }
 2198 
 2199 /*
 2200  * Build a pwddesc structure from another.
 2201  * Copy the current, root, and jail root vnode references.
 2202  *
 2203  * If pdp is not NULL, return with it shared locked.
 2204  */
 2205 struct pwddesc *
 2206 pdinit(struct pwddesc *pdp, bool keeplock)
 2207 {
 2208         struct pwddesc *newpdp;
 2209         struct pwd *newpwd;
 2210 
 2211         newpdp = malloc(sizeof(*newpdp), M_PWDDESC, M_WAITOK | M_ZERO);
 2212 
 2213         PWDDESC_LOCK_INIT(newpdp);
 2214         refcount_init(&newpdp->pd_refcount, 1);
 2215         newpdp->pd_cmask = CMASK;
 2216 
 2217         if (pdp == NULL) {
 2218                 newpwd = pwd_alloc();
 2219                 smr_serialized_store(&newpdp->pd_pwd, newpwd, true);
 2220                 return (newpdp);
 2221         }
 2222 
 2223         PWDDESC_XLOCK(pdp);
 2224         newpwd = pwd_hold_pwddesc(pdp);
 2225         smr_serialized_store(&newpdp->pd_pwd, newpwd, true);
 2226         if (!keeplock)
 2227                 PWDDESC_XUNLOCK(pdp);
 2228         return (newpdp);
 2229 }
 2230 
 2231 /*
 2232  * Hold either filedesc or pwddesc of the passed process.
 2233  *
 2234  * The process lock is used to synchronize against the target exiting and
 2235  * freeing the data.
 2236  *
 2237  * Clearing can be ilustrated in 3 steps:
 2238  * 1. set the pointer to NULL. Either routine can race against it, hence
 2239  *   atomic_load_ptr.
 2240  * 2. observe the process lock as not taken. Until then fdhold/pdhold can
 2241  *   race to either still see the pointer or find NULL. It is still safe to
 2242  *   grab a reference as clearing is stalled.
 2243  * 3. after the lock is observed as not taken, any fdhold/pdhold calls are
 2244  *   guaranteed to see NULL, making it safe to finish clearing
 2245  */
 2246 static struct filedesc *
 2247 fdhold(struct proc *p)
 2248 {
 2249         struct filedesc *fdp;
 2250 
 2251         PROC_LOCK_ASSERT(p, MA_OWNED);
 2252         fdp = atomic_load_ptr(&p->p_fd);
 2253         if (fdp != NULL)
 2254                 refcount_acquire(&fdp->fd_holdcnt);
 2255         return (fdp);
 2256 }
 2257 
 2258 static struct pwddesc *
 2259 pdhold(struct proc *p)
 2260 {
 2261         struct pwddesc *pdp;
 2262 
 2263         PROC_LOCK_ASSERT(p, MA_OWNED);
 2264         pdp = atomic_load_ptr(&p->p_pd);
 2265         if (pdp != NULL)
 2266                 refcount_acquire(&pdp->pd_refcount);
 2267         return (pdp);
 2268 }
 2269 
 2270 static void
 2271 fddrop(struct filedesc *fdp)
 2272 {
 2273 
 2274         if (refcount_load(&fdp->fd_holdcnt) > 1) {
 2275                 if (refcount_release(&fdp->fd_holdcnt) == 0)
 2276                         return;
 2277         }
 2278 
 2279         FILEDESC_LOCK_DESTROY(fdp);
 2280         uma_zfree(filedesc0_zone, fdp);
 2281 }
 2282 
 2283 static void
 2284 pddrop(struct pwddesc *pdp)
 2285 {
 2286         struct pwd *pwd;
 2287 
 2288         if (refcount_release_if_not_last(&pdp->pd_refcount))
 2289                 return;
 2290 
 2291         PWDDESC_XLOCK(pdp);
 2292         if (refcount_release(&pdp->pd_refcount) == 0) {
 2293                 PWDDESC_XUNLOCK(pdp);
 2294                 return;
 2295         }
 2296         pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 2297         pwd_set(pdp, NULL);
 2298         PWDDESC_XUNLOCK(pdp);
 2299         pwd_drop(pwd);
 2300 
 2301         PWDDESC_LOCK_DESTROY(pdp);
 2302         free(pdp, M_PWDDESC);
 2303 }
 2304 
 2305 /*
 2306  * Share a filedesc structure.
 2307  */
 2308 struct filedesc *
 2309 fdshare(struct filedesc *fdp)
 2310 {
 2311 
 2312         refcount_acquire(&fdp->fd_refcnt);
 2313         return (fdp);
 2314 }
 2315 
 2316 /*
 2317  * Share a pwddesc structure.
 2318  */
 2319 struct pwddesc *
 2320 pdshare(struct pwddesc *pdp)
 2321 {
 2322         refcount_acquire(&pdp->pd_refcount);
 2323         return (pdp);
 2324 }
 2325 
 2326 /*
 2327  * Unshare a filedesc structure, if necessary by making a copy
 2328  */
 2329 void
 2330 fdunshare(struct thread *td)
 2331 {
 2332         struct filedesc *tmp;
 2333         struct proc *p = td->td_proc;
 2334 
 2335         if (refcount_load(&p->p_fd->fd_refcnt) == 1)
 2336                 return;
 2337 
 2338         tmp = fdcopy(p->p_fd);
 2339         fdescfree(td);
 2340         p->p_fd = tmp;
 2341 }
 2342 
 2343 /*
 2344  * Unshare a pwddesc structure.
 2345  */
 2346 void
 2347 pdunshare(struct thread *td)
 2348 {
 2349         struct pwddesc *pdp;
 2350         struct proc *p;
 2351 
 2352         p = td->td_proc;
 2353         /* Not shared. */
 2354         if (p->p_pd->pd_refcount == 1)
 2355                 return;
 2356 
 2357         pdp = pdcopy(p->p_pd);
 2358         pdescfree(td);
 2359         p->p_pd = pdp;
 2360 }
 2361 
 2362 void
 2363 fdinstall_remapped(struct thread *td, struct filedesc *fdp)
 2364 {
 2365 
 2366         fdescfree(td);
 2367         td->td_proc->p_fd = fdp;
 2368 }
 2369 
 2370 /*
 2371  * Copy a filedesc structure.  A NULL pointer in returns a NULL reference,
 2372  * this is to ease callers, not catch errors.
 2373  */
 2374 struct filedesc *
 2375 fdcopy(struct filedesc *fdp)
 2376 {
 2377         struct filedesc *newfdp;
 2378         struct filedescent *nfde, *ofde;
 2379         int i, lastfile;
 2380 
 2381         MPASS(fdp != NULL);
 2382 
 2383         newfdp = fdinit(fdp, true, &lastfile);
 2384         /* copy all passable descriptors (i.e. not kqueue) */
 2385         newfdp->fd_freefile = -1;
 2386         for (i = 0; i <= lastfile; ++i) {
 2387                 ofde = &fdp->fd_ofiles[i];
 2388                 if (ofde->fde_file == NULL ||
 2389                     (ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) == 0 ||
 2390                     !fhold(ofde->fde_file)) {
 2391                         if (newfdp->fd_freefile == -1)
 2392                                 newfdp->fd_freefile = i;
 2393                         continue;
 2394                 }
 2395                 nfde = &newfdp->fd_ofiles[i];
 2396                 *nfde = *ofde;
 2397                 filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
 2398                 fdused_init(newfdp, i);
 2399         }
 2400         if (newfdp->fd_freefile == -1)
 2401                 newfdp->fd_freefile = i;
 2402         FILEDESC_SUNLOCK(fdp);
 2403         return (newfdp);
 2404 }
 2405 
 2406 /*
 2407  * Copy a pwddesc structure.
 2408  */
 2409 struct pwddesc *
 2410 pdcopy(struct pwddesc *pdp)
 2411 {
 2412         struct pwddesc *newpdp;
 2413 
 2414         MPASS(pdp != NULL);
 2415 
 2416         newpdp = pdinit(pdp, true);
 2417         newpdp->pd_cmask = pdp->pd_cmask;
 2418         PWDDESC_XUNLOCK(pdp);
 2419         return (newpdp);
 2420 }
 2421 
 2422 /*
 2423  * Copies a filedesc structure, while remapping all file descriptors
 2424  * stored inside using a translation table.
 2425  *
 2426  * File descriptors are copied over to the new file descriptor table,
 2427  * regardless of whether the close-on-exec flag is set.
 2428  */
 2429 int
 2430 fdcopy_remapped(struct filedesc *fdp, const int *fds, size_t nfds,
 2431     struct filedesc **ret)
 2432 {
 2433         struct filedesc *newfdp;
 2434         struct filedescent *nfde, *ofde;
 2435         int error, i, lastfile;
 2436 
 2437         MPASS(fdp != NULL);
 2438 
 2439         newfdp = fdinit(fdp, true, &lastfile);
 2440         if (nfds > lastfile + 1) {
 2441                 /* New table cannot be larger than the old one. */
 2442                 error = E2BIG;
 2443                 goto bad;
 2444         }
 2445         /* Copy all passable descriptors (i.e. not kqueue). */
 2446         newfdp->fd_freefile = nfds;
 2447         for (i = 0; i < nfds; ++i) {
 2448                 if (fds[i] < 0 || fds[i] > lastfile) {
 2449                         /* File descriptor out of bounds. */
 2450                         error = EBADF;
 2451                         goto bad;
 2452                 }
 2453                 ofde = &fdp->fd_ofiles[fds[i]];
 2454                 if (ofde->fde_file == NULL) {
 2455                         /* Unused file descriptor. */
 2456                         error = EBADF;
 2457                         goto bad;
 2458                 }
 2459                 if ((ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) == 0) {
 2460                         /* File descriptor cannot be passed. */
 2461                         error = EINVAL;
 2462                         goto bad;
 2463                 }
 2464                 if (!fhold(ofde->fde_file)) {
 2465                         error = EBADF;
 2466                         goto bad;
 2467                 }
 2468                 nfde = &newfdp->fd_ofiles[i];
 2469                 *nfde = *ofde;
 2470                 filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
 2471                 fdused_init(newfdp, i);
 2472         }
 2473         FILEDESC_SUNLOCK(fdp);
 2474         *ret = newfdp;
 2475         return (0);
 2476 bad:
 2477         FILEDESC_SUNLOCK(fdp);
 2478         fdescfree_remapped(newfdp);
 2479         return (error);
 2480 }
 2481 
 2482 /*
 2483  * Clear POSIX style locks. This is only used when fdp looses a reference (i.e.
 2484  * one of processes using it exits) and the table used to be shared.
 2485  */
 2486 static void
 2487 fdclearlocks(struct thread *td)
 2488 {
 2489         struct filedesc *fdp;
 2490         struct filedesc_to_leader *fdtol;
 2491         struct flock lf;
 2492         struct file *fp;
 2493         struct proc *p;
 2494         struct vnode *vp;
 2495         int i, lastfile;
 2496 
 2497         p = td->td_proc;
 2498         fdp = p->p_fd;
 2499         fdtol = p->p_fdtol;
 2500         MPASS(fdtol != NULL);
 2501 
 2502         FILEDESC_XLOCK(fdp);
 2503         KASSERT(fdtol->fdl_refcount > 0,
 2504             ("filedesc_to_refcount botch: fdl_refcount=%d",
 2505             fdtol->fdl_refcount));
 2506         if (fdtol->fdl_refcount == 1 &&
 2507             (p->p_leader->p_flag & P_ADVLOCK) != 0) {
 2508                 lastfile = fdlastfile(fdp);
 2509                 for (i = 0; i <= lastfile; i++) {
 2510                         fp = fdp->fd_ofiles[i].fde_file;
 2511                         if (fp == NULL || fp->f_type != DTYPE_VNODE ||
 2512                             !fhold(fp))
 2513                                 continue;
 2514                         FILEDESC_XUNLOCK(fdp);
 2515                         lf.l_whence = SEEK_SET;
 2516                         lf.l_start = 0;
 2517                         lf.l_len = 0;
 2518                         lf.l_type = F_UNLCK;
 2519                         vp = fp->f_vnode;
 2520                         (void) VOP_ADVLOCK(vp,
 2521                             (caddr_t)p->p_leader, F_UNLCK,
 2522                             &lf, F_POSIX);
 2523                         FILEDESC_XLOCK(fdp);
 2524                         fdrop(fp, td);
 2525                 }
 2526         }
 2527 retry:
 2528         if (fdtol->fdl_refcount == 1) {
 2529                 if (fdp->fd_holdleaderscount > 0 &&
 2530                     (p->p_leader->p_flag & P_ADVLOCK) != 0) {
 2531                         /*
 2532                          * close() or kern_dup() has cleared a reference
 2533                          * in a shared file descriptor table.
 2534                          */
 2535                         fdp->fd_holdleaderswakeup = 1;
 2536                         sx_sleep(&fdp->fd_holdleaderscount,
 2537                             FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
 2538                         goto retry;
 2539                 }
 2540                 if (fdtol->fdl_holdcount > 0) {
 2541                         /*
 2542                          * Ensure that fdtol->fdl_leader remains
 2543                          * valid in closef().
 2544                          */
 2545                         fdtol->fdl_wakeup = 1;
 2546                         sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
 2547                             "fdlhold", 0);
 2548                         goto retry;
 2549                 }
 2550         }
 2551         fdtol->fdl_refcount--;
 2552         if (fdtol->fdl_refcount == 0 &&
 2553             fdtol->fdl_holdcount == 0) {
 2554                 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
 2555                 fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
 2556         } else
 2557                 fdtol = NULL;
 2558         p->p_fdtol = NULL;
 2559         FILEDESC_XUNLOCK(fdp);
 2560         if (fdtol != NULL)
 2561                 free(fdtol, M_FILEDESC_TO_LEADER);
 2562 }
 2563 
 2564 /*
 2565  * Release a filedesc structure.
 2566  */
 2567 static void
 2568 fdescfree_fds(struct thread *td, struct filedesc *fdp, bool needclose)
 2569 {
 2570         struct filedesc0 *fdp0;
 2571         struct freetable *ft, *tft;
 2572         struct filedescent *fde;
 2573         struct file *fp;
 2574         int i, lastfile;
 2575 
 2576         KASSERT(refcount_load(&fdp->fd_refcnt) == 0,
 2577             ("%s: fd table %p carries references", __func__, fdp));
 2578 
 2579         /*
 2580          * Serialize with threads iterating over the table, if any.
 2581          */
 2582         if (refcount_load(&fdp->fd_holdcnt) > 1) {
 2583                 FILEDESC_XLOCK(fdp);
 2584                 FILEDESC_XUNLOCK(fdp);
 2585         }
 2586 
 2587         lastfile = fdlastfile_single(fdp);
 2588         for (i = 0; i <= lastfile; i++) {
 2589                 fde = &fdp->fd_ofiles[i];
 2590                 fp = fde->fde_file;
 2591                 if (fp != NULL) {
 2592                         fdefree_last(fde);
 2593                         if (needclose)
 2594                                 (void) closef(fp, td);
 2595                         else
 2596                                 fdrop(fp, td);
 2597                 }
 2598         }
 2599 
 2600         if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
 2601                 free(fdp->fd_map, M_FILEDESC);
 2602         if (fdp->fd_nfiles > NDFILE)
 2603                 free(fdp->fd_files, M_FILEDESC);
 2604 
 2605         fdp0 = (struct filedesc0 *)fdp;
 2606         SLIST_FOREACH_SAFE(ft, &fdp0->fd_free, ft_next, tft)
 2607                 free(ft->ft_table, M_FILEDESC);
 2608 
 2609         fddrop(fdp);
 2610 }
 2611 
 2612 void
 2613 fdescfree(struct thread *td)
 2614 {
 2615         struct proc *p;
 2616         struct filedesc *fdp;
 2617 
 2618         p = td->td_proc;
 2619         fdp = p->p_fd;
 2620         MPASS(fdp != NULL);
 2621 
 2622 #ifdef RACCT
 2623         if (RACCT_ENABLED())
 2624                 racct_set_unlocked(p, RACCT_NOFILE, 0);
 2625 #endif
 2626 
 2627         if (p->p_fdtol != NULL)
 2628                 fdclearlocks(td);
 2629 
 2630         /*
 2631          * Check fdhold for an explanation.
 2632          */
 2633         atomic_store_ptr(&p->p_fd, NULL);
 2634         atomic_thread_fence_seq_cst();
 2635         PROC_WAIT_UNLOCKED(p);
 2636 
 2637         if (refcount_release(&fdp->fd_refcnt) == 0)
 2638                 return;
 2639 
 2640         fdescfree_fds(td, fdp, 1);
 2641 }
 2642 
 2643 void
 2644 pdescfree(struct thread *td)
 2645 {
 2646         struct proc *p;
 2647         struct pwddesc *pdp;
 2648 
 2649         p = td->td_proc;
 2650         pdp = p->p_pd;
 2651         MPASS(pdp != NULL);
 2652 
 2653         /*
 2654          * Check pdhold for an explanation.
 2655          */
 2656         atomic_store_ptr(&p->p_pd, NULL);
 2657         atomic_thread_fence_seq_cst();
 2658         PROC_WAIT_UNLOCKED(p);
 2659 
 2660         pddrop(pdp);
 2661 }
 2662 
 2663 void
 2664 fdescfree_remapped(struct filedesc *fdp)
 2665 {
 2666 #ifdef INVARIANTS
 2667         /* fdescfree_fds() asserts that fd_refcnt == 0. */
 2668         if (!refcount_release(&fdp->fd_refcnt))
 2669                 panic("%s: fd table %p has extra references", __func__, fdp);
 2670 #endif
 2671         fdescfree_fds(curthread, fdp, 0);
 2672 }
 2673 
 2674 /*
 2675  * For setugid programs, we don't want to people to use that setugidness
 2676  * to generate error messages which write to a file which otherwise would
 2677  * otherwise be off-limits to the process.  We check for filesystems where
 2678  * the vnode can change out from under us after execve (like [lin]procfs).
 2679  *
 2680  * Since fdsetugidsafety calls this only for fd 0, 1 and 2, this check is
 2681  * sufficient.  We also don't check for setugidness since we know we are.
 2682  */
 2683 static bool
 2684 is_unsafe(struct file *fp)
 2685 {
 2686         struct vnode *vp;
 2687 
 2688         if (fp->f_type != DTYPE_VNODE)
 2689                 return (false);
 2690 
 2691         vp = fp->f_vnode;
 2692         return ((vp->v_vflag & VV_PROCDEP) != 0);
 2693 }
 2694 
 2695 /*
 2696  * Make this setguid thing safe, if at all possible.
 2697  */
 2698 void
 2699 fdsetugidsafety(struct thread *td)
 2700 {
 2701         struct filedesc *fdp;
 2702         struct file *fp;
 2703         int i;
 2704 
 2705         fdp = td->td_proc->p_fd;
 2706         KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
 2707             ("the fdtable should not be shared"));
 2708         MPASS(fdp->fd_nfiles >= 3);
 2709         for (i = 0; i <= 2; i++) {
 2710                 fp = fdp->fd_ofiles[i].fde_file;
 2711                 if (fp != NULL && is_unsafe(fp)) {
 2712                         FILEDESC_XLOCK(fdp);
 2713                         knote_fdclose(td, i);
 2714                         /*
 2715                          * NULL-out descriptor prior to close to avoid
 2716                          * a race while close blocks.
 2717                          */
 2718                         fdfree(fdp, i);
 2719                         FILEDESC_XUNLOCK(fdp);
 2720                         (void) closef(fp, td);
 2721                 }
 2722         }
 2723 }
 2724 
 2725 /*
 2726  * If a specific file object occupies a specific file descriptor, close the
 2727  * file descriptor entry and drop a reference on the file object.  This is a
 2728  * convenience function to handle a subsequent error in a function that calls
 2729  * falloc() that handles the race that another thread might have closed the
 2730  * file descriptor out from under the thread creating the file object.
 2731  */
 2732 void
 2733 fdclose(struct thread *td, struct file *fp, int idx)
 2734 {
 2735         struct filedesc *fdp = td->td_proc->p_fd;
 2736 
 2737         FILEDESC_XLOCK(fdp);
 2738         if (fdp->fd_ofiles[idx].fde_file == fp) {
 2739                 fdfree(fdp, idx);
 2740                 FILEDESC_XUNLOCK(fdp);
 2741                 fdrop(fp, td);
 2742         } else
 2743                 FILEDESC_XUNLOCK(fdp);
 2744 }
 2745 
 2746 /*
 2747  * Close any files on exec?
 2748  */
 2749 void
 2750 fdcloseexec(struct thread *td)
 2751 {
 2752         struct filedesc *fdp;
 2753         struct filedescent *fde;
 2754         struct file *fp;
 2755         int i, lastfile;
 2756 
 2757         fdp = td->td_proc->p_fd;
 2758         KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
 2759             ("the fdtable should not be shared"));
 2760         lastfile = fdlastfile_single(fdp);
 2761         for (i = 0; i <= lastfile; i++) {
 2762                 fde = &fdp->fd_ofiles[i];
 2763                 fp = fde->fde_file;
 2764                 if (fp != NULL && (fp->f_type == DTYPE_MQUEUE ||
 2765                     (fde->fde_flags & UF_EXCLOSE))) {
 2766                         FILEDESC_XLOCK(fdp);
 2767                         fdfree(fdp, i);
 2768                         (void) closefp(fdp, i, fp, td, false, false);
 2769                         FILEDESC_UNLOCK_ASSERT(fdp);
 2770                 }
 2771         }
 2772 }
 2773 
 2774 /*
 2775  * It is unsafe for set[ug]id processes to be started with file
 2776  * descriptors 0..2 closed, as these descriptors are given implicit
 2777  * significance in the Standard C library.  fdcheckstd() will create a
 2778  * descriptor referencing /dev/null for each of stdin, stdout, and
 2779  * stderr that is not already open.
 2780  */
 2781 int
 2782 fdcheckstd(struct thread *td)
 2783 {
 2784         struct filedesc *fdp;
 2785         register_t save;
 2786         int i, error, devnull;
 2787 
 2788         fdp = td->td_proc->p_fd;
 2789         KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
 2790             ("the fdtable should not be shared"));
 2791         MPASS(fdp->fd_nfiles >= 3);
 2792         devnull = -1;
 2793         for (i = 0; i <= 2; i++) {
 2794                 if (fdp->fd_ofiles[i].fde_file != NULL)
 2795                         continue;
 2796 
 2797                 save = td->td_retval[0];
 2798                 if (devnull != -1) {
 2799                         error = kern_dup(td, FDDUP_FIXED, 0, devnull, i);
 2800                 } else {
 2801                         error = kern_openat(td, AT_FDCWD, "/dev/null",
 2802                             UIO_SYSSPACE, O_RDWR, 0);
 2803                         if (error == 0) {
 2804                                 devnull = td->td_retval[0];
 2805                                 KASSERT(devnull == i, ("we didn't get our fd"));
 2806                         }
 2807                 }
 2808                 td->td_retval[0] = save;
 2809                 if (error != 0)
 2810                         return (error);
 2811         }
 2812         return (0);
 2813 }
 2814 
 2815 /*
 2816  * Internal form of close.  Decrement reference count on file structure.
 2817  * Note: td may be NULL when closing a file that was being passed in a
 2818  * message.
 2819  */
 2820 int
 2821 closef(struct file *fp, struct thread *td)
 2822 {
 2823         struct vnode *vp;
 2824         struct flock lf;
 2825         struct filedesc_to_leader *fdtol;
 2826         struct filedesc *fdp;
 2827 
 2828         MPASS(td != NULL);
 2829 
 2830         /*
 2831          * POSIX record locking dictates that any close releases ALL
 2832          * locks owned by this process.  This is handled by setting
 2833          * a flag in the unlock to free ONLY locks obeying POSIX
 2834          * semantics, and not to free BSD-style file locks.
 2835          * If the descriptor was in a message, POSIX-style locks
 2836          * aren't passed with the descriptor, and the thread pointer
 2837          * will be NULL.  Callers should be careful only to pass a
 2838          * NULL thread pointer when there really is no owning
 2839          * context that might have locks, or the locks will be
 2840          * leaked.
 2841          */
 2842         if (fp->f_type == DTYPE_VNODE) {
 2843                 vp = fp->f_vnode;
 2844                 if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
 2845                         lf.l_whence = SEEK_SET;
 2846                         lf.l_start = 0;
 2847                         lf.l_len = 0;
 2848                         lf.l_type = F_UNLCK;
 2849                         (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
 2850                             F_UNLCK, &lf, F_POSIX);
 2851                 }
 2852                 fdtol = td->td_proc->p_fdtol;
 2853                 if (fdtol != NULL) {
 2854                         /*
 2855                          * Handle special case where file descriptor table is
 2856                          * shared between multiple process leaders.
 2857                          */
 2858                         fdp = td->td_proc->p_fd;
 2859                         FILEDESC_XLOCK(fdp);
 2860                         for (fdtol = fdtol->fdl_next;
 2861                             fdtol != td->td_proc->p_fdtol;
 2862                             fdtol = fdtol->fdl_next) {
 2863                                 if ((fdtol->fdl_leader->p_flag &
 2864                                     P_ADVLOCK) == 0)
 2865                                         continue;
 2866                                 fdtol->fdl_holdcount++;
 2867                                 FILEDESC_XUNLOCK(fdp);
 2868                                 lf.l_whence = SEEK_SET;
 2869                                 lf.l_start = 0;
 2870                                 lf.l_len = 0;
 2871                                 lf.l_type = F_UNLCK;
 2872                                 vp = fp->f_vnode;
 2873                                 (void) VOP_ADVLOCK(vp,
 2874                                     (caddr_t)fdtol->fdl_leader, F_UNLCK, &lf,
 2875                                     F_POSIX);
 2876                                 FILEDESC_XLOCK(fdp);
 2877                                 fdtol->fdl_holdcount--;
 2878                                 if (fdtol->fdl_holdcount == 0 &&
 2879                                     fdtol->fdl_wakeup != 0) {
 2880                                         fdtol->fdl_wakeup = 0;
 2881                                         wakeup(fdtol);
 2882                                 }
 2883                         }
 2884                         FILEDESC_XUNLOCK(fdp);
 2885                 }
 2886         }
 2887         return (fdrop_close(fp, td));
 2888 }
 2889 
 2890 /*
 2891  * Hack for file descriptor passing code.
 2892  */
 2893 void
 2894 closef_nothread(struct file *fp)
 2895 {
 2896 
 2897         fdrop(fp, NULL);
 2898 }
 2899 
 2900 /*
 2901  * Initialize the file pointer with the specified properties.
 2902  *
 2903  * The ops are set with release semantics to be certain that the flags, type,
 2904  * and data are visible when ops is.  This is to prevent ops methods from being
 2905  * called with bad data.
 2906  */
 2907 void
 2908 finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
 2909 {
 2910         fp->f_data = data;
 2911         fp->f_flag = flag;
 2912         fp->f_type = type;
 2913         atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
 2914 }
 2915 
 2916 void
 2917 finit_vnode(struct file *fp, u_int flag, void *data, struct fileops *ops)
 2918 {
 2919         fp->f_seqcount[UIO_READ] = 1;
 2920         fp->f_seqcount[UIO_WRITE] = 1;
 2921         finit(fp, (flag & FMASK) | (fp->f_flag & FHASLOCK), DTYPE_VNODE,
 2922             data, ops);
 2923 }
 2924 
 2925 int
 2926 fget_cap_locked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
 2927     struct file **fpp, struct filecaps *havecapsp)
 2928 {
 2929         struct filedescent *fde;
 2930         int error;
 2931 
 2932         FILEDESC_LOCK_ASSERT(fdp);
 2933 
 2934         fde = fdeget_locked(fdp, fd);
 2935         if (fde == NULL) {
 2936                 error = EBADF;
 2937                 goto out;
 2938         }
 2939 
 2940 #ifdef CAPABILITIES
 2941         error = cap_check(cap_rights_fde_inline(fde), needrightsp);
 2942         if (error != 0)
 2943                 goto out;
 2944 #endif
 2945 
 2946         if (havecapsp != NULL)
 2947                 filecaps_copy(&fde->fde_caps, havecapsp, true);
 2948 
 2949         *fpp = fde->fde_file;
 2950 
 2951         error = 0;
 2952 out:
 2953         return (error);
 2954 }
 2955 
 2956 int
 2957 fget_cap(struct thread *td, int fd, cap_rights_t *needrightsp,
 2958     struct file **fpp, struct filecaps *havecapsp)
 2959 {
 2960         struct filedesc *fdp = td->td_proc->p_fd;
 2961         int error;
 2962 #ifndef CAPABILITIES
 2963         error = fget_unlocked(fdp, fd, needrightsp, fpp);
 2964         if (havecapsp != NULL && error == 0)
 2965                 filecaps_fill(havecapsp);
 2966 #else
 2967         struct file *fp;
 2968         seqc_t seq;
 2969 
 2970         *fpp = NULL;
 2971         for (;;) {
 2972                 error = fget_unlocked_seq(fdp, fd, needrightsp, &fp, &seq);
 2973                 if (error != 0)
 2974                         return (error);
 2975 
 2976                 if (havecapsp != NULL) {
 2977                         if (!filecaps_copy(&fdp->fd_ofiles[fd].fde_caps,
 2978                             havecapsp, false)) {
 2979                                 fdrop(fp, td);
 2980                                 goto get_locked;
 2981                         }
 2982                 }
 2983 
 2984                 if (!fd_modified(fdp, fd, seq))
 2985                         break;
 2986                 fdrop(fp, td);
 2987         }
 2988 
 2989         *fpp = fp;
 2990         return (0);
 2991 
 2992 get_locked:
 2993         FILEDESC_SLOCK(fdp);
 2994         error = fget_cap_locked(fdp, fd, needrightsp, fpp, havecapsp);
 2995         if (error == 0 && !fhold(*fpp))
 2996                 error = EBADF;
 2997         FILEDESC_SUNLOCK(fdp);
 2998 #endif
 2999         return (error);
 3000 }
 3001 
 3002 #ifdef CAPABILITIES
 3003 int
 3004 fgetvp_lookup_smr(int fd, struct nameidata *ndp, struct vnode **vpp, bool *fsearch)
 3005 {
 3006         const struct filedescent *fde;
 3007         const struct fdescenttbl *fdt;
 3008         struct filedesc *fdp;
 3009         struct file *fp;
 3010         struct vnode *vp;
 3011         const cap_rights_t *haverights;
 3012         cap_rights_t rights;
 3013         seqc_t seq;
 3014 
 3015         VFS_SMR_ASSERT_ENTERED();
 3016 
 3017         rights = *ndp->ni_rightsneeded;
 3018         cap_rights_set_one(&rights, CAP_LOOKUP);
 3019 
 3020         fdp = curproc->p_fd;
 3021         fdt = fdp->fd_files;
 3022         if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
 3023                 return (EBADF);
 3024         seq = seqc_read_notmodify(fd_seqc(fdt, fd));
 3025         fde = &fdt->fdt_ofiles[fd];
 3026         haverights = cap_rights_fde_inline(fde);
 3027         fp = fde->fde_file;
 3028         if (__predict_false(fp == NULL))
 3029                 return (EAGAIN);
 3030         if (__predict_false(cap_check_inline_transient(haverights, &rights)))
 3031                 return (EAGAIN);
 3032         *fsearch = ((fp->f_flag & FSEARCH) != 0);
 3033         vp = fp->f_vnode;
 3034         if (__predict_false(vp == NULL || vp->v_type != VDIR)) {
 3035                 return (EAGAIN);
 3036         }
 3037         if (!filecaps_copy(&fde->fde_caps, &ndp->ni_filecaps, false)) {
 3038                 return (EAGAIN);
 3039         }
 3040         /*
 3041          * Use an acquire barrier to force re-reading of fdt so it is
 3042          * refreshed for verification.
 3043          */
 3044         atomic_thread_fence_acq();
 3045         fdt = fdp->fd_files;
 3046         if (__predict_false(!seqc_consistent_nomb(fd_seqc(fdt, fd), seq)))
 3047                 return (EAGAIN);
 3048         /*
 3049          * If file descriptor doesn't have all rights,
 3050          * all lookups relative to it must also be
 3051          * strictly relative.
 3052          *
 3053          * Not yet supported by fast path.
 3054          */
 3055         CAP_ALL(&rights);
 3056         if (!cap_rights_contains(&ndp->ni_filecaps.fc_rights, &rights) ||
 3057             ndp->ni_filecaps.fc_fcntls != CAP_FCNTL_ALL ||
 3058             ndp->ni_filecaps.fc_nioctls != -1) {
 3059 #ifdef notyet
 3060                 ndp->ni_lcf |= NI_LCF_STRICTRELATIVE;
 3061 #else
 3062                 return (EAGAIN);
 3063 #endif
 3064         }
 3065         *vpp = vp;
 3066         return (0);
 3067 }
 3068 #else
 3069 int
 3070 fgetvp_lookup_smr(int fd, struct nameidata *ndp, struct vnode **vpp, bool *fsearch)
 3071 {
 3072         const struct fdescenttbl *fdt;
 3073         struct filedesc *fdp;
 3074         struct file *fp;
 3075         struct vnode *vp;
 3076 
 3077         VFS_SMR_ASSERT_ENTERED();
 3078 
 3079         fdp = curproc->p_fd;
 3080         fdt = fdp->fd_files;
 3081         if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
 3082                 return (EBADF);
 3083         fp = fdt->fdt_ofiles[fd].fde_file;
 3084         if (__predict_false(fp == NULL))
 3085                 return (EAGAIN);
 3086         *fsearch = ((fp->f_flag & FSEARCH) != 0);
 3087         vp = fp->f_vnode;
 3088         if (__predict_false(vp == NULL || vp->v_type != VDIR)) {
 3089                 return (EAGAIN);
 3090         }
 3091         /*
 3092          * Use an acquire barrier to force re-reading of fdt so it is
 3093          * refreshed for verification.
 3094          */
 3095         atomic_thread_fence_acq();
 3096         fdt = fdp->fd_files;
 3097         if (__predict_false(fp != fdt->fdt_ofiles[fd].fde_file))
 3098                 return (EAGAIN);
 3099         filecaps_fill(&ndp->ni_filecaps);
 3100         *vpp = vp;
 3101         return (0);
 3102 }
 3103 #endif
 3104 
 3105 int
 3106 fget_unlocked_seq(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
 3107     struct file **fpp, seqc_t *seqp)
 3108 {
 3109 #ifdef CAPABILITIES
 3110         const struct filedescent *fde;
 3111 #endif
 3112         const struct fdescenttbl *fdt;
 3113         struct file *fp;
 3114 #ifdef CAPABILITIES
 3115         seqc_t seq;
 3116         cap_rights_t haverights;
 3117         int error;
 3118 #endif
 3119 
 3120         fdt = fdp->fd_files;
 3121         if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
 3122                 return (EBADF);
 3123         /*
 3124          * Fetch the descriptor locklessly.  We avoid fdrop() races by
 3125          * never raising a refcount above 0.  To accomplish this we have
 3126          * to use a cmpset loop rather than an atomic_add.  The descriptor
 3127          * must be re-verified once we acquire a reference to be certain
 3128          * that the identity is still correct and we did not lose a race
 3129          * due to preemption.
 3130          */
 3131         for (;;) {
 3132 #ifdef CAPABILITIES
 3133                 seq = seqc_read_notmodify(fd_seqc(fdt, fd));
 3134                 fde = &fdt->fdt_ofiles[fd];
 3135                 haverights = *cap_rights_fde_inline(fde);
 3136                 fp = fde->fde_file;
 3137                 if (!seqc_consistent(fd_seqc(fdt, fd), seq))
 3138                         continue;
 3139 #else
 3140                 fp = fdt->fdt_ofiles[fd].fde_file;
 3141 #endif
 3142                 if (fp == NULL)
 3143                         return (EBADF);
 3144 #ifdef CAPABILITIES
 3145                 error = cap_check_inline(&haverights, needrightsp);
 3146                 if (error != 0)
 3147                         return (error);
 3148 #endif
 3149                 if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count))) {
 3150                         /*
 3151                          * Force a reload. Other thread could reallocate the
 3152                          * table before this fd was closed, so it is possible
 3153                          * that there is a stale fp pointer in cached version.
 3154                          */
 3155                         fdt = atomic_load_ptr(&fdp->fd_files);
 3156                         continue;
 3157                 }
 3158                 /*
 3159                  * Use an acquire barrier to force re-reading of fdt so it is
 3160                  * refreshed for verification.
 3161                  */
 3162                 atomic_thread_fence_acq();
 3163                 fdt = fdp->fd_files;
 3164 #ifdef  CAPABILITIES
 3165                 if (seqc_consistent_nomb(fd_seqc(fdt, fd), seq))
 3166 #else
 3167                 if (fp == fdt->fdt_ofiles[fd].fde_file)
 3168 #endif
 3169                         break;
 3170                 fdrop(fp, curthread);
 3171         }
 3172         *fpp = fp;
 3173         if (seqp != NULL) {
 3174 #ifdef CAPABILITIES
 3175                 *seqp = seq;
 3176 #endif
 3177         }
 3178         return (0);
 3179 }
 3180 
 3181 /*
 3182  * See the comments in fget_unlocked_seq for an explanation of how this works.
 3183  *
 3184  * This is a simplified variant which bails out to the aforementioned routine
 3185  * if anything goes wrong. In practice this only happens when userspace is
 3186  * racing with itself.
 3187  */
 3188 int
 3189 fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
 3190     struct file **fpp)
 3191 {
 3192 #ifdef CAPABILITIES
 3193         const struct filedescent *fde;
 3194 #endif
 3195         const struct fdescenttbl *fdt;
 3196         struct file *fp;
 3197 #ifdef CAPABILITIES
 3198         seqc_t seq;
 3199         const cap_rights_t *haverights;
 3200 #endif
 3201 
 3202         fdt = fdp->fd_files;
 3203         if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
 3204                 return (EBADF);
 3205 #ifdef CAPABILITIES
 3206         seq = seqc_read_notmodify(fd_seqc(fdt, fd));
 3207         fde = &fdt->fdt_ofiles[fd];
 3208         haverights = cap_rights_fde_inline(fde);
 3209         fp = fde->fde_file;
 3210 #else
 3211         fp = fdt->fdt_ofiles[fd].fde_file;
 3212 #endif
 3213         if (__predict_false(fp == NULL))
 3214                 goto out_fallback;
 3215 #ifdef CAPABILITIES
 3216         if (__predict_false(cap_check_inline_transient(haverights, needrightsp)))
 3217                 goto out_fallback;
 3218 #endif
 3219         if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count)))
 3220                 goto out_fallback;
 3221 
 3222         /*
 3223          * Use an acquire barrier to force re-reading of fdt so it is
 3224          * refreshed for verification.
 3225          */
 3226         atomic_thread_fence_acq();
 3227         fdt = fdp->fd_files;
 3228 #ifdef  CAPABILITIES
 3229         if (__predict_false(!seqc_consistent_nomb(fd_seqc(fdt, fd), seq)))
 3230 #else
 3231         if (__predict_false(fp != fdt->fdt_ofiles[fd].fde_file))
 3232 #endif
 3233                 goto out_fdrop;
 3234         *fpp = fp;
 3235         return (0);
 3236 out_fdrop:
 3237         fdrop(fp, curthread);
 3238 out_fallback:
 3239         return (fget_unlocked_seq(fdp, fd, needrightsp, fpp, NULL));
 3240 }
 3241 
 3242 /*
 3243  * Translate fd -> file when the caller guarantees the file descriptor table
 3244  * can't be changed by others.
 3245  *
 3246  * Note this does not mean the file object itself is only visible to the caller,
 3247  * merely that it wont disappear without having to be referenced.
 3248  *
 3249  * Must be paired with fput_only_user.
 3250  */
 3251 #ifdef  CAPABILITIES
 3252 int
 3253 fget_only_user(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
 3254     struct file **fpp)
 3255 {
 3256         const struct filedescent *fde;
 3257         const struct fdescenttbl *fdt;
 3258         const cap_rights_t *haverights;
 3259         struct file *fp;
 3260         int error;
 3261 
 3262         MPASS(FILEDESC_IS_ONLY_USER(fdp));
 3263 
 3264         if (__predict_false(fd >= fdp->fd_nfiles))
 3265                 return (EBADF);
 3266 
 3267         fdt = fdp->fd_files;
 3268         fde = &fdt->fdt_ofiles[fd];
 3269         fp = fde->fde_file;
 3270         if (__predict_false(fp == NULL))
 3271                 return (EBADF);
 3272         MPASS(refcount_load(&fp->f_count) > 0);
 3273         haverights = cap_rights_fde_inline(fde);
 3274         error = cap_check_inline(haverights, needrightsp);
 3275         if (__predict_false(error != 0))
 3276                 return (error);
 3277         *fpp = fp;
 3278         return (0);
 3279 }
 3280 #else
 3281 int
 3282 fget_only_user(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
 3283     struct file **fpp)
 3284 {
 3285         struct file *fp;
 3286 
 3287         MPASS(FILEDESC_IS_ONLY_USER(fdp));
 3288 
 3289         if (__predict_false(fd >= fdp->fd_nfiles))
 3290                 return (EBADF);
 3291 
 3292         fp = fdp->fd_ofiles[fd].fde_file;
 3293         if (__predict_false(fp == NULL))
 3294                 return (EBADF);
 3295 
 3296         MPASS(refcount_load(&fp->f_count) > 0);
 3297         *fpp = fp;
 3298         return (0);
 3299 }
 3300 #endif
 3301 
 3302 /*
 3303  * Extract the file pointer associated with the specified descriptor for the
 3304  * current user process.
 3305  *
 3306  * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
 3307  * returned.
 3308  *
 3309  * File's rights will be checked against the capability rights mask.
 3310  *
 3311  * If an error occurred the non-zero error is returned and *fpp is set to
 3312  * NULL.  Otherwise *fpp is held and set and zero is returned.  Caller is
 3313  * responsible for fdrop().
 3314  */
 3315 static __inline int
 3316 _fget(struct thread *td, int fd, struct file **fpp, int flags,
 3317     cap_rights_t *needrightsp)
 3318 {
 3319         struct filedesc *fdp;
 3320         struct file *fp;
 3321         int error;
 3322 
 3323         *fpp = NULL;
 3324         fdp = td->td_proc->p_fd;
 3325         error = fget_unlocked(fdp, fd, needrightsp, &fp);
 3326         if (__predict_false(error != 0))
 3327                 return (error);
 3328         if (__predict_false(fp->f_ops == &badfileops)) {
 3329                 fdrop(fp, td);
 3330                 return (EBADF);
 3331         }
 3332 
 3333         /*
 3334          * FREAD and FWRITE failure return EBADF as per POSIX.
 3335          */
 3336         error = 0;
 3337         switch (flags) {
 3338         case FREAD:
 3339         case FWRITE:
 3340                 if ((fp->f_flag & flags) == 0)
 3341                         error = EBADF;
 3342                 break;
 3343         case FEXEC:
 3344                 if (fp->f_ops != &path_fileops &&
 3345                     ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
 3346                     (fp->f_flag & FWRITE) != 0))
 3347                         error = EBADF;
 3348                 break;
 3349         case 0:
 3350                 break;
 3351         default:
 3352                 KASSERT(0, ("wrong flags"));
 3353         }
 3354 
 3355         if (error != 0) {
 3356                 fdrop(fp, td);
 3357                 return (error);
 3358         }
 3359 
 3360         *fpp = fp;
 3361         return (0);
 3362 }
 3363 
 3364 int
 3365 fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
 3366 {
 3367 
 3368         return (_fget(td, fd, fpp, 0, rightsp));
 3369 }
 3370 
 3371 int
 3372 fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, vm_prot_t *maxprotp,
 3373     struct file **fpp)
 3374 {
 3375         int error;
 3376 #ifndef CAPABILITIES
 3377         error = _fget(td, fd, fpp, 0, rightsp);
 3378         if (maxprotp != NULL)
 3379                 *maxprotp = VM_PROT_ALL;
 3380         return (error);
 3381 #else
 3382         cap_rights_t fdrights;
 3383         struct filedesc *fdp;
 3384         struct file *fp;
 3385         seqc_t seq;
 3386 
 3387         *fpp = NULL;
 3388         fdp = td->td_proc->p_fd;
 3389         MPASS(cap_rights_is_set(rightsp, CAP_MMAP));
 3390         for (;;) {
 3391                 error = fget_unlocked_seq(fdp, fd, rightsp, &fp, &seq);
 3392                 if (__predict_false(error != 0))
 3393                         return (error);
 3394                 if (__predict_false(fp->f_ops == &badfileops)) {
 3395                         fdrop(fp, td);
 3396                         return (EBADF);
 3397                 }
 3398                 if (maxprotp != NULL)
 3399                         fdrights = *cap_rights(fdp, fd);
 3400                 if (!fd_modified(fdp, fd, seq))
 3401                         break;
 3402                 fdrop(fp, td);
 3403         }
 3404 
 3405         /*
 3406          * If requested, convert capability rights to access flags.
 3407          */
 3408         if (maxprotp != NULL)
 3409                 *maxprotp = cap_rights_to_vmprot(&fdrights);
 3410         *fpp = fp;
 3411         return (0);
 3412 #endif
 3413 }
 3414 
 3415 int
 3416 fget_read(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
 3417 {
 3418 
 3419         return (_fget(td, fd, fpp, FREAD, rightsp));
 3420 }
 3421 
 3422 int
 3423 fget_write(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
 3424 {
 3425 
 3426         return (_fget(td, fd, fpp, FWRITE, rightsp));
 3427 }
 3428 
 3429 int
 3430 fget_fcntl(struct thread *td, int fd, cap_rights_t *rightsp, int needfcntl,
 3431     struct file **fpp)
 3432 {
 3433         struct filedesc *fdp = td->td_proc->p_fd;
 3434 #ifndef CAPABILITIES
 3435         return (fget_unlocked(fdp, fd, rightsp, fpp));
 3436 #else
 3437         struct file *fp;
 3438         int error;
 3439         seqc_t seq;
 3440 
 3441         *fpp = NULL;
 3442         MPASS(cap_rights_is_set(rightsp, CAP_FCNTL));
 3443         for (;;) {
 3444                 error = fget_unlocked_seq(fdp, fd, rightsp, &fp, &seq);
 3445                 if (error != 0)
 3446                         return (error);
 3447                 error = cap_fcntl_check(fdp, fd, needfcntl);
 3448                 if (!fd_modified(fdp, fd, seq))
 3449                         break;
 3450                 fdrop(fp, td);
 3451         }
 3452         if (error != 0) {
 3453                 fdrop(fp, td);
 3454                 return (error);
 3455         }
 3456         *fpp = fp;
 3457         return (0);
 3458 #endif
 3459 }
 3460 
 3461 /*
 3462  * Like fget() but loads the underlying vnode, or returns an error if the
 3463  * descriptor does not represent a vnode.  Note that pipes use vnodes but
 3464  * never have VM objects.  The returned vnode will be vref()'d.
 3465  *
 3466  * XXX: what about the unused flags ?
 3467  */
 3468 static __inline int
 3469 _fgetvp(struct thread *td, int fd, int flags, cap_rights_t *needrightsp,
 3470     struct vnode **vpp)
 3471 {
 3472         struct file *fp;
 3473         int error;
 3474 
 3475         *vpp = NULL;
 3476         error = _fget(td, fd, &fp, flags, needrightsp);
 3477         if (error != 0)
 3478                 return (error);
 3479         if (fp->f_vnode == NULL) {
 3480                 error = EINVAL;
 3481         } else {
 3482                 *vpp = fp->f_vnode;
 3483                 vref(*vpp);
 3484         }
 3485         fdrop(fp, td);
 3486 
 3487         return (error);
 3488 }
 3489 
 3490 int
 3491 fgetvp(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
 3492 {
 3493 
 3494         return (_fgetvp(td, fd, 0, rightsp, vpp));
 3495 }
 3496 
 3497 int
 3498 fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
 3499     struct filecaps *havecaps, struct vnode **vpp)
 3500 {
 3501         struct filecaps caps;
 3502         struct file *fp;
 3503         int error;
 3504 
 3505         error = fget_cap(td, fd, needrightsp, &fp, &caps);
 3506         if (error != 0)
 3507                 return (error);
 3508         if (fp->f_ops == &badfileops) {
 3509                 error = EBADF;
 3510                 goto out;
 3511         }
 3512         if (fp->f_vnode == NULL) {
 3513                 error = EINVAL;
 3514                 goto out;
 3515         }
 3516 
 3517         *havecaps = caps;
 3518         *vpp = fp->f_vnode;
 3519         vref(*vpp);
 3520         fdrop(fp, td);
 3521 
 3522         return (0);
 3523 out:
 3524         filecaps_free(&caps);
 3525         fdrop(fp, td);
 3526         return (error);
 3527 }
 3528 
 3529 int
 3530 fgetvp_read(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
 3531 {
 3532 
 3533         return (_fgetvp(td, fd, FREAD, rightsp, vpp));
 3534 }
 3535 
 3536 int
 3537 fgetvp_exec(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
 3538 {
 3539 
 3540         return (_fgetvp(td, fd, FEXEC, rightsp, vpp));
 3541 }
 3542 
 3543 #ifdef notyet
 3544 int
 3545 fgetvp_write(struct thread *td, int fd, cap_rights_t *rightsp,
 3546     struct vnode **vpp)
 3547 {
 3548 
 3549         return (_fgetvp(td, fd, FWRITE, rightsp, vpp));
 3550 }
 3551 #endif
 3552 
 3553 /*
 3554  * Handle the last reference to a file being closed.
 3555  *
 3556  * Without the noinline attribute clang keeps inlining the func thorough this
 3557  * file when fdrop is used.
 3558  */
 3559 int __noinline
 3560 _fdrop(struct file *fp, struct thread *td)
 3561 {
 3562         int error;
 3563 #ifdef INVARIANTS
 3564         int count;
 3565 
 3566         count = refcount_load(&fp->f_count);
 3567         if (count != 0)
 3568                 panic("fdrop: fp %p count %d", fp, count);
 3569 #endif
 3570         error = fo_close(fp, td);
 3571         atomic_subtract_int(&openfiles, 1);
 3572         crfree(fp->f_cred);
 3573         free(fp->f_advice, M_FADVISE);
 3574         uma_zfree(file_zone, fp);
 3575 
 3576         return (error);
 3577 }
 3578 
 3579 /*
 3580  * Apply an advisory lock on a file descriptor.
 3581  *
 3582  * Just attempt to get a record lock of the requested type on the entire file
 3583  * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 3584  */
 3585 #ifndef _SYS_SYSPROTO_H_
 3586 struct flock_args {
 3587         int     fd;
 3588         int     how;
 3589 };
 3590 #endif
 3591 /* ARGSUSED */
 3592 int
 3593 sys_flock(struct thread *td, struct flock_args *uap)
 3594 {
 3595         struct file *fp;
 3596         struct vnode *vp;
 3597         struct flock lf;
 3598         int error;
 3599 
 3600         error = fget(td, uap->fd, &cap_flock_rights, &fp);
 3601         if (error != 0)
 3602                 return (error);
 3603         error = EOPNOTSUPP;
 3604         if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
 3605                 goto done;
 3606         }
 3607         if (fp->f_ops == &path_fileops) {
 3608                 goto done;
 3609         }
 3610 
 3611         error = 0;
 3612         vp = fp->f_vnode;
 3613         lf.l_whence = SEEK_SET;
 3614         lf.l_start = 0;
 3615         lf.l_len = 0;
 3616         if (uap->how & LOCK_UN) {
 3617                 lf.l_type = F_UNLCK;
 3618                 atomic_clear_int(&fp->f_flag, FHASLOCK);
 3619                 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
 3620                 goto done;
 3621         }
 3622         if (uap->how & LOCK_EX)
 3623                 lf.l_type = F_WRLCK;
 3624         else if (uap->how & LOCK_SH)
 3625                 lf.l_type = F_RDLCK;
 3626         else {
 3627                 error = EBADF;
 3628                 goto done;
 3629         }
 3630         atomic_set_int(&fp->f_flag, FHASLOCK);
 3631         error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
 3632             (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
 3633 done:
 3634         fdrop(fp, td);
 3635         return (error);
 3636 }
 3637 /*
 3638  * Duplicate the specified descriptor to a free descriptor.
 3639  */
 3640 int
 3641 dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
 3642     int openerror, int *indxp)
 3643 {
 3644         struct filedescent *newfde, *oldfde;
 3645         struct file *fp;
 3646         u_long *ioctls;
 3647         int error, indx;
 3648 
 3649         KASSERT(openerror == ENODEV || openerror == ENXIO,
 3650             ("unexpected error %d in %s", openerror, __func__));
 3651 
 3652         /*
 3653          * If the to-be-dup'd fd number is greater than the allowed number
 3654          * of file descriptors, or the fd to be dup'd has already been
 3655          * closed, then reject.
 3656          */
 3657         FILEDESC_XLOCK(fdp);
 3658         if ((fp = fget_locked(fdp, dfd)) == NULL) {
 3659                 FILEDESC_XUNLOCK(fdp);
 3660                 return (EBADF);
 3661         }
 3662 
 3663         error = fdalloc(td, 0, &indx);
 3664         if (error != 0) {
 3665                 FILEDESC_XUNLOCK(fdp);
 3666                 return (error);
 3667         }
 3668 
 3669         /*
 3670          * There are two cases of interest here.
 3671          *
 3672          * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
 3673          *
 3674          * For ENXIO steal away the file structure from (dfd) and store it in
 3675          * (indx).  (dfd) is effectively closed by this operation.
 3676          */
 3677         switch (openerror) {
 3678         case ENODEV:
 3679                 /*
 3680                  * Check that the mode the file is being opened for is a
 3681                  * subset of the mode of the existing descriptor.
 3682                  */
 3683                 if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
 3684                         fdunused(fdp, indx);
 3685                         FILEDESC_XUNLOCK(fdp);
 3686                         return (EACCES);
 3687                 }
 3688                 if (!fhold(fp)) {
 3689                         fdunused(fdp, indx);
 3690                         FILEDESC_XUNLOCK(fdp);
 3691                         return (EBADF);
 3692                 }
 3693                 newfde = &fdp->fd_ofiles[indx];
 3694                 oldfde = &fdp->fd_ofiles[dfd];
 3695                 ioctls = filecaps_copy_prep(&oldfde->fde_caps);
 3696 #ifdef CAPABILITIES
 3697                 seqc_write_begin(&newfde->fde_seqc);
 3698 #endif
 3699                 memcpy(newfde, oldfde, fde_change_size);
 3700                 filecaps_copy_finish(&oldfde->fde_caps, &newfde->fde_caps,
 3701                     ioctls);
 3702 #ifdef CAPABILITIES
 3703                 seqc_write_end(&newfde->fde_seqc);
 3704 #endif
 3705                 break;
 3706         case ENXIO:
 3707                 /*
 3708                  * Steal away the file pointer from dfd and stuff it into indx.
 3709                  */
 3710                 newfde = &fdp->fd_ofiles[indx];
 3711                 oldfde = &fdp->fd_ofiles[dfd];
 3712 #ifdef CAPABILITIES
 3713                 seqc_write_begin(&newfde->fde_seqc);
 3714 #endif
 3715                 memcpy(newfde, oldfde, fde_change_size);
 3716                 oldfde->fde_file = NULL;
 3717                 fdunused(fdp, dfd);
 3718 #ifdef CAPABILITIES
 3719                 seqc_write_end(&newfde->fde_seqc);
 3720 #endif
 3721                 break;
 3722         }
 3723         FILEDESC_XUNLOCK(fdp);
 3724         *indxp = indx;
 3725         return (0);
 3726 }
 3727 
 3728 /*
 3729  * This sysctl determines if we will allow a process to chroot(2) if it
 3730  * has a directory open:
 3731  *      0: disallowed for all processes.
 3732  *      1: allowed for processes that were not already chroot(2)'ed.
 3733  *      2: allowed for all processes.
 3734  */
 3735 
 3736 static int chroot_allow_open_directories = 1;
 3737 
 3738 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
 3739     &chroot_allow_open_directories, 0,
 3740     "Allow a process to chroot(2) if it has a directory open");
 3741 
 3742 /*
 3743  * Helper function for raised chroot(2) security function:  Refuse if
 3744  * any filedescriptors are open directories.
 3745  */
 3746 static int
 3747 chroot_refuse_vdir_fds(struct filedesc *fdp)
 3748 {
 3749         struct vnode *vp;
 3750         struct file *fp;
 3751         int fd, lastfile;
 3752 
 3753         FILEDESC_LOCK_ASSERT(fdp);
 3754 
 3755         lastfile = fdlastfile(fdp);
 3756         for (fd = 0; fd <= lastfile; fd++) {
 3757                 fp = fget_locked(fdp, fd);
 3758                 if (fp == NULL)
 3759                         continue;
 3760                 if (fp->f_type == DTYPE_VNODE) {
 3761                         vp = fp->f_vnode;
 3762                         if (vp->v_type == VDIR)
 3763                                 return (EPERM);
 3764                 }
 3765         }
 3766         return (0);
 3767 }
 3768 
 3769 static void
 3770 pwd_fill(struct pwd *oldpwd, struct pwd *newpwd)
 3771 {
 3772 
 3773         if (newpwd->pwd_cdir == NULL && oldpwd->pwd_cdir != NULL) {
 3774                 vrefact(oldpwd->pwd_cdir);
 3775                 newpwd->pwd_cdir = oldpwd->pwd_cdir;
 3776         }
 3777 
 3778         if (newpwd->pwd_rdir == NULL && oldpwd->pwd_rdir != NULL) {
 3779                 vrefact(oldpwd->pwd_rdir);
 3780                 newpwd->pwd_rdir = oldpwd->pwd_rdir;
 3781         }
 3782 
 3783         if (newpwd->pwd_jdir == NULL && oldpwd->pwd_jdir != NULL) {
 3784                 vrefact(oldpwd->pwd_jdir);
 3785                 newpwd->pwd_jdir = oldpwd->pwd_jdir;
 3786         }
 3787 }
 3788 
 3789 struct pwd *
 3790 pwd_hold_pwddesc(struct pwddesc *pdp)
 3791 {
 3792         struct pwd *pwd;
 3793 
 3794         PWDDESC_ASSERT_XLOCKED(pdp);
 3795         pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 3796         if (pwd != NULL)
 3797                 refcount_acquire(&pwd->pwd_refcount);
 3798         return (pwd);
 3799 }
 3800 
 3801 bool
 3802 pwd_hold_smr(struct pwd *pwd)
 3803 {
 3804 
 3805         MPASS(pwd != NULL);
 3806         if (__predict_true(refcount_acquire_if_not_zero(&pwd->pwd_refcount))) {
 3807                 return (true);
 3808         }
 3809         return (false);
 3810 }
 3811 
 3812 struct pwd *
 3813 pwd_hold(struct thread *td)
 3814 {
 3815         struct pwddesc *pdp;
 3816         struct pwd *pwd;
 3817 
 3818         pdp = td->td_proc->p_pd;
 3819 
 3820         vfs_smr_enter();
 3821         pwd = vfs_smr_entered_load(&pdp->pd_pwd);
 3822         if (pwd_hold_smr(pwd)) {
 3823                 vfs_smr_exit();
 3824                 return (pwd);
 3825         }
 3826         vfs_smr_exit();
 3827         PWDDESC_XLOCK(pdp);
 3828         pwd = pwd_hold_pwddesc(pdp);
 3829         MPASS(pwd != NULL);
 3830         PWDDESC_XUNLOCK(pdp);
 3831         return (pwd);
 3832 }
 3833 
 3834 struct pwd *
 3835 pwd_hold_proc(struct proc *p)
 3836 {
 3837         struct pwddesc *pdp;
 3838         struct pwd *pwd;
 3839 
 3840         PROC_ASSERT_HELD(p);
 3841         PROC_LOCK(p);
 3842         pdp = pdhold(p);
 3843         MPASS(pdp != NULL);
 3844         PROC_UNLOCK(p);
 3845 
 3846         PWDDESC_XLOCK(pdp);
 3847         pwd = pwd_hold_pwddesc(pdp);
 3848         MPASS(pwd != NULL);
 3849         PWDDESC_XUNLOCK(pdp);
 3850         pddrop(pdp);
 3851         return (pwd);
 3852 }
 3853 
 3854 static struct pwd *
 3855 pwd_alloc(void)
 3856 {
 3857         struct pwd *pwd;
 3858 
 3859         pwd = uma_zalloc_smr(pwd_zone, M_WAITOK);
 3860         bzero(pwd, sizeof(*pwd));
 3861         refcount_init(&pwd->pwd_refcount, 1);
 3862         return (pwd);
 3863 }
 3864 
 3865 void
 3866 pwd_drop(struct pwd *pwd)
 3867 {
 3868 
 3869         if (!refcount_release(&pwd->pwd_refcount))
 3870                 return;
 3871 
 3872         if (pwd->pwd_cdir != NULL)
 3873                 vrele(pwd->pwd_cdir);
 3874         if (pwd->pwd_rdir != NULL)
 3875                 vrele(pwd->pwd_rdir);
 3876         if (pwd->pwd_jdir != NULL)
 3877                 vrele(pwd->pwd_jdir);
 3878         uma_zfree_smr(pwd_zone, pwd);
 3879 }
 3880 
 3881 /*
 3882 * The caller is responsible for invoking priv_check() and
 3883 * mac_vnode_check_chroot() to authorize this operation.
 3884 */
 3885 int
 3886 pwd_chroot(struct thread *td, struct vnode *vp)
 3887 {
 3888         struct pwddesc *pdp;
 3889         struct filedesc *fdp;
 3890         struct pwd *newpwd, *oldpwd;
 3891         int error;
 3892 
 3893         fdp = td->td_proc->p_fd;
 3894         pdp = td->td_proc->p_pd;
 3895         newpwd = pwd_alloc();
 3896         FILEDESC_SLOCK(fdp);
 3897         PWDDESC_XLOCK(pdp);
 3898         oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 3899         if (chroot_allow_open_directories == 0 ||
 3900             (chroot_allow_open_directories == 1 &&
 3901             oldpwd->pwd_rdir != rootvnode)) {
 3902                 error = chroot_refuse_vdir_fds(fdp);
 3903                 FILEDESC_SUNLOCK(fdp);
 3904                 if (error != 0) {
 3905                         PWDDESC_XUNLOCK(pdp);
 3906                         pwd_drop(newpwd);
 3907                         return (error);
 3908                 }
 3909         } else {
 3910                 FILEDESC_SUNLOCK(fdp);
 3911         }
 3912 
 3913         vrefact(vp);
 3914         newpwd->pwd_rdir = vp;
 3915         if (oldpwd->pwd_jdir == NULL) {
 3916                 vrefact(vp);
 3917                 newpwd->pwd_jdir = vp;
 3918         }
 3919         pwd_fill(oldpwd, newpwd);
 3920         pwd_set(pdp, newpwd);
 3921         PWDDESC_XUNLOCK(pdp);
 3922         pwd_drop(oldpwd);
 3923         return (0);
 3924 }
 3925 
 3926 void
 3927 pwd_chdir(struct thread *td, struct vnode *vp)
 3928 {
 3929         struct pwddesc *pdp;
 3930         struct pwd *newpwd, *oldpwd;
 3931 
 3932         VNPASS(vp->v_usecount > 0, vp);
 3933 
 3934         newpwd = pwd_alloc();
 3935         pdp = td->td_proc->p_pd;
 3936         PWDDESC_XLOCK(pdp);
 3937         oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 3938         newpwd->pwd_cdir = vp;
 3939         pwd_fill(oldpwd, newpwd);
 3940         pwd_set(pdp, newpwd);
 3941         PWDDESC_XUNLOCK(pdp);
 3942         pwd_drop(oldpwd);
 3943 }
 3944 
 3945 /*
 3946  * jail_attach(2) changes both root and working directories.
 3947  */
 3948 int
 3949 pwd_chroot_chdir(struct thread *td, struct vnode *vp)
 3950 {
 3951         struct pwddesc *pdp;
 3952         struct filedesc *fdp;
 3953         struct pwd *newpwd, *oldpwd;
 3954         int error;
 3955 
 3956         fdp = td->td_proc->p_fd;
 3957         pdp = td->td_proc->p_pd;
 3958         newpwd = pwd_alloc();
 3959         FILEDESC_SLOCK(fdp);
 3960         PWDDESC_XLOCK(pdp);
 3961         oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 3962         error = chroot_refuse_vdir_fds(fdp);
 3963         FILEDESC_SUNLOCK(fdp);
 3964         if (error != 0) {
 3965                 PWDDESC_XUNLOCK(pdp);
 3966                 pwd_drop(newpwd);
 3967                 return (error);
 3968         }
 3969 
 3970         vrefact(vp);
 3971         newpwd->pwd_rdir = vp;
 3972         vrefact(vp);
 3973         newpwd->pwd_cdir = vp;
 3974         if (oldpwd->pwd_jdir == NULL) {
 3975                 vrefact(vp);
 3976                 newpwd->pwd_jdir = vp;
 3977         }
 3978         pwd_fill(oldpwd, newpwd);
 3979         pwd_set(pdp, newpwd);
 3980         PWDDESC_XUNLOCK(pdp);
 3981         pwd_drop(oldpwd);
 3982         return (0);
 3983 }
 3984 
 3985 void
 3986 pwd_ensure_dirs(void)
 3987 {
 3988         struct pwddesc *pdp;
 3989         struct pwd *oldpwd, *newpwd;
 3990 
 3991         pdp = curproc->p_pd;
 3992         PWDDESC_XLOCK(pdp);
 3993         oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 3994         if (oldpwd->pwd_cdir != NULL && oldpwd->pwd_rdir != NULL) {
 3995                 PWDDESC_XUNLOCK(pdp);
 3996                 return;
 3997         }
 3998         PWDDESC_XUNLOCK(pdp);
 3999 
 4000         newpwd = pwd_alloc();
 4001         PWDDESC_XLOCK(pdp);
 4002         oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 4003         pwd_fill(oldpwd, newpwd);
 4004         if (newpwd->pwd_cdir == NULL) {
 4005                 vrefact(rootvnode);
 4006                 newpwd->pwd_cdir = rootvnode;
 4007         }
 4008         if (newpwd->pwd_rdir == NULL) {
 4009                 vrefact(rootvnode);
 4010                 newpwd->pwd_rdir = rootvnode;
 4011         }
 4012         pwd_set(pdp, newpwd);
 4013         PWDDESC_XUNLOCK(pdp);
 4014         pwd_drop(oldpwd);
 4015 }
 4016 
 4017 void
 4018 pwd_set_rootvnode(void)
 4019 {
 4020         struct pwddesc *pdp;
 4021         struct pwd *oldpwd, *newpwd;
 4022 
 4023         pdp = curproc->p_pd;
 4024 
 4025         newpwd = pwd_alloc();
 4026         PWDDESC_XLOCK(pdp);
 4027         oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 4028         vrefact(rootvnode);
 4029         newpwd->pwd_cdir = rootvnode;
 4030         vrefact(rootvnode);
 4031         newpwd->pwd_rdir = rootvnode;
 4032         pwd_fill(oldpwd, newpwd);
 4033         pwd_set(pdp, newpwd);
 4034         PWDDESC_XUNLOCK(pdp);
 4035         pwd_drop(oldpwd);
 4036 }
 4037 
 4038 /*
 4039  * Scan all active processes and prisons to see if any of them have a current
 4040  * or root directory of `olddp'. If so, replace them with the new mount point.
 4041  */
 4042 void
 4043 mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
 4044 {
 4045         struct pwddesc *pdp;
 4046         struct pwd *newpwd, *oldpwd;
 4047         struct prison *pr;
 4048         struct proc *p;
 4049         int nrele;
 4050 
 4051         if (vrefcnt(olddp) == 1)
 4052                 return;
 4053         nrele = 0;
 4054         newpwd = pwd_alloc();
 4055         sx_slock(&allproc_lock);
 4056         FOREACH_PROC_IN_SYSTEM(p) {
 4057                 PROC_LOCK(p);
 4058                 pdp = pdhold(p);
 4059                 PROC_UNLOCK(p);
 4060                 if (pdp == NULL)
 4061                         continue;
 4062                 PWDDESC_XLOCK(pdp);
 4063                 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 4064                 if (oldpwd == NULL ||
 4065                     (oldpwd->pwd_cdir != olddp &&
 4066                     oldpwd->pwd_rdir != olddp &&
 4067                     oldpwd->pwd_jdir != olddp)) {
 4068                         PWDDESC_XUNLOCK(pdp);
 4069                         pddrop(pdp);
 4070                         continue;
 4071                 }
 4072                 if (oldpwd->pwd_cdir == olddp) {
 4073                         vrefact(newdp);
 4074                         newpwd->pwd_cdir = newdp;
 4075                 }
 4076                 if (oldpwd->pwd_rdir == olddp) {
 4077                         vrefact(newdp);
 4078                         newpwd->pwd_rdir = newdp;
 4079                 }
 4080                 if (oldpwd->pwd_jdir == olddp) {
 4081                         vrefact(newdp);
 4082                         newpwd->pwd_jdir = newdp;
 4083                 }
 4084                 pwd_fill(oldpwd, newpwd);
 4085                 pwd_set(pdp, newpwd);
 4086                 PWDDESC_XUNLOCK(pdp);
 4087                 pwd_drop(oldpwd);
 4088                 pddrop(pdp);
 4089                 newpwd = pwd_alloc();
 4090         }
 4091         sx_sunlock(&allproc_lock);
 4092         pwd_drop(newpwd);
 4093         if (rootvnode == olddp) {
 4094                 vrefact(newdp);
 4095                 rootvnode = newdp;
 4096                 nrele++;
 4097         }
 4098         mtx_lock(&prison0.pr_mtx);
 4099         if (prison0.pr_root == olddp) {
 4100                 vrefact(newdp);
 4101                 prison0.pr_root = newdp;
 4102                 nrele++;
 4103         }
 4104         mtx_unlock(&prison0.pr_mtx);
 4105         sx_slock(&allprison_lock);
 4106         TAILQ_FOREACH(pr, &allprison, pr_list) {
 4107                 mtx_lock(&pr->pr_mtx);
 4108                 if (pr->pr_root == olddp) {
 4109                         vrefact(newdp);
 4110                         pr->pr_root = newdp;
 4111                         nrele++;
 4112                 }
 4113                 mtx_unlock(&pr->pr_mtx);
 4114         }
 4115         sx_sunlock(&allprison_lock);
 4116         while (nrele--)
 4117                 vrele(olddp);
 4118 }
 4119 
 4120 struct filedesc_to_leader *
 4121 filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
 4122 {
 4123         struct filedesc_to_leader *fdtol;
 4124 
 4125         fdtol = malloc(sizeof(struct filedesc_to_leader),
 4126             M_FILEDESC_TO_LEADER, M_WAITOK);
 4127         fdtol->fdl_refcount = 1;
 4128         fdtol->fdl_holdcount = 0;
 4129         fdtol->fdl_wakeup = 0;
 4130         fdtol->fdl_leader = leader;
 4131         if (old != NULL) {
 4132                 FILEDESC_XLOCK(fdp);
 4133                 fdtol->fdl_next = old->fdl_next;
 4134                 fdtol->fdl_prev = old;
 4135                 old->fdl_next = fdtol;
 4136                 fdtol->fdl_next->fdl_prev = fdtol;
 4137                 FILEDESC_XUNLOCK(fdp);
 4138         } else {
 4139                 fdtol->fdl_next = fdtol;
 4140                 fdtol->fdl_prev = fdtol;
 4141         }
 4142         return (fdtol);
 4143 }
 4144 
 4145 static int
 4146 sysctl_kern_proc_nfds(SYSCTL_HANDLER_ARGS)
 4147 {
 4148         NDSLOTTYPE *map;
 4149         struct filedesc *fdp;
 4150         u_int namelen;
 4151         int count, off, minoff;
 4152 
 4153         namelen = arg2;
 4154         if (namelen != 1)
 4155                 return (EINVAL);
 4156 
 4157         if (*(int *)arg1 != 0)
 4158                 return (EINVAL);
 4159 
 4160         fdp = curproc->p_fd;
 4161         count = 0;
 4162         FILEDESC_SLOCK(fdp);
 4163         map = fdp->fd_map;
 4164         off = NDSLOT(fdp->fd_nfiles - 1);
 4165         for (minoff = NDSLOT(0); off >= minoff; --off)
 4166                 count += bitcountl(map[off]);
 4167         FILEDESC_SUNLOCK(fdp);
 4168 
 4169         return (SYSCTL_OUT(req, &count, sizeof(count)));
 4170 }
 4171 
 4172 static SYSCTL_NODE(_kern_proc, KERN_PROC_NFDS, nfds,
 4173     CTLFLAG_RD|CTLFLAG_CAPRD|CTLFLAG_MPSAFE, sysctl_kern_proc_nfds,
 4174     "Number of open file descriptors");
 4175 
 4176 /*
 4177  * Get file structures globally.
 4178  */
 4179 static int
 4180 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
 4181 {
 4182         struct xfile xf;
 4183         struct filedesc *fdp;
 4184         struct file *fp;
 4185         struct proc *p;
 4186         int error, n, lastfile;
 4187 
 4188         error = sysctl_wire_old_buffer(req, 0);
 4189         if (error != 0)
 4190                 return (error);
 4191         if (req->oldptr == NULL) {
 4192                 n = 0;
 4193                 sx_slock(&allproc_lock);
 4194                 FOREACH_PROC_IN_SYSTEM(p) {
 4195                         PROC_LOCK(p);
 4196                         if (p->p_state == PRS_NEW) {
 4197                                 PROC_UNLOCK(p);
 4198                                 continue;
 4199                         }
 4200                         fdp = fdhold(p);
 4201                         PROC_UNLOCK(p);
 4202                         if (fdp == NULL)
 4203                                 continue;
 4204                         /* overestimates sparse tables. */
 4205                         n += fdp->fd_nfiles;
 4206                         fddrop(fdp);
 4207                 }
 4208                 sx_sunlock(&allproc_lock);
 4209                 return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
 4210         }
 4211         error = 0;
 4212         bzero(&xf, sizeof(xf));
 4213         xf.xf_size = sizeof(xf);
 4214         sx_slock(&allproc_lock);
 4215         FOREACH_PROC_IN_SYSTEM(p) {
 4216                 PROC_LOCK(p);
 4217                 if (p->p_state == PRS_NEW) {
 4218                         PROC_UNLOCK(p);
 4219                         continue;
 4220                 }
 4221                 if (p_cansee(req->td, p) != 0) {
 4222                         PROC_UNLOCK(p);
 4223                         continue;
 4224                 }
 4225                 xf.xf_pid = p->p_pid;
 4226                 xf.xf_uid = p->p_ucred->cr_uid;
 4227                 fdp = fdhold(p);
 4228                 PROC_UNLOCK(p);
 4229                 if (fdp == NULL)
 4230                         continue;
 4231                 FILEDESC_SLOCK(fdp);
 4232                 lastfile = fdlastfile(fdp);
 4233                 for (n = 0; refcount_load(&fdp->fd_refcnt) > 0 && n <= lastfile;
 4234                     n++) {
 4235                         if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
 4236                                 continue;
 4237                         xf.xf_fd = n;
 4238                         xf.xf_file = (uintptr_t)fp;
 4239                         xf.xf_data = (uintptr_t)fp->f_data;
 4240                         xf.xf_vnode = (uintptr_t)fp->f_vnode;
 4241                         xf.xf_type = (uintptr_t)fp->f_type;
 4242                         xf.xf_count = refcount_load(&fp->f_count);
 4243                         xf.xf_msgcount = 0;
 4244                         xf.xf_offset = foffset_get(fp);
 4245                         xf.xf_flag = fp->f_flag;
 4246                         error = SYSCTL_OUT(req, &xf, sizeof(xf));
 4247                         if (error)
 4248                                 break;
 4249                 }
 4250                 FILEDESC_SUNLOCK(fdp);
 4251                 fddrop(fdp);
 4252                 if (error)
 4253                         break;
 4254         }
 4255         sx_sunlock(&allproc_lock);
 4256         return (error);
 4257 }
 4258 
 4259 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
 4260     0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
 4261 
 4262 #ifdef KINFO_FILE_SIZE
 4263 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
 4264 #endif
 4265 
 4266 static int
 4267 xlate_fflags(int fflags)
 4268 {
 4269         static const struct {
 4270                 int     fflag;
 4271                 int     kf_fflag;
 4272         } fflags_table[] = {
 4273                 { FAPPEND, KF_FLAG_APPEND },
 4274                 { FASYNC, KF_FLAG_ASYNC },
 4275                 { FFSYNC, KF_FLAG_FSYNC },
 4276                 { FHASLOCK, KF_FLAG_HASLOCK },
 4277                 { FNONBLOCK, KF_FLAG_NONBLOCK },
 4278                 { FREAD, KF_FLAG_READ },
 4279                 { FWRITE, KF_FLAG_WRITE },
 4280                 { O_CREAT, KF_FLAG_CREAT },
 4281                 { O_DIRECT, KF_FLAG_DIRECT },
 4282                 { O_EXCL, KF_FLAG_EXCL },
 4283                 { O_EXEC, KF_FLAG_EXEC },
 4284                 { O_EXLOCK, KF_FLAG_EXLOCK },
 4285                 { O_NOFOLLOW, KF_FLAG_NOFOLLOW },
 4286                 { O_SHLOCK, KF_FLAG_SHLOCK },
 4287                 { O_TRUNC, KF_FLAG_TRUNC }
 4288         };
 4289         unsigned int i;
 4290         int kflags;
 4291 
 4292         kflags = 0;
 4293         for (i = 0; i < nitems(fflags_table); i++)
 4294                 if (fflags & fflags_table[i].fflag)
 4295                         kflags |=  fflags_table[i].kf_fflag;
 4296         return (kflags);
 4297 }
 4298 
 4299 /* Trim unused data from kf_path by truncating the structure size. */
 4300 void
 4301 pack_kinfo(struct kinfo_file *kif)
 4302 {
 4303 
 4304         kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
 4305             strlen(kif->kf_path) + 1;
 4306         kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
 4307 }
 4308 
 4309 static void
 4310 export_file_to_kinfo(struct file *fp, int fd, cap_rights_t *rightsp,
 4311     struct kinfo_file *kif, struct filedesc *fdp, int flags)
 4312 {
 4313         int error;
 4314 
 4315         bzero(kif, sizeof(*kif));
 4316 
 4317         /* Set a default type to allow for empty fill_kinfo() methods. */
 4318         kif->kf_type = KF_TYPE_UNKNOWN;
 4319         kif->kf_flags = xlate_fflags(fp->f_flag);
 4320         if (rightsp != NULL)
 4321                 kif->kf_cap_rights = *rightsp;
 4322         else
 4323                 cap_rights_init_zero(&kif->kf_cap_rights);
 4324         kif->kf_fd = fd;
 4325         kif->kf_ref_count = refcount_load(&fp->f_count);
 4326         kif->kf_offset = foffset_get(fp);
 4327 
 4328         /*
 4329          * This may drop the filedesc lock, so the 'fp' cannot be
 4330          * accessed after this call.
 4331          */
 4332         error = fo_fill_kinfo(fp, kif, fdp);
 4333         if (error == 0)
 4334                 kif->kf_status |= KF_ATTR_VALID;
 4335         if ((flags & KERN_FILEDESC_PACK_KINFO) != 0)
 4336                 pack_kinfo(kif);
 4337         else
 4338                 kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t));
 4339 }
 4340 
 4341 static void
 4342 export_vnode_to_kinfo(struct vnode *vp, int fd, int fflags,
 4343     struct kinfo_file *kif, int flags)
 4344 {
 4345         int error;
 4346 
 4347         bzero(kif, sizeof(*kif));
 4348 
 4349         kif->kf_type = KF_TYPE_VNODE;
 4350         error = vn_fill_kinfo_vnode(vp, kif);
 4351         if (error == 0)
 4352                 kif->kf_status |= KF_ATTR_VALID;
 4353         kif->kf_flags = xlate_fflags(fflags);
 4354         cap_rights_init_zero(&kif->kf_cap_rights);
 4355         kif->kf_fd = fd;
 4356         kif->kf_ref_count = -1;
 4357         kif->kf_offset = -1;
 4358         if ((flags & KERN_FILEDESC_PACK_KINFO) != 0)
 4359                 pack_kinfo(kif);
 4360         else
 4361                 kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t));
 4362         vrele(vp);
 4363 }
 4364 
 4365 struct export_fd_buf {
 4366         struct filedesc         *fdp;
 4367         struct pwddesc  *pdp;
 4368         struct sbuf             *sb;
 4369         ssize_t                 remainder;
 4370         struct kinfo_file       kif;
 4371         int                     flags;
 4372 };
 4373 
 4374 static int
 4375 export_kinfo_to_sb(struct export_fd_buf *efbuf)
 4376 {
 4377         struct kinfo_file *kif;
 4378 
 4379         kif = &efbuf->kif;
 4380         if (efbuf->remainder != -1) {
 4381                 if (efbuf->remainder < kif->kf_structsize) {
 4382                         /* Terminate export. */
 4383                         efbuf->remainder = 0;
 4384                         return (0);
 4385                 }
 4386                 efbuf->remainder -= kif->kf_structsize;
 4387         }
 4388         return (sbuf_bcat(efbuf->sb, kif, kif->kf_structsize) == 0 ? 0 : ENOMEM);
 4389 }
 4390 
 4391 static int
 4392 export_file_to_sb(struct file *fp, int fd, cap_rights_t *rightsp,
 4393     struct export_fd_buf *efbuf)
 4394 {
 4395         int error;
 4396 
 4397         if (efbuf->remainder == 0)
 4398                 return (0);
 4399         export_file_to_kinfo(fp, fd, rightsp, &efbuf->kif, efbuf->fdp,
 4400             efbuf->flags);
 4401         FILEDESC_SUNLOCK(efbuf->fdp);
 4402         error = export_kinfo_to_sb(efbuf);
 4403         FILEDESC_SLOCK(efbuf->fdp);
 4404         return (error);
 4405 }
 4406 
 4407 static int
 4408 export_vnode_to_sb(struct vnode *vp, int fd, int fflags,
 4409     struct export_fd_buf *efbuf)
 4410 {
 4411         int error;
 4412 
 4413         if (efbuf->remainder == 0)
 4414                 return (0);
 4415         if (efbuf->pdp != NULL)
 4416                 PWDDESC_XUNLOCK(efbuf->pdp);
 4417         export_vnode_to_kinfo(vp, fd, fflags, &efbuf->kif, efbuf->flags);
 4418         error = export_kinfo_to_sb(efbuf);
 4419         if (efbuf->pdp != NULL)
 4420                 PWDDESC_XLOCK(efbuf->pdp);
 4421         return (error);
 4422 }
 4423 
 4424 /*
 4425  * Store a process file descriptor information to sbuf.
 4426  *
 4427  * Takes a locked proc as argument, and returns with the proc unlocked.
 4428  */
 4429 int
 4430 kern_proc_filedesc_out(struct proc *p,  struct sbuf *sb, ssize_t maxlen,
 4431     int flags)
 4432 {
 4433         struct file *fp;
 4434         struct filedesc *fdp;
 4435         struct pwddesc *pdp;
 4436         struct export_fd_buf *efbuf;
 4437         struct vnode *cttyvp, *textvp, *tracevp;
 4438         struct pwd *pwd;
 4439         int error, i, lastfile;
 4440         cap_rights_t rights;
 4441 
 4442         PROC_LOCK_ASSERT(p, MA_OWNED);
 4443 
 4444         /* ktrace vnode */
 4445         tracevp = ktr_get_tracevp(p, true);
 4446         /* text vnode */
 4447         textvp = p->p_textvp;
 4448         if (textvp != NULL)
 4449                 vrefact(textvp);
 4450         /* Controlling tty. */
 4451         cttyvp = NULL;
 4452         if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
 4453                 cttyvp = p->p_pgrp->pg_session->s_ttyvp;
 4454                 if (cttyvp != NULL)
 4455                         vrefact(cttyvp);
 4456         }
 4457         fdp = fdhold(p);
 4458         pdp = pdhold(p);
 4459         PROC_UNLOCK(p);
 4460         efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
 4461         efbuf->fdp = NULL;
 4462         efbuf->pdp = NULL;
 4463         efbuf->sb = sb;
 4464         efbuf->remainder = maxlen;
 4465         efbuf->flags = flags;
 4466         if (tracevp != NULL)
 4467                 export_vnode_to_sb(tracevp, KF_FD_TYPE_TRACE, FREAD | FWRITE,
 4468                     efbuf);
 4469         if (textvp != NULL)
 4470                 export_vnode_to_sb(textvp, KF_FD_TYPE_TEXT, FREAD, efbuf);
 4471         if (cttyvp != NULL)
 4472                 export_vnode_to_sb(cttyvp, KF_FD_TYPE_CTTY, FREAD | FWRITE,
 4473                     efbuf);
 4474         error = 0;
 4475         if (pdp == NULL || fdp == NULL)
 4476                 goto fail;
 4477         efbuf->fdp = fdp;
 4478         efbuf->pdp = pdp;
 4479         PWDDESC_XLOCK(pdp);
 4480         pwd = pwd_hold_pwddesc(pdp);
 4481         if (pwd != NULL) {
 4482                 /* working directory */
 4483                 if (pwd->pwd_cdir != NULL) {
 4484                         vrefact(pwd->pwd_cdir);
 4485                         export_vnode_to_sb(pwd->pwd_cdir, KF_FD_TYPE_CWD,
 4486                             FREAD, efbuf);
 4487                 }
 4488                 /* root directory */
 4489                 if (pwd->pwd_rdir != NULL) {
 4490                         vrefact(pwd->pwd_rdir);
 4491                         export_vnode_to_sb(pwd->pwd_rdir, KF_FD_TYPE_ROOT,
 4492                             FREAD, efbuf);
 4493                 }
 4494                 /* jail directory */
 4495                 if (pwd->pwd_jdir != NULL) {
 4496                         vrefact(pwd->pwd_jdir);
 4497                         export_vnode_to_sb(pwd->pwd_jdir, KF_FD_TYPE_JAIL,
 4498                             FREAD, efbuf);
 4499                 }
 4500         }
 4501         PWDDESC_XUNLOCK(pdp);
 4502         if (pwd != NULL)
 4503                 pwd_drop(pwd);
 4504         FILEDESC_SLOCK(fdp);
 4505         lastfile = fdlastfile(fdp);
 4506         for (i = 0; refcount_load(&fdp->fd_refcnt) > 0 && i <= lastfile; i++) {
 4507                 if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
 4508                         continue;
 4509 #ifdef CAPABILITIES
 4510                 rights = *cap_rights(fdp, i);
 4511 #else /* !CAPABILITIES */
 4512                 rights = cap_no_rights;
 4513 #endif
 4514                 /*
 4515                  * Create sysctl entry.  It is OK to drop the filedesc
 4516                  * lock inside of export_file_to_sb() as we will
 4517                  * re-validate and re-evaluate its properties when the
 4518                  * loop continues.
 4519                  */
 4520                 error = export_file_to_sb(fp, i, &rights, efbuf);
 4521                 if (error != 0 || efbuf->remainder == 0)
 4522                         break;
 4523         }
 4524         FILEDESC_SUNLOCK(fdp);
 4525 fail:
 4526         if (fdp != NULL)
 4527                 fddrop(fdp);
 4528         if (pdp != NULL)
 4529                 pddrop(pdp);
 4530         free(efbuf, M_TEMP);
 4531         return (error);
 4532 }
 4533 
 4534 #define FILEDESC_SBUF_SIZE      (sizeof(struct kinfo_file) * 5)
 4535 
 4536 /*
 4537  * Get per-process file descriptors for use by procstat(1), et al.
 4538  */
 4539 static int
 4540 sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
 4541 {
 4542         struct sbuf sb;
 4543         struct proc *p;
 4544         ssize_t maxlen;
 4545         u_int namelen;
 4546         int error, error2, *name;
 4547 
 4548         namelen = arg2;
 4549         if (namelen != 1)
 4550                 return (EINVAL);
 4551 
 4552         name = (int *)arg1;
 4553 
 4554         sbuf_new_for_sysctl(&sb, NULL, FILEDESC_SBUF_SIZE, req);
 4555         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
 4556         error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
 4557         if (error != 0) {
 4558                 sbuf_delete(&sb);
 4559                 return (error);
 4560         }
 4561         maxlen = req->oldptr != NULL ? req->oldlen : -1;
 4562         error = kern_proc_filedesc_out(p, &sb, maxlen,
 4563             KERN_FILEDESC_PACK_KINFO);
 4564         error2 = sbuf_finish(&sb);
 4565         sbuf_delete(&sb);
 4566         return (error != 0 ? error : error2);
 4567 }
 4568 
 4569 #ifdef COMPAT_FREEBSD7
 4570 #ifdef KINFO_OFILE_SIZE
 4571 CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
 4572 #endif
 4573 
 4574 static void
 4575 kinfo_to_okinfo(struct kinfo_file *kif, struct kinfo_ofile *okif)
 4576 {
 4577 
 4578         okif->kf_structsize = sizeof(*okif);
 4579         okif->kf_type = kif->kf_type;
 4580         okif->kf_fd = kif->kf_fd;
 4581         okif->kf_ref_count = kif->kf_ref_count;
 4582         okif->kf_flags = kif->kf_flags & (KF_FLAG_READ | KF_FLAG_WRITE |
 4583             KF_FLAG_APPEND | KF_FLAG_ASYNC | KF_FLAG_FSYNC | KF_FLAG_NONBLOCK |
 4584             KF_FLAG_DIRECT | KF_FLAG_HASLOCK);
 4585         okif->kf_offset = kif->kf_offset;
 4586         if (kif->kf_type == KF_TYPE_VNODE)
 4587                 okif->kf_vnode_type = kif->kf_un.kf_file.kf_file_type;
 4588         else
 4589                 okif->kf_vnode_type = KF_VTYPE_VNON;
 4590         strlcpy(okif->kf_path, kif->kf_path, sizeof(okif->kf_path));
 4591         if (kif->kf_type == KF_TYPE_SOCKET) {
 4592                 okif->kf_sock_domain = kif->kf_un.kf_sock.kf_sock_domain0;
 4593                 okif->kf_sock_type = kif->kf_un.kf_sock.kf_sock_type0;
 4594                 okif->kf_sock_protocol = kif->kf_un.kf_sock.kf_sock_protocol0;
 4595                 okif->kf_sa_local = kif->kf_un.kf_sock.kf_sa_local;
 4596                 okif->kf_sa_peer = kif->kf_un.kf_sock.kf_sa_peer;
 4597         } else {
 4598                 okif->kf_sa_local.ss_family = AF_UNSPEC;
 4599                 okif->kf_sa_peer.ss_family = AF_UNSPEC;
 4600         }
 4601 }
 4602 
 4603 static int
 4604 export_vnode_for_osysctl(struct vnode *vp, int type, struct kinfo_file *kif,
 4605     struct kinfo_ofile *okif, struct pwddesc *pdp, struct sysctl_req *req)
 4606 {
 4607         int error;
 4608 
 4609         vrefact(vp);
 4610         PWDDESC_XUNLOCK(pdp);
 4611         export_vnode_to_kinfo(vp, type, 0, kif, KERN_FILEDESC_PACK_KINFO);
 4612         kinfo_to_okinfo(kif, okif);
 4613         error = SYSCTL_OUT(req, okif, sizeof(*okif));
 4614         PWDDESC_XLOCK(pdp);
 4615         return (error);
 4616 }
 4617 
 4618 /*
 4619  * Get per-process file descriptors for use by procstat(1), et al.
 4620  */
 4621 static int
 4622 sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
 4623 {
 4624         struct kinfo_ofile *okif;
 4625         struct kinfo_file *kif;
 4626         struct filedesc *fdp;
 4627         struct pwddesc *pdp;
 4628         struct pwd *pwd;
 4629         u_int namelen;
 4630         int error, i, lastfile, *name;
 4631         struct file *fp;
 4632         struct proc *p;
 4633 
 4634         namelen = arg2;
 4635         if (namelen != 1)
 4636                 return (EINVAL);
 4637 
 4638         name = (int *)arg1;
 4639         error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
 4640         if (error != 0)
 4641                 return (error);
 4642         fdp = fdhold(p);
 4643         if (fdp != NULL)
 4644                 pdp = pdhold(p);
 4645         PROC_UNLOCK(p);
 4646         if (fdp == NULL || pdp == NULL) {
 4647                 if (fdp != NULL)
 4648                         fddrop(fdp);
 4649                 return (ENOENT);
 4650         }
 4651         kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
 4652         okif = malloc(sizeof(*okif), M_TEMP, M_WAITOK);
 4653         PWDDESC_XLOCK(pdp);
 4654         pwd = pwd_hold_pwddesc(pdp);
 4655         if (pwd != NULL) {
 4656                 if (pwd->pwd_cdir != NULL)
 4657                         export_vnode_for_osysctl(pwd->pwd_cdir, KF_FD_TYPE_CWD, kif,
 4658                             okif, pdp, req);
 4659                 if (pwd->pwd_rdir != NULL)
 4660                         export_vnode_for_osysctl(pwd->pwd_rdir, KF_FD_TYPE_ROOT, kif,
 4661                             okif, pdp, req);
 4662                 if (pwd->pwd_jdir != NULL)
 4663                         export_vnode_for_osysctl(pwd->pwd_jdir, KF_FD_TYPE_JAIL, kif,
 4664                             okif, pdp, req);
 4665         }
 4666         PWDDESC_XUNLOCK(pdp);
 4667         if (pwd != NULL)
 4668                 pwd_drop(pwd);
 4669         FILEDESC_SLOCK(fdp);
 4670         lastfile = fdlastfile(fdp);
 4671         for (i = 0; refcount_load(&fdp->fd_refcnt) > 0 && i <= lastfile; i++) {
 4672                 if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
 4673                         continue;
 4674                 export_file_to_kinfo(fp, i, NULL, kif, fdp,
 4675                     KERN_FILEDESC_PACK_KINFO);
 4676                 FILEDESC_SUNLOCK(fdp);
 4677                 kinfo_to_okinfo(kif, okif);
 4678                 error = SYSCTL_OUT(req, okif, sizeof(*okif));
 4679                 FILEDESC_SLOCK(fdp);
 4680                 if (error)
 4681                         break;
 4682         }
 4683         FILEDESC_SUNLOCK(fdp);
 4684         fddrop(fdp);
 4685         pddrop(pdp);
 4686         free(kif, M_TEMP);
 4687         free(okif, M_TEMP);
 4688         return (0);
 4689 }
 4690 
 4691 static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc,
 4692     CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_ofiledesc,
 4693     "Process ofiledesc entries");
 4694 #endif  /* COMPAT_FREEBSD7 */
 4695 
 4696 int
 4697 vntype_to_kinfo(int vtype)
 4698 {
 4699         struct {
 4700                 int     vtype;
 4701                 int     kf_vtype;
 4702         } vtypes_table[] = {
 4703                 { VBAD, KF_VTYPE_VBAD },
 4704                 { VBLK, KF_VTYPE_VBLK },
 4705                 { VCHR, KF_VTYPE_VCHR },
 4706                 { VDIR, KF_VTYPE_VDIR },
 4707                 { VFIFO, KF_VTYPE_VFIFO },
 4708                 { VLNK, KF_VTYPE_VLNK },
 4709                 { VNON, KF_VTYPE_VNON },
 4710                 { VREG, KF_VTYPE_VREG },
 4711                 { VSOCK, KF_VTYPE_VSOCK }
 4712         };
 4713         unsigned int i;
 4714 
 4715         /*
 4716          * Perform vtype translation.
 4717          */
 4718         for (i = 0; i < nitems(vtypes_table); i++)
 4719                 if (vtypes_table[i].vtype == vtype)
 4720                         return (vtypes_table[i].kf_vtype);
 4721 
 4722         return (KF_VTYPE_UNKNOWN);
 4723 }
 4724 
 4725 static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc,
 4726     CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_filedesc,
 4727     "Process filedesc entries");
 4728 
 4729 /*
 4730  * Store a process current working directory information to sbuf.
 4731  *
 4732  * Takes a locked proc as argument, and returns with the proc unlocked.
 4733  */
 4734 int
 4735 kern_proc_cwd_out(struct proc *p,  struct sbuf *sb, ssize_t maxlen)
 4736 {
 4737         struct pwddesc *pdp;
 4738         struct pwd *pwd;
 4739         struct export_fd_buf *efbuf;
 4740         struct vnode *cdir;
 4741         int error;
 4742 
 4743         PROC_LOCK_ASSERT(p, MA_OWNED);
 4744 
 4745         pdp = pdhold(p);
 4746         PROC_UNLOCK(p);
 4747         if (pdp == NULL)
 4748                 return (EINVAL);
 4749 
 4750         efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
 4751         efbuf->pdp = pdp;
 4752         efbuf->sb = sb;
 4753         efbuf->remainder = maxlen;
 4754 
 4755         PWDDESC_XLOCK(pdp);
 4756         pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
 4757         cdir = pwd->pwd_cdir;
 4758         if (cdir == NULL) {
 4759                 error = EINVAL;
 4760         } else {
 4761                 vrefact(cdir);
 4762                 error = export_vnode_to_sb(cdir, KF_FD_TYPE_CWD, FREAD, efbuf);
 4763         }
 4764         PWDDESC_XUNLOCK(pdp);
 4765         pddrop(pdp);
 4766         free(efbuf, M_TEMP);
 4767         return (error);
 4768 }
 4769 
 4770 /*
 4771  * Get per-process current working directory.
 4772  */
 4773 static int
 4774 sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
 4775 {
 4776         struct sbuf sb;
 4777         struct proc *p;
 4778         ssize_t maxlen;
 4779         u_int namelen;
 4780         int error, error2, *name;
 4781 
 4782         namelen = arg2;
 4783         if (namelen != 1)
 4784                 return (EINVAL);
 4785 
 4786         name = (int *)arg1;
 4787 
 4788         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file), req);
 4789         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
 4790         error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
 4791         if (error != 0) {
 4792                 sbuf_delete(&sb);
 4793                 return (error);
 4794         }
 4795         maxlen = req->oldptr != NULL ? req->oldlen : -1;
 4796         error = kern_proc_cwd_out(p, &sb, maxlen);
 4797         error2 = sbuf_finish(&sb);
 4798         sbuf_delete(&sb);
 4799         return (error != 0 ? error : error2);
 4800 }
 4801 
 4802 static SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD|CTLFLAG_MPSAFE,
 4803     sysctl_kern_proc_cwd, "Process current working directory");
 4804 
 4805 #ifdef DDB
 4806 /*
 4807  * For the purposes of debugging, generate a human-readable string for the
 4808  * file type.
 4809  */
 4810 static const char *
 4811 file_type_to_name(short type)
 4812 {
 4813 
 4814         switch (type) {
 4815         case 0:
 4816                 return ("zero");
 4817         case DTYPE_VNODE:
 4818                 return ("vnode");
 4819         case DTYPE_SOCKET:
 4820                 return ("socket");
 4821         case DTYPE_PIPE:
 4822                 return ("pipe");
 4823         case DTYPE_FIFO:
 4824                 return ("fifo");
 4825         case DTYPE_KQUEUE:
 4826                 return ("kqueue");
 4827         case DTYPE_CRYPTO:
 4828                 return ("crypto");
 4829         case DTYPE_MQUEUE:
 4830                 return ("mqueue");
 4831         case DTYPE_SHM:
 4832                 return ("shm");
 4833         case DTYPE_SEM:
 4834                 return ("ksem");
 4835         case DTYPE_PTS:
 4836                 return ("pts");
 4837         case DTYPE_DEV:
 4838                 return ("dev");
 4839         case DTYPE_PROCDESC:
 4840                 return ("proc");
 4841         case DTYPE_EVENTFD:
 4842                 return ("eventfd");
 4843         case DTYPE_LINUXTFD:
 4844                 return ("ltimer");
 4845         default:
 4846                 return ("unkn");
 4847         }
 4848 }
 4849 
 4850 /*
 4851  * For the purposes of debugging, identify a process (if any, perhaps one of
 4852  * many) that references the passed file in its file descriptor array. Return
 4853  * NULL if none.
 4854  */
 4855 static struct proc *
 4856 file_to_first_proc(struct file *fp)
 4857 {
 4858         struct filedesc *fdp;
 4859         struct proc *p;
 4860         int n;
 4861 
 4862         FOREACH_PROC_IN_SYSTEM(p) {
 4863                 if (p->p_state == PRS_NEW)
 4864                         continue;
 4865                 fdp = p->p_fd;
 4866                 if (fdp == NULL)
 4867                         continue;
 4868                 for (n = 0; n < fdp->fd_nfiles; n++) {
 4869                         if (fp == fdp->fd_ofiles[n].fde_file)
 4870                                 return (p);
 4871                 }
 4872         }
 4873         return (NULL);
 4874 }
 4875 
 4876 static void
 4877 db_print_file(struct file *fp, int header)
 4878 {
 4879 #define XPTRWIDTH ((int)howmany(sizeof(void *) * NBBY, 4))
 4880         struct proc *p;
 4881 
 4882         if (header)
 4883                 db_printf("%*s %6s %*s %8s %4s %5s %6s %*s %5s %s\n",
 4884                     XPTRWIDTH, "File", "Type", XPTRWIDTH, "Data", "Flag",
 4885                     "GCFl", "Count", "MCount", XPTRWIDTH, "Vnode", "FPID",
 4886                     "FCmd");
 4887         p = file_to_first_proc(fp);
 4888         db_printf("%*p %6s %*p %08x %04x %5d %6d %*p %5d %s\n", XPTRWIDTH,
 4889             fp, file_type_to_name(fp->f_type), XPTRWIDTH, fp->f_data,
 4890             fp->f_flag, 0, refcount_load(&fp->f_count), 0, XPTRWIDTH, fp->f_vnode,
 4891             p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
 4892 
 4893 #undef XPTRWIDTH
 4894 }
 4895 
 4896 DB_SHOW_COMMAND(file, db_show_file)
 4897 {
 4898         struct file *fp;
 4899 
 4900         if (!have_addr) {
 4901                 db_printf("usage: show file <addr>\n");
 4902                 return;
 4903         }
 4904         fp = (struct file *)addr;
 4905         db_print_file(fp, 1);
 4906 }
 4907 
 4908 DB_SHOW_COMMAND(files, db_show_files)
 4909 {
 4910         struct filedesc *fdp;
 4911         struct file *fp;
 4912         struct proc *p;
 4913         int header;
 4914         int n;
 4915 
 4916         header = 1;
 4917         FOREACH_PROC_IN_SYSTEM(p) {
 4918                 if (p->p_state == PRS_NEW)
 4919                         continue;
 4920                 if ((fdp = p->p_fd) == NULL)
 4921                         continue;
 4922                 for (n = 0; n < fdp->fd_nfiles; ++n) {
 4923                         if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
 4924                                 continue;
 4925                         db_print_file(fp, header);
 4926                         header = 0;
 4927                 }
 4928         }
 4929 }
 4930 #endif
 4931 
 4932 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
 4933     &maxfilesperproc, 0, "Maximum files allowed open per process");
 4934 
 4935 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
 4936     &maxfiles, 0, "Maximum number of files");
 4937 
 4938 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
 4939     &openfiles, 0, "System-wide number of open files");
 4940 
 4941 /* ARGSUSED*/
 4942 static void
 4943 filelistinit(void *dummy)
 4944 {
 4945 
 4946         file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
 4947             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 4948         filedesc0_zone = uma_zcreate("filedesc0", sizeof(struct filedesc0),
 4949             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 4950         pwd_zone = uma_zcreate("PWD", sizeof(struct pwd), NULL, NULL,
 4951             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
 4952         /*
 4953          * XXXMJG this is a temporary hack due to boot ordering issues against
 4954          * the vnode zone.
 4955          */
 4956         vfs_smr = uma_zone_get_smr(pwd_zone);
 4957         mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
 4958 }
 4959 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
 4960 
 4961 /*-------------------------------------------------------------------*/
 4962 
 4963 static int
 4964 badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
 4965     int flags, struct thread *td)
 4966 {
 4967 
 4968         return (EBADF);
 4969 }
 4970 
 4971 static int
 4972 badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
 4973     struct thread *td)
 4974 {
 4975 
 4976         return (EINVAL);
 4977 }
 4978 
 4979 static int
 4980 badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
 4981     struct thread *td)
 4982 {
 4983 
 4984         return (EBADF);
 4985 }
 4986 
 4987 static int
 4988 badfo_poll(struct file *fp, int events, struct ucred *active_cred,
 4989     struct thread *td)
 4990 {
 4991 
 4992         return (0);
 4993 }
 4994 
 4995 static int
 4996 badfo_kqfilter(struct file *fp, struct knote *kn)
 4997 {
 4998 
 4999         return (EBADF);
 5000 }
 5001 
 5002 static int
 5003 badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
 5004     struct thread *td)
 5005 {
 5006 
 5007         return (EBADF);
 5008 }
 5009 
 5010 static int
 5011 badfo_close(struct file *fp, struct thread *td)
 5012 {
 5013 
 5014         return (0);
 5015 }
 5016 
 5017 static int
 5018 badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
 5019     struct thread *td)
 5020 {
 5021 
 5022         return (EBADF);
 5023 }
 5024 
 5025 static int
 5026 badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
 5027     struct thread *td)
 5028 {
 5029 
 5030         return (EBADF);
 5031 }
 5032 
 5033 static int
 5034 badfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
 5035     struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
 5036     struct thread *td)
 5037 {
 5038 
 5039         return (EBADF);
 5040 }
 5041 
 5042 static int
 5043 badfo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
 5044 {
 5045 
 5046         return (0);
 5047 }
 5048 
 5049 struct fileops badfileops = {
 5050         .fo_read = badfo_readwrite,
 5051         .fo_write = badfo_readwrite,
 5052         .fo_truncate = badfo_truncate,
 5053         .fo_ioctl = badfo_ioctl,
 5054         .fo_poll = badfo_poll,
 5055         .fo_kqfilter = badfo_kqfilter,
 5056         .fo_stat = badfo_stat,
 5057         .fo_close = badfo_close,
 5058         .fo_chmod = badfo_chmod,
 5059         .fo_chown = badfo_chown,
 5060         .fo_sendfile = badfo_sendfile,
 5061         .fo_fill_kinfo = badfo_fill_kinfo,
 5062 };
 5063 
 5064 static int
 5065 path_poll(struct file *fp, int events, struct ucred *active_cred,
 5066     struct thread *td)
 5067 {
 5068         return (POLLNVAL);
 5069 }
 5070 
 5071 static int
 5072 path_close(struct file *fp, struct thread *td)
 5073 {
 5074         MPASS(fp->f_type == DTYPE_VNODE);
 5075         fp->f_ops = &badfileops;
 5076         vdrop(fp->f_vnode);
 5077         return (0);
 5078 }
 5079 
 5080 struct fileops path_fileops = {
 5081         .fo_read = badfo_readwrite,
 5082         .fo_write = badfo_readwrite,
 5083         .fo_truncate = badfo_truncate,
 5084         .fo_ioctl = badfo_ioctl,
 5085         .fo_poll = path_poll,
 5086         .fo_kqfilter = vn_kqfilter_opath,
 5087         .fo_stat = vn_statfile,
 5088         .fo_close = path_close,
 5089         .fo_chmod = badfo_chmod,
 5090         .fo_chown = badfo_chown,
 5091         .fo_sendfile = badfo_sendfile,
 5092         .fo_fill_kinfo = vn_fill_kinfo,
 5093         .fo_flags = DFLAG_PASSABLE,
 5094 };
 5095 
 5096 int
 5097 invfo_rdwr(struct file *fp, struct uio *uio, struct ucred *active_cred,
 5098     int flags, struct thread *td)
 5099 {
 5100 
 5101         return (EOPNOTSUPP);
 5102 }
 5103 
 5104 int
 5105 invfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
 5106     struct thread *td)
 5107 {
 5108 
 5109         return (EINVAL);
 5110 }
 5111 
 5112 int
 5113 invfo_ioctl(struct file *fp, u_long com, void *data,
 5114     struct ucred *active_cred, struct thread *td)
 5115 {
 5116 
 5117         return (ENOTTY);
 5118 }
 5119 
 5120 int
 5121 invfo_poll(struct file *fp, int events, struct ucred *active_cred,
 5122     struct thread *td)
 5123 {
 5124 
 5125         return (poll_no_poll(events));
 5126 }
 5127 
 5128 int
 5129 invfo_kqfilter(struct file *fp, struct knote *kn)
 5130 {
 5131 
 5132         return (EINVAL);
 5133 }
 5134 
 5135 int
 5136 invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
 5137     struct thread *td)
 5138 {
 5139 
 5140         return (EINVAL);
 5141 }
 5142 
 5143 int
 5144 invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
 5145     struct thread *td)
 5146 {
 5147 
 5148         return (EINVAL);
 5149 }
 5150 
 5151 int
 5152 invfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
 5153     struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
 5154     struct thread *td)
 5155 {
 5156 
 5157         return (EINVAL);
 5158 }
 5159 
 5160 /*-------------------------------------------------------------------*/
 5161 
 5162 /*
 5163  * File Descriptor pseudo-device driver (/dev/fd/).
 5164  *
 5165  * Opening minor device N dup()s the file (if any) connected to file
 5166  * descriptor N belonging to the calling process.  Note that this driver
 5167  * consists of only the ``open()'' routine, because all subsequent
 5168  * references to this file will be direct to the other driver.
 5169  *
 5170  * XXX: we could give this one a cloning event handler if necessary.
 5171  */
 5172 
 5173 /* ARGSUSED */
 5174 static int
 5175 fdopen(struct cdev *dev, int mode, int type, struct thread *td)
 5176 {
 5177 
 5178         /*
 5179          * XXX Kludge: set curthread->td_dupfd to contain the value of the
 5180          * the file descriptor being sought for duplication. The error
 5181          * return ensures that the vnode for this device will be released
 5182          * by vn_open. Open will detect this special error and take the
 5183          * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
 5184          * will simply report the error.
 5185          */
 5186         td->td_dupfd = dev2unit(dev);
 5187         return (ENODEV);
 5188 }
 5189 
 5190 static struct cdevsw fildesc_cdevsw = {
 5191         .d_version =    D_VERSION,
 5192         .d_open =       fdopen,
 5193         .d_name =       "FD",
 5194 };
 5195 
 5196 static void
 5197 fildesc_drvinit(void *unused)
 5198 {
 5199         struct cdev *dev;
 5200 
 5201         dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
 5202             UID_ROOT, GID_WHEEL, 0666, "fd/0");
 5203         make_dev_alias(dev, "stdin");
 5204         dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
 5205             UID_ROOT, GID_WHEEL, 0666, "fd/1");
 5206         make_dev_alias(dev, "stdout");
 5207         dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
 5208             UID_ROOT, GID_WHEEL, 0666, "fd/2");
 5209         make_dev_alias(dev, "stderr");
 5210 }
 5211 
 5212 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);

Cache object: 65aa52b9c1380b42b7c144d63dac3712


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.