FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_descrip.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)kern_descrip.c      8.6 (Berkeley) 4/19/94
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD$");
   39 
   40 #include "opt_compat.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 
   45 #include <sys/conf.h>
   46 #include <sys/fcntl.h>
   47 #include <sys/file.h>
   48 #include <sys/filedesc.h>
   49 #include <sys/filio.h>
   50 #include <sys/jail.h>
   51 #include <sys/kernel.h>
   52 #include <sys/limits.h>
   53 #include <sys/lock.h>
   54 #include <sys/malloc.h>
   55 #include <sys/mount.h>
   56 #include <sys/mutex.h>
   57 #include <sys/namei.h>
   58 #include <sys/proc.h>
   59 #include <sys/resourcevar.h>
   60 #include <sys/signalvar.h>
   61 #include <sys/socketvar.h>
   62 #include <sys/stat.h>
   63 #include <sys/sx.h>
   64 #include <sys/syscallsubr.h>
   65 #include <sys/sysctl.h>
   66 #include <sys/sysproto.h>
   67 #include <sys/unistd.h>
   68 #include <sys/vnode.h>
   69 
   70 #include <vm/uma.h>
   71 
   72 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
   73 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
   74                      "file desc to leader structures");
   75 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
   76 
   77 static uma_zone_t file_zone;
   78 
   79 
   80 /* How to treat 'new' parameter when allocating a fd for do_dup(). */
   81 enum dup_type { DUP_VARIABLE, DUP_FIXED };
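       /*
        * DUP_FIXED: 'new' names the exact descriptor to use (dup2 semantics).
        * DUP_VARIABLE: 'new' is only a lower bound; the lowest free descriptor
        * at or above it is allocated (dup and fcntl(F_DUPFD) semantics).
        */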
   82 
   83 static int do_dup(struct thread *td, enum dup_type type, int old, int new,
   84     register_t *retval);
   85 static int      fd_first_free(struct filedesc *, int, int);
   86 static int      fd_last_used(struct filedesc *, int, int);
   87 static void     fdgrowtable(struct filedesc *, int);
   88 static int      fdrop_locked(struct file *fp, struct thread *td);
   89 static void     fdunused(struct filedesc *fdp, int fd);
   90 static void     fdused(struct filedesc *fdp, int fd);
   91 
   92 /*
   93  * A process is initially started out with NDFILE descriptors stored within
   94  * this structure, selected to be enough for typical applications based on
   95  * the historical limit of 20 open files (and the usage of descriptors by
   96  * shells).  If these descriptors are exhausted, a larger descriptor table
   97  * may be allocated, up to a process' resource limit; the internal arrays
   98  * are then unused.
   99  */
  100 #define NDFILE          20
  101 #define NDSLOTSIZE      sizeof(NDSLOTTYPE)
  102 #define NDENTRIES       (NDSLOTSIZE * __CHAR_BIT)
  103 #define NDSLOT(x)       ((x) / NDENTRIES)
  104 #define NDBIT(x)        ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
  105 #define NDSLOTS(x)      (((x) + NDENTRIES - 1) / NDENTRIES)
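       /*
        * Example: assuming NDSLOTTYPE is a 64-bit u_long, NDENTRIES is 64,
        * so descriptor 70 lives in bitmap slot NDSLOT(70) == 1 as bit
        * NDBIT(70) == (1UL << 6), and a table of 25 descriptors needs
        * NDSLOTS(25) == 1 bitmap word.
        */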
  106 
  107 /*
  108  * Storage required per open file descriptor.
  109  */
  110 #define OFILESIZE (sizeof(struct file *) + sizeof(char))
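       /*
        * One struct file pointer (an fd_ofiles slot) plus one flag byte (an
        * fd_ofileflags slot); fdgrowtable() below allocates nnfiles * OFILESIZE
        * bytes in a single block and carves it into the two parallel arrays.
        */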
  111 
  112 /*
  113  * Basic allocation of descriptors:
  114  * one of the above, plus arrays for NDFILE descriptors.
  115  */
  116 struct filedesc0 {
  117         struct  filedesc fd_fd;
  118         /*
  119          * These arrays are used when the number of open files is
  120          * <= NDFILE, and are then pointed to by the pointers above.
  121          */
  122         struct  file *fd_dfiles[NDFILE];
  123         char    fd_dfileflags[NDFILE];
  124         NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
  125 };
  126 
  127 /*
  128  * Descriptor management.
  129  */
  130 struct filelist filehead;       /* head of list of open files */
  131 int nfiles;                     /* actual number of open files */
  132 struct sx filelist_lock;        /* sx to protect filelist */
  133 struct mtx sigio_lock;          /* mtx to protect pointers to sigio */
  134 
  135 /* A mutex to protect the association between a proc and filedesc. */
  136 static struct mtx       fdesc_mtx;
  137 
  138 /*
  139  * Find the first zero bit in the given bitmap, starting at low and not
  140  * exceeding size - 1.
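        * Returns size if every bit in the range is set (or low when
        * low >= size); fdalloc() treats such an out-of-range value as the
        * cue to grow the descriptor table.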
  141  */
  142 static int
  143 fd_first_free(struct filedesc *fdp, int low, int size)
  144 {
  145         NDSLOTTYPE *map = fdp->fd_map;
  146         NDSLOTTYPE mask;
  147         int off, maxoff;
  148 
  149         if (low >= size)
  150                 return (low);
  151 
  152         off = NDSLOT(low);
  153         if (low % NDENTRIES) {
  154                 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
  155                 if ((mask &= ~map[off]) != 0UL)
  156                         return (off * NDENTRIES + ffsl(mask) - 1);
  157                 ++off;
  158         }
  159         for (maxoff = NDSLOTS(size); off < maxoff; ++off)
  160                 if (map[off] != ~0UL)
  161                         return (off * NDENTRIES + ffsl(~map[off]) - 1);
  162         return (size);
  163 }
  164 
  165 /*
  166  * Find the highest non-zero bit in the given bitmap, starting at low and
  167  * not exceeding size - 1.
  168  */
  169 static int
  170 fd_last_used(struct filedesc *fdp, int low, int size)
  171 {
  172         NDSLOTTYPE *map = fdp->fd_map;
  173         NDSLOTTYPE mask;
  174         int off, minoff;
  175 
  176         if (low >= size)
  177                 return (-1);
  178 
  179         off = NDSLOT(size);
  180         if (size % NDENTRIES) {
  181                 mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
  182                 if ((mask &= map[off]) != 0)
  183                         return (off * NDENTRIES + flsl(mask) - 1);
  184                 --off;
  185         }
  186         for (minoff = NDSLOT(low); off >= minoff; --off)
  187                 if (map[off] != 0)
  188                         return (off * NDENTRIES + flsl(map[off]) - 1);
  189         return (low - 1);
  190 }
  191 
  192 static int
  193 fdisused(struct filedesc *fdp, int fd)
  194 {
  195         KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
  196             ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
  197         return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
  198 }
  199 
  200 /*
  201  * Mark a file descriptor as used.
  202  */
  203 static void
  204 fdused(struct filedesc *fdp, int fd)
  205 {
  206         FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
  207         KASSERT(!fdisused(fdp, fd),
  208             ("fd already used"));
  209         fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
  210         if (fd > fdp->fd_lastfile)
  211                 fdp->fd_lastfile = fd;
  212         if (fd == fdp->fd_freefile)
  213                 fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
  214 }
  215 
  216 /*
  217  * Mark a file descriptor as unused.
  218  */
  219 static void
  220 fdunused(struct filedesc *fdp, int fd)
  221 {
  222         FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
  223         KASSERT(fdisused(fdp, fd),
  224             ("fd is already unused"));
  225         KASSERT(fdp->fd_ofiles[fd] == NULL,
  226             ("fd is still in use"));
  227         fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
  228         if (fd < fdp->fd_freefile)
  229                 fdp->fd_freefile = fd;
  230         if (fd == fdp->fd_lastfile)
  231                 fdp->fd_lastfile = fd_last_used(fdp, 0, fd);
  232 }
  233 
  234 /*
  235  * System calls on descriptors.
  236  */
  237 #ifndef _SYS_SYSPROTO_H_
  238 struct getdtablesize_args {
  239         int     dummy;
  240 };
  241 #endif
  242 /*
  243  * MPSAFE
  244  */
  245 /* ARGSUSED */
  246 int
  247 getdtablesize(struct thread *td, struct getdtablesize_args *uap)
  248 {
  249         struct proc *p = td->td_proc;
  250 
  251         PROC_LOCK(p);
  252         td->td_retval[0] =
  253             min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
  254         PROC_UNLOCK(p);
  255         return (0);
  256 }
  257 
  258 /*
  259  * Duplicate a file descriptor to a particular value.
  260  *
  261  * note: keep in mind that a potential race condition exists when closing
  262  * descriptors from a shared descriptor table (via rfork).
  263  */
  264 #ifndef _SYS_SYSPROTO_H_
  265 struct dup2_args {
  266         u_int   from;
  267         u_int   to;
  268 };
  269 #endif
  270 /*
  271  * MPSAFE
  272  */
  273 /* ARGSUSED */
  274 int
  275 dup2(struct thread *td, struct dup2_args *uap)
  276 {
  277 
  278         return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
  279                     td->td_retval));
  280 }
  281 
  282 /*
  283  * Duplicate a file descriptor.
  284  */
  285 #ifndef _SYS_SYSPROTO_H_
  286 struct dup_args {
  287         u_int   fd;
  288 };
  289 #endif
  290 /*
  291  * MPSAFE
  292  */
  293 /* ARGSUSED */
  294 int
  295 dup(struct thread *td, struct dup_args *uap)
  296 {
  297 
  298         return (do_dup(td, DUP_VARIABLE, (int)uap->fd, 0, td->td_retval));
  299 }
  300 
  301 /*
  302  * The file control system call.
  303  */
  304 #ifndef _SYS_SYSPROTO_H_
  305 struct fcntl_args {
  306         int     fd;
  307         int     cmd;
  308         long    arg;
  309 };
  310 #endif
  311 /*
  312  * MPSAFE
  313  */
  314 /* ARGSUSED */
  315 int
  316 fcntl(struct thread *td, struct fcntl_args *uap)
  317 {
  318         struct flock fl;
  319         intptr_t arg;
  320         int error;
  321 
  322         error = 0;
  323         switch (uap->cmd) {
  324         case F_GETLK:
  325         case F_SETLK:
  326         case F_SETLKW:
  327                 error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
  328                 arg = (intptr_t)&fl;
  329                 break;
  330         default:
  331                 arg = uap->arg;
  332                 break;
  333         }
  334         if (error)
  335                 return (error);
  336         error = kern_fcntl(td, uap->fd, uap->cmd, arg);
  337         if (error)
  338                 return (error);
  339         if (uap->cmd == F_GETLK)
  340                 error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
  341         return (error);
  342 }
  343 
  344 int
  345 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
  346 {
  347         struct filedesc *fdp;
  348         struct flock *flp;
  349         struct file *fp;
  350         struct proc *p;
  351         char *pop;
  352         struct vnode *vp;
  353         u_int newmin;
  354         int error, flg, tmp;
  355         int giant_locked;
  356 
  357         /*
  358          * XXXRW: Some fcntl() calls require Giant -- others don't.  Try to
  359          * avoid grabbing Giant for calls we know don't need it.
  360          */
  361         switch (cmd) {
  362         case F_DUPFD:
  363         case F_GETFD:
  364         case F_SETFD:
  365         case F_GETFL:
  366                 giant_locked = 0;
  367                 break;
  368 
  369         default:
  370                 giant_locked = 1;
  371                 mtx_lock(&Giant);
  372         }
  373 
  374         error = 0;
  375         flg = F_POSIX;
  376         p = td->td_proc;
  377         fdp = p->p_fd;
  378         FILEDESC_LOCK(fdp);
  379         if ((unsigned)fd >= fdp->fd_nfiles ||
  380             (fp = fdp->fd_ofiles[fd]) == NULL) {
  381                 FILEDESC_UNLOCK(fdp);
  382                 error = EBADF;
  383                 goto done2;
  384         }
  385         pop = &fdp->fd_ofileflags[fd];
  386 
  387         switch (cmd) {
  388         case F_DUPFD:
  389                 /* mtx_assert(&Giant, MA_NOTOWNED); */
  390                 FILEDESC_UNLOCK(fdp);
  391                 newmin = arg;
  392                 PROC_LOCK(p);
  393                 if (newmin >= lim_cur(p, RLIMIT_NOFILE) ||
  394                     newmin >= maxfilesperproc) {
  395                         PROC_UNLOCK(p);
  396                         error = EINVAL;
  397                         break;
  398                 }
  399                 PROC_UNLOCK(p);
  400                 error = do_dup(td, DUP_VARIABLE, fd, newmin, td->td_retval);
  401                 break;
  402 
  403         case F_GETFD:
  404                 /* mtx_assert(&Giant, MA_NOTOWNED); */
  405                 td->td_retval[0] = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
  406                 FILEDESC_UNLOCK(fdp);
  407                 break;
  408 
  409         case F_SETFD:
  410                 /* mtx_assert(&Giant, MA_NOTOWNED); */
  411                 *pop = (*pop &~ UF_EXCLOSE) |
  412                     (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
  413                 FILEDESC_UNLOCK(fdp);
  414                 break;
  415 
  416         case F_GETFL:
  417                 /* mtx_assert(&Giant, MA_NOTOWNED); */
  418                 FILE_LOCK(fp);
  419                 td->td_retval[0] = OFLAGS(fp->f_flag);
  420                 FILE_UNLOCK(fp);
  421                 FILEDESC_UNLOCK(fdp);
  422                 break;
  423 
  424         case F_SETFL:
  425                 mtx_assert(&Giant, MA_OWNED);
  426                 FILE_LOCK(fp);
  427                 fhold_locked(fp);
  428                 fp->f_flag &= ~FCNTLFLAGS;
  429                 fp->f_flag |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
  430                 FILE_UNLOCK(fp);
  431                 FILEDESC_UNLOCK(fdp);
  432                 tmp = fp->f_flag & FNONBLOCK;
  433                 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
  434                 if (error) {
  435                         fdrop(fp, td);
  436                         break;
  437                 }
  438                 tmp = fp->f_flag & FASYNC;
  439                 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
  440                 if (error == 0) {
  441                         fdrop(fp, td);
  442                         break;
  443                 }
  444                 FILE_LOCK(fp);
  445                 fp->f_flag &= ~FNONBLOCK;
  446                 FILE_UNLOCK(fp);
  447                 tmp = 0;
  448                 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
  449                 fdrop(fp, td);
  450                 break;
  451 
  452         case F_GETOWN:
  453                 mtx_assert(&Giant, MA_OWNED);
  454                 fhold(fp);
  455                 FILEDESC_UNLOCK(fdp);
  456                 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
  457                 if (error == 0)
  458                         td->td_retval[0] = tmp;
  459                 fdrop(fp, td);
  460                 break;
  461 
  462         case F_SETOWN:
  463                 mtx_assert(&Giant, MA_OWNED);
  464                 fhold(fp);
  465                 FILEDESC_UNLOCK(fdp);
  466                 tmp = arg;
  467                 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
  468                 fdrop(fp, td);
  469                 break;
  470 
  471         case F_SETLKW:
  472                 mtx_assert(&Giant, MA_OWNED);
  473                 flg |= F_WAIT;
  474                 /* FALLTHROUGH F_SETLK */
  475 
  476         case F_SETLK:
  477                 mtx_assert(&Giant, MA_OWNED);
  478                 if (fp->f_type != DTYPE_VNODE) {
  479                         FILEDESC_UNLOCK(fdp);
  480                         error = EBADF;
  481                         break;
  482                 }
  483 
  484                 flp = (struct flock *)arg;
  485                 if (flp->l_whence == SEEK_CUR) {
  486                         if (fp->f_offset < 0 ||
  487                             (flp->l_start > 0 &&
  488                              fp->f_offset > OFF_MAX - flp->l_start)) {
  489                                 FILEDESC_UNLOCK(fdp);
  490                                 error = EOVERFLOW;
  491                                 break;
  492                         }
  493                         flp->l_start += fp->f_offset;
  494                 }
  495 
  496                 /*
  497                  * VOP_ADVLOCK() may block.
  498                  */
  499                 fhold(fp);
  500                 FILEDESC_UNLOCK(fdp);
  501                 vp = fp->f_vnode;
  502 
  503                 switch (flp->l_type) {
  504                 case F_RDLCK:
  505                         if ((fp->f_flag & FREAD) == 0) {
  506                                 error = EBADF;
  507                                 break;
  508                         }
  509                         PROC_LOCK(p->p_leader);
  510                         p->p_leader->p_flag |= P_ADVLOCK;
  511                         PROC_UNLOCK(p->p_leader);
  512                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
  513                             flp, flg);
  514                         break;
  515                 case F_WRLCK:
  516                         if ((fp->f_flag & FWRITE) == 0) {
  517                                 error = EBADF;
  518                                 break;
  519                         }
  520                         PROC_LOCK(p->p_leader);
  521                         p->p_leader->p_flag |= P_ADVLOCK;
  522                         PROC_UNLOCK(p->p_leader);
  523                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
  524                             flp, flg);
  525                         break;
  526                 case F_UNLCK:
  527                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
  528                             flp, F_POSIX);
  529                         break;
  530                 default:
  531                         error = EINVAL;
  532                         break;
  533                 }
  534                 /* Check for race with close */
  535                 FILEDESC_LOCK_FAST(fdp);
  536                 if ((unsigned) fd >= fdp->fd_nfiles ||
  537                     fp != fdp->fd_ofiles[fd]) {
  538                         FILEDESC_UNLOCK_FAST(fdp);
  539                         flp->l_whence = SEEK_SET;
  540                         flp->l_start = 0;
  541                         flp->l_len = 0;
  542                         flp->l_type = F_UNLCK;
  543                         (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
  544                                            F_UNLCK, flp, F_POSIX);
  545                 } else
  546                         FILEDESC_UNLOCK_FAST(fdp);
  547                 fdrop(fp, td);
  548                 break;
  549 
  550         case F_GETLK:
  551                 mtx_assert(&Giant, MA_OWNED);
  552                 if (fp->f_type != DTYPE_VNODE) {
  553                         FILEDESC_UNLOCK(fdp);
  554                         error = EBADF;
  555                         break;
  556                 }
  557                 flp = (struct flock *)arg;
  558                 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
  559                     flp->l_type != F_UNLCK) {
  560                         FILEDESC_UNLOCK(fdp);
  561                         error = EINVAL;
  562                         break;
  563                 }
  564                 if (flp->l_whence == SEEK_CUR) {
  565                         if ((flp->l_start > 0 &&
  566                             fp->f_offset > OFF_MAX - flp->l_start) ||
  567                             (flp->l_start < 0 &&
  568                              fp->f_offset < OFF_MIN - flp->l_start)) {
  569                                 FILEDESC_UNLOCK(fdp);
  570                                 error = EOVERFLOW;
  571                                 break;
  572                         }
  573                         flp->l_start += fp->f_offset;
  574                 }
  575                 /*
  576                  * VOP_ADVLOCK() may block.
  577                  */
  578                 fhold(fp);
  579                 FILEDESC_UNLOCK(fdp);
  580                 vp = fp->f_vnode;
  581                 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
  582                     F_POSIX);
  583                 fdrop(fp, td);
  584                 break;
  585         default:
  586                 FILEDESC_UNLOCK(fdp);
  587                 error = EINVAL;
  588                 break;
  589         }
  590 done2:
  591         if (giant_locked)
  592                 mtx_unlock(&Giant);
  593         return (error);
  594 }
  595 
  596 /*
  597  * Common code for dup, dup2, and fcntl(F_DUPFD).
  598  */
  599 static int
  600 do_dup(struct thread *td, enum dup_type type, int old, int new, register_t *retval)
  601 {
  602         struct filedesc *fdp;
  603         struct proc *p;
  604         struct file *fp;
  605         struct file *delfp;
  606         int error, holdleaders, maxfd;
  607 
  608         KASSERT((type == DUP_VARIABLE || type == DUP_FIXED),
  609             ("invalid dup type %d", type));
  610 
  611         p = td->td_proc;
  612         fdp = p->p_fd;
  613 
  614         /*
  615          * Verify we have a valid descriptor to dup from and possibly to
  616          * dup to.
  617          */
  618         if (old < 0 || new < 0)
  619                 return (EBADF);
  620         PROC_LOCK(p);
  621         maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
  622         PROC_UNLOCK(p);
  623         if (new >= maxfd)
  624                 return (EMFILE);
  625 
  626         FILEDESC_LOCK(fdp);
  627         if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
  628                 FILEDESC_UNLOCK(fdp);
  629                 return (EBADF);
  630         }
  631         if (type == DUP_FIXED && old == new) {
  632                 *retval = new;
  633                 FILEDESC_UNLOCK(fdp);
  634                 return (0);
  635         }
  636         fp = fdp->fd_ofiles[old];
  637         fhold(fp);
  638 
  639         /*
  640          * If the caller specified a file descriptor, make sure the file
  641          * table is large enough to hold it, and grab it.  Otherwise, just
  642          * allocate a new descriptor the usual way.  Since the filedesc
  643          * lock may be temporarily dropped in the process, we have to look
  644          * out for a race.
  645          */
  646         if (type == DUP_FIXED) {
  647                 if (new >= fdp->fd_nfiles)
  648                         fdgrowtable(fdp, new + 1);
  649                 if (fdp->fd_ofiles[new] == NULL)
  650                         fdused(fdp, new);
  651         } else {
  652                 if ((error = fdalloc(td, new, &new)) != 0) {
  653                         FILEDESC_UNLOCK(fdp);
  654                         fdrop(fp, td);
  655                         return (error);
  656                 }
  657         }
  658 
  659         /*
  660          * If the old file changed out from under us then treat it as a
  661          * bad file descriptor.  Userland should do its own locking to
  662          * avoid this case.
  663          */
  664         if (fdp->fd_ofiles[old] != fp) {
  665                 /* we've allocated a descriptor which we won't use */
  666                 if (fdp->fd_ofiles[new] == NULL)
  667                         fdunused(fdp, new);
  668                 FILEDESC_UNLOCK(fdp);
  669                 fdrop(fp, td);
  670                 return (EBADF);
  671         }
  672         KASSERT(old != new,
  673             ("new fd is same as old"));
  674 
  675         /*
  676          * Save info on the descriptor being overwritten.  We cannot close
  677          * it without introducing an ownership race for the slot, since we
  678          * need to drop the filedesc lock to call closef().
  679          *
  680          * XXX this duplicates parts of close().
  681          */
  682         delfp = fdp->fd_ofiles[new];
  683         holdleaders = 0;
  684         if (delfp != NULL) {
  685                 if (td->td_proc->p_fdtol != NULL) {
  686                         /*
  687                          * Ask fdfree() to sleep to ensure that all relevant
  688                          * process leaders can be traversed in closef().
  689                          */
  690                         fdp->fd_holdleaderscount++;
  691                         holdleaders = 1;
  692                 }
  693         }
  694 
  695         /*
  696          * Duplicate the source descriptor
  697          */
  698         fdp->fd_ofiles[new] = fp;
  699         fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
  700         if (new > fdp->fd_lastfile)
  701                 fdp->fd_lastfile = new;
  702         *retval = new;
  703 
  704         /*
  705          * If we dup'd over a valid file, we now own the reference to it
  706          * and must dispose of it using closef() semantics (as if a
  707          * close() were performed on it).
  708          *
  709          * XXX this duplicates parts of close().
  710          */
  711         if (delfp != NULL) {
  712                 knote_fdclose(td, new);
  713                 FILEDESC_UNLOCK(fdp);
  714                 (void) closef(delfp, td);
  715                 if (holdleaders) {
  716                         FILEDESC_LOCK_FAST(fdp);
  717                         fdp->fd_holdleaderscount--;
  718                         if (fdp->fd_holdleaderscount == 0 &&
  719                             fdp->fd_holdleaderswakeup != 0) {
  720                                 fdp->fd_holdleaderswakeup = 0;
  721                                 wakeup(&fdp->fd_holdleaderscount);
  722                         }
  723                         FILEDESC_UNLOCK_FAST(fdp);
  724                 }
  725         } else {
  726                 FILEDESC_UNLOCK(fdp);
  727         }
  728         return (0);
  729 }
  730 
  731 /*
  732  * If sigio is on the list associated with a process or process group,
  733  * disable signalling from the device, remove sigio from the list and
  734  * free sigio.
  735  */
  736 void
  737 funsetown(struct sigio **sigiop)
  738 {
  739         struct sigio *sigio;
  740 
  741         SIGIO_LOCK();
  742         sigio = *sigiop;
  743         if (sigio == NULL) {
  744                 SIGIO_UNLOCK();
  745                 return;
  746         }
  747         *(sigio->sio_myref) = NULL;
  748         if ((sigio)->sio_pgid < 0) {
  749                 struct pgrp *pg = (sigio)->sio_pgrp;
  750                 PGRP_LOCK(pg);
  751                 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
  752                              sigio, sio_pgsigio);
  753                 PGRP_UNLOCK(pg);
  754         } else {
  755                 struct proc *p = (sigio)->sio_proc;
  756                 PROC_LOCK(p);
  757                 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
  758                              sigio, sio_pgsigio);
  759                 PROC_UNLOCK(p);
  760         }
  761         SIGIO_UNLOCK();
  762         crfree(sigio->sio_ucred);
  763         FREE(sigio, M_SIGIO);
  764 }
  765 
  766 /*
  767  * Free a list of sigio structures.
  768  * We only need to lock the SIGIO_LOCK because we have made ourselves
   769  * inaccessible to callers of fsetown and therefore do not need to lock
  770  * the proc or pgrp struct for the list manipulation.
  771  */
  772 void
  773 funsetownlst(struct sigiolst *sigiolst)
  774 {
  775         struct proc *p;
  776         struct pgrp *pg;
  777         struct sigio *sigio;
  778 
  779         sigio = SLIST_FIRST(sigiolst);
  780         if (sigio == NULL)
  781                 return;
  782         p = NULL;
  783         pg = NULL;
  784 
  785         /*
  786          * Every entry of the list should belong
  787          * to a single proc or pgrp.
  788          */
  789         if (sigio->sio_pgid < 0) {
  790                 pg = sigio->sio_pgrp;
  791                 PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
  792         } else /* if (sigio->sio_pgid > 0) */ {
  793                 p = sigio->sio_proc;
  794                 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
  795         }
  796 
  797         SIGIO_LOCK();
  798         while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
  799                 *(sigio->sio_myref) = NULL;
  800                 if (pg != NULL) {
  801                         KASSERT(sigio->sio_pgid < 0,
  802                             ("Proc sigio in pgrp sigio list"));
  803                         KASSERT(sigio->sio_pgrp == pg,
  804                             ("Bogus pgrp in sigio list"));
  805                         PGRP_LOCK(pg);
  806                         SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
  807                             sio_pgsigio);
  808                         PGRP_UNLOCK(pg);
  809                 } else /* if (p != NULL) */ {
  810                         KASSERT(sigio->sio_pgid > 0,
  811                             ("Pgrp sigio in proc sigio list"));
  812                         KASSERT(sigio->sio_proc == p,
  813                             ("Bogus proc in sigio list"));
  814                         PROC_LOCK(p);
  815                         SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
  816                             sio_pgsigio);
  817                         PROC_UNLOCK(p);
  818                 }
  819                 SIGIO_UNLOCK();
  820                 crfree(sigio->sio_ucred);
  821                 FREE(sigio, M_SIGIO);
  822                 SIGIO_LOCK();
  823         }
  824         SIGIO_UNLOCK();
  825 }
  826 
  827 /*
  828  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
  829  *
  830  * After permission checking, add a sigio structure to the sigio list for
  831  * the process or process group.
  832  */
  833 int
  834 fsetown(pid_t pgid, struct sigio **sigiop)
  835 {
  836         struct proc *proc;
  837         struct pgrp *pgrp;
  838         struct sigio *sigio;
  839         int ret;
  840 
  841         if (pgid == 0) {
  842                 funsetown(sigiop);
  843                 return (0);
  844         }
  845 
  846         ret = 0;
  847 
  848         /* Allocate and fill in the new sigio out of locks. */
  849         MALLOC(sigio, struct sigio *, sizeof(struct sigio), M_SIGIO, M_WAITOK);
  850         sigio->sio_pgid = pgid;
  851         sigio->sio_ucred = crhold(curthread->td_ucred);
  852         sigio->sio_myref = sigiop;
  853 
  854         sx_slock(&proctree_lock);
  855         if (pgid > 0) {
  856                 proc = pfind(pgid);
  857                 if (proc == NULL) {
  858                         ret = ESRCH;
  859                         goto fail;
  860                 }
  861 
  862                 /*
  863                  * Policy - Don't allow a process to FSETOWN a process
  864                  * in another session.
  865                  *
  866                  * Remove this test to allow maximum flexibility or
  867                  * restrict FSETOWN to the current process or process
  868                  * group for maximum safety.
  869                  */
  870                 PROC_UNLOCK(proc);
  871                 if (proc->p_session != curthread->td_proc->p_session) {
  872                         ret = EPERM;
  873                         goto fail;
  874                 }
  875 
  876                 pgrp = NULL;
  877         } else /* if (pgid < 0) */ {
  878                 pgrp = pgfind(-pgid);
  879                 if (pgrp == NULL) {
  880                         ret = ESRCH;
  881                         goto fail;
  882                 }
  883                 PGRP_UNLOCK(pgrp);
  884 
  885                 /*
  886                  * Policy - Don't allow a process to FSETOWN a process
  887                  * in another session.
  888                  *
  889                  * Remove this test to allow maximum flexibility or
  890                  * restrict FSETOWN to the current process or process
  891                  * group for maximum safety.
  892                  */
  893                 if (pgrp->pg_session != curthread->td_proc->p_session) {
  894                         ret = EPERM;
  895                         goto fail;
  896                 }
  897 
  898                 proc = NULL;
  899         }
  900         funsetown(sigiop);
  901         if (pgid > 0) {
  902                 PROC_LOCK(proc);
  903                 /*
  904                  * Since funsetownlst() is called without the proctree
  905                  * locked, we need to check for P_WEXIT.
  906                  * XXX: is ESRCH correct?
  907                  */
  908                 if ((proc->p_flag & P_WEXIT) != 0) {
  909                         PROC_UNLOCK(proc);
  910                         ret = ESRCH;
  911                         goto fail;
  912                 }
  913                 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
  914                 sigio->sio_proc = proc;
  915                 PROC_UNLOCK(proc);
  916         } else {
  917                 PGRP_LOCK(pgrp);
  918                 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
  919                 sigio->sio_pgrp = pgrp;
  920                 PGRP_UNLOCK(pgrp);
  921         }
  922         sx_sunlock(&proctree_lock);
  923         SIGIO_LOCK();
  924         *sigiop = sigio;
  925         SIGIO_UNLOCK();
  926         return (0);
  927 
  928 fail:
  929         sx_sunlock(&proctree_lock);
  930         crfree(sigio->sio_ucred);
  931         FREE(sigio, M_SIGIO);
  932         return (ret);
  933 }
  934 
  935 /*
  936  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
  937  */
  938 pid_t
  939 fgetown(sigiop)
  940         struct sigio **sigiop;
  941 {
  942         pid_t pgid;
  943 
  944         SIGIO_LOCK();
  945         pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
  946         SIGIO_UNLOCK();
  947         return (pgid);
  948 }
  949 
  950 /*
  951  * Close a file descriptor.
  952  */
  953 #ifndef _SYS_SYSPROTO_H_
  954 struct close_args {
  955         int     fd;
  956 };
  957 #endif
  958 /*
  959  * MPSAFE
  960  */
  961 /* ARGSUSED */
  962 int
  963 close(td, uap)
  964         struct thread *td;
  965         struct close_args *uap;
  966 {
  967         struct filedesc *fdp;
  968         struct file *fp;
  969         int fd, error;
  970         int holdleaders;
  971 
  972         fd = uap->fd;
  973         error = 0;
  974         holdleaders = 0;
  975         fdp = td->td_proc->p_fd;
  976         FILEDESC_LOCK(fdp);
  977         if ((unsigned)fd >= fdp->fd_nfiles ||
  978             (fp = fdp->fd_ofiles[fd]) == NULL) {
  979                 FILEDESC_UNLOCK(fdp);
  980                 return (EBADF);
  981         }
  982         fdp->fd_ofiles[fd] = NULL;
  983         fdp->fd_ofileflags[fd] = 0;
  984         fdunused(fdp, fd);
  985         if (td->td_proc->p_fdtol != NULL) {
  986                 /*
  987                  * Ask fdfree() to sleep to ensure that all relevant
  988                  * process leaders can be traversed in closef().
  989                  */
  990                 fdp->fd_holdleaderscount++;
  991                 holdleaders = 1;
  992         }
  993 
  994         /*
  995          * we now hold the fp reference that used to be owned by the descriptor
  996          * array.
  997          * We have to unlock the FILEDESC *AFTER* knote_fdclose to prevent a
   998          * race of the fd getting opened, a knote added, and deleting a knote
  999          * for the new fd.
 1000          */
 1001         knote_fdclose(td, fd);
 1002         FILEDESC_UNLOCK(fdp);
 1003 
 1004         error = closef(fp, td);
 1005         if (holdleaders) {
 1006                 FILEDESC_LOCK_FAST(fdp);
 1007                 fdp->fd_holdleaderscount--;
 1008                 if (fdp->fd_holdleaderscount == 0 &&
 1009                     fdp->fd_holdleaderswakeup != 0) {
 1010                         fdp->fd_holdleaderswakeup = 0;
 1011                         wakeup(&fdp->fd_holdleaderscount);
 1012                 }
 1013                 FILEDESC_UNLOCK_FAST(fdp);
 1014         }
 1015         return (error);
 1016 }
 1017 
 1018 #if defined(COMPAT_43)
 1019 /*
 1020  * Return status information about a file descriptor.
 1021  */
 1022 #ifndef _SYS_SYSPROTO_H_
 1023 struct ofstat_args {
 1024         int     fd;
 1025         struct  ostat *sb;
 1026 };
 1027 #endif
 1028 /*
 1029  * MPSAFE
 1030  */
 1031 /* ARGSUSED */
 1032 int
 1033 ofstat(struct thread *td, struct ofstat_args *uap)
 1034 {
 1035         struct ostat oub;
 1036         struct stat ub;
 1037         int error;
 1038 
 1039         error = kern_fstat(td, uap->fd, &ub);
 1040         if (error == 0) {
 1041                 cvtstat(&ub, &oub);
 1042                 error = copyout(&oub, uap->sb, sizeof(oub));
 1043         }
 1044         return (error);
 1045 }
 1046 #endif /* COMPAT_43 */
 1047 
 1048 /*
 1049  * Return status information about a file descriptor.
 1050  */
 1051 #ifndef _SYS_SYSPROTO_H_
 1052 struct fstat_args {
 1053         int     fd;
 1054         struct  stat *sb;
 1055 };
 1056 #endif
 1057 /*
 1058  * MPSAFE
 1059  */
 1060 /* ARGSUSED */
 1061 int
 1062 fstat(struct thread *td, struct fstat_args *uap)
 1063 {
 1064         struct stat ub;
 1065         int error;
 1066 
 1067         error = kern_fstat(td, uap->fd, &ub);
 1068         if (error == 0)
 1069                 error = copyout(&ub, uap->sb, sizeof(ub));
 1070         return (error);
 1071 }
 1072 
 1073 int
 1074 kern_fstat(struct thread *td, int fd, struct stat *sbp)
 1075 {
 1076         struct file *fp;
 1077         int error;
 1078 
 1079         if ((error = fget(td, fd, &fp)) != 0)
 1080                 return (error);
 1081         error = fo_stat(fp, sbp, td->td_ucred, td);
 1082         fdrop(fp, td);
 1083         return (error);
 1084 }
 1085 
 1086 /*
 1087  * Return status information about a file descriptor.
 1088  */
 1089 #ifndef _SYS_SYSPROTO_H_
 1090 struct nfstat_args {
 1091         int     fd;
 1092         struct  nstat *sb;
 1093 };
 1094 #endif
 1095 /*
 1096  * MPSAFE
 1097  */
 1098 /* ARGSUSED */
 1099 int
 1100 nfstat(struct thread *td, struct nfstat_args *uap)
 1101 {
 1102         struct nstat nub;
 1103         struct stat ub;
 1104         int error;
 1105 
 1106         error = kern_fstat(td, uap->fd, &ub);
 1107         if (error == 0) {
 1108                 cvtnstat(&ub, &nub);
 1109                 error = copyout(&nub, uap->sb, sizeof(nub));
 1110         }
 1111         return (error);
 1112 }
 1113 
 1114 /*
 1115  * Return pathconf information about a file descriptor.
 1116  */
 1117 #ifndef _SYS_SYSPROTO_H_
 1118 struct fpathconf_args {
 1119         int     fd;
 1120         int     name;
 1121 };
 1122 #endif
 1123 /*
 1124  * MPSAFE
 1125  */
 1126 /* ARGSUSED */
 1127 int
 1128 fpathconf(struct thread *td, struct fpathconf_args *uap)
 1129 {
 1130         struct file *fp;
 1131         struct vnode *vp;
 1132         int error;
 1133 
 1134         if ((error = fget(td, uap->fd, &fp)) != 0)
 1135                 return (error);
 1136 
 1137         /* If asynchronous I/O is available, it works for all descriptors. */
 1138         if (uap->name == _PC_ASYNC_IO) {
 1139                 td->td_retval[0] = async_io_version;
 1140                 goto out;
 1141         }
 1142         vp = fp->f_vnode;
 1143         if (vp != NULL) {
 1144                 mtx_lock(&Giant);
 1145                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 1146                 error = VOP_PATHCONF(vp, uap->name, td->td_retval);
 1147                 VOP_UNLOCK(vp, 0, td);
 1148                 mtx_unlock(&Giant);
 1149         } else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
 1150                 if (uap->name != _PC_PIPE_BUF) {
 1151                         error = EINVAL;
 1152                 } else {
 1153                         td->td_retval[0] = PIPE_BUF;
  1154                         error = 0;
 1155                 }
 1156         } else {
 1157                 error = EOPNOTSUPP;
 1158         }
 1159 out:
 1160         fdrop(fp, td);
 1161         return (error);
 1162 }
 1163 
 1164 /*
  1165  * Grow the file table to accommodate (at least) nfd descriptors.  This may
  1166  * block and drop the filedesc lock, but it will reacquire it before
  1167  * returning.
 1168  */
 1169 static void
 1170 fdgrowtable(struct filedesc *fdp, int nfd)
 1171 {
 1172         struct file **ntable;
 1173         char *nfileflags;
 1174         int nnfiles, onfiles;
 1175         NDSLOTTYPE *nmap;
 1176 
 1177         FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
 1178 
 1179         KASSERT(fdp->fd_nfiles > 0,
 1180             ("zero-length file table"));
 1181 
 1182         /* compute the size of the new table */
 1183         onfiles = fdp->fd_nfiles;
 1184         nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
 1185         if (nnfiles <= onfiles)
 1186                 /* the table is already large enough */
 1187                 return;
 1188 
 1189         /* allocate a new table and (if required) new bitmaps */
 1190         FILEDESC_UNLOCK(fdp);
 1191         MALLOC(ntable, struct file **, nnfiles * OFILESIZE,
 1192             M_FILEDESC, M_ZERO | M_WAITOK);
 1193         nfileflags = (char *)&ntable[nnfiles];
 1194         if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
 1195                 MALLOC(nmap, NDSLOTTYPE *, NDSLOTS(nnfiles) * NDSLOTSIZE,
 1196                     M_FILEDESC, M_ZERO | M_WAITOK);
 1197         else
 1198                 nmap = NULL;
 1199         FILEDESC_LOCK(fdp);
 1200 
 1201         /*
 1202          * We now have new tables ready to go.  Since we dropped the
 1203          * filedesc lock to call malloc(), watch out for a race.
 1204          */
 1205         onfiles = fdp->fd_nfiles;
 1206         if (onfiles >= nnfiles) {
 1207                 /* we lost the race, but that's OK */
 1208                 free(ntable, M_FILEDESC);
 1209                 if (nmap != NULL)
 1210                         free(nmap, M_FILEDESC);
 1211                 return;
 1212         }
 1213         bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
 1214         bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
 1215         if (onfiles > NDFILE)
 1216                 free(fdp->fd_ofiles, M_FILEDESC);
 1217         fdp->fd_ofiles = ntable;
 1218         fdp->fd_ofileflags = nfileflags;
 1219         if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
 1220                 bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
 1221                 if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
 1222                         free(fdp->fd_map, M_FILEDESC);
 1223                 fdp->fd_map = nmap;
 1224         }
 1225         fdp->fd_nfiles = nnfiles;
 1226 }
 1227 
 1228 /*
 1229  * Allocate a file descriptor for the process.
 1230  */
 1231 int
 1232 fdalloc(struct thread *td, int minfd, int *result)
 1233 {
 1234         struct proc *p = td->td_proc;
 1235         struct filedesc *fdp = p->p_fd;
 1236         int fd = -1, maxfd;
 1237 
 1238         FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
 1239 
 1240         if (fdp->fd_freefile > minfd)
 1241                 minfd = fdp->fd_freefile;          
 1242 
 1243         PROC_LOCK(p);
 1244         maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
 1245         PROC_UNLOCK(p);
 1246 
 1247         /*
 1248          * Search the bitmap for a free descriptor.  If none is found, try
 1249          * to grow the file table.  Keep at it until we either get a file
 1250          * descriptor or run into process or system limits; fdgrowtable()
 1251          * may drop the filedesc lock, so we're in a race.
 1252          */
 1253         for (;;) {
 1254                 fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
 1255                 if (fd >= maxfd)
 1256                         return (EMFILE);
 1257                 if (fd < fdp->fd_nfiles)
 1258                         break;
 1259                 fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
 1260         }
 1261 
 1262         /*
 1263          * Perform some sanity checks, then mark the file descriptor as
 1264          * used and return it to the caller.
 1265          */
 1266         KASSERT(!fdisused(fdp, fd),
 1267             ("fd_first_free() returned non-free descriptor"));
 1268         KASSERT(fdp->fd_ofiles[fd] == NULL,
 1269             ("free descriptor isn't"));
 1270         fdp->fd_ofileflags[fd] = 0; /* XXX needed? */
 1271         fdused(fdp, fd);
 1272         *result = fd;
 1273         return (0);
 1274 }
 1275 
 1276 /*
 1277  * Check to see whether n user file descriptors
 1278  * are available to the process p.
 1279  */
 1280 int
 1281 fdavail(struct thread *td, int n)
 1282 {
 1283         struct proc *p = td->td_proc;
 1284         struct filedesc *fdp = td->td_proc->p_fd;
 1285         struct file **fpp;
 1286         int i, lim, last;
 1287 
 1288         FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
 1289 
 1290         PROC_LOCK(p);
 1291         lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
 1292         PROC_UNLOCK(p);
 1293         if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
 1294                 return (1);
 1295         last = min(fdp->fd_nfiles, lim);
 1296         fpp = &fdp->fd_ofiles[fdp->fd_freefile];
 1297         for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
 1298                 if (*fpp == NULL && --n <= 0)
 1299                         return (1);
 1300         }
 1301         return (0);
 1302 }
 1303 
 1304 /*
 1305  * Create a new open file structure and allocate
  1306  * a file descriptor for the process that refers to it.
  1307  * We add one reference to the file for the descriptor table
  1308  * and one reference for resultfp. This is to prevent us being
  1309  * preempted and the entry in the descriptor table closed after
 1310  * we release the FILEDESC lock.
 1311  */
 1312 int
 1313 falloc(struct thread *td, struct file **resultfp, int *resultfd)
 1314 {
 1315         struct proc *p = td->td_proc;
 1316         struct file *fp, *fq;
 1317         int error, i;
 1318         int maxuserfiles = maxfiles - (maxfiles / 20);
 1319         static struct timeval lastfail;
 1320         static int curfail;
 1321 
 1322         fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
 1323         sx_xlock(&filelist_lock);
 1324         if ((nfiles >= maxuserfiles && (td->td_ucred->cr_ruid != 0 ||
 1325            jailed(td->td_ucred))) || nfiles >= maxfiles) {
 1326                 if (ppsratecheck(&lastfail, &curfail, 1)) {
 1327                         printf("kern.maxfiles limit exceeded by uid %i, please see tuning(7).\n",
 1328                                 td->td_ucred->cr_ruid);
 1329                 }
 1330                 sx_xunlock(&filelist_lock);
 1331                 uma_zfree(file_zone, fp);
 1332                 return (ENFILE);
 1333         }
 1334         nfiles++;
 1335 
 1336         /*
 1337          * If the process has file descriptor zero open, add the new file
 1338          * descriptor to the list of open files at that point, otherwise
 1339          * put it at the front of the list of open files.
 1340          */
 1341         fp->f_mtxp = mtx_pool_alloc(mtxpool_sleep);
 1342         fp->f_count = 1;
 1343         if (resultfp)
 1344                 fp->f_count++;
 1345         fp->f_cred = crhold(td->td_ucred);
 1346         fp->f_ops = &badfileops;
 1347         fp->f_data = NULL;
 1348         fp->f_vnode = NULL;
 1349         FILEDESC_LOCK(p->p_fd);
 1350         if ((fq = p->p_fd->fd_ofiles[0])) {
 1351                 LIST_INSERT_AFTER(fq, fp, f_list);
 1352         } else {
 1353                 LIST_INSERT_HEAD(&filehead, fp, f_list);
 1354         }
 1355         sx_xunlock(&filelist_lock);
 1356         if ((error = fdalloc(td, 0, &i))) {
 1357                 FILEDESC_UNLOCK(p->p_fd);
 1358                 fdrop(fp, td);
 1359                 if (resultfp)
 1360                         fdrop(fp, td);
 1361                 return (error);
 1362         }
 1363         p->p_fd->fd_ofiles[i] = fp;
 1364         FILEDESC_UNLOCK(p->p_fd);
 1365         if (resultfp)
 1366                 *resultfp = fp;
 1367         if (resultfd)
 1368                 *resultfd = i;
 1369         return (0);
 1370 }
 1371 
 1372 /*
 1373  * Build a new filedesc structure from another.
 1374  * Copy the current, root, and jail root vnode references.
 1375  */
 1376 struct filedesc *
 1377 fdinit(struct filedesc *fdp)
 1378 {
 1379         struct filedesc0 *newfdp;
 1380 
 1381         newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
 1382         mtx_init(&newfdp->fd_fd.fd_mtx, FILEDESC_LOCK_DESC, NULL, MTX_DEF);
 1383         if (fdp != NULL) {
 1384                 FILEDESC_LOCK(fdp);
 1385                 newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
 1386                 if (newfdp->fd_fd.fd_cdir)
 1387                         VREF(newfdp->fd_fd.fd_cdir);
 1388                 newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
 1389                 if (newfdp->fd_fd.fd_rdir)
 1390                         VREF(newfdp->fd_fd.fd_rdir);
 1391                 newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
 1392                 if (newfdp->fd_fd.fd_jdir)
 1393                         VREF(newfdp->fd_fd.fd_jdir);
 1394                 FILEDESC_UNLOCK(fdp);
 1395         }
 1396 
 1397         /* Create the file descriptor table. */
 1398         newfdp->fd_fd.fd_refcnt = 1;
 1399         newfdp->fd_fd.fd_holdcnt = 1;
 1400         newfdp->fd_fd.fd_cmask = CMASK;
 1401         newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
 1402         newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
 1403         newfdp->fd_fd.fd_nfiles = NDFILE;
 1404         newfdp->fd_fd.fd_map = newfdp->fd_dmap;
 1405         newfdp->fd_fd.fd_lastfile = -1;
 1406         return (&newfdp->fd_fd);
 1407 }
 1408 
 1409 static struct filedesc *
 1410 fdhold(struct proc *p)
 1411 {
 1412         struct filedesc *fdp;
 1413 
 1414         mtx_lock(&fdesc_mtx);
 1415         fdp = p->p_fd;
 1416         if (fdp != NULL)
 1417                 fdp->fd_holdcnt++;
 1418         mtx_unlock(&fdesc_mtx);
 1419         return (fdp);
 1420 }
 1421 
 1422 static void
 1423 fddrop(struct filedesc *fdp)
 1424 {
 1425         int i;
 1426 
 1427         mtx_lock(&fdesc_mtx);
 1428         i = --fdp->fd_holdcnt;
 1429         mtx_unlock(&fdesc_mtx);
 1430         if (i > 0)
 1431                 return;
 1432 
 1433         mtx_destroy(&fdp->fd_mtx);
 1434         FREE(fdp, M_FILEDESC);
 1435 }
 1436 
 1437 /*
 1438  * Share a filedesc structure.
 1439  */
 1440 struct filedesc *
 1441 fdshare(struct filedesc *fdp)
 1442 {
 1443         FILEDESC_LOCK_FAST(fdp);
 1444         fdp->fd_refcnt++;
 1445         FILEDESC_UNLOCK_FAST(fdp);
 1446         return (fdp);
 1447 }
 1448 
 1449 /*
 1450  * Unshare a filedesc structure, if necessary by making a copy
 1451  */
 1452 void
 1453 fdunshare(struct proc *p, struct thread *td)
 1454 {
 1455 
 1456         FILEDESC_LOCK_FAST(p->p_fd);
 1457         if (p->p_fd->fd_refcnt > 1) {
 1458                 struct filedesc *tmp;
 1459 
 1460                 FILEDESC_UNLOCK_FAST(p->p_fd);
 1461                 tmp = fdcopy(p->p_fd);
 1462                 fdfree(td);
 1463                 p->p_fd = tmp;
 1464         } else
 1465                 FILEDESC_UNLOCK_FAST(p->p_fd);
 1466 }
 1467 
 1468 /*
 1469  * Copy a filedesc structure.
  1470  * A NULL pointer on input returns a NULL reference; this is to ease
  1471  * callers, not to catch errors.
 1472  */
 1473 struct filedesc *
 1474 fdcopy(struct filedesc *fdp)
 1475 {
 1476         struct filedesc *newfdp;
 1477         int i;
 1478 
 1479         /* Certain daemons might not have file descriptors. */
 1480         if (fdp == NULL)
 1481                 return (NULL);
 1482 
 1483         newfdp = fdinit(fdp);
 1484         FILEDESC_LOCK_FAST(fdp);
 1485         while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
 1486                 FILEDESC_UNLOCK_FAST(fdp);
 1487                 FILEDESC_LOCK(newfdp);
 1488                 fdgrowtable(newfdp, fdp->fd_lastfile + 1);
 1489                 FILEDESC_UNLOCK(newfdp);
 1490                 FILEDESC_LOCK_FAST(fdp);
 1491         }
 1492         /* copy everything except kqueue descriptors */
 1493         newfdp->fd_freefile = -1;
 1494         for (i = 0; i <= fdp->fd_lastfile; ++i) {
 1495                 if (fdisused(fdp, i) &&
 1496                     fdp->fd_ofiles[i]->f_type != DTYPE_KQUEUE) {
 1497                         newfdp->fd_ofiles[i] = fdp->fd_ofiles[i];
 1498                         newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i];
 1499                         fhold(newfdp->fd_ofiles[i]);
 1500                         newfdp->fd_lastfile = i;
 1501                 } else {
 1502                         if (newfdp->fd_freefile == -1)
 1503                                 newfdp->fd_freefile = i;
 1504                 }
 1505         }
 1506         FILEDESC_UNLOCK_FAST(fdp);
 1507         FILEDESC_LOCK(newfdp);
 1508         for (i = 0; i <= newfdp->fd_lastfile; ++i)
 1509                 if (newfdp->fd_ofiles[i] != NULL)
 1510                         fdused(newfdp, i);
 1511         FILEDESC_UNLOCK(newfdp);
 1512         FILEDESC_LOCK_FAST(fdp);
 1513         if (newfdp->fd_freefile == -1)
 1514                 newfdp->fd_freefile = i;
 1515         newfdp->fd_cmask = fdp->fd_cmask;
 1516         FILEDESC_UNLOCK_FAST(fdp);
 1517         return (newfdp);
 1518 }
 1519 
 1520 /*
 1521  * Release a filedesc structure.
 1522  */
 1523 void
 1524 fdfree(struct thread *td)
 1525 {
 1526         struct filedesc *fdp;
 1527         struct file **fpp;
 1528         int i;
 1529         struct filedesc_to_leader *fdtol;
 1530         struct file *fp;
 1531         struct vnode *vp;
 1532         struct flock lf;
 1533 
 1534         GIANT_REQUIRED;         /* VFS */
 1535 
 1536         /* Certain daemons might not have file descriptors. */
 1537         fdp = td->td_proc->p_fd;
 1538         if (fdp == NULL)
 1539                 return;
 1540 
 1541         /* Check for special need to clear POSIX style locks */
 1542         fdtol = td->td_proc->p_fdtol;
 1543         if (fdtol != NULL) {
 1544                 FILEDESC_LOCK(fdp);
 1545                 KASSERT(fdtol->fdl_refcount > 0,
 1546                         ("filedesc_to_refcount botch: fdl_refcount=%d",
 1547                          fdtol->fdl_refcount));
 1548                 if (fdtol->fdl_refcount == 1 &&
 1549                     (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
 1550                         i = 0;
 1551                         fpp = fdp->fd_ofiles;
 1552                         for (i = 0, fpp = fdp->fd_ofiles;
 1553                              i <= fdp->fd_lastfile;
 1554                              i++, fpp++) {
 1555                                 if (*fpp == NULL ||
 1556                                     (*fpp)->f_type != DTYPE_VNODE)
 1557                                         continue;
 1558                                 fp = *fpp;
 1559                                 fhold(fp);
 1560                                 FILEDESC_UNLOCK(fdp);
 1561                                 lf.l_whence = SEEK_SET;
 1562                                 lf.l_start = 0;
 1563                                 lf.l_len = 0;
 1564                                 lf.l_type = F_UNLCK;
 1565                                 vp = fp->f_vnode;
 1566                                 (void) VOP_ADVLOCK(vp,
 1567                                                    (caddr_t)td->td_proc->
 1568                                                    p_leader,
 1569                                                    F_UNLCK,
 1570                                                    &lf,
 1571                                                    F_POSIX);
 1572                                 FILEDESC_LOCK(fdp);
 1573                                 fdrop(fp, td);
 1574                                 fpp = fdp->fd_ofiles + i;
 1575                         }
 1576                 }
 1577         retry:
 1578                 if (fdtol->fdl_refcount == 1) {
 1579                         if (fdp->fd_holdleaderscount > 0 &&
 1580                             (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
 1581                                 /*
 1582                                  * close() or do_dup() has cleared a reference
 1583                                  * in a shared file descriptor table.
 1584                                  */
 1585                                 fdp->fd_holdleaderswakeup = 1;
 1586                                 msleep(&fdp->fd_holdleaderscount, &fdp->fd_mtx,
 1587                                        PLOCK, "fdlhold", 0);
 1588                                 goto retry;
 1589                         }
 1590                         if (fdtol->fdl_holdcount > 0) {
 1591                                 /*
 1592                                  * Ensure that fdtol->fdl_leader
 1593                                  * remains valid in closef().
 1594                                  */
 1595                                 fdtol->fdl_wakeup = 1;
 1596                                 msleep(fdtol, &fdp->fd_mtx,
 1597                                        PLOCK, "fdlhold", 0);
 1598                                 goto retry;
 1599                         }
 1600                 }
 1601                 fdtol->fdl_refcount--;
 1602                 if (fdtol->fdl_refcount == 0 &&
 1603                     fdtol->fdl_holdcount == 0) {
 1604                         fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
 1605                         fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
 1606                 } else
 1607                         fdtol = NULL;
 1608                 td->td_proc->p_fdtol = NULL;
 1609                 FILEDESC_UNLOCK(fdp);
 1610                 if (fdtol != NULL)
 1611                         FREE(fdtol, M_FILEDESC_TO_LEADER);
 1612         }
 1613         FILEDESC_LOCK_FAST(fdp);
 1614         i = --fdp->fd_refcnt;
 1615         FILEDESC_UNLOCK_FAST(fdp);
 1616         if (i > 0)
 1617                 return;
 1618         /*
 1619          * We are the last reference to the structure, so we can
 1620          * safely assume it will not change out from under us.
 1621          */
 1622         fpp = fdp->fd_ofiles;
 1623         for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
 1624                 if (*fpp)
 1625                         (void) closef(*fpp, td);
 1626         }
 1627         FILEDESC_LOCK(fdp);
 1628 
 1629         /* XXX This should happen earlier. */
 1630         mtx_lock(&fdesc_mtx);
 1631         td->td_proc->p_fd = NULL;
 1632         mtx_unlock(&fdesc_mtx);
 1633 
 1634         if (fdp->fd_nfiles > NDFILE)
 1635                 FREE(fdp->fd_ofiles, M_FILEDESC);
 1636         if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
 1637                 FREE(fdp->fd_map, M_FILEDESC);
 1638 
 1639         fdp->fd_nfiles = 0;
 1640 
 1641         if (fdp->fd_cdir)
 1642                 vrele(fdp->fd_cdir);
 1643         fdp->fd_cdir = NULL;
 1644         if (fdp->fd_rdir)
 1645                 vrele(fdp->fd_rdir);
 1646         fdp->fd_rdir = NULL;
 1647         if (fdp->fd_jdir)
 1648                 vrele(fdp->fd_jdir);
 1649         fdp->fd_jdir = NULL;
 1650 
 1651         FILEDESC_UNLOCK(fdp);
 1652 
 1653         fddrop(fdp);
 1654 }
 1655 
 1656 /*
 1657  * For setugid programs, we don't want people to use that setugidness
 1658  * to generate error messages which write to a file which would
 1659  * otherwise be off-limits to the process.  We check for filesystems where
 1660  * the vnode can change out from under us after execve (like [lin]procfs).
 1661  *
 1662  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 1663  * sufficient.  We also don't check for setugidness since we know we are.
 1664  */
 1665 static int
 1666 is_unsafe(struct file *fp)
 1667 {
 1668         if (fp->f_type == DTYPE_VNODE) {
 1669                 struct vnode *vp = fp->f_vnode;
 1670 
 1671                 if ((vp->v_vflag & VV_PROCDEP) != 0)
 1672                         return (1);
 1673         }
 1674         return (0);
 1675 }
 1676 
 1677 /*
 1678  * Make this setugid thing safe, if at all possible.
 1679  */
 1680 void
 1681 setugidsafety(struct thread *td)
 1682 {
 1683         struct filedesc *fdp;
 1684         int i;
 1685 
 1686         /* Certain daemons might not have file descriptors. */
 1687         fdp = td->td_proc->p_fd;
 1688         if (fdp == NULL)
 1689                 return;
 1690 
 1691         /*
 1692          * Note: fdp->fd_ofiles may be reallocated out from under us while
 1693          * we are blocked in a close.  Be careful!
 1694          */
 1695         FILEDESC_LOCK(fdp);
 1696         for (i = 0; i <= fdp->fd_lastfile; i++) {
 1697                 if (i > 2)
 1698                         break;
 1699                 if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
 1700                         struct file *fp;
 1701 
 1702                         knote_fdclose(td, i);
 1703                         /*
 1704                          * NULL-out descriptor prior to close to avoid
 1705                          * a race while close blocks.
 1706                          */
 1707                         fp = fdp->fd_ofiles[i];
 1708                         fdp->fd_ofiles[i] = NULL;
 1709                         fdp->fd_ofileflags[i] = 0;
 1710                         fdunused(fdp, i);
 1711                         FILEDESC_UNLOCK(fdp);
 1712                         (void) closef(fp, td);
 1713                         FILEDESC_LOCK(fdp);
 1714                 }
 1715         }
 1716         FILEDESC_UNLOCK(fdp);
 1717 }
 1718 
 1719 void
 1720 fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
 1721 {
 1722 
 1723         FILEDESC_LOCK(fdp);
 1724         if (fdp->fd_ofiles[idx] == fp) {
 1725                 fdp->fd_ofiles[idx] = NULL;
 1726                 fdunused(fdp, idx);
 1727                 FILEDESC_UNLOCK(fdp);
 1728                 fdrop(fp, td);
 1729         } else {
 1730                 FILEDESC_UNLOCK(fdp);
 1731         }
 1732 }
 1733 
 1734 /*
 1735  * Close any files on exec?
 1736  */
 1737 void
 1738 fdcloseexec(struct thread *td)
 1739 {
 1740         struct filedesc *fdp;
 1741         int i;
 1742 
 1743         /* Certain daemons might not have file descriptors. */
 1744         fdp = td->td_proc->p_fd;
 1745         if (fdp == NULL)
 1746                 return;
 1747 
 1748         FILEDESC_LOCK(fdp);
 1749 
 1750         /*
 1751          * We cannot cache fd_ofiles or fd_ofileflags since operations
 1752          * may block and rip them out from under us.
 1753          */
 1754         for (i = 0; i <= fdp->fd_lastfile; i++) {
 1755                 if (fdp->fd_ofiles[i] != NULL &&
 1756                     (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
 1757                         struct file *fp;
 1758 
 1759                         knote_fdclose(td, i);
 1760                         /*
 1761                          * NULL-out descriptor prior to close to avoid
 1762                          * a race while close blocks.
 1763                          */
 1764                         fp = fdp->fd_ofiles[i];
 1765                         fdp->fd_ofiles[i] = NULL;
 1766                         fdp->fd_ofileflags[i] = 0;
 1767                         fdunused(fdp, i);
 1768                         FILEDESC_UNLOCK(fdp);
 1769                         (void) closef(fp, td);
 1770                         FILEDESC_LOCK(fdp);
 1771                 }
 1772         }
 1773         FILEDESC_UNLOCK(fdp);
 1774 }
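
/*
 * Hedged userland sketch (not part of this file): fcntl(2) with F_SETFD and
 * FD_CLOEXEC is the interface that sets the per-descriptor UF_EXCLOSE flag
 * that fdcloseexec() above acts on during execve().
 */
#include <fcntl.h>

static int
mark_cloexec(int fd)
{
	int flags;

	if ((flags = fcntl(fd, F_GETFD)) == -1)
		return (-1);
	return (fcntl(fd, F_SETFD, flags | FD_CLOEXEC));
}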
 1775 
 1776 /*
 1777  * It is unsafe for set[ug]id processes to be started with file
 1778  * descriptors 0..2 closed, as these descriptors are given implicit
 1779  * significance in the Standard C library.  fdcheckstd() will create a
 1780  * descriptor referencing /dev/null for each of stdin, stdout, and
 1781  * stderr that is not already open.
 1782  */
 1783 int
 1784 fdcheckstd(struct thread *td)
 1785 {
 1786         struct nameidata nd;
 1787         struct filedesc *fdp;
 1788         struct file *fp;
 1789         register_t retval;
 1790         int fd, i, error, flags, devnull;
 1791 
 1792         GIANT_REQUIRED;         /* VFS */
 1793 
 1794         fdp = td->td_proc->p_fd;
 1795         if (fdp == NULL)
 1796                 return (0);
 1797         KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
 1798         devnull = -1;
 1799         error = 0;
 1800         for (i = 0; i < 3; i++) {
 1801                 if (fdp->fd_ofiles[i] != NULL)
 1802                         continue;
 1803                 if (devnull < 0) {
 1804                         error = falloc(td, &fp, &fd);
 1805                         if (error != 0)
 1806                                 break;
 1807                         /* Note extra ref on `fp' held for us by falloc(). */
 1808                         KASSERT(fd == i, ("oof, we didn't get our fd"));
 1809                         NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/dev/null",
 1810                             td);
 1811                         flags = FREAD | FWRITE;
 1812                         error = vn_open(&nd, &flags, 0, -1);
 1813                         if (error != 0) {
 1814                                 /*
 1815                                  * Someone may have closed the entry in the
 1816                                  * file descriptor table, so check it hasn't
 1817                                  * changed before dropping the reference count.
 1818                                  */
 1819                                 FILEDESC_LOCK(fdp);
 1820                                 KASSERT(fdp->fd_ofiles[fd] == fp,
 1821                                     ("table not shared, how did it change?"));
 1822                                 fdp->fd_ofiles[fd] = NULL;
 1823                                 fdunused(fdp, fd);
 1824                                 FILEDESC_UNLOCK(fdp);
 1825                                 fdrop(fp, td);
 1826                                 fdrop(fp, td);
 1827                                 break;
 1828                         }
 1829                         NDFREE(&nd, NDF_ONLY_PNBUF);
 1830                         fp->f_flag = flags;
 1831                         fp->f_vnode = nd.ni_vp;
 1832                         if (fp->f_data == NULL)
 1833                                 fp->f_data = nd.ni_vp;
 1834                         if (fp->f_ops == &badfileops)
 1835                                 fp->f_ops = &vnops;
 1836                         fp->f_type = DTYPE_VNODE;
 1837                         VOP_UNLOCK(nd.ni_vp, 0, td);
 1838                         devnull = fd;
 1839                         fdrop(fp, td);
 1840                 } else {
 1841                         error = do_dup(td, DUP_FIXED, devnull, i, &retval);
 1842                         if (error != 0)
 1843                                 break;
 1844                 }
 1845         }
 1846         return (error);
 1847 }
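
/*
 * Hedged userland analogue (not part of this file) of what fdcheckstd()
 * does in-kernel: make sure descriptors 0..2 refer to something before
 * running code that assumes the stdio descriptors exist.  open(2) always
 * returns the lowest free descriptor, so the loop stops once 0..2 are all
 * occupied.
 */
#include <fcntl.h>
#include <unistd.h>

static int
ensure_std_fds(void)
{
	int fd;

	while ((fd = open("/dev/null", O_RDWR)) != -1) {
		if (fd > 2) {		/* 0..2 were already open */
			close(fd);
			return (0);
		}
	}
	return (-1);
}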
 1848 
 1849 /*
 1850  * Internal form of close.
 1851  * Decrement reference count on file structure.
 1852  * Note: td may be NULL when closing a file that was being passed in a
 1853  * message.
 1854  *
 1855  * XXXRW: Giant is not required for the caller, but often will be held; this
 1856  * makes it moderately likely that Giant will be recursed in the VFS case.
 1857  */
 1858 int
 1859 closef(struct file *fp, struct thread *td)
 1860 {
 1861         struct vnode *vp;
 1862         struct flock lf;
 1863         struct filedesc_to_leader *fdtol;
 1864         struct filedesc *fdp;
 1865 
 1866         /*
 1867          * POSIX record locking dictates that any close releases ALL
 1868          * locks owned by this process.  This is handled by setting
 1869          * a flag in the unlock to free ONLY locks obeying POSIX
 1870          * semantics, and not to free BSD-style file locks.
 1871          * If the descriptor was in a message, POSIX-style locks
 1872          * aren't passed with the descriptor, and the thread pointer
 1873          * will be NULL.  Callers should be careful only to pass a
 1874          * NULL thread pointer when there really is no owning
 1875          * context that might have locks, or the locks will be
 1876          * leaked.
 1877          */
 1878         if (fp->f_type == DTYPE_VNODE && td != NULL) {
 1879                 mtx_lock(&Giant);
 1880                 if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
 1881                         lf.l_whence = SEEK_SET;
 1882                         lf.l_start = 0;
 1883                         lf.l_len = 0;
 1884                         lf.l_type = F_UNLCK;
 1885                         vp = fp->f_vnode;
 1886                         (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
 1887                                            F_UNLCK, &lf, F_POSIX);
 1888                 }
 1889                 fdtol = td->td_proc->p_fdtol;
 1890                 if (fdtol != NULL) {
 1891                         /*
 1892                          * Handle special case where file descriptor table
 1893                          * is shared between multiple process leaders.
 1894                          */
 1895                         fdp = td->td_proc->p_fd;
 1896                         FILEDESC_LOCK(fdp);
 1897                         for (fdtol = fdtol->fdl_next;
 1898                              fdtol != td->td_proc->p_fdtol;
 1899                              fdtol = fdtol->fdl_next) {
 1900                                 if ((fdtol->fdl_leader->p_flag &
 1901                                      P_ADVLOCK) == 0)
 1902                                         continue;
 1903                                 fdtol->fdl_holdcount++;
 1904                                 FILEDESC_UNLOCK(fdp);
 1905                                 lf.l_whence = SEEK_SET;
 1906                                 lf.l_start = 0;
 1907                                 lf.l_len = 0;
 1908                                 lf.l_type = F_UNLCK;
 1909                                 vp = fp->f_vnode;
 1910                                 (void) VOP_ADVLOCK(vp,
 1911                                                    (caddr_t)fdtol->fdl_leader,
 1912                                                    F_UNLCK, &lf, F_POSIX);
 1913                                 FILEDESC_LOCK(fdp);
 1914                                 fdtol->fdl_holdcount--;
 1915                                 if (fdtol->fdl_holdcount == 0 &&
 1916                                     fdtol->fdl_wakeup != 0) {
 1917                                         fdtol->fdl_wakeup = 0;
 1918                                         wakeup(fdtol);
 1919                                 }
 1920                         }
 1921                         FILEDESC_UNLOCK(fdp);
 1922                 }
 1923                 mtx_unlock(&Giant);
 1924         }
 1925         return (fdrop(fp, td));
 1926 }
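
/*
 * Hedged userland sketch (not part of this file) of the POSIX record-lock
 * semantics described above: closing *any* descriptor for a file releases
 * every fcntl(2) lock this process holds on that file, even a lock taken
 * through a different descriptor.  Error handling is omitted for brevity.
 */
#include <fcntl.h>
#include <unistd.h>

static void
posix_lock_close_gotcha(const char *path)
{
	struct flock lf = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd1 = open(path, O_RDWR);
	int fd2 = open(path, O_RDWR);

	(void) fcntl(fd1, F_SETLK, &lf);	/* lock the whole file via fd1 */
	(void) close(fd2);			/* drops the lock held via fd1 */
	(void) close(fd1);
}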
 1927 
 1928 /*
 1929  * Extract the file pointer associated with the specified descriptor for
 1930  * the current user process.
 1931  *
 1932  * If the descriptor doesn't exist, EBADF is returned.
 1933  *
 1934  * If the descriptor exists but doesn't match 'flags' then
 1935  * return EBADF for read attempts and EINVAL for write attempts.
 1936  *
 1937  * If 'hold' is set (non-zero) the file's refcount will be bumped on return.
 1938  * It should be dropped with fdrop().
 1939  * If it is not set, then the refcount will not be bumped; however, the
 1940  * thread's filedesc struct will be returned locked (for fgetsock).
 1941  *
 1942  * If an error occurred, the non-zero error is returned and *fpp is set to NULL.
 1943  * Otherwise *fpp is set and zero is returned.
 1944  */
 1945 static __inline int
 1946 _fget(struct thread *td, int fd, struct file **fpp, int flags, int hold)
 1947 {
 1948         struct filedesc *fdp;
 1949         struct file *fp;
 1950 
 1951         *fpp = NULL;
 1952         if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
 1953                 return (EBADF);
 1954         FILEDESC_LOCK(fdp);
 1955         if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) {
 1956                 FILEDESC_UNLOCK(fdp);
 1957                 return (EBADF);
 1958         }
 1959 
 1960         /*
 1961  * Note: FREAD failures return EBADF to maintain backwards
 1962          * compatibility with what routines returned before.
 1963          *
 1964          * Only one flag, or 0, may be specified.
 1965          */
 1966         if (flags == FREAD && (fp->f_flag & FREAD) == 0) {
 1967                 FILEDESC_UNLOCK(fdp);
 1968                 return (EBADF);
 1969         }
 1970         if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) {
 1971                 FILEDESC_UNLOCK(fdp);
 1972                 return (EINVAL);
 1973         }
 1974         if (hold) {
 1975                 fhold(fp);
 1976                 FILEDESC_UNLOCK(fdp);
 1977         }
 1978         *fpp = fp;
 1979         return (0);
 1980 }
 1981 
 1982 int
 1983 fget(struct thread *td, int fd, struct file **fpp)
 1984 {
 1985 
 1986         return(_fget(td, fd, fpp, 0, 1));
 1987 }
 1988 
 1989 int
 1990 fget_read(struct thread *td, int fd, struct file **fpp)
 1991 {
 1992 
 1993         return(_fget(td, fd, fpp, FREAD, 1));
 1994 }
 1995 
 1996 int
 1997 fget_write(struct thread *td, int fd, struct file **fpp)
 1998 {
 1999 
 2000         return(_fget(td, fd, fpp, FWRITE, 1));
 2001 }
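
/*
 * Hedged in-kernel usage sketch (not part of this file): the typical
 * pattern for a consumer of fget().  The reference taken by fget() must be
 * released with fdrop() once the file pointer is no longer needed; the
 * function below is hypothetical.
 */
static int
example_get_fflags(struct thread *td, int fd, int *flagsp)
{
	struct file *fp;
	int error;

	if ((error = fget(td, fd, &fp)) != 0)
		return (error);		/* EBADF if fd is not open */
	*flagsp = fp->f_flag;		/* safe while we hold the reference */
	fdrop(fp, td);
	return (0);
}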
 2002 
 2003 /*
 2004  * Like fget() but loads the underlying vnode, or returns an error if
 2005  * the descriptor does not represent a vnode.  Note that pipes use vnodes
 2006  * but never have VM objects (so VOP_GETVOBJECT() calls will return an
 2007  * error).  The returned vnode will be vref()d.
 2008  *
 2009  * XXX: what about the unused flags?
 2010  */
 2011 static __inline int
 2012 _fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags)
 2013 {
 2014         struct file *fp;
 2015         int error;
 2016 
 2017         GIANT_REQUIRED;         /* VFS */
 2018 
 2019         *vpp = NULL;
 2020         if ((error = _fget(td, fd, &fp, flags, 0)) != 0)
 2021                 return (error);
 2022         if (fp->f_vnode == NULL) {
 2023                 error = EINVAL;
 2024         } else {
 2025                 *vpp = fp->f_vnode;
 2026                 vref(*vpp);
 2027         }
 2028         FILEDESC_UNLOCK(td->td_proc->p_fd);
 2029         return (error);
 2030 }
 2031 
 2032 int
 2033 fgetvp(struct thread *td, int fd, struct vnode **vpp)
 2034 {
 2035 
 2036         return (_fgetvp(td, fd, vpp, 0));
 2037 }
 2038 
 2039 int
 2040 fgetvp_read(struct thread *td, int fd, struct vnode **vpp)
 2041 {
 2042 
 2043         return (_fgetvp(td, fd, vpp, FREAD));
 2044 }
 2045 
 2046 #ifdef notyet
 2047 int
 2048 fgetvp_write(struct thread *td, int fd, struct vnode **vpp)
 2049 {
 2050 
 2051         return (_fgetvp(td, fd, vpp, FWRITE));
 2052 }
 2053 #endif
 2054 
 2055 /*
 2056  * Like fget() but loads the underlying socket, or returns an error if
 2057  * the descriptor does not represent a socket.
 2058  *
 2059  * We bump the ref count on the returned socket.  XXX Also obtain the SX
 2060  * lock in the future.
 2061  */
 2062 int
 2063 fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp)
 2064 {
 2065         struct file *fp;
 2066         int error;
 2067 
 2068         NET_ASSERT_GIANT();
 2069 
 2070         *spp = NULL;
 2071         if (fflagp != NULL)
 2072                 *fflagp = 0;
 2073         if ((error = _fget(td, fd, &fp, 0, 0)) != 0)
 2074                 return (error);
 2075         if (fp->f_type != DTYPE_SOCKET) {
 2076                 error = ENOTSOCK;
 2077         } else {
 2078                 *spp = fp->f_data;
 2079                 if (fflagp)
 2080                         *fflagp = fp->f_flag;
 2081                 SOCK_LOCK(*spp);
 2082                 soref(*spp);
 2083                 SOCK_UNLOCK(*spp);
 2084         }
 2085         FILEDESC_UNLOCK(td->td_proc->p_fd);
 2086         return (error);
 2087 }
 2088 
 2089 /*
 2090  * Drop the reference count on the socket and XXX release the SX lock in
 2091  * the future.  The last reference closes the socket.
 2092  */
 2093 void
 2094 fputsock(struct socket *so)
 2095 {
 2096 
 2097         NET_ASSERT_GIANT();
 2098         ACCEPT_LOCK();
 2099         SOCK_LOCK(so);
 2100         sorele(so);
 2101 }
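
/*
 * Hedged in-kernel sketch (not part of this file): how a networking path
 * might pair fgetsock() with fputsock().  fgetsock() takes a reference on
 * the socket via soref(); fputsock() releases it.  The function below is
 * hypothetical.
 */
static int
example_with_socket(struct thread *td, int fd)
{
	struct socket *so;
	u_int fflag;
	int error;

	NET_ASSERT_GIANT();
	if ((error = fgetsock(td, fd, &so, &fflag)) != 0)
		return (error);		/* ENOTSOCK or EBADF */
	/* ... operate on 'so' while the reference is held ... */
	fputsock(so);
	return (0);
}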
 2102 
 2103 int
 2104 fdrop(struct file *fp, struct thread *td)
 2105 {
 2106 
 2107         FILE_LOCK(fp);
 2108         return (fdrop_locked(fp, td));
 2109 }
 2110 
 2111 /*
 2112  * Drop reference on struct file passed in, may call closef if the
 2113  * reference hits zero.
 2114  * Expects struct file locked, and will unlock it.
 2115  */
 2116 static int
 2117 fdrop_locked(struct file *fp, struct thread *td)
 2118 {
 2119         int error;
 2120 
 2121         FILE_LOCK_ASSERT(fp, MA_OWNED);
 2122 
 2123         if (--fp->f_count > 0) {
 2124                 FILE_UNLOCK(fp);
 2125                 return (0);
 2126         }
 2127         /* We have the last ref so we can proceed without the file lock. */
 2128         FILE_UNLOCK(fp);
 2129         if (fp->f_count < 0)
 2130                 panic("fdrop: count < 0");
 2131         if (fp->f_ops != &badfileops)
 2132                 error = fo_close(fp, td);
 2133         else
 2134                 error = 0;
 2135 
 2136         sx_xlock(&filelist_lock);
 2137         LIST_REMOVE(fp, f_list);
 2138         nfiles--;
 2139         sx_xunlock(&filelist_lock);
 2140         crfree(fp->f_cred);
 2141         uma_zfree(file_zone, fp);
 2142 
 2143         return (error);
 2144 }
 2145 
 2146 /*
 2147  * Apply an advisory lock on a file descriptor.
 2148  *
 2149  * Just attempt to get a record lock of the requested type on
 2150  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 2151  */
 2152 #ifndef _SYS_SYSPROTO_H_
 2153 struct flock_args {
 2154         int     fd;
 2155         int     how;
 2156 };
 2157 #endif
 2158 /*
 2159  * MPSAFE
 2160  */
 2161 /* ARGSUSED */
 2162 int
 2163 flock(struct thread *td, struct flock_args *uap)
 2164 {
 2165         struct file *fp;
 2166         struct vnode *vp;
 2167         struct flock lf;
 2168         int error;
 2169 
 2170         if ((error = fget(td, uap->fd, &fp)) != 0)
 2171                 return (error);
 2172         if (fp->f_type != DTYPE_VNODE) {
 2173                 fdrop(fp, td);
 2174                 return (EOPNOTSUPP);
 2175         }
 2176 
 2177         mtx_lock(&Giant);
 2178         vp = fp->f_vnode;
 2179         lf.l_whence = SEEK_SET;
 2180         lf.l_start = 0;
 2181         lf.l_len = 0;
 2182         if (uap->how & LOCK_UN) {
 2183                 lf.l_type = F_UNLCK;
 2184                 FILE_LOCK(fp);
 2185                 fp->f_flag &= ~FHASLOCK;
 2186                 FILE_UNLOCK(fp);
 2187                 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
 2188                 goto done2;
 2189         }
 2190         if (uap->how & LOCK_EX)
 2191                 lf.l_type = F_WRLCK;
 2192         else if (uap->how & LOCK_SH)
 2193                 lf.l_type = F_RDLCK;
 2194         else {
 2195                 error = EBADF;
 2196                 goto done2;
 2197         }
 2198         FILE_LOCK(fp);
 2199         fp->f_flag |= FHASLOCK;
 2200         FILE_UNLOCK(fp);
 2201         error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
 2202             (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
 2203 done2:
 2204         fdrop(fp, td);
 2205         mtx_unlock(&Giant);
 2206         return (error);
 2207 }
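
/*
 * Hedged userland sketch (not part of this file): the flock(2) interface
 * implemented above, taking an exclusive advisory lock on the whole file
 * and then releasing it.
 */
#include <sys/file.h>

static int
lock_whole_file(int fd)
{

	if (flock(fd, LOCK_EX) == -1)		/* blocks until granted */
		return (-1);
	/* ... critical section ... */
	return (flock(fd, LOCK_UN));
}
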
 2208 /*
 2209  * Duplicate the specified descriptor to a free descriptor.
 2210  */
 2211 int
 2212 dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd, int mode, int error)
 2213 {
 2214         struct file *wfp;
 2215         struct file *fp;
 2216 
 2217         /*
 2218          * If the to-be-dup'd fd number is greater than the allowed number
 2219          * of file descriptors, or the fd to be dup'd has already been
 2220          * closed, then reject.
 2221          */
 2222         FILEDESC_LOCK(fdp);
 2223         if (dfd < 0 || dfd >= fdp->fd_nfiles ||
 2224             (wfp = fdp->fd_ofiles[dfd]) == NULL) {
 2225                 FILEDESC_UNLOCK(fdp);
 2226                 return (EBADF);
 2227         }
 2228 
 2229         /*
 2230          * There are two cases of interest here.
 2231          *
 2232          * For ENODEV simply dup (dfd) to file descriptor
 2233          * (indx) and return.
 2234          *
 2235          * For ENXIO steal away the file structure from (dfd) and
 2236          * store it in (indx).  (dfd) is effectively closed by
 2237          * this operation.
 2238          *
 2239          * Any other error code is just returned.
 2240          */
 2241         switch (error) {
 2242         case ENODEV:
 2243                 /*
 2244                  * Check that the mode the file is being opened for is a
 2245                  * subset of the mode of the existing descriptor.
 2246                  */
 2247                 FILE_LOCK(wfp);
 2248                 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
 2249                         FILE_UNLOCK(wfp);
 2250                         FILEDESC_UNLOCK(fdp);
 2251                         return (EACCES);
 2252                 }
 2253                 fp = fdp->fd_ofiles[indx];
 2254                 fdp->fd_ofiles[indx] = wfp;
 2255                 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
 2256                 if (fp == NULL)
 2257                         fdused(fdp, indx);
 2258                 fhold_locked(wfp);
 2259                 FILE_UNLOCK(wfp);
 2260                 FILEDESC_UNLOCK(fdp);
 2261                 if (fp != NULL) {
 2262                         /*
 2263                          * We now own the reference to fp that the ofiles[]
 2264                          * array used to own.  Release it.
 2265                          */
 2266                         FILE_LOCK(fp);
 2267                         fdrop_locked(fp, td);
 2268                 }
 2269                 return (0);
 2270 
 2271         case ENXIO:
 2272                 /*
 2273                  * Steal away the file pointer from dfd and stuff it into indx.
 2274                  */
 2275                 fp = fdp->fd_ofiles[indx];
 2276                 fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
 2277                 fdp->fd_ofiles[dfd] = NULL;
 2278                 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
 2279                 fdp->fd_ofileflags[dfd] = 0;
 2280                 fdunused(fdp, dfd);
 2281                 if (fp == NULL)
 2282                         fdused(fdp, indx);
 2283                 if (fp != NULL)
 2284                         FILE_LOCK(fp);
 2285 
 2286                 /*
 2287                  * We now own the reference to fp that the ofiles[] array
 2288                  * used to own.  Release it.
 2289                  */
 2290                 if (fp != NULL)
 2291                         fdrop_locked(fp, td);
 2292 
 2293                 FILEDESC_UNLOCK(fdp);
 2294 
 2295                 return (0);
 2296 
 2297         default:
 2298                 FILEDESC_UNLOCK(fdp);
 2299                 return (error);
 2300         }
 2301         /* NOTREACHED */
 2302 }
 2303 /*
 2304  * Scan all active processes to see if any of them have a current
 2305  * or root directory of `olddp'. If so, replace them with the new
 2306  * mount point.
 2307  */
 2308 void
 2309 mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
 2310 {
 2311         struct filedesc *fdp;
 2312         struct proc *p;
 2313         int nrele;
 2314 
 2315         if (vrefcnt(olddp) == 1)
 2316                 return;
 2317         sx_slock(&allproc_lock);
 2318         LIST_FOREACH(p, &allproc, p_list) {
 2319                 fdp = fdhold(p);
 2320                 if (fdp == NULL)
 2321                         continue;
 2322                 nrele = 0;
 2323                 FILEDESC_LOCK_FAST(fdp);
 2324                 if (fdp->fd_cdir == olddp) {
 2325                         vref(newdp);
 2326                         fdp->fd_cdir = newdp;
 2327                         nrele++;
 2328                 }
 2329                 if (fdp->fd_rdir == olddp) {
 2330                         vref(newdp);
 2331                         fdp->fd_rdir = newdp;
 2332                         nrele++;
 2333                 }
 2334                 FILEDESC_UNLOCK_FAST(fdp);
 2335                 fddrop(fdp);
 2336                 while (nrele--)
 2337                         vrele(olddp);
 2338         }
 2339         sx_sunlock(&allproc_lock);
 2340         if (rootvnode == olddp) {
 2341                 vrele(rootvnode);
 2342                 vref(newdp);
 2343                 rootvnode = newdp;
 2344         }
 2345 }
 2346 
 2347 struct filedesc_to_leader *
 2348 filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
 2349 {
 2350         struct filedesc_to_leader *fdtol;
 2351 
 2352         MALLOC(fdtol, struct filedesc_to_leader *,
 2353                sizeof(struct filedesc_to_leader),
 2354                M_FILEDESC_TO_LEADER,
 2355                M_WAITOK);
 2356         fdtol->fdl_refcount = 1;
 2357         fdtol->fdl_holdcount = 0;
 2358         fdtol->fdl_wakeup = 0;
 2359         fdtol->fdl_leader = leader;
 2360         if (old != NULL) {
 2361                 FILEDESC_LOCK(fdp);
 2362                 fdtol->fdl_next = old->fdl_next;
 2363                 fdtol->fdl_prev = old;
 2364                 old->fdl_next = fdtol;
 2365                 fdtol->fdl_next->fdl_prev = fdtol;
 2366                 FILEDESC_UNLOCK(fdp);
 2367         } else {
 2368                 fdtol->fdl_next = fdtol;
 2369                 fdtol->fdl_prev = fdtol;
 2370         }
 2371         return (fdtol);
 2372 }
 2373 
 2374 /*
 2375  * Get file structures.
 2376  */
 2377 static int
 2378 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
 2379 {
 2380         struct xfile xf;
 2381         struct filedesc *fdp;
 2382         struct file *fp;
 2383         struct proc *p;
 2384         int error, n;
 2385 
 2386         /*
 2387          * Note: because the number of file descriptors is calculated
 2388          * in different ways for sizing vs returning the data,
 2389          * there is information leakage from the first loop.  However,
 2390          * it is of a similar order of magnitude to the leakage from
 2391          * global system statistics such as kern.openfiles.
 2392          */
 2393         error = sysctl_wire_old_buffer(req, 0);
 2394         if (error != 0)
 2395                 return (error);
 2396         if (req->oldptr == NULL) {
 2397                 n = 16;         /* A slight overestimate. */
 2398                 sx_slock(&filelist_lock);
 2399                 LIST_FOREACH(fp, &filehead, f_list) {
 2400                         /*
 2401                          * We should grab the lock, but this is an
 2402                          * estimate, so does it really matter?
 2403                          */
 2404                         /* mtx_lock(fp->f_mtxp); */
 2405                         n += fp->f_count;
 2406                         /* mtx_unlock(fp->f_mtxp); */
 2407                 }
 2408                 sx_sunlock(&filelist_lock);
 2409                 return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
 2410         }
 2411         error = 0;
 2412         bzero(&xf, sizeof(xf));
 2413         xf.xf_size = sizeof(xf);
 2414         sx_slock(&allproc_lock);
 2415         LIST_FOREACH(p, &allproc, p_list) {
 2416                 if (p->p_state == PRS_NEW)
 2417                         continue;
 2418                 PROC_LOCK(p);
 2419                 if (p_cansee(req->td, p) != 0) {
 2420                         PROC_UNLOCK(p);
 2421                         continue;
 2422                 }
 2423                 xf.xf_pid = p->p_pid;
 2424                 xf.xf_uid = p->p_ucred->cr_uid;
 2425                 PROC_UNLOCK(p);
 2426                 fdp = fdhold(p);
 2427                 if (fdp == NULL)
 2428                         continue;
 2429                 FILEDESC_LOCK_FAST(fdp);
 2430                 for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) {
 2431                         if ((fp = fdp->fd_ofiles[n]) == NULL)
 2432                                 continue;
 2433                         xf.xf_fd = n;
 2434                         xf.xf_file = fp;
 2435                         xf.xf_data = fp->f_data;
 2436                         xf.xf_vnode = fp->f_vnode;
 2437                         xf.xf_type = fp->f_type;
 2438                         xf.xf_count = fp->f_count;
 2439                         xf.xf_msgcount = fp->f_msgcount;
 2440                         xf.xf_offset = fp->f_offset;
 2441                         xf.xf_flag = fp->f_flag;
 2442                         error = SYSCTL_OUT(req, &xf, sizeof(xf));
 2443                         if (error)
 2444                                 break;
 2445                 }
 2446                 FILEDESC_UNLOCK_FAST(fdp);
 2447                 fddrop(fdp);
 2448                 if (error)
 2449                         break;
 2450         }
 2451         sx_sunlock(&allproc_lock);
 2452         return (error);
 2453 }
 2454 
 2455 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
 2456     0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
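
/*
 * Hedged userland sketch (not part of this file): reading the "kern.file"
 * node exported above with sysctlbyname(3).  Each record is a struct xfile;
 * on FreeBSD of this vintage the structure is declared in <sys/user.h>
 * (exact header requirements may vary).
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdlib.h>

static struct xfile *
read_open_file_table(size_t *nrecp)
{
	struct xfile *buf;
	size_t len = 0;

	if (sysctlbyname("kern.file", NULL, &len, NULL, 0) == -1)
		return (NULL);
	if ((buf = malloc(len)) == NULL)
		return (NULL);
	if (sysctlbyname("kern.file", buf, &len, NULL, 0) == -1) {
		free(buf);
		return (NULL);
	}
	*nrecp = len / sizeof(*buf);
	return (buf);
}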
 2457 
 2458 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
 2459     &maxfilesperproc, 0, "Maximum files allowed open per process");
 2460 
 2461 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
 2462     &maxfiles, 0, "Maximum number of files");
 2463 
 2464 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
 2465     &nfiles, 0, "System-wide number of open files");
 2466 
 2467 /* ARGSUSED*/
 2468 static void
 2469 filelistinit(void *dummy)
 2470 {
 2471 
 2472         file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
 2473             NULL, NULL, UMA_ALIGN_PTR, 0);
 2474         sx_init(&filelist_lock, "filelist lock");
 2475         mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
 2476         mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF);
 2477 }
 2478 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL)
 2479 
 2480 /*-------------------------------------------------------------------*/
 2481 
 2482 static int
 2483 badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td)
 2484 {
 2485 
 2486         return (EBADF);
 2487 }
 2488 
 2489 static int
 2490 badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td)
 2491 {
 2492 
 2493         return (EBADF);
 2494 }
 2495 
 2496 static int
 2497 badfo_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td)
 2498 {
 2499 
 2500         return (0);
 2501 }
 2502 
 2503 static int
 2504 badfo_kqfilter(struct file *fp, struct knote *kn)
 2505 {
 2506 
 2507         return (0);
 2508 }
 2509 
 2510 static int
 2511 badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td)
 2512 {
 2513 
 2514         return (EBADF);
 2515 }
 2516 
 2517 static int
 2518 badfo_close(struct file *fp, struct thread *td)
 2519 {
 2520 
 2521         return (EBADF);
 2522 }
 2523 
 2524 struct fileops badfileops = {
 2525         .fo_read = badfo_readwrite,
 2526         .fo_write = badfo_readwrite,
 2527         .fo_ioctl = badfo_ioctl,
 2528         .fo_poll = badfo_poll,
 2529         .fo_kqfilter = badfo_kqfilter,
 2530         .fo_stat = badfo_stat,
 2531         .fo_close = badfo_close,
 2532 };
 2533 
 2534 
 2535 /*-------------------------------------------------------------------*/
 2536 
 2537 /*
 2538  * File Descriptor pseudo-device driver (/dev/fd/).
 2539  *
 2540  * Opening minor device N dup()s the file (if any) connected to file
 2541  * descriptor N belonging to the calling process.  Note that this driver
 2542  * consists of only the ``open()'' routine, because all subsequent
 2543  * references to this file will go directly to the other driver.
 2544  *
 2545  * XXX: we could give this one a cloning event handler if necessary.
 2546  */
 2547 
 2548 /* ARGSUSED */
 2549 static int
 2550 fdopen(struct cdev *dev, int mode, int type, struct thread *td)
 2551 {
 2552 
 2553         /*
 2554          * XXX Kludge: set curthread->td_dupfd to contain the value of
 2555          * the file descriptor being sought for duplication. The error
 2556          * return ensures that the vnode for this device will be released
 2557          * by vn_open. Open will detect this special error and take the
 2558          * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
 2559          * will simply report the error.
 2560          */
 2561         td->td_dupfd = dev2unit(dev);
 2562         return (ENODEV);
 2563 }
 2564 
 2565 static struct cdevsw fildesc_cdevsw = {
 2566         .d_version =    D_VERSION,
 2567         .d_flags =      D_NEEDGIANT,
 2568         .d_open =       fdopen,
 2569         .d_name =       "FD",
 2570 };
 2571 
 2572 static void
 2573 fildesc_drvinit(void *unused)
 2574 {
 2575         struct cdev *dev;
 2576 
 2577         dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0");
 2578         make_dev_alias(dev, "stdin");
 2579         dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1");
 2580         make_dev_alias(dev, "stdout");
 2581         dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2");
 2582         make_dev_alias(dev, "stderr");
 2583 }
 2584 
 2585 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL)
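
/*
 * Hedged userland sketch (not part of this file): opening /dev/fd/N behaves
 * like dup(N), as described in the driver comment above.  Opening
 * "/dev/fd/0" yields a new descriptor referencing the same open file as
 * standard input.
 */
#include <fcntl.h>
#include <unistd.h>

static int
dup_stdin_via_devfd(void)
{

	/* Comparable to dup(STDIN_FILENO). */
	return (open("/dev/fd/0", O_RDONLY));
}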
