FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_descrip.c

    1 /*
    2  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
    3  * 
    4  * This code is derived from software contributed to The DragonFly Project
    5  * by Jeffrey Hsu.
    6  * 
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in
   15  *    the documentation and/or other materials provided with the
   16  *    distribution.
   17  * 3. Neither the name of The DragonFly Project nor the names of its
   18  *    contributors may be used to endorse or promote products derived
   19  *    from this software without specific, prior written permission.
   20  * 
   21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
   25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
   27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *
   35  * Copyright (c) 1982, 1986, 1989, 1991, 1993
   36  *      The Regents of the University of California.  All rights reserved.
   37  * (c) UNIX System Laboratories, Inc.
   38  * All or some portions of this file are derived from material licensed
   39  * to the University of California by American Telephone and Telegraph
   40  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   41  * the permission of UNIX System Laboratories, Inc.
   42  *
   43  * Redistribution and use in source and binary forms, with or without
   44  * modification, are permitted provided that the following conditions
   45  * are met:
   46  * 1. Redistributions of source code must retain the above copyright
   47  *    notice, this list of conditions and the following disclaimer.
   48  * 2. Redistributions in binary form must reproduce the above copyright
   49  *    notice, this list of conditions and the following disclaimer in the
   50  *    documentation and/or other materials provided with the distribution.
   51  * 3. Neither the name of the University nor the names of its contributors
   52  *    may be used to endorse or promote products derived from this software
   53  *    without specific prior written permission.
   54  *
   55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   65  * SUCH DAMAGE.
   66  *
   67  *      @(#)kern_descrip.c      8.6 (Berkeley) 4/19/94
   68  * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
   69  */
   70 
   71 #include "opt_compat.h"
   72 #include <sys/param.h>
   73 #include <sys/systm.h>
   74 #include <sys/malloc.h>
   75 #include <sys/sysproto.h>
   76 #include <sys/conf.h>
   77 #include <sys/device.h>
   78 #include <sys/file.h>
   79 #include <sys/filedesc.h>
   80 #include <sys/kernel.h>
   81 #include <sys/sysctl.h>
   82 #include <sys/vnode.h>
   83 #include <sys/proc.h>
   84 #include <sys/nlookup.h>
   85 #include <sys/stat.h>
   86 #include <sys/filio.h>
   87 #include <sys/fcntl.h>
   88 #include <sys/unistd.h>
   89 #include <sys/resourcevar.h>
   90 #include <sys/event.h>
   91 #include <sys/kern_syscall.h>
   92 #include <sys/kcore.h>
   93 #include <sys/kinfo.h>
   94 #include <sys/un.h>
   95 
   96 #include <vm/vm.h>
   97 #include <vm/vm_extern.h>
   98 
   99 #include <sys/thread2.h>
  100 #include <sys/file2.h>
  101 #include <sys/spinlock2.h>
  102 
  103 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
  104 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
  105 static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
  106 static void ffree(struct file *fp);
  107 
  108 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
  109 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
  110                      "file desc to leader structures");
  111 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
  112 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
  113 
  114 static struct krate krate_uidinfo = { .freq = 1 };
  115 
  116 static   d_open_t  fdopen;
  117 #define NUMFDESC 64
  118 
  119 #define CDEV_MAJOR 22
  120 static struct dev_ops fildesc_ops = {
  121         { "FD", 0, 0 },
  122         .d_open =       fdopen,
  123 };
  124 
  125 /*
  126  * Descriptor management.
  127  */
  128 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
  129 static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
  130 static int nfiles;              /* actual number of open files */
  131 extern int cmask;       
  132 
  133 /*
  134  * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
  135  *
  136  * MPSAFE - must be called with fdp->fd_spin exclusively held
  137  */
  138 static __inline
  139 void
  140 fdfixup_locked(struct filedesc *fdp, int fd)
  141 {
  142         if (fd < fdp->fd_freefile) {
  143                fdp->fd_freefile = fd;
  144         }
  145         while (fdp->fd_lastfile >= 0 &&
  146                fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
  147                fdp->fd_files[fdp->fd_lastfile].reserved == 0
  148         ) {
  149                 --fdp->fd_lastfile;
  150         }
  151 }
  152 
  153 /*
  154  * System calls on descriptors.
  155  *
  156  * MPSAFE
  157  */
  158 int
  159 sys_getdtablesize(struct getdtablesize_args *uap) 
  160 {
  161         struct proc *p = curproc;
  162         struct plimit *limit = p->p_limit;
  163         int dtsize;
  164 
  165         spin_lock(&limit->p_spin);
  166         if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
  167                 dtsize = INT_MAX;
  168         else
  169                 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
  170         spin_unlock(&limit->p_spin);
  171 
  172         if (dtsize > maxfilesperproc)
  173                 dtsize = maxfilesperproc;
  174         if (dtsize < minfilesperproc)
  175                 dtsize = minfilesperproc;
  176         if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
  177                 dtsize = maxfilesperuser;
  178         uap->sysmsg_result = dtsize;
  179         return (0);
  180 }
  181 
  182 /*
  183  * Duplicate a file descriptor to a particular value.
  184  *
  185  * note: keep in mind that a potential race condition exists when closing
  186  * descriptors from a shared descriptor table (via rfork).
  187  *
  188  * MPSAFE
  189  */
  190 int
  191 sys_dup2(struct dup2_args *uap)
  192 {
  193         int error;
  194         int fd = 0;
  195 
  196         error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
  197         uap->sysmsg_fds[0] = fd;
  198 
  199         return (error);
  200 }
  201 
  202 /*
  203  * Duplicate a file descriptor.
  204  *
  205  * MPSAFE
  206  */
  207 int
  208 sys_dup(struct dup_args *uap)
  209 {
  210         int error;
  211         int fd = 0;
  212 
  213         error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
  214         uap->sysmsg_fds[0] = fd;
  215 
  216         return (error);
  217 }
  218 
  219 /*
  220  * MPALMOSTSAFE - acquires mplock for fp operations
  221  */
  222 int
  223 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
  224 {
  225         struct thread *td = curthread;
  226         struct proc *p = td->td_proc;
  227         struct file *fp;
  228         struct vnode *vp;
  229         u_int newmin;
  230         u_int oflags;
  231         u_int nflags;
  232         int tmp, error, flg = F_POSIX;
  233 
  234         KKASSERT(p);
  235 
  236         /*
  237          * Operations on file descriptors that do not require a file pointer.
  238          */
  239         switch (cmd) {
  240         case F_GETFD:
  241                 error = fgetfdflags(p->p_fd, fd, &tmp);
  242                 if (error == 0)
  243                         dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
  244                 return (error);
  245 
  246         case F_SETFD:
  247                 if (dat->fc_cloexec & FD_CLOEXEC)
  248                         error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
  249                 else
  250                         error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
  251                 return (error);
  252         case F_DUPFD:
  253                 newmin = dat->fc_fd;
  254                 error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
  255                 return (error);
  256         case F_DUP2FD:
  257                 newmin = dat->fc_fd;
  258                 error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
  259                 return (error);
  260         case F_DUPFD_CLOEXEC:
  261                 newmin = dat->fc_fd;
  262                 error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC, fd, newmin,
  263                                  &dat->fc_fd);
  264                 return (error);
  265         case F_DUP2FD_CLOEXEC:
  266                 newmin = dat->fc_fd;
  267                 error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
  268                                  &dat->fc_fd);
  269                 return (error);
  270         default:
  271                 break;
  272         }
  273 
  274         /*
  275          * Operations on file pointers
  276          */
  277         if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
  278                 return (EBADF);
  279 
  280         switch (cmd) {
  281         case F_GETFL:
  282                 dat->fc_flags = OFLAGS(fp->f_flag);
  283                 error = 0;
  284                 break;
  285 
  286         case F_SETFL:
  287                 oflags = fp->f_flag;
  288                 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
  289                 nflags |= oflags & ~FCNTLFLAGS;
  290 
  291                 error = 0;
  292                 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
  293                         error = EINVAL;
  294                 if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
  295                         tmp = nflags & FASYNC;
  296                         error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
  297                                          cred, NULL);
  298                 }
  299                 if (error == 0)
  300                         fp->f_flag = nflags;
  301                 break;
  302 
  303         case F_GETOWN:
  304                 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
  305                                  cred, NULL);
  306                 break;
  307 
  308         case F_SETOWN:
  309                 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
  310                                  cred, NULL);
  311                 break;
  312 
  313         case F_SETLKW:
  314                 flg |= F_WAIT;
  315                 /* Fall into F_SETLK */
  316 
  317         case F_SETLK:
  318                 if (fp->f_type != DTYPE_VNODE) {
  319                         error = EBADF;
  320                         break;
  321                 }
  322                 vp = (struct vnode *)fp->f_data;
  323 
  324                 /*
  325                  * copyin/lockop may block
  326                  */
  327                 if (dat->fc_flock.l_whence == SEEK_CUR)
  328                         dat->fc_flock.l_start += fp->f_offset;
  329 
  330                 switch (dat->fc_flock.l_type) {
  331                 case F_RDLCK:
  332                         if ((fp->f_flag & FREAD) == 0) {
  333                                 error = EBADF;
  334                                 break;
  335                         }
  336                         if ((p->p_leader->p_flags & P_ADVLOCK) == 0) {
  337                                 lwkt_gettoken(&p->p_leader->p_token);
  338                                 p->p_leader->p_flags |= P_ADVLOCK;
  339                                 lwkt_reltoken(&p->p_leader->p_token);
  340                         }
  341                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
  342                             &dat->fc_flock, flg);
  343                         break;
  344                 case F_WRLCK:
  345                         if ((fp->f_flag & FWRITE) == 0) {
  346                                 error = EBADF;
  347                                 break;
  348                         }
  349                         if ((p->p_leader->p_flags & P_ADVLOCK) == 0) {
  350                                 lwkt_gettoken(&p->p_leader->p_token);
  351                                 p->p_leader->p_flags |= P_ADVLOCK;
  352                                 lwkt_reltoken(&p->p_leader->p_token);
  353                         }
  354                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
  355                             &dat->fc_flock, flg);
  356                         break;
  357                 case F_UNLCK:
  358                         error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
  359                                 &dat->fc_flock, F_POSIX);
  360                         break;
  361                 default:
  362                         error = EINVAL;
  363                         break;
  364                 }
  365 
  366                 /*
  367                  * It is possible to race a close() on the descriptor while
  368                  * we were blocked getting the lock.  If this occurs the
  369                  * close might not have caught the lock.
  370                  */
  371                 if (checkfdclosed(p->p_fd, fd, fp)) {
  372                         dat->fc_flock.l_whence = SEEK_SET;
  373                         dat->fc_flock.l_start = 0;
  374                         dat->fc_flock.l_len = 0;
  375                         dat->fc_flock.l_type = F_UNLCK;
  376                         (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
  377                                            F_UNLCK, &dat->fc_flock, F_POSIX);
  378                 }
  379                 break;
  380 
  381         case F_GETLK:
  382                 if (fp->f_type != DTYPE_VNODE) {
  383                         error = EBADF;
  384                         break;
  385                 }
  386                 vp = (struct vnode *)fp->f_data;
  387                 /*
  388                  * copyin/lockop may block
  389                  */
  390                 if (dat->fc_flock.l_type != F_RDLCK &&
  391                     dat->fc_flock.l_type != F_WRLCK &&
  392                     dat->fc_flock.l_type != F_UNLCK) {
  393                         error = EINVAL;
  394                         break;
  395                 }
  396                 if (dat->fc_flock.l_whence == SEEK_CUR)
  397                         dat->fc_flock.l_start += fp->f_offset;
  398                 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
  399                             &dat->fc_flock, F_POSIX);
  400                 break;
  401         default:
  402                 error = EINVAL;
  403                 break;
  404         }
  405 
  406         fdrop(fp);
  407         return (error);
  408 }
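
/*
 * Userland view of the F_SETLK/F_SETLKW path above (illustrative sketch,
 * error handling abbreviated).  F_SETLKW is F_SETLK with F_WAIT set, so
 * the first call blocks until the write lock on the first 100 bytes is
 * granted; F_UNLCK releases it through the same VOP_ADVLOCK path.
 *
 *      struct flock fl;
 *
 *      fl.l_type   = F_WRLCK;
 *      fl.l_whence = SEEK_SET;
 *      fl.l_start  = 0;
 *      fl.l_len    = 100;
 *      if (fcntl(fd, F_SETLKW, &fl) == -1)
 *              err(1, "F_SETLKW");
 *      fl.l_type = F_UNLCK;
 *      (void)fcntl(fd, F_SETLK, &fl);
 */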
  409 
  410 /*
  411  * The file control system call.
  412  *
  413  * MPSAFE
  414  */
  415 int
  416 sys_fcntl(struct fcntl_args *uap)
  417 {
  418         union fcntl_dat dat;
  419         int error;
  420 
  421         switch (uap->cmd) {
  422         case F_DUPFD:
  423         case F_DUP2FD:
  424         case F_DUPFD_CLOEXEC:
  425         case F_DUP2FD_CLOEXEC:
  426                 dat.fc_fd = uap->arg;
  427                 break;
  428         case F_SETFD:
  429                 dat.fc_cloexec = uap->arg;
  430                 break;
  431         case F_SETFL:
  432                 dat.fc_flags = uap->arg;
  433                 break;
  434         case F_SETOWN:
  435                 dat.fc_owner = uap->arg;
  436                 break;
  437         case F_SETLKW:
  438         case F_SETLK:
  439         case F_GETLK:
  440                 error = copyin((caddr_t)uap->arg, &dat.fc_flock,
  441                                sizeof(struct flock));
  442                 if (error)
  443                         return (error);
  444                 break;
  445         }
  446 
  447         error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);
  448 
  449         if (error == 0) {
  450                 switch (uap->cmd) {
  451                 case F_DUPFD:
  452                 case F_DUP2FD:
  453                 case F_DUPFD_CLOEXEC:
  454                 case F_DUP2FD_CLOEXEC:
  455                         uap->sysmsg_result = dat.fc_fd;
  456                         break;
  457                 case F_GETFD:
  458                         uap->sysmsg_result = dat.fc_cloexec;
  459                         break;
  460                 case F_GETFL:
  461                         uap->sysmsg_result = dat.fc_flags;
  462                         break;
  463                 case F_GETOWN:
  464                         uap->sysmsg_result = dat.fc_owner;
  465                         break;
  466                 case F_GETLK:
  467                         error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
  468                             sizeof(struct flock));
  469                         break;
  470                 }
  471         }
  472 
  473         return (error);
  474 }
  475 
  476 /*
  477  * Common code for dup, dup2, and fcntl(F_DUPFD).
  478  *
  479  * There are three type flags: DUP_FIXED, DUP_VARIABLE, and DUP_CLOEXEC.
  480  * The first two flags are mutually exclusive, and the third is optional.
  481  * DUP_FIXED tells kern_dup() to destructively dup over an existing file
  482  * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
  483  * to find the lowest unused file descriptor that is greater than or
  484  * equal to "new".  DUP_CLOEXEC, which works with either of the first
  485  * two flags, sets the close-on-exec flag on the "new" file descriptor.
  486  *
  487  * MPSAFE
  488  */
  489 int
  490 kern_dup(int flags, int old, int new, int *res)
  491 {
  492         struct thread *td = curthread;
  493         struct proc *p = td->td_proc;
  494         struct filedesc *fdp = p->p_fd;
  495         struct file *fp;
  496         struct file *delfp;
  497         int oldflags;
  498         int holdleaders;
  499         int dtsize;
  500         int error, newfd;
  501 
  502         /*
  503          * Verify that we have a valid descriptor to dup from and
  504          * possibly to dup to.
  505          *
  506          * NOTE: maxfilesperuser is not applicable to dup()
  507          */
  508 retry:
  509         if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
  510                 dtsize = INT_MAX;
  511         else
  512                 dtsize = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
  513         if (dtsize > maxfilesperproc)
  514                 dtsize = maxfilesperproc;
  515         if (dtsize < minfilesperproc)
  516                 dtsize = minfilesperproc;
  517 
  518         if (new < 0 || new > dtsize)
  519                 return (EINVAL);
  520 
  521         spin_lock(&fdp->fd_spin);
  522         if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
  523                 spin_unlock(&fdp->fd_spin);
  524                 return (EBADF);
  525         }
  526         if ((flags & DUP_FIXED) && old == new) {
  527                 *res = new;
  528                 if (flags & DUP_CLOEXEC)
  529                         fdp->fd_files[new].fileflags |= UF_EXCLOSE;
  530                 spin_unlock(&fdp->fd_spin);
  531                 return (0);
  532         }
  533         fp = fdp->fd_files[old].fp;
  534         oldflags = fdp->fd_files[old].fileflags;
  535         fhold(fp);      /* MPSAFE - can be called with a spinlock held */
  536 
  537         /*
  538          * Allocate a new descriptor if DUP_VARIABLE, or expand the table
  539          * if the requested descriptor is beyond the current table size.
  540          *
  541          * This can block.  Retry if the source descriptor no longer matches
  542          * or if our expectation in the expansion case races.
  543          *
   544          * If we are not expanding or allocating a new descriptor, then reset
  545          * the target descriptor to a reserved state so we have a uniform
  546          * setup for the next code block.
  547          */
  548         if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
  549                 spin_unlock(&fdp->fd_spin);
  550                 error = fdalloc(p, new, &newfd);
  551                 spin_lock(&fdp->fd_spin);
  552                 if (error) {
  553                         spin_unlock(&fdp->fd_spin);
  554                         fdrop(fp);
  555                         return (error);
  556                 }
  557                 /*
  558                  * Check for ripout
  559                  */
  560                 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
  561                         fsetfd_locked(fdp, NULL, newfd);
  562                         spin_unlock(&fdp->fd_spin);
  563                         fdrop(fp);
  564                         goto retry;
  565                 }
  566                 /*
  567                  * Check for expansion race
  568                  */
  569                 if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
  570                         fsetfd_locked(fdp, NULL, newfd);
  571                         spin_unlock(&fdp->fd_spin);
  572                         fdrop(fp);
  573                         goto retry;
  574                 }
  575                 /*
  576                  * Check for ripout, newfd reused old (this case probably
  577                  * can't occur).
  578                  */
  579                 if (old == newfd) {
  580                         fsetfd_locked(fdp, NULL, newfd);
  581                         spin_unlock(&fdp->fd_spin);
  582                         fdrop(fp);
  583                         goto retry;
  584                 }
  585                 new = newfd;
  586                 delfp = NULL;
  587         } else {
  588                 if (fdp->fd_files[new].reserved) {
  589                         spin_unlock(&fdp->fd_spin);
  590                         fdrop(fp);
  591                         kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
  592                         tsleep(fdp, 0, "fdres", hz);
  593                         goto retry;
  594                 }
  595 
  596                 /*
  597                  * If the target descriptor was never allocated we have
  598                  * to allocate it.  If it was we have to clean out the
  599                  * old descriptor.  delfp inherits the ref from the 
  600                  * descriptor table.
  601                  */
  602                 delfp = fdp->fd_files[new].fp;
  603                 fdp->fd_files[new].fp = NULL;
  604                 fdp->fd_files[new].reserved = 1;
  605                 if (delfp == NULL) {
  606                         fdreserve_locked(fdp, new, 1);
  607                         if (new > fdp->fd_lastfile)
  608                                 fdp->fd_lastfile = new;
  609                 }
  610 
  611         }
  612 
  613         /*
  614          * NOTE: still holding an exclusive spinlock
  615          */
  616 
  617         /*
   618          * If a descriptor is being overwritten we may have to tell
  619          * fdfree() to sleep to ensure that all relevant process
  620          * leaders can be traversed in closef().
  621          */
  622         if (delfp != NULL && p->p_fdtol != NULL) {
  623                 fdp->fd_holdleaderscount++;
  624                 holdleaders = 1;
  625         } else {
  626                 holdleaders = 0;
  627         }
  628         KASSERT(delfp == NULL || (flags & DUP_FIXED),
  629                 ("dup() picked an open file"));
  630 
  631         /*
  632          * Duplicate the source descriptor, update lastfile.  If the new
  633          * descriptor was not allocated and we aren't replacing an existing
  634          * descriptor we have to mark the descriptor as being in use.
  635          *
  636          * The fd_files[] array inherits fp's hold reference.
  637          */
  638         fsetfd_locked(fdp, fp, new);
  639         if ((flags & DUP_CLOEXEC) != 0)
  640                 fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
  641         else
  642                 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
  643         spin_unlock(&fdp->fd_spin);
  644         fdrop(fp);
  645         *res = new;
  646 
  647         /*
  648          * If we dup'd over a valid file, we now own the reference to it
  649          * and must dispose of it using closef() semantics (as if a
  650          * close() were performed on it).
  651          */
  652         if (delfp) {
  653                 if (SLIST_FIRST(&delfp->f_klist))
  654                         knote_fdclose(delfp, fdp, new);
  655                 closef(delfp, p);
  656                 if (holdleaders) {
  657                         spin_lock(&fdp->fd_spin);
  658                         fdp->fd_holdleaderscount--;
  659                         if (fdp->fd_holdleaderscount == 0 &&
  660                             fdp->fd_holdleaderswakeup != 0) {
  661                                 fdp->fd_holdleaderswakeup = 0;
  662                                 spin_unlock(&fdp->fd_spin);
  663                                 wakeup(&fdp->fd_holdleaderscount);
  664                         } else {
  665                                 spin_unlock(&fdp->fd_spin);
  666                         }
  667                 }
  668         }
  669         return (0);
  670 }
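
/*
 * How the user-visible duplication calls map onto the flags handled
 * above (sketch; see sys_dup(), sys_dup2() and kern_fcntl()):
 *
 *      fd2 = dup(fd);                         kern_dup(DUP_VARIABLE, fd, 0, ...)
 *      dup2(fd, 10);                          kern_dup(DUP_FIXED, fd, 10, ...)
 *      fd2 = fcntl(fd, F_DUPFD, 10);          kern_dup(DUP_VARIABLE, fd, 10, ...)
 *      fd2 = fcntl(fd, F_DUPFD_CLOEXEC, 10);  kern_dup(DUP_VARIABLE|DUP_CLOEXEC, ...)
 *
 * A dup2() onto an already-open descriptor takes ownership of the
 * displaced file and disposes of it with closef(), which is why delfp
 * and the fdtol leader accounting are handled above.
 */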
  671 
  672 /*
  673  * If sigio is on the list associated with a process or process group,
  674  * disable signalling from the device, remove sigio from the list and
  675  * free sigio.
  676  *
  677  * MPSAFE
  678  */
  679 void
  680 funsetown(struct sigio **sigiop)
  681 {
  682         struct pgrp *pgrp;
  683         struct proc *p;
  684         struct sigio *sigio;
  685 
  686         if ((sigio = *sigiop) != NULL) {
  687                 lwkt_gettoken(&sigio_token);    /* protect sigio */
  688                 KKASSERT(sigiop == sigio->sio_myref);
  689                 sigio = *sigiop;
  690                 *sigiop = NULL;
  691                 lwkt_reltoken(&sigio_token);
  692         }
  693         if (sigio == NULL)
  694                 return;
  695 
  696         if (sigio->sio_pgid < 0) {
  697                 pgrp = sigio->sio_pgrp;
  698                 sigio->sio_pgrp = NULL;
  699                 lwkt_gettoken(&pgrp->pg_token);
  700                 SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
  701                 lwkt_reltoken(&pgrp->pg_token);
  702                 pgrel(pgrp);
  703         } else /* if ((*sigiop)->sio_pgid > 0) */ {
  704                 p = sigio->sio_proc;
  705                 sigio->sio_proc = NULL;
  706                 PHOLD(p);
  707                 lwkt_gettoken(&p->p_token);
  708                 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
  709                 lwkt_reltoken(&p->p_token);
  710                 PRELE(p);
  711         }
  712         crfree(sigio->sio_ucred);
  713         sigio->sio_ucred = NULL;
  714         kfree(sigio, M_SIGIO);
  715 }
  716 
  717 /*
  718  * Free a list of sigio structures.  Caller is responsible for ensuring
  719  * that the list is MPSAFE.
  720  *
  721  * MPSAFE
  722  */
  723 void
  724 funsetownlst(struct sigiolst *sigiolst)
  725 {
  726         struct sigio *sigio;
  727 
  728         while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
  729                 funsetown(sigio->sio_myref);
  730 }
  731 
  732 /*
  733  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
  734  *
  735  * After permission checking, add a sigio structure to the sigio list for
  736  * the process or process group.
  737  *
  738  * MPSAFE
  739  */
  740 int
  741 fsetown(pid_t pgid, struct sigio **sigiop)
  742 {
  743         struct proc *proc = NULL;
  744         struct pgrp *pgrp = NULL;
  745         struct sigio *sigio;
  746         int error;
  747 
  748         if (pgid == 0) {
  749                 funsetown(sigiop);
  750                 return (0);
  751         }
  752 
  753         if (pgid > 0) {
  754                 proc = pfind(pgid);
  755                 if (proc == NULL) {
  756                         error = ESRCH;
  757                         goto done;
  758                 }
  759 
  760                 /*
  761                  * Policy - Don't allow a process to FSETOWN a process
  762                  * in another session.
  763                  *
  764                  * Remove this test to allow maximum flexibility or
  765                  * restrict FSETOWN to the current process or process
  766                  * group for maximum safety.
  767                  */
  768                 if (proc->p_session != curproc->p_session) {
  769                         error = EPERM;
  770                         goto done;
  771                 }
  772         } else /* if (pgid < 0) */ {
  773                 pgrp = pgfind(-pgid);
  774                 if (pgrp == NULL) {
  775                         error = ESRCH;
  776                         goto done;
  777                 }
  778 
  779                 /*
  780                  * Policy - Don't allow a process to FSETOWN a process
  781                  * in another session.
  782                  *
  783                  * Remove this test to allow maximum flexibility or
  784                  * restrict FSETOWN to the current process or process
  785                  * group for maximum safety.
  786                  */
  787                 if (pgrp->pg_session != curproc->p_session) {
  788                         error = EPERM;
  789                         goto done;
  790                 }
  791         }
  792         sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
  793         if (pgid > 0) {
  794                 KKASSERT(pgrp == NULL);
  795                 lwkt_gettoken(&proc->p_token);
  796                 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
  797                 sigio->sio_proc = proc;
  798                 lwkt_reltoken(&proc->p_token);
  799         } else {
  800                 KKASSERT(proc == NULL);
  801                 lwkt_gettoken(&pgrp->pg_token);
  802                 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
  803                 sigio->sio_pgrp = pgrp;
  804                 lwkt_reltoken(&pgrp->pg_token);
  805                 pgrp = NULL;
  806         }
  807         sigio->sio_pgid = pgid;
  808         sigio->sio_ucred = crhold(curthread->td_ucred);
  809         /* It would be convenient if p_ruid was in ucred. */
  810         sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
  811         sigio->sio_myref = sigiop;
  812 
  813         lwkt_gettoken(&sigio_token);
  814         while (*sigiop)
  815                 funsetown(sigiop);
  816         *sigiop = sigio;
  817         lwkt_reltoken(&sigio_token);
  818         error = 0;
  819 done:
  820         if (pgrp)
  821                 pgrel(pgrp);
  822         if (proc)
  823                 PRELE(proc);
  824         return (error);
  825 }
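
/*
 * Typical userland sequence that reaches fsetown() above through the
 * FIOSETOWN ioctl (sketch, error handling omitted): direct SIGIO to the
 * calling process (or, with a negative argument, to a process group) and
 * then enable async notification, which flows through FIOASYNC in
 * kern_fcntl().
 *
 *      fcntl(fd, F_SETOWN, getpid());          (pgid > 0: a process)
 *      fcntl(fd, F_SETOWN, -pgid);             (pgid < 0: a process group)
 *      flags = fcntl(fd, F_GETFL, 0);
 *      fcntl(fd, F_SETFL, flags | O_ASYNC);
 */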
  826 
  827 /*
  828  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
  829  *
  830  * MPSAFE
  831  */
  832 pid_t
  833 fgetown(struct sigio **sigiop)
  834 {
  835         struct sigio *sigio;
  836         pid_t own;
  837 
  838         lwkt_gettoken_shared(&sigio_token);
  839         sigio = *sigiop;
  840         own = (sigio != NULL ? sigio->sio_pgid : 0);
  841         lwkt_reltoken(&sigio_token);
  842 
  843         return (own);
  844 }
  845 
  846 /*
  847  * Close many file descriptors.
  848  *
  849  * MPSAFE
  850  */
  851 int
  852 sys_closefrom(struct closefrom_args *uap)
  853 {
  854         return(kern_closefrom(uap->fd));
  855 }
  856 
  857 /*
   858  * Close all file descriptors greater than or equal to fd
  859  *
  860  * MPSAFE
  861  */
  862 int
  863 kern_closefrom(int fd)
  864 {
  865         struct thread *td = curthread;
  866         struct proc *p = td->td_proc;
  867         struct filedesc *fdp;
  868 
  869         KKASSERT(p);
  870         fdp = p->p_fd;
  871 
  872         if (fd < 0)
  873                 return (EINVAL);
  874 
  875         /*
  876          * NOTE: This function will skip unassociated descriptors and
  877          * reserved descriptors that have not yet been assigned.  
  878          * fd_lastfile can change as a side effect of kern_close().
  879          */
  880         spin_lock(&fdp->fd_spin);
  881         while (fd <= fdp->fd_lastfile) {
  882                 if (fdp->fd_files[fd].fp != NULL) {
  883                         spin_unlock(&fdp->fd_spin);
  884                         /* ok if this races another close */
  885                         if (kern_close(fd) == EINTR)
  886                                 return (EINTR);
  887                         spin_lock(&fdp->fd_spin);
  888                 }
  889                 ++fd;
  890         }
  891         spin_unlock(&fdp->fd_spin);
  892         return (0);
  893 }
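
/*
 * Userland equivalent (sketch): a process that wants only the standard
 * descriptors open before exec'ing a child can call
 *
 *      closefrom(3);
 *
 * which lands in kern_closefrom(3) above and closes every descriptor
 * numbered 3 or higher, skipping unassociated and reserved slots.
 */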
  894 
  895 /*
  896  * Close a file descriptor.
  897  *
  898  * MPSAFE
  899  */
  900 int
  901 sys_close(struct close_args *uap)
  902 {
  903         return(kern_close(uap->fd));
  904 }
  905 
  906 /*
  907  * MPSAFE
  908  */
  909 int
  910 kern_close(int fd)
  911 {
  912         struct thread *td = curthread;
  913         struct proc *p = td->td_proc;
  914         struct filedesc *fdp;
  915         struct file *fp;
  916         int error;
  917         int holdleaders;
  918 
  919         KKASSERT(p);
  920         fdp = p->p_fd;
  921 
  922         spin_lock(&fdp->fd_spin);
  923         if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
  924                 spin_unlock(&fdp->fd_spin);
  925                 return (EBADF);
  926         }
  927         holdleaders = 0;
  928         if (p->p_fdtol != NULL) {
  929                 /*
  930                  * Ask fdfree() to sleep to ensure that all relevant
  931                  * process leaders can be traversed in closef().
  932                  */
  933                 fdp->fd_holdleaderscount++;
  934                 holdleaders = 1;
  935         }
  936 
  937         /*
  938          * we now hold the fp reference that used to be owned by the descriptor
  939          * array.
  940          */
  941         spin_unlock(&fdp->fd_spin);
  942         if (SLIST_FIRST(&fp->f_klist))
  943                 knote_fdclose(fp, fdp, fd);
  944         error = closef(fp, p);
  945         if (holdleaders) {
  946                 spin_lock(&fdp->fd_spin);
  947                 fdp->fd_holdleaderscount--;
  948                 if (fdp->fd_holdleaderscount == 0 &&
  949                     fdp->fd_holdleaderswakeup != 0) {
  950                         fdp->fd_holdleaderswakeup = 0;
  951                         spin_unlock(&fdp->fd_spin);
  952                         wakeup(&fdp->fd_holdleaderscount);
  953                 } else {
  954                         spin_unlock(&fdp->fd_spin);
  955                 }
  956         }
  957         return (error);
  958 }
  959 
  960 /*
  961  * shutdown_args(int fd, int how)
  962  */
  963 int
  964 kern_shutdown(int fd, int how)
  965 {
  966         struct thread *td = curthread;
  967         struct proc *p = td->td_proc;
  968         struct file *fp;
  969         int error;
  970 
  971         KKASSERT(p);
  972 
  973         if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
  974                 return (EBADF);
  975         error = fo_shutdown(fp, how);
  976         fdrop(fp);
  977 
  978         return (error);
  979 }
  980 
  981 /*
  982  * MPALMOSTSAFE
  983  */
  984 int
  985 sys_shutdown(struct shutdown_args *uap)
  986 {
  987         int error;
  988 
  989         error = kern_shutdown(uap->s, uap->how);
  990 
  991         return (error);
  992 }
  993 
  994 /*
  995  * MPSAFE
  996  */
  997 int
  998 kern_fstat(int fd, struct stat *ub)
  999 {
 1000         struct thread *td = curthread;
 1001         struct proc *p = td->td_proc;
 1002         struct file *fp;
 1003         int error;
 1004 
 1005         KKASSERT(p);
 1006 
 1007         if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
 1008                 return (EBADF);
 1009         error = fo_stat(fp, ub, td->td_ucred);
 1010         fdrop(fp);
 1011 
 1012         return (error);
 1013 }
 1014 
 1015 /*
 1016  * Return status information about a file descriptor.
 1017  *
 1018  * MPSAFE
 1019  */
 1020 int
 1021 sys_fstat(struct fstat_args *uap)
 1022 {
 1023         struct stat st;
 1024         int error;
 1025 
 1026         error = kern_fstat(uap->fd, &st);
 1027 
 1028         if (error == 0)
 1029                 error = copyout(&st, uap->sb, sizeof(st));
 1030         return (error);
 1031 }
 1032 
 1033 /*
 1034  * Return pathconf information about a file descriptor.
 1035  *
 1036  * MPALMOSTSAFE
 1037  */
 1038 int
 1039 sys_fpathconf(struct fpathconf_args *uap)
 1040 {
 1041         struct thread *td = curthread;
 1042         struct proc *p = td->td_proc;
 1043         struct file *fp;
 1044         struct vnode *vp;
 1045         int error = 0;
 1046 
 1047         if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
 1048                 return (EBADF);
 1049 
 1050         switch (fp->f_type) {
 1051         case DTYPE_PIPE:
 1052         case DTYPE_SOCKET:
 1053                 if (uap->name != _PC_PIPE_BUF) {
 1054                         error = EINVAL;
 1055                 } else {
 1056                         uap->sysmsg_result = PIPE_BUF;
 1057                         error = 0;
 1058                 }
 1059                 break;
 1060         case DTYPE_FIFO:
 1061         case DTYPE_VNODE:
 1062                 vp = (struct vnode *)fp->f_data;
 1063                 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
 1064                 break;
 1065         default:
 1066                 error = EOPNOTSUPP;
 1067                 break;
 1068         }
 1069         fdrop(fp);
 1070         return(error);
 1071 }
 1072 
 1073 static int fdexpand;
 1074 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0,
 1075     "Number of times a file table has been expanded");
 1076 
 1077 /*
  1078  * Grow the file table so it can hold descriptors up through (want).
 1079  *
 1080  * The fdp's spinlock must be held exclusively on entry and may be held
 1081  * exclusively on return.  The spinlock may be cycled by the routine.
 1082  *
 1083  * MPSAFE
 1084  */
 1085 static void
 1086 fdgrow_locked(struct filedesc *fdp, int want)
 1087 {
 1088         struct fdnode *newfiles;
 1089         struct fdnode *oldfiles;
 1090         int nf, extra;
 1091 
 1092         nf = fdp->fd_nfiles;
 1093         do {
 1094                 /* nf has to be of the form 2^n - 1 */
 1095                 nf = 2 * nf + 1;
 1096         } while (nf <= want);
 1097 
 1098         spin_unlock(&fdp->fd_spin);
 1099         newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
 1100         spin_lock(&fdp->fd_spin);
 1101 
 1102         /*
 1103          * We could have raced another extend while we were not holding
 1104          * the spinlock.
 1105          */
 1106         if (fdp->fd_nfiles >= nf) {
 1107                 spin_unlock(&fdp->fd_spin);
 1108                 kfree(newfiles, M_FILEDESC);
 1109                 spin_lock(&fdp->fd_spin);
 1110                 return;
 1111         }
 1112         /*
 1113          * Copy the existing ofile and ofileflags arrays
 1114          * and zero the new portion of each array.
 1115          */
 1116         extra = nf - fdp->fd_nfiles;
 1117         bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
 1118         bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
 1119 
 1120         oldfiles = fdp->fd_files;
 1121         fdp->fd_files = newfiles;
 1122         fdp->fd_nfiles = nf;
 1123 
 1124         if (oldfiles != fdp->fd_builtin_files) {
 1125                 spin_unlock(&fdp->fd_spin);
 1126                 kfree(oldfiles, M_FILEDESC);
 1127                 spin_lock(&fdp->fd_spin);
 1128         }
 1129         fdexpand++;
 1130 }
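
/*
 * Worked example for the sizing loop above (sketch).  Table sizes are
 * kept of the form 2^n - 1 so the in-place binary tree used by fdalloc()
 * stays complete.  With fd_nfiles == 15 and want == 40:
 *
 *      nf = 2*15 + 1 = 31      (31 <= 40, keep doubling)
 *      nf = 2*31 + 1 = 63      (63 >  40, stop)
 *
 * so the table is reallocated to 63 fdnode entries.
 */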
 1131 
 1132 /*
 1133  * Number of nodes in right subtree, including the root.
 1134  */
 1135 static __inline int
 1136 right_subtree_size(int n)
 1137 {
 1138         return (n ^ (n | (n + 1)));
 1139 }
 1140 
 1141 /*
 1142  * Bigger ancestor.
 1143  */
 1144 static __inline int
 1145 right_ancestor(int n)
 1146 {
 1147         return (n | (n + 1));
 1148 }
 1149 
 1150 /*
 1151  * Smaller ancestor.
 1152  */
 1153 static __inline int
 1154 left_ancestor(int n)
 1155 {
 1156         return ((n & (n + 1)) - 1);
 1157 }
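
/*
 * Worked examples for the in-place tree helpers above (sketch).  Every
 * descriptor index doubles as a tree node and its 'allocated' field is a
 * cumulative count of allocated descriptors in that subtree (see
 * fdreserve_locked() and fdalloc()).
 *
 *      n = 5 (binary 101):
 *              right_subtree_size(5) = 5 ^ (5 | 6) = 5 ^ 7 = 2
 *              right_ancestor(5)     = 5 | 6       = 7
 *              left_ancestor(5)      = (5 & 6) - 1 = 3
 *
 *      n = 3 (binary 011):
 *              right_ancestor(3)     = 3 | 4 = 7
 *              left_ancestor(3)      = (3 & 4) - 1 = -1  (terminates the
 *                                                         fdreserve_locked loop)
 */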
 1158 
 1159 /*
  1160  * Traverse the in-place binary tree bottom-up adjusting the allocation
 1161  * count so scans can determine where free descriptors are located.
 1162  *
 1163  * MPSAFE - caller must be holding an exclusive spinlock on fdp
 1164  */
 1165 static
 1166 void
 1167 fdreserve_locked(struct filedesc *fdp, int fd, int incr)
 1168 {
 1169         while (fd >= 0) {
 1170                 fdp->fd_files[fd].allocated += incr;
 1171                 KKASSERT(fdp->fd_files[fd].allocated >= 0);
 1172                 fd = left_ancestor(fd);
 1173         }
 1174 }
 1175 
 1176 /*
 1177  * Reserve a file descriptor for the process.  If no error occurs, the
 1178  * caller MUST at some point call fsetfd() or assign a file pointer
 1179  * or dispose of the reservation.
 1180  *
 1181  * MPSAFE
 1182  */
 1183 int
 1184 fdalloc(struct proc *p, int want, int *result)
 1185 {
 1186         struct filedesc *fdp = p->p_fd;
 1187         struct uidinfo *uip;
 1188         int fd, rsize, rsum, node, lim;
 1189 
 1190         /*
 1191          * Check dtable size limit
 1192          */
 1193         spin_lock(&p->p_limit->p_spin);
 1194         if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
 1195                 lim = INT_MAX;
 1196         else
 1197                 lim = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
 1198         spin_unlock(&p->p_limit->p_spin);
 1199 
 1200         if (lim > maxfilesperproc)
 1201                 lim = maxfilesperproc;
 1202         if (lim < minfilesperproc)
 1203                 lim = minfilesperproc;
 1204         if (want >= lim)
 1205                 return (EMFILE);
 1206 
 1207         /*
 1208          * Check that the user has not run out of descriptors (non-root only).
 1209          * As a safety measure the dtable is allowed to have at least
 1210          * minfilesperproc open fds regardless of the maxfilesperuser limit.
 1211          */
 1212         if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
 1213                 uip = p->p_ucred->cr_uidinfo;
 1214                 if (uip->ui_openfiles > maxfilesperuser) {
 1215                         krateprintf(&krate_uidinfo,
 1216                                     "Warning: user %d pid %d (%s) ran out of "
 1217                                     "file descriptors (%d/%d)\n",
 1218                                     p->p_ucred->cr_uid, (int)p->p_pid,
 1219                                     p->p_comm,
 1220                                     uip->ui_openfiles, maxfilesperuser);
 1221                         return(ENFILE);
 1222                 }
 1223         }
 1224 
 1225         /*
 1226          * Grow the dtable if necessary
 1227          */
 1228         spin_lock(&fdp->fd_spin);
 1229         if (want >= fdp->fd_nfiles)
 1230                 fdgrow_locked(fdp, want);
 1231 
 1232         /*
 1233          * Search for a free descriptor starting at the higher
 1234          * of want or fd_freefile.  If that fails, consider
 1235          * expanding the ofile array.
 1236          *
 1237          * NOTE! the 'allocated' field is a cumulative recursive allocation
 1238          * count.  If we happen to see a value of 0 then we can shortcut
  1239  * our search.  Otherwise we run through the tree going
 1240          * down branches we know have free descriptor(s) until we hit a
 1241          * leaf node.  The leaf node will be free but will not necessarily
 1242          * have an allocated field of 0.
 1243          */
 1244 retry:
 1245         /* move up the tree looking for a subtree with a free node */
 1246         for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
 1247              fd = right_ancestor(fd)) {
 1248                 if (fdp->fd_files[fd].allocated == 0)
 1249                         goto found;
 1250 
 1251                 rsize = right_subtree_size(fd);
 1252                 if (fdp->fd_files[fd].allocated == rsize)
 1253                         continue;       /* right subtree full */
 1254 
 1255                 /*
 1256                  * Free fd is in the right subtree of the tree rooted at fd.
 1257                  * Call that subtree R.  Look for the smallest (leftmost)
 1258                  * subtree of R with an unallocated fd: continue moving
 1259                  * down the left branch until encountering a full left
 1260                  * subtree, then move to the right.
 1261                  */
 1262                 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
 1263                         node = fd + rsize;
 1264                         rsum += fdp->fd_files[node].allocated;
 1265                         if (fdp->fd_files[fd].allocated == rsum + rsize) {
 1266                                 fd = node;      /* move to the right */
 1267                                 if (fdp->fd_files[node].allocated == 0)
 1268                                         goto found;
 1269                                 rsum = 0;
 1270                         }
 1271                 }
 1272                 goto found;
 1273         }
 1274 
 1275         /*
 1276          * No space in current array.  Expand?
 1277          */
 1278         if (fdp->fd_nfiles >= lim) {
 1279                 spin_unlock(&fdp->fd_spin);
 1280                 return (EMFILE);
 1281         }
 1282         fdgrow_locked(fdp, want);
 1283         goto retry;
 1284 
 1285 found:
 1286         KKASSERT(fd < fdp->fd_nfiles);
 1287         if (fd > fdp->fd_lastfile)
 1288                 fdp->fd_lastfile = fd;
 1289         if (want <= fdp->fd_freefile)
 1290                 fdp->fd_freefile = fd;
 1291         *result = fd;
 1292         KKASSERT(fdp->fd_files[fd].fp == NULL);
 1293         KKASSERT(fdp->fd_files[fd].reserved == 0);
 1294         fdp->fd_files[fd].fileflags = 0;
 1295         fdp->fd_files[fd].reserved = 1;
 1296         fdreserve_locked(fdp, fd, 1);
 1297         spin_unlock(&fdp->fd_spin);
 1298         return (0);
 1299 }
 1300 
 1301 /*
 1302  * Check to see whether n user file descriptors
 1303  * are available to the process p.
 1304  *
 1305  * MPSAFE
 1306  */
 1307 int
 1308 fdavail(struct proc *p, int n)
 1309 {
 1310         struct filedesc *fdp = p->p_fd;
 1311         struct fdnode *fdnode;
 1312         int i, lim, last;
 1313 
 1314         spin_lock(&p->p_limit->p_spin);
 1315         if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
 1316                 lim = INT_MAX;
 1317         else
 1318                 lim = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
 1319         spin_unlock(&p->p_limit->p_spin);
 1320 
 1321         if (lim > maxfilesperproc)
 1322                 lim = maxfilesperproc;
 1323         if (lim < minfilesperproc)
 1324                 lim = minfilesperproc;
 1325 
 1326         spin_lock(&fdp->fd_spin);
 1327         if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
 1328                 spin_unlock(&fdp->fd_spin);
 1329                 return (1);
 1330         }
 1331         last = min(fdp->fd_nfiles, lim);
 1332         fdnode = &fdp->fd_files[fdp->fd_freefile];
 1333         for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
 1334                 if (fdnode->fp == NULL && --n <= 0) {
 1335                         spin_unlock(&fdp->fd_spin);
 1336                         return (1);
 1337                 }
 1338         }
 1339         spin_unlock(&fdp->fd_spin);
 1340         return (0);
 1341 }
 1342 
 1343 /*
 1344  * Revoke open descriptors referencing (f_data, f_type)
 1345  *
 1346  * Any revoke executed within a prison is only able to
 1347  * revoke descriptors for processes within that prison.
 1348  *
 1349  * Returns 0 on success or an error code.
 1350  */
 1351 struct fdrevoke_info {
 1352         void *data;
 1353         short type;
 1354         short unused;
 1355         int count;
 1356         int intransit;
 1357         struct ucred *cred;
 1358         struct file *nfp;
 1359 };
 1360 
 1361 static int fdrevoke_check_callback(struct file *fp, void *vinfo);
 1362 static int fdrevoke_proc_callback(struct proc *p, void *vinfo);
 1363 
 1364 int
 1365 fdrevoke(void *f_data, short f_type, struct ucred *cred)
 1366 {
 1367         struct fdrevoke_info info;
 1368         int error;
 1369 
 1370         bzero(&info, sizeof(info));
 1371         info.data = f_data;
 1372         info.type = f_type;
 1373         info.cred = cred;
 1374         error = falloc(NULL, &info.nfp, NULL);
 1375         if (error)
 1376                 return (error);
 1377 
 1378         /*
 1379          * Scan the file pointer table once.  dups do not dup file pointers,
 1380          * only descriptors, so there is no leak.  Set FREVOKED on the fps
 1381          * being revoked.
 1382          */
 1383         allfiles_scan_exclusive(fdrevoke_check_callback, &info);
 1384 
 1385         /*
 1386          * If any fps were marked track down the related descriptors
 1387          * and close them.  Any dup()s at this point will notice
 1388          * the FREVOKED already set in the fp and do the right thing.
 1389          *
 1390          * Any fps with non-zero msgcounts (aka sent over a unix-domain
 1391          * socket) bumped the intransit counter and will require a
 1392          * scan.  Races against fps leaving the socket are closed by
 1393          * the socket code checking for FREVOKED.
 1394          */
 1395         if (info.count)
 1396                 allproc_scan(fdrevoke_proc_callback, &info);
 1397         if (info.intransit)
 1398                 unp_revoke_gc(info.nfp);
 1399         fdrop(info.nfp);
 1400         return(0);
 1401 }
 1402 
 1403 /*
 1404  * Locate matching file pointers directly.
 1405  *
 1406  * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 1407  */
 1408 static int
 1409 fdrevoke_check_callback(struct file *fp, void *vinfo)
 1410 {
 1411         struct fdrevoke_info *info = vinfo;
 1412 
 1413         /*
  1414          * File pointers already flagged for revocation are skipped.
 1415          */
 1416         if (fp->f_flag & FREVOKED)
 1417                 return(0);
 1418 
 1419         /*
 1420          * If revoking from a prison file pointers created outside of
 1421          * that prison, or file pointers without creds, cannot be revoked.
 1422          */
 1423         if (info->cred->cr_prison &&
 1424             (fp->f_cred == NULL ||
 1425              info->cred->cr_prison != fp->f_cred->cr_prison)) {
 1426                 return(0);
 1427         }
 1428 
 1429         /*
 1430          * If the file pointer matches then mark it for revocation.  The
 1431          * flag is currently only used by unp_revoke_gc().
 1432          *
 1433          * info->count is a heuristic and can race in a SMP environment.
 1434          */
 1435         if (info->data == fp->f_data && info->type == fp->f_type) {
 1436                 atomic_set_int(&fp->f_flag, FREVOKED);
 1437                 info->count += fp->f_count;
 1438                 if (fp->f_msgcount)
 1439                         ++info->intransit;
 1440         }
 1441         return(0);
 1442 }
 1443 
 1444 /*
 1445  * Locate matching file pointers via process descriptor tables.
 1446  */
 1447 static int
 1448 fdrevoke_proc_callback(struct proc *p, void *vinfo)
 1449 {
 1450         struct fdrevoke_info *info = vinfo;
 1451         struct filedesc *fdp;
 1452         struct file *fp;
 1453         int n;
 1454 
 1455         if (p->p_stat == SIDL || p->p_stat == SZOMB)
 1456                 return(0);
 1457         if (info->cred->cr_prison &&
 1458             info->cred->cr_prison != p->p_ucred->cr_prison) {
 1459                 return(0);
 1460         }
 1461 
 1462         /*
 1463          * If the controlling terminal of the process matches the
 1464          * vnode being revoked we clear the controlling terminal.
 1465          *
 1466          * The normal spec_close() may not catch this because it
 1467          * uses curproc instead of p.
 1468          */
 1469         if (p->p_session && info->type == DTYPE_VNODE &&
 1470             info->data == p->p_session->s_ttyvp) {
 1471                 p->p_session->s_ttyvp = NULL;
 1472                 vrele(info->data);
 1473         }
 1474 
 1475         /*
 1476          * Softref the fdp to prevent it from being destroyed
 1477          */
 1478         spin_lock(&p->p_spin);
 1479         if ((fdp = p->p_fd) == NULL) {
 1480                 spin_unlock(&p->p_spin);
 1481                 return(0);
 1482         }
 1483         atomic_add_int(&fdp->fd_softrefs, 1);
 1484         spin_unlock(&p->p_spin);
 1485 
 1486         /*
 1487          * Locate and close any matching file descriptors.
 1488          */
 1489         spin_lock(&fdp->fd_spin);
 1490         for (n = 0; n < fdp->fd_nfiles; ++n) {
 1491                 if ((fp = fdp->fd_files[n].fp) == NULL)
 1492                         continue;
 1493                 if (fp->f_flag & FREVOKED) {
 1494                         fhold(info->nfp);
 1495                         fdp->fd_files[n].fp = info->nfp;
 1496                         spin_unlock(&fdp->fd_spin);
 1497                         knote_fdclose(fp, fdp, n);      /* XXX */
 1498                         closef(fp, p);
 1499                         spin_lock(&fdp->fd_spin);
 1500                         --info->count;
 1501                 }
 1502         }
 1503         spin_unlock(&fdp->fd_spin);
 1504         atomic_subtract_int(&fdp->fd_softrefs, 1);
 1505         return(0);
 1506 }
 1507 
 1508 /*
 1509  * falloc:
 1510  *      Create a new open file structure and reserve a file descriptor
 1511  *      for the process that refers to it.
 1512  *
 1513  *      Root creds are checked using lp, or assumed if lp is NULL.  If
 1514  *      resultfd is non-NULL then lp must also be non-NULL.  No file
 1515  *      descriptor is reserved (and no process context is needed) if
 1516  *      resultfd is NULL.
 1517  *
 1518  *      A file pointer with a refcount of 1 is returned.  Note that the
 1519  *      file pointer is NOT associated with the descriptor.  If falloc
 1520  *      returns success, fsetfd() MUST be called to either associate the
 1521  *      file pointer or clear the reservation.
 1522  *
 1523  * MPSAFE
 1524  */
 1525 int
 1526 falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
 1527 {
 1528         static struct timeval lastfail;
 1529         static int curfail;
 1530         struct file *fp;
 1531         struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
 1532         int error;
 1533 
 1534         fp = NULL;
 1535 
 1536         /*
 1537          * Handle filetable full issues and root overfill.
 1538          */
 1539         if (nfiles >= maxfiles - maxfilesrootres &&
 1540             (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
 1541                 if (ppsratecheck(&lastfail, &curfail, 1)) {
 1542                         kprintf("kern.maxfiles limit exceeded by uid %d, "
 1543                                 "please see tuning(7).\n",
 1544                                 cred->cr_ruid);
 1545                 }
 1546                 error = ENFILE;
 1547                 goto done;
 1548         }
 1549 
 1550         /*
 1551          * Allocate a new file structure.
 1552          */
 1553         fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
 1554         spin_init(&fp->f_spin);
 1555         SLIST_INIT(&fp->f_klist);
 1556         fp->f_count = 1;
 1557         fp->f_ops = &badfileops;
 1558         fp->f_seqcount = 1;
 1559         fsetcred(fp, cred);
 1560         spin_lock(&filehead_spin);
 1561         nfiles++;
 1562         LIST_INSERT_HEAD(&filehead, fp, f_list);
 1563         spin_unlock(&filehead_spin);
 1564         if (resultfd) {
 1565                 if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
 1566                         fdrop(fp);
 1567                         fp = NULL;
 1568                 }
 1569         } else {
 1570                 error = 0;
 1571         }
 1572 done:
 1573         *resultfp = fp;
 1574         return (error);
 1575 }
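
The falloc()/fsetfd() contract described above is easiest to see in caller form.
Below is a minimal, hedged sketch modeled on fdcheckstd() further down in this
file; kern_example_open() and example_setup() are hypothetical names used only
for illustration.

static int
kern_example_open(struct lwp *lp)
{
        struct filedesc *fdp = lp->lwp_proc->p_fd;
        struct file *fp;
        int fd, error;

        /* fp comes back with one reference; fd is only reserved. */
        error = falloc(lp, &fp, &fd);
        if (error)
                return (error);

        error = example_setup(fp);              /* hypothetical setup step */
        if (error == 0)
                fsetfd(fdp, fp, fd);            /* install; takes its own ref */
        else
                fsetfd(fdp, NULL, fd);          /* cancel the reservation */
        fdrop(fp);                              /* drop falloc()'s reference */
        return (error);
}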
 1576 
 1577 /*
 1578  * Check for races against a file descriptor by determining that the
 1579  * file pointer is still associated with the specified file descriptor,
 1580  * and a close is not currently in progress.
 1581  *
 1582  * MPSAFE
 1583  */
 1584 int
 1585 checkfdclosed(struct filedesc *fdp, int fd, struct file *fp)
 1586 {
 1587         int error;
 1588 
 1589         spin_lock_shared(&fdp->fd_spin);
 1590         if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
 1591                 error = EBADF;
 1592         else
 1593                 error = 0;
 1594         spin_unlock_shared(&fdp->fd_spin);
 1595         return (error);
 1596 }
 1597 
 1598 /*
 1599  * Associate a file pointer with a previously reserved file descriptor.
 1600  * This function always succeeds.
 1601  *
 1602  * If fp is NULL, the file descriptor is returned to the pool.
 1603  */
 1604 
 1605 /*
 1606  * MPSAFE (exclusive spinlock must be held on call)
 1607  */
 1608 static void
 1609 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
 1610 {
 1611         KKASSERT((unsigned)fd < fdp->fd_nfiles);
 1612         KKASSERT(fdp->fd_files[fd].reserved != 0);
 1613         if (fp) {
 1614                 fhold(fp);
 1615                 fdp->fd_files[fd].fp = fp;
 1616                 fdp->fd_files[fd].reserved = 0;
 1617         } else {
 1618                 fdp->fd_files[fd].reserved = 0;
 1619                 fdreserve_locked(fdp, fd, -1);
 1620                 fdfixup_locked(fdp, fd);
 1621         }
 1622 }
 1623 
 1624 /*
 1625  * MPSAFE
 1626  */
 1627 void
 1628 fsetfd(struct filedesc *fdp, struct file *fp, int fd)
 1629 {
 1630         spin_lock(&fdp->fd_spin);
 1631         fsetfd_locked(fdp, fp, fd);
 1632         spin_unlock(&fdp->fd_spin);
 1633 }
 1634 
 1635 /*
 1636  * MPSAFE (exclusive spinlock must be held on call)
 1637  */
 1638 static 
 1639 struct file *
 1640 funsetfd_locked(struct filedesc *fdp, int fd)
 1641 {
 1642         struct file *fp;
 1643 
 1644         if ((unsigned)fd >= fdp->fd_nfiles)
 1645                 return (NULL);
 1646         if ((fp = fdp->fd_files[fd].fp) == NULL)
 1647                 return (NULL);
 1648         fdp->fd_files[fd].fp = NULL;
 1649         fdp->fd_files[fd].fileflags = 0;
 1650 
 1651         fdreserve_locked(fdp, fd, -1);
 1652         fdfixup_locked(fdp, fd);
 1653         return(fp);
 1654 }
 1655 
 1656 /*
 1657  * MPSAFE
 1658  */
 1659 int
 1660 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
 1661 {
 1662         int error;
 1663 
 1664         spin_lock(&fdp->fd_spin);
 1665         if (((u_int)fd) >= fdp->fd_nfiles) {
 1666                 error = EBADF;
 1667         } else if (fdp->fd_files[fd].fp == NULL) {
 1668                 error = EBADF;
 1669         } else {
 1670                 *flagsp = fdp->fd_files[fd].fileflags;
 1671                 error = 0;
 1672         }
 1673         spin_unlock(&fdp->fd_spin);
 1674         return (error);
 1675 }
 1676 
 1677 /*
 1678  * MPSAFE
 1679  */
 1680 int
 1681 fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
 1682 {
 1683         int error;
 1684 
 1685         spin_lock(&fdp->fd_spin);
 1686         if (((u_int)fd) >= fdp->fd_nfiles) {
 1687                 error = EBADF;
 1688         } else if (fdp->fd_files[fd].fp == NULL) {
 1689                 error = EBADF;
 1690         } else {
 1691                 fdp->fd_files[fd].fileflags |= add_flags;
 1692                 error = 0;
 1693         }
 1694         spin_unlock(&fdp->fd_spin);
 1695         return (error);
 1696 }
 1697 
 1698 /*
 1699  * MPSAFE
 1700  */
 1701 int
 1702 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
 1703 {
 1704         int error;
 1705 
 1706         spin_lock(&fdp->fd_spin);
 1707         if (((u_int)fd) >= fdp->fd_nfiles) {
 1708                 error = EBADF;
 1709         } else if (fdp->fd_files[fd].fp == NULL) {
 1710                 error = EBADF;
 1711         } else {
 1712                 fdp->fd_files[fd].fileflags &= ~rem_flags;
 1713                 error = 0;
 1714         }
 1715         spin_unlock(&fdp->fd_spin);
 1716         return (error);
 1717 }
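
As a hedged illustration of how these accessors are meant to be used, the
sketch below toggles close-on-exec the way an F_SETFD-style fcntl handler
might; the wrapper name is hypothetical, and UF_EXCLOSE is the per-descriptor
flag that fdcloseexec() tests further below.

static int
example_set_cloexec(struct filedesc *fdp, int fd, int on)
{
        /* UF_EXCLOSE lives in fd_files[fd].fileflags, not in the file. */
        if (on)
                return (fsetfdflags(fdp, fd, UF_EXCLOSE));
        return (fclrfdflags(fdp, fd, UF_EXCLOSE));
}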
 1718 
 1719 /*
 1720  * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 1721  */
 1722 void
 1723 fsetcred(struct file *fp, struct ucred *ncr)
 1724 {
 1725         struct ucred *ocr;
 1726         struct uidinfo *uip;
 1727 
 1728         ocr = fp->f_cred;
 1729         if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
 1730                 if (ocr) {
 1731                         uip = ocr->cr_uidinfo;
 1732                         atomic_add_int(&uip->ui_openfiles, -1);
 1733                 }
 1734                 if (ncr) {
 1735                         uip = ncr->cr_uidinfo;
 1736                         atomic_add_int(&uip->ui_openfiles, 1);
 1737                 }
 1738         }
 1739         if (ncr)
 1740                 crhold(ncr);
 1741         fp->f_cred = ncr;
 1742         if (ocr)
 1743                 crfree(ocr);
 1744 }
 1745 
 1746 /*
 1747  * Free a file descriptor.
 1748  */
 1749 static
 1750 void
 1751 ffree(struct file *fp)
 1752 {
 1753         KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
 1754         spin_lock(&filehead_spin);
 1755         LIST_REMOVE(fp, f_list);
 1756         nfiles--;
 1757         spin_unlock(&filehead_spin);
 1758         fsetcred(fp, NULL);
 1759         if (fp->f_nchandle.ncp)
 1760             cache_drop(&fp->f_nchandle);
 1761         kfree(fp, M_FILE);
 1762 }
 1763 
 1764 /*
 1765  * called from init_main, initialize filedesc0 for proc0.
 1766  */
 1767 void
 1768 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
 1769 {
 1770         p0->p_fd = fdp0;
 1771         p0->p_fdtol = NULL;
 1772         fdp0->fd_refcnt = 1;
 1773         fdp0->fd_cmask = cmask;
 1774         fdp0->fd_files = fdp0->fd_builtin_files;
 1775         fdp0->fd_nfiles = NDFILE;
 1776         fdp0->fd_lastfile = -1;
 1777         spin_init(&fdp0->fd_spin);
 1778 }
 1779 
 1780 /*
 1781  * Build a new filedesc structure.
 1782  *
 1783  * NOT MPSAFE (vref)
 1784  */
 1785 struct filedesc *
 1786 fdinit(struct proc *p)
 1787 {
 1788         struct filedesc *newfdp;
 1789         struct filedesc *fdp = p->p_fd;
 1790 
 1791         newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
 1792         spin_lock(&fdp->fd_spin);
 1793         if (fdp->fd_cdir) {
 1794                 newfdp->fd_cdir = fdp->fd_cdir;
 1795                 vref(newfdp->fd_cdir);
 1796                 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
 1797         }
 1798 
 1799         /*
 1800          * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
 1801          * proc0, but should unconditionally exist in other processes.
 1802          */
 1803         if (fdp->fd_rdir) {
 1804                 newfdp->fd_rdir = fdp->fd_rdir;
 1805                 vref(newfdp->fd_rdir);
 1806                 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
 1807         }
 1808         if (fdp->fd_jdir) {
 1809                 newfdp->fd_jdir = fdp->fd_jdir;
 1810                 vref(newfdp->fd_jdir);
 1811                 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
 1812         }
 1813         spin_unlock(&fdp->fd_spin);
 1814 
 1815         /* Create the file descriptor table. */
 1816         newfdp->fd_refcnt = 1;
 1817         newfdp->fd_cmask = cmask;
 1818         newfdp->fd_files = newfdp->fd_builtin_files;
 1819         newfdp->fd_nfiles = NDFILE;
 1820         newfdp->fd_lastfile = -1;
 1821         spin_init(&newfdp->fd_spin);
 1822 
 1823         return (newfdp);
 1824 }
 1825 
 1826 /*
 1827  * Share a filedesc structure.
 1828  *
 1829  * MPSAFE
 1830  */
 1831 struct filedesc *
 1832 fdshare(struct proc *p)
 1833 {
 1834         struct filedesc *fdp;
 1835 
 1836         fdp = p->p_fd;
 1837         spin_lock(&fdp->fd_spin);
 1838         fdp->fd_refcnt++;
 1839         spin_unlock(&fdp->fd_spin);
 1840         return (fdp);
 1841 }
 1842 
 1843 /*
 1844  * Copy a filedesc structure.
 1845  *
 1846  * MPSAFE
 1847  */
 1848 int
 1849 fdcopy(struct proc *p, struct filedesc **fpp)
 1850 {
 1851         struct filedesc *fdp = p->p_fd;
 1852         struct filedesc *newfdp;
 1853         struct fdnode *fdnode;
 1854         int i;
 1855         int ni;
 1856 
 1857         /*
 1858          * Certain daemons might not have file descriptors. 
 1859          */
 1860         if (fdp == NULL)
 1861                 return (0);
 1862 
 1863         /*
 1864          * Allocate the new filedesc and fd_files[] array.  This can race
 1865          * with operations by other threads on the fdp so we have to be
 1866          * careful.
 1867          */
 1868         newfdp = kmalloc(sizeof(struct filedesc), 
 1869                          M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
 1870         if (newfdp == NULL) {
 1871                 *fpp = NULL;
 1872                 return (-1);
 1873         }
 1874 again:
 1875         spin_lock(&fdp->fd_spin);
 1876         if (fdp->fd_lastfile < NDFILE) {
 1877                 newfdp->fd_files = newfdp->fd_builtin_files;
 1878                 i = NDFILE;
 1879         } else {
 1880                 /*
 1881                  * We have to allocate (2^N - 1) entries for our in-place
 1882                  * binary tree.  Allow the table to shrink.
 1883                  */
 1884                 i = fdp->fd_nfiles;
 1885                 ni = (i - 1) / 2;
 1886                 while (ni > fdp->fd_lastfile && ni > NDFILE) {
 1887                         i = ni;
 1888                         ni = (i - 1) / 2;
 1889                 }
 1890                 spin_unlock(&fdp->fd_spin);
 1891                 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
 1892                                           M_FILEDESC, M_WAITOK | M_ZERO);
 1893 
 1894                 /*
 1895                  * Check for race, retry
 1896                  */
 1897                 spin_lock(&fdp->fd_spin);
 1898                 if (i <= fdp->fd_lastfile) {
 1899                         spin_unlock(&fdp->fd_spin);
 1900                         kfree(newfdp->fd_files, M_FILEDESC);
 1901                         goto again;
 1902                 }
 1903         }
 1904 
 1905         /*
 1906          * Dup the remaining fields. vref() and cache_hold() can be
 1907          * safely called while holding the read spinlock on fdp.
 1908          *
 1909          * The read spinlock on fdp is still being held.
 1910          *
 1911          * NOTE: vref and cache_hold calls for the case where the vnode
 1912          * or cache entry already has at least one ref may be called
 1913          * while holding spin locks.
 1914          */
 1915         if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
 1916                 vref(newfdp->fd_cdir);
 1917                 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
 1918         }
 1919         /*
 1920          * We must check for fd_rdir here, at least for now because
 1921          * the init process is created before we have access to the
 1922          * rootvnode to take a reference to it.
 1923          */
 1924         if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
 1925                 vref(newfdp->fd_rdir);
 1926                 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
 1927         }
 1928         if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
 1929                 vref(newfdp->fd_jdir);
 1930                 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
 1931         }
 1932         newfdp->fd_refcnt = 1;
 1933         newfdp->fd_nfiles = i;
 1934         newfdp->fd_lastfile = fdp->fd_lastfile;
 1935         newfdp->fd_freefile = fdp->fd_freefile;
 1936         newfdp->fd_cmask = fdp->fd_cmask;
 1937         spin_init(&newfdp->fd_spin);
 1938 
 1939         /*
 1940          * Copy the descriptor table through (i).  This also copies the
 1941          * allocation state.   Then go through and ref the file pointers
 1942          * and clean up any KQ descriptors.
 1943          *
 1944          * kq descriptors cannot be copied.  Since we haven't ref'd the
 1945          * copied files yet we can ignore the return value from funsetfd().
 1946          *
 1947          * The read spinlock on fdp is still being held.
 1948          */
 1949         bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
 1950         for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
 1951                 fdnode = &newfdp->fd_files[i];
 1952                 if (fdnode->reserved) {
 1953                         fdreserve_locked(newfdp, i, -1);
 1954                         fdnode->reserved = 0;
 1955                         fdfixup_locked(newfdp, i);
 1956                 } else if (fdnode->fp) {
 1957                         if (fdnode->fp->f_type == DTYPE_KQUEUE) {
 1958                                 (void)funsetfd_locked(newfdp, i);
 1959                         } else {
 1960                                 fhold(fdnode->fp);
 1961                         }
 1962                 }
 1963         }
 1964         spin_unlock(&fdp->fd_spin);
 1965         *fpp = newfdp;
 1966         return (0);
 1967 }
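
The sizing loop near the top of fdcopy() keeps table sizes of the form
2^N - 1.  A standalone arithmetic illustration (assuming NDFILE is 15, a
value of that form): starting from a 255-entry table whose highest open
descriptor is 20, the loop shrinks 255 -> 127 -> 63 -> 31 and stops, because
the next step (15) would no longer cover descriptor 20.

#include <stdio.h>

int
main(void)
{
        int i = 255;            /* current fd_nfiles */
        int lastfile = 20;      /* highest open descriptor */
        int ndfile = 15;        /* assumed NDFILE */
        int ni;

        ni = (i - 1) / 2;
        while (ni > lastfile && ni > ndfile) {
                i = ni;
                ni = (i - 1) / 2;
        }
        printf("copied table size: %d\n", i);   /* prints 31 */
        return (0);
}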
 1968 
 1969 /*
 1970  * Release a filedesc structure.
 1971  *
 1972  * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 1973  */
 1974 void
 1975 fdfree(struct proc *p, struct filedesc *repl)
 1976 {
 1977         struct filedesc *fdp;
 1978         struct fdnode *fdnode;
 1979         int i;
 1980         struct filedesc_to_leader *fdtol;
 1981         struct file *fp;
 1982         struct vnode *vp;
 1983         struct flock lf;
 1984 
 1985         /*
 1986          * Certain daemons might not have file descriptors.
 1987          */
 1988         fdp = p->p_fd;
 1989         if (fdp == NULL) {
 1990                 p->p_fd = repl;
 1991                 return;
 1992         }
 1993 
 1994         /*
 1995          * Severe messing around to follow.
 1996          */
 1997         spin_lock(&fdp->fd_spin);
 1998 
 1999         /* Check for special need to clear POSIX style locks */
 2000         fdtol = p->p_fdtol;
 2001         if (fdtol != NULL) {
 2002                 KASSERT(fdtol->fdl_refcount > 0,
 2003                         ("filedesc_to_refcount botch: fdl_refcount=%d",
 2004                          fdtol->fdl_refcount));
 2005                 if (fdtol->fdl_refcount == 1 &&
 2006                     (p->p_leader->p_flags & P_ADVLOCK) != 0) {
 2007                         for (i = 0; i <= fdp->fd_lastfile; ++i) {
 2008                                 fdnode = &fdp->fd_files[i];
 2009                                 if (fdnode->fp == NULL ||
 2010                                     fdnode->fp->f_type != DTYPE_VNODE) {
 2011                                         continue;
 2012                                 }
 2013                                 fp = fdnode->fp;
 2014                                 fhold(fp);
 2015                                 spin_unlock(&fdp->fd_spin);
 2016 
 2017                                 lf.l_whence = SEEK_SET;
 2018                                 lf.l_start = 0;
 2019                                 lf.l_len = 0;
 2020                                 lf.l_type = F_UNLCK;
 2021                                 vp = (struct vnode *)fp->f_data;
 2022                                 (void) VOP_ADVLOCK(vp,
 2023                                                    (caddr_t)p->p_leader,
 2024                                                    F_UNLCK,
 2025                                                    &lf,
 2026                                                    F_POSIX);
 2027                                 fdrop(fp);
 2028                                 spin_lock(&fdp->fd_spin);
 2029                         }
 2030                 }
 2031         retry:
 2032                 if (fdtol->fdl_refcount == 1) {
 2033                         if (fdp->fd_holdleaderscount > 0 &&
 2034                             (p->p_leader->p_flags & P_ADVLOCK) != 0) {
 2035                                 /*
 2036                                  * close() or do_dup() has cleared a reference
 2037                                  * in a shared file descriptor table.
 2038                                  */
 2039                                 fdp->fd_holdleaderswakeup = 1;
 2040                                 ssleep(&fdp->fd_holdleaderscount,
 2041                                        &fdp->fd_spin, 0, "fdlhold", 0);
 2042                                 goto retry;
 2043                         }
 2044                         if (fdtol->fdl_holdcount > 0) {
 2045                                 /* 
 2046                                  * Ensure that fdtol->fdl_leader
 2047                                  * remains valid in closef().
 2048                                  */
 2049                                 fdtol->fdl_wakeup = 1;
 2050                                 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
 2051                                 goto retry;
 2052                         }
 2053                 }
 2054                 fdtol->fdl_refcount--;
 2055                 if (fdtol->fdl_refcount == 0 &&
 2056                     fdtol->fdl_holdcount == 0) {
 2057                         fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
 2058                         fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
 2059                 } else {
 2060                         fdtol = NULL;
 2061                 }
 2062                 p->p_fdtol = NULL;
 2063                 if (fdtol != NULL) {
 2064                         spin_unlock(&fdp->fd_spin);
 2065                         kfree(fdtol, M_FILEDESC_TO_LEADER);
 2066                         spin_lock(&fdp->fd_spin);
 2067                 }
 2068         }
 2069         if (--fdp->fd_refcnt > 0) {
 2070                 spin_unlock(&fdp->fd_spin);
 2071                 spin_lock(&p->p_spin);
 2072                 p->p_fd = repl;
 2073                 spin_unlock(&p->p_spin);
 2074                 return;
 2075         }
 2076 
 2077         /*
 2078          * Even though we are the last reference to the structure, allproc
 2079          * scans may still reference the structure.  Maintain proper
 2080          * locks until we can replace p->p_fd.
 2081          *
 2082          * Also note that kqueue's closef still needs to reference the
 2083          * fdp via p->p_fd, so we have to close the descriptors before
 2084          * we replace p->p_fd.
 2085          */
 2086         for (i = 0; i <= fdp->fd_lastfile; ++i) {
 2087                 if (fdp->fd_files[i].fp) {
 2088                         fp = funsetfd_locked(fdp, i);
 2089                         if (fp) {
 2090                                 spin_unlock(&fdp->fd_spin);
 2091                                 if (SLIST_FIRST(&fp->f_klist))
 2092                                         knote_fdclose(fp, fdp, i);
 2093                                 closef(fp, p);
 2094                                 spin_lock(&fdp->fd_spin);
 2095                         }
 2096                 }
 2097         }
 2098         spin_unlock(&fdp->fd_spin);
 2099 
 2100         /*
 2101          * Interlock against allproc scan operations (typically frevoke).
 2102          */
 2103         spin_lock(&p->p_spin);
 2104         p->p_fd = repl;
 2105         spin_unlock(&p->p_spin);
 2106 
 2107         /*
 2108          * Wait for any softrefs to go away.  This race rarely occurs so
 2109          * we can use a non-critical-path style poll/sleep loop.  The
 2110          * race only occurs against allproc scans.
 2111          *
 2112          * No new softrefs can occur with the fdp disconnected from the
 2113          * process.
 2114          */
 2115         if (fdp->fd_softrefs) {
 2116                 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
 2117                 while (fdp->fd_softrefs)
 2118                         tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
 2119         }
 2120 
 2121         if (fdp->fd_files != fdp->fd_builtin_files)
 2122                 kfree(fdp->fd_files, M_FILEDESC);
 2123         if (fdp->fd_cdir) {
 2124                 cache_drop(&fdp->fd_ncdir);
 2125                 vrele(fdp->fd_cdir);
 2126         }
 2127         if (fdp->fd_rdir) {
 2128                 cache_drop(&fdp->fd_nrdir);
 2129                 vrele(fdp->fd_rdir);
 2130         }
 2131         if (fdp->fd_jdir) {
 2132                 cache_drop(&fdp->fd_njdir);
 2133                 vrele(fdp->fd_jdir);
 2134         }
 2135         kfree(fdp, M_FILEDESC);
 2136 }
 2137 
 2138 /*
 2139  * Retrieve and reference the file pointer associated with a descriptor.
 2140  *
 2141  * MPSAFE
 2142  */
 2143 struct file *
 2144 holdfp(struct filedesc *fdp, int fd, int flag)
 2145 {
 2146         struct file* fp;
 2147 
 2148         spin_lock_shared(&fdp->fd_spin);
 2149         if (((u_int)fd) >= fdp->fd_nfiles) {
 2150                 fp = NULL;
 2151                 goto done;
 2152         }
 2153         if ((fp = fdp->fd_files[fd].fp) == NULL)
 2154                 goto done;
 2155         if ((fp->f_flag & flag) == 0 && flag != -1) {
 2156                 fp = NULL;
 2157                 goto done;
 2158         }
 2159         fhold(fp);
 2160 done:
 2161         spin_unlock_shared(&fdp->fd_spin);
 2162         return (fp);
 2163 }
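
A hedged sketch of the hold/drop pattern this function supports: pin the file
pointer across a potentially blocking operation, then release it with fdrop().
The I/O step is a placeholder; FREAD is simply one flag value holdfp() knows
how to check.

static int
example_read_path(struct filedesc *fdp, int fd)
{
        struct file *fp;
        int error;

        if ((fp = holdfp(fdp, fd, FREAD)) == NULL)
                return (EBADF);
        error = example_blocking_io(fp);        /* placeholder operation */
        /* checkfdclosed() above can detect a concurrent close of fd here. */
        fdrop(fp);
        return (error);
}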
 2164 
 2165 /*
 2166  * holdsock() - load the struct file pointer associated
 2167  * with a socket into *fpp.  If an error occurs, non-zero
 2168  * will be returned and *fpp will be set to NULL.
 2169  *
 2170  * MPSAFE
 2171  */
 2172 int
 2173 holdsock(struct filedesc *fdp, int fd, struct file **fpp)
 2174 {
 2175         struct file *fp;
 2176         int error;
 2177 
 2178         spin_lock_shared(&fdp->fd_spin);
 2179         if ((unsigned)fd >= fdp->fd_nfiles) {
 2180                 error = EBADF;
 2181                 fp = NULL;
 2182                 goto done;
 2183         }
 2184         if ((fp = fdp->fd_files[fd].fp) == NULL) {
 2185                 error = EBADF;
 2186                 goto done;
 2187         }
 2188         if (fp->f_type != DTYPE_SOCKET) {
 2189                 error = ENOTSOCK;
 2190                 goto done;
 2191         }
 2192         fhold(fp);
 2193         error = 0;
 2194 done:
 2195         spin_unlock_shared(&fdp->fd_spin);
 2196         *fpp = fp;
 2197         return (error);
 2198 }
 2199 
 2200 /*
 2201  * Convert a user file descriptor to a held file pointer.
 2202  *
 2203  * MPSAFE
 2204  */
 2205 int
 2206 holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
 2207 {
 2208         struct file *fp;
 2209         int error;
 2210 
 2211         spin_lock_shared(&fdp->fd_spin);
 2212         if ((unsigned)fd >= fdp->fd_nfiles) {
 2213                 error = EBADF;
 2214                 fp = NULL;
 2215                 goto done;
 2216         }
 2217         if ((fp = fdp->fd_files[fd].fp) == NULL) {
 2218                 error = EBADF;
 2219                 goto done;
 2220         }
 2221         if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
 2222                 fp = NULL;
 2223                 error = EINVAL;
 2224                 goto done;
 2225         }
 2226         fhold(fp);
 2227         error = 0;
 2228 done:
 2229         spin_unlock_shared(&fdp->fd_spin);
 2230         *fpp = fp;
 2231         return (error);
 2232 }
 2233 
 2234 /*
 2235  * For setugid programs, we don't want people to use that setugidness
 2236  * to generate error messages which write to a file that would
 2237  * otherwise be off-limits to the process.
 2238  *
 2239  * This is a gross hack to plug the hole.  A better solution would involve
 2240  * a special vop or other form of generalized access control mechanism.  We
 2241  * go ahead and just reject all procfs filesystem accesses as dangerous.
 2242  *
 2243  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 2244  * sufficient.  We also don't check for setugidness since we know we are.
 2245  */
 2246 static int
 2247 is_unsafe(struct file *fp)
 2248 {
 2249         if (fp->f_type == DTYPE_VNODE && 
 2250             ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
 2251                 return (1);
 2252         return (0);
 2253 }
 2254 
 2255 /*
 2256  * Make this setugid thing safe, if at all possible.
 2257  *
 2258  * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 2259  */
 2260 void
 2261 setugidsafety(struct proc *p)
 2262 {
 2263         struct filedesc *fdp = p->p_fd;
 2264         int i;
 2265 
 2266         /* Certain daemons might not have file descriptors. */
 2267         if (fdp == NULL)
 2268                 return;
 2269 
 2270         /*
 2271          * note: fdp->fd_files may be reallocated out from under us while
 2272          * we are blocked in a close.  Be careful!
 2273          */
 2274         for (i = 0; i <= fdp->fd_lastfile; i++) {
 2275                 if (i > 2)
 2276                         break;
 2277                 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
 2278                         struct file *fp;
 2279 
 2280                         /*
 2281                          * NULL-out descriptor prior to close to avoid
 2282                          * a race while close blocks.
 2283                          */
 2284                         if ((fp = funsetfd_locked(fdp, i)) != NULL) {
 2285                                 knote_fdclose(fp, fdp, i);
 2286                                 closef(fp, p);
 2287                         }
 2288                 }
 2289         }
 2290 }
 2291 
 2292 /*
 2293  * Close any files on exec?
 2294  *
 2295  * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 2296  */
 2297 void
 2298 fdcloseexec(struct proc *p)
 2299 {
 2300         struct filedesc *fdp = p->p_fd;
 2301         int i;
 2302 
 2303         /* Certain daemons might not have file descriptors. */
 2304         if (fdp == NULL)
 2305                 return;
 2306 
 2307         /*
 2308          * We cannot cache fd_files since operations may block and rip
 2309          * them out from under us.
 2310          */
 2311         for (i = 0; i <= fdp->fd_lastfile; i++) {
 2312                 if (fdp->fd_files[i].fp != NULL &&
 2313                     (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
 2314                         struct file *fp;
 2315 
 2316                         /*
 2317                          * NULL-out descriptor prior to close to avoid
 2318                          * a race while close blocks.
 2319                          */
 2320                         if ((fp = funsetfd_locked(fdp, i)) != NULL) {
 2321                                 knote_fdclose(fp, fdp, i);
 2322                                 closef(fp, p);
 2323                         }
 2324                 }
 2325         }
 2326 }
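
From userland, UF_EXCLOSE corresponds to the FD_CLOEXEC flag set with
fcntl(2); a descriptor marked this way is exactly what fdcloseexec() closes
at exec time.  A minimal sketch (using /dev/null only as a convenient file):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("/dev/null", O_RDONLY);

        if (fd < 0)
                return (1);
        /* Mark close-on-exec; the kernel records UF_EXCLOSE for fd. */
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        printf("FD_CLOEXEC set: %d\n",
            (fcntl(fd, F_GETFD) & FD_CLOEXEC) != 0);
        /* Any subsequent exec*() in this process would not inherit fd. */
        close(fd);
        return (0);
}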
 2327 
 2328 /*
 2329  * It is unsafe for set[ug]id processes to be started with file
 2330  * descriptors 0..2 closed, as these descriptors are given implicit
 2331  * significance in the Standard C library.  fdcheckstd() will create a
 2332  * descriptor referencing /dev/null for each of stdin, stdout, and
 2333  * stderr that is not already open.
 2334  *
 2335  * NOT MPSAFE - calls falloc, vn_open, etc
 2336  */
 2337 int
 2338 fdcheckstd(struct lwp *lp)
 2339 {
 2340         struct nlookupdata nd;
 2341         struct filedesc *fdp;
 2342         struct file *fp;
 2343         int retval;
 2344         int i, error, flags, devnull;
 2345 
 2346         fdp = lp->lwp_proc->p_fd;
 2347         if (fdp == NULL)
 2348                 return (0);
 2349         devnull = -1;
 2350         error = 0;
 2351         for (i = 0; i < 3; i++) {
 2352                 if (fdp->fd_files[i].fp != NULL)
 2353                         continue;
 2354                 if (devnull < 0) {
 2355                         if ((error = falloc(lp, &fp, &devnull)) != 0)
 2356                                 break;
 2357 
 2358                         error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
 2359                                                 NLC_FOLLOW|NLC_LOCKVP);
 2360                         flags = FREAD | FWRITE;
 2361                         if (error == 0)
 2362                                 error = vn_open(&nd, fp, flags, 0);
 2363                         if (error == 0)
 2364                                 fsetfd(fdp, fp, devnull);
 2365                         else
 2366                                 fsetfd(fdp, NULL, devnull);
 2367                         fdrop(fp);
 2368                         nlookup_done(&nd);
 2369                         if (error)
 2370                                 break;
 2371                         KKASSERT(i == devnull);
 2372                 } else {
 2373                         error = kern_dup(DUP_FIXED, devnull, i, &retval);
 2374                         if (error != 0)
 2375                                 break;
 2376                 }
 2377         }
 2378         return (error);
 2379 }
 2380 
 2381 /*
 2382  * Internal form of close.
 2383  * Decrement reference count on file structure.
 2384  * Note: p may be NULL when closing a file
 2385  * that was being passed in a message.
 2386  *
 2387  * MPALMOSTSAFE - acquires mplock for VOP operations
 2388  */
 2389 int
 2390 closef(struct file *fp, struct proc *p)
 2391 {
 2392         struct vnode *vp;
 2393         struct flock lf;
 2394         struct filedesc_to_leader *fdtol;
 2395 
 2396         if (fp == NULL)
 2397                 return (0);
 2398 
 2399         /*
 2400          * POSIX record locking dictates that any close releases ALL
 2401          * locks owned by this process.  This is handled by setting
 2402          * a flag in the unlock to free ONLY locks obeying POSIX
 2403          * semantics, and not to free BSD-style file locks.
 2404          * If the descriptor was in a message, POSIX-style locks
 2405          * aren't passed with the descriptor.
 2406          */
 2407         if (p != NULL && fp->f_type == DTYPE_VNODE &&
 2408             (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
 2409         ) {
 2410                 if ((p->p_leader->p_flags & P_ADVLOCK) != 0) {
 2411                         lf.l_whence = SEEK_SET;
 2412                         lf.l_start = 0;
 2413                         lf.l_len = 0;
 2414                         lf.l_type = F_UNLCK;
 2415                         vp = (struct vnode *)fp->f_data;
 2416                         (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
 2417                                            &lf, F_POSIX);
 2418                 }
 2419                 fdtol = p->p_fdtol;
 2420                 if (fdtol != NULL) {
 2421                         lwkt_gettoken(&p->p_token);
 2422                         /*
 2423                          * Handle special case where file descriptor table
 2424                          * is shared between multiple process leaders.
 2425                          */
 2426                         for (fdtol = fdtol->fdl_next;
 2427                              fdtol != p->p_fdtol;
 2428                              fdtol = fdtol->fdl_next) {
 2429                                 if ((fdtol->fdl_leader->p_flags &
 2430                                      P_ADVLOCK) == 0)
 2431                                         continue;
 2432                                 fdtol->fdl_holdcount++;
 2433                                 lf.l_whence = SEEK_SET;
 2434                                 lf.l_start = 0;
 2435                                 lf.l_len = 0;
 2436                                 lf.l_type = F_UNLCK;
 2437                                 vp = (struct vnode *)fp->f_data;
 2438                                 (void) VOP_ADVLOCK(vp,
 2439                                                    (caddr_t)fdtol->fdl_leader,
 2440                                                    F_UNLCK, &lf, F_POSIX);
 2441                                 fdtol->fdl_holdcount--;
 2442                                 if (fdtol->fdl_holdcount == 0 &&
 2443                                     fdtol->fdl_wakeup != 0) {
 2444                                         fdtol->fdl_wakeup = 0;
 2445                                         wakeup(fdtol);
 2446                                 }
 2447                         }
 2448                         lwkt_reltoken(&p->p_token);
 2449                 }
 2450         }
 2451         return (fdrop(fp));
 2452 }
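
The POSIX-locks-released-on-any-close rule described above is observable from
userland.  A small demonstration (the temporary file name is arbitrary): the
parent takes an fcntl() write lock through one descriptor, closes a second
descriptor for the same file, and the child's F_GETLK probe then reports the
file as unlocked; had the parent kept fd2 open, the probe would instead report
the parent's F_WRLCK.

#include <sys/wait.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct flock fl;
        int fd1, fd2;

        fd1 = open("lockdemo.tmp", O_RDWR | O_CREAT, 0644);
        fd2 = dup(fd1);                 /* second descriptor, same file */

        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;
        fl.l_type = F_WRLCK;
        fcntl(fd1, F_SETLK, &fl);       /* POSIX lock taken via fd1 */

        close(fd2);                     /* releases ALL of our POSIX locks */

        if (fork() == 0) {
                fl.l_type = F_WRLCK;
                fcntl(fd1, F_GETLK, &fl);
                printf("lock state: %s\n",
                    fl.l_type == F_UNLCK ? "unlocked" : "still locked");
                _exit(0);
        }
        wait(NULL);
        close(fd1);
        unlink("lockdemo.tmp");
        return (0);
}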
 2453 
 2454 /*
 2455  * MPSAFE
 2456  *
 2457  * fhold() can only be called if f_count is already at least 1 (i.e. the
 2458  * caller of fhold() already has a reference to the file pointer in some
 2459  * manner or other). 
 2460  *
 2461  * f_count is not spin-locked.  Instead, atomic ops are used for
 2462  * incrementing, decrementing, and handling the 1->0 transition.
 2463  */
 2464 void
 2465 fhold(struct file *fp)
 2466 {
 2467         atomic_add_int(&fp->f_count, 1);
 2468 }
 2469 
 2470 /*
 2471  * fdrop() - drop a reference to a descriptor
 2472  *
 2473  * MPALMOSTSAFE - acquires mplock for final close sequence
 2474  */
 2475 int
 2476 fdrop(struct file *fp)
 2477 {
 2478         struct flock lf;
 2479         struct vnode *vp;
 2480         int error;
 2481 
 2482         /*
 2483          * A combined fetch and subtract is needed to properly detect
 2484          * 1->0 transitions, otherwise two cpus dropping from a ref
 2485          * count of 2 might both try to run the 1->0 code.
 2486          */
 2487         if (atomic_fetchadd_int(&fp->f_count, -1) > 1)
 2488                 return (0);
 2489 
 2490         KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);
 2491 
 2492         /*
 2493          * The last reference has gone away, we own the fp structure free
 2494          * and clear.
 2495          */
 2496         if (fp->f_count < 0)
 2497                 panic("fdrop: count < 0");
 2498         if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
 2499             (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
 2500         ) {
 2501                 lf.l_whence = SEEK_SET;
 2502                 lf.l_start = 0;
 2503                 lf.l_len = 0;
 2504                 lf.l_type = F_UNLCK;
 2505                 vp = (struct vnode *)fp->f_data;
 2506                 (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
 2507         }
 2508         if (fp->f_ops != &badfileops)
 2509                 error = fo_close(fp);
 2510         else
 2511                 error = 0;
 2512         ffree(fp);
 2513         return (error);
 2514 }
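
The 1->0 detection above relies on fetch-and-subtract returning the previous
value.  A hedged, userland-flavored illustration of the same technique using
C11 atomics (the kernel uses atomic_fetchadd_int, not this API):

#include <stdatomic.h>

/*
 * Returns 1 only for the caller that dropped the final reference: the
 * previous value is read and decremented in one atomic step, so two
 * threads racing down from a count of 2 cannot both observe the 1->0
 * transition.
 */
static int
ref_drop(atomic_int *countp)
{
        return (atomic_fetch_sub(countp, 1) == 1);
}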
 2515 
 2516 /*
 2517  * Apply an advisory lock on a file descriptor.
 2518  *
 2519  * Just attempt to get a record lock of the requested type on
 2520  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 2521  *
 2522  * MPALMOSTSAFE
 2523  */
 2524 int
 2525 sys_flock(struct flock_args *uap)
 2526 {
 2527         struct proc *p = curproc;
 2528         struct file *fp;
 2529         struct vnode *vp;
 2530         struct flock lf;
 2531         int error;
 2532 
 2533         if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
 2534                 return (EBADF);
 2535         if (fp->f_type != DTYPE_VNODE) {
 2536                 error = EOPNOTSUPP;
 2537                 goto done;
 2538         }
 2539         vp = (struct vnode *)fp->f_data;
 2540         lf.l_whence = SEEK_SET;
 2541         lf.l_start = 0;
 2542         lf.l_len = 0;
 2543         if (uap->how & LOCK_UN) {
 2544                 lf.l_type = F_UNLCK;
 2545                 fp->f_flag &= ~FHASLOCK;
 2546                 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
 2547                 goto done;
 2548         }
 2549         if (uap->how & LOCK_EX)
 2550                 lf.l_type = F_WRLCK;
 2551         else if (uap->how & LOCK_SH)
 2552                 lf.l_type = F_RDLCK;
 2553         else {
 2554                 error = EBADF;
 2555                 goto done;
 2556         }
 2557         fp->f_flag |= FHASLOCK;
 2558         if (uap->how & LOCK_NB)
 2559                 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
 2560         else
 2561                 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
 2562 done:
 2563         fdrop(fp);
 2564         return (error);
 2565 }
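
From userland, the mapping described above looks like this; a minimal
flock(2) sketch (the file name is arbitrary):

#include <sys/file.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("flockdemo.tmp", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return (1);
        /* LOCK_EX becomes a whole-file F_WRLCK; LOCK_NB drops F_WAIT. */
        if (flock(fd, LOCK_EX | LOCK_NB) == -1)
                perror("flock");
        /* ... exclusive access ... */
        flock(fd, LOCK_UN);             /* F_UNLCK, clears FHASLOCK */
        close(fd);
        unlink("flockdemo.tmp");
        return (0);
}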
 2566 
 2567 /*
 2568  * File Descriptor pseudo-device driver (/dev/fd/).
 2569  *
 2570  * Opening minor device N dup()s the file (if any) connected to file
 2571  * descriptor N belonging to the calling process.  Note that this driver
 2572  * consists of only the ``open()'' routine, because all subsequent
 2573  * references to this file will be direct to the other driver.
 2574  */
 2575 static int
 2576 fdopen(struct dev_open_args *ap)
 2577 {
 2578         thread_t td = curthread;
 2579 
 2580         KKASSERT(td->td_lwp != NULL);
 2581 
 2582         /*
 2583          * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
 2584          * file descriptor being sought for duplication. The error
 2585          * return ensures that the vnode for this device will be released
 2586          * by vn_open. Open will detect this special error and take the
 2587          * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
 2588          * will simply report the error.
 2589          */
 2590         td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
 2591         return (ENODEV);
 2592 }
 2593 
 2594 /*
 2595  * The caller has reserved the file descriptor dfd for us.  On success we
 2596  * must fsetfd() it.  On failure the caller will clean it up.
 2597  *
 2598  * MPSAFE
 2599  */
 2600 int
 2601 dupfdopen(struct filedesc *fdp, int dfd, int sfd, int mode, int error)
 2602 {
 2603         struct file *wfp;
 2604         struct file *xfp;
 2605         int werror;
 2606 
 2607         if ((wfp = holdfp(fdp, sfd, -1)) == NULL)
 2608                 return (EBADF);
 2609 
 2610         /*
 2611          * Close a revoke/dup race.  Duping a descriptor marked as revoked
 2612          * will dup a dummy descriptor instead of the real one.
 2613          */
 2614         if (wfp->f_flag & FREVOKED) {
 2615                 kprintf("Warning: attempt to dup() a revoked descriptor\n");
 2616                 fdrop(wfp);
 2617                 wfp = NULL;
 2618                 werror = falloc(NULL, &wfp, NULL);
 2619                 if (werror)
 2620                         return (werror);
 2621         }
 2622 
 2623         /*
 2624          * There are two cases of interest here.
 2625          *
 2626          * For ENODEV simply dup sfd to file descriptor dfd and return.
 2627          *
 2628          * For ENXIO steal away the file structure from sfd and store it in
 2629          * dfd.  sfd is effectively closed by this operation.
 2630          *
 2631          * Any other error code is just returned.
 2632          */
 2633         switch (error) {
 2634         case ENODEV:
 2635                 /*
 2636                  * Check that the mode the file is being opened for is a
 2637                  * subset of the mode of the existing descriptor.
 2638                  */
 2639                 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
 2640                         error = EACCES;
 2641                         break;
 2642                 }
 2643                 spin_lock(&fdp->fd_spin);
 2644                 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
 2645                 fsetfd_locked(fdp, wfp, dfd);
 2646                 spin_unlock(&fdp->fd_spin);
 2647                 error = 0;
 2648                 break;
 2649         case ENXIO:
 2650                 /*
 2651                  * Steal away the file pointer from sfd, and stuff it into dfd.
 2652                  */
 2653                 spin_lock(&fdp->fd_spin);
 2654                 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
 2655                 fsetfd_locked(fdp, wfp, dfd);
 2656                 if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
 2657                         spin_unlock(&fdp->fd_spin);
 2658                         fdrop(xfp);
 2659                 } else {
 2660                         spin_unlock(&fdp->fd_spin);
 2661                 }
 2662                 error = 0;
 2663                 break;
 2664         default:
 2665                 break;
 2666         }
 2667         fdrop(wfp);
 2668         return (error);
 2669 }
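
The net effect from userland: opening /dev/fd/N behaves like dup(N), provided
the requested mode is a subset of the original descriptor's mode.  A small
sketch (using /dev/null only as something to open first):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        char path[32];
        int fd, dupfd;

        fd = open("/dev/null", O_RDONLY);
        if (fd < 0)
                return (1);
        snprintf(path, sizeof(path), "/dev/fd/%d", fd);
        /*
         * fdopen() stashes fd in lwp_dupfd and fails with ENODEV; the
         * open path catches that and calls dupfdopen(), so this open()
         * hands back a duplicate of fd.
         */
        dupfd = open(path, O_RDONLY);
        printf("fd %d duplicated as %d\n", fd, dupfd);
        if (dupfd >= 0)
                close(dupfd);
        close(fd);
        return (0);
}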
 2670 
 2671 /*
 2672  * NOT MPSAFE - I think these refer to a common file descriptor table
 2673  * and we need to spinlock that to link fdtol in.
 2674  */
 2675 struct filedesc_to_leader *
 2676 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
 2677                          struct proc *leader)
 2678 {
 2679         struct filedesc_to_leader *fdtol;
 2680         
 2681         fdtol = kmalloc(sizeof(struct filedesc_to_leader), 
 2682                         M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
 2683         fdtol->fdl_refcount = 1;
 2684         fdtol->fdl_holdcount = 0;
 2685         fdtol->fdl_wakeup = 0;
 2686         fdtol->fdl_leader = leader;
 2687         if (old != NULL) {
 2688                 fdtol->fdl_next = old->fdl_next;
 2689                 fdtol->fdl_prev = old;
 2690                 old->fdl_next = fdtol;
 2691                 fdtol->fdl_next->fdl_prev = fdtol;
 2692         } else {
 2693                 fdtol->fdl_next = fdtol;
 2694                 fdtol->fdl_prev = fdtol;
 2695         }
 2696         return fdtol;
 2697 }
 2698 
 2699 /*
 2700  * Scan all file pointers in the system.  The callback is made with
 2701  * the master list spinlock held exclusively.
 2702  *
 2703  * MPSAFE
 2704  */
 2705 void
 2706 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
 2707 {
 2708         struct file *fp;
 2709         int res;
 2710 
 2711         spin_lock(&filehead_spin);
 2712         LIST_FOREACH(fp, &filehead, f_list) {
 2713                 res = callback(fp, data);
 2714                 if (res < 0)
 2715                         break;
 2716         }
 2717         spin_unlock(&filehead_spin);
 2718 }
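
An illustrative callback only, following the contract documented above: it
runs with the filehead spinlock held, so it must not block, and returning a
negative value would stop the scan early.

static int
count_sockets_callback(struct file *fp, void *data)
{
        int *countp = data;

        if (fp->f_type == DTYPE_SOCKET)
                ++*countp;
        return (0);             /* non-negative: keep scanning */
}

/* usage: int n = 0; allfiles_scan_exclusive(count_sockets_callback, &n); */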
 2719 
 2720 /*
 2721  * Get file structures.
 2722  *
 2723  * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 2724  */
 2725 
 2726 struct sysctl_kern_file_info {
 2727         int count;
 2728         int error;
 2729         struct sysctl_req *req;
 2730 };
 2731 
 2732 static int sysctl_kern_file_callback(struct proc *p, void *data);
 2733 
 2734 static int
 2735 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
 2736 {
 2737         struct sysctl_kern_file_info info;
 2738 
 2739         /*
 2740          * Note: because the number of file descriptors is calculated
 2741          * in different ways for sizing vs returning the data,
 2742          * there is information leakage from the first loop.  However,
 2743          * it is of a similar order of magnitude to the leakage from
 2744          * global system statistics such as kern.openfiles.
 2745          *
 2746          * When just doing a count, note that we cannot just count
 2747          * the elements and add f_count via the filehead list because 
 2748          * threaded processes share their descriptor table and f_count might
 2749          * still be '1' in that case.
 2750          *
 2751          * Since the SYSCTL op can block, we must hold the process to
 2752          * prevent it being ripped out from under us either in the 
 2753          * file descriptor loop or in the greater LIST_FOREACH.  The
 2754          * process may be in varying states of disrepair.  If the process
 2755          * is in SZOMB we may have caught it just as it is being removed
 2756          * from the allproc list, we must skip it in that case to maintain
 2757          * an unbroken chain through the allproc list.
 2758          */
 2759         info.count = 0;
 2760         info.error = 0;
 2761         info.req = req;
 2762         allproc_scan(sysctl_kern_file_callback, &info);
 2763 
 2764         /*
 2765          * When just calculating the size, overestimate a bit to try to
 2766          * prevent system activity from causing the buffer-fill call 
 2767          * to fail later on.
 2768          */
 2769         if (req->oldptr == NULL) {
 2770                 info.count = (info.count + 16) + (info.count / 10);
 2771                 info.error = SYSCTL_OUT(req, NULL,
 2772                                         info.count * sizeof(struct kinfo_file));
 2773         }
 2774         return (info.error);
 2775 }
 2776 
 2777 static int
 2778 sysctl_kern_file_callback(struct proc *p, void *data)
 2779 {
 2780         struct sysctl_kern_file_info *info = data;
 2781         struct kinfo_file kf;
 2782         struct filedesc *fdp;
 2783         struct file *fp;
 2784         uid_t uid;
 2785         int n;
 2786 
 2787         if (p->p_stat == SIDL || p->p_stat == SZOMB)
 2788                 return(0);
 2789         if (!PRISON_CHECK(info->req->td->td_ucred, p->p_ucred))
 2790                 return(0);
 2791 
 2792         /*
 2793          * Softref the fdp to prevent it from being destroyed
 2794          */
 2795         spin_lock(&p->p_spin);
 2796         if ((fdp = p->p_fd) == NULL) {
 2797                 spin_unlock(&p->p_spin);
 2798                 return(0);
 2799         }
 2800         atomic_add_int(&fdp->fd_softrefs, 1);
 2801         spin_unlock(&p->p_spin);
 2802 
 2803         /*
 2804          * The fdp's own spinlock prevents the contents from being
 2805          * modified.
 2806          */
 2807         spin_lock_shared(&fdp->fd_spin);
 2808         for (n = 0; n < fdp->fd_nfiles; ++n) {
 2809                 if ((fp = fdp->fd_files[n].fp) == NULL)
 2810                         continue;
 2811                 if (info->req->oldptr == NULL) {
 2812                         ++info->count;
 2813                 } else {
 2814                         uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
 2815                         kcore_make_file(&kf, fp, p->p_pid, uid, n);
 2816                         spin_unlock_shared(&fdp->fd_spin);
 2817                         info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
 2818                         spin_lock_shared(&fdp->fd_spin);
 2819                         if (info->error)
 2820                                 break;
 2821                 }
 2822         }
 2823         spin_unlock_shared(&fdp->fd_spin);
 2824         atomic_subtract_int(&fdp->fd_softrefs, 1);
 2825         if (info->error)
 2826                 return(-1);
 2827         return(0);
 2828 }
 2829 
 2830 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
 2831     0, 0, sysctl_kern_file, "S,file", "Entire file table");
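
From userland this node is reachable with sysctlbyname(3); a minimal sketch
that only asks for the required buffer size (the deliberately padded estimate
produced above when oldptr is NULL):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        size_t len = 0;

        if (sysctlbyname("kern.file", NULL, &len, NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("kern.file snapshot needs about %zu bytes\n", len);
        return (0);
}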
 2832 
 2833 SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
 2834     &minfilesperproc, 0, "Minimum files allowed open per process");
 2835 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 
 2836     &maxfilesperproc, 0, "Maximum files allowed open per process");
 2837 SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
 2838     &maxfilesperuser, 0, "Maximum files allowed open per user");
 2839 
 2840 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 
 2841     &maxfiles, 0, "Maximum number of files");
 2842 
 2843 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW, 
 2844     &maxfilesrootres, 0, "Descriptors reserved for root use");
 2845 
 2846 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 
 2847         &nfiles, 0, "System-wide number of open files");
 2848 
 2849 static void
 2850 fildesc_drvinit(void *unused)
 2851 {
 2852         int fd;
 2853 
 2854         for (fd = 0; fd < NUMFDESC; fd++) {
 2855                 make_dev(&fildesc_ops, fd,
 2856                          UID_BIN, GID_BIN, 0666, "fd/%d", fd);
 2857         }
 2858 
 2859         make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
 2860         make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
 2861         make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
 2862 }
 2863 
 2864 /*
 2865  * MPSAFE
 2866  */
 2867 struct fileops badfileops = {
 2868         .fo_read = badfo_readwrite,
 2869         .fo_write = badfo_readwrite,
 2870         .fo_ioctl = badfo_ioctl,
 2871         .fo_kqfilter = badfo_kqfilter,
 2872         .fo_stat = badfo_stat,
 2873         .fo_close = badfo_close,
 2874         .fo_shutdown = badfo_shutdown
 2875 };
 2876 
 2877 int
 2878 badfo_readwrite(
 2879         struct file *fp,
 2880         struct uio *uio,
 2881         struct ucred *cred,
 2882         int flags
 2883 ) {
 2884         return (EBADF);
 2885 }
 2886 
 2887 int
 2888 badfo_ioctl(struct file *fp, u_long com, caddr_t data,
 2889             struct ucred *cred, struct sysmsg *msgv)
 2890 {
 2891         return (EBADF);
 2892 }
 2893 
 2894 /*
 2895  * Must return an error to prevent registration, typically
 2896  * due to a revoked descriptor (file_filtops assigned).
 2897  */
 2898 int
 2899 badfo_kqfilter(struct file *fp, struct knote *kn)
 2900 {
 2901         return (EOPNOTSUPP);
 2902 }
 2903 
 2904 /*
 2905  * MPSAFE
 2906  */
 2907 int
 2908 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
 2909 {
 2910         return (EBADF);
 2911 }
 2912 
 2913 /*
 2914  * MPSAFE
 2915  */
 2916 int
 2917 badfo_close(struct file *fp)
 2918 {
 2919         return (EBADF);
 2920 }
 2921 
 2922 /*
 2923  * MPSAFE
 2924  */
 2925 int
 2926 badfo_shutdown(struct file *fp, int how)
 2927 {
 2928         return (EBADF);
 2929 }
 2930 
 2931 /*
 2932  * MPSAFE
 2933  */
 2934 int
 2935 nofo_shutdown(struct file *fp, int how)
 2936 {
 2937         return (EOPNOTSUPP);
 2938 }
 2939 
 2940 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
 2941                                         fildesc_drvinit,NULL)
