FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c


    1 /*      $NetBSD: kern_fork.c,v 1.171 2008/10/11 13:40:57 pooka Exp $    */
    2 
    3 /*-
    4  * Copyright (c) 1999, 2001, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 /*
   34  * Copyright (c) 1982, 1986, 1989, 1991, 1993
   35  *      The Regents of the University of California.  All rights reserved.
   36  * (c) UNIX System Laboratories, Inc.
   37  * All or some portions of this file are derived from material licensed
   38  * to the University of California by American Telephone and Telegraph
   39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   40  * the permission of UNIX System Laboratories, Inc.
   41  *
   42  * Redistribution and use in source and binary forms, with or without
   43  * modification, are permitted provided that the following conditions
   44  * are met:
   45  * 1. Redistributions of source code must retain the above copyright
   46  *    notice, this list of conditions and the following disclaimer.
   47  * 2. Redistributions in binary form must reproduce the above copyright
   48  *    notice, this list of conditions and the following disclaimer in the
   49  *    documentation and/or other materials provided with the distribution.
   50  * 3. Neither the name of the University nor the names of its contributors
   51  *    may be used to endorse or promote products derived from this software
   52  *    without specific prior written permission.
   53  *
   54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   64  * SUCH DAMAGE.
   65  *
   66  *      @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
   67  */
   68 
   69 #include <sys/cdefs.h>
   70 __KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.171 2008/10/11 13:40:57 pooka Exp $");
   71 
   72 #include "opt_ktrace.h"
   73 
   74 #include <sys/param.h>
   75 #include <sys/systm.h>
   76 #include <sys/filedesc.h>
   77 #include <sys/kernel.h>
   78 #include <sys/malloc.h>
   79 #include <sys/pool.h>
   80 #include <sys/mount.h>
   81 #include <sys/proc.h>
   82 #include <sys/ras.h>
   83 #include <sys/resourcevar.h>
   84 #include <sys/vnode.h>
   85 #include <sys/file.h>
   86 #include <sys/acct.h>
   87 #include <sys/ktrace.h>
   88 #include <sys/vmmeter.h>
   89 #include <sys/sched.h>
   90 #include <sys/signalvar.h>
   91 #include <sys/kauth.h>
   92 #include <sys/atomic.h>
   93 #include <sys/syscallargs.h>
   94 #include <sys/uidinfo.h>
   95 
   96 #include <uvm/uvm_extern.h>
   97 
   98 u_int   nprocs = 1;             /* process 0 */
   99 
  100 /*
  101  * Number of ticks to sleep if fork() would fail due to the process
  102  * hitting limits.  Exported in milliseconds to userland via sysctl.
  103  */
  104 int     forkfsleep = 0;
  105 
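
The comment above says forkfsleep is kept in clock ticks inside the kernel but
reported to userland in milliseconds through sysctl. As a hedged userland
sketch (the node name kern.forkfsleep is an assumption; it is not defined in
this file), reading the value could look like this:

#include <sys/param.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
        int ms;
        size_t len = sizeof(ms);

        /* "kern.forkfsleep" is assumed here; the MIB is not defined in this file. */
        if (sysctlbyname("kern.forkfsleep", &ms, &len, NULL, 0) == -1)
                err(1, "sysctlbyname");
        printf("fork() backoff when limits are hit: %d ms\n", ms);
        return 0;
}
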
  106 /*ARGSUSED*/
  107 int
  108 sys_fork(struct lwp *l, const void *v, register_t *retval)
  109 {
  110 
  111         return (fork1(l, 0, SIGCHLD, NULL, 0, NULL, NULL, retval, NULL));
  112 }
  113 
  114 /*
  115  * vfork(2) system call compatible with 4.4BSD (i.e. BSD with Mach VM).
  116  * Address space is not shared, but the parent is blocked until the child exits.
  117  */
  118 /*ARGSUSED*/
  119 int
  120 sys_vfork(struct lwp *l, const void *v, register_t *retval)
  121 {
  122 
  123         return (fork1(l, FORK_PPWAIT, SIGCHLD, NULL, 0, NULL, NULL,
  124             retval, NULL));
  125 }
  126 
  127 /*
  128  * New vfork(2) system call for NetBSD, which implements original 3BSD vfork(2)
  129  * semantics.  Address space is shared, and the parent blocks until the child exits.
  130  */
  131 /*ARGSUSED*/
  132 int
  133 sys___vfork14(struct lwp *l, const void *v, register_t *retval)
  134 {
  135 
  136         return (fork1(l, FORK_PPWAIT|FORK_SHAREVM, SIGCHLD, NULL, 0,
  137             NULL, NULL, retval, NULL));
  138 }
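
Both wrappers above pass FORK_PPWAIT, so fork1() keeps the parent asleep until
the child has exec'd or exited; sys___vfork14 additionally passes FORK_SHAREVM,
so the child runs on the parent's address space instead of a copy. A minimal
userland sketch of the discipline this demands from callers (the child limits
itself to exec or _exit):

#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid = vfork();

        if (pid == -1)
                err(1, "vfork");
        if (pid == 0) {
                /*
                 * Child: with FORK_SHAREVM semantics, writes made here land
                 * in the parent's memory, so only exec or _exit() is safe.
                 */
                execl("/bin/sh", "sh", "-c", "true", (char *)NULL);
                _exit(127);
        }
        /* Parent: blocked in fork1() until the exec or _exit above. */
        (void)waitpid(pid, NULL, 0);
        return 0;
}
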
  139 
  140 /*
  141  * Linux-compatible __clone(2) system call.
  142  */
  143 int
  144 sys___clone(struct lwp *l, const struct sys___clone_args *uap, register_t *retval)
  145 {
  146         /* {
  147                 syscallarg(int) flags;
  148                 syscallarg(void *) stack;
  149         } */
  150         int flags, sig;
  151 
  152         /*
  153          * We don't support the CLONE_PID or CLONE_PTRACE flags.
  154          */
  155         if (SCARG(uap, flags) & (CLONE_PID|CLONE_PTRACE))
  156                 return (EINVAL);
  157 
  158         /*
  159  * Linux requires CLONE_VM when CLONE_SIGHAND is set; do the same.
  160          */
  161         if (SCARG(uap, flags) & CLONE_SIGHAND
  162             && (SCARG(uap, flags) & CLONE_VM) == 0)
  163                 return (EINVAL);
  164 
  165         flags = 0;
  166 
  167         if (SCARG(uap, flags) & CLONE_VM)
  168                 flags |= FORK_SHAREVM;
  169         if (SCARG(uap, flags) & CLONE_FS)
  170                 flags |= FORK_SHARECWD;
  171         if (SCARG(uap, flags) & CLONE_FILES)
  172                 flags |= FORK_SHAREFILES;
  173         if (SCARG(uap, flags) & CLONE_SIGHAND)
  174                 flags |= FORK_SHARESIGS;
  175         if (SCARG(uap, flags) & CLONE_VFORK)
  176                 flags |= FORK_PPWAIT;
  177 
  178         sig = SCARG(uap, flags) & CLONE_CSIGNAL;
  179         if (sig < 0 || sig >= _NSIG)
  180                 return (EINVAL);
  181 
  182         /*
  183          * Note that the Linux API does not provide a portable way of
  184          * specifying the stack area; the caller must know if the stack
  185  * grows up or down.  We therefore pass a stack size of 0, so that
  186  * the code that makes this adjustment is a no-op.
  187          */
  188         return (fork1(l, flags, sig, SCARG(uap, stack), 0,
  189             NULL, NULL, retval, NULL));
  190 }
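
sys___clone() above translates each accepted CLONE_* bit into its FORK_*
counterpart and takes the child's exit signal from the low CLONE_CSIGNAL byte,
rejecting anything outside the valid signal range. A hedged sketch of a caller
using a clone() library wrapper (the wrapper's exact prototype is an
assumption here; it is not part of this file):

#define _GNU_SOURCE             /* for clone() in glibc-style environments */
#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <sched.h>
#include <signal.h>
#include <stdlib.h>

static int
child_fn(void *arg)
{
        /* Runs with the parent's address space and file table shared. */
        return 0;
}

int
main(void)
{
        size_t stksz = 64 * 1024;
        char *stk = malloc(stksz);
        pid_t pid;

        if (stk == NULL)
                err(1, "malloc");
        /*
         * CLONE_VM|CLONE_FILES become FORK_SHAREVM|FORK_SHAREFILES above;
         * SIGCHLD in the low byte becomes the exitsig argument of fork1().
         * The stack top is passed for stacks that grow downward.
         */
        pid = clone(child_fn, stk + stksz,
            CLONE_VM | CLONE_FILES | SIGCHLD, NULL);
        if (pid == -1)
                err(1, "clone");
        (void)waitpid(pid, NULL, 0);
        free(stk);
        return 0;
}
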
  191 
  192 /* print the 'table full' message once per 10 seconds */
  193 struct timeval fork_tfmrate = { 10, 0 };
  194 
  195 /*
  196  * General fork call.  Note that another LWP in the process may call exec()
  197  * or exit() while we are forking.  It's safe to continue here, because
  198  * neither operation will complete until all LWPs have exited the process.
  199  */ 
  200 int
  201 fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
  202     void (*func)(void *), void *arg, register_t *retval,
  203     struct proc **rnewprocp)
  204 {
  205         struct proc     *p1, *p2, *parent;
  206         struct plimit   *p1_lim;
  207         uid_t           uid;
  208         struct lwp      *l2;
  209         int             count;
  210         vaddr_t         uaddr;
  211         bool            inmem;
  212         int             tmp;
  213         int             tnprocs;
  214         int             error = 0;
  215 
  216         p1 = l1->l_proc;
  217         uid = kauth_cred_getuid(l1->l_cred);
  218         tnprocs = atomic_inc_uint_nv(&nprocs);
  219 
  220         /*
  221          * Although process entries are dynamically created, we still keep
  222          * a global limit on the maximum number we will create.
  223          */
  224         if (__predict_false(tnprocs >= maxproc))
  225                 error = -1;
  226         else
  227                 error = kauth_authorize_process(l1->l_cred,
  228                     KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);
  229 
  230         if (error) {
  231                 static struct timeval lasttfm;
  232                 atomic_dec_uint(&nprocs);
  233                 if (ratecheck(&lasttfm, &fork_tfmrate))
  234                         tablefull("proc", "increase kern.maxproc or NPROC");
  235                 if (forkfsleep)
  236                         kpause("forkmx", false, forkfsleep, NULL);
  237                 return (EAGAIN);
  238         }
  239 
  240         /*
  241          * Enforce limits.
  242          */
  243         count = chgproccnt(uid, 1);
  244         if (uid != 0 &&
  245             __predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
  246                 (void)chgproccnt(uid, -1);
  247                 atomic_dec_uint(&nprocs);
  248                 if (forkfsleep)
  249                         kpause("forkulim", false, forkfsleep, NULL);
  250                 return (EAGAIN);
  251         }
  252 
  253         /*
  254          * Allocate virtual address space for the U-area now, while it
  255          * is still easy to abort the fork operation if we're out of
  256          * kernel virtual address space.  The actual U-area pages will
  257          * be allocated and wired in uvm_fork() if needed.
  258          */
  259 
  260         inmem = uvm_uarea_alloc(&uaddr);
  261         if (__predict_false(uaddr == 0)) {
  262                 (void)chgproccnt(uid, -1);
  263                 atomic_dec_uint(&nprocs);
  264                 return (ENOMEM);
  265         }
  266 
  267         /*
  268          * We are now committed to the fork.  From here on, we may
  269          * block on resources, but resource allocation may NOT fail.
  270          */
  271 
  272         /* Allocate new proc. */
  273         p2 = proc_alloc();
  274 
  275         /*
  276          * Make a proc table entry for the new process.
  277          * Start by zeroing the section of proc that is zero-initialized,
  278          * then copy the section that is copied directly from the parent.
  279          */
  280         memset(&p2->p_startzero, 0,
  281             (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
  282         memcpy(&p2->p_startcopy, &p1->p_startcopy,
  283             (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));
  284 
  285         CIRCLEQ_INIT(&p2->p_sigpend.sp_info);
  286 
  287         LIST_INIT(&p2->p_lwps);
  288         LIST_INIT(&p2->p_sigwaiters);
  289 
  290         /*
  291          * Duplicate sub-structures as needed.
  292          * Increase reference counts on shared objects.
  293          * The p_stats and p_sigacts substructs are set in uvm_fork().
  294          * Inherit flags we want to keep.  The flags related to SIGCHLD
  295          * handling are important in order to keep a consistent behaviour
  296          * for the child after the fork.
  297          */
  298         p2->p_flag = p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN);
  299         p2->p_emul = p1->p_emul;
  300         p2->p_execsw = p1->p_execsw;
  301 
  302         if (flags & FORK_SYSTEM) {
  303                 /*
  304                  * Mark it as a system process.  Set P_NOCLDWAIT so that
  305                  * children are reparented to init(8) when they exit. 
  306                  * init(8) can easily wait them out for us.
  307                  */
  308                 p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
  309         }
  310 
  311         mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
  312         mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
  313         rw_init(&p2->p_reflock);
  314         cv_init(&p2->p_waitcv, "wait");
  315         cv_init(&p2->p_lwpcv, "lwpwait");
  316 
  317         /*
  318          * Share a lock between the processes if they are to share signal
  319          * state: we must synchronize access to it.
  320          */
  321         if (flags & FORK_SHARESIGS) {
  322                 p2->p_lock = p1->p_lock;
  323                 mutex_obj_hold(p1->p_lock);
  324         } else
  325                 p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
  326 
  327         kauth_proc_fork(p1, p2);
  328 
  329         p2->p_raslist = NULL;
  330 #if defined(__HAVE_RAS)
  331         ras_fork(p1, p2);
  332 #endif
  333 
  334         /* bump references to the text vnode (for procfs) */
  335         p2->p_textvp = p1->p_textvp;
  336         if (p2->p_textvp)
  337                 VREF(p2->p_textvp);
  338 
  339         if (flags & FORK_SHAREFILES)
  340                 fd_share(p2);
  341         else if (flags & FORK_CLEANFILES)
  342                 p2->p_fd = fd_init(NULL);
  343         else
  344                 p2->p_fd = fd_copy();
  345 
  346         if (flags & FORK_SHARECWD)
  347                 cwdshare(p2);
  348         else
  349                 p2->p_cwdi = cwdinit();
  350 
  351         /*
  352          * p_limit (rlimit stuff) is usually copy-on-write, so we just need
  353          * to bump pl_refcnt.
  354          * However in some cases (see compat irix, and plausibly from clone)
  355          * the parent and child share limits - in which case nothing else
  356          * must have a copy of the limits (PL_SHAREMOD is set).
  357          */
  358         if (__predict_false(flags & FORK_SHARELIMIT))
  359                 lim_privatise(p1, 1);
  360         p1_lim = p1->p_limit;
  361         if (p1_lim->pl_flags & PL_WRITEABLE && !(flags & FORK_SHARELIMIT))
  362                 p2->p_limit = lim_copy(p1_lim);
  363         else {
  364                 lim_addref(p1_lim);
  365                 p2->p_limit = p1_lim;
  366         }
  367 
  368         p2->p_lflag = ((flags & FORK_PPWAIT) ? PL_PPWAIT : 0);
  369         p2->p_sflag = 0;
  370         p2->p_slflag = 0;
  371         parent = (flags & FORK_NOWAIT) ? initproc : p1;
  372         p2->p_pptr = parent;
  373         p2->p_ppid = parent->p_pid;
  374         LIST_INIT(&p2->p_children);
  375 
  376         p2->p_aio = NULL;
  377 
  378 #ifdef KTRACE
  379         /*
  380          * Copy traceflag and tracefile if enabled.
  381          * If not inherited, these were zeroed above.
  382          */
  383         if (p1->p_traceflag & KTRFAC_INHERIT) {
  384                 mutex_enter(&ktrace_lock);
  385                 p2->p_traceflag = p1->p_traceflag;
  386                 if ((p2->p_tracep = p1->p_tracep) != NULL)
  387                         ktradref(p2);
  388                 mutex_exit(&ktrace_lock);
  389         }
  390 #endif
  391 
  392         /*
  393          * Create signal actions for the child process.
  394          */
  395         p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
  396         mutex_enter(p1->p_lock);
  397         p2->p_sflag |=
  398             (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
  399         sched_proc_fork(p1, p2);
  400         mutex_exit(p1->p_lock);
  401 
  402         p2->p_stflag = p1->p_stflag;
  403 
  404         /*
  405          * p_stats.
  406          * Copy parts of p_stats, and zero out the rest.
  407          */
  408         p2->p_stats = pstatscopy(p1->p_stats);
  409 
  410         /*
  411          * If emulation has process fork hook, call it now.
  412          */
  413         if (p2->p_emul->e_proc_fork)
  414                 (*p2->p_emul->e_proc_fork)(p2, p1, flags);
  415 
  416         /*
  417          * ...and finally, any other random fork hooks that subsystems
  418          * might have registered.
  419          */
  420         doforkhooks(p2, p1);
  421 
  422         /*
  423          * This begins the section where we must prevent the parent
  424          * from being swapped.
  425          */
  426         uvm_lwp_hold(l1);
  427         uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);
  428 
  429         /*
  430          * Finish creating the child process.
  431          * It will return through a different path later.
  432          */
  433         lwp_create(l1, p2, uaddr, inmem, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
  434             stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
  435             l1->l_class);
  436 
  437         /*
  438          * It's now safe for the scheduler and other processes to see the
  439          * child process.
  440          */
  441         mutex_enter(proc_lock);
  442 
  443         if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
  444                 p2->p_lflag |= PL_CONTROLT;
  445 
  446         LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
  447         p2->p_exitsig = exitsig;                /* signal for parent on exit */
  448 
  449         LIST_INSERT_AFTER(p1, p2, p_pglist);
  450         LIST_INSERT_HEAD(&allproc, p2, p_list);
  451 
  452         p2->p_trace_enabled = trace_is_enabled(p2);
  453 #ifdef __HAVE_SYSCALL_INTERN
  454         (*p2->p_emul->e_syscall_intern)(p2);
  455 #endif
  456 
  457         /*
  458          * Update stats now that we know the fork was successful.
  459          */
  460         uvmexp.forks++;
  461         if (flags & FORK_PPWAIT)
  462                 uvmexp.forks_ppwait++;
  463         if (flags & FORK_SHAREVM)
  464                 uvmexp.forks_sharevm++;
  465 
  466         /*
  467          * Pass a pointer to the new process to the caller.
  468          */
  469         if (rnewprocp != NULL)
  470                 *rnewprocp = p2;
  471 
  472         if (ktrpoint(KTR_EMUL))
  473                 p2->p_traceflag |= KTRFAC_TRC_EMUL;
  474 
  475         /*
  476          * Now can be swapped.
  477          */
  478         uvm_lwp_rele(l1);
  479 
  480         /*
  481          * Notify any interested parties about the new process.
  482          */
  483         if (!SLIST_EMPTY(&p1->p_klist)) {
  484                 mutex_exit(proc_lock);
  485                 KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
  486                 mutex_enter(proc_lock);
  487         }
  488 
  489         /*
  490          * Make the child runnable, set its start time, and add it to the run
  491          * queue, unless the parent requested the child to start in SSTOP state.
  492          */
  493         tmp = (p2->p_userret != NULL ? LW_WUSERRET : 0);
  494         mutex_enter(p2->p_lock);
  495 
  496         /*
  497          * Start profiling.
  498          */
  499         if ((p2->p_stflag & PST_PROFIL) != 0) {
  500                 mutex_spin_enter(&p2->p_stmutex);
  501                 startprofclock(p2);
  502                 mutex_spin_exit(&p2->p_stmutex);
  503         }
  504 
  505         getmicrotime(&p2->p_stats->p_start);
  506         p2->p_acflag = AFORK;
  507         lwp_lock(l2);
  508         if (p2->p_sflag & PS_STOPFORK) {
  509                 p2->p_nrlwps = 0;
  510                 p2->p_stat = SSTOP;
  511                 p2->p_waited = 0;
  512                 p1->p_nstopchild++;
  513                 l2->l_stat = LSSTOP;
  514                 l2->l_flag |= tmp;
  515                 lwp_unlock(l2);
  516         } else {
  517                 p2->p_nrlwps = 1;
  518                 p2->p_stat = SACTIVE;
  519                 l2->l_stat = LSRUN;
  520                 l2->l_flag |= tmp;
  521                 sched_enqueue(l2, false);
  522                 lwp_unlock(l2);
  523         }
  524 
  525         mutex_exit(p2->p_lock);
  526 
  527         /*
  528          * Preserve synchronization semantics of vfork.  If waiting for
  529          * child to exec or exit, set PL_PPWAIT on child, and sleep on our
  530          * proc (in case of exit).
  531          */
  532         while (p2->p_lflag & PL_PPWAIT)
  533                 cv_wait(&p1->p_waitcv, proc_lock);
  534 
  535         mutex_exit(proc_lock);
  536 
  537         /*
  538          * Return child pid to parent process,
  539          * marking us as parent via retval[1].
  540          */
  541         if (retval != NULL) {
  542                 retval[0] = p2->p_pid;
  543                 retval[1] = 0;
  544         }
  545 
  546         return (0);
  547 }
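
On success fork1() leaves the child's pid in retval[0] and clears retval[1] to
mark the parent side; the child returns to userland through the child_return
path installed via lwp_create() above, and the failure cases surface as EAGAIN
or ENOMEM from the resource checks near the top. Seen from userland this
reduces to the familiar contract, roughly:

#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid = fork();

        switch (pid) {
        case -1:
                /* EAGAIN corresponds to the maxproc/RLIMIT_NPROC checks in fork1(). */
                err(1, "fork");
        case 0:
                /* Child: resumes via child_return() and sees 0. */
                printf("child %d running\n", (int)getpid());
                _exit(0);
        default:
                /* Parent: fork1() stored the child's pid in retval[0]. */
                (void)waitpid(pid, NULL, 0);
                break;
        }
        return 0;
}
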
