FreeBSD/Linux Kernel Cross Reference
sys/bsd/kern/kern_fork.c

    1 /*
    2  * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
   26 /*
   27  * Copyright (c) 1982, 1986, 1989, 1991, 1993
   28  *      The Regents of the University of California.  All rights reserved.
   29  * (c) UNIX System Laboratories, Inc.
   30  * All or some portions of this file are derived from material licensed
   31  * to the University of California by American Telephone and Telegraph
   32  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   33  * the permission of UNIX System Laboratories, Inc.
   34  *
   35  * Redistribution and use in source and binary forms, with or without
   36  * modification, are permitted provided that the following conditions
   37  * are met:
   38  * 1. Redistributions of source code must retain the above copyright
   39  *    notice, this list of conditions and the following disclaimer.
   40  * 2. Redistributions in binary form must reproduce the above copyright
   41  *    notice, this list of conditions and the following disclaimer in the
   42  *    documentation and/or other materials provided with the distribution.
   43  * 3. All advertising materials mentioning features or use of this software
   44  *    must display the following acknowledgement:
   45  *      This product includes software developed by the University of
   46  *      California, Berkeley and its contributors.
   47  * 4. Neither the name of the University nor the names of its contributors
   48  *    may be used to endorse or promote products derived from this software
   49  *    without specific prior written permission.
   50  *
   51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   61  * SUCH DAMAGE.
   62  *
   63  *      @(#)kern_fork.c 8.8 (Berkeley) 2/14/95
   64  */
   65 
   66 #include <kern/assert.h>
   67 #include <sys/param.h>
   68 #include <sys/systm.h>
   69 #include <sys/filedesc.h>
   70 #include <sys/kernel.h>
   71 #include <sys/malloc.h>
   72 #include <sys/proc.h>
   73 #include <sys/user.h>
   74 #include <sys/resourcevar.h>
   75 #include <sys/vnode.h>
   76 #include <sys/file.h>
   77 #include <sys/acct.h>
   78 #include <sys/kern_audit.h>
   79 #if KTRACE
   80 #include <sys/ktrace.h>
   81 #endif
   82 
   83 #include <mach/mach_types.h>
   84 #include <kern/mach_param.h>
   85 
   86 #include <machine/spl.h>
   87 
   88 thread_act_t cloneproc(struct proc *, int); 
   89 struct proc * forkproc(struct proc *, int);
   90 thread_act_t procdup();
   91 
   92 #define DOFORK  0x1     /* fork() system call */
   93 #define DOVFORK 0x2     /* vfork() system call */
   94 static int fork1(struct proc *, long, register_t *);
   95 
   96 /*
   97  * fork system call.
   98  */
   99 int
  100 fork(p, uap, retval)
  101         struct proc *p;
  102         void *uap;
  103         register_t *retval;
  104 {
  105         return (fork1(p, (long)DOFORK, retval));
  106 }
  107 
  108 /*
  109  * vfork system call
  110  */
  111 int
  112 vfork(p, uap, retval)
  113         struct proc *p;
  114         void *uap;
  115         register_t *retval;
  116 {
  117         register struct proc * newproc;
  118         register uid_t uid;
  119         thread_act_t cur_act = (thread_act_t)current_act();
  120         int count;
  121         task_t t;
  122         uthread_t ut;
  123         
  124         /*
  125          * Although process entries are dynamically created, we still keep
  126          * a global limit on the maximum number we will create.  Don't allow
  127          * a nonprivileged user to use the last process; don't let root
  128          * exceed the limit. The variable nprocs is the current number of
  129          * processes, maxproc is the limit.
  130          */
  131         uid = p->p_cred->p_ruid;
  132         if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
  133                 tablefull("proc");
  134                 retval[1] = 0;
  135                 return (EAGAIN);
  136         }
  137 
  138         /*
  139          * Increment the count of procs running with this uid. Don't allow
  140          * a nonprivileged user to exceed their current limit.
  141          */
  142         count = chgproccnt(uid, 1);
  143         if (uid != 0 && count > p->p_rlimit[RLIMIT_NPROC].rlim_cur) {
  144                 (void)chgproccnt(uid, -1);
  145                 return (EAGAIN);
  146         }
  147 
  148         ut = (struct uthread *)get_bsdthread_info(cur_act);
  149         if (ut->uu_flag & P_VFORK) {
  150                 printf("vfork called recursively by %s\n", p->p_comm);
  151                 (void)chgproccnt(uid, -1);
  152                 return (EINVAL);
  153         }
  154         p->p_flag  |= P_VFORK;
  155         p->p_vforkcnt++;
  156 
  157         /* The newly created process comes with signal lock held */
  158         newproc = (struct proc *)forkproc(p,1);
  159 
  160         LIST_INSERT_AFTER(p, newproc, p_pglist);
  161         newproc->p_pptr = p;
  162         newproc->task = p->task;
  163         LIST_INSERT_HEAD(&p->p_children, newproc, p_sibling);
  164         LIST_INIT(&newproc->p_children);
  165         LIST_INSERT_HEAD(&allproc, newproc, p_list);
  166         LIST_INSERT_HEAD(PIDHASH(newproc->p_pid), newproc, p_hash);
  167         TAILQ_INIT(& newproc->p_evlist);
  168         newproc->p_stat = SRUN;
  169         newproc->p_flag  |= P_INVFORK;
  170         newproc->p_vforkact = cur_act;
  171 
  172         ut->uu_flag |= P_VFORK;
  173         ut->uu_proc = newproc;
  174         ut->uu_userstate = (void *)act_thread_csave();
  175         ut->uu_vforkmask = ut->uu_sigmask;
  176 
  177         thread_set_child(cur_act, newproc->p_pid);
  178 
  179         newproc->p_stats->p_start = time;
  180         newproc->p_acflag = AFORK;
  181 
  182         /*
  183          * Preserve synchronization semantics of vfork.  If waiting for
  184          * child to exec or exit, set P_PPWAIT on child, and sleep on our
  185          * proc (in case of exit).
  186          */
  187         newproc->p_flag |= P_PPWAIT;
  188 
  189         /* drop the signal lock on the child */
  190         signal_unlock(newproc);
  191 
  192         retval[0] = newproc->p_pid;
  193         retval[1] = 1;                  /* mark child */
  194 
  195         return (0);
  196 }
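       /*
        * Typical userland usage (illustrative sketch; path/argv/envp are
        * hypothetical names): the child is expected to exec or exit almost
        * immediately, e.g.
        *
        *      if (vfork() == 0) {
        *              execve(path, argv, envp);
        *              _exit(127);
        *      }
        *
        * The parent does not resume until the child has called exec or
        * exit; vfork_return() below restores the parent's state at that
        * point, which is what the P_VFORK/P_INVFORK bookkeeping above
        * supports.
        */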
  197 
  198 /*
   199  * Return control to the parent thread at the end of a vfork (the child has exec'd or exited).
  200  */
  201 void
  202 vfork_return(th_act, p, p2, retval)
  203         thread_act_t th_act;
  204         struct proc * p;
  205         struct proc *p2;
  206         register_t *retval;
  207 {
  208         long flags;
  209         register uid_t uid;
  210         thread_act_t cur_act = (thread_act_t)current_act();
  211         int s, count;
  212         task_t t;
  213         uthread_t ut;
  214         
  215         ut = (struct uthread *)get_bsdthread_info(cur_act);
  216 
  217         act_thread_catt(ut->uu_userstate);
  218 
   219         /* Balance the vfork count; clear P_VFORK when the last outstanding vfork completes */
  220         p->p_vforkcnt--;
  221         if (p->p_vforkcnt <0)
  222                 panic("vfork cnt is -ve");
  223         if (p->p_vforkcnt <=0)
  224                 p->p_flag  &= ~P_VFORK;
  225         ut->uu_userstate = 0;
  226         ut->uu_flag &= ~P_VFORK;
  227         ut->uu_proc = 0;
  228         ut->uu_sigmask = ut->uu_vforkmask;
  229         p2->p_flag  &= ~P_INVFORK;
  230         p2->p_vforkact = (void *)0;
  231 
  232         thread_set_parent(cur_act, p2->p_pid);
  233 
  234         if (retval) {
  235                 retval[0] = p2->p_pid;
  236                 retval[1] = 0;                  /* mark parent */
  237         }
  238 
  239         return;
  240 }
  241 
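       /*
        * procdup()
        *
        * Create the Mach task and initial thread for a new BSD process.
        * The child's task is created from the parent's task (inheriting
        * its memory) unless the parent runs in the kernel task, in which
        * case an empty task is created.  The task is linked back to the
        * proc via set_bsdtask_info(), and the new thread is returned to
        * the caller without being resumed (fork1() calls thread_resume()).
        */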
  242 thread_act_t
  243 procdup(
  244         struct proc             *child,
  245         struct proc             *parent)
  246 {
  247         thread_act_t            thread;
  248         task_t                  task;
  249         kern_return_t   result;
  250         pmap_t                  pmap;
  251         extern task_t kernel_task;
  252 
  253         if (parent->task == kernel_task)
  254                 result = task_create_internal(TASK_NULL, FALSE, &task);
  255         else
  256                 result = task_create_internal(parent->task, TRUE, &task);
  257         if (result != KERN_SUCCESS)
  258             printf("fork/procdup: task_create failed. Code: 0x%x\n", result);
  259         child->task = task;
  260         /* task->proc = child; */
  261         set_bsdtask_info(task, child);
  262         if (child->p_nice != 0)
  263                 resetpriority(child);
  264                 
  265         result = thread_create(task, &thread);
  266         if (result != KERN_SUCCESS)
  267             printf("fork/procdup: thread_create failed. Code: 0x%x\n", result);
  268 
  269         return(thread);
  270 }
  271 
  272 
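       /*
        * fork1()
        *
        * Guts of the fork() system call: enforce the global (maxproc) and
        * per-uid (RLIMIT_NPROC) process limits, clone the parent via
        * cloneproc(), record the child's start time, resume the child, and
        * return its pid to the parent.  When called with DOVFORK the parent
        * additionally sleeps until the child clears P_PPWAIT by calling
        * exec or exit.
        */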
  273 static int
  274 fork1(p1, flags, retval)
  275         struct proc *p1;
  276         long flags;
  277         register_t *retval;
  278 {
  279         register struct proc *p2;
  280         register uid_t uid;
  281         thread_act_t newth;
  282         int s, count;
  283         task_t t;
  284 
  285         /*
  286          * Although process entries are dynamically created, we still keep
  287          * a global limit on the maximum number we will create.  Don't allow
  288          * a nonprivileged user to use the last process; don't let root
  289          * exceed the limit. The variable nprocs is the current number of
  290          * processes, maxproc is the limit.
  291          */
  292         uid = p1->p_cred->p_ruid;
  293         if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
  294                 tablefull("proc");
  295                 retval[1] = 0;
  296                 return (EAGAIN);
  297         }
  298 
  299         /*
  300          * Increment the count of procs running with this uid. Don't allow
  301          * a nonprivileged user to exceed their current limit.
  302          */
  303         count = chgproccnt(uid, 1);
  304         if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
  305                 (void)chgproccnt(uid, -1);
  306                 return (EAGAIN);
  307         }
  308 
  309         /* The newly created process comes with signal lock held */
  310         newth = cloneproc(p1, 1);
  311         thread_dup(newth);
  312         /* p2 = newth->task->proc; */
  313         p2 = (struct proc *)(get_bsdtask_info(get_threadtask(newth)));
  314 
  315         thread_set_child(newth, p2->p_pid);
  316 
  317         s = splhigh();
  318         p2->p_stats->p_start = time;
  319         splx(s);
  320         p2->p_acflag = AFORK;
  321 
  322         /*
  323          * Preserve synchronization semantics of vfork.  If waiting for
  324          * child to exec or exit, set P_PPWAIT on child, and sleep on our
  325          * proc (in case of exit).
  326          */
  327         if (flags == DOVFORK)
  328                 p2->p_flag |= P_PPWAIT;
  329         /* drop the signal lock on the child */
  330         signal_unlock(p2);
  331 
  332         (void) thread_resume(newth);
  333 
  334         /* drop the extra references we got during the creation */
  335         if (t = (task_t)get_threadtask(newth)) {
  336                 task_deallocate(t);
  337         }
  338         act_deallocate(newth);
  339 
  340         KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
  341 
  342         while (p2->p_flag & P_PPWAIT)
  343                 tsleep(p1, PWAIT, "ppwait", 0);
  344 
  345         retval[0] = p2->p_pid;
  346         retval[1] = 0;                  /* mark parent */
  347 
  348         return (0);
  349 }
  350 
  351 /*
  352  * cloneproc()
  353  *
  354  * Create a new process from a specified process.
   355  * On return the newly created child process holds the signal
   356  * lock (when called with lock set), blocking signal delivery
   357  * to it.  The fork() code must explicitly release this lock
   358  * before signals can be delivered.
  359  */
  360 thread_act_t
  361 cloneproc(p1, lock)
  362         register struct proc *p1;
  363         register int lock;
  364 {
  365         register struct proc *p2;
  366         thread_act_t th;
  367 
  368         p2 = (struct proc *)forkproc(p1,lock);
  369 
  370 
  371         th = procdup(p2, p1);   /* child, parent */
  372 
  373         LIST_INSERT_AFTER(p1, p2, p_pglist);
  374         p2->p_pptr = p1;
  375         LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
  376         LIST_INIT(&p2->p_children);
  377         LIST_INSERT_HEAD(&allproc, p2, p_list);
  378         LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
  379         TAILQ_INIT(&p2->p_evlist);
  380         /*
   381          * Make child runnable; start time is set by the caller.
  382          */
  383         p2->p_stat = SRUN;
  384 
  385         return(th);
  386 }
  387 
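       /*
        * forkproc()
        *
        * Allocate and initialize a new proc structure: pick an unused pid
        * (also avoiding in-use process-group and session ids), zero and
        * copy the appropriate sections of the parent's proc, and duplicate
        * or share its sub-structures (credentials, file descriptors,
        * limits, signal actions).  The Mach task and thread are created
        * separately, in procdup().
        */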
  388 struct proc *
  389 forkproc(p1, lock)
  390         register struct proc *p1;
  391         register int lock;
  392 {
  393         register struct proc *p2, *newproc;
  394         static int nextpid = 0, pidchecked = 0;
  395         thread_t th;
  396 
  397         /* Allocate new proc. */
  398         MALLOC_ZONE(newproc, struct proc *,
  399                         sizeof *newproc, M_PROC, M_WAITOK);
  400         MALLOC_ZONE(newproc->p_cred, struct pcred *,
  401                         sizeof *newproc->p_cred, M_SUBPROC, M_WAITOK);
  402         MALLOC_ZONE(newproc->p_stats, struct pstats *,
  403                         sizeof *newproc->p_stats, M_SUBPROC, M_WAITOK);
  404         MALLOC_ZONE(newproc->p_sigacts, struct sigacts *,
  405                         sizeof *newproc->p_sigacts, M_SUBPROC, M_WAITOK);
  406 
  407         /*
  408          * Find an unused process ID.  We remember a range of unused IDs
  409          * ready to use (from nextpid+1 through pidchecked-1).
  410          */
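       /*
        * For example (hypothetical numbers): if nextpid wraps to 100 and no
        * pid, process-group id, or session id below 137 is in use, the scan
        * below leaves pidchecked at 137, so pids 100-136 can be handed out
        * on subsequent calls without rescanning the proc lists.
        */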
  411         nextpid++;
  412 retry:
  413         /*
  414          * If the process ID prototype has wrapped around,
  415          * restart somewhat above 0, as the low-numbered procs
  416          * tend to include daemons that don't exit.
  417          */
  418         if (nextpid >= PID_MAX) {
  419                 nextpid = 100;
  420                 pidchecked = 0;
  421         }
  422         if (nextpid >= pidchecked) {
  423                 int doingzomb = 0;
  424 
  425                 pidchecked = PID_MAX;
  426                 /*
  427                  * Scan the active and zombie procs to check whether this pid
  428                  * is in use.  Remember the lowest pid that's greater
  429                  * than nextpid, so we can avoid checking for a while.
  430                  */
  431                 p2 = allproc.lh_first;
  432 again:
  433                 for (; p2 != 0; p2 = p2->p_list.le_next) {
  434                         while (p2->p_pid == nextpid ||
  435                             p2->p_pgrp->pg_id == nextpid ||
  436                                 p2->p_session->s_sid == nextpid) {
  437                                 nextpid++;
  438                                 if (nextpid >= pidchecked)
  439                                         goto retry;
  440                         }
  441                         if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
  442                                 pidchecked = p2->p_pid;
  443                         if (p2->p_pgrp && p2->p_pgrp->pg_id > nextpid && 
  444                             pidchecked > p2->p_pgrp->pg_id)
  445                                 pidchecked = p2->p_pgrp->pg_id;
  446                         if (p2->p_session->s_sid > nextpid &&
  447                                 pidchecked > p2->p_session->s_sid)
  448                                 pidchecked = p2->p_session->s_sid;
  449                 }
  450                 if (!doingzomb) {
  451                         doingzomb = 1;
  452                         p2 = zombproc.lh_first;
  453                         goto again;
  454                 }
  455         }
  456 
  457         nprocs++;
  458         p2 = newproc;
  459         p2->p_stat = SIDL;
  460         p2->p_pid = nextpid;
  461 
  462         /*
  463          * Make a proc table entry for the new process.
  464          * Start by zeroing the section of proc that is zero-initialized,
  465          * then copy the section that is copied directly from the parent.
  466          */
  467         bzero(&p2->p_startzero,
  468             (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
  469         bcopy(&p1->p_startcopy, &p2->p_startcopy,
  470             (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
  471         p2->vm_shm = (void *)NULL; /* Make sure it is zero */
  472 
  473         /*
  474          * Copy the audit info.
  475          */
  476         audit_proc_fork(p1, p2);
  477 
  478         /*
  479          * Duplicate sub-structures as needed.
  480          * Increase reference counts on shared objects.
  481          * The p_stats and p_sigacts substructs are set in vm_fork.
  482          */
  483         p2->p_flag = P_INMEM;
  484         p2->p_flag |= (p1->p_flag & P_CLASSIC); // copy from parent
  485         p2->p_flag |= (p1->p_flag & P_AFFINITY); // copy from parent
  486         if (p1->p_flag & P_PROFIL)
  487                 startprofclock(p2);
  488         bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
  489         p2->p_cred->p_refcnt = 1;
  490         crhold(p1->p_ucred);
  491         lockinit(&p2->p_cred->pc_lock, PLOCK, "proc cred", 0, 0);
  492         klist_init(&p2->p_klist);
  493 
  494         /* bump references to the text vnode */
  495         p2->p_textvp = p1->p_textvp;
  496         if (p2->p_textvp)
  497                 VREF(p2->p_textvp);
  498 
  499         p2->p_fd = fdcopy(p1);
  500         if (p1->vm_shm) {
  501                 shmfork(p1,p2);
  502         }
  503         /*
  504          * If p_limit is still copy-on-write, bump refcnt,
  505          * otherwise get a copy that won't be modified.
  506          * (If PL_SHAREMOD is clear, the structure is shared
  507          * copy-on-write.)
  508          */
  509         if (p1->p_limit->p_lflags & PL_SHAREMOD)
  510                 p2->p_limit = limcopy(p1->p_limit);
  511         else {
  512                 p2->p_limit = p1->p_limit;
  513                 p2->p_limit->p_refcnt++;
  514         }
  515 
  516         bzero(&p2->p_stats->pstat_startzero,
  517             (unsigned) ((caddr_t)&p2->p_stats->pstat_endzero -
  518             (caddr_t)&p2->p_stats->pstat_startzero));
  519         bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
  520             ((caddr_t)&p2->p_stats->pstat_endcopy -
  521              (caddr_t)&p2->p_stats->pstat_startcopy));
  522 
  523         if (p1->p_sigacts != NULL)
  524                 (void)memcpy(p2->p_sigacts,
  525                                 p1->p_sigacts, sizeof *p2->p_sigacts);
  526         else
  527                 (void)memset(p2->p_sigacts, 0, sizeof *p2->p_sigacts);
  528 
  529         if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
  530                 p2->p_flag |= P_CONTROLT;
  531 
  532         p2->p_argslen = p1->p_argslen;
  533         p2->p_argc = p1->p_argc;
  534         p2->p_xstat = 0;
  535         p2->p_ru = NULL;
  536 
  537         p2->p_debugger = 0;     /* don't inherit */
  538         lockinit(&p2->signal_lock, PVM, "signal", 0, 0);
   539         /* block delivery of all signals to the new process */
  540         if (lock)
  541                 signal_lock(p2);
  542         p2->sigwait = FALSE;
  543         p2->sigwait_thread = NULL;
  544         p2->exit_thread = NULL;
  545         p2->user_stack = p1->user_stack;
  546         p2->p_vforkcnt = 0;
  547         p2->p_vforkact = 0;
  548         TAILQ_INIT(&p2->p_uthlist);
  549         TAILQ_INIT(&p2->aio_activeq);
  550         TAILQ_INIT(&p2->aio_doneq);
  551         p2->aio_active_count = 0;
  552         p2->aio_done_count = 0;
  553 
  554 #if KTRACE
  555         /*
  556          * Copy traceflag and tracefile if enabled.
  557          * If not inherited, these were zeroed above.
  558          */
  559         if (p1->p_traceflag&KTRFAC_INHERIT) {
  560                 p2->p_traceflag = p1->p_traceflag;
  561                 if ((p2->p_tracep = p1->p_tracep) != NULL)
  562                         VREF(p2->p_tracep);
  563         }
  564 #endif
  565         return(p2);
  566 
  567 }
  568 
  569 #include <kern/zalloc.h>
  570 
  571 struct zone     *uthread_zone;
  572 int uthread_zone_inited = 0;
  573 
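       /*
        * Initialize the zone from which per-thread uthread structures are
        * allocated.  Called lazily from uthread_alloc() on first use.
        */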
  574 void
  575 uthread_zone_init()
  576 {
  577         if (!uthread_zone_inited) {
  578                 uthread_zone = zinit(sizeof(struct uthread),
  579                                                         THREAD_MAX * sizeof(struct uthread),
  580                                                         THREAD_CHUNK * sizeof(struct uthread),
  581                                                         "uthreads");
  582                 uthread_zone_inited = 1;
  583         }
  584 }
  585 
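       /*
        * Allocate and zero a uthread for a new thread in the given task.
        * For non-kernel tasks the uthread inherits the current thread's
        * signal mask and is linked onto the owning proc's uthread list
        * (under the funnel).
        */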
  586 void *
  587 uthread_alloc(task_t task, thread_act_t thr_act )
  588 {
  589         struct proc *p;
  590         struct uthread *uth, *uth_parent;
  591         void *ut;
  592         extern task_t kernel_task;
  593         boolean_t funnel_state;
  594 
  595         if (!uthread_zone_inited)
  596                 uthread_zone_init();
  597 
  598         ut = (void *)zalloc(uthread_zone);
  599         bzero(ut, sizeof(struct uthread));
  600 
  601         if (task != kernel_task) {
  602                 uth = (struct uthread *)ut;
  603                 p = (struct proc *) get_bsdtask_info(task);
  604 
  605                 funnel_state = thread_funnel_set(kernel_flock, TRUE);
  606                 uth_parent = (struct uthread *)get_bsdthread_info(current_act());
  607                 if (uth_parent) {
  608                         if (uth_parent->uu_flag & USAS_OLDMASK)
  609                                 uth->uu_sigmask = uth_parent->uu_oldmask;
  610                         else
  611                                 uth->uu_sigmask = uth_parent->uu_sigmask;
  612                 }
  613                 uth->uu_act = thr_act;
  614                 //signal_lock(p);
  615                 if (p)
  616                         TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
  617                 //signal_unlock(p);
  618                 (void)thread_funnel_set(kernel_flock, funnel_state);
  619         }
  620 
  621         return (ut);
  622 }
  623 
  624 
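       /*
        * Tear down a uthread when its thread goes away: free any select()
        * buffers, wait-queue sub-structure, and nlminfo state hanging off
        * it, unlink it from the proc's uthread list, and return it to the
        * uthread zone.
        */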
  625 void
  626 uthread_free(task_t task, void *uthread, void * bsd_info)
  627 {
  628         struct _select *sel;
  629         struct uthread *uth = (struct uthread *)uthread;
  630         struct proc * p = (struct proc *)bsd_info;
  631         extern task_t kernel_task;
  632         int size;
  633         boolean_t funnel_state;
  634         struct nlminfo *nlmp;
  635 
  636         /*
  637          * Per-thread audit state should never last beyond system
  638          * call return.  Since we don't audit the thread creation/
  639          * removal, the thread state pointer should never be
  640          * non-NULL when we get here.
  641          */
  642         assert(uth->uu_ar == NULL);
  643 
  644         sel = &uth->uu_state.ss_select;
   645         /* clean up the select bit space */
  646         if (sel->nbytes) {
  647                 FREE(sel->ibits, M_TEMP);
  648                 FREE(sel->obits, M_TEMP);
  649         }
  650 
  651         if (sel->allocsize && uth->uu_wqsub){
  652                 kfree(uth->uu_wqsub, sel->allocsize);
  653                 sel->count = sel->nfcount = 0;
  654                 sel->allocsize = 0;
  655                 uth->uu_wqsub = 0;
  656                 sel->wql = 0;
  657         }
  658 
  659         if ((nlmp = uth->uu_nlminfo)) {
  660                 uth->uu_nlminfo = 0;
  661                 FREE(nlmp, M_LOCKF);
  662         }
  663 
  664         if ((task != kernel_task) && p) {
  665                 funnel_state = thread_funnel_set(kernel_flock, TRUE);
  666                 //signal_lock(p);
  667                 TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
  668                 //signal_unlock(p);
  669                 (void)thread_funnel_set(kernel_flock, funnel_state);
  670         }
  671         /* and free the uthread itself */
  672         zfree(uthread_zone, (vm_offset_t)uthread);
  673 }
