FreeBSD/Linux Kernel Cross Reference
sys/bsd/kern/kern_exit.c


    1 /*
    2  * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
   26 /*
   27  * Copyright (c) 1982, 1986, 1989, 1991, 1993
   28  *      The Regents of the University of California.  All rights reserved.
   29  * (c) UNIX System Laboratories, Inc.
   30  * All or some portions of this file are derived from material licensed
   31  * to the University of California by American Telephone and Telegraph
   32  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   33  * the permission of UNIX System Laboratories, Inc.
   34  *
   35  * Redistribution and use in source and binary forms, with or without
   36  * modification, are permitted provided that the following conditions
   37  * are met:
   38  * 1. Redistributions of source code must retain the above copyright
   39  *    notice, this list of conditions and the following disclaimer.
   40  * 2. Redistributions in binary form must reproduce the above copyright
   41  *    notice, this list of conditions and the following disclaimer in the
   42  *    documentation and/or other materials provided with the distribution.
   43  * 3. All advertising materials mentioning features or use of this software
   44  *    must display the following acknowledgement:
   45  *      This product includes software developed by the University of
   46  *      California, Berkeley and its contributors.
   47  * 4. Neither the name of the University nor the names of its contributors
   48  *    may be used to endorse or promote products derived from this software
   49  *    without specific prior written permission.
   50  *
   51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   61  * SUCH DAMAGE.
   62  *
   63  *      @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
   64  */
   65  
   66 #include <machine/reg.h>
   67 #include <machine/psl.h>
   68 
   69 #include "compat_43.h"
   70 
   71 #include <sys/param.h>
   72 #include <sys/systm.h>
   73 #include <sys/ioctl.h>
   74 #include <sys/proc.h>
   75 #include <sys/tty.h>
   76 #include <sys/time.h>
   77 #include <sys/resource.h>
   78 #include <sys/kernel.h>
   79 #include <sys/buf.h>
   80 #include <sys/wait.h>
   81 #include <sys/file.h>
   82 #include <sys/vnode.h>
   83 #include <sys/syslog.h>
   84 #include <sys/malloc.h>
   85 #include <sys/resourcevar.h>
   86 #include <sys/ptrace.h>
   87 #include <sys/user.h>
   88 #include <sys/aio_kern.h>
   89 #include <sys/kern_audit.h>
   90 
   91 #include <mach/mach_types.h>
   92 #include <kern/thread.h>
   93 #include <kern/thread_act.h>
   94 #include <kern/sched_prim.h>
   95 #include <kern/assert.h>
   96 #if KTRACE   
   97 #include <sys/ktrace.h>
   98 #endif
   99 
  100 extern char init_task_failure_data[];
  101 int exit1 __P((struct proc *, int, int *));
  102 void proc_prepareexit(struct proc *p);
  103 void vfork_exit(struct proc *p, int rv);
  104 void vproc_exit(struct proc *p);
  105 
  106 /*
  107  * exit --
  108  *      Death of process.
  109  */
  110 struct exit_args {
  111         int     rval;
  112 };
  113 void
  114 exit(p, uap, retval)
  115         struct proc *p;
  116         struct exit_args *uap;
  117         int *retval;
  118 {
  119         exit1(p, W_EXITCODE(uap->rval, 0), retval);
  120 
  121         /* drop funnel before we return */
  122         thread_funnel_set(kernel_flock, FALSE);
  123         thread_exception_return();
  124         /* NOTREACHED */
  125         while (TRUE)
  126                 thread_block(THREAD_CONTINUE_NULL);
  127         /* NOTREACHED */
  128 }
  129 
  130 /*
  131  * Exit: deallocate address space and other resources, change proc state
  132  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
  133  * status and rusage for wait().  Check for child processes and orphan them.
  134  */
  135 int
  136 exit1(p, rv, retval)
  137         register struct proc *p;
  138         int rv;
  139         int * retval;
  140 {
  141         register struct proc *q, *nq;
  142         thread_act_t self = current_act();
  143         struct task *task = p->task;
  144         register int i,s;
  145         struct uthread *ut;
  146 
  147         /*
  148          * If a thread in this task has already
  149          * called exit(), then halt any others
  150          * right here.
  151          */
  152 
  153          ut = get_bsdthread_info(self);
  154          if (ut->uu_flag & P_VFORK) {
  155                         vfork_exit(p, rv);
   156                         vfork_return(self, p->p_pptr, p, retval);
  157                         unix_syscall_return(0);
  158                         /* NOT REACHED */
  159          }
   160         audit_syscall_exit(0, p, ut); /* Exit is always successful */
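               /*
                * Serialize the exit: only one thread becomes p->exit_thread;
                * any other thread of this task that gets here terminates itself.
                */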
  161         signal_lock(p);
  162         while (p->exit_thread != self) {
  163                 if (sig_try_locked(p) <= 0) {
  164                         if (get_threadtask(self) != task) {
  165                                 signal_unlock(p);
  166                                 return(0);
  167                         }
  168                         signal_unlock(p);
  169                         thread_terminate(self);
  170                         thread_funnel_set(kernel_flock, FALSE);
  171                         thread_exception_return();
  172                         /* NOTREACHED */
  173                 }
  174                 sig_lock_to_exit(p);
  175         }
  176         signal_unlock(p);
  177         if (p->p_pid == 1) {
  178                 printf("pid 1 exited (signal %d, exit %d)",
  179                     WTERMSIG(rv), WEXITSTATUS(rv));
  180                 panic("init died\nState at Last Exception:\n\n%s", 
  181                                                         init_task_failure_data);
  182         }
  183 
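               /*
                * Mark the process as exiting and do the BSD-side preparation
                * before asking Mach to tear the task down.
                */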
  184         s = splsched();
  185         p->p_flag |= P_WEXIT;
  186         splx(s);
  187         proc_prepareexit(p);
  188         p->p_xstat = rv;
  189 
  190         /* task terminate will call proc_terminate and that cleans it up */
  191         task_terminate_internal(task);
  192 
  193         return(0);
  194 }
  195 
  196 void
  197 proc_prepareexit(struct proc *p) 
  198 {
  199         int s;
  200         struct uthread *ut;
  201         exception_data_t        code[EXCEPTION_CODE_MAX];
  202         thread_act_t self = current_act();
  203 
  204         code[0] = 0xFF000001;                   /* Set terminate code */
  205         code[1] = p->p_pid;                             /* Pass out the pid     */
  206         (void)sys_perf_notify(p->task, &code, 2);       /* Notify the perf server */
  207 
  208         /*
  209          * Remove proc from allproc queue and from pidhash chain.
  210          * Need to do this before we do anything that can block.
   211          * Not doing so causes things like mount() to find this on allproc
   212          * in a partially cleaned state.
  213          */
  214         LIST_REMOVE(p, p_list);
  215         LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
  216         LIST_REMOVE(p, p_hash);
  217 
  218 #ifdef PGINPROF
  219         vmsizmon();
  220 #endif
  221         /*
  222          * If parent is waiting for us to exit or exec,
  223          * P_PPWAIT is set; we will wakeup the parent below.
  224          */
  225         p->p_flag &= ~(P_TRACED | P_PPWAIT);
  226         p->p_sigignore = ~0;
  227         p->p_siglist = 0;
  228         ut = get_bsdthread_info(self);
  229         ut->uu_siglist = 0;
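               /* Cancel the pending real interval timer; realitexpire is its expiry handler. */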
  230         untimeout(realitexpire, (caddr_t)p->p_pid);
  231 }
  232 
  233 void 
  234 proc_exit(struct proc *p)
  235 {
  236         register struct proc *q, *nq, *pp;
  237         struct task *task = p->task;
  238         register int i,s;
  239         boolean_t funnel_state;
  240 
   241         /* This can happen if thread_terminate() of the single thread
   242          * of the process got us here without exit1() having run.
   243          */
  244 
  245         funnel_state = thread_funnel_set(kernel_flock, TRUE);
  246         if( !(p->p_flag & P_WEXIT)) {
  247                 s = splsched();
  248                 p->p_flag |= P_WEXIT;
  249                 splx(s);
  250                 proc_prepareexit(p);    
  251         }
  252 
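               /*
                * Allocate the rusage record that will carry our final resource
                * usage to the parent's wait().
                */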
  253         MALLOC_ZONE(p->p_ru, struct rusage *,
  254                         sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
  255 
  256         /*
  257          * need to cancel async IO requests that can be cancelled and wait for those
  258          * already active.  MAY BLOCK!
  259          */
  260         _aio_exit( p );
  261 
  262         /*
  263          * Close open files and release open-file table.
  264          * This may block!
  265          */
  266         fdfree(p);
  267 
  268         /* Close ref SYSV Shared memory*/
  269         if (p->vm_shm)
  270                 shmexit(p);
  271         /* Release SYSV semaphores */
  272         semexit(p);
  273         
  274         if (SESS_LEADER(p)) {
  275                 register struct session *sp = p->p_session;
  276 
  277                 if (sp->s_ttyvp) {
  278                         struct vnode *ttyvp;
  279 
  280                         /*
  281                          * Controlling process.
  282                          * Signal foreground pgrp,
  283                          * drain controlling terminal
  284                          * and revoke access to controlling terminal.
  285                          */
  286                         if (sp->s_ttyp->t_session == sp) {
  287                                 if (sp->s_ttyp->t_pgrp)
  288                                         pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
  289                                 (void) ttywait(sp->s_ttyp);
  290                                 /*
  291                                  * The tty could have been revoked
  292                                  * if we blocked.
  293                                  */
  294                                 if (sp->s_ttyvp)
  295                                         VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
  296                         }
  297                         ttyvp = sp->s_ttyvp;
  298                         sp->s_ttyvp = NULL;
  299                         if (ttyvp)
  300                                 vrele(ttyvp);
  301                         /*
  302                          * s_ttyp is not zero'd; we use this to indicate
  303                          * that the session once had a controlling terminal.
  304                          * (for logging and informational purposes)
  305                          */
  306                 }
  307                 sp->s_leader = NULL;
  308         }
  309 
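               /*
                * Let fixjobc() update job-control state for our process group
                * now that we are leaving it.
                */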
  310         fixjobc(p, p->p_pgrp, 0);
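               /* Lift the file-size limit so the accounting record below is never truncated. */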
  311         p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
  312         (void)acct_process(p);
  313 
  314 #if KTRACE
  315         /* 
  316          * release trace file
  317          */
  318         p->p_traceflag = 0;     /* don't trace the vrele() */
  319         if (p->p_tracep) {
  320                 struct vnode *tvp = p->p_tracep;
  321                 p->p_tracep = NULL;
  322                 vrele(tvp);
  323         }
  324 #endif
  325 
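               /*
                * Orphan our children: reparent them to init, waking init first
                * so it can reap any that are already zombies.
                */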
  326         q = p->p_children.lh_first;
  327         if (q)          /* only need this if any child is S_ZOMB */
  328                 wakeup((caddr_t) initproc);
  329         for (; q != 0; q = nq) {
  330                 nq = q->p_sibling.le_next;
  331                 proc_reparent(q, initproc);
  332                 /*
  333                  * Traced processes are killed
  334                  * since their existence means someone is messing up.
  335                  */
  336                 if (q->p_flag & P_TRACED) {
  337                         q->p_flag &= ~P_TRACED;
  338                         if (q->sigwait_thread) {
  339                                 /*
  340                                  * The sigwait_thread could be stopped at a
  341                                  * breakpoint. Wake it up to kill.
   342                  * Need to do this as it could be a thread that is not
   343                  * the first thread in the task, so any attempt to kill
   344                  * the process would result in a deadlock on q->sigwait.
  345                                  */
  346                                 thread_resume((thread_act_t)q->sigwait_thread);
  347                                 clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
  348                                 threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
  349                         }
  350                         psignal(q, SIGKILL);
  351                 }
  352         }
  353 
  354         /*
  355          * Save exit status and final rusage info, adding in child rusage
  356          * info and self times.
  357          */
  358         *p->p_ru = p->p_stats->p_ru;
  359 
  360         timerclear(&p->p_ru->ru_utime);
  361         timerclear(&p->p_ru->ru_stime);
  362 
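               /*
                * CPU times are kept by Mach; fold the task and per-thread
                * times from task_info() into the BSD rusage.
                */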
  363         if (task) {
  364                 task_basic_info_data_t tinfo;
  365                 task_thread_times_info_data_t ttimesinfo;
  366                 int task_info_stuff, task_ttimes_stuff;
  367                 struct timeval ut,st;
  368 
  369                 task_info_stuff = TASK_BASIC_INFO_COUNT;
  370                 task_info(task, TASK_BASIC_INFO,
  371                           &tinfo, &task_info_stuff);
  372                 p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
  373                 p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
  374                 p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
  375                 p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
  376 
  377                 task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
  378                 task_info(task, TASK_THREAD_TIMES_INFO,
  379                           &ttimesinfo, &task_ttimes_stuff);
  380 
  381                 ut.tv_sec = ttimesinfo.user_time.seconds;
  382                 ut.tv_usec = ttimesinfo.user_time.microseconds;
  383                 st.tv_sec = ttimesinfo.system_time.seconds;
  384                 st.tv_usec = ttimesinfo.system_time.microseconds;
  385                 timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
  386                 timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
  387         }
  388 
  389         ruadd(p->p_ru, &p->p_stats->p_cru);
  390 
  391         /*
  392          * Free up profiling buffers.
  393          */
  394         {
  395                 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
  396 
  397                 p1 = p0->pr_next;
  398                 p0->pr_next = NULL;
  399                 p0->pr_scale = 0;
  400 
  401                 for (; p1 != NULL; p1 = pn) {
  402                         pn = p1->pr_next;
  403                         kfree((vm_offset_t)p1, sizeof *p1);
  404                 }
  405         }
  406 
  407         /*
  408          * Other substructures are freed from wait().
  409          */
  410         FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
  411         p->p_stats = NULL;
  412 
  413         FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
  414         p->p_sigacts = NULL;
  415 
  416         if (--p->p_limit->p_refcnt == 0)
  417                 FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
  418         p->p_limit = NULL;
  419 
  420         /* Free the auditing info */
  421         audit_proc_free(p);
  422 
  423         /*
  424          * Finish up by terminating the task
  425          * and halt this thread (only if a
  426          * member of the task exiting).
  427          */
  428         p->task = TASK_NULL;
  429         //task->proc = NULL;
  430         set_bsdtask_info(task, NULL);
  431 
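               /* Post NOTE_EXIT to any kqueue EVFILT_PROC watchers of this process. */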
  432         KNOTE(&p->p_klist, NOTE_EXIT);
  433 
  434         /*
  435          * Notify parent that we're gone.
  436          */
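               /*
                * P_NOCLDWAIT (normally set via SA_NOCLDWAIT) means the parent
                * does not want to reap us; give the zombie to init instead.
                */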
  437         if (p->p_pptr->p_flag & P_NOCLDWAIT) {
  438                 struct proc * pp = p->p_pptr;
  439 
  440                 /*
  441                  * Add child resource usage to parent before giving
  442                  * zombie to init
  443                  */
  444                 ruadd(&p->p_pptr->p_stats->p_cru, p->p_ru);
  445 
  446                 proc_reparent(p, initproc);
  447                 /* If there are no more children wakeup parent */
  448                 if (LIST_EMPTY(&pp->p_children))
  449                         wakeup((caddr_t)pp);
  450         }
  451         /* should be fine as parent proc would be initproc */
  452         pp = p->p_pptr;
  453         if (pp != initproc) {
  454                 pp->si_pid = p->p_pid;
  455                 pp->si_status = p->p_xstat;
  456                 pp->si_code = CLD_EXITED;
  457                 pp->si_uid = p->p_cred->p_ruid;
  458         }
  459         psignal(pp, SIGCHLD);
  460 
  461 
  462         /* mark as a zombie */
  463         p->p_stat = SZOMB;
  464 
  465         /* and now wakeup the parent */
  466         wakeup((caddr_t)p->p_pptr);
  467 
  468         (void) thread_funnel_set(kernel_flock, funnel_state);
  469 }
  470 
  471 
  472 struct wait4_args {
  473         int     pid;
  474         int *status;
  475         int options;
  476         struct rusage *rusage;
  477 };
  478 
  479 #if COMPAT_43
  480 int
  481 owait(p, uap, retval)
  482         struct proc *p;
  483         void *uap;
  484         int *retval;
  485 {
  486         struct wait4_args *a;
  487 
  488         a = (struct wait4_args *)get_bsduthreadarg(current_act());
  489 
  490         a->options = 0;
  491         a->rusage = NULL;
  492         a->pid = WAIT_ANY;
  493         a->status = NULL;
  494         return (wait1(p, a, retval, 1));
  495 }
  496 
  497 int
  498 wait4(p, uap, retval)
  499         struct proc *p;
  500         struct wait4_args *uap;
  501         int *retval;
  502 {
  503         return (wait1(p, uap, retval, 0));
  504 }
  505 
  506 struct owait3_args {
  507         int *status;
  508         int options;
  509         struct rusage *rusage;
  510 };
  511 
  512 int
  513 owait3(p, uap, retval)
  514         struct proc *p;
  515         struct owait3_args *uap;
  516         int *retval;
  517 {
  518         struct wait4_args *a;
  519 
   520         a = (struct wait4_args *)get_bsduthreadarg(current_act());
  521 
  522         a->rusage = uap->rusage;
  523         a->options = uap->options;
  524         a->status = uap->status;
  525         a->pid = WAIT_ANY;
  526 
  527         return (wait1(p, a, retval, 1));
  528 }
  529 
  530 #else
  531 #define wait1   wait4
  532 #endif
  533 
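       /*
        * Continuation passed to tsleep0() in wait1(): when the sleep is
        * resumed, re-enter wait1() with the argument and return-value
        * areas saved in the current uthread.
        */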
  534 int
  535 wait1continue(result)
  536 {
  537         void *vt;
  538         thread_act_t thread;
  539         int *retval;
  540         struct proc *p;
  541 
  542         if (result)
  543                 return(result);
  544 
  545         p = current_proc();
  546         thread = current_act();
  547         vt = (void *)get_bsduthreadarg(thread);
  548         retval = (int *)get_bsduthreadrval(thread);
  549         return(wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0));
  550 }
  551 
  552 int
  553 wait1(q, uap, retval, compat)
  554         register struct proc *q;
  555         register struct wait4_args *uap;
  556         register_t *retval;
  557 #if COMPAT_43
  558         int compat;
  559 #endif
  560 {
  561         register int nfound;
  562         register struct proc *p, *t;
  563         int status, error;
  564         struct vnode *tvp;
  565 
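               /* A pid argument of 0 means wait for any child in the caller's own process group. */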
  566 retry:
  567         if (uap->pid == 0)
  568                 uap->pid = -q->p_pgid;
  569 
  570 loop:
  571         nfound = 0;
  572         for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
  573                 if (uap->pid != WAIT_ANY &&
  574                     p->p_pid != uap->pid &&
  575                     p->p_pgid != -(uap->pid))
  576                         continue;
  577                 nfound++;
  578                 if (p->p_flag & P_WAITING) {
  579                         (void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
  580                         goto loop;
  581                 }
  582                 p->p_flag |= P_WAITING;   /* only allow single thread to wait() */
  583 
  584                 if (p->p_stat == SZOMB) {
  585                         retval[0] = p->p_pid;
  586 #if COMPAT_43
  587                         if (compat)
  588                                 retval[1] = p->p_xstat;
  589                         else
  590 #endif
  591                         if (uap->status) {
  592                                 status = p->p_xstat;    /* convert to int */
  593                                 if (error = copyout((caddr_t)&status,
  594                                     (caddr_t)uap->status,
  595                                                     sizeof(status))) {
  596                                         p->p_flag &= ~P_WAITING;
  597                                         wakeup(&p->p_stat);
  598                                         return (error);
  599                                 }
  600                         }
  601                         if (uap->rusage &&
  602                             (error = copyout((caddr_t)p->p_ru,
  603                             (caddr_t)uap->rusage,
  604                                              sizeof (struct rusage)))) {
  605                                 p->p_flag &= ~P_WAITING;
  606                                 wakeup(&p->p_stat);
  607                                 return (error);
  608                         }
  609                         /*
  610                          * If we got the child via a ptrace 'attach',
  611                          * we need to give it back to the old parent.
  612                          */
  613                         if (p->p_oppid && (t = pfind(p->p_oppid))) {
  614                                 p->p_oppid = 0;
  615                                 proc_reparent(p, t);
  616                                 if (t != initproc) {
  617                                         t->si_pid = p->p_pid;
  618                                         t->si_status = p->p_xstat;
  619                                         t->si_code = CLD_CONTINUED;
  620                                         t->si_uid = p->p_cred->p_ruid;
  621                                 }
  622                                 psignal(t, SIGCHLD);
  623                                 wakeup((caddr_t)t);
  624                                 p->p_flag &= ~P_WAITING;
  625                                 wakeup(&p->p_stat);
  626                                 return (0);
  627                         }
  628                         p->p_xstat = 0;
  629                         if (p->p_ru) {
  630                                 ruadd(&q->p_stats->p_cru, p->p_ru);
  631                                 FREE_ZONE(p->p_ru, sizeof *p->p_ru, M_ZOMBIE);
  632                                 p->p_ru = NULL;
  633                         } else {
  634                                 printf("Warning : lost p_ru for %s\n", p->p_comm);
  635                         }
  636 
  637                         /*
  638                          * Decrement the count of procs running with this uid.
  639                          */
  640                         (void)chgproccnt(p->p_cred->p_ruid, -1);
  641 
  642                         /*
  643                          * Free up credentials.
  644                          */
  645                         if (--p->p_cred->p_refcnt == 0) {
  646                                 struct ucred *ucr = p->p_ucred;
  647                                 struct pcred *pcr;
  648 
  649                                 if (ucr != NOCRED) {
  650                                         p->p_ucred = NOCRED;
  651                                         crfree(ucr);
  652                                 }
  653                                 pcr = p->p_cred;
  654                                 p->p_cred = NULL;
  655                                 FREE_ZONE(pcr, sizeof *pcr, M_SUBPROC);
  656                         }
  657 
  658                         /*
  659                          * Release reference to text vnode
  660                          */
  661                         tvp = p->p_textvp;
  662                         p->p_textvp = NULL;
  663                         if (tvp)
  664                                 vrele(tvp);
  665 
  666                         /*
  667                          * Finally finished with old proc entry.
  668                          * Unlink it from its process group and free it.
  669                          */
  670                         leavepgrp(p);
  671                         LIST_REMOVE(p, p_list); /* off zombproc */
  672                         LIST_REMOVE(p, p_sibling);
  673                         p->p_flag &= ~P_WAITING;
  674                         FREE_ZONE(p, sizeof *p, M_PROC);
  675                         nprocs--;
  676                         wakeup(&p->p_stat);
  677                         return (0);
  678                 }
  679                 if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
  680                     (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
  681                         p->p_flag |= P_WAITED;
  682                         retval[0] = p->p_pid;
  683 #if COMPAT_43
  684                         if (compat) {
  685                                 retval[1] = W_STOPCODE(p->p_xstat);
  686                                 error = 0;
  687                         } else
  688 #endif
  689                         if (uap->status) {
  690                                 status = W_STOPCODE(p->p_xstat);
  691                                 error = copyout((caddr_t)&status,
  692                                     (caddr_t)uap->status,
  693                                     sizeof(status));
  694                         } else
  695                                 error = 0;
  696                         p->p_flag &= ~P_WAITING;
  697                         wakeup(&p->p_stat);
  698                         return (error);
  699                 }
  700                 p->p_flag &= ~P_WAITING;
  701                 wakeup(&p->p_stat);
  702         }
  703         if (nfound == 0)
  704                 return (ECHILD);
  705 
  706         if (uap->options & WNOHANG) {
  707                 retval[0] = 0;
  708                 return (0);
  709         }
  710 
  711         if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue))
  712                 return (error);
  713 
  714         goto loop;
  715 }
  716 
  717 /*
  718  * make process 'parent' the new parent of process 'child'.
  719  */
  720 void
  721 proc_reparent(child, parent)
  722         register struct proc *child;
  723         register struct proc *parent;
  724 {
  725 
  726         if (child->p_pptr == parent)
  727                 return;
  728 
  729         LIST_REMOVE(child, p_sibling);
  730         LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
  731         child->p_pptr = parent;
  732 }
  733 
  734 /*
  735  *      Make the current process an "init" process, meaning
  736  *      that it doesn't have a parent, and that it won't be
  737  *      gunned down by kill(-1, 0).
  738  */
  739 kern_return_t
  740 init_process(void)
  741 {
  742         register struct proc *p = current_proc();
  743 
  744         if (suser(p->p_ucred, &p->p_acflag))
  745                 return(KERN_NO_ACCESS);
  746 
  747         if (p->p_pid != 1 && p->p_pgid != p->p_pid)
  748                 enterpgrp(p, p->p_pid, 0);
  749         p->p_flag |= P_SYSTEM;
  750 
  751         /*
  752          *      Take us out of the sibling chain, and
  753          *      out of our parent's child chain.
  754          */
  755         LIST_REMOVE(p, p_sibling);
  756         p->p_sibling.le_prev = NULL;
  757         p->p_sibling.le_next = NULL;
  758         p->p_pptr = kernproc;
  759 
  760         return(KERN_SUCCESS);
  761 }
  762 
  763 void
  764 process_terminate_self(void)
  765 {
  766         struct proc *p = current_proc();
  767 
  768         if (p != NULL) {
  769                 exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
  770                 /*NOTREACHED*/
  771         }
  772 }
  773 
  774 /*
  775  * Exit: deallocate address space and other resources, change proc state
  776  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
  777  * status and rusage for wait().  Check for child processes and orphan them.
  778  */
  779 
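       /*
        * vfork_exit() handles exit() from a vfork()ed child that is still
        * running on its parent's resources: only the BSD proc state is torn
        * down here; the shared Mach task is left alone.
        */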
  780 void
  781 vfork_exit(p, rv)
  782         struct proc *p;
  783         int rv;
  784 {
  785         register struct proc *q, *nq;
  786         thread_act_t self = current_act();
  787         struct task *task = p->task;
  788         register int i,s;
  789         struct uthread *ut;
  790         exception_data_t        code[EXCEPTION_CODE_MAX];
  791 
  792         /*
  793          * If a thread in this task has already
  794          * called exit(), then halt any others
  795          * right here.
  796          */
  797 
  798          ut = get_bsdthread_info(self);
  799 #ifdef FIXME
  800         signal_lock(p);
  801         while (p->exit_thread != self) {
  802                 if (sig_try_locked(p) <= 0) {
  803                         if (get_threadtask(self) != task) {
  804                                 signal_unlock(p);
  805                                 return;
  806                         }
  807                         signal_unlock(p);
  808                         thread_terminate(self);
  809                         thread_funnel_set(kernel_flock, FALSE);
  810                         thread_exception_return();
  811                         /* NOTREACHED */
  812                 }
  813                 sig_lock_to_exit(p);
  814         }
  815         signal_unlock(p);
  816         if (p->p_pid == 1) {
  817                 printf("pid 1 exited (signal %d, exit %d)",
  818                     WTERMSIG(rv), WEXITSTATUS(rv));
  819 panic("init died\nState at Last Exception:\n\n%s", init_task_failure_data);
  820         }
  821 #endif /* FIXME */
  822 
  823         s = splsched();
  824         p->p_flag |= P_WEXIT;
  825         splx(s);
  826 
  827         code[0] = 0xFF000001;                   /* Set terminate code */
  828         code[1] = p->p_pid;                             /* Pass out the pid     */
  829         (void)sys_perf_notify(p->task, &code, 2);       /* Notify the perf server */
  830 
  831         /*
  832          * Remove proc from allproc queue and from pidhash chain.
  833          * Need to do this before we do anything that can block.
   834          * Not doing so causes things like mount() to find this on allproc
   835          * in a partially cleaned state.
  836          */
  837         LIST_REMOVE(p, p_list);
  838         LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
  839         LIST_REMOVE(p, p_hash);
  840         /*
  841          * If parent is waiting for us to exit or exec,
  842          * P_PPWAIT is set; we will wakeup the parent below.
  843          */
  844         p->p_flag &= ~(P_TRACED | P_PPWAIT);
  845         p->p_sigignore = ~0;
  846         p->p_siglist = 0;
  847 
  848         ut->uu_siglist = 0;
  849         untimeout(realitexpire, (caddr_t)p->p_pid);
  850 
  851         p->p_xstat = rv;
  852 
  853         vproc_exit(p);
  854 }
  855 
  856 void 
  857 vproc_exit(struct proc *p)
  858 {
  859         register struct proc *q, *nq, *pp;
  860         struct task *task = p->task;
  861         register int i,s;
  862         boolean_t funnel_state;
  863 
  864         MALLOC_ZONE(p->p_ru, struct rusage *,
  865                         sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK);
  866 
  867         /*
  868          * Close open files and release open-file table.
  869          * This may block!
  870          */
  871         fdfree(p);
  872 
  873         if (SESS_LEADER(p)) {
  874                 register struct session *sp = p->p_session;
  875 
  876                 if (sp->s_ttyvp) {
  877                         struct vnode *ttyvp;
  878 
  879                         /*
  880                          * Controlling process.
  881                          * Signal foreground pgrp,
  882                          * drain controlling terminal
  883                          * and revoke access to controlling terminal.
  884                          */
  885                         if (sp->s_ttyp->t_session == sp) {
  886                                 if (sp->s_ttyp->t_pgrp)
  887                                         pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
  888                                 (void) ttywait(sp->s_ttyp);
  889                                 /*
  890                                  * The tty could have been revoked
  891                                  * if we blocked.
  892                                  */
  893                                 if (sp->s_ttyvp)
  894                                         VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
  895                         }
  896                         ttyvp = sp->s_ttyvp;
  897                         sp->s_ttyvp = NULL;
  898                         if (ttyvp)
  899                                 vrele(ttyvp);
  900                         /*
  901                          * s_ttyp is not zero'd; we use this to indicate
  902                          * that the session once had a controlling terminal.
  903                          * (for logging and informational purposes)
  904                          */
  905                 }
  906                 sp->s_leader = NULL;
  907         }
  908 
  909         fixjobc(p, p->p_pgrp, 0);
  910         p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
  911 
  912 #if KTRACE
  913         /* 
  914          * release trace file
  915          */
  916         p->p_traceflag = 0;     /* don't trace the vrele() */
  917         if (p->p_tracep) {
  918                 struct vnode *tvp = p->p_tracep;
  919                 p->p_tracep = NULL;
  920                 vrele(tvp);
  921         }
  922 #endif
  923 
  924         q = p->p_children.lh_first;
  925         if (q)          /* only need this if any child is S_ZOMB */
  926                 wakeup((caddr_t) initproc);
  927         for (; q != 0; q = nq) {
  928                 nq = q->p_sibling.le_next;
  929                 proc_reparent(q, initproc);
  930                 /*
  931                  * Traced processes are killed
  932                  * since their existence means someone is messing up.
  933                  */
  934                 if (q->p_flag & P_TRACED) {
  935                         q->p_flag &= ~P_TRACED;
  936                         if (q->sigwait_thread) {
  937                                 /*
  938                                  * The sigwait_thread could be stopped at a
  939                                  * breakpoint. Wake it up to kill.
   940                  * Need to do this as it could be a thread that is not
   941                  * the first thread in the task, so any attempt to kill
   942                  * the process would result in a deadlock on q->sigwait.
  943                                  */
  944                                 thread_resume((thread_act_t)q->sigwait_thread);
  945                                 clear_wait(q->sigwait_thread, THREAD_INTERRUPTED);
  946                                 threadsignal((thread_act_t)q->sigwait_thread, SIGKILL, 0);
  947                         }
  948                         psignal(q, SIGKILL);
  949                 }
  950         }
  951 
  952         /*
  953          * Save exit status and final rusage info, adding in child rusage
  954          * info and self times.
  955          */
  956         *p->p_ru = p->p_stats->p_ru;
  957 
  958         timerclear(&p->p_ru->ru_utime);
  959         timerclear(&p->p_ru->ru_stime);
  960 
  961 #ifdef  FIXME
  962         if (task) {
  963                 task_basic_info_data_t tinfo;
  964                 task_thread_times_info_data_t ttimesinfo;
  965                 int task_info_stuff, task_ttimes_stuff;
  966                 struct timeval ut,st;
  967 
  968                 task_info_stuff = TASK_BASIC_INFO_COUNT;
  969                 task_info(task, TASK_BASIC_INFO,
  970                           &tinfo, &task_info_stuff);
  971                 p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds;
  972                 p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds;
  973                 p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds;
  974                 p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds;
  975 
  976                 task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT;
  977                 task_info(task, TASK_THREAD_TIMES_INFO,
  978                           &ttimesinfo, &task_ttimes_stuff);
  979 
  980                 ut.tv_sec = ttimesinfo.user_time.seconds;
  981                 ut.tv_usec = ttimesinfo.user_time.microseconds;
  982                 st.tv_sec = ttimesinfo.system_time.seconds;
  983                 st.tv_usec = ttimesinfo.system_time.microseconds;
  984                 timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime);
  985                 timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
  986         }
  987 #endif /* FIXME */
  988 
  989         ruadd(p->p_ru, &p->p_stats->p_cru);
  990 
  991         /*
  992          * Free up profiling buffers.
  993          */
  994         {
  995                 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
  996 
  997                 p1 = p0->pr_next;
  998                 p0->pr_next = NULL;
  999                 p0->pr_scale = 0;
 1000 
 1001                 for (; p1 != NULL; p1 = pn) {
 1002                         pn = p1->pr_next;
 1003                         kfree((vm_offset_t)p1, sizeof *p1);
 1004                 }
 1005         }
 1006 
 1007         /*
 1008          * Other substructures are freed from wait().
 1009          */
 1010         FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC);
 1011         p->p_stats = NULL;
 1012 
 1013         FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC);
 1014         p->p_sigacts = NULL;
 1015 
 1016         if (--p->p_limit->p_refcnt == 0)
 1017                 FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC);
 1018         p->p_limit = NULL;
 1019 
 1020         /*
 1021          * Finish up by terminating the task
 1022          * and halt this thread (only if a
 1023          * member of the task exiting).
 1024          */
 1025         p->task = TASK_NULL;
 1026 
 1027         /*
 1028          * Notify parent that we're gone.
 1029          */
 1030         pp = p->p_pptr;
 1031         if (pp != initproc) {
 1032                 pp->si_pid = p->p_pid;
 1033                 pp->si_status = p->p_xstat;
 1034                 pp->si_code = CLD_EXITED;
 1035                 pp->si_uid = p->p_cred->p_ruid;
 1036         }
 1037         psignal(p->p_pptr, SIGCHLD);
 1038 
 1039         /* mark as a zombie */
 1040         p->p_stat = SZOMB;
 1041 
 1042         /* and now wakeup the parent */
 1043         wakeup((caddr_t)p->p_pptr);
 1044 }
