FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_exit.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/6.1/sys/kern/kern_exit.c 158179 2006-04-30 16:44:43Z cvs2svn $");
39
40 #include "opt_compat.h"
41 #include "opt_ktrace.h"
42 #include "opt_mac.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysproto.h>
47 #include <sys/eventhandler.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/pioctl.h>
54 #include <sys/tty.h>
55 #include <sys/wait.h>
56 #include <sys/vmmeter.h>
57 #include <sys/vnode.h>
58 #include <sys/resourcevar.h>
59 #include <sys/signalvar.h>
60 #include <sys/sched.h>
61 #include <sys/sx.h>
62 #include <sys/syscallsubr.h>
63 #include <sys/ptrace.h>
64 #include <sys/acct.h> /* for acct_process() function prototype */
65 #include <sys/filedesc.h>
66 #include <sys/mac.h>
67 #include <sys/shm.h>
68 #include <sys/sem.h>
69 #ifdef KTRACE
70 #include <sys/ktrace.h>
71 #endif
72
73 #include <vm/vm.h>
74 #include <vm/vm_extern.h>
75 #include <vm/vm_param.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_page.h>
79 #include <vm/uma.h>
80
/*
 * Malloc type for the zombie process' saved rusage (p_ru).
 * Required to be non-static for SysVR4 emulator.
 */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/*
 * Hook for NFS teardown procedure.  Left NULL until the lockd/nlm code
 * registers itself; exit1() checks for NULL before calling through it.
 */
void (*nlminfo_release_p)(struct proc *p);
86
/*
 * exit --
 *	Death of process.
 *
 * System call entry point for _exit(2): packs the user-supplied return
 * value into wait(2) status format and hands off to exit1(), which does
 * not return.
 *
 * MPSAFE
 */
void
sys_exit(struct thread *td, struct sys_exit_args *uap)
{

	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
100
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * 'rv' is a wait(2)-format status word (see W_EXITCODE).  This function
 * never returns: it ends by calling thread_exit().  Must be called with
 * no locks held other than (possibly) Giant, which is dropped on entry.
 */
void
exit1(struct thread *td, int rv)
{
	struct proc *p, *nq, *q;
	struct tty *tp;
	struct vnode *ttyvp;
	struct vmspace *vm;
	struct vnode *vtmp;
#ifdef KTRACE
	struct vnode *tracevp;
	struct ucred *tracecred;
#endif
	struct plimit *plim;
	int locked, refcnt;

	/*
	 * Drop Giant if caller has it.  Eventually we should warn about
	 * being called with Giant held.
	 */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

	p = td->td_proc;
	/* Init exiting is unrecoverable; report how it died and panic. */
	if (p == initproc) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_HADTHREADS) {
retry:
		/*
		 * First check if some other thread got here before us..
		 * if so, act appropriately (exit or suspend).
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads. This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If there is already a thread singler after resumption,
		 * calling thread_single will fail; in that case, we just
		 * re-check all suspension requests, the thread should
		 * either be suspended there or exit.
		 */
		if (thread_single(SINGLE_EXIT))
			goto retry;

		/*
		 * All other activity in this process is now stopped.
		 * Threading support has been turned off.
		 */
	}

	/*
	 * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
	 * on our vmspace, so we should block below until they have
	 * released their reference to us.  Note that if they have
	 * requested S_EXIT stops we will block here until they ack
	 * via PIOCCONT.
	 */
	_STOPEVENT(p, S_EXIT, rv);

	/*
	 * Note that we are exiting and do another wakeup of anyone in
	 * PIOCWAIT in case they aren't listening for S_EXIT stops or
	 * decided to wait again after we told them we are exiting.
	 */
	p->p_flag |= P_WEXIT;
	wakeup(&p->p_stype);

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);
	PROC_UNLOCK(p);

	/* Are we a task leader?  If so, kill and reap all our peers. */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		/* Peers remove themselves from the list as they exit. */
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/* Allocate the rusage snapshot that wait(2) will hand back. */
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(td->td_siglist);

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		/* Clearing it_interval stops the handler from rearming. */
		timevalclear(&p->p_realtimer.it_interval);
		msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
		KASSERT(!timevalisset(&p->p_realtimer.it_value),
		    ("realtime timer is still armed"));
	}
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	mtx_lock(&Giant);	/* XXX: not sure if needed */
	funsetownlst(&p->p_sigiolst);
	mtx_unlock(&Giant);

	/*
	 * If this process has an nlminfo data area (for lockd), release it
	 */
	if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
		(*nlminfo_release_p)(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	atomic_add_int(&vm->vm_exitingcnt, 1);
	/* Lock-free decrement of vm_refcnt via compare-and-swap loop. */
	do
		refcnt = vm->vm_refcnt;
	while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		shmexit(vm);
		pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
		(void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
	}

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp;

		sp = p->p_session;
		if (sp->s_ttyvp) {
			locked = VFS_LOCK_GIANT(sp->s_ttyvp->v_mount);
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				tp = sp->s_ttyp;
				if (sp->s_ttyp->t_pgrp) {
					PGRP_LOCK(sp->s_ttyp->t_pgrp);
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
					PGRP_UNLOCK(sp->s_ttyp->t_pgrp);
				}
				/* XXX tp should be locked. */
				sx_xunlock(&proctree_lock);
				(void) ttywait(tp);
				sx_xlock(&proctree_lock);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp) {
					ttyvp = sp->s_ttyvp;
					SESS_LOCK(p->p_session);
					sp->s_ttyvp = NULL;
					SESS_UNLOCK(p->p_session);
					sx_xunlock(&proctree_lock);
					VOP_LOCK(ttyvp, LK_EXCLUSIVE, td);
					VOP_REVOKE(ttyvp, REVOKEALL);
					vput(ttyvp);
					sx_xlock(&proctree_lock);
				}
			}
			/* Drop any remaining reference to the ctty vnode. */
			if (sp->s_ttyvp) {
				ttyvp = sp->s_ttyvp;
				SESS_LOCK(p->p_session);
				sp->s_ttyvp = NULL;
				SESS_UNLOCK(p->p_session);
				vrele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
			VFS_UNLOCK_GIANT(locked);
		}
		SESS_LOCK(p->p_session);
		sp->s_leader = NULL;
		SESS_UNLOCK(p->p_session);
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	p->p_traceflag = 0;	/* don't trace the vrele() */
	tracevp = p->p_tracevp;
	p->p_tracevp = NULL;
	tracecred = p->p_tracecred;
	p->p_tracecred = NULL;
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		locked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(locked);
	}
	if (tracecred != NULL)
		crfree(tracecred);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		locked = VFS_LOCK_GIANT(vtmp->v_mount);
		vrele(vtmp);
		VFS_UNLOCK_GIANT(locked);
	}

	/*
	 * Release our limits structure.
	 */
	PROC_LOCK(p);
	plim = p->p_limit;
	p->p_limit = NULL;
	PROC_UNLOCK(p);
	lim_free(plim);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/* Reparent all of our children to init. */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Save exit status and finalize rusage info except for times,
	 * adding in child rusage info.
	 */
	PROC_LOCK(p);
	p->p_xstat = rv;
	p->p_xthread = td;
	p->p_stats->p_ru.ru_nvcsw++;
	*p->p_ru = p->p_stats->p_ru;

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);

	/*
	 * Just delete all entries in the p_klist. At this point we won't
	 * report any more events, and there are nasty race conditions that
	 * can beat us if we don't.
	 */
	knlist_clear(&p->p_klist, 1);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		p->p_sigparent = SIGCHLD;
		/* p_pptr is now initproc; lock the new parent. */
		PROC_LOCK(p->p_pptr);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup(pp);
	} else
		mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

	if (p->p_pptr == initproc)
		psignal(p->p_pptr, SIGCHLD);
	else if (p->p_sigparent != 0)
		psignal(p->p_pptr, p->p_sigparent);
	PROC_UNLOCK(p->p_pptr);

	/*
	 * If this is a kthread, then wakeup anyone waiting for it to exit.
	 */
	if (p->p_flag & P_KTHREAD)
		wakeup(p);
	PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, &proctree_lock.sx_object,
	    "process (pid %d) exiting", p->p_pid);

	PROC_LOCK(p);
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	mtx_lock_spin(&sched_lock);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	sched_exit(p->p_pptr, td);

	/*
	 * Hopefully no one will try to deliver a signal to the process this
	 * late in the game.
	 */
	knlist_destroy(&p->p_klist);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}
551
#ifdef COMPAT_43
/*
 * owait(2): old-style wait compatibility entry point.  The dirty work is
 * handled by kern_wait(); on success the raw status word is returned in
 * the second return register (td_retval[1]), which the old ABI expects.
 * Always waits for any child (WAIT_ANY) with no options and no rusage.
 *
 * MPSAFE.
 */
int
owait(struct thread *td, struct owait_args *uap __unused)
{
	int error, status;

	error = kern_wait(td, WAIT_ANY, &status, 0, NULL);
	if (error == 0)
		td->td_retval[1] = status;
	return (error);
}
#endif /* COMPAT_43 */
569
570 /*
571 * The dirty work is handled by kern_wait().
572 *
573 * MPSAFE.
574 */
575 int
576 wait4(struct thread *td, struct wait_args *uap)
577 {
578 struct rusage ru, *rup;
579 int error, status;
580
581 if (uap->rusage != NULL)
582 rup = &ru;
583 else
584 rup = NULL;
585 error = kern_wait(td, uap->pid, &status, uap->options, rup);
586 if (uap->status != NULL && error == 0)
587 error = copyout(&status, uap->status, sizeof(status));
588 if (uap->rusage != NULL && error == 0)
589 error = copyout(&ru, uap->rusage, sizeof(struct rusage));
590 return (error);
591 }
592
/*
 * Common code for wait4(2), owait(2) and emulation layers: scan our
 * children for one matching 'pid' (a pid, a -pgid, 0 for our own process
 * group, or WAIT_ANY) and reap it if it is a zombie, or report a stop or
 * continue event if 'options' request it.  Blocks unless WNOHANG is set.
 * On success, the reaped child's pid is returned in td_retval[0] and
 * *status / *rusage are filled in when non-NULL.
 */
int
kern_wait(struct thread *td, pid_t pid, int *status, int options,
    struct rusage *rusage)
{
	struct proc *p, *q, *t;
	int error, nfound;

	q = td->td_proc;
	/* pid == 0 means "any child in our own process group". */
	if (pid == 0) {
		PROC_LOCK(q);
		pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	/* Reject any option bits we do not understand. */
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
loop:
	/* Consume any pending child-status notification before scanning. */
	if (q->p_flag & P_STATCHILD) {
		PROC_LOCK(q);
		q->p_flag &= ~P_STATCHILD;
		PROC_UNLOCK(q);
	}
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			PROC_UNLOCK(p);
			continue;
		}
		/* Skip children we are not allowed to wait for (MAC etc.). */
		if (p_canwait(td, p)) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		if (p->p_state == PRS_ZOMBIE) {

			/*
			 * It is possible that the last thread of this
			 * process is still running on another CPU
			 * in thread_exit() after having dropped the process
			 * lock via PROC_UNLOCK() but before it has completed
			 * cpu_throw().  In that case, the other thread must
			 * still hold sched_lock, so simply by acquiring
			 * sched_lock once we will wait long enough for the
			 * thread to exit in that case.
			 */
			mtx_lock_spin(&sched_lock);
			mtx_unlock_spin(&sched_lock);

			td->td_retval[0] = p->p_pid;
			if (status)
				*status = p->p_xstat;	/* convert to int */
			if (rusage) {
				*rusage = *p->p_ru;
				calcru(p, &rusage->ru_utime, &rusage->ru_stime);
			}

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 *
			 * NOTE(review): p->p_oppid is read here after
			 * PROC_UNLOCK(p); presumably safe because we still
			 * hold proctree_lock and the child is a zombie —
			 * confirm against later FreeBSD revisions.
			 */
			PROC_UNLOCK(p);
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PROC_LOCK(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				PROC_UNLOCK(p);
				psignal(t, SIGCHLD);
				wakeup(t);
				/* pfind() returned 't' locked. */
				PROC_UNLOCK(t);
				sx_xunlock(&proctree_lock);
				return (0);
			}

			/*
			 * Remove other references to this process to ensure
			 * we have an exclusive reference.
			 */
			sx_xlock(&allproc_lock);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			sx_xunlock(&allproc_lock);
			LIST_REMOVE(p, p_sibling);
			leavepgrp(p);
			sx_xunlock(&proctree_lock);

			/*
			 * As a side effect of this lock, we know that
			 * all other writes to this proc are visible now, so
			 * no more locking is needed for p.
			 */
			PROC_LOCK(p);
			p->p_xstat = 0;		/* XXX: why? */
			PROC_UNLOCK(p);
			PROC_LOCK(q);
			/* Fold the child's resource usage into our totals. */
			ruadd(&q->p_stats->p_cru, &q->p_crux, p->p_ru,
			    &p->p_rux);
			PROC_UNLOCK(q);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free credentials, arguments, and sigacts.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;
			pargs_drop(p->p_args);
			p->p_args = NULL;
			sigacts_free(p->p_sigacts);
			p->p_sigacts = NULL;

			/*
			 * Do any thread-system specific cleanups.
			 */
			thread_wait(p);

			/*
			 * Give vm and machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			vm_waitproc(p);
#ifdef MAC
			mac_destroy_proc(p);
#endif
			KASSERT(FIRST_THREAD_IN_PROC(p),
			    ("kern_wait: no residual thread!"));
			uma_zfree(proc_zone, p);
			sx_xlock(&allproc_lock);
			nprocs--;
			sx_xunlock(&allproc_lock);
			return (0);
		}
		/* Not a zombie: report a stop if WUNTRACED/ptrace asks. */
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount == p->p_numthreads) &&
		    (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			mtx_unlock_spin(&sched_lock);
			/* P_WAITED: report each stop only once. */
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			PROC_UNLOCK(p);
			return (0);
		}
		mtx_unlock_spin(&sched_lock);
		/* Report a SIGCONT-resumed child if WCONTINUED was given. */
		if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			p->p_flag &= ~P_CONTINUED;
			PROC_UNLOCK(p);

			if (status)
				*status = SIGCONT;
			return (0);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		return (ECHILD);
	}
	if (options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		return (0);
	}
	/*
	 * Sleep until a child posts status; re-check P_STATCHILD under the
	 * proc lock first so we don't miss a wakeup that raced the scan.
	 */
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	if (q->p_flag & P_STATCHILD) {
		q->p_flag &= ~P_STATCHILD;
		error = 0;
	} else
		error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error)
		return (error);
	goto loop;
}
794
795 /*
796 * Make process 'parent' the new parent of process 'child'.
797 * Must be called with an exclusive hold of proctree lock.
798 */
799 void
800 proc_reparent(struct proc *child, struct proc *parent)
801 {
802
803 sx_assert(&proctree_lock, SX_XLOCKED);
804 PROC_LOCK_ASSERT(child, MA_OWNED);
805 if (child->p_pptr == parent)
806 return;
807
808 LIST_REMOVE(child, p_sibling);
809 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
810 child->p_pptr = parent;
811 }
Cache object: 5d5780ef741653d5ec2d198dfb0c2a65
|