sys/kern/kern_fork.c
/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.2/sys/kern/kern_fork.c 164286 2006-11-14 20:42:41Z cvs2svn $");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/mac.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sx.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

static int forksleep;	/* Place for fork1() to sleep on. */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
fork(td, uap)
	struct thread *td;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
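
/*
 * For illustration: the classic userland pattern this system call
 * implements.  A minimal sketch; run_child() is hypothetical and error
 * handling is omitted:
 *
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		_exit(run_child());		(child: fork(2) returned 0)
 *	else if (pid > 0)
 *		waitpid(pid, &status, 0);	(parent: pid of the child)
 */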

/*
 * MPSAFE
 */
/* ARGSUSED */
int
vfork(td, uap)
	struct thread *td;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
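
/*
 * vfork(2) differs from fork(2) only in the two extra flags passed to
 * fork1(): RFMEM shares the parent's address space with the child, and
 * RFPPWAIT makes the parent sleep until the child execs or exits (see
 * the P_PPWAIT handling near the end of fork1() below).  A minimal
 * sketch of the one safe usage pattern:
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execl("/bin/ls", "ls", (char *)NULL);
 *		_exit(127);	(exec failed; must not return or call exit(3))
 *	}
 */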

/*
 * MPSAFE
 */
int
rfork(td, uap)
	struct thread *td;
	struct rfork_args *uap;
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	AUDIT_ARG(fflags, uap->flags);
	error = fork1(td, uap->flags, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}
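
/*
 * rfork(2) exposes the fork1() flag word directly, minus the
 * kernel-only bits rejected above.  A hedged sketch of one use: a new
 * process that starts with a clean descriptor table instead of a copy:
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = rfork(RFPROC | RFCFDG);
 *	if (pid == 0) {
 *		(child: starts with no open descriptors at all)
 *	}
 */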

int	nprocs = 1;		/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
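
/*
 * The modulus can be tuned at runtime, for example (a sketch):
 *
 *	# sysctl kern.randompid=200
 *
 * after which each candidate pid is lastpid + 1 plus a random value
 * below 200, subject to the wrap and in-use checks in fork1() below.
 */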

int
fork1(td, flags, pages, procp)
	struct thread *td;
	int flags;
	int pages;
	struct proc **procp;
{
	struct proc *p1, *p2, *pptr;
	struct proc *newproc;
	int ok, trypid;
	static int curfail, pidchecked = 0;
	static struct timeval lastfail;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct thread *td2;
	struct ksegrp *kg2;
	struct sigacts *newsigacts;
	int error;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if ((p1->p_flag & P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			if (thread_single(SINGLE_BOUNDARY)) {
				PROC_UNLOCK(p1);
				return (ERESTART);
			}
			PROC_UNLOCK(p1);
		}

		vm_forkproc(td, NULL, NULL, flags);
		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td->td_proc->p_fd);
			fdfree(td);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG)
			fdunshare(p1, td);

		if ((p1->p_flag & P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			thread_single_end();
			PROC_UNLOCK(p1);
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Note 1:1 allows for forking with one thread coming out on the
	 * other side with the expectation that the process is about to
	 * exec.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		/*
		 * Idle the other threads for a second.
		 * Since the user space is copied, it must remain stable.
		 * In addition, all threads (from the user perspective)
		 * need to either be suspended or in the kernel,
		 * where they will try restart in the parent and will
		 * be aborted in the child.
		 */
		PROC_LOCK(p1);
		if (thread_single(SINGLE_NO_EXIT)) {
			/* Abort. Someone else is single threading before us. */
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
		/*
		 * All other activity in this process
		 * is now suspended at the user boundary,
		 * (or other safe places if we think of any).
		 */
	}

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
#ifdef MAC
	mac_init_proc(newproc);
#endif
#ifdef AUDIT
	audit_proc_alloc(newproc);
#endif
	knlist_init(&newproc->p_klist, &newproc->p_mtx, NULL, NULL, NULL);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	if ((nprocs >= maxproc - 10 &&
	    suser_cred(td->td_ucred, SUSER_RUID) != 0) ||
	    nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	error = suser_cred(td->td_ucred, SUSER_RUID | SUSER_ALLOWJAIL);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		PROC_LOCK(p1);
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(p1, RLIMIT_NPROC));
		PROC_UNLOCK(p1);
	}
	if (!ok) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			PROC_LOCK(p2);
			while (p2->p_pid == trypid ||
			    (p2->p_pgrp != NULL &&
			    (p2->p_pgrp->pg_id == trypid ||
			    (p2->p_session != NULL &&
			    p2->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked) {
					PROC_UNLOCK(p2);
					goto retry;
				}
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp != NULL) {
				if (p2->p_pgrp->pg_id > trypid &&
				    pidchecked > p2->p_pgrp->pg_id)
					pidchecked = p2->p_pgrp->pg_id;
				if (p2->p_session != NULL &&
				    p2->p_session->s_sid > trypid &&
				    pidchecked > p2->p_session->s_sid)
					pidchecked = p2->p_session->s_sid;
			}
			PROC_UNLOCK(p2);
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
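	/*
	 * A worked example of the cache above: with trypid 120 and only
	 * pids 120 and 123 in use, the scan advances trypid to 121 and
	 * lowers pidchecked to 123, so the next fork can hand out 122
	 * without rescanning the process lists.
	 */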
	sx_sunlock(&proctree_lock);

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG(pid, p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
				    NULL,
				    p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_LOCK_FAST(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_UNLOCK_FAST(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd,
			    p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	td2 = FIRST_THREAD_IN_PROC(p2);
	kg2 = FIRST_KSEGRP_IN_PROC(p2);

	/* Allocate and switch to an alternate kstack if specified. */
	if (pages != 0)
		vm_thread_new_altkstack(td2, pages);

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bzero(&kg2->kg_startzero,
	    __rangeof(struct ksegrp, kg_startzero, kg_endzero));

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
	    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));

	td2->td_sigstk = td->td_sigstk;
	td2->td_sigmask = td->td_sigmask;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = 0;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	/*
	 * Allow the scheduler to adjust the priority of the child and
	 * parent while we hold the sched_lock.
	 */
	sched_fork(td, td2);

	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(td->td_ucred);
	td2->td_ucred = crhold(p2->p_ucred);	/* XXXKSE */
#ifdef AUDIT
	audit_proc_fork(p1, p2);
#endif
	pargs_hold(p2->p_args);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	p2->p_limit = lim_hold(p1->p_limit);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs) */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);

	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, flags);

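	/*
	 * Classify the fork for the VM statistics: the first two flag
	 * patterns are exactly those passed by fork() and vfork() above,
	 * kernel threads are forked from proc0, and anything else came
	 * in through rfork().
	 */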
	if (flags == (RFFDG | RFPROC)) {
		atomic_add_int(&cnt.v_forks, 1);
		atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		atomic_add_int(&cnt.v_vforks, 1);
		atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		atomic_add_int(&cnt.v_kthreads, 1);
		atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		atomic_add_int(&cnt.v_rforks, 1);
		atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	mtx_lock_spin(&sched_lock);
	p2->p_state = PRS_NORMAL;

	/*
	 * If RFSTOPPED not requested, make child runnable and add to
	 * run queue.
	 */
	if ((flags & RFSTOPPED) == 0) {
		TD_SET_CAN_RUN(td2);
		setrunqueue(td2, SRQ_BORING);
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Now can be swapped.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE_LOCKED(&p1->p_klist, NOTE_FORK | p2->p_pid);

	PROC_UNLOCK(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);
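	/*
	 * (The child clears P_PPWAIT and wakes us when it execs or
	 * exits; see kern_exec.c and kern_exit.c.)
	 */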

	/*
	 * If other threads are waiting, let them continue now.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
		    td->td_ucred->cr_ruid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_destroy_proc(newproc);
#endif
#ifdef AUDIT
	audit_proc_free(newproc);
#endif
	uma_zfree(proc_zone, newproc);
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}
	tsleep(&forksleep, PUSER, "fork", hz / 2);
	return (error);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct proc *p;
	struct thread *td;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td = curthread;
	p = td->td_proc;
	td->td_oncpu = PCPU_GET(cpuid);
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	sched_lock.mtx_lock = (uintptr_t)td;
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
	    td, td->td_sched, p->p_pid, p->p_comm);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */

	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	td = curthread;
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_KTHREAD) {
		PROC_UNLOCK(p);
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	PROC_UNLOCK(p);
	mtx_assert(&Giant, MA_NOTOWNED);
}
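
/*
 * For kernel threads the callout invoked above is the thread's main
 * function, registered via kthread_create(9).  A minimal sketch of
 * such a consumer; my_kthread_main and my_proc are hypothetical:
 *
 *	static struct proc *my_proc;
 *
 *	static void
 *	my_kthread_main(void *arg __unused)
 *	{
 *		for (;;)
 *			tsleep(&my_proc, PWAIT, "mythr", hz);
 *		(returning here would trip the P_KTHREAD check above)
 *	}
 *
 *	kthread_create(my_kthread_main, NULL, &my_proc, 0, 0, "mythr");
 */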

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{

	userret(td, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}