FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/kern/kern_fork.c 121688 2003-10-29 15:23:09Z bde $");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/mac.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/user.h>
#include <machine/critical.h>

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
        int     dummy;
};
#endif

static int forksleep;   /* Place for fork1() to sleep on. */

/*
 * MPSAFE
 */
/* ARGSUSED */
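/*
 * fork(2): create a new process.  RFFDG gives the child a copy of the
 * parent's file descriptor table; RFPROC creates the new process itself.
 * On success the child's pid is handed back to the parent in td_retval[0].
 */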
int
fork(td, uap)
        struct thread *td;
        struct fork_args *uap;
{
        int error;
        struct proc *p2;

        error = fork1(td, RFFDG | RFPROC, 0, &p2);
        if (error == 0) {
                td->td_retval[0] = p2->p_pid;
                td->td_retval[1] = 0;
        }
        return (error);
}

/*
 * MPSAFE
 */
/* ARGSUSED */
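/*
 * vfork(2): like fork(2), but RFMEM shares the parent's address space
 * with the child, and RFPPWAIT makes the parent sleep until the child
 * execs or exits (see the P_PPWAIT wait loop at the end of fork1()).
 */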
int
vfork(td, uap)
        struct thread *td;
        struct vfork_args *uap;
{
        int error;
        struct proc *p2;

        error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
        if (error == 0) {
                td->td_retval[0] = p2->p_pid;
                td->td_retval[1] = 0;
        }
        return (error);
}

/*
 * MPSAFE
 */
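/*
 * rfork(2): lets userland pass RF* flags directly, so kernel-only flags
 * (RFKERNELONLY) must be rejected here.  With RFPROC clear no child is
 * created and fork1() hands back a NULL proc pointer, hence the p2 check
 * below.
 */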
int
rfork(td, uap)
        struct thread *td;
        struct rfork_args *uap;
{
        int error;
        struct proc *p2;

        /* Don't allow kernel-only flags. */
        if ((uap->flags & RFKERNELONLY) != 0)
                return (EINVAL);
        error = fork1(td, uap->flags, 0, &p2);
        if (error == 0) {
                td->td_retval[0] = p2 ? p2->p_pid : 0;
                td->td_retval[1] = 0;
        }
        return (error);
}

int nprocs = 1;         /* process 0 */
int lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity check the modulus value
 * here, rather than in the critical paths that use it.  Don't let it be
 * too small or we pointlessly waste entropy, and don't let it be
 * impossibly large.  Using a modulus that is too big causes a LOT more
 * process table scans and slows down fork processing as the pidchecked
 * caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
        int error, pid;

        sysctl_wire_old_buffer(req, sizeof(int));
        sx_xlock(&allproc_lock);
        pid = randompid;
        error = sysctl_handle_int(oidp, &pid, 0, req);
        if (error == 0 && req->newptr != NULL) {
                if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
                        pid = PID_MAX - 100;
                else if (pid < 2)                       /* NOP */
                        pid = 0;
                else if (pid < 100)                     /* Make it reasonable */
                        pid = 100;
                randompid = pid;
        }
        sx_xunlock(&allproc_lock);
        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
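/*
 * For example, "sysctl kern.randompid=100" makes each pid search start
 * a random 0..99 slots beyond lastpid + 1; the handler above clamps
 * requested values into the range the allocator expects.
 */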

int
fork1(td, flags, pages, procp)
        struct thread *td;
        int flags;
        int pages;
        struct proc **procp;
{
        struct proc *p1, *p2, *pptr;
        uid_t uid;
        struct proc *newproc;
        int ok, trypid;
        static int curfail, pidchecked = 0;
        static struct timeval lastfail;
        struct filedesc *fd;
        struct filedesc_to_leader *fdtol;
        struct thread *td2;
        struct kse *ke2;
        struct ksegrp *kg2;
        struct sigacts *newsigacts;
        int error;

        /* Can't copy and clear. */
        if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
                return (EINVAL);

        p1 = td->td_proc;
        mtx_lock(&Giant);

        /*
         * Here we don't create a new process, but we divorce
         * certain parts of a process from itself.
         */
        if ((flags & RFPROC) == 0) {
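                /*
                 * vm_forkproc() handles the address-space half of the
                 * divorce: with RFMEM clear, the caller is given a
                 * private copy of its vmspace.
                 */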
                vm_forkproc(td, NULL, NULL, flags);

                /*
                 * Close all file descriptors.
                 */
                if (flags & RFCFDG) {
                        struct filedesc *fdtmp;
                        fdtmp = fdinit(td->td_proc->p_fd);
                        fdfree(td);
                        p1->p_fd = fdtmp;
                }

                /*
                 * Unshare file descriptors (from parent).
                 */
                if (flags & RFFDG) {
                        FILEDESC_LOCK(p1->p_fd);
                        if (p1->p_fd->fd_refcnt > 1) {
                                struct filedesc *newfd;

                                newfd = fdcopy(td->td_proc->p_fd);
                                FILEDESC_UNLOCK(p1->p_fd);
                                fdfree(td);
                                p1->p_fd = newfd;
                        } else
                                FILEDESC_UNLOCK(p1->p_fd);
                }
                mtx_unlock(&Giant);
                *procp = NULL;
                return (0);
        }

        /*
         * Note 1:1 allows for forking with one thread coming out on the
         * other side with the expectation that the process is about to
         * exec.
         */
        if (p1->p_flag & P_SA) {
                /*
                 * Idle the other threads for a second.
                 * Since the user space is copied, it must remain stable.
                 * In addition, all threads (from the user perspective)
                 * need to either be suspended or in the kernel,
                 * where they will try to restart in the parent and will
                 * be aborted in the child.
                 */
                PROC_LOCK(p1);
                if (thread_single(SINGLE_NO_EXIT)) {
                        /* Abort: someone else is single-threading before us. */
                        PROC_UNLOCK(p1);
                        mtx_unlock(&Giant);
                        return (ERESTART);
                }
                PROC_UNLOCK(p1);
                /*
                 * All other activity in this process
                 * is now suspended at the user boundary
                 * (or other safe places if we think of any).
                 */
        }

        /* Allocate new proc. */
        newproc = uma_zalloc(proc_zone, M_WAITOK);
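        /*
         * Note: the M_WAITOK allocation above may sleep, which is why it
         * is done before allproc_lock is taken.
         */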
#ifdef MAC
        mac_init_proc(newproc);
#endif

        /*
         * Although process entries are dynamically created, we still keep
         * a global limit on the maximum number we will create.  Don't allow
         * a nonprivileged user to use the last ten processes; don't let root
         * exceed the limit.  The variable nprocs is the current number of
         * processes, maxproc is the limit.
         */
        sx_xlock(&allproc_lock);
        uid = td->td_ucred->cr_ruid;
        if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
                error = EAGAIN;
                goto fail;
        }

        /*
         * Increment the count of procs running with this uid.  Don't allow
         * a nonprivileged user to exceed their current limit.
         */
        PROC_LOCK(p1);
        ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
            (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
        PROC_UNLOCK(p1);
        if (!ok) {
                error = EAGAIN;
                goto fail;
        }

        /*
         * Increment the nprocs resource before blocking can occur.  There
         * are hard limits as to the number of processes that can run.
         */
        nprocs++;

        /*
         * Find an unused process ID.  We remember a range of unused IDs
         * ready to use (from lastpid+1 through pidchecked-1).
         *
         * If RFHIGHPID is set (used during system boot), do not allocate
         * low-numbered pids.
         */
        trypid = lastpid + 1;
        if (flags & RFHIGHPID) {
                if (trypid < 10)
                        trypid = 10;
        } else {
                if (randompid)
                        trypid += arc4random() % randompid;
        }
retry:
        /*
         * If the process ID prototype has wrapped around,
         * restart somewhat above 0, as the low-numbered procs
         * tend to include daemons that don't exit.
         */
        if (trypid >= PID_MAX) {
                trypid = trypid % PID_MAX;
                if (trypid < 100)
                        trypid += 100;
                pidchecked = 0;
        }
        if (trypid >= pidchecked) {
                int doingzomb = 0;

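                /*
                 * Two passes over the proc lists: first allproc, then
                 * (with doingzomb set) zombproc, since a zombie still
                 * holds its pid until it is reaped.
                 */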
                pidchecked = PID_MAX;
                /*
                 * Scan the active and zombie procs to check whether this pid
                 * is in use.  Remember the lowest pid that's greater
                 * than trypid, so we can avoid checking for a while.
                 */
                p2 = LIST_FIRST(&allproc);
again:
                for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
                        PROC_LOCK(p2);
                        while (p2->p_pid == trypid ||
                            p2->p_pgrp->pg_id == trypid ||
                            p2->p_session->s_sid == trypid) {
                                trypid++;
                                if (trypid >= pidchecked) {
                                        PROC_UNLOCK(p2);
                                        goto retry;
                                }
                        }
                        if (p2->p_pid > trypid && pidchecked > p2->p_pid)
                                pidchecked = p2->p_pid;
                        if (p2->p_pgrp->pg_id > trypid &&
                            pidchecked > p2->p_pgrp->pg_id)
                                pidchecked = p2->p_pgrp->pg_id;
                        if (p2->p_session->s_sid > trypid &&
                            pidchecked > p2->p_session->s_sid)
                                pidchecked = p2->p_session->s_sid;
                        PROC_UNLOCK(p2);
                }
                if (!doingzomb) {
                        doingzomb = 1;
                        p2 = LIST_FIRST(&zombproc);
                        goto again;
                }
        }

        /*
         * RFHIGHPID does not mess with the lastpid counter during boot.
         */
        if (flags & RFHIGHPID)
                pidchecked = 0;
        else
                lastpid = trypid;

        p2 = newproc;
        p2->p_state = PRS_NEW;          /* protect against others */
        p2->p_pid = trypid;
        LIST_INSERT_HEAD(&allproc, p2, p_list);
        LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
        sx_xunlock(&allproc_lock);

        /*
         * Malloc things while we don't hold any locks.
         */
        if (flags & RFSIGSHARE)
                newsigacts = NULL;
        else
                newsigacts = sigacts_alloc();

        /*
         * Copy filedesc.
         */
        if (flags & RFCFDG) {
                fd = fdinit(td->td_proc->p_fd);
                fdtol = NULL;
        } else if (flags & RFFDG) {
                FILEDESC_LOCK(p1->p_fd);
                fd = fdcopy(td->td_proc->p_fd);
                FILEDESC_UNLOCK(p1->p_fd);
                fdtol = NULL;
        } else {
                fd = fdshare(p1->p_fd);
                if (p1->p_fdtol == NULL)
                        p1->p_fdtol = filedesc_to_leader_alloc(NULL,
                            NULL, p1->p_leader);
                if ((flags & RFTHREAD) != 0) {
                        /*
                         * Shared file descriptor table and
                         * shared process leaders.
                         */
                        fdtol = p1->p_fdtol;
                        FILEDESC_LOCK(p1->p_fd);
                        fdtol->fdl_refcount++;
                        FILEDESC_UNLOCK(p1->p_fd);
                } else {
                        /*
                         * Shared file descriptor table, and
                         * different process leaders.
                         */
                        fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
                            p1->p_fd, p2);
                }
        }
        /*
         * Make a proc table entry for the new process.
         * Start by zeroing the section of proc that is zero-initialized,
         * then copy the section that is copied directly from the parent.
         */
        td2 = FIRST_THREAD_IN_PROC(p2);
        kg2 = FIRST_KSEGRP_IN_PROC(p2);
        ke2 = FIRST_KSE_IN_KSEGRP(kg2);

        /* Allocate and switch to an alternate kstack if specified. */
        if (pages != 0)
                vm_thread_new_altkstack(td2, pages);

        PROC_LOCK(p2);
        PROC_LOCK(p1);

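        /*
         * RANGEOF() measures the span between two marker fields so the
         * bzero()/bcopy() calls below can clear or copy an entire
         * section of a structure at once.
         */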
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

        bzero(&p2->p_startzero,
            (unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
        bzero(&ke2->ke_startzero,
            (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
        bzero(&td2->td_startzero,
            (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
        bzero(&kg2->kg_startzero,
            (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero));

        bcopy(&p1->p_startcopy, &p2->p_startcopy,
            (unsigned) RANGEOF(struct proc, p_startcopy, p_endcopy));
        bcopy(&td->td_startcopy, &td2->td_startcopy,
            (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
        bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
            (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
#undef RANGEOF

        /* Set up the thread as an active thread (as if runnable). */
        ke2->ke_state = KES_THREAD;
        ke2->ke_thread = td2;
        td2->td_kse = ke2;

        /*
         * Duplicate sub-structures as needed.
         * Increase reference counts on shared objects.
         * The p_stats substruct is set in vm_forkproc.
         */
        p2->p_flag = 0;
        if (p1->p_flag & P_PROFIL)
                startprofclock(p2);
        mtx_lock_spin(&sched_lock);
        p2->p_sflag = PS_INMEM;
        /*
         * Allow the scheduler to adjust the priority of the child and
         * parent while we hold the sched_lock.
         */
        sched_fork(p1, p2);

        mtx_unlock_spin(&sched_lock);
        p2->p_ucred = crhold(td->td_ucred);
        td2->td_ucred = crhold(p2->p_ucred);    /* XXXKSE */

        pargs_hold(p2->p_args);

        if (flags & RFSIGSHARE) {
                p2->p_sigacts = sigacts_hold(p1->p_sigacts);
        } else {
                sigacts_copy(newsigacts, p1->p_sigacts);
                p2->p_sigacts = newsigacts;
        }
        if (flags & RFLINUXTHPN)
                p2->p_sigparent = SIGUSR1;
        else
                p2->p_sigparent = SIGCHLD;

        /* Bump references to the text vnode (for procfs). */
        p2->p_textvp = p1->p_textvp;
        if (p2->p_textvp)
                VREF(p2->p_textvp);
        p2->p_fd = fd;
        p2->p_fdtol = fdtol;
        PROC_UNLOCK(p1);
        PROC_UNLOCK(p2);

        /*
         * p_limit is copy-on-write; bump the refcnt.
         */
        p2->p_limit = p1->p_limit;
        p2->p_limit->p_refcnt++;

        /*
         * Set up linkage for kernel-based threading.
         */
        if ((flags & RFTHREAD) != 0) {
                mtx_lock(&ppeers_lock);
                p2->p_peers = p1->p_peers;
                p1->p_peers = p2;
                p2->p_leader = p1->p_leader;
                mtx_unlock(&ppeers_lock);
                PROC_LOCK(p1->p_leader);
                if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
                        PROC_UNLOCK(p1->p_leader);
                        /*
                         * The task leader is exiting, so process p1 is
                         * going to be killed shortly.  Since p1 obviously
                         * isn't dead yet, we know that the leader is either
                         * sending SIGKILL's to all the processes in this
                         * task or is sleeping waiting for all the peers to
                         * exit.  We let p1 complete the fork, but we need
                         * to go ahead and kill the new process p2 since
                         * the task leader may not get a chance to send
                         * SIGKILL to it.  We leave it on the list so that
                         * the task leader will wait for this new process
                         * to commit suicide.
                         */
                        PROC_LOCK(p2);
                        psignal(p2, SIGKILL);
                        PROC_UNLOCK(p2);
                } else
                        PROC_UNLOCK(p1->p_leader);
        } else {
                p2->p_peers = NULL;
                p2->p_leader = p2;
        }

        sx_xlock(&proctree_lock);
        PGRP_LOCK(p1->p_pgrp);
        PROC_LOCK(p2);
        PROC_LOCK(p1);

        /*
         * Preserve some more flags in subprocess.  P_PROFIL has already
         * been preserved.
         */
        p2->p_flag |= p1->p_flag & (P_ALTSTACK | P_SUGID);
        SESS_LOCK(p1->p_session);
        if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
                p2->p_flag |= P_CONTROLT;
        SESS_UNLOCK(p1->p_session);
        if (flags & RFPPWAIT)
                p2->p_flag |= P_PPWAIT;

        LIST_INSERT_AFTER(p1, p2, p_pglist);
        PGRP_UNLOCK(p1->p_pgrp);
        LIST_INIT(&p2->p_children);

        callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);

#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.
         */
        mtx_lock(&ktrace_mtx);
        KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
        if (p1->p_traceflag & KTRFAC_INHERIT) {
                p2->p_traceflag = p1->p_traceflag;
                if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
                        VREF(p2->p_tracevp);
                        KASSERT(p1->p_tracecred != NULL,
                            ("ktrace vnode with no cred"));
                        p2->p_tracecred = crhold(p1->p_tracecred);
                }
        }
        mtx_unlock(&ktrace_mtx);
#endif

        /*
         * If PF_FORK is set, the child process inherits the
         * procfs ioctl flags from its parent.
         */
        if (p1->p_pfsflags & PF_FORK) {
                p2->p_stops = p1->p_stops;
                p2->p_pfsflags = p1->p_pfsflags;
        }

        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         */
        _PHOLD(p1);
        PROC_UNLOCK(p1);

        /*
         * Attach the new process to its parent.
         *
         * If RFNOWAIT is set, the newly created process becomes a child
         * of init.  This effectively disassociates the child from the
         * parent.
         */
        if (flags & RFNOWAIT)
                pptr = initproc;
        else
                pptr = p1;
        p2->p_pptr = pptr;
        LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
        sx_xunlock(&proctree_lock);

        /* Inform accounting that we have forked. */
        p2->p_acflag = AFORK;
        PROC_UNLOCK(p2);

        /*
         * Finish creating the child process.  It will return via a
         * different execution path later (i.e., directly into user mode).
         */
        vm_forkproc(td, p2, td2, flags);

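        /*
         * Classify this fork for the vm statistics: a plain fork(), a
         * vfork(), a kernel thread (parent is proc0), or a generic rfork().
         */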
        if (flags == (RFFDG | RFPROC)) {
                cnt.v_forks++;
                cnt.v_forkpages += p2->p_vmspace->vm_dsize +
                    p2->p_vmspace->vm_ssize;
        } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
                cnt.v_vforks++;
                cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
                    p2->p_vmspace->vm_ssize;
        } else if (p1 == &proc0) {
                cnt.v_kthreads++;
                cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
                    p2->p_vmspace->vm_ssize;
        } else {
                cnt.v_rforks++;
                cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
                    p2->p_vmspace->vm_ssize;
        }

        /*
         * Both processes are set up; now check if any loadable modules
         * want to adjust anything.
         *   What if they have an error? XXX
         */
        EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

        /*
         * If RFSTOPPED not requested, make child runnable and add to
         * run queue.
         */
        microuptime(&p2->p_stats->p_start);
        if ((flags & RFSTOPPED) == 0) {
                mtx_lock_spin(&sched_lock);
                p2->p_state = PRS_NORMAL;
                TD_SET_CAN_RUN(td2);
                setrunqueue(td2);
                mtx_unlock_spin(&sched_lock);
        }

        /*
         * Now can be swapped.
         */
        PROC_LOCK(p1);
        _PRELE(p1);

        /*
         * Tell any interested parties about the new process.
         */
        KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

        PROC_UNLOCK(p1);

        /*
         * Preserve synchronization semantics of vfork.  If waiting for
         * child to exec or exit, set P_PPWAIT on child, and sleep on our
         * proc (in case of exit).
         */
        PROC_LOCK(p2);
        while (p2->p_flag & P_PPWAIT)
                msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
        PROC_UNLOCK(p2);

        /*
         * If other threads are waiting, let them continue now.
         */
        if (p1->p_flag & P_SA) {
                PROC_LOCK(p1);
                thread_single_end();
                PROC_UNLOCK(p1);
        }

        /*
         * Return child proc pointer to parent.
         */
        mtx_unlock(&Giant);
        *procp = p2;
        return (0);
fail:
        if (ppsratecheck(&lastfail, &curfail, 1))
                printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
                    uid);
        sx_xunlock(&allproc_lock);
        uma_zfree(proc_zone, newproc);
        if (p1->p_flag & P_SA) {
                PROC_LOCK(p1);
                thread_single_end();
                PROC_UNLOCK(p1);
        }
        tsleep(&forksleep, PUSER, "fork", hz / 2);
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
        void (*callout)(void *, struct trapframe *);
        void *arg;
        struct trapframe *frame;
{
        struct proc *p;
        struct thread *td;

        /*
         * Processes normally resume in mi_switch() after being
         * cpu_switch()'ed to, but when children start up they arrive here
         * instead, so we must do much the same things as mi_switch() would.
         */

        if ((td = PCPU_GET(deadthread))) {
                PCPU_SET(deadthread, NULL);
                thread_stash(td);
        }
        td = curthread;
        p = td->td_proc;
        td->td_oncpu = PCPU_GET(cpuid);
        p->p_state = PRS_NORMAL;

        /*
         * Finish setting up thread glue so that it begins execution in a
         * non-nested critical section with sched_lock held but not recursed.
         */
        sched_lock.mtx_lock = (uintptr_t)td;
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
        cpu_critical_fork_exit();
        CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
        mtx_unlock_spin(&sched_lock);

        /*
         * cpu_set_fork_handler intercepts this function call to
         * have this call a non-return function to stay in kernel mode.
         * initproc has its own fork handler, but it does return.
         */
        KASSERT(callout != NULL, ("NULL callout in fork_exit"));
        callout(arg, frame);

        /*
         * Check if a kernel thread misbehaved and returned from its main
         * function.
         */
        PROC_LOCK(p);
        if (p->p_flag & P_KTHREAD) {
                PROC_UNLOCK(p);
                mtx_lock(&Giant);
                printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
                    p->p_comm, p->p_pid);
                kthread_exit(0);
        }
        PROC_UNLOCK(p);
#ifdef DIAGNOSTIC
        cred_free_thread(td);
#endif
        mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
        struct thread *td;
        struct trapframe *frame;
{

        userret(td, frame, 0);
#ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET))
                ktrsysret(SYS_fork, 0, 0);
#endif
        mtx_assert(&Giant, MA_NOTOWNED);
}