FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c
/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/mac.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sx.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/critical.h>

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

static int forksleep;	/* Place for fork1() to sleep on. */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
fork(td, uap)
	struct thread *td;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
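
/*
 * For illustration (not part of the original file): the td_retval pair
 * above implements the historical two-register fork return.  The parent
 * receives the child's pid in retval[0]; in the child, the MD fork code
 * arranges for the syscall to appear to return 0.  A minimal userland
 * sketch of that convention (child_work() and parent_work() are
 * hypothetical):
 *
 *	#include <sys/types.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == -1)
 *		err(1, "fork");
 *	else if (pid == 0)
 *		child_work();		// in the child, fork() returns 0
 *	else
 *		parent_work(pid);	// in the parent, the child's pid
 */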

/*
 * MPSAFE
 */
/* ARGSUSED */
int
vfork(td, uap)
	struct thread *td;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
rfork(td, uap)
	struct thread *td;
	struct rfork_args *uap;
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(td, uap->flags, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}
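
/*
 * Illustration (hypothetical userland sketch, not from this file):
 * rfork() exposes the resource-sharing flag bits directly, while fork()
 * and vfork() above are fixed combinations of them.  Kernel-only bits
 * such as RFHIGHPID are rejected by the RFKERNELONLY check.
 *
 *	#include <unistd.h>
 *
 *	pid_t pid;
 *
 *	pid = rfork(RFPROC | RFFDG);		// fork()-like: copy fds
 *	pid = rfork(RFPROC | RFMEM | RFFDG);	// share the address space
 *	pid = rfork(RFPROC | RFCFDG);		// child gets an empty fd table
 */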

int nprocs = 1;		/* process 0 */
int lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity-check the modulus value
 * here, rather than in the critical paths where it is used.  Don't let it
 * be too small or we pointlessly waste entropy, and don't let it be
 * impossibly large: a modulus that is too big causes many more process
 * table scans and slows down fork processing, as the pidchecked caching
 * is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
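
/*
 * Worked example (illustrative): with the modulus set via
 *
 *	sysctl kern.randompid=100
 *
 * the PID search in fork1() below starts at
 * lastpid + 1 + (arc4random() % 100), i.e. somewhere within the next
 * 100 candidate PIDs rather than at exactly lastpid + 1, which is what
 * makes the next PID harder to predict.
 */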

int
fork1(td, flags, pages, procp)
	struct thread *td;
	int flags;
	int pages;
	struct proc **procp;
{
	struct proc *p1, *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok, trypid;
	static int curfail, pidchecked = 0;
	static struct timeval lastfail;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct thread *td2;
	struct ksegrp *kg2;
	struct sigacts *newsigacts;
	int error;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_forkproc(td, NULL, NULL, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td->td_proc->p_fd);
			fdfree(td);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG)
			fdunshare(p1, td);
		*procp = NULL;
		return (0);
	}

	/*
	 * Note: a 1:1 threaded process may fork with only one thread
	 * coming out on the other side, on the expectation that the
	 * process is about to exec.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		/*
		 * Idle the other threads for a moment.
		 * Since the user space is copied, it must remain stable.
		 * In addition, all threads (from the user perspective)
		 * need to either be suspended or in the kernel,
		 * where they will try to restart in the parent and will
		 * be aborted in the child.
		 */
		PROC_LOCK(p1);
		if (thread_single(SINGLE_NO_EXIT)) {
			/* Abort. Someone else is single threading before us. */
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
		/*
		 * All other activity in this process
		 * is now suspended at the user boundary
		 * (or other safe places if we think of any).
		 */
	}

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
#ifdef MAC
	mac_init_proc(newproc);
#endif
	knlist_init(&newproc->p_klist, &newproc->p_mtx);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	uid = td->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 &&
	    suser_cred(td->td_ucred, SUSER_RUID) != 0) ||
	    nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	PROC_LOCK(p1);
	ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? lim_cur(p1, RLIMIT_NPROC) : 0);
	PROC_UNLOCK(p1);
	if (!ok) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the nprocs count before blocking can occur.  There
	 * are hard limits on the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the candidate process ID has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			PROC_LOCK(p2);
			while (p2->p_pid == trypid ||
			    (p2->p_pgrp != NULL &&
			    (p2->p_pgrp->pg_id == trypid ||
			    (p2->p_session != NULL &&
			    p2->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked) {
					PROC_UNLOCK(p2);
					goto retry;
				}
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp != NULL) {
				if (p2->p_pgrp->pg_id > trypid &&
				    pidchecked > p2->p_pgrp->pg_id)
					pidchecked = p2->p_pgrp->pg_id;
				if (p2->p_session != NULL &&
				    p2->p_session->s_sid > trypid &&
				    pidchecked > p2->p_session->s_sid)
					pidchecked = p2->p_session->s_sid;
			}
			PROC_UNLOCK(p2);
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
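
	/*
	 * Worked example (illustrative): suppose trypid is 105 and the
	 * scan encounters pids {100, 105, 106, 120}.  The inner while
	 * loop bumps trypid past 105 and 106 to 107, and the bookkeeping
	 * records pidchecked = 120, the lowest in-use id above trypid.
	 * Later forks can then hand out 107 through 119 without
	 * rescanning the lists, until trypid catches up to pidchecked.
	 */
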
	sx_sunlock(&proctree_lock);

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	p2->p_ucred = crhold(td->td_ucred);
	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
				    NULL,
				    p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_LOCK_FAST(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_UNLOCK_FAST(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd,
			    p2);
		}
	}
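
	/*
	 * Illustration (hypothetical): from userland, the three
	 * descriptor-table modes above map onto rfork() flags like so:
	 *
	 *	rfork(RFPROC | RFCFDG);	// fresh, empty descriptor table
	 *	rfork(RFPROC | RFFDG);	// private copy, as fork() does
	 *	rfork(RFPROC | RFMEM);	// neither flag: table is shared,
	 *				// tracked by the fdtol bookkeeping
	 */
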
	/*
	 * Set up the new thread and ksegrp: zero the sections that start
	 * zeroed, then copy the sections that are copied directly from
	 * the parent, just as was done for the proc structure above.
	 */
	td2 = FIRST_THREAD_IN_PROC(p2);
	kg2 = FIRST_KSEGRP_IN_PROC(p2);

	/* Allocate and switch to an alternate kstack if specified. */
	if (pages != 0)
		vm_thread_new_altkstack(td2, pages);

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bzero(&kg2->kg_startzero,
	    __rangeof(struct ksegrp, kg_startzero, kg_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
	    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));

	td2->td_sigstk = td->td_sigstk;
	td2->td_sigmask = td->td_sigmask;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = 0;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	/*
	 * Allow the scheduler to adjust the priority of the child and
	 * parent while we hold the sched_lock.
	 */
	sched_fork(td, td2);

	mtx_unlock_spin(&sched_lock);
	td2->td_ucred = crhold(p2->p_ucred);	/* XXXKSE */

	pargs_hold(p2->p_args);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	p2->p_limit = lim_hold(p1->p_limit);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILLs to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);

	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

	/*
	 * Finish creating the child process.  It will return via a
	 * different execution path later, directly into user mode.
	 */
	vm_forkproc(td, p2, td2, flags);

	if (flags == (RFFDG | RFPROC)) {
		atomic_add_int(&cnt.v_forks, 1);
		atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		atomic_add_int(&cnt.v_vforks, 1);
		atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		atomic_add_int(&cnt.v_kthreads, 1);
		atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		atomic_add_int(&cnt.v_rforks, 1);
		atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Both processes are set up; now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	mtx_lock_spin(&sched_lock);
	p2->p_state = PRS_NORMAL;

	/*
	 * If RFSTOPPED was not requested, make the child runnable and
	 * add it to the run queue.
	 */
	if ((flags & RFSTOPPED) == 0) {
		TD_SET_CAN_RUN(td2);
		setrunqueue(td2, SRQ_BORING);
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * The parent can now be swapped again.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE_LOCKED(&p1->p_klist, NOTE_FORK | p2->p_pid);

	PROC_UNLOCK(p1);

	/*
	 * Preserve the synchronization semantics of vfork.  If we are
	 * waiting for the child to exec or exit, P_PPWAIT is set on the
	 * child, and we sleep on our proc (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	/*
	 * If other threads are waiting, let them continue now.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}

	/*
	 * Return the child proc pointer to the parent.
	 */
	*procp = p2;
	return (0);
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
		    uid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_destroy_proc(newproc);
#endif
	uma_zfree(proc_zone, newproc);
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}
	tsleep(&forksleep, PUSER, "fork", hz / 2);
	return (error);
}
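
/*
 * Sketch (hedged, not from this file) of how a kernel caller such as
 * kthread_create() might drive fork1() directly, using RFSTOPPED so
 * the child can be pointed at its main function before it ever runs;
 * kthread_main and arg are hypothetical:
 *
 *	struct proc *p2;
 *	int error;
 *
 *	error = fork1(curthread, RFMEM | RFFDG | RFPROC | RFSTOPPED,
 *	    0, &p2);
 *	if (error == 0) {
 *		cpu_set_fork_handler(FIRST_THREAD_IN_PROC(p2),
 *		    kthread_main, arg);
 *		// ... then mark the new thread runnable ...
 *	}
 */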

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct proc *p;
	struct thread *td;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td = curthread;
	p = td->td_proc;
	td->td_oncpu = PCPU_GET(cpuid);
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	sched_lock.mtx_lock = (uintptr_t)td;
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	cpu_critical_fork_exit();
	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
	    td, td->td_sched, p->p_pid, p->p_comm);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */

	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	td = curthread;
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler() intercepts this function call so that
	 * the callout is a non-returning function, keeping the thread in
	 * kernel mode.  initproc has its own fork handler, but that one
	 * does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_KTHREAD) {
		PROC_UNLOCK(p);
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	PROC_UNLOCK(p);
	mtx_assert(&Giant, MA_NOTOWNED);
}
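
/*
 * Illustration (hypothetical kthread main, showing the "never return"
 * contract that the P_KTHREAD check above enforces):
 *
 *	static void
 *	my_kthread_main(void *arg)
 *	{
 *		for (;;) {
 *			// do periodic work, then sleep; returning from
 *			// this function would trip the check in
 *			// fork_exit() and call kthread_exit(0)
 *			tsleep(arg, PWAIT, "kwork", hz);
 *		}
 *	}
 */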

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland
 * process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{

	userret(td, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}