FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c
/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_fork_func_t	dtrace_fasttrap_fork;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int");

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

EVENTHANDLER_LIST_DECLARE(process_fork);

/* ARGSUSED */
int
sys_fork(struct thread *td, struct fork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
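
/*
 * Example (illustrative only, not part of this file): a minimal
 * userland caller of fork(2).  The child observes a return value of 0
 * while the parent receives the child's pid, matching the
 * td_retval[0]/td_retval[1] handling above.
 *
 *	pid_t pid = fork();
 *	if (pid == -1)
 *		err(1, "fork");
 *	else if (pid == 0)
 *		_exit(0);		(child)
 *	else
 *		waitpid(pid, NULL, 0);	(parent)
 */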

/* ARGSUSED */
int
sys_pdfork(struct thread *td, struct pdfork_args *uap)
{
	struct fork_req fr;
	int error, fd, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFPROCDESC;
	fr.fr_pidp = &pid;
	fr.fr_pd_fd = &fd;
	fr.fr_pd_flags = uap->flags;
	/*
	 * It is necessary to return fd by reference because 0 is a valid file
	 * descriptor number, and the child needs to be able to distinguish
	 * itself from the parent using the return value.
	 */
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
		error = copyout(&fd, uap->fdp, sizeof(fd));
	}
	return (error);
}
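
/*
 * Illustrative sketch (not part of this file): userland use of
 * pdfork(2).  The descriptor written through fdp refers to the child
 * and can be monitored with kqueue(2) or used with pdkill(2); the
 * return value still distinguishes parent from child, which is why
 * the descriptor has to come back by reference.
 *
 *	int fd;
 *	pid_t pid = pdfork(&fd, 0);
 *	if (pid == -1)
 *		err(1, "pdfork");
 *	if (pid == 0)
 *		_exit(0);		(child)
 *	pdkill(fd, SIGTERM);		(parent signals via the procdesc)
 */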

/* ARGSUSED */
int
sys_vfork(struct thread *td, struct vfork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
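
/*
 * Illustrative sketch: vfork(2) shares the parent's address space
 * (RFMEM) and suspends the parent (RFPPWAIT) until the child execs or
 * exits, so the child must do nothing except exec or _exit:
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execl("/bin/true", "true", (char *)NULL);
 *		_exit(127);		(reached only if exec failed)
 *	}
 */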

int
sys_rfork(struct thread *td, struct rfork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);
	/* RFSPAWN must not be combined with any other flags. */
	if ((uap->flags & RFSPAWN) != 0 && uap->flags != RFSPAWN)
		return (EINVAL);

	AUDIT_ARG_FFLAGS(uap->flags);
	bzero(&fr, sizeof(fr));
	if ((uap->flags & RFSPAWN) != 0) {
		fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
		fr.fr_flags2 = FR2_DROPSIG_CAUGHT;
	} else {
		fr.fr_flags = uap->flags;
	}
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
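
/*
 * Illustrative sketch: rfork(2) exposes the RF* flag bits directly.
 * For example, RFPROC | RFCFDG creates a child whose descriptor table
 * starts out empty, rather than copied (RFFDG) or shared (neither):
 *
 *	pid_t pid = rfork(RFPROC | RFCFDG);
 *	if (pid == 0)
 *		_exit(0);		(child: no open descriptors)
 */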

int nprocs = 1;		/* process 0 */
int lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a modulus that
 * is too big causes a LOT more process table scans and slows down fork
 * processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid == 0)
			randompid = 0;
		else if (pid == 1)
			/* Generate a random PID modulus between 100 and 1123. */
			randompid = 100 + arc4random() % 1024;
		else if (pid < 0 || pid > pid_max - 100)
			/* Out of range. */
			randompid = pid_max - 100;
		else if (pid < 100)
			/* Make it reasonable. */
			randompid = 100;
		else
			randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I",
    "Random PID modulus. Special values: 0: disable, 1: choose random value");
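
/*
 * Illustrative usage of the knob above, from a root shell.  The values
 * map onto the cases in sysctl_kern_randompid():
 *
 *	# sysctl kern.randompid=1	(pick a modulus in [100, 1123])
 *	# sysctl kern.randompid=500	(pids become lastpid + 1 plus a
 *					 random offset in [0, 499])
 *	# sysctl kern.randompid=0	(sequential pid allocation)
 */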

static int
fork_findpid(int flags)
{
	struct proc *p;
	int trypid;
	static int pidchecked = 0;

	/*
	 * Requires allproc_lock in order to iterate over the list
	 * of processes, and proctree_lock to access p_pgrp.
	 */
	sx_assert(&allproc_lock, SX_LOCKED);
	sx_assert(&proctree_lock, SX_LOCKED);

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= pid_max) {
		trypid = trypid % pid_max;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 *
		 * Avoid reuse of the process group id, session id or
		 * the reaper subtree id.  Note that for process groups
		 * and sessions, the amount of reserved pids is
		 * limited by the process limit.  For the subtree ids,
		 * the id is kept reserved only while there is a
		 * non-reaped process in the subtree, so the amount of
		 * reserved pids is limited by the process limit times
		 * two.
		 */
		p = LIST_FIRST(&allproc);
again:
		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
			while (p->p_pid == trypid ||
			    p->p_reapsubtree == trypid ||
			    (p->p_pgrp != NULL &&
			    (p->p_pgrp->pg_id == trypid ||
			    (p->p_session != NULL &&
			    p->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p->p_pid > trypid && pidchecked > p->p_pid)
				pidchecked = p->p_pid;
			if (p->p_pgrp != NULL) {
				if (p->p_pgrp->pg_id > trypid &&
				    pidchecked > p->p_pgrp->pg_id)
					pidchecked = p->p_pgrp->pg_id;
				if (p->p_session != NULL &&
				    p->p_session->s_sid > trypid &&
				    pidchecked > p->p_session->s_sid)
					pidchecked = p->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	return (trypid);
}
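
/*
 * Worked example of the pidchecked cache above: suppose lastpid is 700
 * and the smallest pid, process group id, or session id in use above
 * 700 is 740.  The scan sets pidchecked to 740 and returns 701; the
 * next 38 calls then hand out 702..739 without rescanning allproc, and
 * only once trypid reaches 740 is a full scan performed again.
 */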

static int
fork_norfproc(struct thread *td, int flags)
{
	int error;
	struct proc *p1;

	KASSERT((flags & RFPROC) == 0,
	    ("fork_norfproc called with RFPROC set"));
	p1 = td->td_proc;

	/*
	 * If the process has ever been threaded (and is not a system
	 * process), single-thread it around the descriptor table changes
	 * below.
	 */
	if (((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS) &&
	    (flags & (RFCFDG | RFFDG))) {
		PROC_LOCK(p1);
		if (thread_single(p1, SINGLE_BOUNDARY)) {
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
	}

	error = vm_forkproc(td, NULL, NULL, NULL, flags);
	if (error)
		goto fail;

	/*
	 * Close all file descriptors.
	 */
	if (flags & RFCFDG) {
		struct filedesc *fdtmp;

		fdtmp = fdinit(td->td_proc->p_fd, false);
		fdescfree(td);
		p1->p_fd = fdtmp;
	}

	/*
	 * Unshare file descriptors (from parent).
	 */
	if (flags & RFFDG)
		fdunshare(td);

fail:
	if (((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS) &&
	    (flags & (RFCFDG | RFFDG))) {
		PROC_LOCK(p1);
		thread_single_end(p1, SINGLE_BOUNDARY);
		PROC_UNLOCK(p1);
	}
	return (error);
}
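
/*
 * Illustrative sketch: this path serves rfork(2) calls without RFPROC,
 * which modify the current process in place instead of creating a new
 * one, e.g.:
 *
 *	if (rfork(RFFDG) == -1)		(unshare the fd table in place)
 *		err(1, "rfork");
 */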

static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2,
    struct thread *td2, struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	p2->p_elf_machine = p1->p_elf_machine;
	p2->p_elf_flags = p1->p_elf_flags;
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	p2->p_ptevents = 0;
	p2->p_pdeathsig = 0;

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	td2->td_sleeptimo = 0;
	td2->td_vslock_sz = 0;
	bzero(&td2->td_si, sizeof(td2->td_si));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	td2->td_sa = td->td_sa;

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP |
	    P2_STKGAP_DISABLE | P2_STKGAP_DISABLE_EXEC);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
		if ((fr->fr_flags2 & FR2_DROPSIG_CAUGHT) != 0) {
			mtx_lock(&p2->p_sigacts->ps_mtx);
			sig_drop_caught(p2);
			mtx_unlock(&p2->p_sigacts->ps_mtx);
		}
	}

	if (fr->fr_flags & RFTSIGZMB)
		p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later (i.e., directly into user mode).
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		PCPU_INC(cnt.v_forks);
		PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		PCPU_INC(cnt.v_vforks);
		PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		PCPU_INC(cnt.v_kthreads);
		PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		PCPU_INC(cnt.v_rforks);
		PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed.  We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * The parent can now be swapped out again.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	/*
	 * Speculative check for PTRACE_FORK.  PTRACE_FORK is not
	 * synced with forks in progress, so it is OK if we miss it
	 * if it is being set at the moment.
	 */
	if ((p1->p_ptevents & PTRACE_FORK) != 0) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);

		/*
		 * p1->p_ptevents & p1->p_pptr are protected by both
		 * process and proctree locks for modifications,
		 * so owning proctree_lock allows the race-free read.
		 */
		if ((p1->p_ptevents & PTRACE_FORK) != 0) {
			/*
			 * Arrange for debugger to receive the fork event.
			 *
			 * We can report PL_FLAG_FORKED regardless of
			 * P_FOLLOWFORK settings, but it does not make sense
			 * for a runaway child.
			 */
			td->td_dbgflags |= TDB_FORK;
			td->td_dbg_forked = p2->p_pid;
			td2->td_dbgflags |= TDB_STOPATFORK;
			proc_set_traced(p2, true);
			CTR2(KTR_PTRACE,
			    "do_fork: attaching to new child pid %d: oppid %d",
			    p2->p_pid, p2->p_oppid);
			proc_reparent(p2, p1->p_pptr);
		}
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED was not requested, make the child runnable
		 * and add it to the run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}

int
fork1(struct thread *td, struct fork_req *fr)
{
	struct proc *p1, *newproc;
	struct thread *td2;
	struct vmspace *vm2;
	struct file *fp_procdesc;
	vm_ooffset_t mem_charged;
	int error, nprocs_new, ok;
	static int curfail;
	static struct timeval lastfail;
	int flags, pages;

	flags = fr->fr_flags;
	pages = fr->fr_pages;

	if ((flags & RFSTOPPED) != 0)
		MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL);
	else
		MPASS(fr->fr_procp == NULL);

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG | RFCFDG)) == (RFFDG | RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

	if ((flags & RFPROCDESC) != 0) {
		/*
		 * Can't request a process descriptor without also
		 * creating a process.
		 */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (fr->fr_pd_fd == NULL)
			return (EINVAL);

		/* Check if we are using supported flags. */
		if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0)
			return (EINVAL);
	}

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (fr->fr_procp != NULL)
			*fr->fr_procp = NULL;
		else if (fr->fr_pidp != NULL)
			*fr->fr_pidp = 0;
		return (fork_norfproc(td, flags));
	}

	fp_procdesc = NULL;
	newproc = NULL;
	vm2 = NULL;

	/*
	 * Increment the nprocs resource before allocations occur.
	 * Although process entries are dynamically created, we still
	 * keep a global limit on the maximum number we will
	 * create.  There are hard-limits as to the number of processes
	 * that can run, established by the KVA and memory usage for
	 * the process data.
	 *
	 * Don't allow a nonprivileged user to use the last ten
	 * processes; don't let root exceed the limit.
	 */
	nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
	if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) {
		error = EAGAIN;
		sx_xlock(&allproc_lock);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("maxproc limit exceeded by uid %u (pid %d); "
			    "see tuning(7) and login.conf(5)\n",
			    td->td_ucred->cr_ruid, p1->p_pid);
		}
		sx_xunlock(&allproc_lock);
		goto fail2;
	}

	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong.  We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd,
		    fr->fr_pd_flags, fr->fr_pd_fcaps);
		if (error != 0)
			goto fail2;
	}

	mem_charged = 0;
	if (pages == 0)
		pages = kstack_pages;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		proc_linkup(newproc, td2);
	} else {
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				error = ENOMEM;
				goto fail2;
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed.  The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			error = ENOMEM;
			goto fail2;
		}
	} else
		vm2 = NULL;

	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 * per-cred resource counters.
	 */
	proc_set_cred_init(newproc, crhold(td->td_ucred));

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);
	if (error != 0) {
		error = EAGAIN;
		goto fail1;
	}

#ifdef MAC
	mac_proc_init(newproc);
#endif
	newproc->p_klist = knlist_alloc(&newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);
	sx_xlock(&allproc_lock);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(td, RLIMIT_NPROC));
	}
	if (ok) {
		do_fork(td, fr, newproc, td2, vm2, fp_procdesc);
		return (0);
	}

	error = EAGAIN;
	sx_sunlock(&proctree_lock);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
	racct_proc_exit(newproc);
fail1:
	crfree(newproc->p_ucred);
	newproc->p_ucred = NULL;
fail2:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td, fp_procdesc, *fr->fr_pd_fd);
		fdrop(fp_procdesc, td);
	}
	atomic_add_int(&nprocs, -1);
	pause("fork", hz / 2);
	return (error);
}
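
/*
 * Illustrative sketch of an in-kernel caller (cf. kproc_create() in
 * kern_kthread.c; the exact flags cited here are an assumption about
 * that file, not something defined in this one): kernel process
 * creation also goes through fork1(), but with RFSTOPPED set and the
 * new proc returned through fr_procp instead of a pid:
 *
 *	struct fork_req fr;
 *	struct proc *p2;
 *
 *	bzero(&fr, sizeof(fr));
 *	fr.fr_flags = RFMEM | RFFDG | RFPROC | RFSTOPPED;
 *	fr.fr_procp = &p2;
 *	error = fork1(curthread, &fr);
 *	(then set up p2 and sched_add() its thread to start it)
 */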

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td_get_sched(td), p->p_pid, td->td_name);

	sched_fork_exit(td);
	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);

	/*
	 * cpu_fork_kthread_handler() intercepts this function call so
	 * that the thread calls a non-returning function and thus stays
	 * in kernel mode.  initproc has its own fork handler, but that
	 * one does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KPROC) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kthread_exit();
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	if (p->p_sysent->sv_schedtail != NULL)
		(p->p_sysent->sv_schedtail)(td);
	td->td_pflags &= ~TDP_FORKING;
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  This function is passed in to fork_exit()
 * as the first parameter and is called when returning to a new
 * userland process.
 */
void
fork_return(struct thread *td, struct trapframe *frame)
{
	struct proc *p;

	p = td->td_proc;
	if (td->td_dbgflags & TDB_STOPATFORK) {
		PROC_LOCK(p);
		if ((p->p_flag & P_TRACED) != 0) {
			/*
			 * Inform the debugger if one is still present.
			 */
			td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP;
			ptracestop(td, SIGSTOP, NULL);
			td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX);
		} else {
			/*
			 * ... otherwise clear the request.
			 */
			td->td_dbgflags &= ~TDB_STOPATFORK;
		}
		PROC_UNLOCK(p);
	} else if (p->p_flag & P_TRACED || td->td_dbgflags & TDB_BORN) {
		/*
		 * This is the start of a new thread in a traced
		 * process.  Report a system call exit event.
		 */
		PROC_LOCK(p);
		td->td_dbgflags |= TDB_SCX;
		_STOPEVENT(p, S_SCX, td->td_sa.code);
		if ((p->p_ptevents & PTRACE_SCX) != 0 ||
		    (td->td_dbgflags & TDB_BORN) != 0)
			ptracestop(td, SIGTRAP, NULL);
		td->td_dbgflags &= ~(TDB_SCX | TDB_BORN);
		PROC_UNLOCK(p);
	}

	/*
	 * If the prison was killed mid-fork, die along with it.
	 */
	if (td->td_ucred->cr_prison->pr_flags & PR_REMOVE)
		exit1(td, 0, SIGKILL);

	userret(td, frame);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
}