FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/10.3/sys/kern/kern_fork.c 295674 2016-02-16 21:36:48Z jhb $");
39
40 #include "opt_kdtrace.h"
41 #include "opt_ktrace.h"
42 #include "opt_kstack_pages.h"
43 #include "opt_procdesc.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/eventhandler.h>
49 #include <sys/fcntl.h>
50 #include <sys/filedesc.h>
51 #include <sys/jail.h>
52 #include <sys/kernel.h>
53 #include <sys/kthread.h>
54 #include <sys/sysctl.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/mutex.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/procdesc.h>
61 #include <sys/pioctl.h>
62 #include <sys/ptrace.h>
63 #include <sys/racct.h>
64 #include <sys/resourcevar.h>
65 #include <sys/sched.h>
66 #include <sys/syscall.h>
67 #include <sys/vmmeter.h>
68 #include <sys/vnode.h>
69 #include <sys/acct.h>
70 #include <sys/ktr.h>
71 #include <sys/ktrace.h>
72 #include <sys/unistd.h>
73 #include <sys/sdt.h>
74 #include <sys/sx.h>
75 #include <sys/sysent.h>
76 #include <sys/signalvar.h>
77
78 #include <security/audit/audit.h>
79 #include <security/mac/mac_framework.h>
80
81 #include <vm/vm.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_extern.h>
85 #include <vm/uma.h>
86
87 #ifdef KDTRACE_HOOKS
88 #include <sys/dtrace_bsd.h>
89 dtrace_fork_func_t dtrace_fasttrap_fork;
90 #endif
91
92 SDT_PROVIDER_DECLARE(proc);
93 SDT_PROBE_DEFINE3(proc, kernel, , create, "struct proc *",
94 "struct proc *", "int");
95
96 #ifndef _SYS_SYSPROTO_H_
97 struct fork_args {
98 int dummy;
99 };
100 #endif
101
102 /* ARGSUSED */
103 int
104 sys_fork(struct thread *td, struct fork_args *uap)
105 {
106 int error;
107 struct proc *p2;
108
109 error = fork1(td, RFFDG | RFPROC, 0, &p2, NULL, 0);
110 if (error == 0) {
111 td->td_retval[0] = p2->p_pid;
112 td->td_retval[1] = 0;
113 }
114 return (error);
115 }
116
117 /* ARGSUSED */
118 int
119 sys_pdfork(td, uap)
120 struct thread *td;
121 struct pdfork_args *uap;
122 {
123 #ifdef PROCDESC
124 int error, fd;
125 struct proc *p2;
126
127 /*
128 * It is necessary to return fd by reference because 0 is a valid file
129 * descriptor number, and the child needs to be able to distinguish
130 * itself from the parent using the return value.
131 */
132 error = fork1(td, RFFDG | RFPROC | RFPROCDESC, 0, &p2,
133 &fd, uap->flags);
134 if (error == 0) {
135 td->td_retval[0] = p2->p_pid;
136 td->td_retval[1] = 0;
137 error = copyout(&fd, uap->fdp, sizeof(fd));
138 }
139 return (error);
140 #else
141 return (ENOSYS);
142 #endif
143 }
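
The comment above is the reason pdfork(2) returns its descriptor through a pointer: 0 is a valid descriptor number, and the return value is reserved for the fork-style parent/child distinction. A minimal user-space sketch of that contract (not part of this file), assuming a kernel built with options PROCDESC; the parent polls the descriptor, which reports POLLHUP once the child exits:

    #include <sys/types.h>
    #include <sys/procdesc.h>
    #include <err.h>
    #include <poll.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct pollfd pfd;
            int fd;
            pid_t pid;

            pid = pdfork(&fd, 0);
            if (pid == -1)
                    err(1, "pdfork");
            if (pid == 0) {
                    /* Child: distinguished only by the 0 return value. */
                    _exit(0);
            }
            /* Parent: the process descriptor reports POLLHUP on child exit. */
            pfd.fd = fd;
            pfd.events = POLLHUP;
            (void)poll(&pfd, 1, -1);
            close(fd);              /* drop the last reference to the child */
            return (0);
    }
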
144
145 /* ARGSUSED */
146 int
147 sys_vfork(struct thread *td, struct vfork_args *uap)
148 {
149 int error, flags;
150 struct proc *p2;
151
152 flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
153 error = fork1(td, flags, 0, &p2, NULL, 0);
154 if (error == 0) {
155 td->td_retval[0] = p2->p_pid;
156 td->td_retval[1] = 0;
157 }
158 return (error);
159 }
160
161 int
162 sys_rfork(struct thread *td, struct rfork_args *uap)
163 {
164 struct proc *p2;
165 int error;
166
167 /* Don't allow kernel-only flags. */
168 if ((uap->flags & RFKERNELONLY) != 0)
169 return (EINVAL);
170
171 AUDIT_ARG_FFLAGS(uap->flags);
172 error = fork1(td, uap->flags, 0, &p2, NULL, 0);
173 if (error == 0) {
174 td->td_retval[0] = p2 ? p2->p_pid : 0;
175 td->td_retval[1] = 0;
176 }
177 return (error);
178 }
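
sys_rfork() passes the user-supplied mask straight through to fork1() (after rejecting kernel-only bits), so the RF* flags used throughout this file are exactly what userland hands to rfork(2). A hedged sketch using documented flags from <unistd.h>:

    #include <sys/types.h>
    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid;

            /*
             * RFFDG | RFPROC is equivalent to fork(2): a new process with a
             * copied descriptor table.  Adding RFNOWAIT reparents the child
             * to the reaper (normally init), so no wait(2) is needed.
             */
            pid = rfork(RFFDG | RFPROC | RFNOWAIT);
            if (pid == -1)
                    err(1, "rfork");
            if (pid == 0)
                    _exit(0);               /* child */
            return (0);                     /* parent */
    }
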
179
180 int nprocs = 1; /* process 0 */
181 int lastpid = 0;
182 SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
183 "Last used PID");
184
185 /*
186 * Random component to lastpid generation. We mix in a random factor to make
187  * it a little harder to predict. We sanity check the modulus value here so
188  * that critical paths don't have to. Don't let it be too small or we
189  * pointlessly waste entropy, and don't let it be impossibly large. Using a
190 * modulus that is too big causes a LOT more process table scans and slows
191 * down fork processing as the pidchecked caching is defeated.
192 */
193 static int randompid = 0;
194
195 static int
196 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
197 {
198 int error, pid;
199
200 error = sysctl_wire_old_buffer(req, sizeof(int));
201 if (error != 0)
202 return(error);
203 sx_xlock(&allproc_lock);
204 pid = randompid;
205 error = sysctl_handle_int(oidp, &pid, 0, req);
206 if (error == 0 && req->newptr != NULL) {
207 if (pid < 0 || pid > pid_max - 100) /* out of range */
208 pid = pid_max - 100;
209 else if (pid < 2) /* NOP */
210 pid = 0;
211 else if (pid < 100) /* Make it reasonable */
212 pid = 100;
213 randompid = pid;
214 }
215 sx_xunlock(&allproc_lock);
216 return (error);
217 }
218
219 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
220 0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
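
A hedged user-space sketch (not part of this file) of driving this knob with sysctlbyname(3); the value 1000 is purely illustrative, and the handler above clamps anything in [2, 99] up to 100 and caps anything above pid_max - 100. Writing requires root:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
            int modulus = 1000;             /* illustrative value only */
            int old;
            size_t oldlen = sizeof(old);

            /* Writes go through sysctl_kern_randompid(), which clamps the value. */
            if (sysctlbyname("kern.randompid", &old, &oldlen,
                &modulus, sizeof(modulus)) == -1)
                    err(1, "sysctlbyname(kern.randompid)");
            printf("previous modulus: %d\n", old);
            return (0);
    }
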
221
222 static int
223 fork_findpid(int flags)
224 {
225 struct proc *p;
226 int trypid;
227 static int pidchecked = 0;
228
229 /*
230 * Requires allproc_lock in order to iterate over the list
231 * of processes, and proctree_lock to access p_pgrp.
232 */
233 sx_assert(&allproc_lock, SX_LOCKED);
234 sx_assert(&proctree_lock, SX_LOCKED);
235
236 /*
237 * Find an unused process ID. We remember a range of unused IDs
238 * ready to use (from lastpid+1 through pidchecked-1).
239 *
240 * If RFHIGHPID is set (used during system boot), do not allocate
241 * low-numbered pids.
242 */
243 trypid = lastpid + 1;
244 if (flags & RFHIGHPID) {
245 if (trypid < 10)
246 trypid = 10;
247 } else {
248 if (randompid)
249 trypid += arc4random() % randompid;
250 }
251 retry:
252 /*
253 * If the process ID prototype has wrapped around,
254 * restart somewhat above 0, as the low-numbered procs
255 * tend to include daemons that don't exit.
256 */
257 if (trypid >= pid_max) {
258 trypid = trypid % pid_max;
259 if (trypid < 100)
260 trypid += 100;
261 pidchecked = 0;
262 }
263 if (trypid >= pidchecked) {
264 int doingzomb = 0;
265
266 pidchecked = PID_MAX;
267 /*
268 * Scan the active and zombie procs to check whether this pid
269 * is in use. Remember the lowest pid that's greater
270 * than trypid, so we can avoid checking for a while.
271 *
272 * Avoid reuse of the process group id, session id or
273 		 * the reaper subtree id. Note that for process groups
274 		 * and sessions, the number of reserved pids is
275 		 * bounded by the process limit. For the subtree ids,
276 		 * an id stays reserved only while a non-reaped
277 		 * process remains in the subtree, so the number of
278 		 * reserved pids is bounded by twice the process
279 		 * limit.
280 */
281 p = LIST_FIRST(&allproc);
282 again:
283 for (; p != NULL; p = LIST_NEXT(p, p_list)) {
284 while (p->p_pid == trypid ||
285 p->p_reapsubtree == trypid ||
286 (p->p_pgrp != NULL &&
287 (p->p_pgrp->pg_id == trypid ||
288 (p->p_session != NULL &&
289 p->p_session->s_sid == trypid)))) {
290 trypid++;
291 if (trypid >= pidchecked)
292 goto retry;
293 }
294 if (p->p_pid > trypid && pidchecked > p->p_pid)
295 pidchecked = p->p_pid;
296 if (p->p_pgrp != NULL) {
297 if (p->p_pgrp->pg_id > trypid &&
298 pidchecked > p->p_pgrp->pg_id)
299 pidchecked = p->p_pgrp->pg_id;
300 if (p->p_session != NULL &&
301 p->p_session->s_sid > trypid &&
302 pidchecked > p->p_session->s_sid)
303 pidchecked = p->p_session->s_sid;
304 }
305 }
306 if (!doingzomb) {
307 doingzomb = 1;
308 p = LIST_FIRST(&zombproc);
309 goto again;
310 }
311 }
312
313 /*
314 * RFHIGHPID does not mess with the lastpid counter during boot.
315 */
316 if (flags & RFHIGHPID)
317 pidchecked = 0;
318 else
319 lastpid = trypid;
320
321 return (trypid);
322 }
323
324 static int
325 fork_norfproc(struct thread *td, int flags)
326 {
327 int error;
328 struct proc *p1;
329
330 KASSERT((flags & RFPROC) == 0,
331 ("fork_norfproc called with RFPROC set"));
332 p1 = td->td_proc;
333
334 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
335 (flags & (RFCFDG | RFFDG))) {
336 PROC_LOCK(p1);
337 if (thread_single(p1, SINGLE_BOUNDARY)) {
338 PROC_UNLOCK(p1);
339 return (ERESTART);
340 }
341 PROC_UNLOCK(p1);
342 }
343
344 error = vm_forkproc(td, NULL, NULL, NULL, flags);
345 if (error)
346 goto fail;
347
348 /*
349 * Close all file descriptors.
350 */
351 if (flags & RFCFDG) {
352 struct filedesc *fdtmp;
353 fdtmp = fdinit(td->td_proc->p_fd);
354 fdescfree(td);
355 p1->p_fd = fdtmp;
356 }
357
358 /*
359 * Unshare file descriptors (from parent).
360 */
361 if (flags & RFFDG)
362 fdunshare(td);
363
364 fail:
365 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
366 (flags & (RFCFDG | RFFDG))) {
367 PROC_LOCK(p1);
368 thread_single_end(p1, SINGLE_BOUNDARY);
369 PROC_UNLOCK(p1);
370 }
371 return (error);
372 }
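
fork_norfproc() is the path taken when RFPROC is clear: no child is created and the requested operations are applied to the calling process itself. A hedged user-space sketch of that mode, using RFCFDG to swap in a clean descriptor table:

    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
            /*
             * No RFPROC, so no child is created; the kernel runs
             * fork_norfproc() against the calling process.  RFCFDG replaces
             * its descriptor table with a freshly initialized one, closing
             * every previously open descriptor (including 0, 1 and 2).
             */
            if (rfork(RFCFDG) == -1)
                    err(1, "rfork");
            /* From here on, the process has no open file descriptors. */
            return (0);
    }
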
373
374 static void
375 do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
376 struct vmspace *vm2, int pdflags)
377 {
378 struct proc *p1, *pptr;
379 int p2_held, trypid;
380 struct filedesc *fd;
381 struct filedesc_to_leader *fdtol;
382 struct sigacts *newsigacts;
383
384 sx_assert(&proctree_lock, SX_SLOCKED);
385 sx_assert(&allproc_lock, SX_XLOCKED);
386
387 p2_held = 0;
388 p1 = td->td_proc;
389
390 trypid = fork_findpid(flags);
391
392 sx_sunlock(&proctree_lock);
393
394 p2->p_state = PRS_NEW; /* protect against others */
395 p2->p_pid = trypid;
396 AUDIT_ARG_PID(p2->p_pid);
397 LIST_INSERT_HEAD(&allproc, p2, p_list);
398 allproc_gen++;
399 LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
400 tidhash_add(td2);
401 PROC_LOCK(p2);
402 PROC_LOCK(p1);
403
404 sx_xunlock(&allproc_lock);
405
406 bcopy(&p1->p_startcopy, &p2->p_startcopy,
407 __rangeof(struct proc, p_startcopy, p_endcopy));
408 pargs_hold(p2->p_args);
409 PROC_UNLOCK(p1);
410
411 bzero(&p2->p_startzero,
412 __rangeof(struct proc, p_startzero, p_endzero));
413 p2->p_treeflag = 0;
414
415 p2->p_ucred = crhold(td->td_ucred);
416
417 /* Tell the prison that we exist. */
418 prison_proc_hold(p2->p_ucred->cr_prison);
419
420 PROC_UNLOCK(p2);
421
422 /*
423 * Malloc things while we don't hold any locks.
424 */
425 if (flags & RFSIGSHARE)
426 newsigacts = NULL;
427 else
428 newsigacts = sigacts_alloc();
429
430 /*
431 * Copy filedesc.
432 */
433 if (flags & RFCFDG) {
434 fd = fdinit(p1->p_fd);
435 fdtol = NULL;
436 } else if (flags & RFFDG) {
437 fd = fdcopy(p1->p_fd);
438 fdtol = NULL;
439 } else {
440 fd = fdshare(p1->p_fd);
441 if (p1->p_fdtol == NULL)
442 p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
443 p1->p_leader);
444 if ((flags & RFTHREAD) != 0) {
445 /*
446 * Shared file descriptor table, and shared
447 * process leaders.
448 */
449 fdtol = p1->p_fdtol;
450 FILEDESC_XLOCK(p1->p_fd);
451 fdtol->fdl_refcount++;
452 FILEDESC_XUNLOCK(p1->p_fd);
453 } else {
454 /*
455 * Shared file descriptor table, and different
456 * process leaders.
457 */
458 fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
459 p1->p_fd, p2);
460 }
461 }
462 /*
463 * Make a proc table entry for the new process.
464 * Start by zeroing the section of proc that is zero-initialized,
465 * then copy the section that is copied directly from the parent.
466 */
467
468 PROC_LOCK(p2);
469 PROC_LOCK(p1);
470
471 bzero(&td2->td_startzero,
472 __rangeof(struct thread, td_startzero, td_endzero));
473 td2->td_su = NULL;
474
475 bcopy(&td->td_startcopy, &td2->td_startcopy,
476 __rangeof(struct thread, td_startcopy, td_endcopy));
477
478 bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
479 td2->td_sigstk = td->td_sigstk;
480 td2->td_flags = TDF_INMEM;
481 td2->td_lend_user_pri = PRI_MAX;
482 td2->td_dbg_sc_code = td->td_dbg_sc_code;
483 td2->td_dbg_sc_narg = td->td_dbg_sc_narg;
484
485 #ifdef VIMAGE
486 td2->td_vnet = NULL;
487 td2->td_vnet_lpush = NULL;
488 #endif
489
490 /*
491 * Allow the scheduler to initialize the child.
492 */
493 thread_lock(td);
494 sched_fork(td, td2);
495 thread_unlock(td);
496
497 /*
498 * Duplicate sub-structures as needed.
499 * Increase reference counts on shared objects.
500 */
501 p2->p_flag = P_INMEM;
502 p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC);
503 p2->p_swtick = ticks;
504 if (p1->p_flag & P_PROFIL)
505 startprofclock(p2);
506 td2->td_ucred = crhold(p2->p_ucred);
507
508 if (flags & RFSIGSHARE) {
509 p2->p_sigacts = sigacts_hold(p1->p_sigacts);
510 } else {
511 sigacts_copy(newsigacts, p1->p_sigacts);
512 p2->p_sigacts = newsigacts;
513 }
514
515 if (flags & RFTSIGZMB)
516 p2->p_sigparent = RFTSIGNUM(flags);
517 else if (flags & RFLINUXTHPN)
518 p2->p_sigparent = SIGUSR1;
519 else
520 p2->p_sigparent = SIGCHLD;
521
522 p2->p_textvp = p1->p_textvp;
523 p2->p_fd = fd;
524 p2->p_fdtol = fdtol;
525
526 if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
527 p2->p_flag |= P_PROTECTED;
528 p2->p_flag2 |= P2_INHERIT_PROTECTED;
529 }
530
531 /*
532 * p_limit is copy-on-write. Bump its refcount.
533 */
534 lim_fork(p1, p2);
535
536 pstats_fork(p1->p_stats, p2->p_stats);
537
538 PROC_UNLOCK(p1);
539 PROC_UNLOCK(p2);
540
541 /* Bump references to the text vnode (for procfs). */
542 if (p2->p_textvp)
543 vref(p2->p_textvp);
544
545 /*
546 * Set up linkage for kernel based threading.
547 */
548 if ((flags & RFTHREAD) != 0) {
549 mtx_lock(&ppeers_lock);
550 p2->p_peers = p1->p_peers;
551 p1->p_peers = p2;
552 p2->p_leader = p1->p_leader;
553 mtx_unlock(&ppeers_lock);
554 PROC_LOCK(p1->p_leader);
555 if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
556 PROC_UNLOCK(p1->p_leader);
557 /*
558 * The task leader is exiting, so process p1 is
559 * going to be killed shortly. Since p1 obviously
560 * isn't dead yet, we know that the leader is either
561 * sending SIGKILL's to all the processes in this
562 * task or is sleeping waiting for all the peers to
563 * exit. We let p1 complete the fork, but we need
564 * to go ahead and kill the new process p2 since
565 * the task leader may not get a chance to send
566 * SIGKILL to it. We leave it on the list so that
567 * the task leader will wait for this new process
568 * to commit suicide.
569 */
570 PROC_LOCK(p2);
571 kern_psignal(p2, SIGKILL);
572 PROC_UNLOCK(p2);
573 } else
574 PROC_UNLOCK(p1->p_leader);
575 } else {
576 p2->p_peers = NULL;
577 p2->p_leader = p2;
578 }
579
580 sx_xlock(&proctree_lock);
581 PGRP_LOCK(p1->p_pgrp);
582 PROC_LOCK(p2);
583 PROC_LOCK(p1);
584
585 /*
586 * Preserve some more flags in subprocess. P_PROFIL has already
587 * been preserved.
588 */
589 p2->p_flag |= p1->p_flag & P_SUGID;
590 td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
591 SESS_LOCK(p1->p_session);
592 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
593 p2->p_flag |= P_CONTROLT;
594 SESS_UNLOCK(p1->p_session);
595 if (flags & RFPPWAIT)
596 p2->p_flag |= P_PPWAIT;
597
598 p2->p_pgrp = p1->p_pgrp;
599 LIST_INSERT_AFTER(p1, p2, p_pglist);
600 PGRP_UNLOCK(p1->p_pgrp);
601 LIST_INIT(&p2->p_children);
602 LIST_INIT(&p2->p_orphans);
603
604 callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);
605
606 /*
607 * If PF_FORK is set, the child process inherits the
608 * procfs ioctl flags from its parent.
609 */
610 if (p1->p_pfsflags & PF_FORK) {
611 p2->p_stops = p1->p_stops;
612 p2->p_pfsflags = p1->p_pfsflags;
613 }
614
615 /*
616 * This begins the section where we must prevent the parent
617 * from being swapped.
618 */
619 _PHOLD(p1);
620 PROC_UNLOCK(p1);
621
622 /*
623 * Attach the new process to its parent.
624 *
625 * If RFNOWAIT is set, the newly created process becomes a child
626 * of init. This effectively disassociates the child from the
627 * parent.
628 */
629 if ((flags & RFNOWAIT) != 0) {
630 pptr = p1->p_reaper;
631 p2->p_reaper = pptr;
632 } else {
633 p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
634 p1 : p1->p_reaper;
635 pptr = p1;
636 }
637 p2->p_pptr = pptr;
638 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
639 LIST_INIT(&p2->p_reaplist);
640 LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
641 if (p2->p_reaper == p1)
642 p2->p_reapsubtree = p2->p_pid;
643 else
644 p2->p_reapsubtree = p1->p_reapsubtree;
645 sx_xunlock(&proctree_lock);
646
647 /* Inform accounting that we have forked. */
648 p2->p_acflag = AFORK;
649 PROC_UNLOCK(p2);
650
651 #ifdef KTRACE
652 ktrprocfork(p1, p2);
653 #endif
654
655 /*
656 * Finish creating the child process. It will return via a different
657 * execution path later. (ie: directly into user mode)
658 */
659 vm_forkproc(td, p2, td2, vm2, flags);
660
661 if (flags == (RFFDG | RFPROC)) {
662 PCPU_INC(cnt.v_forks);
663 PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
664 p2->p_vmspace->vm_ssize);
665 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
666 PCPU_INC(cnt.v_vforks);
667 PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
668 p2->p_vmspace->vm_ssize);
669 } else if (p1 == &proc0) {
670 PCPU_INC(cnt.v_kthreads);
671 PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
672 p2->p_vmspace->vm_ssize);
673 } else {
674 PCPU_INC(cnt.v_rforks);
675 PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
676 p2->p_vmspace->vm_ssize);
677 }
678
679 #ifdef PROCDESC
680 /*
681 * Associate the process descriptor with the process before anything
682 * can happen that might cause that process to need the descriptor.
683 * However, don't do this until after fork(2) can no longer fail.
684 */
685 if (flags & RFPROCDESC)
686 procdesc_new(p2, pdflags);
687 #endif
688
689 /*
690 * Both processes are set up, now check if any loadable modules want
691 * to adjust anything.
692 */
693 EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);
694
695 /*
696 * Set the child start time and mark the process as being complete.
697 */
698 PROC_LOCK(p2);
699 PROC_LOCK(p1);
700 microuptime(&p2->p_stats->p_start);
701 PROC_SLOCK(p2);
702 p2->p_state = PRS_NORMAL;
703 PROC_SUNLOCK(p2);
704
705 #ifdef KDTRACE_HOOKS
706 /*
707 * Tell the DTrace fasttrap provider about the new process so that any
708 * tracepoints inherited from the parent can be removed. We have to do
709 * this only after p_state is PRS_NORMAL since the fasttrap module will
710 * use pfind() later on.
711 */
712 if ((flags & RFMEM) == 0 && dtrace_fasttrap_fork)
713 dtrace_fasttrap_fork(p1, p2);
714 #endif
715 if ((p1->p_flag & (P_TRACED | P_FOLLOWFORK)) == (P_TRACED |
716 P_FOLLOWFORK)) {
717 /*
718 * Arrange for debugger to receive the fork event.
719 *
720 * We can report PL_FLAG_FORKED regardless of
721 		 * P_FOLLOWFORK settings, but it does not make sense
722 		 * for a runaway child.
723 */
724 td->td_dbgflags |= TDB_FORK;
725 td->td_dbg_forked = p2->p_pid;
726 td2->td_dbgflags |= TDB_STOPATFORK;
727 _PHOLD(p2);
728 p2_held = 1;
729 }
730 if (flags & RFPPWAIT) {
731 td->td_pflags |= TDP_RFPPWAIT;
732 td->td_rfppwait_p = p2;
733 }
734 PROC_UNLOCK(p2);
735 if ((flags & RFSTOPPED) == 0) {
736 /*
737 * If RFSTOPPED not requested, make child runnable and
738 * add to run queue.
739 */
740 thread_lock(td2);
741 TD_SET_CAN_RUN(td2);
742 sched_add(td2, SRQ_BORING);
743 thread_unlock(td2);
744 }
745
746 /*
747 * Now can be swapped.
748 */
749 _PRELE(p1);
750 PROC_UNLOCK(p1);
751
752 /*
753 * Tell any interested parties about the new process.
754 */
755 knote_fork(&p1->p_klist, p2->p_pid);
756 SDT_PROBE3(proc, kernel, , create, p2, p1, flags);
757
758 /*
759 * Wait until debugger is attached to child.
760 */
761 PROC_LOCK(p2);
762 while ((td2->td_dbgflags & TDB_STOPATFORK) != 0)
763 cv_wait(&p2->p_dbgwait, &p2->p_mtx);
764 if (p2_held)
765 _PRELE(p2);
766 PROC_UNLOCK(p2);
767 }
768
769 int
770 fork1(struct thread *td, int flags, int pages, struct proc **procp,
771 int *procdescp, int pdflags)
772 {
773 struct proc *p1, *newproc;
774 struct thread *td2;
775 struct vmspace *vm2;
776 #ifdef PROCDESC
777 struct file *fp_procdesc;
778 #endif
779 vm_ooffset_t mem_charged;
780 int error, nprocs_new, ok;
781 static int curfail;
782 static struct timeval lastfail;
783
784 /* Check for the undefined or unimplemented flags. */
785 if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
786 return (EINVAL);
787
788 /* Signal value requires RFTSIGZMB. */
789 if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
790 return (EINVAL);
791
792 /* Can't copy and clear. */
793 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
794 return (EINVAL);
795
796 /* Check the validity of the signal number. */
797 if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
798 return (EINVAL);
799
800 #ifdef PROCDESC
801 if ((flags & RFPROCDESC) != 0) {
802 		/* Can't request a process descriptor without also creating a process. */
803 if ((flags & RFPROC) == 0)
804 return (EINVAL);
805
806 /* Must provide a place to put a procdesc if creating one. */
807 if (procdescp == NULL)
808 return (EINVAL);
809 }
810 #endif
811
812 p1 = td->td_proc;
813
814 /*
815 * Here we don't create a new process, but we divorce
816 * certain parts of a process from itself.
817 */
818 if ((flags & RFPROC) == 0) {
819 *procp = NULL;
820 return (fork_norfproc(td, flags));
821 }
822
823 #ifdef PROCDESC
824 fp_procdesc = NULL;
825 #endif
826 newproc = NULL;
827 vm2 = NULL;
828
829 /*
830 * Increment the nprocs resource before allocations occur.
831 * Although process entries are dynamically created, we still
832 * keep a global limit on the maximum number we will
833 	 * create. There are hard limits on the number of processes
834 * that can run, established by the KVA and memory usage for
835 * the process data.
836 *
837 * Don't allow a nonprivileged user to use the last ten
838 * processes; don't let root exceed the limit.
839 */
840 nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
841 if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred,
842 PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) {
843 sx_xlock(&allproc_lock);
844 if (ppsratecheck(&lastfail, &curfail, 1)) {
845 printf("maxproc limit exceeded by uid %u (pid %d); "
846 "see tuning(7) and login.conf(5)\n",
847 td->td_ucred->cr_ruid, p1->p_pid);
848 }
849 sx_xunlock(&allproc_lock);
850 error = EAGAIN;
851 goto fail1;
852 }
853
854 #ifdef PROCDESC
855 /*
856 * If required, create a process descriptor in the parent first; we
857 * will abandon it if something goes wrong. We don't finit() until
858 * later.
859 */
860 if (flags & RFPROCDESC) {
861 error = falloc(td, &fp_procdesc, procdescp, 0);
862 if (error != 0)
863 goto fail1;
864 }
865 #endif
866
867 mem_charged = 0;
868 if (pages == 0)
869 pages = KSTACK_PAGES;
870 /* Allocate new proc. */
871 newproc = uma_zalloc(proc_zone, M_WAITOK);
872 td2 = FIRST_THREAD_IN_PROC(newproc);
873 if (td2 == NULL) {
874 td2 = thread_alloc(pages);
875 if (td2 == NULL) {
876 error = ENOMEM;
877 goto fail1;
878 }
879 proc_linkup(newproc, td2);
880 } else {
881 if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
882 if (td2->td_kstack != 0)
883 vm_thread_dispose(td2);
884 if (!thread_alloc_stack(td2, pages)) {
885 error = ENOMEM;
886 goto fail1;
887 }
888 }
889 }
890
891 if ((flags & RFMEM) == 0) {
892 vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
893 if (vm2 == NULL) {
894 error = ENOMEM;
895 goto fail1;
896 }
897 if (!swap_reserve(mem_charged)) {
898 /*
899 * The swap reservation failed. The accounting
900 * from the entries of the copied vm2 will be
901 			 * subtracted in vmspace_free(), so force the
902 * reservation there.
903 */
904 swap_reserve_force(mem_charged);
905 error = ENOMEM;
906 goto fail1;
907 }
908 } else
909 vm2 = NULL;
910
911 /*
912 * XXX: This is ugly; when we copy resource usage, we need to bump
913 * per-cred resource counters.
914 */
915 newproc->p_ucred = p1->p_ucred;
916
917 /*
918 * Initialize resource accounting for the child process.
919 */
920 error = racct_proc_fork(p1, newproc);
921 if (error != 0) {
922 error = EAGAIN;
923 goto fail1;
924 }
925
926 #ifdef MAC
927 mac_proc_init(newproc);
928 #endif
929 knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx);
930 STAILQ_INIT(&newproc->p_ktr);
931
932 /* We have to lock the process tree while we look for a pid. */
933 sx_slock(&proctree_lock);
934 sx_xlock(&allproc_lock);
935
936 /*
937 * Increment the count of procs running with this uid. Don't allow
938 * a nonprivileged user to exceed their current limit.
939 *
940 * XXXRW: Can we avoid privilege here if it's not needed?
941 */
942 error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
943 if (error == 0)
944 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
945 else {
946 PROC_LOCK(p1);
947 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
948 lim_cur(p1, RLIMIT_NPROC));
949 PROC_UNLOCK(p1);
950 }
951 if (ok) {
952 do_fork(td, flags, newproc, td2, vm2, pdflags);
953
954 /*
955 * Return child proc pointer to parent.
956 */
957 *procp = newproc;
958 #ifdef PROCDESC
959 if (flags & RFPROCDESC) {
960 procdesc_finit(newproc->p_procdesc, fp_procdesc);
961 fdrop(fp_procdesc, td);
962 }
963 #endif
964 racct_proc_fork_done(newproc);
965 return (0);
966 }
967
968 error = EAGAIN;
969 sx_sunlock(&proctree_lock);
970 sx_xunlock(&allproc_lock);
971 #ifdef MAC
972 mac_proc_destroy(newproc);
973 #endif
974 racct_proc_exit(newproc);
975 fail1:
976 if (vm2 != NULL)
977 vmspace_free(vm2);
978 uma_zfree(proc_zone, newproc);
979 #ifdef PROCDESC
980 if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
981 fdclose(td->td_proc->p_fd, fp_procdesc, *procdescp, td);
982 fdrop(fp_procdesc, td);
983 }
984 #endif
985 atomic_add_int(&nprocs, -1);
986 pause("fork", hz / 2);
987 return (error);
988 }
989
990 /*
991 * Handle the return of a child process from fork1(). This function
992 * is called from the MD fork_trampoline() entry point.
993 */
994 void
995 fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
996 struct trapframe *frame)
997 {
998 struct proc *p;
999 struct thread *td;
1000 struct thread *dtd;
1001
1002 td = curthread;
1003 p = td->td_proc;
1004 KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
1005
1006 CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
1007 td, td->td_sched, p->p_pid, td->td_name);
1008
1009 sched_fork_exit(td);
1010 /*
1011 * Processes normally resume in mi_switch() after being
1012 * cpu_switch()'ed to, but when children start up they arrive here
1013 * instead, so we must do much the same things as mi_switch() would.
1014 */
1015 if ((dtd = PCPU_GET(deadthread))) {
1016 PCPU_SET(deadthread, NULL);
1017 thread_stash(dtd);
1018 }
1019 thread_unlock(td);
1020
1021 /*
1022 * cpu_set_fork_handler intercepts this function call to
1023 	 * have it call a non-returning function so it stays in kernel mode.
1024 * initproc has its own fork handler, but it does return.
1025 */
1026 KASSERT(callout != NULL, ("NULL callout in fork_exit"));
1027 callout(arg, frame);
1028
1029 /*
1030 * Check if a kernel thread misbehaved and returned from its main
1031 * function.
1032 */
1033 if (p->p_flag & P_KTHREAD) {
1034 printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
1035 td->td_name, p->p_pid);
1036 kthread_exit();
1037 }
1038 mtx_assert(&Giant, MA_NOTOWNED);
1039
1040 if (p->p_sysent->sv_schedtail != NULL)
1041 (p->p_sysent->sv_schedtail)(td);
1042 }
1043
1044 /*
1045 * Simplified back end of syscall(), used when returning from fork()
1046 * directly into user mode. Giant is not held on entry, and must not
1047 * be held on return. This function is passed in to fork_exit() as the
1048 * first parameter and is called when returning to a new userland process.
1049 */
1050 void
1051 fork_return(struct thread *td, struct trapframe *frame)
1052 {
1053 struct proc *p, *dbg;
1054
1055 p = td->td_proc;
1056 if (td->td_dbgflags & TDB_STOPATFORK) {
1057 sx_xlock(&proctree_lock);
1058 PROC_LOCK(p);
1059 if ((p->p_pptr->p_flag & (P_TRACED | P_FOLLOWFORK)) ==
1060 (P_TRACED | P_FOLLOWFORK)) {
1061 /*
1062 * If debugger still wants auto-attach for the
1063 * parent's children, do it now.
1064 */
1065 dbg = p->p_pptr->p_pptr;
1066 p->p_flag |= P_TRACED;
1067 p->p_oppid = p->p_pptr->p_pid;
1068 CTR2(KTR_PTRACE,
1069 "fork_return: attaching to new child pid %d: oppid %d",
1070 p->p_pid, p->p_oppid);
1071 proc_reparent(p, dbg);
1072 sx_xunlock(&proctree_lock);
1073 td->td_dbgflags |= TDB_CHILD | TDB_SCX;
1074 ptracestop(td, SIGSTOP);
1075 td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX);
1076 } else {
1077 /*
1078 * ... otherwise clear the request.
1079 */
1080 sx_xunlock(&proctree_lock);
1081 td->td_dbgflags &= ~TDB_STOPATFORK;
1082 cv_broadcast(&p->p_dbgwait);
1083 }
1084 PROC_UNLOCK(p);
1085 } else if (p->p_flag & P_TRACED) {
1086 /*
1087 * This is the start of a new thread in a traced
1088 * process. Report a system call exit event.
1089 */
1090 PROC_LOCK(p);
1091 td->td_dbgflags |= TDB_SCX;
1092 _STOPEVENT(p, S_SCX, td->td_dbg_sc_code);
1093 if ((p->p_stops & S_PT_SCX) != 0)
1094 ptracestop(td, SIGTRAP);
1095 td->td_dbgflags &= ~TDB_SCX;
1096 PROC_UNLOCK(p);
1097 }
1098
1099 userret(td, frame);
1100
1101 #ifdef KTRACE
1102 if (KTRPOINT(td, KTR_SYSRET))
1103 ktrsysret(SYS_fork, 0, 0);
1104 #endif
1105 }