FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c
/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_fork.c 284343 2015-06-13 16:15:43Z jhb $");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"
#include "opt_procdesc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/pioctl.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_fork_func_t	dtrace_fasttrap_fork;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, kernel, , create, "struct proc *",
    "struct proc *", "int");

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

/* ARGSUSED */
int
sys_fork(struct thread *td, struct fork_args *uap)
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2, NULL, 0);
	if (error == 0) {
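		/*
		 * Only the parent returns this way; td_retval[1] = 0 is
		 * the parent's half of the historical two-register fork
		 * return convention, while the child's return values
		 * are set up in the MD cpu_fork()/fork_return() path.
		 */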
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/* ARGSUSED */
int
sys_pdfork(struct thread *td, struct pdfork_args *uap)
{
#ifdef PROCDESC
	int error, fd;
	struct proc *p2;

	/*
	 * It is necessary to return fd by reference because 0 is a valid file
	 * descriptor number, and the child needs to be able to distinguish
	 * itself from the parent using the return value.
	 */
	error = fork1(td, RFFDG | RFPROC | RFPROCDESC, 0, &p2,
	    &fd, uap->flags);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
		error = copyout(&fd, uap->fdp, sizeof(fd));
	}
	return (error);
#else
	return (ENOSYS);
#endif
}
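
/*
 * Example userland use of pdfork(2) (illustrative sketch only;
 * run_child() is a hypothetical helper):
 *
 *	int fd;
 *	pid_t pid;
 *
 *	pid = pdfork(&fd, 0);
 *	if (pid == -1)
 *		err(1, "pdfork");
 *	if (pid == 0)
 *		_exit(run_child());
 *	...
 *	close(fd);
 *
 * Unless PD_DAEMON was passed, closing the last reference to the
 * process descriptor normally delivers SIGKILL to the child.
 */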

/* ARGSUSED */
int
sys_vfork(struct thread *td, struct vfork_args *uap)
{
	int error, flags;
	struct proc *p2;

	flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
	error = fork1(td, flags, 0, &p2, NULL, 0);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

int
sys_rfork(struct thread *td, struct rfork_args *uap)
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	AUDIT_ARG_FFLAGS(uap->flags);
	error = fork1(td, uap->flags, 0, &p2, NULL, 0);
	if (error == 0) {
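		/*
		 * fork1() leaves p2 NULL when RFPROC was not requested,
		 * since fork_norfproc() only adjusts the current
		 * process; report a pid of 0 in that case.
		 */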
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}

int	nprocs = 1;		/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a modulus
 * that is too big causes a LOT more process table scans and slows down
 * fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > pid_max - 100)	/* out of range */
			pid = pid_max - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
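
/*
 * For example, "sysctl kern.randompid=1000" advances each new pid by a
 * random amount in [0, 1000).  The handler above clamps the modulus to
 * the range [100, pid_max - 100], or to 0 to disable randomization.
 */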

static int
fork_findpid(int flags)
{
	struct proc *p;
	int trypid;
	static int pidchecked = 0;

	/*
	 * Requires allproc_lock in order to iterate over the list
	 * of processes, and proctree_lock to access p_pgrp.
	 */
	sx_assert(&allproc_lock, SX_LOCKED);
	sx_assert(&proctree_lock, SX_LOCKED);

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
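	/*
	 * For example, once a scan has established lastpid = 700 and
	 * pidchecked = 720, pids 701 through 719 can be handed out by
	 * later calls without rescanning the process lists.
	 */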
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= pid_max) {
		trypid = trypid % pid_max;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 *
		 * Avoid reuse of the process group id, session id or
		 * the reaper subtree id.  Note that for process groups
		 * and sessions, the amount of reserved pids is
		 * limited by the process limit.  For the subtree ids,
		 * the id is kept reserved only while there is a
		 * non-reaped process in the subtree, so the amount of
		 * reserved pids is limited by the process limit times
		 * two.
		 */
		p = LIST_FIRST(&allproc);
again:
		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
			while (p->p_pid == trypid ||
			    p->p_reapsubtree == trypid ||
			    (p->p_pgrp != NULL &&
			    (p->p_pgrp->pg_id == trypid ||
			    (p->p_session != NULL &&
			    p->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p->p_pid > trypid && pidchecked > p->p_pid)
				pidchecked = p->p_pid;
			if (p->p_pgrp != NULL) {
				if (p->p_pgrp->pg_id > trypid &&
				    pidchecked > p->p_pgrp->pg_id)
					pidchecked = p->p_pgrp->pg_id;
				if (p->p_session != NULL &&
				    p->p_session->s_sid > trypid &&
				    pidchecked > p->p_session->s_sid)
					pidchecked = p->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	return (trypid);
}

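/*
 * Handle the rfork(2) cases that do not create a new process (RFPROC not
 * set): the requested resources of the calling process itself are unshared
 * or reset instead, e.g. rfork(RFCFDG) replaces the caller's file
 * descriptor table with a fresh one.
 */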
static int
fork_norfproc(struct thread *td, int flags)
{
	int error;
	struct proc *p1;

	KASSERT((flags & RFPROC) == 0,
	    ("fork_norfproc called with RFPROC set"));
	p1 = td->td_proc;

	if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
	    (flags & (RFCFDG | RFFDG))) {
		PROC_LOCK(p1);
		if (thread_single(p1, SINGLE_BOUNDARY)) {
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
	}

	error = vm_forkproc(td, NULL, NULL, NULL, flags);
	if (error)
		goto fail;

	/*
	 * Close all file descriptors.
	 */
	if (flags & RFCFDG) {
		struct filedesc *fdtmp;
		fdtmp = fdinit(td->td_proc->p_fd);
		fdescfree(td);
		p1->p_fd = fdtmp;
	}

	/*
	 * Unshare file descriptors (from parent).
	 */
	if (flags & RFFDG)
		fdunshare(td);

fail:
	if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
	    (flags & (RFCFDG | RFFDG))) {
		PROC_LOCK(p1);
		thread_single_end(p1, SINGLE_BOUNDARY);
		PROC_UNLOCK(p1);
	}
	return (error);
}

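/*
 * Second half of fork(2): assemble the new process p2 from p1.  Called
 * with allproc_lock exclusively locked and proctree_lock share locked;
 * both are dropped before returning.  By this point the fork can no
 * longer fail, so this function returns no error.
 */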
static void
do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int pdflags)
{
	struct proc *p1, *pptr;
	int p2_held, trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p2_held = 0;
	p1 = td->td_proc;

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits as to the number of processes that can run.
	 */
	nprocs++;

	trypid = fork_findpid(flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);
	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	p2->p_treeflag = 0;

	p2->p_ucred = crhold(td->td_ucred);

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	td2->td_su = NULL;

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	td2->td_ucred = crhold(p2->p_ucred);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (flags & RFTSIGZMB)
		p2->p_sigparent = RFTSIGNUM(flags);
	else if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	else
		p2->p_reapsubtree = p1->p_reapsubtree;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later (i.e., directly into user mode).
	 */
	vm_forkproc(td, p2, td2, vm2, flags);

	if (flags == (RFFDG | RFPROC)) {
		PCPU_INC(cnt.v_forks);
		PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		PCPU_INC(cnt.v_vforks);
		PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		PCPU_INC(cnt.v_kthreads);
		PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		PCPU_INC(cnt.v_rforks);
		PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}
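
	/*
	 * The first two flag patterns above match sys_fork() and
	 * sys_vfork() exactly, so the per-CPU counters distinguish
	 * ordinary forks, vforks, kernel threads forked from proc0,
	 * and all remaining rfork() variants.
	 */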

#ifdef PROCDESC
	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (flags & RFPROCDESC)
		procdesc_new(p2, pdflags);
#endif

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed.  We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	if ((p1->p_flag & (P_TRACED | P_FOLLOWFORK)) == (P_TRACED |
	    P_FOLLOWFORK)) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make sense
		 * for a runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
		_PHOLD(p2);
		p2_held = 1;
	}
	if (flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
	}
	PROC_UNLOCK(p2);
	if ((flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
	}

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(&p1->p_klist, p2->p_pid);
	SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);

	/*
	 * Wait until debugger is attached to child.
	 */
	PROC_LOCK(p2);
	while ((td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	if (p2_held)
		_PRELE(p2);
	PROC_UNLOCK(p2);
}

int
fork1(struct thread *td, int flags, int pages, struct proc **procp,
    int *procdescp, int pdflags)
{
	struct proc *p1;
	struct proc *newproc;
	int ok;
	struct thread *td2;
	struct vmspace *vm2;
	vm_ooffset_t mem_charged;
	int error;
	static int curfail;
	static struct timeval lastfail;
#ifdef PROCDESC
	struct file *fp_procdesc = NULL;
#endif

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

#ifdef PROCDESC
	if ((flags & RFPROCDESC) != 0) {
		/* Can't get a process descriptor without creating a process. */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (procdescp == NULL)
			return (EINVAL);
	}
#endif

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		*procp = NULL;
		return (fork_norfproc(td, flags));
	}

#ifdef PROCDESC
	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong. We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = falloc(td, &fp_procdesc, procdescp, 0);
		if (error != 0)
			return (error);
	}
#endif

	mem_charged = 0;
	vm2 = NULL;
	if (pages == 0)
		pages = KSTACK_PAGES;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail1;
		}
		proc_linkup(newproc, td2);
	} else {
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				error = ENOMEM;
				goto fail1;
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail1;
		}
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed. The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			error = ENOMEM;
			goto fail1;
		}
	} else
		vm2 = NULL;

	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 * per-cred resource counters.
	 */
	newproc->p_ucred = p1->p_ucred;

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);
	if (error != 0) {
		error = EAGAIN;
		goto fail1;
	}

#ifdef MAC
	mac_proc_init(newproc);
#endif
	knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		PROC_LOCK(p1);
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(p1, RLIMIT_NPROC));
		PROC_UNLOCK(p1);
	}
	if (ok) {
		do_fork(td, flags, newproc, td2, vm2, pdflags);

		/*
		 * Return child proc pointer to parent.
		 */
		*procp = newproc;
#ifdef PROCDESC
		if (flags & RFPROCDESC) {
			procdesc_finit(newproc->p_procdesc, fp_procdesc);
			fdrop(fp_procdesc, td);
		}
#endif
		racct_proc_fork_done(newproc);
		return (0);
	}

	error = EAGAIN;
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %u (pid %d); see tuning(7) and login.conf(5)\n",
		    td->td_ucred->cr_ruid, p1->p_pid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
	racct_proc_exit(newproc);
fail1:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
#ifdef PROCDESC
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td->td_proc->p_fd, fp_procdesc, *procdescp, td);
		fdrop(fp_procdesc, td);
	}
#endif
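	/*
	 * Nap briefly on failure so that a process repeatedly hitting
	 * the limits above cannot spin retrying fork().
	 */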
	pause("fork", hz / 2);
	return (error);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td->td_sched, p->p_pid, td->td_name);

	sched_fork_exit(td);
	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);

	/*
	 * cpu_set_fork_handler() can intercept this call so that the
	 * thread runs a non-returning function and stays in kernel
	 * mode.  initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KTHREAD) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kproc_exit(0);
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	if (p->p_sysent->sv_schedtail != NULL)
		(p->p_sysent->sv_schedtail)(td);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(struct thread *td, struct trapframe *frame)
{
	struct proc *p, *dbg;

	if (td->td_dbgflags & TDB_STOPATFORK) {
		p = td->td_proc;
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		if ((p->p_pptr->p_flag & (P_TRACED | P_FOLLOWFORK)) ==
		    (P_TRACED | P_FOLLOWFORK)) {
			/*
			 * If debugger still wants auto-attach for the
			 * parent's children, do it now.
			 */
			dbg = p->p_pptr->p_pptr;
			p->p_flag |= P_TRACED;
			p->p_oppid = p->p_pptr->p_pid;
			CTR2(KTR_PTRACE,
			    "fork_return: attaching to new child pid %d: oppid %d",
			    p->p_pid, p->p_oppid);
			proc_reparent(p, dbg);
			sx_xunlock(&proctree_lock);
			td->td_dbgflags |= TDB_CHILD;
			ptracestop(td, SIGSTOP);
			td->td_dbgflags &= ~TDB_CHILD;
		} else {
			/*
			 * ... otherwise clear the request.
			 */
			sx_xunlock(&proctree_lock);
			td->td_dbgflags &= ~TDB_STOPATFORK;
			cv_broadcast(&p->p_dbgwait);
		}
		PROC_UNLOCK(p);
	}

	userret(td, frame);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
}