FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_fork.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/10.1/sys/kern/kern_fork.c 270267 2014-08-21 12:30:01Z kib $");
39
40 #include "opt_kdtrace.h"
41 #include "opt_ktrace.h"
42 #include "opt_kstack_pages.h"
43 #include "opt_procdesc.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/eventhandler.h>
49 #include <sys/fcntl.h>
50 #include <sys/filedesc.h>
51 #include <sys/jail.h>
52 #include <sys/kernel.h>
53 #include <sys/kthread.h>
54 #include <sys/sysctl.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/mutex.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/procdesc.h>
61 #include <sys/pioctl.h>
62 #include <sys/racct.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sched.h>
65 #include <sys/syscall.h>
66 #include <sys/vmmeter.h>
67 #include <sys/vnode.h>
68 #include <sys/acct.h>
69 #include <sys/ktr.h>
70 #include <sys/ktrace.h>
71 #include <sys/unistd.h>
72 #include <sys/sdt.h>
73 #include <sys/sx.h>
74 #include <sys/sysent.h>
75 #include <sys/signalvar.h>
76
77 #include <security/audit/audit.h>
78 #include <security/mac/mac_framework.h>
79
80 #include <vm/vm.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_extern.h>
84 #include <vm/uma.h>
85
86 #ifdef KDTRACE_HOOKS
87 #include <sys/dtrace_bsd.h>
88 dtrace_fork_func_t dtrace_fasttrap_fork;
89 #endif
90
91 SDT_PROVIDER_DECLARE(proc);
92 SDT_PROBE_DEFINE3(proc, kernel, , create, "struct proc *",
93 "struct proc *", "int");
94
95 #ifndef _SYS_SYSPROTO_H_
96 struct fork_args {
97 int dummy;
98 };
99 #endif
100
101 /* ARGSUSED */
102 int
103 sys_fork(struct thread *td, struct fork_args *uap)
104 {
105 int error;
106 struct proc *p2;
107
108 error = fork1(td, RFFDG | RFPROC, 0, &p2, NULL, 0);
109 if (error == 0) {
110 td->td_retval[0] = p2->p_pid;
111 td->td_retval[1] = 0;
112 }
113 return (error);
114 }
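
A minimal userland sketch of the two-sided return implemented above: fork1() stores the child's pid in td_retval[0] for the parent, while the MD trampoline path (see fork_return() below) arranges for the child to see 0. Assumes only a standard FreeBSD/POSIX userland.

    #include <sys/types.h>
    #include <err.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid = fork();

            if (pid == -1)
                    err(1, "fork");         /* fork1() failed, e.g. EAGAIN */
            else if (pid == 0)
                    printf("child: saw return value 0\n");
            else
                    printf("parent: saw child pid %d\n", (int)pid);
            return (0);
    }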
115
116 /* ARGSUSED */
117 int
118 sys_pdfork(td, uap)
119 struct thread *td;
120 struct pdfork_args *uap;
121 {
122 #ifdef PROCDESC
123 int error, fd;
124 struct proc *p2;
125
126 /*
127 * It is necessary to return fd by reference because 0 is a valid file
128 * descriptor number, and the child needs to be able to distinguish
129 * itself from the parent using the return value.
130 */
131 error = fork1(td, RFFDG | RFPROC | RFPROCDESC, 0, &p2,
132 &fd, uap->flags);
133 if (error == 0) {
134 td->td_retval[0] = p2->p_pid;
135 td->td_retval[1] = 0;
136 error = copyout(&fd, uap->fdp, sizeof(fd));
137 }
138 return (error);
139 #else
140 return (ENOSYS);
141 #endif
142 }
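
The comment above explains why the descriptor comes back through a pointer: in the child, pdfork(2) returns 0, so the return value alone cannot carry the fd. A short sketch of the userland side (FreeBSD-specific; requires a kernel built with PROCDESC):

    #include <sys/procdesc.h>
    #include <err.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd;
            pid_t pid = pdfork(&fd, 0);

            if (pid == -1)
                    err(1, "pdfork");
            if (pid == 0)
                    _exit(0);       /* child: the fd is meaningful in the parent */
            printf("parent: child %d, procdesc fd %d\n", (int)pid, fd);
            close(fd);      /* closing the last reference SIGKILLs a live child
                               unless PD_DAEMON was passed */
            return (0);
    }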
143
144 /* ARGSUSED */
145 int
146 sys_vfork(struct thread *td, struct vfork_args *uap)
147 {
148 int error, flags;
149 struct proc *p2;
150
151 flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
152 error = fork1(td, flags, 0, &p2, NULL, 0);
153 if (error == 0) {
154 td->td_retval[0] = p2->p_pid;
155 td->td_retval[1] = 0;
156 }
157 return (error);
158 }
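
vfork(2) above is just fork1() with RFMEM (share the address space) and RFPPWAIT (suspend the parent). A sketch of the only safe usage pattern, where the child does nothing but exec or _exit:

    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid = vfork();

            if (pid == -1)
                    err(1, "vfork");
            if (pid == 0) {
                    /* Running on the parent's memory (RFMEM). */
                    execl("/bin/true", "true", (char *)NULL);
                    _exit(127);     /* never exit(3) after a failed exec */
            }
            /* RFPPWAIT: we resume only after the child exec'd or exited. */
            return (0);
    }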
159
160 int
161 sys_rfork(struct thread *td, struct rfork_args *uap)
162 {
163 struct proc *p2;
164 int error;
165
166 /* Don't allow kernel-only flags. */
167 if ((uap->flags & RFKERNELONLY) != 0)
168 return (EINVAL);
169
170 AUDIT_ARG_FFLAGS(uap->flags);
171 error = fork1(td, uap->flags, 0, &p2, NULL, 0);
172 if (error == 0) {
173 td->td_retval[0] = p2 ? p2->p_pid : 0;
174 td->td_retval[1] = 0;
175 }
176 return (error);
177 }
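
rfork(2) exposes the RF* flags directly; the RFKERNELONLY check above rejects flags such as RFSTOPPED and RFHIGHPID that only the kernel may pass to fork1(). A sketch reproducing plain fork(2) semantics, i.e. the same RFFDG | RFPROC combination sys_fork() uses:

    #include <err.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid = rfork(RFPROC | RFFDG);  /* new process, copied fd table */

            if (pid == -1)
                    err(1, "rfork");
            if (pid == 0)
                    _exit(0);
            printf("parent of %d\n", (int)pid);
            return (0);
    }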
178
179 int nprocs = 1; /* process 0 */
180 int lastpid = 0;
181 SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
182 "Last used PID");
183
184 /*
185 * Random component to lastpid generation. We mix in a random factor to make
186 * it a little harder to predict. We sanity-check the modulus value here to
187 * avoid doing so in critical paths. Don't let it be too small or we
188 * pointlessly waste entropy, and don't let it be impossibly large. Using a
189 * modulus that is too big causes a LOT more process table scans and slows
190 * down fork processing as the pidchecked caching is defeated.
191 */
192 static int randompid = 0;
193
194 static int
195 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
196 {
197 int error, pid;
198
199 error = sysctl_wire_old_buffer(req, sizeof(int));
200 if (error != 0)
201 return(error);
202 sx_xlock(&allproc_lock);
203 pid = randompid;
204 error = sysctl_handle_int(oidp, &pid, 0, req);
205 if (error == 0 && req->newptr != NULL) {
206 if (pid < 0 || pid > pid_max - 100) /* out of range */
207 pid = pid_max - 100;
208 else if (pid < 2) /* NOP */
209 pid = 0;
210 else if (pid < 100) /* Make it reasonable */
211 pid = 100;
212 randompid = pid;
213 }
214 sx_xunlock(&allproc_lock);
215 return (error);
216 }
217
218 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
219 0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
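
The handler clamps rather than rejects out-of-range values, so any write leaves a sane modulus behind. A hedged userland sketch of reading and setting it with sysctlbyname(3) (setting requires root):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
            int newmod = 1000, oldmod;
            size_t oldlen = sizeof(oldmod);

            /* Fetch the old modulus and install a new one in one call. */
            if (sysctlbyname("kern.randompid", &oldmod, &oldlen,
                &newmod, sizeof(newmod)) == -1)
                    err(1, "sysctlbyname");
            printf("kern.randompid was %d, now ~%d\n", oldmod, newmod);
            return (0);
    }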
220
221 static int
222 fork_findpid(int flags)
223 {
224 struct proc *p;
225 int trypid;
226 static int pidchecked = 0;
227
228 /*
229 * Requires allproc_lock in order to iterate over the list
230 * of processes, and proctree_lock to access p_pgrp.
231 */
232 sx_assert(&allproc_lock, SX_LOCKED);
233 sx_assert(&proctree_lock, SX_LOCKED);
234
235 /*
236 * Find an unused process ID. We remember a range of unused IDs
237 * ready to use (from lastpid+1 through pidchecked-1).
238 *
239 * If RFHIGHPID is set (used during system boot), do not allocate
240 * low-numbered pids.
241 */
242 trypid = lastpid + 1;
243 if (flags & RFHIGHPID) {
244 if (trypid < 10)
245 trypid = 10;
246 } else {
247 if (randompid)
248 trypid += arc4random() % randompid;
249 }
250 retry:
251 /*
252 * If the process ID prototype has wrapped around,
253 * restart somewhat above 0, as the low-numbered procs
254 * tend to include daemons that don't exit.
255 */
256 if (trypid >= pid_max) {
257 trypid = trypid % pid_max;
258 if (trypid < 100)
259 trypid += 100;
260 pidchecked = 0;
261 }
262 if (trypid >= pidchecked) {
263 int doingzomb = 0;
264
265 pidchecked = PID_MAX;
266 /*
267 * Scan the active and zombie procs to check whether this pid
268 * is in use. Remember the lowest pid that's greater
269 * than trypid, so we can avoid checking for a while.
270 */
271 p = LIST_FIRST(&allproc);
272 again:
273 for (; p != NULL; p = LIST_NEXT(p, p_list)) {
274 while (p->p_pid == trypid ||
275 (p->p_pgrp != NULL &&
276 (p->p_pgrp->pg_id == trypid ||
277 (p->p_session != NULL &&
278 p->p_session->s_sid == trypid)))) {
279 trypid++;
280 if (trypid >= pidchecked)
281 goto retry;
282 }
283 if (p->p_pid > trypid && pidchecked > p->p_pid)
284 pidchecked = p->p_pid;
285 if (p->p_pgrp != NULL) {
286 if (p->p_pgrp->pg_id > trypid &&
287 pidchecked > p->p_pgrp->pg_id)
288 pidchecked = p->p_pgrp->pg_id;
289 if (p->p_session != NULL &&
290 p->p_session->s_sid > trypid &&
291 pidchecked > p->p_session->s_sid)
292 pidchecked = p->p_session->s_sid;
293 }
294 }
295 if (!doingzomb) {
296 doingzomb = 1;
297 p = LIST_FIRST(&zombproc);
298 goto again;
299 }
300 }
301
302 /*
303 * RFHIGHPID does not mess with the lastpid counter during boot.
304 */
305 if (flags & RFHIGHPID)
306 pidchecked = 0;
307 else
308 lastpid = trypid;
309
310 return (trypid);
311 }
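
The window caching above is the whole trick: one scan proves every pid in [trypid, pidchecked) free, so subsequent calls skip the scan until the window is used up. A freestanding sketch of the idea, with a sorted array standing in for the allproc/zombproc lists and wraparound omitted (names here are illustrative, not kernel API):

    #include <stdio.h>

    #define SKETCH_PID_MAX 99999

    static int used[] = { 1, 5, 100, 101, 350 };    /* busy pids, sorted */
    static int lastpid, pidchecked;

    static int
    findpid(void)
    {
            int i, trypid = lastpid + 1;

            if (trypid >= pidchecked) {     /* window exhausted: rescan */
                    pidchecked = SKETCH_PID_MAX;
                    for (i = 0; i < 5; i++) {
                            while (used[i] == trypid)
                                    trypid++;
                            if (used[i] > trypid && used[i] < pidchecked)
                                    pidchecked = used[i];   /* next busy pid */
                    }
            }
            return (lastpid = trypid);
    }

    int
    main(void)
    {
            int n;

            for (n = 0; n < 8; n++)
                    printf("%d ", findpid());       /* 2 3 4 6 7 ... */
            putchar('\n');
            return (0);
    }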
312
313 static int
314 fork_norfproc(struct thread *td, int flags)
315 {
316 int error;
317 struct proc *p1;
318
319 KASSERT((flags & RFPROC) == 0,
320 ("fork_norfproc called with RFPROC set"));
321 p1 = td->td_proc;
322
323 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
324 (flags & (RFCFDG | RFFDG))) {
325 PROC_LOCK(p1);
326 if (thread_single(SINGLE_BOUNDARY)) {
327 PROC_UNLOCK(p1);
328 return (ERESTART);
329 }
330 PROC_UNLOCK(p1);
331 }
332
333 error = vm_forkproc(td, NULL, NULL, NULL, flags);
334 if (error)
335 goto fail;
336
337 /*
338 * Close all file descriptors.
339 */
340 if (flags & RFCFDG) {
341 struct filedesc *fdtmp;
342 fdtmp = fdinit(td->td_proc->p_fd);
343 fdescfree(td);
344 p1->p_fd = fdtmp;
345 }
346
347 /*
348 * Unshare file descriptors (from parent).
349 */
350 if (flags & RFFDG)
351 fdunshare(td);
352
353 fail:
354 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
355 (flags & (RFCFDG | RFFDG))) {
356 PROC_LOCK(p1);
357 thread_single_end();
358 PROC_UNLOCK(p1);
359 }
360 return (error);
361 }
362
363 static void
364 do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
365 struct vmspace *vm2, int pdflags)
366 {
367 struct proc *p1, *pptr;
368 int p2_held, trypid;
369 struct filedesc *fd;
370 struct filedesc_to_leader *fdtol;
371 struct sigacts *newsigacts;
372
373 sx_assert(&proctree_lock, SX_SLOCKED);
374 sx_assert(&allproc_lock, SX_XLOCKED);
375
376 p2_held = 0;
377 p1 = td->td_proc;
378
379 /*
380 * Increment the nprocs resource before blocking can occur. There
381 * are hard limits on the number of processes that can run.
382 */
383 nprocs++;
384
385 trypid = fork_findpid(flags);
386
387 sx_sunlock(&proctree_lock);
388
389 p2->p_state = PRS_NEW; /* protect against others */
390 p2->p_pid = trypid;
391 AUDIT_ARG_PID(p2->p_pid);
392 LIST_INSERT_HEAD(&allproc, p2, p_list);
393 LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
394 tidhash_add(td2);
395 PROC_LOCK(p2);
396 PROC_LOCK(p1);
397
398 sx_xunlock(&allproc_lock);
399
400 bcopy(&p1->p_startcopy, &p2->p_startcopy,
401 __rangeof(struct proc, p_startcopy, p_endcopy));
402 pargs_hold(p2->p_args);
403 PROC_UNLOCK(p1);
404
405 bzero(&p2->p_startzero,
406 __rangeof(struct proc, p_startzero, p_endzero));
407 p2->p_treeflag = 0;
408
409 p2->p_ucred = crhold(td->td_ucred);
410
411 /* Tell the prison that we exist. */
412 prison_proc_hold(p2->p_ucred->cr_prison);
413
414 PROC_UNLOCK(p2);
415
416 /*
417 * Malloc things while we don't hold any locks.
418 */
419 if (flags & RFSIGSHARE)
420 newsigacts = NULL;
421 else
422 newsigacts = sigacts_alloc();
423
424 /*
425 * Copy filedesc.
426 */
427 if (flags & RFCFDG) {
428 fd = fdinit(p1->p_fd);
429 fdtol = NULL;
430 } else if (flags & RFFDG) {
431 fd = fdcopy(p1->p_fd);
432 fdtol = NULL;
433 } else {
434 fd = fdshare(p1->p_fd);
435 if (p1->p_fdtol == NULL)
436 p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
437 p1->p_leader);
438 if ((flags & RFTHREAD) != 0) {
439 /*
440 * Shared file descriptor table, and shared
441 * process leaders.
442 */
443 fdtol = p1->p_fdtol;
444 FILEDESC_XLOCK(p1->p_fd);
445 fdtol->fdl_refcount++;
446 FILEDESC_XUNLOCK(p1->p_fd);
447 } else {
448 /*
449 * Shared file descriptor table, and different
450 * process leaders.
451 */
452 fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
453 p1->p_fd, p2);
454 }
455 }
456 /*
457 * Make a proc table entry for the new process.
458 * Start by zeroing the section of proc that is zero-initialized,
459 * then copy the section that is copied directly from the parent.
460 */
461
462 PROC_LOCK(p2);
463 PROC_LOCK(p1);
464
465 bzero(&td2->td_startzero,
466 __rangeof(struct thread, td_startzero, td_endzero));
467
468 bcopy(&td->td_startcopy, &td2->td_startcopy,
469 __rangeof(struct thread, td_startcopy, td_endcopy));
470
471 bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
472 td2->td_sigstk = td->td_sigstk;
473 td2->td_flags = TDF_INMEM;
474 td2->td_lend_user_pri = PRI_MAX;
475
476 #ifdef VIMAGE
477 td2->td_vnet = NULL;
478 td2->td_vnet_lpush = NULL;
479 #endif
480
481 /*
482 * Allow the scheduler to initialize the child.
483 */
484 thread_lock(td);
485 sched_fork(td, td2);
486 thread_unlock(td);
487
488 /*
489 * Duplicate sub-structures as needed.
490 * Increase reference counts on shared objects.
491 */
492 p2->p_flag = P_INMEM;
493 p2->p_flag2 = 0;
494 p2->p_swtick = ticks;
495 if (p1->p_flag & P_PROFIL)
496 startprofclock(p2);
497 td2->td_ucred = crhold(p2->p_ucred);
498
499 if (flags & RFSIGSHARE) {
500 p2->p_sigacts = sigacts_hold(p1->p_sigacts);
501 } else {
502 sigacts_copy(newsigacts, p1->p_sigacts);
503 p2->p_sigacts = newsigacts;
504 }
505
506 if (flags & RFTSIGZMB)
507 p2->p_sigparent = RFTSIGNUM(flags);
508 else if (flags & RFLINUXTHPN)
509 p2->p_sigparent = SIGUSR1;
510 else
511 p2->p_sigparent = SIGCHLD;
512
513 p2->p_textvp = p1->p_textvp;
514 p2->p_fd = fd;
515 p2->p_fdtol = fdtol;
516
517 if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
518 p2->p_flag |= P_PROTECTED;
519 p2->p_flag2 |= P2_INHERIT_PROTECTED;
520 }
521
522 /*
523 * p_limit is copy-on-write. Bump its refcount.
524 */
525 lim_fork(p1, p2);
526
527 pstats_fork(p1->p_stats, p2->p_stats);
528
529 PROC_UNLOCK(p1);
530 PROC_UNLOCK(p2);
531
532 /* Bump references to the text vnode (for procfs). */
533 if (p2->p_textvp)
534 vref(p2->p_textvp);
535
536 /*
537 * Set up linkage for kernel based threading.
538 */
539 if ((flags & RFTHREAD) != 0) {
540 mtx_lock(&ppeers_lock);
541 p2->p_peers = p1->p_peers;
542 p1->p_peers = p2;
543 p2->p_leader = p1->p_leader;
544 mtx_unlock(&ppeers_lock);
545 PROC_LOCK(p1->p_leader);
546 if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
547 PROC_UNLOCK(p1->p_leader);
548 /*
549 * The task leader is exiting, so process p1 is
550 * going to be killed shortly. Since p1 obviously
551 * isn't dead yet, we know that the leader is either
552 * sending SIGKILL's to all the processes in this
553 * task or is sleeping waiting for all the peers to
554 * exit. We let p1 complete the fork, but we need
555 * to go ahead and kill the new process p2 since
556 * the task leader may not get a chance to send
557 * SIGKILL to it. We leave it on the list so that
558 * the task leader will wait for this new process
559 * to commit suicide.
560 */
561 PROC_LOCK(p2);
562 kern_psignal(p2, SIGKILL);
563 PROC_UNLOCK(p2);
564 } else
565 PROC_UNLOCK(p1->p_leader);
566 } else {
567 p2->p_peers = NULL;
568 p2->p_leader = p2;
569 }
570
571 sx_xlock(&proctree_lock);
572 PGRP_LOCK(p1->p_pgrp);
573 PROC_LOCK(p2);
574 PROC_LOCK(p1);
575
576 /*
577 * Preserve some more flags in subprocess. P_PROFIL has already
578 * been preserved.
579 */
580 p2->p_flag |= p1->p_flag & P_SUGID;
581 td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
582 SESS_LOCK(p1->p_session);
583 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
584 p2->p_flag |= P_CONTROLT;
585 SESS_UNLOCK(p1->p_session);
586 if (flags & RFPPWAIT)
587 p2->p_flag |= P_PPWAIT;
588
589 p2->p_pgrp = p1->p_pgrp;
590 LIST_INSERT_AFTER(p1, p2, p_pglist);
591 PGRP_UNLOCK(p1->p_pgrp);
592 LIST_INIT(&p2->p_children);
593 LIST_INIT(&p2->p_orphans);
594
595 callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);
596
597 /*
598 * If PF_FORK is set, the child process inherits the
599 * procfs ioctl flags from its parent.
600 */
601 if (p1->p_pfsflags & PF_FORK) {
602 p2->p_stops = p1->p_stops;
603 p2->p_pfsflags = p1->p_pfsflags;
604 }
605
606 /*
607 * This begins the section where we must prevent the parent
608 * from being swapped.
609 */
610 _PHOLD(p1);
611 PROC_UNLOCK(p1);
612
613 /*
614 * Attach the new process to its parent.
615 *
616 * If RFNOWAIT is set, the newly created process becomes a child
617 * of init. This effectively disassociates the child from the
618 * parent.
619 */
620 if (flags & RFNOWAIT)
621 pptr = initproc;
622 else
623 pptr = p1;
624 p2->p_pptr = pptr;
625 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
626 sx_xunlock(&proctree_lock);
627
628 /* Inform accounting that we have forked. */
629 p2->p_acflag = AFORK;
630 PROC_UNLOCK(p2);
631
632 #ifdef KTRACE
633 ktrprocfork(p1, p2);
634 #endif
635
636 /*
637 * Finish creating the child process. It will return via a different
638 * execution path later (i.e., directly into user mode).
639 */
640 vm_forkproc(td, p2, td2, vm2, flags);
641
642 if (flags == (RFFDG | RFPROC)) {
643 PCPU_INC(cnt.v_forks);
644 PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
645 p2->p_vmspace->vm_ssize);
646 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
647 PCPU_INC(cnt.v_vforks);
648 PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
649 p2->p_vmspace->vm_ssize);
650 } else if (p1 == &proc0) {
651 PCPU_INC(cnt.v_kthreads);
652 PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
653 p2->p_vmspace->vm_ssize);
654 } else {
655 PCPU_INC(cnt.v_rforks);
656 PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
657 p2->p_vmspace->vm_ssize);
658 }
659
660 #ifdef PROCDESC
661 /*
662 * Associate the process descriptor with the process before anything
663 * can happen that might cause that process to need the descriptor.
664 * However, don't do this until after fork(2) can no longer fail.
665 */
666 if (flags & RFPROCDESC)
667 procdesc_new(p2, pdflags);
668 #endif
669
670 /*
671 * Both processes are set up, now check if any loadable modules want
672 * to adjust anything.
673 */
674 EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);
675
676 /*
677 * Set the child start time and mark the process as being complete.
678 */
679 PROC_LOCK(p2);
680 PROC_LOCK(p1);
681 microuptime(&p2->p_stats->p_start);
682 PROC_SLOCK(p2);
683 p2->p_state = PRS_NORMAL;
684 PROC_SUNLOCK(p2);
685
686 #ifdef KDTRACE_HOOKS
687 /*
688 * Tell the DTrace fasttrap provider about the new process so that any
689 * tracepoints inherited from the parent can be removed. We have to do
690 * this only after p_state is PRS_NORMAL since the fasttrap module will
691 * use pfind() later on.
692 */
693 if ((flags & RFMEM) == 0 && dtrace_fasttrap_fork)
694 dtrace_fasttrap_fork(p1, p2);
695 #endif
696 if ((p1->p_flag & (P_TRACED | P_FOLLOWFORK)) == (P_TRACED |
697 P_FOLLOWFORK)) {
698 /*
699 * Arrange for debugger to receive the fork event.
700 *
701 * We can report PL_FLAG_FORKED regardless of
702 * P_FOLLOWFORK settings, but it does not make sense
703 * for a runaway child.
704 */
705 td->td_dbgflags |= TDB_FORK;
706 td->td_dbg_forked = p2->p_pid;
707 td2->td_dbgflags |= TDB_STOPATFORK;
708 _PHOLD(p2);
709 p2_held = 1;
710 }
711 if (flags & RFPPWAIT) {
712 td->td_pflags |= TDP_RFPPWAIT;
713 td->td_rfppwait_p = p2;
714 }
715 PROC_UNLOCK(p2);
716 if ((flags & RFSTOPPED) == 0) {
717 /*
718 * If RFSTOPPED not requested, make child runnable and
719 * add to run queue.
720 */
721 thread_lock(td2);
722 TD_SET_CAN_RUN(td2);
723 sched_add(td2, SRQ_BORING);
724 thread_unlock(td2);
725 }
726
727 /*
728 * Now can be swapped.
729 */
730 _PRELE(p1);
731 PROC_UNLOCK(p1);
732
733 /*
734 * Tell any interested parties about the new process.
735 */
736 knote_fork(&p1->p_klist, p2->p_pid);
737 SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);
738
739 /*
740 * Wait until debugger is attached to child.
741 */
742 PROC_LOCK(p2);
743 while ((td2->td_dbgflags & TDB_STOPATFORK) != 0)
744 cv_wait(&p2->p_dbgwait, &p2->p_mtx);
745 if (p2_held)
746 _PRELE(p2);
747 PROC_UNLOCK(p2);
748 }
749
750 int
751 fork1(struct thread *td, int flags, int pages, struct proc **procp,
752 int *procdescp, int pdflags)
753 {
754 struct proc *p1;
755 struct proc *newproc;
756 int ok;
757 struct thread *td2;
758 struct vmspace *vm2;
759 vm_ooffset_t mem_charged;
760 int error;
761 static int curfail;
762 static struct timeval lastfail;
763 #ifdef PROCDESC
764 struct file *fp_procdesc = NULL;
765 #endif
766
767 /* Check for undefined or unimplemented flags. */
768 if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
769 return (EINVAL);
770
771 /* Signal value requires RFTSIGZMB. */
772 if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
773 return (EINVAL);
774
775 /* Can't copy and clear. */
776 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
777 return (EINVAL);
778
779 /* Check the validity of the signal number. */
780 if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
781 return (EINVAL);
782
783 #ifdef PROCDESC
784 if ((flags & RFPROCDESC) != 0) {
785 /* Can't get a process descriptor without creating a process. */
786 if ((flags & RFPROC) == 0)
787 return (EINVAL);
788
789 /* Must provide a place to put a procdesc if creating one. */
790 if (procdescp == NULL)
791 return (EINVAL);
792 }
793 #endif
794
795 p1 = td->td_proc;
796
797 /*
798 * Here we don't create a new process, but we divorce
799 * certain parts of a process from itself.
800 */
801 if ((flags & RFPROC) == 0) {
802 *procp = NULL;
803 return (fork_norfproc(td, flags));
804 }
805
806 #ifdef PROCDESC
807 /*
808 * If required, create a process descriptor in the parent first; we
809 * will abandon it if something goes wrong. We don't finit() until
810 * later.
811 */
812 if (flags & RFPROCDESC) {
813 error = falloc(td, &fp_procdesc, procdescp, 0);
814 if (error != 0)
815 return (error);
816 }
817 #endif
818
819 mem_charged = 0;
820 vm2 = NULL;
821 if (pages == 0)
822 pages = KSTACK_PAGES;
823 /* Allocate new proc. */
824 newproc = uma_zalloc(proc_zone, M_WAITOK);
825 td2 = FIRST_THREAD_IN_PROC(newproc);
826 if (td2 == NULL) {
827 td2 = thread_alloc(pages);
828 if (td2 == NULL) {
829 error = ENOMEM;
830 goto fail1;
831 }
832 proc_linkup(newproc, td2);
833 } else {
834 if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
835 if (td2->td_kstack != 0)
836 vm_thread_dispose(td2);
837 if (!thread_alloc_stack(td2, pages)) {
838 error = ENOMEM;
839 goto fail1;
840 }
841 }
842 }
843
844 if ((flags & RFMEM) == 0) {
845 vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
846 if (vm2 == NULL) {
847 error = ENOMEM;
848 goto fail1;
849 }
850 if (!swap_reserve(mem_charged)) {
851 /*
852 * The swap reservation failed. The accounting
853 * from the entries of the copied vm2 will be
854 * subtracted in vmspace_free(), so force the
855 * reservation there.
856 */
857 swap_reserve_force(mem_charged);
858 error = ENOMEM;
859 goto fail1;
860 }
861 } else
862 vm2 = NULL;
863
864 /*
865 * XXX: This is ugly; when we copy resource usage, we need to bump
866 * per-cred resource counters.
867 */
868 newproc->p_ucred = p1->p_ucred;
869
870 /*
871 * Initialize resource accounting for the child process.
872 */
873 error = racct_proc_fork(p1, newproc);
874 if (error != 0) {
875 error = EAGAIN;
876 goto fail1;
877 }
878
879 #ifdef MAC
880 mac_proc_init(newproc);
881 #endif
882 knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx);
883 STAILQ_INIT(&newproc->p_ktr);
884
885 /* We have to lock the process tree while we look for a pid. */
886 sx_slock(&proctree_lock);
887
888 /*
889 * Although process entries are dynamically created, we still keep
890 * a global limit on the maximum number we will create. Don't allow
891 * a nonprivileged user to use the last ten processes; don't let root
892 * exceed the limit. The variable nprocs is the current number of
893 * processes, maxproc is the limit.
894 */
895 sx_xlock(&allproc_lock);
896 if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
897 PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
898 error = EAGAIN;
899 goto fail;
900 }
901
902 /*
903 * Increment the count of procs running with this uid. Don't allow
904 * a nonprivileged user to exceed their current limit.
905 *
906 * XXXRW: Can we avoid privilege here if it's not needed?
907 */
908 error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
909 if (error == 0)
910 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
911 else {
912 PROC_LOCK(p1);
913 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
914 lim_cur(p1, RLIMIT_NPROC));
915 PROC_UNLOCK(p1);
916 }
917 if (ok) {
918 do_fork(td, flags, newproc, td2, vm2, pdflags);
919
920 /*
921 * Return child proc pointer to parent.
922 */
923 *procp = newproc;
924 #ifdef PROCDESC
925 if (flags & RFPROCDESC) {
926 procdesc_finit(newproc->p_procdesc, fp_procdesc);
927 fdrop(fp_procdesc, td);
928 }
929 #endif
930 racct_proc_fork_done(newproc);
931 return (0);
932 }
933
934 error = EAGAIN;
935 fail:
936 sx_sunlock(&proctree_lock);
937 if (ppsratecheck(&lastfail, &curfail, 1))
938 printf("maxproc limit exceeded by uid %u (pid %d); see tuning(7) and login.conf(5)\n",
939 td->td_ucred->cr_ruid, p1->p_pid);
940 sx_xunlock(&allproc_lock);
941 #ifdef MAC
942 mac_proc_destroy(newproc);
943 #endif
944 racct_proc_exit(newproc);
945 fail1:
946 if (vm2 != NULL)
947 vmspace_free(vm2);
948 uma_zfree(proc_zone, newproc);
949 #ifdef PROCDESC
950 if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
951 fdclose(td->td_proc->p_fd, fp_procdesc, *procdescp, td);
952 fdrop(fp_procdesc, td);
953 }
954 #endif
955 pause("fork", hz / 2);
956 return (error);
957 }
958
959 /*
960 * Handle the return of a child process from fork1(). This function
961 * is called from the MD fork_trampoline() entry point.
962 */
963 void
964 fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
965 struct trapframe *frame)
966 {
967 struct proc *p;
968 struct thread *td;
969 struct thread *dtd;
970
971 td = curthread;
972 p = td->td_proc;
973 KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
974
975 CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
976 td, td->td_sched, p->p_pid, td->td_name);
977
978 sched_fork_exit(td);
979 /*
980 * Processes normally resume in mi_switch() after being
981 * cpu_switch()'ed to, but when children start up they arrive here
982 * instead, so we must do much the same things as mi_switch() would.
983 */
984 if ((dtd = PCPU_GET(deadthread))) {
985 PCPU_SET(deadthread, NULL);
986 thread_stash(dtd);
987 }
988 thread_unlock(td);
989
990 /*
991 * cpu_set_fork_handler() intercepts this function call so that it
992 * instead calls a non-returning function, keeping the thread in
993 * kernel mode. initproc has its own fork handler, but it does return.
994 */
995 KASSERT(callout != NULL, ("NULL callout in fork_exit"));
996 callout(arg, frame);
997
998 /*
999 * Check if a kernel thread misbehaved and returned from its main
1000 * function.
1001 */
1002 if (p->p_flag & P_KTHREAD) {
1003 printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
1004 td->td_name, p->p_pid);
1005 kproc_exit(0);
1006 }
1007 mtx_assert(&Giant, MA_NOTOWNED);
1008
1009 if (p->p_sysent->sv_schedtail != NULL)
1010 (p->p_sysent->sv_schedtail)(td);
1011 }
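
The P_KTHREAD check above is the backstop for kernel processes whose main function returns instead of calling kproc_exit(9). A hedged in-kernel sketch (module-style; the "example" names are invented for illustration):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/kthread.h>
    #include <sys/proc.h>

    static struct proc *example_proc;

    /* Body of the kernel process: it arrives here via fork_trampoline() ->
     * fork_exit() and must leave through kproc_exit(), never a plain
     * return. */
    static void
    example_main(void *arg)
    {
            printf("example kproc running\n");
            kproc_exit(0);
    }

    static void
    example_init(void *arg)
    {
            /* kproc_create() wraps fork1() with RFSTOPPED and then makes
             * the new thread runnable itself. */
            if (kproc_create(example_main, NULL, &example_proc,
                0, 0, "example") != 0)
                    printf("example kproc_create failed\n");
    }
    SYSINIT(example, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, example_init, NULL);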
1012
1013 /*
1014 * Simplified back end of syscall(), used when returning from fork()
1015 * directly into user mode. Giant is not held on entry, and must not
1016 * be held on return. This function is passed in to fork_exit() as the
1017 * first parameter and is called when returning to a new userland process.
1018 */
1019 void
1020 fork_return(struct thread *td, struct trapframe *frame)
1021 {
1022 struct proc *p, *dbg;
1023
1024 if (td->td_dbgflags & TDB_STOPATFORK) {
1025 p = td->td_proc;
1026 sx_xlock(&proctree_lock);
1027 PROC_LOCK(p);
1028 if ((p->p_pptr->p_flag & (P_TRACED | P_FOLLOWFORK)) ==
1029 (P_TRACED | P_FOLLOWFORK)) {
1030 /*
1031 * If debugger still wants auto-attach for the
1032 * parent's children, do it now.
1033 */
1034 dbg = p->p_pptr->p_pptr;
1035 p->p_flag |= P_TRACED;
1036 p->p_oppid = p->p_pptr->p_pid;
1037 proc_reparent(p, dbg);
1038 sx_xunlock(&proctree_lock);
1039 td->td_dbgflags |= TDB_CHILD;
1040 ptracestop(td, SIGSTOP);
1041 td->td_dbgflags &= ~TDB_CHILD;
1042 } else {
1043 /*
1044 * ... otherwise clear the request.
1045 */
1046 sx_xunlock(&proctree_lock);
1047 td->td_dbgflags &= ~TDB_STOPATFORK;
1048 cv_broadcast(&p->p_dbgwait);
1049 }
1050 PROC_UNLOCK(p);
1051 }
1052
1053 userret(td, frame);
1054
1055 #ifdef KTRACE
1056 if (KTRPOINT(td, KTR_SYSRET))
1057 ktrsysret(SYS_fork, 0, 0);
1058 #endif
1059 }