FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_proc.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
30 * $FreeBSD$
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "opt_ktrace.h"
37 #include "opt_kstack_pages.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sysent.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/sysctl.h>
50 #include <sys/filedesc.h>
51 #include <sys/tty.h>
52 #include <sys/signalvar.h>
53 #include <sys/sx.h>
54 #include <sys/user.h>
55 #include <sys/jail.h>
56 #ifdef KTRACE
57 #include <sys/uio.h>
58 #include <sys/ktrace.h>
59 #endif
60
61 #include <vm/vm.h>
62 #include <vm/vm_extern.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_map.h>
65 #include <vm/uma.h>
66 #include <machine/critical.h>
67
68 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
69 MALLOC_DEFINE(M_SESSION, "session", "session header");
70 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
71 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
72
73 static void doenterpgrp(struct proc *, struct pgrp *);
74 static void orphanpg(struct pgrp *pg);
75 static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
76 static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
77 static void pgadjustjobc(struct pgrp *pgrp, int entering);
78 static void pgdelete(struct pgrp *);
79 static int proc_ctor(void *mem, int size, void *arg, int flags);
80 static void proc_dtor(void *mem, int size, void *arg);
81 static int proc_init(void *mem, int size, int flags);
82 static void proc_fini(void *mem, int size);
83
84 /*
85 * Other process lists
86 */
87 struct pidhashhead *pidhashtbl;
88 u_long pidhash;
89 struct pgrphashhead *pgrphashtbl;
90 u_long pgrphash;
91 struct proclist allproc;
92 struct proclist zombproc;
93 struct sx allproc_lock;
94 struct sx proctree_lock;
95 struct mtx pargs_ref_lock;
96 struct mtx ppeers_lock;
97 uma_zone_t proc_zone;
98 uma_zone_t ithread_zone;
99
100 int kstack_pages = KSTACK_PAGES;
101 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
102
103 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
104
105 /*
106 * Initialize global process hashing structures.
107 */
108 void
109 procinit()
110 {
111
112 sx_init(&allproc_lock, "allproc");
113 sx_init(&proctree_lock, "proctree");
114 mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
115 mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
116 LIST_INIT(&allproc);
117 LIST_INIT(&zombproc);
118 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
119 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
120 proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
121 proc_ctor, proc_dtor, proc_init, proc_fini,
122 UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
123 uihashinit();
124 }
125
/*
 * UMA constructor: prepare a proc for use.  Currently a placeholder;
 * all real setup lives in proc_init() (type-stable) and fork.
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	return (0);
}
137
138 /*
139 * Reclaim a proc after use.
140 */
141 static void
142 proc_dtor(void *mem, int size, void *arg)
143 {
144 struct proc *p;
145 struct thread *td;
146 #ifdef INVARIANTS
147 struct ksegrp *kg;
148 #endif
149
150 /* INVARIANTS checks go here */
151 p = (struct proc *)mem;
152 td = FIRST_THREAD_IN_PROC(p);
153 #ifdef INVARIANTS
154 KASSERT((p->p_numthreads == 1),
155 ("bad number of threads in exiting process"));
156 KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
157 kg = FIRST_KSEGRP_IN_PROC(p);
158 KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
159 #endif
160
161 /* Dispose of an alternate kstack, if it exists.
162 * XXX What if there are more than one thread in the proc?
163 * The first thread in the proc is special and not
164 * freed, so you gotta do this here.
165 */
166 if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
167 vm_thread_dispose_altkstack(td);
168 }
169
170 /*
171 * Initialize type-stable parts of a proc (when newly created).
172 */
173 static int
174 proc_init(void *mem, int size, int flags)
175 {
176 struct proc *p;
177 struct thread *td;
178 struct ksegrp *kg;
179
180 p = (struct proc *)mem;
181 p->p_sched = (struct p_sched *)&p[1];
182 td = thread_alloc();
183 kg = ksegrp_alloc();
184 bzero(&p->p_mtx, sizeof(struct mtx));
185 mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
186 p->p_stats = pstats_alloc();
187 proc_linkup(p, kg, td);
188 sched_newproc(p, kg, td);
189 return (0);
190 }
191
192 /*
193 * Tear down type-stable parts of a proc (just before being discarded)
194 */
195 static void
196 proc_fini(void *mem, int size)
197 {
198 struct proc *p;
199 struct thread *td;
200 struct ksegrp *kg;
201
202 p = (struct proc *)mem;
203 KASSERT((p->p_numthreads == 1),
204 ("bad number of threads in freeing process"));
205 td = FIRST_THREAD_IN_PROC(p);
206 KASSERT((td != NULL), ("proc_fini: bad thread pointer"));
207 kg = FIRST_KSEGRP_IN_PROC(p);
208 KASSERT((kg != NULL), ("proc_fini: bad kg pointer"));
209 sched_destroyproc(p);
210 thread_free(td);
211 ksegrp_free(kg);
212 mtx_destroy(&p->p_mtx);
213 }
214
215 /*
216 * Is p an inferior of the current process?
217 */
218 int
219 inferior(p)
220 register struct proc *p;
221 {
222
223 sx_assert(&proctree_lock, SX_LOCKED);
224 for (; p != curproc; p = p->p_pptr)
225 if (p->p_pid == 0)
226 return (0);
227 return (1);
228 }
229
230 /*
231 * Locate a process by number; return only "live" processes -- i.e., neither
232 * zombies nor newly born but incompletely initialized processes. By not
233 * returning processes in the PRS_NEW state, we allow callers to avoid
234 * testing for that condition to avoid dereferencing p_ucred, et al.
235 */
236 struct proc *
237 pfind(pid)
238 register pid_t pid;
239 {
240 register struct proc *p;
241
242 sx_slock(&allproc_lock);
243 LIST_FOREACH(p, PIDHASH(pid), p_hash)
244 if (p->p_pid == pid) {
245 if (p->p_state == PRS_NEW) {
246 p = NULL;
247 break;
248 }
249 PROC_LOCK(p);
250 break;
251 }
252 sx_sunlock(&allproc_lock);
253 return (p);
254 }
255
256 /*
257 * Locate a process group by number.
258 * The caller must hold proctree_lock.
259 */
260 struct pgrp *
261 pgfind(pgid)
262 register pid_t pgid;
263 {
264 register struct pgrp *pgrp;
265
266 sx_assert(&proctree_lock, SX_LOCKED);
267
268 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
269 if (pgrp->pg_id == pgid) {
270 PGRP_LOCK(pgrp);
271 return (pgrp);
272 }
273 }
274 return (NULL);
275 }
276
277 /*
278 * Create a new process group.
279 * pgid must be equal to the pid of p.
280 * Begin a new session if required.
281 */
282 int
283 enterpgrp(p, pgid, pgrp, sess)
284 register struct proc *p;
285 pid_t pgid;
286 struct pgrp *pgrp;
287 struct session *sess;
288 {
289 struct pgrp *pgrp2;
290
291 sx_assert(&proctree_lock, SX_XLOCKED);
292
293 KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
294 KASSERT(p->p_pid == pgid,
295 ("enterpgrp: new pgrp and pid != pgid"));
296
297 pgrp2 = pgfind(pgid);
298
299 KASSERT(pgrp2 == NULL,
300 ("enterpgrp: pgrp with pgid exists"));
301 KASSERT(!SESS_LEADER(p),
302 ("enterpgrp: session leader attempted setpgrp"));
303
304 mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
305
306 if (sess != NULL) {
307 /*
308 * new session
309 */
310 mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
311 PROC_LOCK(p);
312 p->p_flag &= ~P_CONTROLT;
313 PROC_UNLOCK(p);
314 PGRP_LOCK(pgrp);
315 sess->s_leader = p;
316 sess->s_sid = p->p_pid;
317 sess->s_count = 1;
318 sess->s_ttyvp = NULL;
319 sess->s_ttyp = NULL;
320 bcopy(p->p_session->s_login, sess->s_login,
321 sizeof(sess->s_login));
322 pgrp->pg_session = sess;
323 KASSERT(p == curproc,
324 ("enterpgrp: mksession and p != curproc"));
325 } else {
326 pgrp->pg_session = p->p_session;
327 SESS_LOCK(pgrp->pg_session);
328 pgrp->pg_session->s_count++;
329 SESS_UNLOCK(pgrp->pg_session);
330 PGRP_LOCK(pgrp);
331 }
332 pgrp->pg_id = pgid;
333 LIST_INIT(&pgrp->pg_members);
334
335 /*
336 * As we have an exclusive lock of proctree_lock,
337 * this should not deadlock.
338 */
339 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
340 pgrp->pg_jobc = 0;
341 SLIST_INIT(&pgrp->pg_sigiolst);
342 PGRP_UNLOCK(pgrp);
343
344 doenterpgrp(p, pgrp);
345
346 return (0);
347 }
348
349 /*
350 * Move p to an existing process group
351 */
352 int
353 enterthispgrp(p, pgrp)
354 register struct proc *p;
355 struct pgrp *pgrp;
356 {
357
358 sx_assert(&proctree_lock, SX_XLOCKED);
359 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
360 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
361 PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
362 SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
363 KASSERT(pgrp->pg_session == p->p_session,
364 ("%s: pgrp's session %p, p->p_session %p.\n",
365 __func__,
366 pgrp->pg_session,
367 p->p_session));
368 KASSERT(pgrp != p->p_pgrp,
369 ("%s: p belongs to pgrp.", __func__));
370
371 doenterpgrp(p, pgrp);
372
373 return (0);
374 }
375
376 /*
377 * Move p to a process group
378 */
379 static void
380 doenterpgrp(p, pgrp)
381 struct proc *p;
382 struct pgrp *pgrp;
383 {
384 struct pgrp *savepgrp;
385
386 sx_assert(&proctree_lock, SX_XLOCKED);
387 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
388 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
389 PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
390 SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
391
392 savepgrp = p->p_pgrp;
393
394 /*
395 * Adjust eligibility of affected pgrps to participate in job control.
396 * Increment eligibility counts before decrementing, otherwise we
397 * could reach 0 spuriously during the first call.
398 */
399 fixjobc(p, pgrp, 1);
400 fixjobc(p, p->p_pgrp, 0);
401
402 PGRP_LOCK(pgrp);
403 PGRP_LOCK(savepgrp);
404 PROC_LOCK(p);
405 LIST_REMOVE(p, p_pglist);
406 p->p_pgrp = pgrp;
407 PROC_UNLOCK(p);
408 LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
409 PGRP_UNLOCK(savepgrp);
410 PGRP_UNLOCK(pgrp);
411 if (LIST_EMPTY(&savepgrp->pg_members))
412 pgdelete(savepgrp);
413 }
414
415 /*
416 * remove process from process group
417 */
418 int
419 leavepgrp(p)
420 register struct proc *p;
421 {
422 struct pgrp *savepgrp;
423
424 sx_assert(&proctree_lock, SX_XLOCKED);
425 savepgrp = p->p_pgrp;
426 PGRP_LOCK(savepgrp);
427 PROC_LOCK(p);
428 LIST_REMOVE(p, p_pglist);
429 p->p_pgrp = NULL;
430 PROC_UNLOCK(p);
431 PGRP_UNLOCK(savepgrp);
432 if (LIST_EMPTY(&savepgrp->pg_members))
433 pgdelete(savepgrp);
434 return (0);
435 }
436
437 /*
438 * delete a process group
439 */
440 static void
441 pgdelete(pgrp)
442 register struct pgrp *pgrp;
443 {
444 struct session *savesess;
445 int i;
446
447 sx_assert(&proctree_lock, SX_XLOCKED);
448 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
449 SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
450
451 /*
452 * Reset any sigio structures pointing to us as a result of
453 * F_SETOWN with our pgid.
454 */
455 funsetownlst(&pgrp->pg_sigiolst);
456
457 PGRP_LOCK(pgrp);
458 if (pgrp->pg_session->s_ttyp != NULL &&
459 pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
460 pgrp->pg_session->s_ttyp->t_pgrp = NULL;
461 LIST_REMOVE(pgrp, pg_hash);
462 savesess = pgrp->pg_session;
463 SESS_LOCK(savesess);
464 i = --savesess->s_count;
465 SESS_UNLOCK(savesess);
466 PGRP_UNLOCK(pgrp);
467 if (i == 0) {
468 if (savesess->s_ttyp != NULL)
469 ttyrel(savesess->s_ttyp);
470 mtx_destroy(&savesess->s_mtx);
471 FREE(savesess, M_SESSION);
472 }
473 mtx_destroy(&pgrp->pg_mtx);
474 FREE(pgrp, M_PGRP);
475 }
476
477 static void
478 pgadjustjobc(pgrp, entering)
479 struct pgrp *pgrp;
480 int entering;
481 {
482
483 PGRP_LOCK(pgrp);
484 if (entering)
485 pgrp->pg_jobc++;
486 else {
487 --pgrp->pg_jobc;
488 if (pgrp->pg_jobc == 0)
489 orphanpg(pgrp);
490 }
491 PGRP_UNLOCK(pgrp);
492 }
493
494 /*
495 * Adjust pgrp jobc counters when specified process changes process group.
496 * We count the number of processes in each process group that "qualify"
497 * the group for terminal job control (those with a parent in a different
498 * process group of the same session). If that count reaches zero, the
499 * process group becomes orphaned. Check both the specified process'
500 * process group and that of its children.
501 * entering == 0 => p is leaving specified group.
502 * entering == 1 => p is entering specified group.
503 */
504 void
505 fixjobc(p, pgrp, entering)
506 register struct proc *p;
507 register struct pgrp *pgrp;
508 int entering;
509 {
510 register struct pgrp *hispgrp;
511 register struct session *mysession;
512
513 sx_assert(&proctree_lock, SX_LOCKED);
514 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
515 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
516 SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
517
518 /*
519 * Check p's parent to see whether p qualifies its own process
520 * group; if so, adjust count for p's process group.
521 */
522 mysession = pgrp->pg_session;
523 if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
524 hispgrp->pg_session == mysession)
525 pgadjustjobc(pgrp, entering);
526
527 /*
528 * Check this process' children to see whether they qualify
529 * their process groups; if so, adjust counts for children's
530 * process groups.
531 */
532 LIST_FOREACH(p, &p->p_children, p_sibling) {
533 hispgrp = p->p_pgrp;
534 if (hispgrp == pgrp ||
535 hispgrp->pg_session != mysession)
536 continue;
537 PROC_LOCK(p);
538 if (p->p_state == PRS_ZOMBIE) {
539 PROC_UNLOCK(p);
540 continue;
541 }
542 PROC_UNLOCK(p);
543 pgadjustjobc(hispgrp, entering);
544 }
545 }
546
547 /*
548 * A process group has become orphaned;
549 * if there are any stopped processes in the group,
550 * hang-up all process in that group.
551 */
552 static void
553 orphanpg(pg)
554 struct pgrp *pg;
555 {
556 register struct proc *p;
557
558 PGRP_LOCK_ASSERT(pg, MA_OWNED);
559
560 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
561 PROC_LOCK(p);
562 if (P_SHOULDSTOP(p)) {
563 PROC_UNLOCK(p);
564 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
565 PROC_LOCK(p);
566 psignal(p, SIGHUP);
567 psignal(p, SIGCONT);
568 PROC_UNLOCK(p);
569 }
570 return;
571 }
572 PROC_UNLOCK(p);
573 }
574 }
575
576 #include "opt_ddb.h"
577 #ifdef DDB
578 #include <ddb/ddb.h>
579
580 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
581 {
582 register struct pgrp *pgrp;
583 register struct proc *p;
584 register int i;
585
586 for (i = 0; i <= pgrphash; i++) {
587 if (!LIST_EMPTY(&pgrphashtbl[i])) {
588 printf("\tindx %d\n", i);
589 LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
590 printf(
591 "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
592 (void *)pgrp, (long)pgrp->pg_id,
593 (void *)pgrp->pg_session,
594 pgrp->pg_session->s_count,
595 (void *)LIST_FIRST(&pgrp->pg_members));
596 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
597 printf("\t\tpid %ld addr %p pgrp %p\n",
598 (long)p->p_pid, (void *)p,
599 (void *)p->p_pgrp);
600 }
601 }
602 }
603 }
604 }
605 #endif /* DDB */
606
607 /*
608 * Clear kinfo_proc and fill in any information that is common
609 * to all threads in the process.
610 * Must be called with the target process locked.
611 */
612 static void
613 fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
614 {
615 struct thread *td0;
616 struct tty *tp;
617 struct session *sp;
618 struct timeval tv;
619 struct ucred *cred;
620 struct sigacts *ps;
621
622 bzero(kp, sizeof(*kp));
623
624 kp->ki_structsize = sizeof(*kp);
625 kp->ki_paddr = p;
626 PROC_LOCK_ASSERT(p, MA_OWNED);
627 kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
628 kp->ki_args = p->p_args;
629 kp->ki_textvp = p->p_textvp;
630 #ifdef KTRACE
631 kp->ki_tracep = p->p_tracevp;
632 mtx_lock(&ktrace_mtx);
633 kp->ki_traceflag = p->p_traceflag;
634 mtx_unlock(&ktrace_mtx);
635 #endif
636 kp->ki_fd = p->p_fd;
637 kp->ki_vmspace = p->p_vmspace;
638 kp->ki_flag = p->p_flag;
639 cred = p->p_ucred;
640 if (cred) {
641 kp->ki_uid = cred->cr_uid;
642 kp->ki_ruid = cred->cr_ruid;
643 kp->ki_svuid = cred->cr_svuid;
644 /* XXX bde doesn't like KI_NGROUPS */
645 kp->ki_ngroups = min(cred->cr_ngroups, KI_NGROUPS);
646 bcopy(cred->cr_groups, kp->ki_groups,
647 kp->ki_ngroups * sizeof(gid_t));
648 kp->ki_rgid = cred->cr_rgid;
649 kp->ki_svgid = cred->cr_svgid;
650 /* If jailed(cred), emulate the old P_JAILED flag. */
651 if (jailed(cred))
652 kp->ki_flag |= P_JAILED;
653 }
654 ps = p->p_sigacts;
655 if (ps) {
656 mtx_lock(&ps->ps_mtx);
657 kp->ki_sigignore = ps->ps_sigignore;
658 kp->ki_sigcatch = ps->ps_sigcatch;
659 mtx_unlock(&ps->ps_mtx);
660 }
661 mtx_lock_spin(&sched_lock);
662 if (p->p_state != PRS_NEW &&
663 p->p_state != PRS_ZOMBIE &&
664 p->p_vmspace != NULL) {
665 struct vmspace *vm = p->p_vmspace;
666
667 kp->ki_size = vm->vm_map.size;
668 kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
669 FOREACH_THREAD_IN_PROC(p, td0) {
670 if (!TD_IS_SWAPPED(td0))
671 kp->ki_rssize += td0->td_kstack_pages;
672 if (td0->td_altkstack_obj != NULL)
673 kp->ki_rssize += td0->td_altkstack_pages;
674 }
675 kp->ki_swrss = vm->vm_swrss;
676 kp->ki_tsize = vm->vm_tsize;
677 kp->ki_dsize = vm->vm_dsize;
678 kp->ki_ssize = vm->vm_ssize;
679 } else if (p->p_state == PRS_ZOMBIE)
680 kp->ki_stat = SZOMB;
681 if ((p->p_sflag & PS_INMEM) && p->p_stats) {
682 kp->ki_start = p->p_stats->p_start;
683 timevaladd(&kp->ki_start, &boottime);
684 kp->ki_rusage = p->p_stats->p_ru;
685 calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime,
686 NULL);
687 kp->ki_childstime = p->p_stats->p_cru.ru_stime;
688 kp->ki_childutime = p->p_stats->p_cru.ru_utime;
689 /* Some callers want child-times in a single value */
690 kp->ki_childtime = kp->ki_childstime;
691 timevaladd(&kp->ki_childtime, &kp->ki_childutime);
692 }
693 kp->ki_sflag = p->p_sflag;
694 kp->ki_swtime = p->p_swtime;
695 kp->ki_pid = p->p_pid;
696 kp->ki_nice = p->p_nice;
697 bintime2timeval(&p->p_runtime, &tv);
698 kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
699 mtx_unlock_spin(&sched_lock);
700 tp = NULL;
701 if (p->p_pgrp) {
702 kp->ki_pgid = p->p_pgrp->pg_id;
703 kp->ki_jobc = p->p_pgrp->pg_jobc;
704 sp = p->p_pgrp->pg_session;
705
706 if (sp != NULL) {
707 kp->ki_sid = sp->s_sid;
708 SESS_LOCK(sp);
709 strlcpy(kp->ki_login, sp->s_login,
710 sizeof(kp->ki_login));
711 if (sp->s_ttyvp)
712 kp->ki_kiflag |= KI_CTTY;
713 if (SESS_LEADER(p))
714 kp->ki_kiflag |= KI_SLEADER;
715 tp = sp->s_ttyp;
716 SESS_UNLOCK(sp);
717 }
718 }
719 if ((p->p_flag & P_CONTROLT) && tp != NULL) {
720 kp->ki_tdev = dev2udev(tp->t_dev);
721 kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
722 if (tp->t_session)
723 kp->ki_tsid = tp->t_session->s_sid;
724 } else
725 kp->ki_tdev = NODEV;
726 if (p->p_comm[0] != '\0') {
727 strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
728 strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
729 }
730 if (p->p_sysent && p->p_sysent->sv_name != NULL &&
731 p->p_sysent->sv_name[0] != '\0')
732 strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
733 kp->ki_siglist = p->p_siglist;
734 kp->ki_xstat = p->p_xstat;
735 kp->ki_acflag = p->p_acflag;
736 kp->ki_lock = p->p_lock;
737 if (p->p_pptr)
738 kp->ki_ppid = p->p_pptr->p_pid;
739 }
740
741 /*
742 * Fill in information that is thread specific.
743 * Must be called with sched_lock locked.
744 */
745 static void
746 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
747 {
748 struct ksegrp *kg;
749 struct proc *p;
750
751 p = td->td_proc;
752
753 if (td->td_wmesg != NULL)
754 strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
755 else
756 bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
757 if (TD_ON_LOCK(td)) {
758 kp->ki_kiflag |= KI_LOCKBLOCK;
759 strlcpy(kp->ki_lockname, td->td_lockname,
760 sizeof(kp->ki_lockname));
761 } else {
762 kp->ki_kiflag &= ~KI_LOCKBLOCK;
763 bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
764 }
765
766 if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
767 if (TD_ON_RUNQ(td) ||
768 TD_CAN_RUN(td) ||
769 TD_IS_RUNNING(td)) {
770 kp->ki_stat = SRUN;
771 } else if (P_SHOULDSTOP(p)) {
772 kp->ki_stat = SSTOP;
773 } else if (TD_IS_SLEEPING(td)) {
774 kp->ki_stat = SSLEEP;
775 } else if (TD_ON_LOCK(td)) {
776 kp->ki_stat = SLOCK;
777 } else {
778 kp->ki_stat = SWAIT;
779 }
780 } else {
781 kp->ki_stat = SIDL;
782 }
783
784 kg = td->td_ksegrp;
785
786 /* things in the KSE GROUP */
787 kp->ki_estcpu = kg->kg_estcpu;
788 kp->ki_slptime = kg->kg_slptime;
789 kp->ki_pri.pri_user = kg->kg_user_pri;
790 kp->ki_pri.pri_class = kg->kg_pri_class;
791
792 /* Things in the thread */
793 kp->ki_wchan = td->td_wchan;
794 kp->ki_pri.pri_level = td->td_priority;
795 kp->ki_pri.pri_native = td->td_base_pri;
796 kp->ki_lastcpu = td->td_lastcpu;
797 kp->ki_oncpu = td->td_oncpu;
798 kp->ki_tdflags = td->td_flags;
799 kp->ki_tid = td->td_tid;
800 kp->ki_numthreads = p->p_numthreads;
801 kp->ki_pcb = td->td_pcb;
802 kp->ki_kstack = (void *)td->td_kstack;
803 kp->ki_pctcpu = sched_pctcpu(td);
804
805 /* We can't get this anymore but ps etc never used it anyway. */
806 kp->ki_rqindex = 0;
807
808 SIGSETOR(kp->ki_siglist, td->td_siglist);
809 kp->ki_sigmask = td->td_sigmask;
810 }
811
812 /*
813 * Fill in a kinfo_proc structure for the specified process.
814 * Must be called with the target process locked.
815 */
816 void
817 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
818 {
819
820 fill_kinfo_proc_only(p, kp);
821 mtx_lock_spin(&sched_lock);
822 if (FIRST_THREAD_IN_PROC(p) != NULL)
823 fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
824 mtx_unlock_spin(&sched_lock);
825 }
826
827 struct pstats *
828 pstats_alloc(void)
829 {
830
831 return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
832 }
833
834 /*
835 * Copy parts of p_stats; zero the rest of p_stats (statistics).
836 */
837 void
838 pstats_fork(struct pstats *src, struct pstats *dst)
839 {
840
841 bzero(&dst->pstat_startzero,
842 __rangeof(struct pstats, pstat_startzero, pstat_endzero));
843 bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
844 __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
845 }
846
847 void
848 pstats_free(struct pstats *ps)
849 {
850
851 free(ps, M_SUBPROC);
852 }
853
854 /*
855 * Locate a zombie process by number
856 */
857 struct proc *
858 zpfind(pid_t pid)
859 {
860 struct proc *p;
861
862 sx_slock(&allproc_lock);
863 LIST_FOREACH(p, &zombproc, p_list)
864 if (p->p_pid == pid) {
865 PROC_LOCK(p);
866 break;
867 }
868 sx_sunlock(&allproc_lock);
869 return (p);
870 }
871
872 #define KERN_PROC_ZOMBMASK 0x3
873 #define KERN_PROC_NOTHREADS 0x4
874
875 /*
876 * Must be called with the process locked and will return with it unlocked.
877 */
878 static int
879 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
880 {
881 struct thread *td;
882 struct kinfo_proc kinfo_proc;
883 int error = 0;
884 struct proc *np;
885 pid_t pid = p->p_pid;
886
887 PROC_LOCK_ASSERT(p, MA_OWNED);
888
889 fill_kinfo_proc_only(p, &kinfo_proc);
890 if (flags & KERN_PROC_NOTHREADS) {
891 mtx_lock_spin(&sched_lock);
892 if (FIRST_THREAD_IN_PROC(p) != NULL)
893 fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), &kinfo_proc);
894 mtx_unlock_spin(&sched_lock);
895 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
896 sizeof(kinfo_proc));
897 } else {
898 mtx_lock_spin(&sched_lock);
899 if (FIRST_THREAD_IN_PROC(p) != NULL)
900 FOREACH_THREAD_IN_PROC(p, td) {
901 fill_kinfo_thread(td, &kinfo_proc);
902 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
903 sizeof(kinfo_proc));
904 if (error)
905 break;
906 }
907 else
908 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
909 sizeof(kinfo_proc));
910 mtx_unlock_spin(&sched_lock);
911 }
912 PROC_UNLOCK(p);
913 if (error)
914 return (error);
915 if (flags & KERN_PROC_ZOMBMASK)
916 np = zpfind(pid);
917 else {
918 if (pid == 0)
919 return (0);
920 np = pfind(pid);
921 }
922 if (np == NULL)
923 return EAGAIN;
924 if (np != p) {
925 PROC_UNLOCK(np);
926 return EAGAIN;
927 }
928 PROC_UNLOCK(np);
929 return (0);
930 }
931
932 static int
933 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
934 {
935 int *name = (int*) arg1;
936 u_int namelen = arg2;
937 struct proc *p;
938 int flags, doingzomb, oid_number;
939 int error = 0;
940
941 oid_number = oidp->oid_number;
942 if (oid_number != KERN_PROC_ALL &&
943 (oid_number & KERN_PROC_INC_THREAD) == 0)
944 flags = KERN_PROC_NOTHREADS;
945 else {
946 flags = 0;
947 oid_number &= ~KERN_PROC_INC_THREAD;
948 }
949 if (oid_number == KERN_PROC_PID) {
950 if (namelen != 1)
951 return (EINVAL);
952 error = sysctl_wire_old_buffer(req, 0);
953 if (error)
954 return (error);
955 p = pfind((pid_t)name[0]);
956 if (!p)
957 return (ESRCH);
958 if ((error = p_cansee(curthread, p))) {
959 PROC_UNLOCK(p);
960 return (error);
961 }
962 error = sysctl_out_proc(p, req, flags);
963 return (error);
964 }
965
966 switch (oid_number) {
967 case KERN_PROC_ALL:
968 if (namelen != 0)
969 return (EINVAL);
970 break;
971 case KERN_PROC_PROC:
972 if (namelen != 0 && namelen != 1)
973 return (EINVAL);
974 break;
975 default:
976 if (namelen != 1)
977 return (EINVAL);
978 break;
979 }
980
981 if (!req->oldptr) {
982 /* overestimate by 5 procs */
983 error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
984 if (error)
985 return (error);
986 }
987 error = sysctl_wire_old_buffer(req, 0);
988 if (error != 0)
989 return (error);
990 sx_slock(&allproc_lock);
991 for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
992 if (!doingzomb)
993 p = LIST_FIRST(&allproc);
994 else
995 p = LIST_FIRST(&zombproc);
996 for (; p != 0; p = LIST_NEXT(p, p_list)) {
997 /*
998 * Skip embryonic processes.
999 */
1000 mtx_lock_spin(&sched_lock);
1001 if (p->p_state == PRS_NEW) {
1002 mtx_unlock_spin(&sched_lock);
1003 continue;
1004 }
1005 mtx_unlock_spin(&sched_lock);
1006 PROC_LOCK(p);
1007 /*
1008 * Show a user only appropriate processes.
1009 */
1010 if (p_cansee(curthread, p)) {
1011 PROC_UNLOCK(p);
1012 continue;
1013 }
1014 /*
1015 * TODO - make more efficient (see notes below).
1016 * do by session.
1017 */
1018 switch (oid_number) {
1019
1020 case KERN_PROC_GID:
1021 if (p->p_ucred == NULL ||
1022 p->p_ucred->cr_gid != (gid_t)name[0]) {
1023 PROC_UNLOCK(p);
1024 continue;
1025 }
1026 break;
1027
1028 case KERN_PROC_PGRP:
1029 /* could do this by traversing pgrp */
1030 if (p->p_pgrp == NULL ||
1031 p->p_pgrp->pg_id != (pid_t)name[0]) {
1032 PROC_UNLOCK(p);
1033 continue;
1034 }
1035 break;
1036
1037 case KERN_PROC_RGID:
1038 if (p->p_ucred == NULL ||
1039 p->p_ucred->cr_rgid != (gid_t)name[0]) {
1040 PROC_UNLOCK(p);
1041 continue;
1042 }
1043 break;
1044
1045 case KERN_PROC_SESSION:
1046 if (p->p_session == NULL ||
1047 p->p_session->s_sid != (pid_t)name[0]) {
1048 PROC_UNLOCK(p);
1049 continue;
1050 }
1051 break;
1052
1053 case KERN_PROC_TTY:
1054 if ((p->p_flag & P_CONTROLT) == 0 ||
1055 p->p_session == NULL) {
1056 PROC_UNLOCK(p);
1057 continue;
1058 }
1059 SESS_LOCK(p->p_session);
1060 if (p->p_session->s_ttyp == NULL ||
1061 dev2udev(p->p_session->s_ttyp->t_dev) !=
1062 (dev_t)name[0]) {
1063 SESS_UNLOCK(p->p_session);
1064 PROC_UNLOCK(p);
1065 continue;
1066 }
1067 SESS_UNLOCK(p->p_session);
1068 break;
1069
1070 case KERN_PROC_UID:
1071 if (p->p_ucred == NULL ||
1072 p->p_ucred->cr_uid != (uid_t)name[0]) {
1073 PROC_UNLOCK(p);
1074 continue;
1075 }
1076 break;
1077
1078 case KERN_PROC_RUID:
1079 if (p->p_ucred == NULL ||
1080 p->p_ucred->cr_ruid != (uid_t)name[0]) {
1081 PROC_UNLOCK(p);
1082 continue;
1083 }
1084 break;
1085
1086 case KERN_PROC_PROC:
1087 break;
1088
1089 default:
1090 break;
1091
1092 }
1093
1094 error = sysctl_out_proc(p, req, flags | doingzomb);
1095 if (error) {
1096 sx_sunlock(&allproc_lock);
1097 return (error);
1098 }
1099 }
1100 }
1101 sx_sunlock(&allproc_lock);
1102 return (0);
1103 }
1104
1105 struct pargs *
1106 pargs_alloc(int len)
1107 {
1108 struct pargs *pa;
1109
1110 MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
1111 M_WAITOK);
1112 pa->ar_ref = 1;
1113 pa->ar_length = len;
1114 return (pa);
1115 }
1116
1117 void
1118 pargs_free(struct pargs *pa)
1119 {
1120
1121 FREE(pa, M_PARGS);
1122 }
1123
1124 void
1125 pargs_hold(struct pargs *pa)
1126 {
1127
1128 if (pa == NULL)
1129 return;
1130 PARGS_LOCK(pa);
1131 pa->ar_ref++;
1132 PARGS_UNLOCK(pa);
1133 }
1134
1135 void
1136 pargs_drop(struct pargs *pa)
1137 {
1138
1139 if (pa == NULL)
1140 return;
1141 PARGS_LOCK(pa);
1142 if (--pa->ar_ref == 0) {
1143 PARGS_UNLOCK(pa);
1144 pargs_free(pa);
1145 } else
1146 PARGS_UNLOCK(pa);
1147 }
1148
1149 /*
1150 * This sysctl allows a process to retrieve the argument list or process
1151 * title for another process without groping around in the address space
1152 * of the other process. It also allow a process to set its own "process
1153 * title to a string of its own choice.
1154 */
1155 static int
1156 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1157 {
1158 int *name = (int*) arg1;
1159 u_int namelen = arg2;
1160 struct pargs *newpa, *pa;
1161 struct proc *p;
1162 int error = 0;
1163
1164 if (namelen != 1)
1165 return (EINVAL);
1166
1167 p = pfind((pid_t)name[0]);
1168 if (!p)
1169 return (ESRCH);
1170
1171 if ((error = p_cansee(curthread, p)) != 0) {
1172 PROC_UNLOCK(p);
1173 return (error);
1174 }
1175
1176 if (req->newptr && curproc != p) {
1177 PROC_UNLOCK(p);
1178 return (EPERM);
1179 }
1180
1181 pa = p->p_args;
1182 pargs_hold(pa);
1183 PROC_UNLOCK(p);
1184 if (req->oldptr != NULL && pa != NULL)
1185 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1186 pargs_drop(pa);
1187 if (error != 0 || req->newptr == NULL)
1188 return (error);
1189
1190 if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
1191 return (ENOMEM);
1192 newpa = pargs_alloc(req->newlen);
1193 error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
1194 if (error != 0) {
1195 pargs_free(newpa);
1196 return (error);
1197 }
1198 PROC_LOCK(p);
1199 pa = p->p_args;
1200 p->p_args = newpa;
1201 PROC_UNLOCK(p);
1202 pargs_drop(pa);
1203 return (0);
1204 }
1205
1206 static int
1207 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
1208 {
1209 struct proc *p;
1210 char *sv_name;
1211 int *name;
1212 int namelen;
1213 int error;
1214
1215 namelen = arg2;
1216 if (namelen != 1)
1217 return (EINVAL);
1218
1219 name = (int *)arg1;
1220 if ((p = pfind((pid_t)name[0])) == NULL)
1221 return (ESRCH);
1222 if ((error = p_cansee(curthread, p))) {
1223 PROC_UNLOCK(p);
1224 return (error);
1225 }
1226 sv_name = p->p_sysent->sv_name;
1227 PROC_UNLOCK(p);
1228 return (sysctl_handle_string(oidp, sv_name, 0, req));
1229 }
1230
1231
1232 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");
1233
1234 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1235 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1236
1237 SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD,
1238 sysctl_kern_proc, "Process table");
1239
1240 SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1241 sysctl_kern_proc, "Process table");
1242
1243 SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD,
1244 sysctl_kern_proc, "Process table");
1245
1246 SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD,
1247 sysctl_kern_proc, "Process table");
1248
1249 SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1250 sysctl_kern_proc, "Process table");
1251
1252 SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1253 sysctl_kern_proc, "Process table");
1254
1255 SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1256 sysctl_kern_proc, "Process table");
1257
1258 SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1259 sysctl_kern_proc, "Process table");
1260
1261 SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD,
1262 sysctl_kern_proc, "Return process table, no threads");
1263
1264 SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
1265 sysctl_kern_proc_args, "Process argument list");
1266
1267 SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD,
1268 sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)");
1269
1270 SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
1271 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1272
1273 SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
1274 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1275
1276 SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
1277 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1278
1279 SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), sid_td,
1280 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1281
1282 SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
1283 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1284
1285 SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
1286 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1287
1288 SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
1289 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1290
1291 SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
1292 CTLFLAG_RD, sysctl_kern_proc, "Process table");
1293
1294 SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
1295 CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");
Cache object: 82597d3ed93b0d5587261d455a5f3fde
|