FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_proc.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
34 * $FreeBSD: releng/5.1/sys/kern/kern_proc.c 114983 2003-05-13 20:36:02Z jhb $
35 */
36
37 #include "opt_ktrace.h"
38 #include "opt_kstack_pages.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mutex.h>
46 #include <sys/proc.h>
47 #include <sys/kse.h>
48 #include <sys/sched.h>
49 #include <sys/smp.h>
50 #include <sys/sysctl.h>
51 #include <sys/filedesc.h>
52 #include <sys/tty.h>
53 #include <sys/signalvar.h>
54 #include <sys/sx.h>
55 #include <sys/user.h>
56 #include <sys/jail.h>
57 #ifdef KTRACE
58 #include <sys/uio.h>
59 #include <sys/ktrace.h>
60 #endif
61
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_map.h>
66 #include <vm/uma.h>
67 #include <machine/critical.h>
68
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

/* Local helpers and the UMA callbacks backing proc_zone. */
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static void proc_ctor(void *mem, int size, void *arg);
static void proc_dtor(void *mem, int size, void *arg);
static void proc_init(void *mem, int size);
static void proc_fini(void *mem, int size);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;		/* pid -> proc hash chains */
u_long pidhash;				/* hash mask returned by hashinit() */
struct pgrphashhead *pgrphashtbl;	/* pgid -> pgrp hash chains */
u_long pgrphash;			/* hash mask returned by hashinit() */
struct proclist allproc;		/* all live processes */
struct proclist zombproc;		/* zombie processes awaiting reaping */
struct sx allproc_lock;			/* guards allproc/zombproc traversal */
struct sx proctree_lock;		/* guards parent/child/pgrp/session links */
struct mtx pargs_ref_lock;		/* guards struct pargs.ar_ref counts */
struct mtx ppeers_lock;
uma_zone_t proc_zone;			/* type-stable allocator for struct proc */
uma_zone_t ithread_zone;

int kstack_pages = KSTACK_PAGES;	/* pages per kernel stack */
int uarea_pages = UAREA_PAGES;		/* pages per U-area */
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");

/* Size in bytes of the span [start, end) of consecutive struct members. */
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

/* The userland-visible kinfo_proc ABI must not silently change size. */
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
107
/*
 * Initialize global process hashing structures.
 *
 * Run once at boot: sets up the global locks, the allproc/zombproc
 * lists, the pid and pgrp hash tables (sized from maxproc), and the
 * type-stable UMA zone that proc structures are allocated from.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* One hash bucket per four processes, for both tables. */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	/*
	 * NOFREE: proc memory is type-stable so stale references can
	 * still be examined safely; extra room holds the scheduler's
	 * per-process data (see proc_init()).
	 */
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}
128
/*
 * Prepare a proc for use.
 *
 * UMA constructor, invoked each time a proc is handed out from
 * proc_zone.  Currently a no-op: the type-stable setup lives in
 * proc_init() and per-allocation reset in proc_dtor().  The local
 * is kept as a placeholder for future per-allocation work.
 */
static void
proc_ctor(void *mem, int size, void *arg)
{
	struct proc *p;

	p = (struct proc *)mem;
}
139
/*
 * Reclaim a proc after use.
 *
 * UMA destructor, invoked when a proc is returned to proc_zone.
 * Verifies the process is back to its single-threaded state, frees
 * any alternate kstack of the first thread, and re-establishes the
 * initial proc/ksegrp/kse/thread linkage for the next user.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	KASSERT((p->p_numthreads == 1),
	    ("bad number of threads in exiting process"));
	td = FIRST_THREAD_IN_PROC(p);
	KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
	kg = FIRST_KSEGRP_IN_PROC(p);
	KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
	ke = FIRST_KSE_IN_KSEGRP(kg);
	KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));

	/* Dispose of an alternate kstack, if it exists.
	 * XXX What if there are more than one thread in the proc?
	 * The first thread in the proc is special and not
	 * freed, so you gotta do this here.
	 */
	if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
		pmap_dispose_altkstack(td);

	/*
	 * We want to make sure we know the initial linkages.
	 * so for now tear them down and remake them.
	 * This is probably un-needed as we can probably rely
	 * on the state coming in here from wait4().
	 */
	proc_linkup(p, kg, ke, td);
}
178
/*
 * Initialize type-stable parts of a proc (when newly created).
 *
 * UMA zone-init, invoked once when a slab page is first used for a
 * proc.  Allocates the vm state and the initial thread/kse/ksegrp
 * trio, links them to the proc, and initializes the process mutex.
 * These survive across alloc/free cycles (the zone is NOFREE).
 */
static void
proc_init(void *mem, int size)
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	p = (struct proc *)mem;
	/* Scheduler-private data lives immediately after the proc. */
	p->p_sched = (struct p_sched *)&p[1];
	vm_proc_new(p);
	td = thread_alloc();
	ke = kse_alloc();
	kg = ksegrp_alloc();
	proc_linkup(p, kg, ke, td);
	/* Clear first: this memory has never held an initialized mutex. */
	bzero(&p->p_mtx, sizeof(struct mtx));
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
}
200
201 /*
202 * Tear down type-stable parts of a proc (just before being discarded)
203 */
204 static void
205 proc_fini(void *mem, int size)
206 {
207 struct proc *p;
208 struct thread *td;
209 struct ksegrp *kg;
210 struct kse *ke;
211
212 p = (struct proc *)mem;
213 KASSERT((p->p_numthreads == 1),
214 ("bad number of threads in freeing process"));
215 td = FIRST_THREAD_IN_PROC(p);
216 KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
217 kg = FIRST_KSEGRP_IN_PROC(p);
218 KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
219 ke = FIRST_KSE_IN_KSEGRP(kg);
220 KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
221 vm_proc_dispose(p);
222 thread_free(td);
223 ksegrp_free(kg);
224 kse_free(ke);
225 mtx_destroy(&p->p_mtx);
226 }
227
228 /*
229 * Is p an inferior of the current process?
230 */
231 int
232 inferior(p)
233 register struct proc *p;
234 {
235
236 sx_assert(&proctree_lock, SX_LOCKED);
237 for (; p != curproc; p = p->p_pptr)
238 if (p->p_pid == 0)
239 return (0);
240 return (1);
241 }
242
243 /*
244 * Locate a process by number
245 */
246 struct proc *
247 pfind(pid)
248 register pid_t pid;
249 {
250 register struct proc *p;
251
252 sx_slock(&allproc_lock);
253 LIST_FOREACH(p, PIDHASH(pid), p_hash)
254 if (p->p_pid == pid) {
255 PROC_LOCK(p);
256 break;
257 }
258 sx_sunlock(&allproc_lock);
259 return (p);
260 }
261
262 /*
263 * Locate a process group by number.
264 * The caller must hold proctree_lock.
265 */
266 struct pgrp *
267 pgfind(pgid)
268 register pid_t pgid;
269 {
270 register struct pgrp *pgrp;
271
272 sx_assert(&proctree_lock, SX_LOCKED);
273
274 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
275 if (pgrp->pg_id == pgid) {
276 PGRP_LOCK(pgrp);
277 return (pgrp);
278 }
279 }
280 return (NULL);
281 }
282
/*
 * Create a new process group.
 * pgid must be equal to the pid of p.
 * Begin a new session if required.
 *
 * The caller pre-allocates pgrp (and sess, when starting a session);
 * both are consumed on success.  Caller must hold proctree_lock
 * exclusively.  Misuse is caught by assertions; always returns 0.
 */
int
enterpgrp(p, pgid, pgrp, sess)
	register struct proc *p;
	pid_t pgid;
	struct pgrp *pgrp;
	struct session *sess;
{
	struct pgrp *pgrp2;

	sx_assert(&proctree_lock, SX_XLOCKED);

	KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
	KASSERT(p->p_pid == pgid,
	    ("enterpgrp: new pgrp and pid != pgid"));

	/* A group with this id must not already exist. */
	pgrp2 = pgfind(pgid);

	KASSERT(pgrp2 == NULL,
	    ("enterpgrp: pgrp with pgid exists"));
	KASSERT(!SESS_LEADER(p),
	    ("enterpgrp: session leader attempted setpgrp"));

	mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);

	if (sess != NULL) {
		/*
		 * new session: p becomes the leader, loses its
		 * controlling terminal, and the login name is copied
		 * over from p's previous session.
		 */
		mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
		PROC_LOCK(p);
		p->p_flag &= ~P_CONTROLT;
		PROC_UNLOCK(p);
		PGRP_LOCK(pgrp);
		sess->s_leader = p;
		sess->s_sid = p->p_pid;
		sess->s_count = 1;
		sess->s_ttyvp = NULL;
		sess->s_ttyp = NULL;
		bcopy(p->p_session->s_login, sess->s_login,
		    sizeof(sess->s_login));
		pgrp->pg_session = sess;
		KASSERT(p == curproc,
		    ("enterpgrp: mksession and p != curproc"));
	} else {
		/* Join p's existing session; take a new reference on it. */
		pgrp->pg_session = p->p_session;
		SESS_LOCK(pgrp->pg_session);
		pgrp->pg_session->s_count++;
		SESS_UNLOCK(pgrp->pg_session);
		PGRP_LOCK(pgrp);
	}
	pgrp->pg_id = pgid;
	LIST_INIT(&pgrp->pg_members);

	/*
	 * As we have an exclusive lock of proctree_lock,
	 * this should not deadlock.
	 */
	LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
	pgrp->pg_jobc = 0;
	SLIST_INIT(&pgrp->pg_sigiolst);
	PGRP_UNLOCK(pgrp);

	/* Finally migrate p itself into the freshly created group. */
	doenterpgrp(p, pgrp);

	return (0);
}
354
/*
 * Move p to an existing process group.
 *
 * The target group must belong to p's session and must differ from
 * p's current group; both conditions are asserted.  Caller must hold
 * proctree_lock exclusively and none of the affected proc/pgrp/
 * session locks.  Always returns 0.
 */
int
enterthispgrp(p, pgrp)
	register struct proc *p;
	struct pgrp *pgrp;
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
	KASSERT(pgrp->pg_session == p->p_session,
	    ("%s: pgrp's session %p, p->p_session %p.\n",
	    __func__,
	    pgrp->pg_session,
	    p->p_session));
	KASSERT(pgrp != p->p_pgrp,
	    ("%s: p belongs to pgrp.", __func__));

	doenterpgrp(p, pgrp);

	return (0);
}
381
/*
 * Move p to a process group.
 *
 * Common back end for enterpgrp()/enterthispgrp(): fixes up job
 * control counts, relinks p from its old group into pgrp, and
 * deletes the old group if p was its last member.  Caller must hold
 * proctree_lock exclusively and none of the affected locks.
 */
static void
doenterpgrp(p, pgrp)
	struct proc *p;
	struct pgrp *pgrp;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);

	savepgrp = p->p_pgrp;

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	/* Lock new group before old group; then move p over. */
	PGRP_LOCK(pgrp);
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = pgrp;
	PROC_UNLOCK(p);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	PGRP_UNLOCK(savepgrp);
	PGRP_UNLOCK(pgrp);
	/* Reap the old group if p was its last member. */
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
}
420
/*
 * remove process from process group
 *
 * Unlinks p from its group (leaving p->p_pgrp NULL) and deletes the
 * group if p was the last member.  Caller must hold proctree_lock
 * exclusively.  Always returns 0.
 */
int
leavepgrp(p)
	register struct proc *p;
{
	struct pgrp *savepgrp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	savepgrp = p->p_pgrp;
	PGRP_LOCK(savepgrp);
	PROC_LOCK(p);
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	PROC_UNLOCK(p);
	PGRP_UNLOCK(savepgrp);
	if (LIST_EMPTY(&savepgrp->pg_members))
		pgdelete(savepgrp);
	return (0);
}
442
443 /*
444 * delete a process group
445 */
446 static void
447 pgdelete(pgrp)
448 register struct pgrp *pgrp;
449 {
450 struct session *savesess;
451
452 sx_assert(&proctree_lock, SX_XLOCKED);
453 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
454 SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
455
456 /*
457 * Reset any sigio structures pointing to us as a result of
458 * F_SETOWN with our pgid.
459 */
460 funsetownlst(&pgrp->pg_sigiolst);
461
462 PGRP_LOCK(pgrp);
463 if (pgrp->pg_session->s_ttyp != NULL &&
464 pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
465 pgrp->pg_session->s_ttyp->t_pgrp = NULL;
466 LIST_REMOVE(pgrp, pg_hash);
467 savesess = pgrp->pg_session;
468 SESS_LOCK(savesess);
469 savesess->s_count--;
470 SESS_UNLOCK(savesess);
471 PGRP_UNLOCK(pgrp);
472 if (savesess->s_count == 0) {
473 mtx_destroy(&savesess->s_mtx);
474 FREE(pgrp->pg_session, M_SESSION);
475 }
476 mtx_destroy(&pgrp->pg_mtx);
477 FREE(pgrp, M_PGRP);
478 }
479
480 static void
481 pgadjustjobc(pgrp, entering)
482 struct pgrp *pgrp;
483 int entering;
484 {
485
486 PGRP_LOCK(pgrp);
487 if (entering)
488 pgrp->pg_jobc++;
489 else {
490 --pgrp->pg_jobc;
491 if (pgrp->pg_jobc == 0)
492 orphanpg(pgrp);
493 }
494 PGRP_UNLOCK(pgrp);
495 }
496
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	register struct proc *p;
	register struct pgrp *pgrp;
	int entering;
{
	register struct pgrp *hispgrp;
	register struct session *mysession;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 *
	 * Note: this LIST_FOREACH reuses and clobbers 'p' — after the
	 * loop, p no longer names the original process.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		hispgrp = p->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		/* Zombie children no longer count toward job control. */
		PROC_LOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			PROC_UNLOCK(p);
			continue;
		}
		PROC_UNLOCK(p);
		pgadjustjobc(hispgrp, entering);
	}
}
549
550 /*
551 * A process group has become orphaned;
552 * if there are any stopped processes in the group,
553 * hang-up all process in that group.
554 */
555 static void
556 orphanpg(pg)
557 struct pgrp *pg;
558 {
559 register struct proc *p;
560
561 PGRP_LOCK_ASSERT(pg, MA_OWNED);
562
563 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
564 PROC_LOCK(p);
565 if (P_SHOULDSTOP(p)) {
566 PROC_UNLOCK(p);
567 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
568 PROC_LOCK(p);
569 psignal(p, SIGHUP);
570 psignal(p, SIGCONT);
571 PROC_UNLOCK(p);
572 }
573 return;
574 }
575 PROC_UNLOCK(p);
576 }
577 }
578
579 #include "opt_ddb.h"
580 #ifdef DDB
581 #include <ddb/ddb.h>
582
/*
 * DDB "show pgrpdump" command: walk every pgrp hash bucket and print
 * each group with its id, session, session refcount and members.
 * Debugger context — runs lockless by design.
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	register struct pgrp *pgrp;
	register struct proc *p;
	register int i;

	/* pgrphash is the hash mask, hence <= rather than <. */
	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			printf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				printf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
608 #endif /* DDB */
609
/*
 * Fill in a kinfo_proc structure for the specified process.
 * Must be called with the target process locked.
 *
 * Snapshots identity, credentials, signal, VM, scheduling, tty and
 * parentage information into *kp for export to userland via sysctl.
 */
void
fill_kinfo_proc(p, kp)
	struct proc *p;
	struct kinfo_proc *kp;
{
	struct thread *td;
	struct thread *td0;
	struct kse *ke;
	struct ksegrp *kg;
	struct tty *tp;
	struct session *sp;
	struct timeval tv;
	struct sigacts *ps;

	td = FIRST_THREAD_IN_PROC(p);

	/* Start from zero so unset fields export as 0/NULL. */
	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	mtx_lock(&ktrace_mtx);
	kp->ki_traceflag = p->p_traceflag;
	mtx_unlock(&ktrace_mtx);
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	/* Credentials: uids/gids and (truncated) group vector. */
	if (p->p_ucred) {
		kp->ki_uid = p->p_ucred->cr_uid;
		kp->ki_ruid = p->p_ucred->cr_ruid;
		kp->ki_svuid = p->p_ucred->cr_svuid;
		/* XXX bde doesn't like KI_NGROUPS */
		kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
		bcopy(p->p_ucred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = p->p_ucred->cr_rgid;
		kp->ki_svgid = p->p_ucred->cr_svgid;
	}
	/* Ignored/caught signal sets, under the sigacts lock. */
	if (p->p_sigacts) {
		ps = p->p_sigacts;
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	mtx_lock_spin(&sched_lock);
	/* VM sizes: only meaningful for fully-formed, live processes. */
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		if (p->p_sflag & PS_INMEM)
			kp->ki_rssize += UAREA_PAGES;
		/* Count the kernel stacks of resident threads too. */
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
			if (td0->td_altkstack_obj != NULL)
				kp->ki_rssize += td0->td_altkstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	}
	/* Statistics require the stats area to be resident. */
	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
		kp->ki_start = p->p_stats->p_start;
		timevaladd(&kp->ki_start, &boottime);
		kp->ki_rusage = p->p_stats->p_ru;
		kp->ki_childtime.tv_sec = p->p_stats->p_cru.ru_utime.tv_sec +
		    p->p_stats->p_cru.ru_stime.tv_sec;
		kp->ki_childtime.tv_usec = p->p_stats->p_cru.ru_utime.tv_usec +
		    p->p_stats->p_cru.ru_stime.tv_usec;
	}
	if (p->p_state != PRS_ZOMBIE) {
		if (td == NULL) {
			/* XXXKSE: This should never happen. */
			printf("fill_kinfo_proc(): pid %d has no threads!\n",
			    p->p_pid);
			mtx_unlock_spin(&sched_lock);
			return;
		}
		/* Single-threaded: report wait channel / lock blockage. */
		if (!(p->p_flag & P_THREADED)) {
			if (td->td_wmesg != NULL) {
				strlcpy(kp->ki_wmesg, td->td_wmesg,
				    sizeof(kp->ki_wmesg));
			}
			if (TD_ON_LOCK(td)) {
				kp->ki_kiflag |= KI_LOCKBLOCK;
				strlcpy(kp->ki_lockname, td->td_lockname,
				    sizeof(kp->ki_lockname));
			}
		}

		/* Map thread state onto the classic process states. */
		if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
			if (TD_ON_RUNQ(td) ||
			    TD_CAN_RUN(td) ||
			    TD_IS_RUNNING(td)) {
				kp->ki_stat = SRUN;
			} else if (P_SHOULDSTOP(p)) {
				kp->ki_stat = SSTOP;
			} else if (TD_IS_SLEEPING(td)) {
				kp->ki_stat = SSLEEP;
			} else if (TD_ON_LOCK(td)) {
				kp->ki_stat = SLOCK;
			} else {
				kp->ki_stat = SWAIT;
			}
		} else {
			kp->ki_stat = SIDL;
		}

		kp->ki_sflag = p->p_sflag;
		kp->ki_swtime = p->p_swtime;
		kp->ki_pid = p->p_pid;
		/* vvv XXXKSE */
		if (!(p->p_flag & P_THREADED)) {
			kg = td->td_ksegrp;
			ke = td->td_kse;
			KASSERT((ke != NULL), ("fill_kinfo_proc: Null KSE"));
			bintime2timeval(&p->p_runtime, &tv);
			kp->ki_runtime =
			    tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;

			/* things in the KSE GROUP */
			kp->ki_estcpu = kg->kg_estcpu;
			kp->ki_slptime = kg->kg_slptime;
			kp->ki_pri.pri_user = kg->kg_user_pri;
			kp->ki_pri.pri_class = kg->kg_pri_class;
			kp->ki_nice = kg->kg_nice;

			/* Things in the thread */
			kp->ki_wchan = td->td_wchan;
			kp->ki_pri.pri_level = td->td_priority;
			kp->ki_pri.pri_native = td->td_base_pri;
			kp->ki_lastcpu = td->td_lastcpu;
			kp->ki_oncpu = td->td_oncpu;
			kp->ki_tdflags = td->td_flags;
			kp->ki_pcb = td->td_pcb;
			kp->ki_kstack = (void *)td->td_kstack;

			/* Things in the kse */
			kp->ki_rqindex = ke->ke_rqindex;
			kp->ki_pctcpu = sched_pctcpu(ke);
		} else {
			/* M:N threaded: no single cpu/kse to report. */
			kp->ki_oncpu = -1;
			kp->ki_lastcpu = -1;
			kp->ki_tdflags = -1;
			/* All the rest are 0 for now */
		}
		/* ^^^ XXXKSE */
	} else {
		kp->ki_stat = SZOMB;
	}
	mtx_unlock_spin(&sched_lock);
	sp = NULL;
	tp = NULL;
	/* Process group / session / controlling-terminal details. */
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = dev2udev(tp->t_dev);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NOUDEV;
	if (p->p_comm[0] != '\0') {
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
		strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
	}
	kp->ki_siglist = p->p_siglist;
	/*
	 * NOTE(review): td is dereferenced here unconditionally; the
	 * td == NULL guard above only covers the non-zombie path —
	 * confirm a zombie always retains its first thread here.
	 */
	SIGSETOR(kp->ki_siglist, td->td_siglist);
	kp->ki_sigmask = td->td_sigmask;
	kp->ki_xstat = p->p_xstat;
	kp->ki_acflag = p->p_acflag;
	kp->ki_flag = p->p_flag;
	/* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
	if (jailed(p->p_ucred))
		kp->ki_flag |= P_JAILED;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr)
		kp->ki_ppid = p->p_pptr->p_pid;
}
819
820 /*
821 * Locate a zombie process by number
822 */
823 struct proc *
824 zpfind(pid_t pid)
825 {
826 struct proc *p;
827
828 sx_slock(&allproc_lock);
829 LIST_FOREACH(p, &zombproc, p_list)
830 if (p->p_pid == pid) {
831 PROC_LOCK(p);
832 break;
833 }
834 sx_sunlock(&allproc_lock);
835 return (p);
836 }
837
838
839 /*
840 * Must be called with the process locked and will return with it unlocked.
841 */
842 static int
843 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int doingzomb)
844 {
845 struct kinfo_proc kinfo_proc;
846 int error;
847 struct proc *np;
848 pid_t pid = p->p_pid;
849
850 PROC_LOCK_ASSERT(p, MA_OWNED);
851 fill_kinfo_proc(p, &kinfo_proc);
852 PROC_UNLOCK(p);
853 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, sizeof(kinfo_proc));
854 if (error)
855 return (error);
856 if (doingzomb)
857 np = zpfind(pid);
858 else {
859 if (pid == 0)
860 return (0);
861 np = pfind(pid);
862 }
863 if (np == NULL)
864 return EAGAIN;
865 if (np != p) {
866 PROC_UNLOCK(np);
867 return EAGAIN;
868 }
869 PROC_UNLOCK(np);
870 return (0);
871 }
872
/*
 * Shared handler for the kern.proc.* process-table sysctls.
 *
 * KERN_PROC_PID exports a single process; KERN_PROC_ALL exports
 * everything; PGRP/TTY/UID/RUID filter the full walk (live processes
 * first, then zombies) by the single name component supplied.
 * Invisible (p_cansee) and embryonic processes are skipped.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int doingzomb;
	int error = 0;

	if (oidp->oid_number == KERN_PROC_PID) {
		if (namelen != 1)
			return (EINVAL);
		/* pfind() returns the process locked. */
		p = pfind((pid_t)name[0]);
		if (!p)
			return (0);
		if (p_cansee(curthread, p)) {
			PROC_UNLOCK(p);
			return (0);
		}
		error = sysctl_out_proc(p, req, 0);
		return (error);
	}
	/* ALL takes no argument; the filtered variants take exactly one. */
	if (oidp->oid_number == KERN_PROC_ALL && !namelen)
		;
	else if (oidp->oid_number != KERN_PROC_ALL && namelen == 1)
		;
	else
		return (EINVAL);

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			return (error);
	}
	/* Wire the output buffer to avoid faults with locks held. */
	sysctl_wire_old_buffer(req, 0);
	sx_slock(&allproc_lock);
	/* Pass 0 walks allproc, pass 1 walks zombproc. */
	for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
		if (!doingzomb)
			p = LIST_FIRST(&allproc);
		else
			p = LIST_FIRST(&zombproc);
		for (; p != 0; p = LIST_NEXT(p, p_list)) {
			/*
			 * Skip embryonic processes.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_state == PRS_NEW) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			/*
			 * Show a user only appropriate processes.
			 */
			if (p_cansee(curthread, p)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oidp->oid_number) {

			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_TTY:
				if ((p->p_flag & P_CONTROLT) == 0 ||
				    p->p_session == NULL) {
					PROC_UNLOCK(p);
					continue;
				}
				SESS_LOCK(p->p_session);
				if (p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (udev_t)name[0]) {
					SESS_UNLOCK(p->p_session);
					PROC_UNLOCK(p);
					continue;
				}
				SESS_UNLOCK(p->p_session);
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0]) {
					PROC_UNLOCK(p);
					continue;
				}
				break;
			}

			/* Consumes the proc lock. */
			error = sysctl_out_proc(p, req, doingzomb);
			if (error) {
				sx_sunlock(&allproc_lock);
				return (error);
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (0);
}
992
/*
 * Allocate a pargs holder with room for len bytes of argument data
 * (ar_args is the trailing storage).  Returns it with one reference;
 * may sleep (M_WAITOK).
 */
struct pargs *
pargs_alloc(int len)
{
	struct pargs *pa;

	MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
	    M_WAITOK);
	pa->ar_ref = 1;
	pa->ar_length = len;
	return (pa);
}
1004
/*
 * Release pargs storage.  Callers normally go through pargs_drop();
 * call this directly only for a never-published pargs.
 */
void
pargs_free(struct pargs *pa)
{

	FREE(pa, M_PARGS);
}
1011
/*
 * Acquire a reference on pa, under the shared pargs lock.
 * NULL is accepted and ignored.
 */
void
pargs_hold(struct pargs *pa)
{

	if (pa == NULL)
		return;
	PARGS_LOCK(pa);
	pa->ar_ref++;
	PARGS_UNLOCK(pa);
}
1022
1023 void
1024 pargs_drop(struct pargs *pa)
1025 {
1026
1027 if (pa == NULL)
1028 return;
1029 PARGS_LOCK(pa);
1030 if (--pa->ar_ref == 0) {
1031 PARGS_UNLOCK(pa);
1032 pargs_free(pa);
1033 } else
1034 PARGS_UNLOCK(pa);
1035 }
1036
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own
 * "process title" to a string of its own choice.
 *
 * Read: copies out the target's pargs (subject to p_cansee unless
 * ps_argsopen).  Write: only allowed on the calling process itself;
 * swaps in a freshly allocated pargs and drops the old one.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct pargs *newpa, *pa;
	struct proc *p;
	int error = 0;

	if (namelen != 1)
		return (EINVAL);

	/* pfind() returns the process locked. */
	p = pfind((pid_t)name[0]);
	if (!p)
		return (0);

	if ((!ps_argsopen) && p_cansee(curthread, p)) {
		PROC_UNLOCK(p);
		return (0);
	}

	/* Only a process may rewrite its own title. */
	if (req->newptr && curproc != p) {
		PROC_UNLOCK(p);
		return (EPERM);
	}

	/* Hold a reference so the pargs survive dropping the proc lock. */
	pa = p->p_args;
	pargs_hold(pa);
	PROC_UNLOCK(p);
	if (req->oldptr != NULL && pa != NULL)
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
	pargs_drop(pa);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		return (ENOMEM);
	newpa = pargs_alloc(req->newlen);
	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
	if (error != 0) {
		pargs_free(newpa);
		return (error);
	}
	/* Publish the new pargs and drop the proc's old reference. */
	PROC_LOCK(p);
	pa = p->p_args;
	p->p_args = newpa;
	PROC_UNLOCK(p);
	pargs_drop(pa);
	return (0);
}
1093
/* Root of the kern.proc sysctl tree. */
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

/*
 * Filtered views of the process table.  Each node consumes one name
 * component (pgid, tty udev, uid, ruid or pid respectively); all are
 * served by sysctl_kern_proc(), which dispatches on oid_number.
 */
SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

/* ANYBODY: unprivileged processes may set their own title. */
SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");
1116
Cache object: 315c368a247889d0d07e6146afe97708
|