FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_proc.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/11.2/sys/kern/kern_proc.c 331727 2018-03-29 04:41:45Z mjoras $");
34
35 #include "opt_compat.h"
36 #include "opt_ddb.h"
37 #include "opt_ktrace.h"
38 #include "opt_kstack_pages.h"
39 #include "opt_stack.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/elf.h>
44 #include <sys/eventhandler.h>
45 #include <sys/exec.h>
46 #include <sys/jail.h>
47 #include <sys/kernel.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/loginclass.h>
51 #include <sys/malloc.h>
52 #include <sys/mman.h>
53 #include <sys/mount.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/ptrace.h>
57 #include <sys/refcount.h>
58 #include <sys/resourcevar.h>
59 #include <sys/rwlock.h>
60 #include <sys/sbuf.h>
61 #include <sys/sysent.h>
62 #include <sys/sched.h>
63 #include <sys/smp.h>
64 #include <sys/stack.h>
65 #include <sys/stat.h>
66 #include <sys/sysctl.h>
67 #include <sys/filedesc.h>
68 #include <sys/tty.h>
69 #include <sys/signalvar.h>
70 #include <sys/sdt.h>
71 #include <sys/sx.h>
72 #include <sys/user.h>
73 #include <sys/vnode.h>
74 #include <sys/wait.h>
75
76 #ifdef DDB
77 #include <ddb/ddb.h>
78 #endif
79
80 #include <vm/vm.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_extern.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_page.h>
87 #include <vm/uma.h>
88
89 #ifdef COMPAT_FREEBSD32
90 #include <compat/freebsd32/freebsd32.h>
91 #include <compat/freebsd32/freebsd32_util.h>
92 #endif
93
94 SDT_PROVIDER_DEFINE(proc);
95 SDT_PROBE_DEFINE4(proc, , ctor, entry, "struct proc *", "int", "void *",
96 "int");
97 SDT_PROBE_DEFINE4(proc, , ctor, return, "struct proc *", "int", "void *",
98 "int");
99 SDT_PROBE_DEFINE4(proc, , dtor, entry, "struct proc *", "int", "void *",
100 "struct thread *");
101 SDT_PROBE_DEFINE3(proc, , dtor, return, "struct proc *", "int", "void *");
102 SDT_PROBE_DEFINE3(proc, , init, entry, "struct proc *", "int", "int");
103 SDT_PROBE_DEFINE3(proc, , init, return, "struct proc *", "int", "int");
104
105 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
106 MALLOC_DEFINE(M_SESSION, "session", "session header");
107 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
108 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
109
110 static void doenterpgrp(struct proc *, struct pgrp *);
111 static void orphanpg(struct pgrp *pg);
112 static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp);
113 static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
114 static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
115 int preferthread);
116 static void pgadjustjobc(struct pgrp *pgrp, int entering);
117 static void pgdelete(struct pgrp *);
118 static int proc_ctor(void *mem, int size, void *arg, int flags);
119 static void proc_dtor(void *mem, int size, void *arg);
120 static int proc_init(void *mem, int size, int flags);
121 static void proc_fini(void *mem, int size);
122 static void pargs_free(struct pargs *pa);
123 static struct proc *zpfind_locked(pid_t pid);
124
125 /*
126 * Other process lists
127 */
128 struct pidhashhead *pidhashtbl;
129 u_long pidhash;
130 struct pgrphashhead *pgrphashtbl;
131 u_long pgrphash;
132 struct proclist allproc;
133 struct proclist zombproc;
134 struct sx __exclusive_cache_line allproc_lock;
135 struct sx __exclusive_cache_line proctree_lock;
136 struct mtx __exclusive_cache_line ppeers_lock;
137 uma_zone_t proc_zone;
138
139 /*
140 * The offset of various fields in struct proc and struct thread.
141 * These are used by kernel debuggers to enumerate kernel threads and
142 * processes.
143 */
144 const int proc_off_p_pid = offsetof(struct proc, p_pid);
145 const int proc_off_p_comm = offsetof(struct proc, p_comm);
146 const int proc_off_p_list = offsetof(struct proc, p_list);
147 const int proc_off_p_threads = offsetof(struct proc, p_threads);
148 const int thread_off_td_tid = offsetof(struct thread, td_tid);
149 const int thread_off_td_name = offsetof(struct thread, td_name);
150 const int thread_off_td_oncpu = offsetof(struct thread, td_oncpu);
151 const int thread_off_td_pcb = offsetof(struct thread, td_pcb);
152 const int thread_off_td_plist = offsetof(struct thread, td_plist);
153
154 EVENTHANDLER_LIST_DEFINE(process_ctor);
155 EVENTHANDLER_LIST_DEFINE(process_dtor);
156 EVENTHANDLER_LIST_DEFINE(process_init);
157 EVENTHANDLER_LIST_DEFINE(process_fini);
158 EVENTHANDLER_LIST_DEFINE(process_exit);
159 EVENTHANDLER_LIST_DEFINE(process_fork);
160 EVENTHANDLER_LIST_DEFINE(process_exec);
161
162 EVENTHANDLER_LIST_DECLARE(thread_ctor);
163 EVENTHANDLER_LIST_DECLARE(thread_dtor);
164
165 int kstack_pages = KSTACK_PAGES;
166 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
167 "Kernel stack size in pages");
168 static int vmmap_skip_res_cnt = 0;
169 SYSCTL_INT(_kern, OID_AUTO, proc_vmmap_skip_resident_count, CTLFLAG_RW,
170 &vmmap_skip_res_cnt, 0,
171 "Skip calculation of the pages resident count in kern.proc.vmmap");
172
173 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
174 #ifdef COMPAT_FREEBSD32
175 CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE);
176 #endif
177
/*
 * Initialize global process hashing structures.
 *
 * Called once during boot.  Sets up the locks protecting the global
 * process lists, the pid and pgrp hash tables, and the UMA zone from
 * which struct proc is allocated.
 */
void
procinit(void)
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	/* Both hash tables are sized relative to the process limit. */
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	/*
	 * UMA_ZONE_NOFREE keeps proc structures type-stable: the memory
	 * is never returned to the VM system, so the parts initialized
	 * once by proc_init() survive reuse (see also proc_fini()).
	 */
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}
197
/*
 * Prepare a proc for use.
 *
 * UMA constructor, invoked each time a proc is handed out from the
 * zone.  Runs the registered process constructors and, if the
 * (type-stable) proc still carries a thread, the thread constructors
 * as well.  Always returns 0 (cannot fail).
 */
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
	struct proc *p;
	struct thread *td;

	p = (struct proc *)mem;
	/* SDT probes bracket the eventhandler invocation for tracing. */
	SDT_PROBE4(proc, , ctor , entry, p, size, arg, flags);
	EVENTHANDLER_DIRECT_INVOKE(process_ctor, p);
	SDT_PROBE4(proc, , ctor , return, p, size, arg, flags);
	td = FIRST_THREAD_IN_PROC(p);
	if (td != NULL) {
		/* Make sure all thread constructors are executed */
		EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
	}
	return (0);
}
218
/*
 * Reclaim a proc after use.
 *
 * UMA destructor, invoked when a proc is returned to the zone cache.
 * Tears down per-thread state (OSD, softdep cleanup) and runs the
 * registered thread and process destructors.  The memory itself is
 * never freed because the zone is UMA_ZONE_NOFREE.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;
	struct thread *td;

	/* INVARIANTS checks go here */
	p = (struct proc *)mem;
	td = FIRST_THREAD_IN_PROC(p);
	SDT_PROBE4(proc, , dtor, entry, p, size, arg, td);
	if (td != NULL) {
#ifdef INVARIANTS
		/* A proc must be down to its last thread when destroyed. */
		KASSERT((p->p_numthreads == 1),
		    ("bad number of threads in exiting process"));
		KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
		/* Free all OSD associated to this thread. */
		osd_thread_exit(td);
		td_softdep_cleanup(td);
		MPASS(td->td_su == NULL);

		/* Make sure all thread destructors are executed */
		EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	}
	EVENTHANDLER_DIRECT_INVOKE(process_dtor, p);
	/* A queued SIGCHLD ksiginfo must not outlive the proc. */
	if (p->p_ksi != NULL)
		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
	SDT_PROBE3(proc, , dtor, return, p, size, arg);
}
251
/*
 * Initialize type-stable parts of a proc (when newly created).
 *
 * UMA init routine: runs once per proc structure for the lifetime of
 * the system.  The mutexes, condition variables and thread list set
 * up here persist across reuse of the proc (zone is NOFREE), which is
 * why MTX_NEW is passed: the backing memory is not zeroed.
 * Always returns 0.
 */
static int
proc_init(void *mem, int size, int flags)
{
	struct proc *p;

	p = (struct proc *)mem;
	SDT_PROBE3(proc, , init, entry, p, size, flags);
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK | MTX_NEW);
	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_NEW);
	mtx_init(&p->p_statmtx, "pstatl", NULL, MTX_SPIN | MTX_NEW);
	mtx_init(&p->p_itimmtx, "pitiml", NULL, MTX_SPIN | MTX_NEW);
	mtx_init(&p->p_profmtx, "pprofl", NULL, MTX_SPIN | MTX_NEW);
	cv_init(&p->p_pwait, "ppwait");
	cv_init(&p->p_dbgwait, "dbgwait");
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	EVENTHANDLER_DIRECT_INVOKE(process_init, p);
	p->p_stats = pstats_alloc();
	p->p_pgrp = NULL;
	SDT_PROBE3(proc, , init, return, p, size, flags);
	return (0);
}
276
/*
 * UMA should ensure that this function is never called.
 * Freeing a proc structure would violate type stability.
 *
 * The zone is created with UMA_ZONE_NOFREE, so UMA never releases
 * items back; reaching this function means that guarantee was
 * broken, hence the panic.  The #ifdef notnow body documents what
 * teardown would look like if type stability were ever dropped.
 */
static void
proc_fini(void *mem, int size)
{
#ifdef notnow
	struct proc *p;

	p = (struct proc *)mem;
	EVENTHANDLER_DIRECT_INVOKE(process_fini, p);
	pstats_free(p->p_stats);
	thread_free(FIRST_THREAD_IN_PROC(p));
	mtx_destroy(&p->p_mtx);
	if (p->p_ksi != NULL)
		ksiginfo_free(p->p_ksi);
#else
	panic("proc reclaimed");
#endif
}
298
299 /*
300 * Is p an inferior of the current process?
301 */
302 int
303 inferior(struct proc *p)
304 {
305
306 sx_assert(&proctree_lock, SX_LOCKED);
307 PROC_LOCK_ASSERT(p, MA_OWNED);
308 for (; p != curproc; p = proc_realparent(p)) {
309 if (p->p_pid == 0)
310 return (0);
311 }
312 return (1);
313 }
314
315 struct proc *
316 pfind_locked(pid_t pid)
317 {
318 struct proc *p;
319
320 sx_assert(&allproc_lock, SX_LOCKED);
321 LIST_FOREACH(p, PIDHASH(pid), p_hash) {
322 if (p->p_pid == pid) {
323 PROC_LOCK(p);
324 if (p->p_state == PRS_NEW) {
325 PROC_UNLOCK(p);
326 p = NULL;
327 }
328 break;
329 }
330 }
331 return (p);
332 }
333
334 /*
335 * Locate a process by number; return only "live" processes -- i.e., neither
336 * zombies nor newly born but incompletely initialized processes. By not
337 * returning processes in the PRS_NEW state, we allow callers to avoid
338 * testing for that condition to avoid dereferencing p_ucred, et al.
339 */
340 struct proc *
341 pfind(pid_t pid)
342 {
343 struct proc *p;
344
345 sx_slock(&allproc_lock);
346 p = pfind_locked(pid);
347 sx_sunlock(&allproc_lock);
348 return (p);
349 }
350
/*
 * Locate the process containing the thread with the given tid.
 *
 * The caller must hold allproc_lock.  Scans every process (skipping
 * those still in PRS_NEW) and every thread within.  On a match the
 * process is returned locked via the goto; if no thread matches,
 * the FOREACH loop terminates with p == NULL, which falls through
 * the 'found' label and is returned as the not-found result.
 */
static struct proc *
pfind_tid_locked(pid_t tid)
{
	struct proc *p;
	struct thread *td;

	sx_assert(&allproc_lock, SX_LOCKED);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW) {
			/* Incompletely initialized; skip. */
			PROC_UNLOCK(p);
			continue;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			if (td->td_tid == tid)
				goto found;
		}
		PROC_UNLOCK(p);
	}
found:
	return (p);
}
373
374 /*
375 * Locate a process group by number.
376 * The caller must hold proctree_lock.
377 */
378 struct pgrp *
379 pgfind(pgid)
380 register pid_t pgid;
381 {
382 register struct pgrp *pgrp;
383
384 sx_assert(&proctree_lock, SX_LOCKED);
385
386 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
387 if (pgrp->pg_id == pgid) {
388 PGRP_LOCK(pgrp);
389 return (pgrp);
390 }
391 }
392 return (NULL);
393 }
394
/*
 * Locate process and do additional manipulations, depending on flags.
 *
 * Values of pid up to PID_MAX are looked up as process ids (falling
 * back to the zombie list unless PGET_NOTWEXIT is set); larger values
 * are treated as thread ids unless PGET_NOTID forbids that.  The
 * remaining PGET_* flags request visibility/debug permission checks
 * and state filtering.  On success *pp is set and 0 is returned; the
 * process is locked, unless PGET_HOLD was given, in which case it is
 * held (refcounted) and unlocked.  Returns ESRCH, EPERM, or an error
 * from p_cansee()/p_candebug() on failure.
 */
int
pget(pid_t pid, int flags, struct proc **pp)
{
	struct proc *p;
	int error;

	sx_slock(&allproc_lock);
	if (pid <= PID_MAX) {
		p = pfind_locked(pid);
		/* Zombies also qualify unless exiting procs are excluded. */
		if (p == NULL && (flags & PGET_NOTWEXIT) == 0)
			p = zpfind_locked(pid);
	} else if ((flags & PGET_NOTID) == 0) {
		/* pids above PID_MAX are interpreted as thread ids. */
		p = pfind_tid_locked(pid);
	} else {
		p = NULL;
	}
	sx_sunlock(&allproc_lock);
	if (p == NULL)
		return (ESRCH);
	if ((flags & PGET_CANSEE) != 0) {
		error = p_cansee(curthread, p);
		if (error != 0)
			goto errout;
	}
	if ((flags & PGET_CANDEBUG) != 0) {
		error = p_candebug(curthread, p);
		if (error != 0)
			goto errout;
	}
	if ((flags & PGET_ISCURRENT) != 0 && curproc != p) {
		error = EPERM;
		goto errout;
	}
	if ((flags & PGET_NOTWEXIT) != 0 && (p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto errout;
	}
	if ((flags & PGET_NOTINEXEC) != 0 && (p->p_flag & P_INEXEC) != 0) {
		/*
		 * XXXRW: Not clear ESRCH is the right error during proc
		 * execve().
		 */
		error = ESRCH;
		goto errout;
	}
	if ((flags & PGET_HOLD) != 0) {
		/* Convert the lock into a hold reference for the caller. */
		_PHOLD(p);
		PROC_UNLOCK(p);
	}
	*pp = p;
	return (0);
errout:
	PROC_UNLOCK(p);
	return (error);
}
453
454 /*
455 * Create a new process group.
456 * pgid must be equal to the pid of p.
457 * Begin a new session if required.
458 */
459 int
460 enterpgrp(p, pgid, pgrp, sess)
461 register struct proc *p;
462 pid_t pgid;
463 struct pgrp *pgrp;
464 struct session *sess;
465 {
466
467 sx_assert(&proctree_lock, SX_XLOCKED);
468
469 KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
470 KASSERT(p->p_pid == pgid,
471 ("enterpgrp: new pgrp and pid != pgid"));
472 KASSERT(pgfind(pgid) == NULL,
473 ("enterpgrp: pgrp with pgid exists"));
474 KASSERT(!SESS_LEADER(p),
475 ("enterpgrp: session leader attempted setpgrp"));
476
477 mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
478
479 if (sess != NULL) {
480 /*
481 * new session
482 */
483 mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
484 PROC_LOCK(p);
485 p->p_flag &= ~P_CONTROLT;
486 PROC_UNLOCK(p);
487 PGRP_LOCK(pgrp);
488 sess->s_leader = p;
489 sess->s_sid = p->p_pid;
490 refcount_init(&sess->s_count, 1);
491 sess->s_ttyvp = NULL;
492 sess->s_ttydp = NULL;
493 sess->s_ttyp = NULL;
494 bcopy(p->p_session->s_login, sess->s_login,
495 sizeof(sess->s_login));
496 pgrp->pg_session = sess;
497 KASSERT(p == curproc,
498 ("enterpgrp: mksession and p != curproc"));
499 } else {
500 pgrp->pg_session = p->p_session;
501 sess_hold(pgrp->pg_session);
502 PGRP_LOCK(pgrp);
503 }
504 pgrp->pg_id = pgid;
505 LIST_INIT(&pgrp->pg_members);
506
507 /*
508 * As we have an exclusive lock of proctree_lock,
509 * this should not deadlock.
510 */
511 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
512 pgrp->pg_jobc = 0;
513 SLIST_INIT(&pgrp->pg_sigiolst);
514 PGRP_UNLOCK(pgrp);
515
516 doenterpgrp(p, pgrp);
517
518 return (0);
519 }
520
521 /*
522 * Move p to an existing process group
523 */
524 int
525 enterthispgrp(p, pgrp)
526 register struct proc *p;
527 struct pgrp *pgrp;
528 {
529
530 sx_assert(&proctree_lock, SX_XLOCKED);
531 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
532 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
533 PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
534 SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
535 KASSERT(pgrp->pg_session == p->p_session,
536 ("%s: pgrp's session %p, p->p_session %p.\n",
537 __func__,
538 pgrp->pg_session,
539 p->p_session));
540 KASSERT(pgrp != p->p_pgrp,
541 ("%s: p belongs to pgrp.", __func__));
542
543 doenterpgrp(p, pgrp);
544
545 return (0);
546 }
547
548 /*
549 * Move p to a process group
550 */
551 static void
552 doenterpgrp(p, pgrp)
553 struct proc *p;
554 struct pgrp *pgrp;
555 {
556 struct pgrp *savepgrp;
557
558 sx_assert(&proctree_lock, SX_XLOCKED);
559 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
560 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
561 PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
562 SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
563
564 savepgrp = p->p_pgrp;
565
566 /*
567 * Adjust eligibility of affected pgrps to participate in job control.
568 * Increment eligibility counts before decrementing, otherwise we
569 * could reach 0 spuriously during the first call.
570 */
571 fixjobc(p, pgrp, 1);
572 fixjobc(p, p->p_pgrp, 0);
573
574 PGRP_LOCK(pgrp);
575 PGRP_LOCK(savepgrp);
576 PROC_LOCK(p);
577 LIST_REMOVE(p, p_pglist);
578 p->p_pgrp = pgrp;
579 PROC_UNLOCK(p);
580 LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
581 PGRP_UNLOCK(savepgrp);
582 PGRP_UNLOCK(pgrp);
583 if (LIST_EMPTY(&savepgrp->pg_members))
584 pgdelete(savepgrp);
585 }
586
587 /*
588 * remove process from process group
589 */
590 int
591 leavepgrp(p)
592 register struct proc *p;
593 {
594 struct pgrp *savepgrp;
595
596 sx_assert(&proctree_lock, SX_XLOCKED);
597 savepgrp = p->p_pgrp;
598 PGRP_LOCK(savepgrp);
599 PROC_LOCK(p);
600 LIST_REMOVE(p, p_pglist);
601 p->p_pgrp = NULL;
602 PROC_UNLOCK(p);
603 PGRP_UNLOCK(savepgrp);
604 if (LIST_EMPTY(&savepgrp->pg_members))
605 pgdelete(savepgrp);
606 return (0);
607 }
608
609 /*
610 * delete a process group
611 */
612 static void
613 pgdelete(pgrp)
614 register struct pgrp *pgrp;
615 {
616 struct session *savesess;
617 struct tty *tp;
618
619 sx_assert(&proctree_lock, SX_XLOCKED);
620 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
621 SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
622
623 /*
624 * Reset any sigio structures pointing to us as a result of
625 * F_SETOWN with our pgid.
626 */
627 funsetownlst(&pgrp->pg_sigiolst);
628
629 PGRP_LOCK(pgrp);
630 tp = pgrp->pg_session->s_ttyp;
631 LIST_REMOVE(pgrp, pg_hash);
632 savesess = pgrp->pg_session;
633 PGRP_UNLOCK(pgrp);
634
635 /* Remove the reference to the pgrp before deallocating it. */
636 if (tp != NULL) {
637 tty_lock(tp);
638 tty_rel_pgrp(tp, pgrp);
639 }
640
641 mtx_destroy(&pgrp->pg_mtx);
642 free(pgrp, M_PGRP);
643 sess_release(savesess);
644 }
645
646 static void
647 pgadjustjobc(pgrp, entering)
648 struct pgrp *pgrp;
649 int entering;
650 {
651
652 PGRP_LOCK(pgrp);
653 if (entering)
654 pgrp->pg_jobc++;
655 else {
656 --pgrp->pg_jobc;
657 if (pgrp->pg_jobc == 0)
658 orphanpg(pgrp);
659 }
660 PGRP_UNLOCK(pgrp);
661 }
662
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session). If that count reaches zero, the
 * process group becomes orphaned. Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * The caller must hold proctree_lock and none of the proc/pgrp/session
 * locks involved.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *q;

	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
	SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession)
		pgadjustjobc(pgrp, entering);

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(q, &p->p_children, p_sibling) {
		hispgrp = q->p_pgrp;
		if (hispgrp == pgrp ||
		    hispgrp->pg_session != mysession)
			continue;
		/* Zombies no longer participate in job control. */
		if (q->p_state == PRS_ZOMBIE)
			continue;
		pgadjustjobc(hispgrp, entering);
	}
}
709
/*
 * Perform job-control cleanup for the exiting current process:
 * if it is a session leader, disassociate the controlling terminal
 * (signalling the foreground pgrp and revoking the tty vnode), and
 * in all cases drop the process's job-control qualification via
 * fixjobc().  Must be called during exit (P_WEXIT asserted).
 */
void
killjobc(void)
{
	struct session *sp;
	struct tty *tp;
	struct proc *p;
	struct vnode *ttyvp;

	p = curproc;
	MPASS(p->p_flag & P_WEXIT);
	/*
	 * Do a quick check to see if there is anything to do with the
	 * proctree_lock held. pgrp and LIST_EMPTY checks are for fixjobc().
	 */
	PROC_LOCK(p);
	if (!SESS_LEADER(p) &&
	    (p->p_pgrp == p->p_pptr->p_pgrp) &&
	    LIST_EMPTY(&p->p_children)) {
		PROC_UNLOCK(p);
		return;
	}
	PROC_UNLOCK(p);

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		sp = p->p_session;

		/*
		 * s_ttyp is not zero'd; we use this to indicate that
		 * the session once had a controlling terminal. (for
		 * logging and informational purposes)
		 */
		SESS_LOCK(sp);
		ttyvp = sp->s_ttyvp;
		tp = sp->s_ttyp;
		sp->s_ttyvp = NULL;
		sp->s_ttydp = NULL;
		sp->s_leader = NULL;
		SESS_UNLOCK(sp);

		/*
		 * Signal foreground pgrp and revoke access to
		 * controlling terminal if it has not been revoked
		 * already.
		 *
		 * Because the TTY may have been revoked in the mean
		 * time and could already have a new session associated
		 * with it, make sure we don't send a SIGHUP to a
		 * foreground process group that does not belong to this
		 * session.
		 */

		if (tp != NULL) {
			tty_lock(tp);
			if (tp->t_session == sp)
				tty_signal_pgrp(tp, SIGHUP);
			tty_unlock(tp);
		}

		if (ttyvp != NULL) {
			/*
			 * proctree_lock is dropped around the vnode
			 * revoke and reacquired afterwards.
			 */
			sx_xunlock(&proctree_lock);
			if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
				VOP_REVOKE(ttyvp, REVOKEALL);
				VOP_UNLOCK(ttyvp, 0);
			}
			vrele(ttyvp);
			sx_xlock(&proctree_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
}
782
783 /*
784 * A process group has become orphaned;
785 * if there are any stopped processes in the group,
786 * hang-up all process in that group.
787 */
788 static void
789 orphanpg(pg)
790 struct pgrp *pg;
791 {
792 register struct proc *p;
793
794 PGRP_LOCK_ASSERT(pg, MA_OWNED);
795
796 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
797 PROC_LOCK(p);
798 if (P_SHOULDSTOP(p) == P_STOPPED_SIG) {
799 PROC_UNLOCK(p);
800 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
801 PROC_LOCK(p);
802 kern_psignal(p, SIGHUP);
803 kern_psignal(p, SIGCONT);
804 PROC_UNLOCK(p);
805 }
806 return;
807 }
808 PROC_UNLOCK(p);
809 }
810 }
811
/*
 * Acquire a reference on a session; paired with sess_release().
 */
void
sess_hold(struct session *s)
{

	refcount_acquire(&s->s_count);
}
818
819 void
820 sess_release(struct session *s)
821 {
822
823 if (refcount_release(&s->s_count)) {
824 if (s->s_ttyp != NULL) {
825 tty_lock(s->s_ttyp);
826 tty_rel_sess(s->s_ttyp, s);
827 }
828 mtx_destroy(&s->s_mtx);
829 free(s, M_SESSION);
830 }
831 }
832
833 #ifdef DDB
834
835 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
836 {
837 register struct pgrp *pgrp;
838 register struct proc *p;
839 register int i;
840
841 for (i = 0; i <= pgrphash; i++) {
842 if (!LIST_EMPTY(&pgrphashtbl[i])) {
843 printf("\tindx %d\n", i);
844 LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
845 printf(
846 "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
847 (void *)pgrp, (long)pgrp->pg_id,
848 (void *)pgrp->pg_session,
849 pgrp->pg_session->s_count,
850 (void *)LIST_FIRST(&pgrp->pg_members));
851 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
852 printf("\t\tpid %ld addr %p pgrp %p\n",
853 (long)p->p_pid, (void *)p,
854 (void *)p->p_pgrp);
855 }
856 }
857 }
858 }
859 }
860 #endif /* DDB */
861
/*
 * Calculate the kinfo_proc members which contain process-wide
 * informations: %CPU and estimated CPU summed over every thread.
 * Must be called with the target process locked.
 */
static void
fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kp->ki_estcpu = 0;
	kp->ki_pctcpu = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		/* The thread lock is held around the scheduler accessors. */
		thread_lock(td);
		kp->ki_pctcpu += sched_pctcpu(td);
		kp->ki_estcpu += sched_estcpu(td);
		thread_unlock(td);
	}
}
883
/*
 * Clear kinfo_proc and fill in any information that is common
 * to all threads in the process.
 * Must be called with the target process locked; the caller must
 * also hold proctree_lock (needed for proc_realparent()).
 */
static void
fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
{
	struct thread *td0;
	struct tty *tp;
	struct session *sp;
	struct ucred *cred;
	struct sigacts *ps;
	struct timeval boottime;

	/* For proc_realparent. */
	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	bzero(kp, sizeof(*kp));

	kp->ki_structsize = sizeof(*kp);
	kp->ki_paddr = p;
	kp->ki_addr =/* p->p_addr; */0; /* XXX */
	kp->ki_args = p->p_args;
	kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
	kp->ki_tracep = p->p_tracevp;
	kp->ki_traceflag = p->p_traceflag;
#endif
	kp->ki_fd = p->p_fd;
	kp->ki_vmspace = p->p_vmspace;
	kp->ki_flag = p->p_flag;
	kp->ki_flag2 = p->p_flag2;
	/* Credential-derived fields (uids, gids, jail, login class). */
	cred = p->p_ucred;
	if (cred) {
		kp->ki_uid = cred->cr_uid;
		kp->ki_ruid = cred->cr_ruid;
		kp->ki_svuid = cred->cr_svuid;
		kp->ki_cr_flags = 0;
		if (cred->cr_flags & CRED_FLAG_CAPMODE)
			kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE;
		/* XXX bde doesn't like KI_NGROUPS */
		if (cred->cr_ngroups > KI_NGROUPS) {
			/* Truncate and flag overflow for oversize group sets. */
			kp->ki_ngroups = KI_NGROUPS;
			kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
		} else
			kp->ki_ngroups = cred->cr_ngroups;
		bcopy(cred->cr_groups, kp->ki_groups,
		    kp->ki_ngroups * sizeof(gid_t));
		kp->ki_rgid = cred->cr_rgid;
		kp->ki_svgid = cred->cr_svgid;
		/* If jailed(cred), emulate the old P_JAILED flag. */
		if (jailed(cred)) {
			kp->ki_flag |= P_JAILED;
			/* If inside the jail, use 0 as a jail ID. */
			if (cred->cr_prison != curthread->td_ucred->cr_prison)
				kp->ki_jid = cred->cr_prison->pr_id;
		}
		strlcpy(kp->ki_loginclass, cred->cr_loginclass->lc_name,
		    sizeof(kp->ki_loginclass));
	}
	ps = p->p_sigacts;
	if (ps) {
		mtx_lock(&ps->ps_mtx);
		kp->ki_sigignore = ps->ps_sigignore;
		kp->ki_sigcatch = ps->ps_sigcatch;
		mtx_unlock(&ps->ps_mtx);
	}
	/* VM sizes are only meaningful for fully-live processes. */
	if (p->p_state != PRS_NEW &&
	    p->p_state != PRS_ZOMBIE &&
	    p->p_vmspace != NULL) {
		struct vmspace *vm = p->p_vmspace;

		kp->ki_size = vm->vm_map.size;
		kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
		FOREACH_THREAD_IN_PROC(p, td0) {
			if (!TD_IS_SWAPPED(td0))
				kp->ki_rssize += td0->td_kstack_pages;
		}
		kp->ki_swrss = vm->vm_swrss;
		kp->ki_tsize = vm->vm_tsize;
		kp->ki_dsize = vm->vm_dsize;
		kp->ki_ssize = vm->vm_ssize;
	} else if (p->p_state == PRS_ZOMBIE)
		kp->ki_stat = SZOMB;
	if (kp->ki_flag & P_INMEM)
		kp->ki_sflag = PS_INMEM;
	else
		kp->ki_sflag = 0;
	/* Calculate legacy swtime as seconds since 'swtick'. */
	kp->ki_swtime = (ticks - p->p_swtick) / hz;
	kp->ki_pid = p->p_pid;
	kp->ki_nice = p->p_nice;
	kp->ki_fibnum = p->p_fibnum;
	/* Convert the uptime-relative start time to wall-clock time. */
	kp->ki_start = p->p_stats->p_start;
	getboottime(&boottime);
	timevaladd(&kp->ki_start, &boottime);
	PROC_STATLOCK(p);
	rufetch(p, &kp->ki_rusage);
	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
	calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
	PROC_STATUNLOCK(p);
	calccru(p, &kp->ki_childutime, &kp->ki_childstime);
	/* Some callers want child times in a single value. */
	kp->ki_childtime = kp->ki_childstime;
	timevaladd(&kp->ki_childtime, &kp->ki_childutime);

	FOREACH_THREAD_IN_PROC(p, td0)
		kp->ki_cow += td0->td_cow;

	/* Process group, session, and controlling terminal. */
	tp = NULL;
	if (p->p_pgrp) {
		kp->ki_pgid = p->p_pgrp->pg_id;
		kp->ki_jobc = p->p_pgrp->pg_jobc;
		sp = p->p_pgrp->pg_session;

		if (sp != NULL) {
			kp->ki_sid = sp->s_sid;
			SESS_LOCK(sp);
			strlcpy(kp->ki_login, sp->s_login,
			    sizeof(kp->ki_login));
			if (sp->s_ttyvp)
				kp->ki_kiflag |= KI_CTTY;
			if (SESS_LEADER(p))
				kp->ki_kiflag |= KI_SLEADER;
			/* XXX proctree_lock */
			tp = sp->s_ttyp;
			SESS_UNLOCK(sp);
		}
	}
	if ((p->p_flag & P_CONTROLT) && tp != NULL) {
		kp->ki_tdev = tty_udev(tp);
		kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
		if (tp->t_session)
			kp->ki_tsid = tp->t_session->s_sid;
	} else
		kp->ki_tdev = NODEV;
	if (p->p_comm[0] != '\0')
		strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
	if (p->p_sysent && p->p_sysent->sv_name != NULL &&
	    p->p_sysent->sv_name[0] != '\0')
		strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
	kp->ki_siglist = p->p_siglist;
	kp->ki_xstat = KW_EXITCODE(p->p_xexit, p->p_xsig);
	kp->ki_acflag = p->p_acflag;
	kp->ki_lock = p->p_lock;
	if (p->p_pptr) {
		/* Report the real (non-debugger) parent as ppid. */
		kp->ki_ppid = proc_realparent(p)->p_pid;
		if (p->p_flag & P_TRACED)
			kp->ki_tracer = p->p_pptr->p_pid;
	}
}
1036
/*
 * Fill in information that is thread specific. Must be called with
 * target process locked. If 'preferthread' is set, overwrite certain
 * process-related fields that are maintained for both threads and
 * processes (rusage, runtime, %CPU, estcpu, cow, siglist) with the
 * per-thread values.
 */
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
	struct proc *p;

	p = td->td_proc;
	kp->ki_tdaddr = td;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/* Per-thread stats are protected by the proc stat lock. */
	if (preferthread)
		PROC_STATLOCK(p);
	thread_lock(td);
	if (td->td_wmesg != NULL)
		strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
	else
		bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
	/* Names longer than ki_tdname spill into ki_moretdname. */
	if (strlcpy(kp->ki_tdname, td->td_name, sizeof(kp->ki_tdname)) >=
	    sizeof(kp->ki_tdname)) {
		strlcpy(kp->ki_moretdname,
		    td->td_name + sizeof(kp->ki_tdname) - 1,
		    sizeof(kp->ki_moretdname));
	} else {
		bzero(kp->ki_moretdname, sizeof(kp->ki_moretdname));
	}
	if (TD_ON_LOCK(td)) {
		kp->ki_kiflag |= KI_LOCKBLOCK;
		strlcpy(kp->ki_lockname, td->td_lockname,
		    sizeof(kp->ki_lockname));
	} else {
		kp->ki_kiflag &= ~KI_LOCKBLOCK;
		bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
	}

	/* Derive the legacy SRUN/SSTOP/SSLEEP/... state code. */
	if (p->p_state == PRS_NORMAL) { /* approximate. */
		if (TD_ON_RUNQ(td) ||
		    TD_CAN_RUN(td) ||
		    TD_IS_RUNNING(td)) {
			kp->ki_stat = SRUN;
		} else if (P_SHOULDSTOP(p)) {
			kp->ki_stat = SSTOP;
		} else if (TD_IS_SLEEPING(td)) {
			kp->ki_stat = SSLEEP;
		} else if (TD_ON_LOCK(td)) {
			kp->ki_stat = SLOCK;
		} else {
			kp->ki_stat = SWAIT;
		}
	} else if (p->p_state == PRS_ZOMBIE) {
		kp->ki_stat = SZOMB;
	} else {
		kp->ki_stat = SIDL;
	}

	/* Things in the thread */
	kp->ki_wchan = td->td_wchan;
	kp->ki_pri.pri_level = td->td_priority;
	kp->ki_pri.pri_native = td->td_base_pri;

	/*
	 * Note: legacy fields; clamp at the old NOCPU value and/or
	 * the maximum u_char CPU value.
	 */
	if (td->td_lastcpu == NOCPU)
		kp->ki_lastcpu_old = NOCPU_OLD;
	else if (td->td_lastcpu > MAXCPU_OLD)
		kp->ki_lastcpu_old = MAXCPU_OLD;
	else
		kp->ki_lastcpu_old = td->td_lastcpu;

	if (td->td_oncpu == NOCPU)
		kp->ki_oncpu_old = NOCPU_OLD;
	else if (td->td_oncpu > MAXCPU_OLD)
		kp->ki_oncpu_old = MAXCPU_OLD;
	else
		kp->ki_oncpu_old = td->td_oncpu;

	kp->ki_lastcpu = td->td_lastcpu;
	kp->ki_oncpu = td->td_oncpu;
	kp->ki_tdflags = td->td_flags;
	kp->ki_tid = td->td_tid;
	kp->ki_numthreads = p->p_numthreads;
	kp->ki_pcb = td->td_pcb;
	kp->ki_kstack = (void *)td->td_kstack;
	kp->ki_slptime = (ticks - td->td_slptick) / hz;
	kp->ki_pri.pri_class = td->td_pri_class;
	kp->ki_pri.pri_user = td->td_user_pri;

	if (preferthread) {
		/* Override process-wide stats with this thread's own. */
		rufetchtd(td, &kp->ki_rusage);
		kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime);
		kp->ki_pctcpu = sched_pctcpu(td);
		kp->ki_estcpu = sched_estcpu(td);
		kp->ki_cow = td->td_cow;
	}

	/* We can't get this anymore but ps etc never used it anyway. */
	kp->ki_rqindex = 0;

	if (preferthread)
		kp->ki_siglist = td->td_siglist;
	kp->ki_sigmask = td->td_sigmask;
	thread_unlock(td);
	if (preferthread)
		PROC_STATUNLOCK(p);
}
1148
1149 /*
1150  * Fill in a kinfo_proc structure for the specified process.
1151  * Must be called with the target process locked.
1152  */
/*
 * Thread-level fields are taken from the process' first thread only
 * (preferthread == 0); fill_kinfo_aggregate() then sums per-thread
 * statistics across all threads, so it must run last.
 */
1153 void
1154 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
1155 {
1156 
1157 	MPASS(FIRST_THREAD_IN_PROC(p) != NULL);
1158 
1159 	fill_kinfo_proc_only(p, kp);
1160 	fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
1161 	fill_kinfo_aggregate(p, kp);
1162 }
1163
1164 struct pstats *
1165 pstats_alloc(void)
1166 {
1167
1168 return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
1169 }
1170
1171 /*
1172 * Copy parts of p_stats; zero the rest of p_stats (statistics).
1173 */
1174 void
1175 pstats_fork(struct pstats *src, struct pstats *dst)
1176 {
1177
1178 bzero(&dst->pstat_startzero,
1179 __rangeof(struct pstats, pstat_startzero, pstat_endzero));
1180 bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
1181 __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
1182 }
1183
/* Release a struct pstats obtained from pstats_alloc(). */
1184 void
1185 pstats_free(struct pstats *ps)
1186 {
1187 
1188 	free(ps, M_SUBPROC);
1189 }
1190
/*
 * Scan the zombie list for a process with the given pid.  The caller
 * must hold allproc_lock (shared or exclusive).  On success the
 * process is returned locked; NULL is returned if no match is found.
 */
1191 static struct proc *
1192 zpfind_locked(pid_t pid)
1193 {
1194 	struct proc *p;
1195 
1196 	sx_assert(&allproc_lock, SX_LOCKED);
1197 	LIST_FOREACH(p, &zombproc, p_list) {
1198 		if (p->p_pid == pid) {
1199 			PROC_LOCK(p);
1200 			break;
1201 		}
1202 	}
1203 	return (p);
1204 }
1205
1206 /*
1207  * Locate a zombie process by number
1208  */
/*
 * Returns the process locked on success, NULL otherwise; allproc_lock
 * is only held for the duration of the lookup.
 */
1209 struct proc *
1210 zpfind(pid_t pid)
1211 {
1212 	struct proc *p;
1213 
1214 	sx_slock(&allproc_lock);
1215 	p = zpfind_locked(pid);
1216 	sx_sunlock(&allproc_lock);
1217 	return (p);
1218 }
1219
1220 #ifdef COMPAT_FREEBSD32
1221
/*
 * Narrow a kernel pointer to 32 bits for export to 32-bit consumers.
 * A value that does not fit in 32 bits is reported as 0 rather than
 * as a wrapped (and therefore misleading) address.
 */
static inline uint32_t
ptr32_trim(void *ptr)
{
	uintptr_t v;

	v = (uintptr_t)ptr;
	return (v > UINT_MAX ? 0 : (uint32_t)v);
}
1234
/* Copy a pointer field into the 32-bit kinfo, truncated via ptr32_trim(). */
1235 #define PTRTRIM_CP(src,dst,fld) \
1236 	do { (dst).fld = ptr32_trim((src).fld); } while (0)
1237
/*
 * Convert a native struct kinfo_proc into the 32-bit ABI layout for a
 * COMPAT_FREEBSD32 consumer.  Scalar fields are copied with CP(),
 * kernel pointers are narrowed with PTRTRIM_CP() (out-of-range values
 * become 0), and fixed-size name/string buffers are copied verbatim.
 * The destination is zeroed first, so any field not listed here is 0.
 */
1238 static void
1239 freebsd32_kinfo_proc_out(const struct kinfo_proc *ki, struct kinfo_proc32 *ki32)
1240 {
1241 	int i;
1242 
1243 	bzero(ki32, sizeof(struct kinfo_proc32));
1244 	ki32->ki_structsize = sizeof(struct kinfo_proc32);
1245 	CP(*ki, *ki32, ki_layout);
1246 	PTRTRIM_CP(*ki, *ki32, ki_args);
1247 	PTRTRIM_CP(*ki, *ki32, ki_paddr);
1248 	PTRTRIM_CP(*ki, *ki32, ki_addr);
1249 	PTRTRIM_CP(*ki, *ki32, ki_tracep);
1250 	PTRTRIM_CP(*ki, *ki32, ki_textvp);
1251 	PTRTRIM_CP(*ki, *ki32, ki_fd);
1252 	PTRTRIM_CP(*ki, *ki32, ki_vmspace);
1253 	PTRTRIM_CP(*ki, *ki32, ki_wchan);
1254 	CP(*ki, *ki32, ki_pid);
1255 	CP(*ki, *ki32, ki_ppid);
1256 	CP(*ki, *ki32, ki_pgid);
1257 	CP(*ki, *ki32, ki_tpgid);
1258 	CP(*ki, *ki32, ki_sid);
1259 	CP(*ki, *ki32, ki_tsid);
1260 	CP(*ki, *ki32, ki_jobc);
1261 	CP(*ki, *ki32, ki_tdev);
1262 	CP(*ki, *ki32, ki_siglist);
1263 	CP(*ki, *ki32, ki_sigmask);
1264 	CP(*ki, *ki32, ki_sigignore);
1265 	CP(*ki, *ki32, ki_sigcatch);
1266 	CP(*ki, *ki32, ki_uid);
1267 	CP(*ki, *ki32, ki_ruid);
1268 	CP(*ki, *ki32, ki_svuid);
1269 	CP(*ki, *ki32, ki_rgid);
1270 	CP(*ki, *ki32, ki_svgid);
1271 	CP(*ki, *ki32, ki_ngroups);
1272 	for (i = 0; i < KI_NGROUPS; i++)
1273 		CP(*ki, *ki32, ki_groups[i]);
1274 	CP(*ki, *ki32, ki_size);
1275 	CP(*ki, *ki32, ki_rssize);
1276 	CP(*ki, *ki32, ki_swrss);
1277 	CP(*ki, *ki32, ki_tsize);
1278 	CP(*ki, *ki32, ki_dsize);
1279 	CP(*ki, *ki32, ki_ssize);
1280 	CP(*ki, *ki32, ki_xstat);
1281 	CP(*ki, *ki32, ki_acflag);
1282 	CP(*ki, *ki32, ki_pctcpu);
1283 	CP(*ki, *ki32, ki_estcpu);
1284 	CP(*ki, *ki32, ki_slptime);
1285 	CP(*ki, *ki32, ki_swtime);
1286 	CP(*ki, *ki32, ki_cow);
1287 	CP(*ki, *ki32, ki_runtime);
1288 	TV_CP(*ki, *ki32, ki_start);
1289 	TV_CP(*ki, *ki32, ki_childtime);
1290 	CP(*ki, *ki32, ki_flag);
1291 	CP(*ki, *ki32, ki_kiflag);
1292 	CP(*ki, *ki32, ki_traceflag);
1293 	CP(*ki, *ki32, ki_stat);
1294 	CP(*ki, *ki32, ki_nice);
1295 	CP(*ki, *ki32, ki_lock);
1296 	CP(*ki, *ki32, ki_rqindex);
1297 	CP(*ki, *ki32, ki_oncpu);
1298 	CP(*ki, *ki32, ki_lastcpu);
1299 
1300 	/* XXX TODO: wrap cpu value as appropriate */
1301 	CP(*ki, *ki32, ki_oncpu_old);
1302 	CP(*ki, *ki32, ki_lastcpu_old);
1303 
1304 	bcopy(ki->ki_tdname, ki32->ki_tdname, TDNAMLEN + 1);
1305 	bcopy(ki->ki_wmesg, ki32->ki_wmesg, WMESGLEN + 1);
1306 	bcopy(ki->ki_login, ki32->ki_login, LOGNAMELEN + 1);
1307 	bcopy(ki->ki_lockname, ki32->ki_lockname, LOCKNAMELEN + 1);
1308 	bcopy(ki->ki_comm, ki32->ki_comm, COMMLEN + 1);
1309 	bcopy(ki->ki_emul, ki32->ki_emul, KI_EMULNAMELEN + 1);
1310 	bcopy(ki->ki_loginclass, ki32->ki_loginclass, LOGINCLASSLEN + 1);
1311 	bcopy(ki->ki_moretdname, ki32->ki_moretdname, MAXCOMLEN - TDNAMLEN + 1);
1312 	CP(*ki, *ki32, ki_tracer);
1313 	CP(*ki, *ki32, ki_flag2);
1314 	CP(*ki, *ki32, ki_fibnum);
1315 	CP(*ki, *ki32, ki_cr_flags);
1316 	CP(*ki, *ki32, ki_jid);
1317 	CP(*ki, *ki32, ki_numthreads);
1318 	CP(*ki, *ki32, ki_tid);
1319 	CP(*ki, *ki32, ki_pri);
1320 	freebsd32_rusage_out(&ki->ki_rusage, &ki32->ki_rusage);
1321 	freebsd32_rusage_out(&ki->ki_rusage_ch, &ki32->ki_rusage_ch);
1322 	PTRTRIM_CP(*ki, *ki32, ki_pcb);
1323 	PTRTRIM_CP(*ki, *ki32, ki_kstack);
1324 	PTRTRIM_CP(*ki, *ki32, ki_udata);
1325 	PTRTRIM_CP(*ki, *ki32, ki_tdaddr);
1326 	CP(*ki, *ki32, ki_sflag);
1327 	CP(*ki, *ki32, ki_tdflags);
1328 }
1329 #endif
1330
/*
 * Emit one or more kinfo_proc records for process p into sb.  With
 * KERN_PROC_NOTHREADS a single record is written; otherwise one record
 * per thread.  KERN_PROC_MASK32 selects the 32-bit ABI layout.
 * Called with p locked; p is unlocked before returning.  Returns 0 or
 * ENOMEM if the sbuf ran out of space.
 */
1331 int
1332 kern_proc_out(struct proc *p, struct sbuf *sb, int flags)
1333 {
1334 	struct thread *td;
1335 	struct kinfo_proc ki;
1336 #ifdef COMPAT_FREEBSD32
1337 	struct kinfo_proc32 ki32;
1338 #endif
1339 	int error;
1340 
1341 	PROC_LOCK_ASSERT(p, MA_OWNED);
1342 	MPASS(FIRST_THREAD_IN_PROC(p) != NULL);
1343 
1344 	error = 0;
1345 	fill_kinfo_proc(p, &ki);
1346 	if ((flags & KERN_PROC_NOTHREADS) != 0) {
1347 #ifdef COMPAT_FREEBSD32
1348 		if ((flags & KERN_PROC_MASK32) != 0) {
1349 			freebsd32_kinfo_proc_out(&ki, &ki32);
1350 			if (sbuf_bcat(sb, &ki32, sizeof(ki32)) != 0)
1351 				error = ENOMEM;
1352 		} else
1353 #endif
1354 			if (sbuf_bcat(sb, &ki, sizeof(ki)) != 0)
1355 				error = ENOMEM;
1356 	} else {
/* Per-thread: refresh only the thread-specific fields of ki each pass. */
1357 		FOREACH_THREAD_IN_PROC(p, td) {
1358 			fill_kinfo_thread(td, &ki, 1);
1359 #ifdef COMPAT_FREEBSD32
1360 			if ((flags & KERN_PROC_MASK32) != 0) {
1361 				freebsd32_kinfo_proc_out(&ki, &ki32);
1362 				if (sbuf_bcat(sb, &ki32, sizeof(ki32)) != 0)
1363 					error = ENOMEM;
1364 			} else
1365 #endif
1366 				if (sbuf_bcat(sb, &ki, sizeof(ki)) != 0)
1367 					error = ENOMEM;
1368 			if (error != 0)
1369 				break;
1370 		}
1371 	}
1372 	PROC_UNLOCK(p);
1373 	return (error);
1374 }
1375
/*
 * Copy out the kinfo record(s) for p via an sbuf drained into the
 * sysctl request.  kern_proc_out() unlocks p.  After the copyout,
 * re-look-up the pid (in the zombie list if doingzomb) to verify the
 * process still exists; ESRCH means the snapshot is stale and the
 * caller's output would be inconsistent.
 */
1376 static int
1377 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags,
1378     int doingzomb)
1379 {
1380 	struct sbuf sb;
1381 	struct kinfo_proc ki;
1382 	struct proc *np;
1383 	int error, error2;
1384 	pid_t pid;
1385 
1386 	pid = p->p_pid;
1387 	sbuf_new_for_sysctl(&sb, (char *)&ki, sizeof(ki), req);
1388 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1389 	error = kern_proc_out(p, &sb, flags);
1390 	error2 = sbuf_finish(&sb);
1391 	sbuf_delete(&sb);
1392 	if (error != 0)
1393 		return (error);
1394 	else if (error2 != 0)
1395 		return (error2);
1396 	if (doingzomb)
1397 		np = zpfind(pid);
1398 	else {
/* pid 0 (swapper) is never on the pid hash; nothing to re-check. */
1399 		if (pid == 0)
1400 			return (0);
1401 		np = pfind(pid);
1402 	}
1403 	if (np == NULL)
1404 		return (ESRCH);
1405 	if (np != p) {
1406 		PROC_UNLOCK(np);
1407 		return (ESRCH);
1408 	}
1409 	PROC_UNLOCK(np);
1410 	return (0);
1411 }
1412
/*
 * Handler for the kern.proc.* process-listing sysctls.  The OID number
 * selects the filter (all, by pid, pgrp, session, tty, uid, ...);
 * KERN_PROC_INC_THREAD in the OID requests one record per thread.
 * Lock order throughout: proctree_lock -> allproc_lock -> proc lock.
 */
1413 static int
1414 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
1415 {
1416 	int *name = (int *)arg1;
1417 	u_int namelen = arg2;
1418 	struct proc *p;
1419 	int flags, doingzomb, oid_number;
1420 	int error = 0;
1421 
1422 	oid_number = oidp->oid_number;
1423 	if (oid_number != KERN_PROC_ALL &&
1424 	    (oid_number & KERN_PROC_INC_THREAD) == 0)
1425 		flags = KERN_PROC_NOTHREADS;
1426 	else {
1427 		flags = 0;
1428 		oid_number &= ~KERN_PROC_INC_THREAD;
1429 	}
1430 #ifdef COMPAT_FREEBSD32
1431 	if (req->flags & SCTL_MASK32)
1432 		flags |= KERN_PROC_MASK32;
1433 #endif
/* Single-pid lookup is a fast path that avoids the full list scan. */
1434 	if (oid_number == KERN_PROC_PID) {
1435 		if (namelen != 1)
1436 			return (EINVAL);
1437 		error = sysctl_wire_old_buffer(req, 0);
1438 		if (error)
1439 			return (error);
1440 		sx_slock(&proctree_lock);
1441 		error = pget((pid_t)name[0], PGET_CANSEE, &p);
1442 		if (error == 0)
1443 			error = sysctl_out_proc(p, req, flags, 0);
1444 		sx_sunlock(&proctree_lock);
1445 		return (error);
1446 	}
1447 
/* Validate the name[] arity expected by each filter type. */
1448 	switch (oid_number) {
1449 	case KERN_PROC_ALL:
1450 		if (namelen != 0)
1451 			return (EINVAL);
1452 		break;
1453 	case KERN_PROC_PROC:
1454 		if (namelen != 0 && namelen != 1)
1455 			return (EINVAL);
1456 		break;
1457 	default:
1458 		if (namelen != 1)
1459 			return (EINVAL);
1460 		break;
1461 	}
1462 
1463 	if (!req->oldptr) {
1464 		/* overestimate by 5 procs */
1465 		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
1466 		if (error)
1467 			return (error);
1468 	}
1469 	error = sysctl_wire_old_buffer(req, 0);
1470 	if (error != 0)
1471 		return (error);
1472 	sx_slock(&proctree_lock);
1473 	sx_slock(&allproc_lock);
/* Pass 0 walks allproc, pass 1 walks the zombie list. */
1474 	for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
1475 		if (!doingzomb)
1476 			p = LIST_FIRST(&allproc);
1477 		else
1478 			p = LIST_FIRST(&zombproc);
1479 		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
1480 			/*
1481 			 * Skip embryonic processes.
1482 			 */
1483 			PROC_LOCK(p);
1484 			if (p->p_state == PRS_NEW) {
1485 				PROC_UNLOCK(p);
1486 				continue;
1487 			}
1488 			KASSERT(p->p_ucred != NULL,
1489 			    ("process credential is NULL for non-NEW proc"));
1490 			/*
1491 			 * Show a user only appropriate processes.
1492 			 */
1493 			if (p_cansee(curthread, p)) {
1494 				PROC_UNLOCK(p);
1495 				continue;
1496 			}
1497 			/*
1498 			 * TODO - make more efficient (see notes below).
1499 			 * do by session.
1500 			 */
/* Apply the per-OID filter; a non-match unlocks p and moves on. */
1501 			switch (oid_number) {
1502 
1503 			case KERN_PROC_GID:
1504 				if (p->p_ucred->cr_gid != (gid_t)name[0]) {
1505 					PROC_UNLOCK(p);
1506 					continue;
1507 				}
1508 				break;
1509 
1510 			case KERN_PROC_PGRP:
1511 				/* could do this by traversing pgrp */
1512 				if (p->p_pgrp == NULL ||
1513 				    p->p_pgrp->pg_id != (pid_t)name[0]) {
1514 					PROC_UNLOCK(p);
1515 					continue;
1516 				}
1517 				break;
1518 
1519 			case KERN_PROC_RGID:
1520 				if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
1521 					PROC_UNLOCK(p);
1522 					continue;
1523 				}
1524 				break;
1525 
1526 			case KERN_PROC_SESSION:
1527 				if (p->p_session == NULL ||
1528 				    p->p_session->s_sid != (pid_t)name[0]) {
1529 					PROC_UNLOCK(p);
1530 					continue;
1531 				}
1532 				break;
1533 
1534 			case KERN_PROC_TTY:
1535 				if ((p->p_flag & P_CONTROLT) == 0 ||
1536 				    p->p_session == NULL) {
1537 					PROC_UNLOCK(p);
1538 					continue;
1539 				}
1540 				/* XXX proctree_lock */
1541 				SESS_LOCK(p->p_session);
1542 				if (p->p_session->s_ttyp == NULL ||
1543 				    tty_udev(p->p_session->s_ttyp) !=
1544 				    (dev_t)name[0]) {
1545 					SESS_UNLOCK(p->p_session);
1546 					PROC_UNLOCK(p);
1547 					continue;
1548 				}
1549 				SESS_UNLOCK(p->p_session);
1550 				break;
1551 
1552 			case KERN_PROC_UID:
1553 				if (p->p_ucred->cr_uid != (uid_t)name[0]) {
1554 					PROC_UNLOCK(p);
1555 					continue;
1556 				}
1557 				break;
1558 
1559 			case KERN_PROC_RUID:
1560 				if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
1561 					PROC_UNLOCK(p);
1562 					continue;
1563 				}
1564 				break;
1565 
1566 			case KERN_PROC_PROC:
1567 				break;
1568 
1569 			default:
1570 				break;
1571 
1572 			}
1573 
1574 			error = sysctl_out_proc(p, req, flags, doingzomb);
1575 			if (error) {
1576 				sx_sunlock(&allproc_lock);
1577 				sx_sunlock(&proctree_lock);
1578 				return (error);
1579 			}
1580 		}
1581 	}
1582 	sx_sunlock(&allproc_lock);
1583 	sx_sunlock(&proctree_lock);
1584 	return (0);
1585 }
1586
1587 struct pargs *
1588 pargs_alloc(int len)
1589 {
1590 struct pargs *pa;
1591
1592 pa = malloc(sizeof(struct pargs) + len, M_PARGS,
1593 M_WAITOK);
1594 refcount_init(&pa->ar_ref, 1);
1595 pa->ar_length = len;
1596 return (pa);
1597 }
1598
/* Release pargs storage; called only when the last reference is dropped. */
1599 static void
1600 pargs_free(struct pargs *pa)
1601 {
1602 
1603 	free(pa, M_PARGS);
1604 }
1605
1606 void
1607 pargs_hold(struct pargs *pa)
1608 {
1609
1610 if (pa == NULL)
1611 return;
1612 refcount_acquire(&pa->ar_ref);
1613 }
1614
1615 void
1616 pargs_drop(struct pargs *pa)
1617 {
1618
1619 if (pa == NULL)
1620 return;
1621 if (refcount_release(&pa->ar_ref))
1622 pargs_free(pa);
1623 }
1624
/*
 * Read up to len bytes of a user-space string from p at sptr into buf.
 * Returns 0 on any successful (possibly short) read, ENOMEM otherwise.
 * The caller is responsible for locating the NUL within buf.
 */
1625 static int
1626 proc_read_string(struct thread *td, struct proc *p, const char *sptr, char *buf,
1627     size_t len)
1628 {
1629 	ssize_t n;
1630 
1631 	/*
1632 	 * This may return a short read if the string is shorter than the chunk
1633 	 * and is aligned at the end of the page, and the following page is not
1634 	 * mapped.
1635 	 */
1636 	n = proc_readmem(td, p, (vm_offset_t)sptr, buf, len);
1637 	if (n <= 0)
1638 		return (ENOMEM);
1639 	return (0);
1640 }
1641
1642 #define PROC_AUXV_MAX	256	/* Safety limit on auxv size. */
1643 
/* Which per-process user-space vector get_proc_vector() should fetch. */
1644 enum proc_vector_type {
1645 	PROC_ARG,
1646 	PROC_ENV,
1647 	PROC_AUX,
1648 };
1649
1650 #ifdef COMPAT_FREEBSD32
/*
 * 32-bit (COMPAT_FREEBSD32) variant of get_proc_vector(): read the
 * argv, envv, or auxv vector of a 32-bit process p.  For PROC_ARG and
 * PROC_ENV the 32-bit pointers are widened into a freshly allocated
 * char ** array and the raw 32-bit buffer is freed here; for PROC_AUX
 * the raw Elf32_Auxinfo array itself is handed to the caller (cast to
 * char **).  In every success case the caller frees *proc_vectorp
 * with free(..., M_TEMP).
 */
1651 static int
1652 get_proc_vector32(struct thread *td, struct proc *p, char ***proc_vectorp,
1653     size_t *vsizep, enum proc_vector_type type)
1654 {
1655 	struct freebsd32_ps_strings pss;
1656 	Elf32_Auxinfo aux;
1657 	vm_offset_t vptr, ptr;
1658 	uint32_t *proc_vector32;
1659 	char **proc_vector;
1660 	size_t vsize, size;
1661 	int i, error;
1662 
1663 	error = 0;
1664 	if (proc_readmem(td, p, (vm_offset_t)p->p_sysent->sv_psstrings, &pss,
1665 	    sizeof(pss)) != sizeof(pss))
1666 		return (ENOMEM);
1667 	switch (type) {
1668 	case PROC_ARG:
1669 		vptr = (vm_offset_t)PTRIN(pss.ps_argvstr);
1670 		vsize = pss.ps_nargvstr;
1671 		if (vsize > ARG_MAX)
1672 			return (ENOEXEC);
1673 		size = vsize * sizeof(int32_t);
1674 		break;
1675 	case PROC_ENV:
1676 		vptr = (vm_offset_t)PTRIN(pss.ps_envstr);
1677 		vsize = pss.ps_nenvstr;
1678 		if (vsize > ARG_MAX)
1679 			return (ENOEXEC);
1680 		size = vsize * sizeof(int32_t);
1681 		break;
1682 	case PROC_AUX:
/* auxv sits just above the env pointer array; probe until AT_NULL. */
1683 		vptr = (vm_offset_t)PTRIN(pss.ps_envstr) +
1684 		    (pss.ps_nenvstr + 1) * sizeof(int32_t);
1685 		if (vptr % 4 != 0)
1686 			return (ENOEXEC);
1687 		for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) {
1688 			if (proc_readmem(td, p, ptr, &aux, sizeof(aux)) !=
1689 			    sizeof(aux))
1690 				return (ENOMEM);
1691 			if (aux.a_type == AT_NULL)
1692 				break;
1693 			ptr += sizeof(aux);
1694 		}
1695 		if (aux.a_type != AT_NULL)
1696 			return (ENOEXEC);
1697 		vsize = i + 1;
1698 		size = vsize * sizeof(aux);
1699 		break;
1700 	default:
1701 		KASSERT(0, ("Wrong proc vector type: %d", type));
1702 		return (EINVAL);
1703 	}
1704 	proc_vector32 = malloc(size, M_TEMP, M_WAITOK);
1705 	if (proc_readmem(td, p, vptr, proc_vector32, size) != size) {
1706 		error = ENOMEM;
1707 		goto done;
1708 	}
/* PROC_AUX: ownership of the raw 32-bit buffer passes to the caller. */
1709 	if (type == PROC_AUX) {
1710 		*proc_vectorp = (char **)proc_vector32;
1711 		*vsizep = vsize;
1712 		return (0);
1713 	}
1714 	proc_vector = malloc(vsize * sizeof(char *), M_TEMP, M_WAITOK);
1715 	for (i = 0; i < (int)vsize; i++)
1716 		proc_vector[i] = PTRIN(proc_vector32[i]);
1717 	*proc_vectorp = proc_vector;
1718 	*vsizep = vsize;
1719 done:
1720 	free(proc_vector32, M_TEMP);
1721 	return (error);
1722 }
1723 #endif
1724
/*
 * Read the argv (PROC_ARG), envv (PROC_ENV), or auxv (PROC_AUX)
 * vector of process p out of its user address space, via the
 * ps_strings block recorded in p_sysent.  On success *proc_vectorp
 * holds a freshly allocated M_TEMP array (for PROC_AUX, actually an
 * Elf_Auxinfo array) and *vsizep its element count; the caller frees
 * it.  32-bit processes are delegated to get_proc_vector32().
 */
1725 static int
1726 get_proc_vector(struct thread *td, struct proc *p, char ***proc_vectorp,
1727     size_t *vsizep, enum proc_vector_type type)
1728 {
1729 	struct ps_strings pss;
1730 	Elf_Auxinfo aux;
1731 	vm_offset_t vptr, ptr;
1732 	char **proc_vector;
1733 	size_t vsize, size;
1734 	int i;
1735 
1736 #ifdef COMPAT_FREEBSD32
1737 	if (SV_PROC_FLAG(p, SV_ILP32) != 0)
1738 		return (get_proc_vector32(td, p, proc_vectorp, vsizep, type));
1739 #endif
1740 	if (proc_readmem(td, p, (vm_offset_t)p->p_sysent->sv_psstrings, &pss,
1741 	    sizeof(pss)) != sizeof(pss))
1742 		return (ENOMEM);
1743 	switch (type) {
1744 	case PROC_ARG:
1745 		vptr = (vm_offset_t)pss.ps_argvstr;
1746 		vsize = pss.ps_nargvstr;
1747 		if (vsize > ARG_MAX)
1748 			return (ENOEXEC);
1749 		size = vsize * sizeof(char *);
1750 		break;
1751 	case PROC_ENV:
1752 		vptr = (vm_offset_t)pss.ps_envstr;
1753 		vsize = pss.ps_nenvstr;
1754 		if (vsize > ARG_MAX)
1755 			return (ENOEXEC);
1756 		size = vsize * sizeof(char *);
1757 		break;
1758 	case PROC_AUX:
1759 		/*
1760 		 * The aux array is just above env array on the stack. Check
1761 		 * that the address is naturally aligned.
1762 		 */
1763 		vptr = (vm_offset_t)pss.ps_envstr + (pss.ps_nenvstr + 1)
1764 		    * sizeof(char *);
1765 #if __ELF_WORD_SIZE == 64
1766 		if (vptr % sizeof(uint64_t) != 0)
1767 #else
1768 		if (vptr % sizeof(uint32_t) != 0)
1769 #endif
1770 			return (ENOEXEC);
1771 		/*
1772 		 * We count the array size reading the aux vectors from the
1773 		 * stack until AT_NULL vector is returned.  So (to keep the code
1774 		 * simple) we read the process stack twice: the first time here
1775 		 * to find the size and the second time when copying the vectors
1776 		 * to the allocated proc_vector.
1777 		 */
1778 		for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) {
1779 			if (proc_readmem(td, p, ptr, &aux, sizeof(aux)) !=
1780 			    sizeof(aux))
1781 				return (ENOMEM);
1782 			if (aux.a_type == AT_NULL)
1783 				break;
1784 			ptr += sizeof(aux);
1785 		}
1786 		/*
1787 		 * If the PROC_AUXV_MAX entries are iterated over, and we have
1788 		 * not reached AT_NULL, it is most likely we are reading wrong
1789 		 * data: either the process doesn't have auxv array or data has
1790 		 * been modified. Return the error in this case.
1791 		 */
1792 		if (aux.a_type != AT_NULL)
1793 			return (ENOEXEC);
1794 		vsize = i + 1;
1795 		size = vsize * sizeof(aux);
1796 		break;
1797 	default:
1798 		KASSERT(0, ("Wrong proc vector type: %d", type));
1799 		return (EINVAL); /* In case we are built without INVARIANTS. */
1800 	}
1801 	proc_vector = malloc(size, M_TEMP, M_WAITOK);
1802 	if (proc_readmem(td, p, vptr, proc_vector, size) != size) {
1803 		free(proc_vector, M_TEMP);
1804 		return (ENOMEM);
1805 	}
1806 	*proc_vectorp = proc_vector;
1807 	*vsizep = vsize;
1808 
1809 	return (0);
1810 }
1811
1812 #define GET_PS_STRINGS_CHUNK_SZ	256	/* Chunk size (bytes) for ps_strings operations. */
1813 
/*
 * Append the NUL-separated strings of p's argv or envv to sb.  Each
 * string is read from user space in GET_PS_STRINGS_CHUNK_SZ chunks;
 * total output is clamped to nchr bytes.  The process must be held
 * (PROC_ASSERT_HELD) so its address space cannot go away.
 */
1814 static int
1815 get_ps_strings(struct thread *td, struct proc *p, struct sbuf *sb,
1816     enum proc_vector_type type)
1817 {
1818 	size_t done, len, nchr, vsize;
1819 	int error, i;
1820 	char **proc_vector, *sptr;
1821 	char pss_string[GET_PS_STRINGS_CHUNK_SZ];
1822 
1823 	PROC_ASSERT_HELD(p);
1824 
1825 	/*
1826 	 * We are not going to read more than 2 * (PATH_MAX + ARG_MAX) bytes.
1827 	 */
1828 	nchr = 2 * (PATH_MAX + ARG_MAX);
1829 
1830 	error = get_proc_vector(td, p, &proc_vector, &vsize, type);
1831 	if (error != 0)
1832 		return (error);
1833 	for (done = 0, i = 0; i < (int)vsize && done < nchr; i++) {
1834 		/*
1835 		 * The program may have scribbled into its argv array, e.g. to
1836 		 * remove some arguments.  If that has happened, break out
1837 		 * before trying to read from NULL.
1838 		 */
1839 		if (proc_vector[i] == NULL)
1840 			break;
1841 		for (sptr = proc_vector[i]; ; sptr += GET_PS_STRINGS_CHUNK_SZ) {
1842 			error = proc_read_string(td, p, sptr, pss_string,
1843 			    sizeof(pss_string));
1844 			if (error != 0)
1845 				goto done;
1846 			len = strnlen(pss_string, GET_PS_STRINGS_CHUNK_SZ);
1847 			if (done + len >= nchr)
1848 				len = nchr - done - 1;
1849 			sbuf_bcat(sb, pss_string, len);
/* A full chunk with no NUL means the string continues in the next chunk. */
1850 			if (len != GET_PS_STRINGS_CHUNK_SZ)
1851 				break;
1852 			done += GET_PS_STRINGS_CHUNK_SZ;
1853 		}
/* Terminate each string in the output with an explicit NUL. */
1854 		sbuf_bcat(sb, "", 1);
1855 		done += len + 1;
1856 	}
1857 done:
1858 	free(proc_vector, M_TEMP);
1859 	return (error);
1860 }
1861
/*
 * Append p's argument strings to sb.
 * NOTE(review): the td parameter is ignored and curthread is passed
 * down instead — presumably intentional so reads are attributed to the
 * calling thread; confirm against callers before relying on td.
 */
1862 int
1863 proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb)
1864 {
1865 
1866 	return (get_ps_strings(curthread, p, sb, PROC_ARG));
1867 }
1868
/*
 * Append p's environment strings to sb.
 * NOTE(review): like proc_getargv(), td is ignored in favor of
 * curthread — confirm this is intentional before changing.
 */
1869 int
1870 proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb)
1871 {
1872 
1873 	return (get_ps_strings(curthread, p, sb, PROC_ENV));
1874 }
1875
/*
 * Append p's ELF auxiliary vector to sb as raw Elf_Auxinfo (or
 * Elf32_Auxinfo for a 32-bit process) records.  Returns 0, or ENOMEM
 * if the sbuf is full or the vector could not be read.
 */
1876 int
1877 proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb)
1878 {
1879 	size_t vsize, size;
1880 	char **auxv;
1881 	int error;
1882 
1883 	error = get_proc_vector(td, p, &auxv, &vsize, PROC_AUX);
1884 	if (error == 0) {
1885 #ifdef COMPAT_FREEBSD32
/* vsize counts entries; the per-entry size depends on the process ABI. */
1886 		if (SV_PROC_FLAG(p, SV_ILP32) != 0)
1887 			size = vsize * sizeof(Elf32_Auxinfo);
1888 		else
1889 #endif
1890 			size = vsize * sizeof(Elf_Auxinfo);
1891 		if (sbuf_bcat(sb, auxv, size) != 0)
1892 			error = ENOMEM;
1893 		free(auxv, M_TEMP);
1894 	}
1895 	return (error);
1896 }
1897
1898 /*
1899  * This sysctl allows a process to retrieve the argument list or process
1900  * title for another process without groping around in the address space
1901  * of the other process.  It also allow a process to set its own "process 
1902  * title to a string of its own choice.
1903  */
/*
 * Read side: if cached p_args exist they are returned directly;
 * otherwise the argv is read from the target's address space while the
 * process is held (not locked).  Write side: only the current process
 * (PGET_ISCURRENT) may replace its own p_args, bounded by
 * ps_arg_cache_limit.
 */
1904 static int
1905 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1906 {
1907 	int *name = (int *)arg1;
1908 	u_int namelen = arg2;
1909 	struct pargs *newpa, *pa;
1910 	struct proc *p;
1911 	struct sbuf sb;
1912 	int flags, error = 0, error2;
1913 
1914 	if (namelen != 1)
1915 		return (EINVAL);
1916 
1917 	flags = PGET_CANSEE;
1918 	if (req->newptr != NULL)
1919 		flags |= PGET_ISCURRENT;
1920 	error = pget((pid_t)name[0], flags, &p);
1921 	if (error)
1922 		return (error);
1923 
1924 	pa = p->p_args;
1925 	if (pa != NULL) {
/* Hold the pargs ref so it survives dropping the proc lock for copyout. */
1926 		pargs_hold(pa);
1927 		PROC_UNLOCK(p);
1928 		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1929 		pargs_drop(pa);
1930 	} else if ((p->p_flag & (P_WEXIT | P_SYSTEM)) == 0) {
1931 		_PHOLD(p);
1932 		PROC_UNLOCK(p);
1933 		sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req);
1934 		sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1935 		error = proc_getargv(curthread, p, &sb);
1936 		error2 = sbuf_finish(&sb);
1937 		PRELE(p);
1938 		sbuf_delete(&sb);
1939 		if (error == 0 && error2 != 0)
1940 			error = error2;
1941 	} else {
1942 		PROC_UNLOCK(p);
1943 	}
1944 	if (error != 0 || req->newptr == NULL)
1945 		return (error);
1946 
1947 	if (req->newlen > ps_arg_cache_limit - sizeof(struct pargs))
1948 		return (ENOMEM);
1949 	newpa = pargs_alloc(req->newlen);
1950 	error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
1951 	if (error != 0) {
1952 		pargs_free(newpa);
1953 		return (error);
1954 	}
/* Swap in the new pargs under the proc lock; drop the old reference after. */
1955 	PROC_LOCK(p);
1956 	pa = p->p_args;
1957 	p->p_args = newpa;
1958 	PROC_UNLOCK(p);
1959 	pargs_drop(pa);
1960 	return (0);
1961 }
1962
1963 /*
1964  * This sysctl allows a process to retrieve environment of another process.
1965  */
/*
 * The target is held (PGET_WANTREAD) while its environment strings are
 * read from user space.  System processes report an empty result rather
 * than an error.
 */
1966 static int
1967 sysctl_kern_proc_env(SYSCTL_HANDLER_ARGS)
1968 {
1969 	int *name = (int *)arg1;
1970 	u_int namelen = arg2;
1971 	struct proc *p;
1972 	struct sbuf sb;
1973 	int error, error2;
1974 
1975 	if (namelen != 1)
1976 		return (EINVAL);
1977 
1978 	error = pget((pid_t)name[0], PGET_WANTREAD, &p);
1979 	if (error != 0)
1980 		return (error);
1981 	if ((p->p_flag & P_SYSTEM) != 0) {
1982 		PRELE(p);
1983 		return (0);
1984 	}
1985 
1986 	sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req);
1987 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1988 	error = proc_getenvv(curthread, p, &sb);
1989 	error2 = sbuf_finish(&sb);
1990 	PRELE(p);
1991 	sbuf_delete(&sb);
1992 	return (error != 0 ? error : error2);
1993 }
1994
1995 /*
1996  * This sysctl allows a process to retrieve ELF auxiliary vector of
1997  * another process.
1998  */
/*
 * Mirrors sysctl_kern_proc_env(): the target is held for reading and
 * system processes yield an empty result.
 */
1999 static int
2000 sysctl_kern_proc_auxv(SYSCTL_HANDLER_ARGS)
2001 {
2002 	int *name = (int *)arg1;
2003 	u_int namelen = arg2;
2004 	struct proc *p;
2005 	struct sbuf sb;
2006 	int error, error2;
2007 
2008 	if (namelen != 1)
2009 		return (EINVAL);
2010 
2011 	error = pget((pid_t)name[0], PGET_WANTREAD, &p);
2012 	if (error != 0)
2013 		return (error);
2014 	if ((p->p_flag & P_SYSTEM) != 0) {
2015 		PRELE(p);
2016 		return (0);
2017 	}
2018 	sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req);
2019 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2020 	error = proc_getauxv(curthread, p, &sb);
2021 	error2 = sbuf_finish(&sb);
2022 	PRELE(p);
2023 	sbuf_delete(&sb);
2024 	return (error != 0 ? error : error2);
2025 }
2026
2027 /*
2028  * This sysctl allows a process to retrieve the path of the executable for
2029  * itself or another process.
2030  */
/*
 * pid -1 means the calling process (no lookup, no proc lock taken);
 * otherwise pget() returns the target locked and it is unlocked once
 * the text vnode has been referenced.  A process with no text vnode
 * yields an empty result, not an error.
 */
2031 static int
2032 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
2033 {
2034 	pid_t *pidp = (pid_t *)arg1;
2035 	unsigned int arglen = arg2;
2036 	struct proc *p;
2037 	struct vnode *vp;
2038 	char *retbuf, *freebuf;
2039 	int error;
2040 
2041 	if (arglen != 1)
2042 		return (EINVAL);
2043 	if (*pidp == -1) {	/* -1 means this process */
2044 		p = req->td->td_proc;
2045 	} else {
2046 		error = pget(*pidp, PGET_CANSEE, &p);
2047 		if (error != 0)
2048 			return (error);
2049 	}
2050 
2051 	vp = p->p_textvp;
2052 	if (vp == NULL) {
2053 		if (*pidp != -1)
2054 			PROC_UNLOCK(p);
2055 		return (0);
2056 	}
/* Hold the vnode across the (sleepable) pathname resolution. */
2057 	vref(vp);
2058 	if (*pidp != -1)
2059 		PROC_UNLOCK(p);
2060 	error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
2061 	vrele(vp);
2062 	if (error)
2063 		return (error);
2064 	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
2065 	free(freebuf, M_TEMP);
2066 	return (error);
2067 }
2068
/*
 * Return the name of the target process' syscall vector (ABI), e.g.
 * its emulation name.  sv_name is read under the proc lock; the string
 * itself lives in the statically allocated sysentvec, so it remains
 * valid after the unlock.
 */
2069 static int
2070 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
2071 {
2072 	struct proc *p;
2073 	char *sv_name;
2074 	int *name;
2075 	int namelen;
2076 	int error;
2077 
2078 	namelen = arg2;
2079 	if (namelen != 1)
2080 		return (EINVAL);
2081 
2082 	name = (int *)arg1;
2083 	error = pget((pid_t)name[0], PGET_CANSEE, &p);
2084 	if (error != 0)
2085 		return (error);
2086 	sv_name = p->p_sysent->sv_name;
2087 	PROC_UNLOCK(p);
2088 	return (sysctl_handle_string(oidp, sv_name, 0, req));
2089 }
2090
2091 #ifdef KINFO_OVMENTRY_SIZE
2092 CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
2093 #endif
2094
2095 #ifdef COMPAT_FREEBSD7
/*
 * COMPAT_FREEBSD7 handler: emit the target process' VM map as old-ABI
 * kinfo_ovmentry records.  The map read lock is dropped around each
 * SYSCTL_OUT (which may fault/sleep) and retaken; if the map timestamp
 * changed in between, the iterator is re-looked-up so the walk stays
 * valid.  Residency is counted here by probing the pmap page by page.
 */
2096 static int
2097 sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
2098 {
2099 	vm_map_entry_t entry, tmp_entry;
2100 	unsigned int last_timestamp;
2101 	char *fullpath, *freepath;
2102 	struct kinfo_ovmentry *kve;
2103 	struct vattr va;
2104 	struct ucred *cred;
2105 	int error, *name;
2106 	struct vnode *vp;
2107 	struct proc *p;
2108 	vm_map_t map;
2109 	struct vmspace *vm;
2110 
2111 	name = (int *)arg1;
2112 	error = pget((pid_t)name[0], PGET_WANTREAD, &p);
2113 	if (error != 0)
2114 		return (error);
2115 	vm = vmspace_acquire_ref(p);
2116 	if (vm == NULL) {
2117 		PRELE(p);
2118 		return (ESRCH);
2119 	}
2120 	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);
2121 
2122 	map = &vm->vm_map;
2123 	vm_map_lock_read(map);
2124 	for (entry = map->header.next; entry != &map->header;
2125 	    entry = entry->next) {
2126 		vm_object_t obj, tobj, lobj;
2127 		vm_offset_t addr;
2128 
2129 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2130 			continue;
2131 
2132 		bzero(kve, sizeof(*kve));
2133 		kve->kve_structsize = sizeof(*kve);
2134 
2135 		kve->kve_private_resident = 0;
2136 		obj = entry->object.vm_object;
2137 		if (obj != NULL) {
2138 			VM_OBJECT_RLOCK(obj);
2139 			if (obj->shadow_count == 1)
2140 				kve->kve_private_resident =
2141 				    obj->resident_page_count;
2142 		}
/* Count resident pages by probing the pmap one page at a time. */
2143 		kve->kve_resident = 0;
2144 		addr = entry->start;
2145 		while (addr < entry->end) {
2146 			if (pmap_extract(map->pmap, addr))
2147 				kve->kve_resident++;
2148 			addr += PAGE_SIZE;
2149 		}
2150 
/* Walk to the bottom of the shadow chain, accumulating backing offsets. */
2151 		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
2152 			if (tobj != obj) {
2153 				VM_OBJECT_RLOCK(tobj);
2154 				kve->kve_offset += tobj->backing_object_offset;
2155 			}
2156 			if (lobj != obj)
2157 				VM_OBJECT_RUNLOCK(lobj);
2158 			lobj = tobj;
2159 		}
2160 
2161 		kve->kve_start = (void*)entry->start;
2162 		kve->kve_end = (void*)entry->end;
2163 		kve->kve_offset += (off_t)entry->offset;
2164 
2165 		if (entry->protection & VM_PROT_READ)
2166 			kve->kve_protection |= KVME_PROT_READ;
2167 		if (entry->protection & VM_PROT_WRITE)
2168 			kve->kve_protection |= KVME_PROT_WRITE;
2169 		if (entry->protection & VM_PROT_EXECUTE)
2170 			kve->kve_protection |= KVME_PROT_EXEC;
2171 
2172 		if (entry->eflags & MAP_ENTRY_COW)
2173 			kve->kve_flags |= KVME_FLAG_COW;
2174 		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
2175 			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
2176 		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
2177 			kve->kve_flags |= KVME_FLAG_NOCOREDUMP;
2178 
/* Drop the map lock before sleepable work (vnode paths, copyout). */
2179 		last_timestamp = map->timestamp;
2180 		vm_map_unlock_read(map);
2181 
2182 		kve->kve_fileid = 0;
2183 		kve->kve_fsid = 0;
2184 		freepath = NULL;
2185 		fullpath = "";
2186 		if (lobj) {
2187 			vp = NULL;
2188 			switch (lobj->type) {
2189 			case OBJT_DEFAULT:
2190 				kve->kve_type = KVME_TYPE_DEFAULT;
2191 				break;
2192 			case OBJT_VNODE:
2193 				kve->kve_type = KVME_TYPE_VNODE;
2194 				vp = lobj->handle;
2195 				vref(vp);
2196 				break;
2197 			case OBJT_SWAP:
2198 				if ((lobj->flags & OBJ_TMPFS_NODE) != 0) {
2199 					kve->kve_type = KVME_TYPE_VNODE;
2200 					if ((lobj->flags & OBJ_TMPFS) != 0) {
2201 						vp = lobj->un_pager.swp.swp_tmpfs;
2202 						vref(vp);
2203 					}
2204 				} else {
2205 					kve->kve_type = KVME_TYPE_SWAP;
2206 				}
2207 				break;
2208 			case OBJT_DEVICE:
2209 				kve->kve_type = KVME_TYPE_DEVICE;
2210 				break;
2211 			case OBJT_PHYS:
2212 				kve->kve_type = KVME_TYPE_PHYS;
2213 				break;
2214 			case OBJT_DEAD:
2215 				kve->kve_type = KVME_TYPE_DEAD;
2216 				break;
2217 			case OBJT_SG:
2218 				kve->kve_type = KVME_TYPE_SG;
2219 				break;
2220 			default:
2221 				kve->kve_type = KVME_TYPE_UNKNOWN;
2222 				break;
2223 			}
2224 			if (lobj != obj)
2225 				VM_OBJECT_RUNLOCK(lobj);
2226 
2227 			kve->kve_ref_count = obj->ref_count;
2228 			kve->kve_shadow_count = obj->shadow_count;
2229 			VM_OBJECT_RUNLOCK(obj);
2230 			if (vp != NULL) {
2231 				vn_fullpath(curthread, vp, &fullpath,
2232 				    &freepath);
2233 				cred = curthread->td_ucred;
2234 				vn_lock(vp, LK_SHARED | LK_RETRY);
2235 				if (VOP_GETATTR(vp, &va, cred) == 0) {
2236 					kve->kve_fileid = va.va_fileid;
2237 					kve->kve_fsid = va.va_fsid;
2238 				}
2239 				vput(vp);
2240 			}
2241 		} else {
2242 			kve->kve_type = KVME_TYPE_NONE;
2243 			kve->kve_ref_count = 0;
2244 			kve->kve_shadow_count = 0;
2245 		}
2246 
2247 		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
2248 		if (freepath != NULL)
2249 			free(freepath, M_TEMP);
2250 
2251 		error = SYSCTL_OUT(req, kve, sizeof(*kve));
2252 		vm_map_lock_read(map);
2253 		if (error)
2254 			break;
/* Map changed while unlocked: re-find our position before continuing. */
2255 		if (last_timestamp != map->timestamp) {
2256 			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
2257 			entry = tmp_entry;
2258 		}
2259 	}
2260 	vm_map_unlock_read(map);
2261 	vmspace_free(vm);
2262 	PRELE(p);
2263 	free(kve, M_TEMP);
2264 	return (error);
2265 }
2266 #endif /* COMPAT_FREEBSD7 */
2267
2268 #ifdef KINFO_VMENTRY_SIZE
2269 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
2270 #endif
2271
/*
 * Count the resident pages backing a map entry, and report whether any
 * superpage mapping was found (*super).  Pages are located by walking
 * each index down the entry's object shadow chain; runs with no pages
 * are skipped in one step (pi_adv) instead of page-by-page.  With
 * vmmap_skip_res_cnt set, the count is skipped entirely and reported
 * as 0.
 */
2272 void
2273 kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry,
2274     int *resident_count, bool *super)
2275 {
2276 	vm_object_t obj, tobj;
2277 	vm_page_t m, m_adv;
2278 	vm_offset_t addr;
2279 	vm_paddr_t locked_pa;
2280 	vm_pindex_t pi, pi_adv, pindex;
2281 
2282 	*super = false;
2283 	*resident_count = 0;
2284 	if (vmmap_skip_res_cnt)
2285 		return;
2286 
2287 	locked_pa = 0;
2288 	obj = entry->object.vm_object;
2289 	addr = entry->start;
2290 	m_adv = NULL;
2291 	pi = OFF_TO_IDX(entry->offset);
2292 	for (; addr < entry->end; addr += IDX_TO_OFF(pi_adv), pi += pi_adv) {
2293 		if (m_adv != NULL) {
2294 			m = m_adv;
2295 		} else {
/* Find the next present page at or after pindex in the shadow chain. */
2296 			pi_adv = atop(entry->end - addr);
2297 			pindex = pi;
2298 			for (tobj = obj;; tobj = tobj->backing_object) {
2299 				m = vm_page_find_least(tobj, pindex);
2300 				if (m != NULL) {
2301 					if (m->pindex == pindex)
2302 						break;
2303 					if (pi_adv > m->pindex - pindex) {
2304 						pi_adv = m->pindex - pindex;
2305 						m_adv = m;
2306 					}
2307 				}
2308 				if (tobj->backing_object == NULL)
2309 					goto next;
2310 				pindex += OFF_TO_IDX(tobj->
2311 				    backing_object_offset);
2312 			}
2313 		}
2314 		m_adv = NULL;
2315 		if (m->psind != 0 && addr + pagesizes[1] <= entry->end &&
2316 		    (addr & (pagesizes[1] - 1)) == 0 &&
2317 		    (pmap_mincore(map->pmap, addr, &locked_pa) &
2318 		    MINCORE_SUPER) != 0) {
2319 			*super = true;
2320 			pi_adv = atop(pagesizes[1]);
2321 		} else {
2322 			/*
2323 			 * We do not test the found page on validity.
2324 			 * Either the page is busy and being paged in,
2325 			 * or it was invalidated.  The first case
2326 			 * should be counted as resident, the second
2327 			 * is not so clear; we do account both.
2328 			 */
2329 			pi_adv = 1;
2330 		}
2331 		*resident_count += pi_adv;
2332 next:;
2333 	}
2334 	PA_UNLOCK_COND(locked_pa);
2335 }
2336
/*
 * Emit a struct kinfo_vmentry record for each map entry of process "p"
 * into sbuf "sb".  Must be called with the process locked and will
 * return unlocked.  A "maxlen" of -1 means no output limit; otherwise
 * filling stops (without reporting an error) before the limit would be
 * exceeded.  KERN_VMMAP_PACK_KINFO in "flags" packs each record down to
 * end just past its path string.
 */
int
kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags)
{
	vm_map_entry_t entry, tmp_entry;
	struct vattr va;
	vm_map_t map;
	vm_object_t obj, tobj, lobj;
	char *fullpath, *freepath;
	struct kinfo_vmentry *kve;
	struct ucred *cred;
	struct vnode *vp;
	struct vmspace *vm;
	vm_offset_t addr;
	unsigned int last_timestamp;
	int error;
	bool super;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/* Hold the process and take a reference on its vmspace. */
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm == NULL) {
		/* No address space left; the process is exiting. */
		PRELE(p);
		return (ESRCH);
	}
	kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK | M_ZERO);

	error = 0;
	map = &vm->vm_map;
	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		/* Remember the end address for re-lookup after relocking. */
		addr = entry->end;
		bzero(kve, sizeof(*kve));
		obj = entry->object.vm_object;
		if (obj != NULL) {
			/*
			 * Read-lock the whole shadow chain, accumulating
			 * the backing offsets; "lobj" ends up pointing at
			 * the bottom (backing) object.
			 */
			for (tobj = obj; tobj != NULL;
			    tobj = tobj->backing_object) {
				VM_OBJECT_RLOCK(tobj);
				kve->kve_offset += tobj->backing_object_offset;
				lobj = tobj;
			}
			if (obj->backing_object == NULL)
				kve->kve_private_resident =
				    obj->resident_page_count;
			kern_proc_vmmap_resident(map, entry,
			    &kve->kve_resident, &super);
			if (super)
				kve->kve_flags |= KVME_FLAG_SUPER;
			/*
			 * Unlock the intermediate objects; "obj" and
			 * "lobj" remain locked for the accesses below.
			 */
			for (tobj = obj; tobj != NULL;
			    tobj = tobj->backing_object) {
				if (tobj != obj && tobj != lobj)
					VM_OBJECT_RUNLOCK(tobj);
			}
		} else {
			lobj = NULL;
		}

		kve->kve_start = entry->start;
		kve->kve_end = entry->end;
		kve->kve_offset += entry->offset;

		if (entry->protection & VM_PROT_READ)
			kve->kve_protection |= KVME_PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			kve->kve_protection |= KVME_PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			kve->kve_protection |= KVME_PROT_EXEC;

		if (entry->eflags & MAP_ENTRY_COW)
			kve->kve_flags |= KVME_FLAG_COW;
		if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
			kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
		if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
			kve->kve_flags |= KVME_FLAG_NOCOREDUMP;
		if (entry->eflags & MAP_ENTRY_GROWS_UP)
			kve->kve_flags |= KVME_FLAG_GROWS_UP;
		if (entry->eflags & MAP_ENTRY_GROWS_DOWN)
			kve->kve_flags |= KVME_FLAG_GROWS_DOWN;

		/*
		 * Drop the map lock: the vnode operations below may
		 * sleep.  The timestamp lets us detect map changes made
		 * while unlocked.
		 */
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);

		freepath = NULL;
		fullpath = "";
		if (lobj != NULL) {
			vp = NULL;
			switch (lobj->type) {
			case OBJT_DEFAULT:
				kve->kve_type = KVME_TYPE_DEFAULT;
				break;
			case OBJT_VNODE:
				kve->kve_type = KVME_TYPE_VNODE;
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				if ((lobj->flags & OBJ_TMPFS_NODE) != 0) {
					/* tmpfs mappings report as vnodes. */
					kve->kve_type = KVME_TYPE_VNODE;
					if ((lobj->flags & OBJ_TMPFS) != 0) {
						vp = lobj->un_pager.swp.swp_tmpfs;
						vref(vp);
					}
				} else {
					kve->kve_type = KVME_TYPE_SWAP;
				}
				break;
			case OBJT_DEVICE:
				kve->kve_type = KVME_TYPE_DEVICE;
				break;
			case OBJT_PHYS:
				kve->kve_type = KVME_TYPE_PHYS;
				break;
			case OBJT_DEAD:
				kve->kve_type = KVME_TYPE_DEAD;
				break;
			case OBJT_SG:
				kve->kve_type = KVME_TYPE_SG;
				break;
			case OBJT_MGTDEVICE:
				kve->kve_type = KVME_TYPE_MGTDEVICE;
				break;
			default:
				kve->kve_type = KVME_TYPE_UNKNOWN;
				break;
			}
			if (lobj != obj)
				VM_OBJECT_RUNLOCK(lobj);

			kve->kve_ref_count = obj->ref_count;
			kve->kve_shadow_count = obj->shadow_count;
			VM_OBJECT_RUNLOCK(obj);
			if (vp != NULL) {
				/* Resolve vnode path and attributes. */
				vn_fullpath(curthread, vp, &fullpath,
				    &freepath);
				kve->kve_vn_type = vntype_to_kinfo(vp->v_type);
				cred = curthread->td_ucred;
				vn_lock(vp, LK_SHARED | LK_RETRY);
				if (VOP_GETATTR(vp, &va, cred) == 0) {
					kve->kve_vn_fileid = va.va_fileid;
					kve->kve_vn_fsid = va.va_fsid;
					kve->kve_vn_mode =
					    MAKEIMODE(va.va_type, va.va_mode);
					kve->kve_vn_size = va.va_size;
					kve->kve_vn_rdev = va.va_rdev;
					kve->kve_status = KF_ATTR_VALID;
				}
				vput(vp);
			}
		} else {
			kve->kve_type = KVME_TYPE_NONE;
			kve->kve_ref_count = 0;
			kve->kve_shadow_count = 0;
		}

		strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		if ((flags & KERN_VMMAP_PACK_KINFO) != 0)
			kve->kve_structsize =
			    offsetof(struct kinfo_vmentry, kve_path) +
			    strlen(kve->kve_path) + 1;
		else
			kve->kve_structsize = sizeof(*kve);
		kve->kve_structsize = roundup(kve->kve_structsize,
		    sizeof(uint64_t));

		/* Halt filling and truncate rather than exceeding maxlen */
		if (maxlen != -1 && maxlen < kve->kve_structsize) {
			error = 0;
			vm_map_lock_read(map);
			break;
		} else if (maxlen != -1)
			maxlen -= kve->kve_structsize;

		if (sbuf_bcat(sb, kve, kve->kve_structsize) != 0)
			error = ENOMEM;
		vm_map_lock_read(map);
		if (error != 0)
			break;
		/*
		 * If the map changed while unlocked, re-find our place
		 * from the saved end address of the current entry.
		 */
		if (last_timestamp != map->timestamp) {
			vm_map_lookup_entry(map, addr - 1, &tmp_entry);
			entry = tmp_entry;
		}
	}
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);
	free(kve, M_TEMP);
	return (error);
}
2537
2538 static int
2539 sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
2540 {
2541 struct proc *p;
2542 struct sbuf sb;
2543 int error, error2, *name;
2544
2545 name = (int *)arg1;
2546 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_vmentry), req);
2547 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2548 error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
2549 if (error != 0) {
2550 sbuf_delete(&sb);
2551 return (error);
2552 }
2553 error = kern_proc_vmmap_out(p, &sb, -1, KERN_VMMAP_PACK_KINFO);
2554 error2 = sbuf_finish(&sb);
2555 sbuf_delete(&sb);
2556 return (error != 0 ? error : error2);
2557 }
2558
2559 #if defined(STACK) || defined(DDB)
/*
 * Sysctl handler for kern.proc.kstack: report a kernel stack trace for
 * every thread of the requested process.
 */
static int
sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_kstack *kkstp;
	int error, i, *name, numthreads;
	lwpid_t *lwpidarray;
	struct thread *td;
	struct stack *st;
	struct sbuf sb;
	struct proc *p;

	name = (int *)arg1;
	/* PGET_WANTREAD returns the process held but unlocked. */
	error = pget((pid_t)name[0], PGET_NOTINEXEC | PGET_WANTREAD, &p);
	if (error != 0)
		return (error);

	kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
	st = stack_create();

	/*
	 * Size the lwpid array to the current thread count; since the
	 * proc lock must be dropped for the M_WAITOK allocation, retry
	 * if more threads appeared in the meantime.
	 */
	lwpidarray = NULL;
	PROC_LOCK(p);
	do {
		if (lwpidarray != NULL) {
			free(lwpidarray, M_TEMP);
			lwpidarray = NULL;
		}
		numthreads = p->p_numthreads;
		PROC_UNLOCK(p);
		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
		    M_WAITOK | M_ZERO);
		PROC_LOCK(p);
	} while (numthreads < p->p_numthreads);

	/*
	 * XXXRW: During the below loop, execve(2) and countless other sorts
	 * of changes could have taken place.  Should we check to see if the
	 * vmspace has been replaced, or the like, in order to prevent
	 * giving a snapshot that spans, say, execve(2), with some threads
	 * before and some after?  Among other things, the credentials could
	 * have changed, in which case the right to extract debug info might
	 * no longer be assured.
	 */
	/* Snapshot the thread ids under the proc lock. */
	i = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(i < numthreads,
		    ("sysctl_kern_proc_kstack: numthreads"));
		lwpidarray[i] = td->td_tid;
		i++;
	}
	numthreads = i;
	for (i = 0; i < numthreads; i++) {
		/* The thread may have exited; re-find it by tid. */
		td = thread_find(p, lwpidarray[i]);
		if (td == NULL) {
			continue;
		}
		bzero(kkstp, sizeof(*kkstp));
		/* Format the trace into the record's own buffer. */
		(void)sbuf_new(&sb, kkstp->kkst_trace,
		    sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
		thread_lock(td);
		kkstp->kkst_tid = td->td_tid;
		if (TD_IS_SWAPPED(td)) {
			/* A swapped-out stack cannot be walked. */
			kkstp->kkst_state = KKST_STATE_SWAPPED;
		} else if (TD_IS_RUNNING(td)) {
			if (stack_save_td_running(st, td) == 0)
				kkstp->kkst_state = KKST_STATE_STACKOK;
			else
				kkstp->kkst_state = KKST_STATE_RUNNING;
		} else {
			kkstp->kkst_state = KKST_STATE_STACKOK;
			stack_save_td(st, td);
		}
		thread_unlock(td);
		/* Drop the proc lock across the possibly-sleeping copyout. */
		PROC_UNLOCK(p);
		stack_sbuf_print(&sb, st);
		sbuf_finish(&sb);
		sbuf_delete(&sb);
		error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
		PROC_LOCK(p);
		if (error)
			break;
	}
	_PRELE(p);
	PROC_UNLOCK(p);
	if (lwpidarray != NULL)
		free(lwpidarray, M_TEMP);
	stack_destroy(st);
	free(kkstp, M_TEMP);
	return (error);
}
2649 #endif
2650
2651 /*
2652 * This sysctl allows a process to retrieve the full list of groups from
2653 * itself or another process.
2654 */
2655 static int
2656 sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS)
2657 {
2658 pid_t *pidp = (pid_t *)arg1;
2659 unsigned int arglen = arg2;
2660 struct proc *p;
2661 struct ucred *cred;
2662 int error;
2663
2664 if (arglen != 1)
2665 return (EINVAL);
2666 if (*pidp == -1) { /* -1 means this process */
2667 p = req->td->td_proc;
2668 PROC_LOCK(p);
2669 } else {
2670 error = pget(*pidp, PGET_CANSEE, &p);
2671 if (error != 0)
2672 return (error);
2673 }
2674
2675 cred = crhold(p->p_ucred);
2676 PROC_UNLOCK(p);
2677
2678 error = SYSCTL_OUT(req, cred->cr_groups,
2679 cred->cr_ngroups * sizeof(gid_t));
2680 crfree(cred);
2681 return (error);
2682 }
2683
2684 /*
2685 * This sysctl allows a process to retrieve or/and set the resource limit for
2686 * another process.
2687 */
2688 static int
2689 sysctl_kern_proc_rlimit(SYSCTL_HANDLER_ARGS)
2690 {
2691 int *name = (int *)arg1;
2692 u_int namelen = arg2;
2693 struct rlimit rlim;
2694 struct proc *p;
2695 u_int which;
2696 int flags, error;
2697
2698 if (namelen != 2)
2699 return (EINVAL);
2700
2701 which = (u_int)name[1];
2702 if (which >= RLIM_NLIMITS)
2703 return (EINVAL);
2704
2705 if (req->newptr != NULL && req->newlen != sizeof(rlim))
2706 return (EINVAL);
2707
2708 flags = PGET_HOLD | PGET_NOTWEXIT;
2709 if (req->newptr != NULL)
2710 flags |= PGET_CANDEBUG;
2711 else
2712 flags |= PGET_CANSEE;
2713 error = pget((pid_t)name[0], flags, &p);
2714 if (error != 0)
2715 return (error);
2716
2717 /*
2718 * Retrieve limit.
2719 */
2720 if (req->oldptr != NULL) {
2721 PROC_LOCK(p);
2722 lim_rlimit_proc(p, which, &rlim);
2723 PROC_UNLOCK(p);
2724 }
2725 error = SYSCTL_OUT(req, &rlim, sizeof(rlim));
2726 if (error != 0)
2727 goto errout;
2728
2729 /*
2730 * Set limit.
2731 */
2732 if (req->newptr != NULL) {
2733 error = SYSCTL_IN(req, &rlim, sizeof(rlim));
2734 if (error == 0)
2735 error = kern_proc_setrlimit(curthread, p, which, &rlim);
2736 }
2737
2738 errout:
2739 PRELE(p);
2740 return (error);
2741 }
2742
2743 /*
2744 * This sysctl allows a process to retrieve ps_strings structure location of
2745 * another process.
2746 */
2747 static int
2748 sysctl_kern_proc_ps_strings(SYSCTL_HANDLER_ARGS)
2749 {
2750 int *name = (int *)arg1;
2751 u_int namelen = arg2;
2752 struct proc *p;
2753 vm_offset_t ps_strings;
2754 int error;
2755 #ifdef COMPAT_FREEBSD32
2756 uint32_t ps_strings32;
2757 #endif
2758
2759 if (namelen != 1)
2760 return (EINVAL);
2761
2762 error = pget((pid_t)name[0], PGET_CANDEBUG, &p);
2763 if (error != 0)
2764 return (error);
2765 #ifdef COMPAT_FREEBSD32
2766 if ((req->flags & SCTL_MASK32) != 0) {
2767 /*
2768 * We return 0 if the 32 bit emulation request is for a 64 bit
2769 * process.
2770 */
2771 ps_strings32 = SV_PROC_FLAG(p, SV_ILP32) != 0 ?
2772 PTROUT(p->p_sysent->sv_psstrings) : 0;
2773 PROC_UNLOCK(p);
2774 error = SYSCTL_OUT(req, &ps_strings32, sizeof(ps_strings32));
2775 return (error);
2776 }
2777 #endif
2778 ps_strings = p->p_sysent->sv_psstrings;
2779 PROC_UNLOCK(p);
2780 error = SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings));
2781 return (error);
2782 }
2783
2784 /*
2785 * This sysctl allows a process to retrieve umask of another process.
2786 */
2787 static int
2788 sysctl_kern_proc_umask(SYSCTL_HANDLER_ARGS)
2789 {
2790 int *name = (int *)arg1;
2791 u_int namelen = arg2;
2792 struct proc *p;
2793 int error;
2794 u_short fd_cmask;
2795
2796 if (namelen != 1)
2797 return (EINVAL);
2798
2799 error = pget((pid_t)name[0], PGET_WANTREAD, &p);
2800 if (error != 0)
2801 return (error);
2802
2803 FILEDESC_SLOCK(p->p_fd);
2804 fd_cmask = p->p_fd->fd_cmask;
2805 FILEDESC_SUNLOCK(p->p_fd);
2806 PRELE(p);
2807 error = SYSCTL_OUT(req, &fd_cmask, sizeof(fd_cmask));
2808 return (error);
2809 }
2810
2811 /*
2812 * This sysctl allows a process to set and retrieve binary osreldate of
2813 * another process.
2814 */
2815 static int
2816 sysctl_kern_proc_osrel(SYSCTL_HANDLER_ARGS)
2817 {
2818 int *name = (int *)arg1;
2819 u_int namelen = arg2;
2820 struct proc *p;
2821 int flags, error, osrel;
2822
2823 if (namelen != 1)
2824 return (EINVAL);
2825
2826 if (req->newptr != NULL && req->newlen != sizeof(osrel))
2827 return (EINVAL);
2828
2829 flags = PGET_HOLD | PGET_NOTWEXIT;
2830 if (req->newptr != NULL)
2831 flags |= PGET_CANDEBUG;
2832 else
2833 flags |= PGET_CANSEE;
2834 error = pget((pid_t)name[0], flags, &p);
2835 if (error != 0)
2836 return (error);
2837
2838 error = SYSCTL_OUT(req, &p->p_osrel, sizeof(p->p_osrel));
2839 if (error != 0)
2840 goto errout;
2841
2842 if (req->newptr != NULL) {
2843 error = SYSCTL_IN(req, &osrel, sizeof(osrel));
2844 if (error != 0)
2845 goto errout;
2846 if (osrel < 0) {
2847 error = EINVAL;
2848 goto errout;
2849 }
2850 p->p_osrel = osrel;
2851 }
2852 errout:
2853 PRELE(p);
2854 return (error);
2855 }
2856
/*
 * Report the signal trampoline address range of another process to a
 * debugger.
 */
static int
sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct kinfo_sigtramp kst;
	const struct sysentvec *sv;
	int error;
#ifdef COMPAT_FREEBSD32
	struct kinfo_sigtramp32 kst32;
#endif

	if (namelen != 1)
		return (EINVAL);

	error = pget((pid_t)name[0], PGET_CANDEBUG, &p);
	if (error != 0)
		return (error);
	sv = p->p_sysent;
#ifdef COMPAT_FREEBSD32
	if ((req->flags & SCTL_MASK32) != 0) {
		bzero(&kst32, sizeof(kst32));
		/* A 32-bit request for a 64-bit process reports zeros. */
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			if (sv->sv_sigcode_base != 0) {
				kst32.ksigtramp_start = sv->sv_sigcode_base;
				kst32.ksigtramp_end = sv->sv_sigcode_base +
				    *sv->sv_szsigcode;
			} else {
				/* Legacy layout: just below ps_strings. */
				kst32.ksigtramp_start = sv->sv_psstrings -
				    *sv->sv_szsigcode;
				kst32.ksigtramp_end = sv->sv_psstrings;
			}
		}
		PROC_UNLOCK(p);
		error = SYSCTL_OUT(req, &kst32, sizeof(kst32));
		return (error);
	}
#endif
	bzero(&kst, sizeof(kst));
	if (sv->sv_sigcode_base != 0) {
		kst.ksigtramp_start = (char *)sv->sv_sigcode_base;
		kst.ksigtramp_end = (char *)sv->sv_sigcode_base +
		    *sv->sv_szsigcode;
	} else {
		/* Legacy layout: trampoline sits just below ps_strings. */
		kst.ksigtramp_start = (char *)sv->sv_psstrings -
		    *sv->sv_szsigcode;
		kst.ksigtramp_end = (char *)sv->sv_psstrings;
	}
	PROC_UNLOCK(p);
	error = SYSCTL_OUT(req, &kst, sizeof(kst));
	return (error);
}
2910
/* Root of the kern.proc sysctl tree; everything below hangs off it. */
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT|
    CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc",
    "Return entire process table");

/*
 * Process-table selectors: each node filters the process list by the
 * criterion named by its KERN_PROC_* id; all share sysctl_kern_proc.
 */
static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc, "Return process table, no threads");

/* Per-process attribute queries, each with its own handler. */
static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_CAPWR | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
	sysctl_kern_proc_args, "Process argument list");

static SYSCTL_NODE(_kern_proc, KERN_PROC_ENV, env, CTLFLAG_RD | CTLFLAG_MPSAFE,
	sysctl_kern_proc_env, "Process environment");

static SYSCTL_NODE(_kern_proc, KERN_PROC_AUXV, auxv, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_auxv, "Process ELF auxiliary vector");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name,
	"Process syscall vector name (ABI type)");

/*
 * The *_td variants (KERN_PROC_INC_THREAD) report one record per
 * thread rather than one per process.
 */
static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
	sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");

static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
	CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc,
	"Return process table, no threads");

#ifdef COMPAT_FREEBSD7
static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries");
#endif

static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries");

#if defined(STACK) || defined(DDB)
static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks");
#endif

static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_groups, "Process groups");

static SYSCTL_NODE(_kern_proc, KERN_PROC_RLIMIT, rlimit, CTLFLAG_RW |
	CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_rlimit,
	"Process resource limits");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PS_STRINGS, ps_strings, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_ps_strings,
	"Process ps_strings location");

static SYSCTL_NODE(_kern_proc, KERN_PROC_UMASK, umask, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_umask, "Process umask");

static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW |
	CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_osrel,
	"Process binary osreldate");

static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD |
	CTLFLAG_MPSAFE, sysctl_kern_proc_sigtramp,
	"Process signal trampoline location");

/*
 * Generation count of the allproc list; used by stop_all_proc() to
 * detect processes forked during its scan.
 */
int allproc_gen;
3025
/*
 * stop_all_proc() stops every process that has a usermode component,
 * except the current process, for obvious reasons.  This makes it
 * somewhat unreliable when invoked from a multithreaded process.  The
 * service must not be user-callable anyway.
 */
void
stop_all_proc(void)
{
	struct proc *cp, *p;
	int r, gen;
	bool restart, seen_stopped, seen_exiting, stopped_some;

	cp = curproc;
allproc_loop:
	sx_xlock(&allproc_lock);
	gen = allproc_gen;
	seen_exiting = seen_stopped = stopped_some = restart = false;
	/*
	 * Use curproc itself as an iteration marker in the allproc
	 * list: it is re-linked after each visited process, so the
	 * allproc lock can be dropped without losing our place.
	 */
	LIST_REMOVE(cp, p_list);
	LIST_INSERT_HEAD(&allproc, cp, p_list);
	for (;;) {
		p = LIST_NEXT(cp, p_list);
		if (p == NULL)
			break;
		LIST_REMOVE(cp, p_list);
		LIST_INSERT_AFTER(p, cp, p_list);
		PROC_LOCK(p);
		/* Skip kernel processes and those already totally stopped. */
		if ((p->p_flag & (P_KPROC | P_SYSTEM | P_TOTAL_STOP)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		if ((p->p_flag & P_WEXIT) != 0) {
			seen_exiting = true;
			PROC_UNLOCK(p);
			continue;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			/*
			 * Stopped processes are tolerated when there
			 * are no other processes which might continue
			 * them.  P_STOPPED_SINGLE but not
			 * P_TOTAL_STOP process still has at least one
			 * thread running.
			 */
			seen_stopped = true;
			PROC_UNLOCK(p);
			continue;
		}
		/* Drop allproc lock; thread_single() may sleep. */
		_PHOLD(p);
		sx_xunlock(&allproc_lock);
		r = thread_single(p, SINGLE_ALLPROC);
		if (r != 0)
			restart = true;
		else
			stopped_some = true;
		_PRELE(p);
		PROC_UNLOCK(p);
		sx_xlock(&allproc_lock);
	}
	/* Catch forked children we did not see in iteration. */
	if (gen != allproc_gen)
		restart = true;
	sx_xunlock(&allproc_lock);
	/* Rescan until a pass finds nothing left to stop or wait for. */
	if (restart || stopped_some || seen_exiting || seen_stopped) {
		kern_yield(PRI_USER);
		goto allproc_loop;
	}
}
3094
/*
 * Undo stop_all_proc(): resume every process carrying P_TOTAL_STOP.
 */
void
resume_all_proc(void)
{
	struct proc *cp, *p;

	cp = curproc;
	sx_xlock(&allproc_lock);
again:
	/*
	 * As in stop_all_proc(), curproc serves as an iteration marker
	 * so the allproc lock can be dropped mid-scan.
	 */
	LIST_REMOVE(cp, p_list);
	LIST_INSERT_HEAD(&allproc, cp, p_list);
	for (;;) {
		p = LIST_NEXT(cp, p_list);
		if (p == NULL)
			break;
		LIST_REMOVE(cp, p_list);
		LIST_INSERT_AFTER(p, cp, p_list);
		PROC_LOCK(p);
		if ((p->p_flag & P_TOTAL_STOP) != 0) {
			/* Drop allproc lock; thread_single_end() may sleep. */
			sx_xunlock(&allproc_lock);
			_PHOLD(p);
			thread_single_end(p, SINGLE_ALLPROC);
			_PRELE(p);
			PROC_UNLOCK(p);
			sx_xlock(&allproc_lock);
		} else {
			PROC_UNLOCK(p);
		}
	}
	/* Did the loop above miss any stopped process? */
	LIST_FOREACH(p, &allproc, p_list) {
		/* No need for proc lock. */
		if ((p->p_flag & P_TOTAL_STOP) != 0)
			goto again;
	}
	sx_xunlock(&allproc_lock);
}
3131
/* #define TOTAL_STOP_DEBUG 1 */
#ifdef TOTAL_STOP_DEBUG
/*
 * Debug-only knob: writing a non-zero value to debug.stop_all_proc
 * stops all processes and the syncer, then spins until ap_resume
 * becomes non-zero before resuming everything.  The oid exports
 * &ap_resume as its arg1, so ap_resume is presumably flipped
 * externally (e.g. from a debugger) — verify before relying on it.
 */
volatile static int ap_resume;
#include <sys/mount.h>

static int
sysctl_debug_stop_all_proc(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = 0;
	ap_resume = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val != 0) {
		stop_all_proc();
		syncer_suspend();
		/* Busy-wait; the volatile qualifier forces re-reads. */
		while (ap_resume == 0)
			;
		syncer_resume();
		resume_all_proc();
	}
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, stop_all_proc, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE, __DEVOLATILE(int *, &ap_resume), 0,
    sysctl_debug_stop_all_proc, "I",
    "");
#endif
Cache object: 72abd9d0c12dd37844fe874e51767f5e
|