FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_proc.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/8.4/sys/kern/kern_proc.c 243583 2012-11-27 01:30:12Z mjg $");
   34 
   35 #include "opt_compat.h"
   36 #include "opt_ddb.h"
   37 #include "opt_kdtrace.h"
   38 #include "opt_ktrace.h"
   39 #include "opt_kstack_pages.h"
   40 #include "opt_stack.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kernel.h>
   45 #include <sys/limits.h>
   46 #include <sys/lock.h>
   47 #include <sys/malloc.h>
   48 #include <sys/mman.h>
   49 #include <sys/mount.h>
   50 #include <sys/mutex.h>
   51 #include <sys/proc.h>
   52 #include <sys/refcount.h>
   53 #include <sys/sbuf.h>
   54 #include <sys/sysent.h>
   55 #include <sys/sched.h>
   56 #include <sys/smp.h>
   57 #include <sys/stack.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/filedesc.h>
   60 #include <sys/tty.h>
   61 #include <sys/signalvar.h>
   62 #include <sys/sdt.h>
   63 #include <sys/sx.h>
   64 #include <sys/user.h>
   65 #include <sys/jail.h>
   66 #include <sys/vnode.h>
   67 #include <sys/eventhandler.h>
   68 
   69 #ifdef DDB
   70 #include <ddb/ddb.h>
   71 #endif
   72 
   73 #include <vm/vm.h>
   74 #include <vm/vm_extern.h>
   75 #include <vm/pmap.h>
   76 #include <vm/vm_map.h>
   77 #include <vm/vm_object.h>
   78 #include <vm/vm_page.h>
   79 #include <vm/uma.h>
   80 
   81 #ifdef COMPAT_FREEBSD32
   82 #include <compat/freebsd32/freebsd32.h>
   83 #include <compat/freebsd32/freebsd32_util.h>
   84 #endif
   85 
   86 SDT_PROVIDER_DEFINE(proc);
   87 SDT_PROBE_DEFINE(proc, kernel, ctor, entry, entry);
   88 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 0, "struct proc *");
   89 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 1, "int");
   90 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 2, "void *");
   91 SDT_PROBE_ARGTYPE(proc, kernel, ctor, entry, 3, "int");
   92 SDT_PROBE_DEFINE(proc, kernel, ctor, return, return);
   93 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 0, "struct proc *");
   94 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 1, "int");
   95 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 2, "void *");
   96 SDT_PROBE_ARGTYPE(proc, kernel, ctor, return, 3, "int");
   97 SDT_PROBE_DEFINE(proc, kernel, dtor, entry, entry);
   98 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 0, "struct proc *");
   99 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 1, "int");
  100 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 2, "void *");
  101 SDT_PROBE_ARGTYPE(proc, kernel, dtor, entry, 3, "struct thread *");
  102 SDT_PROBE_DEFINE(proc, kernel, dtor, return, return);
  103 SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 0, "struct proc *");
  104 SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 1, "int");
  105 SDT_PROBE_ARGTYPE(proc, kernel, dtor, return, 2, "void *");
  106 SDT_PROBE_DEFINE(proc, kernel, init, entry, entry);
  107 SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 0, "struct proc *");
  108 SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 1, "int");
  109 SDT_PROBE_ARGTYPE(proc, kernel, init, entry, 2, "int");
  110 SDT_PROBE_DEFINE(proc, kernel, init, return, return);
  111 SDT_PROBE_ARGTYPE(proc, kernel, init, return, 0, "struct proc *");
  112 SDT_PROBE_ARGTYPE(proc, kernel, init, return, 1, "int");
  113 SDT_PROBE_ARGTYPE(proc, kernel, init, return, 2, "int");
  114 
  115 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
  116 MALLOC_DEFINE(M_SESSION, "session", "session header");
  117 static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
  118 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
  119 
  120 static void doenterpgrp(struct proc *, struct pgrp *);
  121 static void orphanpg(struct pgrp *pg);
  122 static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp);
  123 static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
  124 static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
  125     int preferthread);
  126 static void pgadjustjobc(struct pgrp *pgrp, int entering);
  127 static void pgdelete(struct pgrp *);
  128 static int proc_ctor(void *mem, int size, void *arg, int flags);
  129 static void proc_dtor(void *mem, int size, void *arg);
  130 static int proc_init(void *mem, int size, int flags);
  131 static void proc_fini(void *mem, int size);
  132 static void pargs_free(struct pargs *pa);
  133 
  134 /*
  135  * Other process lists
  136  */
  137 struct pidhashhead *pidhashtbl;
  138 u_long pidhash;
  139 struct pgrphashhead *pgrphashtbl;
  140 u_long pgrphash;
  141 struct proclist allproc;
  142 struct proclist zombproc;
  143 struct sx allproc_lock;
  144 struct sx proctree_lock;
  145 struct mtx ppeers_lock;
  146 uma_zone_t proc_zone;
  147 
  148 int kstack_pages = KSTACK_PAGES;
  149 SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
  150     "Kernel stack size in pages");
  151 
  152 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
  153 #ifdef COMPAT_FREEBSD32
  154 CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE);
  155 #endif
  156 
  157 /*
  158  * Initialize global process hashing structures.
  159  */
  160 void
  161 procinit()
  162 {
  163 
  164         sx_init(&allproc_lock, "allproc");
  165         sx_init(&proctree_lock, "proctree");
  166         mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
  167         LIST_INIT(&allproc);
  168         LIST_INIT(&zombproc);
  169         pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
  170         pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
  171         proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
  172             proc_ctor, proc_dtor, proc_init, proc_fini,
  173             UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  174         uihashinit();
  175 }
  176 
  177 /*
  178  * Prepare a proc for use.
  179  */
  180 static int
  181 proc_ctor(void *mem, int size, void *arg, int flags)
  182 {
  183         struct proc *p;
  184 
  185         p = (struct proc *)mem;
  186         SDT_PROBE(proc, kernel, ctor , entry, p, size, arg, flags, 0);
  187         EVENTHANDLER_INVOKE(process_ctor, p);
  188         SDT_PROBE(proc, kernel, ctor , return, p, size, arg, flags, 0);
  189         return (0);
  190 }
  191 
  192 /*
  193  * Reclaim a proc after use.
  194  */
  195 static void
  196 proc_dtor(void *mem, int size, void *arg)
  197 {
  198         struct proc *p;
  199         struct thread *td;
  200 
  201         /* INVARIANTS checks go here */
  202         p = (struct proc *)mem;
  203         td = FIRST_THREAD_IN_PROC(p);
  204         SDT_PROBE(proc, kernel, dtor, entry, p, size, arg, td, 0);
  205         if (td != NULL) {
  206 #ifdef INVARIANTS
  207                 KASSERT((p->p_numthreads == 1),
  208                     ("bad number of threads in exiting process"));
  209                 KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
  210 #endif
  211                 /* Free all OSD associated to this thread. */
  212                 osd_thread_exit(td);
  213         }
  214         EVENTHANDLER_INVOKE(process_dtor, p);
  215         if (p->p_ksi != NULL)
  216                 KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
  217         SDT_PROBE(proc, kernel, dtor, return, p, size, arg, 0, 0);
  218 }
  219 
  220 /*
  221  * Initialize type-stable parts of a proc (when newly created).
  222  */
  223 static int
  224 proc_init(void *mem, int size, int flags)
  225 {
  226         struct proc *p;
  227 
  228         p = (struct proc *)mem;
  229         SDT_PROBE(proc, kernel, init, entry, p, size, flags, 0, 0);
  230         p->p_sched = (struct p_sched *)&p[1];
  231         bzero(&p->p_mtx, sizeof(struct mtx));
  232         mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
  233         mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
  234         cv_init(&p->p_pwait, "ppwait");
  235         cv_init(&p->p_dbgwait, "dbgwait");
  236         TAILQ_INIT(&p->p_threads);           /* all threads in proc */
  237         EVENTHANDLER_INVOKE(process_init, p);
  238         p->p_stats = pstats_alloc();
  239         SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
  240         return (0);
  241 }
  242 
  243 /*
  244  * UMA should ensure that this function is never called.
  245  * Freeing a proc structure would violate type stability.
  246  */
  247 static void
  248 proc_fini(void *mem, int size)
  249 {
  250 #ifdef notnow
  251         struct proc *p;
  252 
  253         p = (struct proc *)mem;
  254         EVENTHANDLER_INVOKE(process_fini, p);
  255         pstats_free(p->p_stats);
  256         thread_free(FIRST_THREAD_IN_PROC(p));
  257         mtx_destroy(&p->p_mtx);
  258         if (p->p_ksi != NULL)
  259                 ksiginfo_free(p->p_ksi);
  260 #else
  261         panic("proc reclaimed");
  262 #endif
  263 }
  264 
  265 /*
  266  * Is p an inferior of the current process?
  267  */
  268 int
  269 inferior(p)
  270         register struct proc *p;
  271 {
  272 
  273         sx_assert(&proctree_lock, SX_LOCKED);
  274         for (; p != curproc; p = p->p_pptr)
  275                 if (p->p_pid == 0)
  276                         return (0);
  277         return (1);
  278 }
  279 
  280 /*
  281  * Locate a process by number; return only "live" processes -- i.e., neither
  282  * zombies nor newly born but incompletely initialized processes.  By not
  283  * returning processes in the PRS_NEW state, we allow callers to avoid
  284  * testing for that condition to avoid dereferencing p_ucred, et al.
  285  */
  286 struct proc *
  287 pfind(pid)
  288         register pid_t pid;
  289 {
  290         register struct proc *p;
  291 
  292         sx_slock(&allproc_lock);
  293         LIST_FOREACH(p, PIDHASH(pid), p_hash)
  294                 if (p->p_pid == pid) {
  295                         PROC_LOCK(p);
  296                         if (p->p_state == PRS_NEW) {
  297                                 PROC_UNLOCK(p);
  298                                 p = NULL;
  299                         }
  300                         break;
  301                 }
  302         sx_sunlock(&allproc_lock);
  303         return (p);
  304 }
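
/*
 * Editor's note: an illustrative sketch, not part of kern_proc.c.  It shows
 * the pfind() contract documented above: the process, if found, is returned
 * with its PROC_LOCK held, so the caller must drop it.  The function name is
 * hypothetical.
 */
#if 0
static int
example_signal_pid(pid_t pid, int sig)
{
        struct proc *p;

        p = pfind(pid);                 /* returned locked, or NULL */
        if (p == NULL)
                return (ESRCH);         /* no live process with this pid */
        psignal(p, sig);                /* act while holding the proc lock */
        PROC_UNLOCK(p);
        return (0);
}
#endif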
  305 
  306 static struct proc *
  307 pfind_tid(pid_t tid)
  308 {
  309         struct proc *p;
  310         struct thread *td;
  311 
  312         sx_slock(&allproc_lock);
  313         FOREACH_PROC_IN_SYSTEM(p) {
  314                 PROC_LOCK(p);
  315                 if (p->p_state == PRS_NEW) {
  316                         PROC_UNLOCK(p);
  317                         continue;
  318                 }
  319                 FOREACH_THREAD_IN_PROC(p, td) {
  320                         if (td->td_tid == tid)
  321                                 goto found;
  322                 }
  323                 PROC_UNLOCK(p);
  324         }
  325 found:
  326         sx_sunlock(&allproc_lock);
  327         return (p);
  328 }
  329 
  330 /*
  331  * Locate a process group by number.
  332  * The caller must hold proctree_lock.
  333  */
  334 struct pgrp *
  335 pgfind(pgid)
  336         register pid_t pgid;
  337 {
  338         register struct pgrp *pgrp;
  339 
  340         sx_assert(&proctree_lock, SX_LOCKED);
  341 
  342         LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
  343                 if (pgrp->pg_id == pgid) {
  344                         PGRP_LOCK(pgrp);
  345                         return (pgrp);
  346                 }
  347         }
  348         return (NULL);
  349 }
  350 
  351 /*
  352  * Locate process and do additional manipulations, depending on flags.
  353  */
  354 int
  355 pget(pid_t pid, int flags, struct proc **pp)
  356 {
  357         struct proc *p;
  358         int error;
  359 
  360         if (pid <= PID_MAX)
  361                 p = pfind(pid);
  362         else if ((flags & PGET_NOTID) == 0)
  363                 p = pfind_tid(pid);
  364         else
  365                 p = NULL;
  366         if (p == NULL)
  367                 return (ESRCH);
  368         if ((flags & PGET_CANSEE) != 0) {
  369                 error = p_cansee(curthread, p);
  370                 if (error != 0)
  371                         goto errout;
  372         }
  373         if ((flags & PGET_CANDEBUG) != 0) {
  374                 error = p_candebug(curthread, p);
  375                 if (error != 0)
  376                         goto errout;
  377         }
  378         if ((flags & PGET_ISCURRENT) != 0 && curproc != p) {
  379                 error = EPERM;
  380                 goto errout;
  381         }
  382         if ((flags & PGET_NOTWEXIT) != 0 && (p->p_flag & P_WEXIT) != 0) {
  383                 error = ESRCH;
  384                 goto errout;
  385         }
  386         if ((flags & PGET_NOTINEXEC) != 0 && (p->p_flag & P_INEXEC) != 0) {
  387                 /*
  388                  * XXXRW: Not clear ESRCH is the right error during proc
  389                  * execve().
  390                  */
  391                 error = ESRCH;
  392                 goto errout;
  393         }
  394         if ((flags & PGET_HOLD) != 0) {
  395                 _PHOLD(p);
  396                 PROC_UNLOCK(p);
  397         }
  398         *pp = p;
  399         return (0);
  400 errout:
  401         PROC_UNLOCK(p);
  402         return (error);
  403 }
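
/*
 * Editor's note: an illustrative sketch, not part of kern_proc.c.  A typical
 * caller (compare the sysctl handlers later in this file) passes PGET_* flags
 * selecting the checks it needs; unless PGET_HOLD is used, the process is
 * returned locked on success and the caller unlocks it.  Names are
 * hypothetical.
 */
#if 0
static int
example_visible_lookup(pid_t pid, struct proc **pp)
{
        int error;

        error = pget(pid, PGET_CANSEE | PGET_NOTINEXEC, pp);
        if (error != 0)
                return (error);
        /* ... inspect *pp while it is locked ... */
        PROC_UNLOCK(*pp);
        return (0);
}
#endif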
  404 
  405 /*
  406  * Create a new process group.
  407  * pgid must be equal to the pid of p.
  408  * Begin a new session if required.
  409  */
  410 int
  411 enterpgrp(p, pgid, pgrp, sess)
  412         register struct proc *p;
  413         pid_t pgid;
  414         struct pgrp *pgrp;
  415         struct session *sess;
  416 {
  417 
  418         sx_assert(&proctree_lock, SX_XLOCKED);
  419 
  420         KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
  421         KASSERT(p->p_pid == pgid,
  422             ("enterpgrp: new pgrp and pid != pgid"));
  423         KASSERT(pgfind(pgid) == NULL,
  424             ("enterpgrp: pgrp with pgid exists"));
  425         KASSERT(!SESS_LEADER(p),
  426             ("enterpgrp: session leader attempted setpgrp"));
  427 
  428         mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
  429 
  430         if (sess != NULL) {
  431                 /*
  432                  * new session
  433                  */
  434                 mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
  435                 PROC_LOCK(p);
  436                 p->p_flag &= ~P_CONTROLT;
  437                 PROC_UNLOCK(p);
  438                 PGRP_LOCK(pgrp);
  439                 sess->s_leader = p;
  440                 sess->s_sid = p->p_pid;
  441                 refcount_init(&sess->s_count, 1);
  442                 sess->s_ttyvp = NULL;
  443                 sess->s_ttyp = NULL;
  444                 bcopy(p->p_session->s_login, sess->s_login,
  445                             sizeof(sess->s_login));
  446                 pgrp->pg_session = sess;
  447                 KASSERT(p == curproc,
  448                     ("enterpgrp: mksession and p != curproc"));
  449         } else {
  450                 pgrp->pg_session = p->p_session;
  451                 sess_hold(pgrp->pg_session);
  452                 PGRP_LOCK(pgrp);
  453         }
  454         pgrp->pg_id = pgid;
  455         LIST_INIT(&pgrp->pg_members);
  456 
  457         /*
  458          * As we have an exclusive lock of proctree_lock,
  459          * this should not deadlock.
  460          */
  461         LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
  462         pgrp->pg_jobc = 0;
  463         SLIST_INIT(&pgrp->pg_sigiolst);
  464         PGRP_UNLOCK(pgrp);
  465 
  466         doenterpgrp(p, pgrp);
  467 
  468         return (0);
  469 }
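
/*
 * Editor's note: a hedged sketch, not part of kern_proc.c, of the caller
 * contract for enterpgrp(): the caller pre-allocates the pgrp (and session,
 * when starting one), holds proctree_lock exclusively, and only calls in
 * when p is not already a group leader.  This is only the rough shape of a
 * setsid()-style caller; the real system-call code differs in details.
 */
#if 0
static int
example_new_session(struct proc *p)
{
        struct pgrp *newpgrp;
        struct session *newsess;
        int error;

        newpgrp = malloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
        newsess = malloc(sizeof(struct session), M_SESSION, M_WAITOK | M_ZERO);
        sx_xlock(&proctree_lock);
        if (p->p_pgrp->pg_id == p->p_pid) {
                /* Already a process group leader; cannot start a session. */
                error = EPERM;
                free(newpgrp, M_PGRP);
                free(newsess, M_SESSION);
        } else {
                error = enterpgrp(p, p->p_pid, newpgrp, newsess);
        }
        sx_xunlock(&proctree_lock);
        return (error);
}
#endif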
  470 
  471 /*
  472  * Move p to an existing process group
  473  */
  474 int
  475 enterthispgrp(p, pgrp)
  476         register struct proc *p;
  477         struct pgrp *pgrp;
  478 {
  479 
  480         sx_assert(&proctree_lock, SX_XLOCKED);
  481         PROC_LOCK_ASSERT(p, MA_NOTOWNED);
  482         PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
  483         PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
  484         SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
  485         KASSERT(pgrp->pg_session == p->p_session,
  486                 ("%s: pgrp's session %p, p->p_session %p.\n",
  487                 __func__,
  488                 pgrp->pg_session,
  489                 p->p_session));
  490         KASSERT(pgrp != p->p_pgrp,
  491                 ("%s: p belongs to pgrp.", __func__));
  492 
  493         doenterpgrp(p, pgrp);
  494 
  495         return (0);
  496 }
  497 
  498 /*
  499  * Move p to a process group
  500  */
  501 static void
  502 doenterpgrp(p, pgrp)
  503         struct proc *p;
  504         struct pgrp *pgrp;
  505 {
  506         struct pgrp *savepgrp;
  507 
  508         sx_assert(&proctree_lock, SX_XLOCKED);
  509         PROC_LOCK_ASSERT(p, MA_NOTOWNED);
  510         PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
  511         PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
  512         SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
  513 
  514         savepgrp = p->p_pgrp;
  515 
  516         /*
  517          * Adjust eligibility of affected pgrps to participate in job control.
  518          * Increment eligibility counts before decrementing, otherwise we
  519          * could reach 0 spuriously during the first call.
  520          */
  521         fixjobc(p, pgrp, 1);
  522         fixjobc(p, p->p_pgrp, 0);
  523 
  524         PGRP_LOCK(pgrp);
  525         PGRP_LOCK(savepgrp);
  526         PROC_LOCK(p);
  527         LIST_REMOVE(p, p_pglist);
  528         p->p_pgrp = pgrp;
  529         PROC_UNLOCK(p);
  530         LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
  531         PGRP_UNLOCK(savepgrp);
  532         PGRP_UNLOCK(pgrp);
  533         if (LIST_EMPTY(&savepgrp->pg_members))
  534                 pgdelete(savepgrp);
  535 }
  536 
  537 /*
  538  * remove process from process group
  539  */
  540 int
  541 leavepgrp(p)
  542         register struct proc *p;
  543 {
  544         struct pgrp *savepgrp;
  545 
  546         sx_assert(&proctree_lock, SX_XLOCKED);
  547         savepgrp = p->p_pgrp;
  548         PGRP_LOCK(savepgrp);
  549         PROC_LOCK(p);
  550         LIST_REMOVE(p, p_pglist);
  551         p->p_pgrp = NULL;
  552         PROC_UNLOCK(p);
  553         PGRP_UNLOCK(savepgrp);
  554         if (LIST_EMPTY(&savepgrp->pg_members))
  555                 pgdelete(savepgrp);
  556         return (0);
  557 }
  558 
  559 /*
  560  * delete a process group
  561  */
  562 static void
  563 pgdelete(pgrp)
  564         register struct pgrp *pgrp;
  565 {
  566         struct session *savesess;
  567         struct tty *tp;
  568 
  569         sx_assert(&proctree_lock, SX_XLOCKED);
  570         PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
  571         SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
  572 
  573         /*
  574          * Reset any sigio structures pointing to us as a result of
  575          * F_SETOWN with our pgid.
  576          */
  577         funsetownlst(&pgrp->pg_sigiolst);
  578 
  579         PGRP_LOCK(pgrp);
  580         tp = pgrp->pg_session->s_ttyp;
  581         LIST_REMOVE(pgrp, pg_hash);
  582         savesess = pgrp->pg_session;
  583         PGRP_UNLOCK(pgrp);
  584 
  585         /* Remove the reference to the pgrp before deallocating it. */
  586         if (tp != NULL) {
  587                 tty_lock(tp);
  588                 tty_rel_pgrp(tp, pgrp);
  589         }
  590 
  591         mtx_destroy(&pgrp->pg_mtx);
  592         free(pgrp, M_PGRP);
  593         sess_release(savesess);
  594 }
  595 
  596 static void
  597 pgadjustjobc(pgrp, entering)
  598         struct pgrp *pgrp;
  599         int entering;
  600 {
  601 
  602         PGRP_LOCK(pgrp);
  603         if (entering)
  604                 pgrp->pg_jobc++;
  605         else {
  606                 --pgrp->pg_jobc;
  607                 if (pgrp->pg_jobc == 0)
  608                         orphanpg(pgrp);
  609         }
  610         PGRP_UNLOCK(pgrp);
  611 }
  612 
  613 /*
  614  * Adjust pgrp jobc counters when specified process changes process group.
  615  * We count the number of processes in each process group that "qualify"
  616  * the group for terminal job control (those with a parent in a different
  617  * process group of the same session).  If that count reaches zero, the
  618  * process group becomes orphaned.  Check both the specified process'
  619  * process group and that of its children.
  620  * entering == 0 => p is leaving specified group.
  621  * entering == 1 => p is entering specified group.
  622  */
  623 void
  624 fixjobc(p, pgrp, entering)
  625         register struct proc *p;
  626         register struct pgrp *pgrp;
  627         int entering;
  628 {
  629         register struct pgrp *hispgrp;
  630         register struct session *mysession;
  631 
  632         sx_assert(&proctree_lock, SX_LOCKED);
  633         PROC_LOCK_ASSERT(p, MA_NOTOWNED);
  634         PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
  635         SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
  636 
  637         /*
  638          * Check p's parent to see whether p qualifies its own process
  639          * group; if so, adjust count for p's process group.
  640          */
  641         mysession = pgrp->pg_session;
  642         if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
  643             hispgrp->pg_session == mysession)
  644                 pgadjustjobc(pgrp, entering);
  645 
  646         /*
  647          * Check this process' children to see whether they qualify
  648          * their process groups; if so, adjust counts for children's
  649          * process groups.
  650          */
  651         LIST_FOREACH(p, &p->p_children, p_sibling) {
  652                 hispgrp = p->p_pgrp;
  653                 if (hispgrp == pgrp ||
  654                     hispgrp->pg_session != mysession)
  655                         continue;
  656                 PROC_LOCK(p);
  657                 if (p->p_state == PRS_ZOMBIE) {
  658                         PROC_UNLOCK(p);
  659                         continue;
  660                 }
  661                 PROC_UNLOCK(p);
  662                 pgadjustjobc(hispgrp, entering);
  663         }
  664 }
  665 
  666 /*
  667  * A process group has become orphaned;
  668  * if there are any stopped processes in the group,
   669  * hang up all processes in that group.
  670  */
  671 static void
  672 orphanpg(pg)
  673         struct pgrp *pg;
  674 {
  675         register struct proc *p;
  676 
  677         PGRP_LOCK_ASSERT(pg, MA_OWNED);
  678 
  679         LIST_FOREACH(p, &pg->pg_members, p_pglist) {
  680                 PROC_LOCK(p);
  681                 if (P_SHOULDSTOP(p)) {
  682                         PROC_UNLOCK(p);
  683                         LIST_FOREACH(p, &pg->pg_members, p_pglist) {
  684                                 PROC_LOCK(p);
  685                                 psignal(p, SIGHUP);
  686                                 psignal(p, SIGCONT);
  687                                 PROC_UNLOCK(p);
  688                         }
  689                         return;
  690                 }
  691                 PROC_UNLOCK(p);
  692         }
  693 }
  694 
  695 void
  696 sess_hold(struct session *s)
  697 {
  698 
  699         refcount_acquire(&s->s_count);
  700 }
  701 
  702 void
  703 sess_release(struct session *s)
  704 {
  705 
  706         if (refcount_release(&s->s_count)) {
  707                 if (s->s_ttyp != NULL) {
  708                         tty_lock(s->s_ttyp);
  709                         tty_rel_sess(s->s_ttyp, s);
  710                 }
  711                 mtx_destroy(&s->s_mtx);
  712                 free(s, M_SESSION);
  713         }
  714 }
  715 
  716 #include "opt_ddb.h"
  717 #ifdef DDB
  718 #include <ddb/ddb.h>
  719 
  720 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
  721 {
  722         register struct pgrp *pgrp;
  723         register struct proc *p;
  724         register int i;
  725 
  726         for (i = 0; i <= pgrphash; i++) {
  727                 if (!LIST_EMPTY(&pgrphashtbl[i])) {
  728                         printf("\tindx %d\n", i);
  729                         LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
  730                                 printf(
  731                         "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
  732                                     (void *)pgrp, (long)pgrp->pg_id,
  733                                     (void *)pgrp->pg_session,
  734                                     pgrp->pg_session->s_count,
  735                                     (void *)LIST_FIRST(&pgrp->pg_members));
  736                                 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
  737                                         printf("\t\tpid %ld addr %p pgrp %p\n", 
  738                                             (long)p->p_pid, (void *)p,
  739                                             (void *)p->p_pgrp);
  740                                 }
  741                         }
  742                 }
  743         }
  744 }
  745 #endif /* DDB */
  746 
  747 /*
  748  * Calculate the kinfo_proc members which contain process-wide
   749  * information.
  750  * Must be called with the target process locked.
  751  */
  752 static void
  753 fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp)
  754 {
  755         struct thread *td;
  756 
  757         PROC_LOCK_ASSERT(p, MA_OWNED);
  758 
  759         kp->ki_estcpu = 0;
  760         kp->ki_pctcpu = 0;
  761         FOREACH_THREAD_IN_PROC(p, td) {
  762                 thread_lock(td);
  763                 kp->ki_pctcpu += sched_pctcpu(td);
  764                 kp->ki_estcpu += td->td_estcpu;
  765                 thread_unlock(td);
  766         }
  767 }
  768 
  769 /*
  770  * Clear kinfo_proc and fill in any information that is common
  771  * to all threads in the process.
  772  * Must be called with the target process locked.
  773  */
  774 static void
  775 fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
  776 {
  777         struct thread *td0;
  778         struct tty *tp;
  779         struct session *sp;
  780         struct ucred *cred;
  781         struct sigacts *ps;
  782 
  783         PROC_LOCK_ASSERT(p, MA_OWNED);
  784         bzero(kp, sizeof(*kp));
  785 
  786         kp->ki_structsize = sizeof(*kp);
  787         kp->ki_paddr = p;
  788         kp->ki_addr =/* p->p_addr; */0; /* XXX */
  789         kp->ki_args = p->p_args;
  790         kp->ki_textvp = p->p_textvp;
  791 #ifdef KTRACE
  792         kp->ki_tracep = p->p_tracevp;
  793         kp->ki_traceflag = p->p_traceflag;
  794 #endif
  795         kp->ki_fd = p->p_fd;
  796         kp->ki_vmspace = p->p_vmspace;
  797         kp->ki_flag = p->p_flag;
  798         cred = p->p_ucred;
  799         if (cred) {
  800                 kp->ki_uid = cred->cr_uid;
  801                 kp->ki_ruid = cred->cr_ruid;
  802                 kp->ki_svuid = cred->cr_svuid;
  803                 kp->ki_cr_flags = cred->cr_flags;
  804                 /* XXX bde doesn't like KI_NGROUPS */
  805                 if (cred->cr_ngroups > KI_NGROUPS) {
  806                         kp->ki_ngroups = KI_NGROUPS;
  807                         kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
  808                 } else
  809                         kp->ki_ngroups = cred->cr_ngroups;
  810                 bcopy(cred->cr_groups, kp->ki_groups,
  811                     kp->ki_ngroups * sizeof(gid_t));
  812                 kp->ki_rgid = cred->cr_rgid;
  813                 kp->ki_svgid = cred->cr_svgid;
  814                 /* If jailed(cred), emulate the old P_JAILED flag. */
  815                 if (jailed(cred)) {
  816                         kp->ki_flag |= P_JAILED;
  817                         /* If inside the jail, use 0 as a jail ID. */
  818                         if (cred->cr_prison != curthread->td_ucred->cr_prison)
  819                                 kp->ki_jid = cred->cr_prison->pr_id;
  820                 }
  821         }
  822         ps = p->p_sigacts;
  823         if (ps) {
  824                 mtx_lock(&ps->ps_mtx);
  825                 kp->ki_sigignore = ps->ps_sigignore;
  826                 kp->ki_sigcatch = ps->ps_sigcatch;
  827                 mtx_unlock(&ps->ps_mtx);
  828         }
  829         if (p->p_state != PRS_NEW &&
  830             p->p_state != PRS_ZOMBIE &&
  831             p->p_vmspace != NULL) {
  832                 struct vmspace *vm = p->p_vmspace;
  833 
  834                 kp->ki_size = vm->vm_map.size;
  835                 kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
  836                 FOREACH_THREAD_IN_PROC(p, td0) {
  837                         if (!TD_IS_SWAPPED(td0))
  838                                 kp->ki_rssize += td0->td_kstack_pages;
  839                 }
  840                 kp->ki_swrss = vm->vm_swrss;
  841                 kp->ki_tsize = vm->vm_tsize;
  842                 kp->ki_dsize = vm->vm_dsize;
  843                 kp->ki_ssize = vm->vm_ssize;
  844         } else if (p->p_state == PRS_ZOMBIE)
  845                 kp->ki_stat = SZOMB;
  846         if (kp->ki_flag & P_INMEM)
  847                 kp->ki_sflag = PS_INMEM;
  848         else
  849                 kp->ki_sflag = 0;
  850         /* Calculate legacy swtime as seconds since 'swtick'. */
  851         kp->ki_swtime = (ticks - p->p_swtick) / hz;
  852         kp->ki_pid = p->p_pid;
  853         kp->ki_nice = p->p_nice;
  854         PROC_SLOCK(p);
  855         rufetch(p, &kp->ki_rusage);
  856         kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
  857         PROC_SUNLOCK(p);
  858         if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
  859                 kp->ki_start = p->p_stats->p_start;
  860                 timevaladd(&kp->ki_start, &boottime);
  861                 PROC_SLOCK(p);
  862                 calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
  863                 PROC_SUNLOCK(p);
  864                 calccru(p, &kp->ki_childutime, &kp->ki_childstime);
  865 
  866                 /* Some callers want child-times in a single value */
  867                 kp->ki_childtime = kp->ki_childstime;
  868                 timevaladd(&kp->ki_childtime, &kp->ki_childutime);
  869         }
  870 
  871         FOREACH_THREAD_IN_PROC(p, td0)
  872                 kp->ki_cow += td0->td_cow;
  873 
  874         tp = NULL;
  875         if (p->p_pgrp) {
  876                 kp->ki_pgid = p->p_pgrp->pg_id;
  877                 kp->ki_jobc = p->p_pgrp->pg_jobc;
  878                 sp = p->p_pgrp->pg_session;
  879 
  880                 if (sp != NULL) {
  881                         kp->ki_sid = sp->s_sid;
  882                         SESS_LOCK(sp);
  883                         strlcpy(kp->ki_login, sp->s_login,
  884                             sizeof(kp->ki_login));
  885                         if (sp->s_ttyvp)
  886                                 kp->ki_kiflag |= KI_CTTY;
  887                         if (SESS_LEADER(p))
  888                                 kp->ki_kiflag |= KI_SLEADER;
  889                         /* XXX proctree_lock */
  890                         tp = sp->s_ttyp;
  891                         SESS_UNLOCK(sp);
  892                 }
  893         }
  894         if ((p->p_flag & P_CONTROLT) && tp != NULL) {
  895                 kp->ki_tdev = tty_udev(tp);
  896                 kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
  897                 if (tp->t_session)
  898                         kp->ki_tsid = tp->t_session->s_sid;
  899         } else
  900                 kp->ki_tdev = NODEV;
  901         if (p->p_comm[0] != '\0')
  902                 strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
  903         if (p->p_sysent && p->p_sysent->sv_name != NULL &&
  904             p->p_sysent->sv_name[0] != '\0')
  905                 strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul));
  906         kp->ki_siglist = p->p_siglist;
  907         kp->ki_xstat = p->p_xstat;
  908         kp->ki_acflag = p->p_acflag;
  909         kp->ki_lock = p->p_lock;
  910         if (p->p_pptr)
  911                 kp->ki_ppid = p->p_pptr->p_pid;
  912 }
  913 
  914 /*
  915  * Fill in information that is thread specific.  Must be called with
  916  * target process locked.  If 'preferthread' is set, overwrite certain
  917  * process-related fields that are maintained for both threads and
  918  * processes.
  919  */
  920 static void
  921 fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
  922 {
  923         struct proc *p;
  924 
  925         p = td->td_proc;
  926         kp->ki_tdaddr = td;
  927         PROC_LOCK_ASSERT(p, MA_OWNED);
  928 
  929         thread_lock(td);
  930         if (td->td_wmesg != NULL)
  931                 strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
  932         else
  933                 bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg));
  934         strlcpy(kp->ki_ocomm, td->td_name, sizeof(kp->ki_ocomm));
  935         if (TD_ON_LOCK(td)) {
  936                 kp->ki_kiflag |= KI_LOCKBLOCK;
  937                 strlcpy(kp->ki_lockname, td->td_lockname,
  938                     sizeof(kp->ki_lockname));
  939         } else {
  940                 kp->ki_kiflag &= ~KI_LOCKBLOCK;
  941                 bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
  942         }
  943 
  944         if (p->p_state == PRS_NORMAL) { /* approximate. */
  945                 if (TD_ON_RUNQ(td) ||
  946                     TD_CAN_RUN(td) ||
  947                     TD_IS_RUNNING(td)) {
  948                         kp->ki_stat = SRUN;
  949                 } else if (P_SHOULDSTOP(p)) {
  950                         kp->ki_stat = SSTOP;
  951                 } else if (TD_IS_SLEEPING(td)) {
  952                         kp->ki_stat = SSLEEP;
  953                 } else if (TD_ON_LOCK(td)) {
  954                         kp->ki_stat = SLOCK;
  955                 } else {
  956                         kp->ki_stat = SWAIT;
  957                 }
  958         } else if (p->p_state == PRS_ZOMBIE) {
  959                 kp->ki_stat = SZOMB;
  960         } else {
  961                 kp->ki_stat = SIDL;
  962         }
  963 
  964         /* Things in the thread */
  965         kp->ki_wchan = td->td_wchan;
  966         kp->ki_pri.pri_level = td->td_priority;
  967         kp->ki_pri.pri_native = td->td_base_pri;
  968         kp->ki_lastcpu = td->td_lastcpu;
  969         kp->ki_oncpu = td->td_oncpu;
  970         kp->ki_tdflags = td->td_flags;
  971         kp->ki_tid = td->td_tid;
  972         kp->ki_numthreads = p->p_numthreads;
  973         kp->ki_pcb = td->td_pcb;
  974         kp->ki_kstack = (void *)td->td_kstack;
  975         kp->ki_slptime = (ticks - td->td_slptick) / hz;
  976         kp->ki_pri.pri_class = td->td_pri_class;
  977         kp->ki_pri.pri_user = td->td_user_pri;
  978 
  979         if (preferthread) {
  980                 kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime);
  981                 kp->ki_pctcpu = sched_pctcpu(td);
  982                 kp->ki_estcpu = td->td_estcpu;
  983                 kp->ki_cow = td->td_cow;
  984         }
  985 
  986         /* We can't get this anymore but ps etc never used it anyway. */
  987         kp->ki_rqindex = 0;
  988 
  989         if (preferthread)
  990                 kp->ki_siglist = td->td_siglist;
  991         kp->ki_sigmask = td->td_sigmask;
  992         thread_unlock(td);
  993 }
  994 
  995 /*
  996  * Fill in a kinfo_proc structure for the specified process.
  997  * Must be called with the target process locked.
  998  */
  999 void
 1000 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
 1001 {
 1002 
 1003         MPASS(FIRST_THREAD_IN_PROC(p) != NULL);
 1004 
 1005         fill_kinfo_proc_only(p, kp);
 1006         fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
 1007         fill_kinfo_aggregate(p, kp);
 1008 }
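
/*
 * Editor's note: an illustrative sketch, not part of kern_proc.c, of the
 * locking discipline the comment above requires: the target process stays
 * locked across fill_kinfo_proc().  The function name is hypothetical.
 */
#if 0
static void
example_snapshot(struct proc *p, struct kinfo_proc *kp)
{
        PROC_LOCK(p);
        fill_kinfo_proc(p, kp);         /* proc-wide + thread + aggregate */
        PROC_UNLOCK(p);
}
#endif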
 1009 
 1010 struct pstats *
 1011 pstats_alloc(void)
 1012 {
 1013 
 1014         return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
 1015 }
 1016 
 1017 /*
 1018  * Copy parts of p_stats; zero the rest of p_stats (statistics).
 1019  */
 1020 void
 1021 pstats_fork(struct pstats *src, struct pstats *dst)
 1022 {
 1023 
 1024         bzero(&dst->pstat_startzero,
 1025             __rangeof(struct pstats, pstat_startzero, pstat_endzero));
 1026         bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
 1027             __rangeof(struct pstats, pstat_startcopy, pstat_endcopy));
 1028 }
 1029 
 1030 void
 1031 pstats_free(struct pstats *ps)
 1032 {
 1033 
 1034         free(ps, M_SUBPROC);
 1035 }
 1036 
 1037 /*
 1038  * Locate a zombie process by number
 1039  */
 1040 struct proc *
 1041 zpfind(pid_t pid)
 1042 {
 1043         struct proc *p;
 1044 
 1045         sx_slock(&allproc_lock);
 1046         LIST_FOREACH(p, &zombproc, p_list)
 1047                 if (p->p_pid == pid) {
 1048                         PROC_LOCK(p);
 1049                         break;
 1050                 }
 1051         sx_sunlock(&allproc_lock);
 1052         return (p);
 1053 }
 1054 
 1055 #define KERN_PROC_ZOMBMASK      0x3
 1056 #define KERN_PROC_NOTHREADS     0x4
 1057 
 1058 #ifdef COMPAT_FREEBSD32
 1059 
 1060 /*
 1061  * This function is typically used to copy out the kernel address, so
 1062  * it can be replaced by assignment of zero.
 1063  */
 1064 static inline uint32_t
 1065 ptr32_trim(void *ptr)
 1066 {
 1067         uintptr_t uptr;
 1068 
 1069         uptr = (uintptr_t)ptr;
 1070         return ((uptr > UINT_MAX) ? 0 : uptr);
 1071 }
 1072 
 1073 #define PTRTRIM_CP(src,dst,fld) \
 1074         do { (dst).fld = ptr32_trim((src).fld); } while (0)
 1075 
 1076 static void
 1077 freebsd32_kinfo_proc_out(const struct kinfo_proc *ki, struct kinfo_proc32 *ki32)
 1078 {
 1079         int i;
 1080 
 1081         bzero(ki32, sizeof(struct kinfo_proc32));
 1082         ki32->ki_structsize = sizeof(struct kinfo_proc32);
 1083         CP(*ki, *ki32, ki_layout);
 1084         PTRTRIM_CP(*ki, *ki32, ki_args);
 1085         PTRTRIM_CP(*ki, *ki32, ki_paddr);
 1086         PTRTRIM_CP(*ki, *ki32, ki_addr);
 1087         PTRTRIM_CP(*ki, *ki32, ki_tracep);
 1088         PTRTRIM_CP(*ki, *ki32, ki_textvp);
 1089         PTRTRIM_CP(*ki, *ki32, ki_fd);
 1090         PTRTRIM_CP(*ki, *ki32, ki_vmspace);
 1091         PTRTRIM_CP(*ki, *ki32, ki_wchan);
 1092         CP(*ki, *ki32, ki_pid);
 1093         CP(*ki, *ki32, ki_ppid);
 1094         CP(*ki, *ki32, ki_pgid);
 1095         CP(*ki, *ki32, ki_tpgid);
 1096         CP(*ki, *ki32, ki_sid);
 1097         CP(*ki, *ki32, ki_tsid);
 1098         CP(*ki, *ki32, ki_jobc);
 1099         CP(*ki, *ki32, ki_tdev);
 1100         CP(*ki, *ki32, ki_siglist);
 1101         CP(*ki, *ki32, ki_sigmask);
 1102         CP(*ki, *ki32, ki_sigignore);
 1103         CP(*ki, *ki32, ki_sigcatch);
 1104         CP(*ki, *ki32, ki_uid);
 1105         CP(*ki, *ki32, ki_ruid);
 1106         CP(*ki, *ki32, ki_svuid);
 1107         CP(*ki, *ki32, ki_rgid);
 1108         CP(*ki, *ki32, ki_svgid);
 1109         CP(*ki, *ki32, ki_ngroups);
 1110         for (i = 0; i < KI_NGROUPS; i++)
 1111                 CP(*ki, *ki32, ki_groups[i]);
 1112         CP(*ki, *ki32, ki_size);
 1113         CP(*ki, *ki32, ki_rssize);
 1114         CP(*ki, *ki32, ki_swrss);
 1115         CP(*ki, *ki32, ki_tsize);
 1116         CP(*ki, *ki32, ki_dsize);
 1117         CP(*ki, *ki32, ki_ssize);
 1118         CP(*ki, *ki32, ki_xstat);
 1119         CP(*ki, *ki32, ki_acflag);
 1120         CP(*ki, *ki32, ki_pctcpu);
 1121         CP(*ki, *ki32, ki_estcpu);
 1122         CP(*ki, *ki32, ki_slptime);
 1123         CP(*ki, *ki32, ki_swtime);
 1124         CP(*ki, *ki32, ki_cow);
 1125         CP(*ki, *ki32, ki_runtime);
 1126         TV_CP(*ki, *ki32, ki_start);
 1127         TV_CP(*ki, *ki32, ki_childtime);
 1128         CP(*ki, *ki32, ki_flag);
 1129         CP(*ki, *ki32, ki_kiflag);
 1130         CP(*ki, *ki32, ki_traceflag);
 1131         CP(*ki, *ki32, ki_stat);
 1132         CP(*ki, *ki32, ki_nice);
 1133         CP(*ki, *ki32, ki_lock);
 1134         CP(*ki, *ki32, ki_rqindex);
 1135         CP(*ki, *ki32, ki_oncpu);
 1136         CP(*ki, *ki32, ki_lastcpu);
 1137         bcopy(ki->ki_ocomm, ki32->ki_ocomm, OCOMMLEN + 1);
 1138         bcopy(ki->ki_wmesg, ki32->ki_wmesg, WMESGLEN + 1);
 1139         bcopy(ki->ki_login, ki32->ki_login, LOGNAMELEN + 1);
 1140         bcopy(ki->ki_lockname, ki32->ki_lockname, LOCKNAMELEN + 1);
 1141         bcopy(ki->ki_comm, ki32->ki_comm, COMMLEN + 1);
 1142         bcopy(ki->ki_emul, ki32->ki_emul, KI_EMULNAMELEN + 1);
 1143         CP(*ki, *ki32, ki_cr_flags);
 1144         CP(*ki, *ki32, ki_jid);
 1145         CP(*ki, *ki32, ki_numthreads);
 1146         CP(*ki, *ki32, ki_tid);
 1147         CP(*ki, *ki32, ki_pri);
 1148         freebsd32_rusage_out(&ki->ki_rusage, &ki32->ki_rusage);
 1149         freebsd32_rusage_out(&ki->ki_rusage_ch, &ki32->ki_rusage_ch);
 1150         PTRTRIM_CP(*ki, *ki32, ki_pcb);
 1151         PTRTRIM_CP(*ki, *ki32, ki_kstack);
 1152         PTRTRIM_CP(*ki, *ki32, ki_udata);
 1153         CP(*ki, *ki32, ki_sflag);
 1154         CP(*ki, *ki32, ki_tdflags);
 1155 }
 1156 
 1157 static int
 1158 sysctl_out_proc_copyout(struct kinfo_proc *ki, struct sysctl_req *req)
 1159 {
 1160         struct kinfo_proc32 ki32;
 1161         int error;
 1162 
 1163         if (req->flags & SCTL_MASK32) {
 1164                 freebsd32_kinfo_proc_out(ki, &ki32);
 1165                 error = SYSCTL_OUT(req, &ki32, sizeof(struct kinfo_proc32));
 1166         } else
 1167                 error = SYSCTL_OUT(req, ki, sizeof(struct kinfo_proc));
 1168         return (error);
 1169 }
 1170 #else
 1171 static int
 1172 sysctl_out_proc_copyout(struct kinfo_proc *ki, struct sysctl_req *req)
 1173 {
 1174 
 1175         return (SYSCTL_OUT(req, ki, sizeof(struct kinfo_proc)));
 1176 }
 1177 #endif
 1178 
 1179 /*
 1180  * Must be called with the process locked and will return with it unlocked.
 1181  */
 1182 static int
 1183 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
 1184 {
 1185         struct thread *td;
 1186         struct kinfo_proc kinfo_proc;
 1187         int error = 0;
 1188         struct proc *np;
 1189         pid_t pid = p->p_pid;
 1190 
 1191         PROC_LOCK_ASSERT(p, MA_OWNED);
 1192         MPASS(FIRST_THREAD_IN_PROC(p) != NULL);
 1193 
 1194         fill_kinfo_proc(p, &kinfo_proc);
 1195         if (flags & KERN_PROC_NOTHREADS)
 1196                 error = sysctl_out_proc_copyout(&kinfo_proc, req);
 1197         else {
 1198                 FOREACH_THREAD_IN_PROC(p, td) {
 1199                         fill_kinfo_thread(td, &kinfo_proc, 1);
 1200                         error = sysctl_out_proc_copyout(&kinfo_proc, req);
 1201                         if (error)
 1202                                 break;
 1203                 }
 1204         }
 1205         PROC_UNLOCK(p);
 1206         if (error)
 1207                 return (error);
 1208         if (flags & KERN_PROC_ZOMBMASK)
 1209                 np = zpfind(pid);
 1210         else {
 1211                 if (pid == 0)
 1212                         return (0);
 1213                 np = pfind(pid);
 1214         }
 1215         if (np == NULL)
 1216                 return (ESRCH);
 1217         if (np != p) {
 1218                 PROC_UNLOCK(np);
 1219                 return (ESRCH);
 1220         }
 1221         PROC_UNLOCK(np);
 1222         return (0);
 1223 }
 1224 
 1225 static int
 1226 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
 1227 {
 1228         int *name = (int*) arg1;
 1229         u_int namelen = arg2;
 1230         struct proc *p;
 1231         int flags, doingzomb, oid_number;
 1232         int error = 0;
 1233 
 1234         oid_number = oidp->oid_number;
 1235         if (oid_number != KERN_PROC_ALL &&
 1236             (oid_number & KERN_PROC_INC_THREAD) == 0)
 1237                 flags = KERN_PROC_NOTHREADS;
 1238         else {
 1239                 flags = 0;
 1240                 oid_number &= ~KERN_PROC_INC_THREAD;
 1241         }
 1242         if (oid_number == KERN_PROC_PID) {
 1243                 if (namelen != 1) 
 1244                         return (EINVAL);
 1245                 error = sysctl_wire_old_buffer(req, 0);
 1246                 if (error)
 1247                         return (error);         
 1248                 error = pget((pid_t)name[0], PGET_CANSEE, &p);
 1249                 if (error != 0)
 1250                         return (error);
 1251                 error = sysctl_out_proc(p, req, flags);
 1252                 return (error);
 1253         }
 1254 
 1255         switch (oid_number) {
 1256         case KERN_PROC_ALL:
 1257                 if (namelen != 0)
 1258                         return (EINVAL);
 1259                 break;
 1260         case KERN_PROC_PROC:
 1261                 if (namelen != 0 && namelen != 1)
 1262                         return (EINVAL);
 1263                 break;
 1264         default:
 1265                 if (namelen != 1)
 1266                         return (EINVAL);
 1267                 break;
 1268         }
 1269         
 1270         if (!req->oldptr) {
 1271                 /* overestimate by 5 procs */
 1272                 error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
 1273                 if (error)
 1274                         return (error);
 1275         }
 1276         error = sysctl_wire_old_buffer(req, 0);
 1277         if (error != 0)
 1278                 return (error);
 1279         sx_slock(&allproc_lock);
 1280         for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
 1281                 if (!doingzomb)
 1282                         p = LIST_FIRST(&allproc);
 1283                 else
 1284                         p = LIST_FIRST(&zombproc);
 1285                 for (; p != 0; p = LIST_NEXT(p, p_list)) {
 1286                         /*
 1287                          * Skip embryonic processes.
 1288                          */
 1289                         PROC_LOCK(p);
 1290                         if (p->p_state == PRS_NEW) {
 1291                                 PROC_UNLOCK(p);
 1292                                 continue;
 1293                         }
 1294                         KASSERT(p->p_ucred != NULL,
 1295                             ("process credential is NULL for non-NEW proc"));
 1296                         /*
 1297                          * Show a user only appropriate processes.
 1298                          */
 1299                         if (p_cansee(curthread, p)) {
 1300                                 PROC_UNLOCK(p);
 1301                                 continue;
 1302                         }
 1303                         /*
 1304                          * TODO - make more efficient (see notes below).
 1305                          * do by session.
 1306                          */
 1307                         switch (oid_number) {
 1308 
 1309                         case KERN_PROC_GID:
 1310                                 if (p->p_ucred->cr_gid != (gid_t)name[0]) {
 1311                                         PROC_UNLOCK(p);
 1312                                         continue;
 1313                                 }
 1314                                 break;
 1315 
 1316                         case KERN_PROC_PGRP:
 1317                                 /* could do this by traversing pgrp */
 1318                                 if (p->p_pgrp == NULL ||
 1319                                     p->p_pgrp->pg_id != (pid_t)name[0]) {
 1320                                         PROC_UNLOCK(p);
 1321                                         continue;
 1322                                 }
 1323                                 break;
 1324 
 1325                         case KERN_PROC_RGID:
 1326                                 if (p->p_ucred->cr_rgid != (gid_t)name[0]) {
 1327                                         PROC_UNLOCK(p);
 1328                                         continue;
 1329                                 }
 1330                                 break;
 1331 
 1332                         case KERN_PROC_SESSION:
 1333                                 if (p->p_session == NULL ||
 1334                                     p->p_session->s_sid != (pid_t)name[0]) {
 1335                                         PROC_UNLOCK(p);
 1336                                         continue;
 1337                                 }
 1338                                 break;
 1339 
 1340                         case KERN_PROC_TTY:
 1341                                 if ((p->p_flag & P_CONTROLT) == 0 ||
 1342                                     p->p_session == NULL) {
 1343                                         PROC_UNLOCK(p);
 1344                                         continue;
 1345                                 }
 1346                                 /* XXX proctree_lock */
 1347                                 SESS_LOCK(p->p_session);
 1348                                 if (p->p_session->s_ttyp == NULL ||
 1349                                     tty_udev(p->p_session->s_ttyp) != 
 1350                                     (dev_t)name[0]) {
 1351                                         SESS_UNLOCK(p->p_session);
 1352                                         PROC_UNLOCK(p);
 1353                                         continue;
 1354                                 }
 1355                                 SESS_UNLOCK(p->p_session);
 1356                                 break;
 1357 
 1358                         case KERN_PROC_UID:
 1359                                 if (p->p_ucred->cr_uid != (uid_t)name[0]) {
 1360                                         PROC_UNLOCK(p);
 1361                                         continue;
 1362                                 }
 1363                                 break;
 1364 
 1365                         case KERN_PROC_RUID:
 1366                                 if (p->p_ucred->cr_ruid != (uid_t)name[0]) {
 1367                                         PROC_UNLOCK(p);
 1368                                         continue;
 1369                                 }
 1370                                 break;
 1371 
 1372                         case KERN_PROC_PROC:
 1373                                 break;
 1374 
 1375                         default:
 1376                                 break;
 1377 
 1378                         }
 1379 
 1380                         error = sysctl_out_proc(p, req, flags | doingzomb);
 1381                         if (error) {
 1382                                 sx_sunlock(&allproc_lock);
 1383                                 return (error);
 1384                         }
 1385                 }
 1386         }
 1387         sx_sunlock(&allproc_lock);
 1388         return (0);
 1389 }
 1390 
 1391 struct pargs *
 1392 pargs_alloc(int len)
 1393 {
 1394         struct pargs *pa;
 1395 
 1396         pa = malloc(sizeof(struct pargs) + len, M_PARGS,
 1397                 M_WAITOK);
 1398         refcount_init(&pa->ar_ref, 1);
 1399         pa->ar_length = len;
 1400         return (pa);
 1401 }
 1402 
 1403 static void
 1404 pargs_free(struct pargs *pa)
 1405 {
 1406 
 1407         free(pa, M_PARGS);
 1408 }
 1409 
 1410 void
 1411 pargs_hold(struct pargs *pa)
 1412 {
 1413 
 1414         if (pa == NULL)
 1415                 return;
 1416         refcount_acquire(&pa->ar_ref);
 1417 }
 1418 
 1419 void
 1420 pargs_drop(struct pargs *pa)
 1421 {
 1422 
 1423         if (pa == NULL)
 1424                 return;
 1425         if (refcount_release(&pa->ar_ref))
 1426                 pargs_free(pa);
 1427 }
 1428 
 1429 /*
 1430  * This sysctl allows a process to retrieve the argument list or process
 1431  * title for another process without groping around in the address space
  1432  * of the other process.  It also allows a process to set its own process
  1433  * title to a string of its own choice.
 1434  */
 1435 static int
 1436 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
 1437 {
 1438         int *name = (int*) arg1;
 1439         u_int namelen = arg2;
 1440         struct pargs *newpa, *pa;
 1441         struct proc *p;
 1442         int flags, error = 0;
 1443 
 1444         if (namelen != 1) 
 1445                 return (EINVAL);
 1446 
 1447         flags = PGET_CANSEE;
 1448         if (req->newptr != NULL)
 1449                 flags |= PGET_ISCURRENT;
 1450         error = pget((pid_t)name[0], flags, &p);
 1451         if (error)
 1452                 return (error);
 1453 
 1454         pa = p->p_args;
 1455         pargs_hold(pa);
 1456         PROC_UNLOCK(p);
 1457         if (pa != NULL)
 1458                 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
 1459         pargs_drop(pa);
 1460         if (error != 0 || req->newptr == NULL)
 1461                 return (error);
 1462 
 1463         if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
 1464                 return (ENOMEM);
 1465         newpa = pargs_alloc(req->newlen);
 1466         error = SYSCTL_IN(req, newpa->ar_args, req->newlen);
 1467         if (error != 0) {
 1468                 pargs_free(newpa);
 1469                 return (error);
 1470         }
 1471         PROC_LOCK(p);
 1472         pa = p->p_args;
 1473         p->p_args = newpa;
 1474         PROC_UNLOCK(p);
 1475         pargs_drop(pa);
 1476         return (0);
 1477 }
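
/*
 * Editor's note: a hedged userland sketch, not part of kern_proc.c, showing
 * how this handler is reached through sysctl(3) with the MIB
 * { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid }; the old buffer receives the
 * NUL-separated argument strings.  The function name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
example_read_args(pid_t pid, char *buf, size_t buflen)
{
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, (int)pid };

        return (sysctl(mib, 4, buf, &buflen, NULL, 0));
}
#endif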
 1478 
 1479 /*
 1480  * This sysctl allows a process to retrieve the path of the executable for
 1481  * itself or another process.
 1482  */
 1483 static int
 1484 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
 1485 {
 1486         pid_t *pidp = (pid_t *)arg1;
 1487         unsigned int arglen = arg2;
 1488         struct proc *p;
 1489         struct vnode *vp;
 1490         char *retbuf, *freebuf;
 1491         int error, vfslocked;
 1492 
 1493         if (arglen != 1)
 1494                 return (EINVAL);
 1495         if (*pidp == -1) {      /* -1 means this process */
 1496                 p = req->td->td_proc;
 1497         } else {
 1498                 error = pget(*pidp, PGET_CANSEE, &p);
 1499                 if (error != 0)
 1500                         return (error);
 1501         }
 1502 
 1503         vp = p->p_textvp;
 1504         if (vp == NULL) {
 1505                 if (*pidp != -1)
 1506                         PROC_UNLOCK(p);
 1507                 return (0);
 1508         }
 1509         vref(vp);
 1510         if (*pidp != -1)
 1511                 PROC_UNLOCK(p);
 1512         error = vn_fullpath(req->td, vp, &retbuf, &freebuf);
 1513         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 1514         vrele(vp);
 1515         VFS_UNLOCK_GIANT(vfslocked);
 1516         if (error)
 1517                 return (error);
 1518         error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
 1519         free(freebuf, M_TEMP);
 1520         return (error);
 1521 }
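
/*
 * Illustrative userland sketch (not part of this file): the handler above
 * implements kern.proc.pathname; passing -1 as the pid element is the usual
 * way for a FreeBSD process to look up its own executable path.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <limits.h>
 *	#include <err.h>
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
 *	char path[PATH_MAX];
 *	size_t len = sizeof(path);
 *
 *	if (sysctl(mib, 4, path, &len, NULL, 0) == -1)
 *		err(1, "kern.proc.pathname");
 *
 * On success, "path" holds the NUL-terminated executable path.
 */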
 1522 
 1523 static int
 1524 sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
 1525 {
 1526         struct proc *p;
 1527         char *sv_name;
 1528         int *name;
 1529         int namelen;
 1530         int error;
 1531 
 1532         namelen = arg2;
 1533         if (namelen != 1) 
 1534                 return (EINVAL);
 1535 
 1536         name = (int *)arg1;
 1537         error = pget((pid_t)name[0], PGET_CANSEE, &p);
 1538         if (error != 0)
 1539                 return (error);
 1540         sv_name = p->p_sysent->sv_name;
 1541         PROC_UNLOCK(p);
 1542         return (sysctl_handle_string(oidp, sv_name, 0, req));
 1543 }
 1544 
 1545 #ifdef KINFO_OVMENTRY_SIZE
 1546 CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
 1547 #endif
 1548 
 1549 #ifdef COMPAT_FREEBSD7
 1550 static int
 1551 sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 1552 {
 1553         vm_map_entry_t entry, tmp_entry;
 1554         unsigned int last_timestamp;
 1555         char *fullpath, *freepath;
 1556         struct kinfo_ovmentry *kve;
 1557         struct vattr va;
 1558         struct ucred *cred;
 1559         int error, *name;
 1560         struct vnode *vp;
 1561         struct proc *p;
 1562         vm_map_t map;
 1563         struct vmspace *vm;
 1564 
 1565         name = (int *)arg1;
 1566         error = pget((pid_t)name[0], PGET_WANTREAD, &p);
 1567         if (error != 0)
 1568                 return (error);
 1569         vm = vmspace_acquire_ref(p);
 1570         if (vm == NULL) {
 1571                 PRELE(p);
 1572                 return (ESRCH);
 1573         }
 1574         kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);
 1575 
 1576         map = &p->p_vmspace->vm_map;    /* XXXRW: More locking required? */
 1577         vm_map_lock_read(map);
 1578         for (entry = map->header.next; entry != &map->header;
 1579             entry = entry->next) {
 1580                 vm_object_t obj, tobj, lobj;
 1581                 vm_offset_t addr;
 1582                 int vfslocked;
 1583 
 1584                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 1585                         continue;
 1586 
 1587                 bzero(kve, sizeof(*kve));
 1588                 kve->kve_structsize = sizeof(*kve);
 1589 
 1590                 kve->kve_private_resident = 0;
 1591                 obj = entry->object.vm_object;
 1592                 if (obj != NULL) {
 1593                         VM_OBJECT_LOCK(obj);
 1594                         if (obj->shadow_count == 1)
 1595                                 kve->kve_private_resident =
 1596                                     obj->resident_page_count;
 1597                 }
 1598                 kve->kve_resident = 0;
 1599                 addr = entry->start;
 1600                 while (addr < entry->end) {
 1601                         if (pmap_extract(map->pmap, addr))
 1602                                 kve->kve_resident++;
 1603                         addr += PAGE_SIZE;
 1604                 }
 1605 
 1606                 for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
 1607                         if (tobj != obj)
 1608                                 VM_OBJECT_LOCK(tobj);
 1609                         if (lobj != obj)
 1610                                 VM_OBJECT_UNLOCK(lobj);
 1611                         lobj = tobj;
 1612                 }
 1613 
 1614                 kve->kve_start = (void*)entry->start;
 1615                 kve->kve_end = (void*)entry->end;
 1616                 kve->kve_offset = (off_t)entry->offset;
 1617 
 1618                 if (entry->protection & VM_PROT_READ)
 1619                         kve->kve_protection |= KVME_PROT_READ;
 1620                 if (entry->protection & VM_PROT_WRITE)
 1621                         kve->kve_protection |= KVME_PROT_WRITE;
 1622                 if (entry->protection & VM_PROT_EXECUTE)
 1623                         kve->kve_protection |= KVME_PROT_EXEC;
 1624 
 1625                 if (entry->eflags & MAP_ENTRY_COW)
 1626                         kve->kve_flags |= KVME_FLAG_COW;
 1627                 if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
 1628                         kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
 1629                 if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
 1630                         kve->kve_flags |= KVME_FLAG_NOCOREDUMP;
 1631 
 1632                 last_timestamp = map->timestamp;
 1633                 vm_map_unlock_read(map);
 1634 
 1635                 kve->kve_fileid = 0;
 1636                 kve->kve_fsid = 0;
 1637                 freepath = NULL;
 1638                 fullpath = "";
 1639                 if (lobj) {
 1640                         vp = NULL;
 1641                         switch (lobj->type) {
 1642                         case OBJT_DEFAULT:
 1643                                 kve->kve_type = KVME_TYPE_DEFAULT;
 1644                                 break;
 1645                         case OBJT_VNODE:
 1646                                 kve->kve_type = KVME_TYPE_VNODE;
 1647                                 vp = lobj->handle;
 1648                                 vref(vp);
 1649                                 break;
 1650                         case OBJT_SWAP:
 1651                                 kve->kve_type = KVME_TYPE_SWAP;
 1652                                 break;
 1653                         case OBJT_DEVICE:
 1654                                 kve->kve_type = KVME_TYPE_DEVICE;
 1655                                 break;
 1656                         case OBJT_PHYS:
 1657                                 kve->kve_type = KVME_TYPE_PHYS;
 1658                                 break;
 1659                         case OBJT_DEAD:
 1660                                 kve->kve_type = KVME_TYPE_DEAD;
 1661                                 break;
 1662                         case OBJT_SG:
 1663                                 kve->kve_type = KVME_TYPE_SG;
 1664                                 break;
 1665                         default:
 1666                                 kve->kve_type = KVME_TYPE_UNKNOWN;
 1667                                 break;
 1668                         }
 1669                         if (lobj != obj)
 1670                                 VM_OBJECT_UNLOCK(lobj);
 1671 
 1672                         kve->kve_ref_count = obj->ref_count;
 1673                         kve->kve_shadow_count = obj->shadow_count;
 1674                         VM_OBJECT_UNLOCK(obj);
 1675                         if (vp != NULL) {
 1676                                 vn_fullpath(curthread, vp, &fullpath,
 1677                                     &freepath);
 1678                                 cred = curthread->td_ucred;
 1679                                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 1680                                 vn_lock(vp, LK_SHARED | LK_RETRY);
 1681                                 if (VOP_GETATTR(vp, &va, cred) == 0) {
 1682                                         kve->kve_fileid = va.va_fileid;
 1683                                         kve->kve_fsid = va.va_fsid;
 1684                                 }
 1685                                 vput(vp);
 1686                                 VFS_UNLOCK_GIANT(vfslocked);
 1687                         }
 1688                 } else {
 1689                         kve->kve_type = KVME_TYPE_NONE;
 1690                         kve->kve_ref_count = 0;
 1691                         kve->kve_shadow_count = 0;
 1692                 }
 1693 
 1694                 strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
 1695                 if (freepath != NULL)
 1696                         free(freepath, M_TEMP);
 1697 
 1698                 error = SYSCTL_OUT(req, kve, sizeof(*kve));
 1699                 vm_map_lock_read(map);
 1700                 if (error)
 1701                         break;
 1702                 if (last_timestamp != map->timestamp) {
 1703                         vm_map_lookup_entry(map, addr - 1, &tmp_entry);
 1704                         entry = tmp_entry;
 1705                 }
 1706         }
 1707         vm_map_unlock_read(map);
 1708         vmspace_free(vm);
 1709         PRELE(p);
 1710         free(kve, M_TEMP);
 1711         return (error);
 1712 }
 1713 #endif  /* COMPAT_FREEBSD7 */
 1714 
 1715 #ifdef KINFO_VMENTRY_SIZE
 1716 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
 1717 #endif
 1718 
 1719 static int
 1720 sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
 1721 {
 1722         vm_map_entry_t entry, tmp_entry;
 1723         unsigned int last_timestamp;
 1724         char *fullpath, *freepath;
 1725         struct kinfo_vmentry *kve;
 1726         struct vattr va;
 1727         struct ucred *cred;
 1728         int error, *name;
 1729         struct vnode *vp;
 1730         struct proc *p;
 1731         struct vmspace *vm;
 1732         vm_map_t map;
 1733 
 1734         name = (int *)arg1;
 1735         error = pget((pid_t)name[0], PGET_WANTREAD, &p);
 1736         if (error != 0)
 1737                 return (error);
 1738         vm = vmspace_acquire_ref(p);
 1739         if (vm == NULL) {
 1740                 PRELE(p);
 1741                 return (ESRCH);
 1742         }
 1743         kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);
 1744 
 1745         map = &vm->vm_map;      /* XXXRW: More locking required? */
 1746         vm_map_lock_read(map);
 1747         for (entry = map->header.next; entry != &map->header;
 1748             entry = entry->next) {
 1749                 vm_object_t obj, tobj, lobj;
 1750                 vm_offset_t addr;
 1751                 int vfslocked, mincoreinfo;
 1752 
 1753                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 1754                         continue;
 1755 
 1756                 bzero(kve, sizeof(*kve));
 1757 
 1758                 kve->kve_private_resident = 0;
 1759                 obj = entry->object.vm_object;
 1760                 if (obj != NULL) {
 1761                         VM_OBJECT_LOCK(obj);
 1762                         if (obj->shadow_count == 1)
 1763                                 kve->kve_private_resident =
 1764                                     obj->resident_page_count;
 1765                 }
 1766                 kve->kve_resident = 0;
 1767                 addr = entry->start;
 1768                 while (addr < entry->end) {
 1769                         mincoreinfo = pmap_mincore(map->pmap, addr);
 1770                         if (mincoreinfo & MINCORE_INCORE)
 1771                                 kve->kve_resident++;
 1772                         if (mincoreinfo & MINCORE_SUPER)
 1773                                 kve->kve_flags |= KVME_FLAG_SUPER;
 1774                         addr += PAGE_SIZE;
 1775                 }
 1776 
 1777                 for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
 1778                         if (tobj != obj)
 1779                                 VM_OBJECT_LOCK(tobj);
 1780                         if (lobj != obj)
 1781                                 VM_OBJECT_UNLOCK(lobj);
 1782                         lobj = tobj;
 1783                 }
 1784 
 1785                 kve->kve_start = entry->start;
 1786                 kve->kve_end = entry->end;
 1787                 kve->kve_offset = entry->offset;
 1788 
 1789                 if (entry->protection & VM_PROT_READ)
 1790                         kve->kve_protection |= KVME_PROT_READ;
 1791                 if (entry->protection & VM_PROT_WRITE)
 1792                         kve->kve_protection |= KVME_PROT_WRITE;
 1793                 if (entry->protection & VM_PROT_EXECUTE)
 1794                         kve->kve_protection |= KVME_PROT_EXEC;
 1795 
 1796                 if (entry->eflags & MAP_ENTRY_COW)
 1797                         kve->kve_flags |= KVME_FLAG_COW;
 1798                 if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
 1799                         kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
 1800                 if (entry->eflags & MAP_ENTRY_NOCOREDUMP)
 1801                         kve->kve_flags |= KVME_FLAG_NOCOREDUMP;
 1802 
 1803                 last_timestamp = map->timestamp;
 1804                 vm_map_unlock_read(map);
 1805 
 1806                 kve->kve_fileid = 0;
 1807                 kve->kve_fsid = 0;
 1808                 freepath = NULL;
 1809                 fullpath = "";
 1810                 if (lobj) {
 1811                         vp = NULL;
 1812                         switch (lobj->type) {
 1813                         case OBJT_DEFAULT:
 1814                                 kve->kve_type = KVME_TYPE_DEFAULT;
 1815                                 break;
 1816                         case OBJT_VNODE:
 1817                                 kve->kve_type = KVME_TYPE_VNODE;
 1818                                 vp = lobj->handle;
 1819                                 vref(vp);
 1820                                 break;
 1821                         case OBJT_SWAP:
 1822                                 kve->kve_type = KVME_TYPE_SWAP;
 1823                                 break;
 1824                         case OBJT_DEVICE:
 1825                                 kve->kve_type = KVME_TYPE_DEVICE;
 1826                                 break;
 1827                         case OBJT_PHYS:
 1828                                 kve->kve_type = KVME_TYPE_PHYS;
 1829                                 break;
 1830                         case OBJT_DEAD:
 1831                                 kve->kve_type = KVME_TYPE_DEAD;
 1832                                 break;
 1833                         case OBJT_SG:
 1834                                 kve->kve_type = KVME_TYPE_SG;
 1835                                 break;
 1836                         default:
 1837                                 kve->kve_type = KVME_TYPE_UNKNOWN;
 1838                                 break;
 1839                         }
 1840                         if (lobj != obj)
 1841                                 VM_OBJECT_UNLOCK(lobj);
 1842 
 1843                         kve->kve_ref_count = obj->ref_count;
 1844                         kve->kve_shadow_count = obj->shadow_count;
 1845                         VM_OBJECT_UNLOCK(obj);
 1846                         if (vp != NULL) {
 1847                                 vn_fullpath(curthread, vp, &fullpath,
 1848                                     &freepath);
 1849                                 cred = curthread->td_ucred;
 1850                                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 1851                                 vn_lock(vp, LK_SHARED | LK_RETRY);
 1852                                 if (VOP_GETATTR(vp, &va, cred) == 0) {
 1853                                         kve->kve_fileid = va.va_fileid;
 1854                                         kve->kve_fsid = va.va_fsid;
 1855                                 }
 1856                                 vput(vp);
 1857                                 VFS_UNLOCK_GIANT(vfslocked);
 1858                         }
 1859                 } else {
 1860                         kve->kve_type = KVME_TYPE_NONE;
 1861                         kve->kve_ref_count = 0;
 1862                         kve->kve_shadow_count = 0;
 1863                 }
 1864 
 1865                 strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
 1866                 if (freepath != NULL)
 1867                         free(freepath, M_TEMP);
 1868 
 1869                 /* Pack record size down */
 1870                 kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
 1871                     strlen(kve->kve_path) + 1;
 1872                 kve->kve_structsize = roundup(kve->kve_structsize,
 1873                     sizeof(uint64_t));
 1874                 error = SYSCTL_OUT(req, kve, kve->kve_structsize);
 1875                 vm_map_lock_read(map);
 1876                 if (error)
 1877                         break;
 1878                 if (last_timestamp != map->timestamp) {
 1879                         vm_map_lookup_entry(map, addr - 1, &tmp_entry);
 1880                         entry = tmp_entry;
 1881                 }
 1882         }
 1883         vm_map_unlock_read(map);
 1884         vmspace_free(vm);
 1885         PRELE(p);
 1886         free(kve, M_TEMP);
 1887         return (error);
 1888 }
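
/*
 * Illustrative consumer sketch (not part of this file): because the handler
 * above packs each record down to its used size, kern.proc.vmmap output is
 * a stream of variable-length struct kinfo_vmentry records, so a reader
 * must advance by kve_structsize rather than sizeof(struct kinfo_vmentry).
 * "buf" and "len" are assumed to hold the raw sysctl output for the target
 * process.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/user.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	char *bp = buf;
 *	while (bp < buf + len) {
 *		struct kinfo_vmentry *kve = (struct kinfo_vmentry *)bp;
 *		printf("0x%jx-0x%jx %s\n", (uintmax_t)kve->kve_start,
 *		    (uintmax_t)kve->kve_end, kve->kve_path);
 *		bp += kve->kve_structsize;
 *	}
 */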
 1889 
 1890 #if defined(STACK) || defined(DDB)
 1891 static int
 1892 sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS)
 1893 {
 1894         struct kinfo_kstack *kkstp;
 1895         int error, i, *name, numthreads;
 1896         lwpid_t *lwpidarray;
 1897         struct thread *td;
 1898         struct stack *st;
 1899         struct sbuf sb;
 1900         struct proc *p;
 1901 
 1902         name = (int *)arg1;
 1903         error = pget((pid_t)name[0], PGET_NOTINEXEC | PGET_WANTREAD, &p);
 1904         if (error != 0)
 1905                 return (error);
 1906 
 1907         kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK);
 1908         st = stack_create();
 1909 
 1910         lwpidarray = NULL;
 1911         numthreads = 0;
 1912         PROC_LOCK(p);
 1913 repeat:
 1914         if (numthreads < p->p_numthreads) {
 1915                 if (lwpidarray != NULL) {
 1916                         free(lwpidarray, M_TEMP);
 1917                         lwpidarray = NULL;
 1918                 }
 1919                 numthreads = p->p_numthreads;
 1920                 PROC_UNLOCK(p);
 1921                 lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
 1922                     M_WAITOK | M_ZERO);
 1923                 PROC_LOCK(p);
 1924                 goto repeat;
 1925         }
 1926         i = 0;
 1927 
 1928         /*
 1929          * XXXRW: During the below loop, execve(2) and countless other sorts
 1930          * of changes could have taken place.  Should we check to see if the
 1931          * vmspace has been replaced, or the like, in order to prevent
 1932          * giving a snapshot that spans, say, execve(2), with some threads
 1933          * before and some after?  Among other things, the credentials could
 1934          * have changed, in which case the right to extract debug info might
 1935          * no longer be assured.
 1936          */
 1937         FOREACH_THREAD_IN_PROC(p, td) {
 1938                 KASSERT(i < numthreads,
 1939                     ("sysctl_kern_proc_kstack: numthreads"));
 1940                 lwpidarray[i] = td->td_tid;
 1941                 i++;
 1942         }
 1943         numthreads = i;
 1944         for (i = 0; i < numthreads; i++) {
 1945                 td = thread_find(p, lwpidarray[i]);
 1946                 if (td == NULL) {
 1947                         continue;
 1948                 }
 1949                 bzero(kkstp, sizeof(*kkstp));
 1950                 (void)sbuf_new(&sb, kkstp->kkst_trace,
 1951                     sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
 1952                 thread_lock(td);
 1953                 kkstp->kkst_tid = td->td_tid;
 1954                 if (TD_IS_SWAPPED(td))
 1955                         kkstp->kkst_state = KKST_STATE_SWAPPED;
 1956                 else if (TD_IS_RUNNING(td))
 1957                         kkstp->kkst_state = KKST_STATE_RUNNING;
 1958                 else {
 1959                         kkstp->kkst_state = KKST_STATE_STACKOK;
 1960                         stack_save_td(st, td);
 1961                 }
 1962                 thread_unlock(td);
 1963                 PROC_UNLOCK(p);
 1964                 stack_sbuf_print(&sb, st);
 1965                 sbuf_finish(&sb);
 1966                 sbuf_delete(&sb);
 1967                 error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp));
 1968                 PROC_LOCK(p);
 1969                 if (error)
 1970                         break;
 1971         }
 1972         _PRELE(p);
 1973         PROC_UNLOCK(p);
 1974         if (lwpidarray != NULL)
 1975                 free(lwpidarray, M_TEMP);
 1976         stack_destroy(st);
 1977         free(kkstp, M_TEMP);
 1978         return (error);
 1979 }
 1980 #endif
 1981 
 1982 /*
 1983  * This sysctl allows a process to retrieve the full list of groups from
 1984  * itself or another process.
 1985  */
 1986 static int
 1987 sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS)
 1988 {
 1989         pid_t *pidp = (pid_t *)arg1;
 1990         unsigned int arglen = arg2;
 1991         struct proc *p;
 1992         struct ucred *cred;
 1993         int error;
 1994 
 1995         if (arglen != 1)
 1996                 return (EINVAL);
 1997         if (*pidp == -1) {      /* -1 means this process */
 1998                 p = req->td->td_proc;
 1999         } else {
 2000                 error = pget(*pidp, PGET_CANSEE, &p);
 2001                 if (error != 0)
 2002                         return (error);
 2003         }
 2004 
 2005         cred = crhold(p->p_ucred);
 2006         if (*pidp != -1)
 2007                 PROC_UNLOCK(p);
 2008 
 2009         error = SYSCTL_OUT(req, cred->cr_groups,
 2010             cred->cr_ngroups * sizeof(gid_t));
 2011         crfree(cred);
 2012         return (error);
 2013 }
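
/*
 * Illustrative userland sketch (not part of this file): kern.proc.groups
 * returns the target credential's group list as an array of gid_t.  Since
 * the group limit is tunable, the usual two-step sysctl pattern sizes the
 * buffer first; "pid" and "ngroups" are placeholders.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdlib.h>
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_GROUPS, pid };
 *	size_t len;
 *	gid_t *gids;
 *	int ngroups;
 *
 *	if (sysctl(mib, 4, NULL, &len, NULL, 0) == 0 &&
 *	    (gids = malloc(len)) != NULL &&
 *	    sysctl(mib, 4, gids, &len, NULL, 0) == 0)
 *		ngroups = len / sizeof(gid_t);
 */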
 2014 
 2015 /*
 2016  * This sysctl allows a process to set and retrieve the binary osreldate of
 2017  * another process.
 2018  */
 2019 static int
 2020 sysctl_kern_proc_osrel(SYSCTL_HANDLER_ARGS)
 2021 {
 2022         int *name = (int *)arg1;
 2023         u_int namelen = arg2;
 2024         struct proc *p;
 2025         int flags, error, osrel;
 2026 
 2027         if (namelen != 1)
 2028                 return (EINVAL);
 2029 
 2030         if (req->newptr != NULL && req->newlen != sizeof(osrel))
 2031                 return (EINVAL);
 2032 
 2033         flags = PGET_HOLD | PGET_NOTWEXIT;
 2034         if (req->newptr != NULL)
 2035                 flags |= PGET_CANDEBUG;
 2036         else
 2037                 flags |= PGET_CANSEE;
 2038         error = pget((pid_t)name[0], flags, &p);
 2039         if (error != 0)
 2040                 return (error);
 2041 
 2042         error = SYSCTL_OUT(req, &p->p_osrel, sizeof(p->p_osrel));
 2043         if (error != 0)
 2044                 goto errout;
 2045 
 2046         if (req->newptr != NULL) {
 2047                 error = SYSCTL_IN(req, &osrel, sizeof(osrel));
 2048                 if (error != 0)
 2049                         goto errout;
 2050                 if (osrel < 0) {
 2051                         error = EINVAL;
 2052                         goto errout;
 2053                 }
 2054                 p->p_osrel = osrel;
 2055         }
 2056 errout:
 2057         PRELE(p);
 2058         return (error);
 2059 }
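
/*
 * Illustrative userland sketch (not part of this file): kern.proc.osrel is
 * a read/write integer MIB, so the same name both queries and, subject to
 * the PGET_CANDEBUG check above, overrides the osreldate recorded for a
 * process's binary.  "pid" is a placeholder.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_OSREL, pid };
 *	int osrel;
 *	size_t len = sizeof(osrel);
 *
 *	sysctl(mib, 4, &osrel, &len, NULL, 0);			(read)
 *	sysctl(mib, 4, NULL, NULL, &osrel, sizeof(osrel));	(write)
 */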
 2060 
 2061 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");
 2062 
 2063 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT|
 2064         CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc",
 2065         "Return entire process table");
 2066 
 2067 static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2068         sysctl_kern_proc, "Process table");
 2069 
 2070 static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2071         sysctl_kern_proc, "Process table");
 2072 
 2073 static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2074         sysctl_kern_proc, "Process table");
 2075 
 2076 static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD |
 2077         CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2078 
 2079 static SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE, 
 2080         sysctl_kern_proc, "Process table");
 2081 
 2082 static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE, 
 2083         sysctl_kern_proc, "Process table");
 2084 
 2085 static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2086         sysctl_kern_proc, "Process table");
 2087 
 2088 static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2089         sysctl_kern_proc, "Process table");
 2090 
 2091 static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2092         sysctl_kern_proc, "Return process table, no threads");
 2093 
 2094 static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
 2095         CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE,
 2096         sysctl_kern_proc_args, "Process argument list");
 2097 
 2098 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD |
 2099         CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path");
 2100 
 2101 static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD |
 2102         CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name,
 2103         "Process syscall vector name (ABI type)");
 2104 
 2105 static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td,
 2106         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2107 
 2108 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td,
 2109         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2110 
 2111 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td,
 2112         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2113 
 2114 static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD),
 2115         sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2116 
 2117 static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td,
 2118         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2119 
 2120 static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td,
 2121         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2122 
 2123 static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td,
 2124         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2125 
 2126 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
 2127         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table");
 2128 
 2129 static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
 2130         CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc,
 2131         "Return process table, no threads");
 2132 
 2133 #ifdef COMPAT_FREEBSD7
 2134 static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD |
 2135         CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries");
 2136 #endif
 2137 
 2138 static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD |
 2139         CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries");
 2140 
 2141 #if defined(STACK) || defined(DDB)
 2142 static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD |
 2143         CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks");
 2144 #endif
 2145 
 2146 static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD |
 2147         CTLFLAG_MPSAFE, sysctl_kern_proc_groups, "Process groups");
 2148 
 2149 static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW |
 2150         CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_osrel,
 2151         "Process binary osreldate");
