FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_proc.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/sysctl.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36 #include <sys/vnode.h>
37 #include <sys/jail.h>
38 #include <sys/filedesc.h>
39 #include <sys/tty.h>
40 #include <sys/dsched.h>
41 #include <sys/signalvar.h>
42 #include <sys/spinlock.h>
43 #include <sys/random.h>
44 #include <vm/vm.h>
45 #include <sys/lock.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_map.h>
48 #include <sys/user.h>
49 #include <machine/smp.h>
50
51 #include <sys/refcount.h>
52 #include <sys/spinlock2.h>
53 #include <sys/mplock2.h>
54
55 /*
56 * Hash table size must be a power of two and is not currently dynamically
57 * sized. There is a trade-off between the linear scans which must iterate
58 * all HSIZE elements and the number of elements which might accumulate
59 * within each hash chain.
60 */
61 #define ALLPROC_HSIZE 256
62 #define ALLPROC_HMASK (ALLPROC_HSIZE - 1)
63 #define ALLPROC_HASH(pid) (pid & ALLPROC_HMASK)
64 #define PGRP_HASH(pid) (pid & ALLPROC_HMASK)
65 #define SESS_HASH(pid) (pid & ALLPROC_HMASK)
66
67 LIST_HEAD(pidhashhead, proc);
68
69 static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
70 MALLOC_DEFINE(M_SESSION, "session", "session header");
71 MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
72 MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
73 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
74
75 int ps_showallprocs = 1;
76 static int ps_showallthreads = 1;
77 SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
78 &ps_showallprocs, 0,
79 "Unprivileged processes can see processes with different UID/GID");
80 SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
81 &ps_showallthreads, 0,
82 "Unprivileged processes can see kernel threads");
83
84 static void orphanpg(struct pgrp *pg);
85 static void proc_makepid(struct proc *p, int random_offset);
86
87 /*
88 * Other process lists
89 */
90 static struct lwkt_token proc_tokens[ALLPROC_HSIZE];
91 static struct proclist allprocs[ALLPROC_HSIZE]; /* locked by proc_tokens */
92 static struct pgrplist allpgrps[ALLPROC_HSIZE]; /* locked by proc_tokens */
93 static struct sesslist allsessn[ALLPROC_HSIZE]; /* locked by proc_tokens */
94
95 /*
96 * Random component to nextpid generation. We mix in a random factor to make
97 * it a little harder to predict. We sanity check the modulus value to avoid
98 * doing it in critical paths. Don't let it be too small or we pointlessly
99 * waste randomness entropy, and don't let it be impossibly large. Using a
100 * modulus that is too big causes a LOT more process table scans and slows
101 * down fork processing as the pidchecked caching is defeated.
102 */
103 static int randompid = 0;
104
105 /*
106 * No requirements.
107 */
108 static int
109 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
110 {
111 int error, pid;
112
113 pid = randompid;
114 error = sysctl_handle_int(oidp, &pid, 0, req);
115 if (error || !req->newptr)
116 return (error);
117 if (pid < 0 || pid > PID_MAX - 100) /* out of range */
118 pid = PID_MAX - 100;
119 else if (pid < 2) /* NOP */
120 pid = 0;
121 else if (pid < 100) /* Make it reasonable */
122 pid = 100;
123 randompid = pid;
124 return (error);
125 }
126
127 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
128 0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
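/*
 * Editor's illustrative sketch (not part of the original source): the
 * modulus can be tuned from userland through this sysctl, for example
 * with sysctlbyname(3).  The handler above clamps the written value to
 * a sane range before it takes effect.
 */
#if 0	/* userland example only */
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_randompid(int modulus)
{
	/* writes kern.randompid; requires sufficient privilege */
	return (sysctlbyname("kern.randompid", NULL, NULL,
			     &modulus, sizeof(modulus)));
}
#endif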
129
130 /*
131 * Initialize global process hashing structures.
132 *
133 * These functions are ONLY called from the low level boot code and do
134 * not lock their operations.
135 */
136 void
137 procinit(void)
138 {
139 u_long i;
140
141 for (i = 0; i < ALLPROC_HSIZE; ++i) {
142 LIST_INIT(&allprocs[i]);
143 LIST_INIT(&allsessn[i]);
144 LIST_INIT(&allpgrps[i]);
145 lwkt_token_init(&proc_tokens[i], "allproc");
146 }
147 lwkt_init();
148 uihashinit();
149 }
150
151 void
152 procinsertinit(struct proc *p)
153 {
154 LIST_INSERT_HEAD(&allprocs[ALLPROC_HASH(p->p_pid)], p, p_list);
155 }
156
157 void
158 pgrpinsertinit(struct pgrp *pg)
159 {
160 LIST_INSERT_HEAD(&allpgrps[ALLPROC_HASH(pg->pg_id)], pg, pg_list);
161 }
162
163 void
164 sessinsertinit(struct session *sess)
165 {
166 LIST_INSERT_HEAD(&allsessn[ALLPROC_HASH(sess->s_sid)], sess, s_list);
167 }
168
169 /*
170 * Process hold/release support functions. Called via the PHOLD(),
171 * PRELE(), and PSTALL() macros.
172 *
173 * p->p_lock is a simple hold count with a waiting interlock. No wakeup()
174 * is issued unless someone is actually waiting for the process.
175 *
176 * Most holds are short-term, allowing a process scan or other similar
177 * operation to access a proc structure without it getting ripped out from
178 * under us. procfs and process-list sysctl ops also use the hold function
179 * interlocked with various p_flags to keep the vmspace intact when reading
180 * or writing a user process's address space.
181 *
182 * There are two situations where a hold count can be longer. Exiting lwps
183 * hold the process until the lwp is reaped, and the parent will hold the
184 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
185 *
186 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
187 * various critical points in the fork/exec and exit paths before proceeding.
188 */
189 #define PLOCK_ZOMB 0x20000000
190 #define PLOCK_WAITING 0x40000000
191 #define PLOCK_MASK 0x1FFFFFFF
192
193 void
194 pstall(struct proc *p, const char *wmesg, int count)
195 {
196 int o;
197 int n;
198
199 for (;;) {
200 o = p->p_lock;
201 cpu_ccfence();
202 if ((o & PLOCK_MASK) <= count)
203 break;
204 n = o | PLOCK_WAITING;
205 tsleep_interlock(&p->p_lock, 0);
206
207 /*
208 * If someone is trying to single-step the process during
209 * an exec or an exit they can deadlock us because procfs
210 * sleeps with the process held.
211 */
212 if (p->p_stops) {
213 if (p->p_flags & P_INEXEC) {
214 wakeup(&p->p_stype);
215 } else if (p->p_flags & P_POSTEXIT) {
216 spin_lock(&p->p_spin);
217 p->p_stops = 0;
218 p->p_step = 0;
219 spin_unlock(&p->p_spin);
220 wakeup(&p->p_stype);
221 }
222 }
223
224 if (atomic_cmpset_int(&p->p_lock, o, n)) {
225 tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
226 }
227 }
228 }
229
230 void
231 phold(struct proc *p)
232 {
233 atomic_add_int(&p->p_lock, 1);
234 }
235
236 /*
237 * WARNING! On last release (p) can become instantly invalid due to
238 * MP races.
239 */
240 void
241 prele(struct proc *p)
242 {
243 int o;
244 int n;
245
246 /*
247 * Fast path
248 */
249 if (atomic_cmpset_int(&p->p_lock, 1, 0))
250 return;
251
252 /*
253 * Slow path
254 */
255 for (;;) {
256 o = p->p_lock;
257 KKASSERT((o & PLOCK_MASK) > 0);
258 cpu_ccfence();
259 n = (o - 1) & ~PLOCK_WAITING;
260 if (atomic_cmpset_int(&p->p_lock, o, n)) {
261 if (o & PLOCK_WAITING)
262 wakeup(&p->p_lock);
263 break;
264 }
265 }
266 }
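/*
 * Editor's illustrative sketch (not part of the original source): the
 * typical short-term hold pattern described above.  A caller bumps the
 * hold count with PHOLD() so the process cannot be reaped out from under
 * it, then releases the hold with PRELE().  The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static void
example_inspect_proc(struct proc *p)
{
	PHOLD(p);			/* keep (p) from being ripped out */
	/* ... read fields of (p), possibly blocking ... */
	PRELE(p);			/* (p) may become invalid after this */
}
#endif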
267
268 /*
269 * Hold and flag serialized for zombie reaping purposes.
270 *
271 * This function will fail if it has to block, returning non-zero with
272  * neither the flag set nor the hold count bumped. Note that we must block

273 * without holding a ref, meaning that the caller must ensure that (p)
274 * remains valid through some other interlock (typically on its parent
275 * process's p_token).
276 *
277 * Zero is returned on success. The hold count will be incremented and
278 * the serialization flag acquired. Note that serialization is only against
279 * other pholdzomb() calls, not against phold() calls.
280 */
281 int
282 pholdzomb(struct proc *p)
283 {
284 int o;
285 int n;
286
287 /*
288 * Fast path
289 */
290 if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
291 return(0);
292
293 /*
294 * Slow path
295 */
296 for (;;) {
297 o = p->p_lock;
298 cpu_ccfence();
299 if ((o & PLOCK_ZOMB) == 0) {
300 n = (o + 1) | PLOCK_ZOMB;
301 if (atomic_cmpset_int(&p->p_lock, o, n))
302 return(0);
303 } else {
304 KKASSERT((o & PLOCK_MASK) > 0);
305 n = o | PLOCK_WAITING;
306 tsleep_interlock(&p->p_lock, 0);
307 if (atomic_cmpset_int(&p->p_lock, o, n)) {
308 tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
309 /* (p) can be ripped out at this point */
310 return(1);
311 }
312 }
313 }
314 }
315
316 /*
317 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
318 *
319 * WARNING! On last release (p) can become instantly invalid due to
320 * MP races.
321 */
322 void
323 prelezomb(struct proc *p)
324 {
325 int o;
326 int n;
327
328 /*
329 * Fast path
330 */
331 if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
332 return;
333
334 /*
335 * Slow path
336 */
337 KKASSERT(p->p_lock & PLOCK_ZOMB);
338 for (;;) {
339 o = p->p_lock;
340 KKASSERT((o & PLOCK_MASK) > 0);
341 cpu_ccfence();
342 n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
343 if (atomic_cmpset_int(&p->p_lock, o, n)) {
344 if (o & PLOCK_WAITING)
345 wakeup(&p->p_lock);
346 break;
347 }
348 }
349 }
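/*
 * Editor's illustrative sketch (not part of the original source): a
 * reaper typically retries pholdzomb() while holding the parent's
 * p_token, which is the interlock that keeps the child valid across the
 * block described above.  Names are hypothetical and error handling is
 * abbreviated.
 */
#if 0	/* example only */
static void
example_reap_child(struct proc *parent, struct proc *child)
{
	/* caller holds parent->p_token, keeping (child) valid */
	while (pholdzomb(child) != 0)
		;		/* blocked and lost the race, try again */
	/* ... reap (child) ... */
	prelezomb(child);
}
#endif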
350
351 /*
352 * Is p an inferior of the current process?
353 *
354 * No requirements.
355 */
356 int
357 inferior(struct proc *p)
358 {
359 struct proc *p2;
360
361 PHOLD(p);
362 lwkt_gettoken_shared(&p->p_token);
363 while (p != curproc) {
364 if (p->p_pid == 0) {
365 lwkt_reltoken(&p->p_token);
366 return (0);
367 }
368 p2 = p->p_pptr;
369 PHOLD(p2);
370 lwkt_reltoken(&p->p_token);
371 PRELE(p);
372 lwkt_gettoken_shared(&p2->p_token);
373 p = p2;
374 }
375 lwkt_reltoken(&p->p_token);
376 PRELE(p);
377
378 return (1);
379 }
380
381 /*
382 * Locate a process by number. The returned process will be referenced and
383 * must be released with PRELE().
384 *
385 * No requirements.
386 */
387 struct proc *
388 pfind(pid_t pid)
389 {
390 struct proc *p = curproc;
391 int n;
392
393 /*
394 * Shortcut the current process
395 */
396 if (p && p->p_pid == pid) {
397 PHOLD(p);
398 return (p);
399 }
400
401 /*
402 * Otherwise find it in the hash table.
403 */
404 n = ALLPROC_HASH(pid);
405
406 lwkt_gettoken_shared(&proc_tokens[n]);
407 LIST_FOREACH(p, &allprocs[n], p_list) {
408 if (p->p_stat == SZOMB)
409 continue;
410 if (p->p_pid == pid) {
411 PHOLD(p);
412 lwkt_reltoken(&proc_tokens[n]);
413 return (p);
414 }
415 }
416 lwkt_reltoken(&proc_tokens[n]);
417
418 return (NULL);
419 }
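/*
 * Editor's illustrative sketch (not part of the original source): pfind()
 * returns the process already held, so the caller only needs to balance
 * it with PRELE().  The function name is hypothetical and permission
 * checks are omitted.
 */
#if 0	/* example only */
static int
example_signal_pid(pid_t pid, int sig)
{
	struct proc *p;

	if ((p = pfind(pid)) == NULL)
		return (ESRCH);
	ksignal(p, sig);
	PRELE(p);		/* release the reference from pfind() */
	return (0);
}
#endif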
420
421 /*
422 * Locate a process by number. The returned process is NOT referenced.
423 * The result will not be stable and is typically only used to validate
424 * against a process that the caller has in-hand.
425 *
426 * No requirements.
427 */
428 struct proc *
429 pfindn(pid_t pid)
430 {
431 struct proc *p = curproc;
432 int n;
433
434 /*
435 * Shortcut the current process
436 */
437 if (p && p->p_pid == pid)
438 return (p);
439
440 /*
441 * Otherwise find it in the hash table.
442 */
443 n = ALLPROC_HASH(pid);
444
445 lwkt_gettoken_shared(&proc_tokens[n]);
446 LIST_FOREACH(p, &allprocs[n], p_list) {
447 if (p->p_stat == SZOMB)
448 continue;
449 if (p->p_pid == pid) {
450 lwkt_reltoken(&proc_tokens[n]);
451 return (p);
452 }
453 }
454 lwkt_reltoken(&proc_tokens[n]);
455
456 return (NULL);
457 }
458
459 /*
460 * Locate a process on the zombie list. Return a process or NULL.
461 * The returned process will be referenced and the caller must release
462 * it with PRELE().
463 *
464 * No other requirements.
465 */
466 struct proc *
467 zpfind(pid_t pid)
468 {
469 struct proc *p = curproc;
470 int n;
471
472 /*
473 * Shortcut the current process
474 */
475 if (p && p->p_pid == pid) {
476 PHOLD(p);
477 return (p);
478 }
479
480 /*
481 * Otherwise find it in the hash table.
482 */
483 n = ALLPROC_HASH(pid);
484
485 lwkt_gettoken_shared(&proc_tokens[n]);
486 LIST_FOREACH(p, &allprocs[n], p_list) {
487 if (p->p_stat != SZOMB)
488 continue;
489 if (p->p_pid == pid) {
490 PHOLD(p);
491 lwkt_reltoken(&proc_tokens[n]);
492 return (p);
493 }
494 }
495 lwkt_reltoken(&proc_tokens[n]);
496
497 return (NULL);
498 }
499
500
501 void
502 pgref(struct pgrp *pgrp)
503 {
504 refcount_acquire(&pgrp->pg_refs);
505 }
506
507 void
508 pgrel(struct pgrp *pgrp)
509 {
510 int count;
511 int n;
512
513 n = PGRP_HASH(pgrp->pg_id);
514 for (;;) {
515 count = pgrp->pg_refs;
516 cpu_ccfence();
517 KKASSERT(count > 0);
518 if (count == 1) {
519 lwkt_gettoken(&proc_tokens[n]);
520 if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
521 break;
522 lwkt_reltoken(&proc_tokens[n]);
523 /* retry */
524 } else {
525 if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
526 return;
527 /* retry */
528 }
529 }
530
531 /*
532 	 * Successful 1->0 transition, proc_tokens[n] is held.
533 */
534 LIST_REMOVE(pgrp, pg_list);
535
536 /*
537 * Reset any sigio structures pointing to us as a result of
538 * F_SETOWN with our pgid.
539 */
540 funsetownlst(&pgrp->pg_sigiolst);
541
542 if (pgrp->pg_session->s_ttyp != NULL &&
543 pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
544 pgrp->pg_session->s_ttyp->t_pgrp = NULL;
545 }
546 lwkt_reltoken(&proc_tokens[n]);
547
548 sess_rele(pgrp->pg_session);
549 kfree(pgrp, M_PGRP);
550 }
551
552 /*
553 * Locate a process group by number. The returned process group will be
554 * referenced w/pgref() and must be released with pgrel() (or assigned
555 * somewhere if you wish to keep the reference).
556 *
557 * No requirements.
558 */
559 struct pgrp *
560 pgfind(pid_t pgid)
561 {
562 struct pgrp *pgrp;
563 int n;
564
565 n = PGRP_HASH(pgid);
566 lwkt_gettoken_shared(&proc_tokens[n]);
567
568 LIST_FOREACH(pgrp, &allpgrps[n], pg_list) {
569 if (pgrp->pg_id == pgid) {
570 refcount_acquire(&pgrp->pg_refs);
571 lwkt_reltoken(&proc_tokens[n]);
572 return (pgrp);
573 }
574 }
575 lwkt_reltoken(&proc_tokens[n]);
576 return (NULL);
577 }
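/*
 * Editor's illustrative sketch (not part of the original source): pgfind()
 * returns the pgrp with a reference from pgref(), which the caller must
 * drop with pgrel() unless the reference is being stored somewhere.  The
 * function name is hypothetical.
 */
#if 0	/* example only */
static int
example_pgrp_exists(pid_t pgid)
{
	struct pgrp *pgrp;

	if ((pgrp = pgfind(pgid)) == NULL)
		return (0);
	pgrel(pgrp);		/* drop the reference pgfind() added */
	return (1);
}
#endif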
578
579 /*
580 * Move p to a new or existing process group (and session)
581 *
582 * No requirements.
583 */
584 int
585 enterpgrp(struct proc *p, pid_t pgid, int mksess)
586 {
587 struct pgrp *pgrp;
588 struct pgrp *opgrp;
589 int error;
590
591 pgrp = pgfind(pgid);
592
593 KASSERT(pgrp == NULL || !mksess,
594 ("enterpgrp: setsid into non-empty pgrp"));
595 KASSERT(!SESS_LEADER(p),
596 ("enterpgrp: session leader attempted setpgrp"));
597
598 if (pgrp == NULL) {
599 pid_t savepid = p->p_pid;
600 struct proc *np;
601 int n;
602
603 /*
604 * new process group
605 */
606 KASSERT(p->p_pid == pgid,
607 ("enterpgrp: new pgrp and pid != pgid"));
608 pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
609 pgrp->pg_id = pgid;
610 LIST_INIT(&pgrp->pg_members);
611 pgrp->pg_jobc = 0;
612 SLIST_INIT(&pgrp->pg_sigiolst);
613 lwkt_token_init(&pgrp->pg_token, "pgrp_token");
614 refcount_init(&pgrp->pg_refs, 1);
615 lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
616
617 n = PGRP_HASH(pgid);
618
619 if ((np = pfindn(savepid)) == NULL || np != p) {
621 error = ESRCH;
622 kfree(pgrp, M_PGRP);
623 goto fatal;
624 }
625
626 lwkt_gettoken(&proc_tokens[n]);
627 if (mksess) {
628 struct session *sess;
629
630 /*
631 * new session
632 */
633 sess = kmalloc(sizeof(struct session), M_SESSION,
634 M_WAITOK | M_ZERO);
635 lwkt_gettoken(&p->p_token);
636 sess->s_leader = p;
637 sess->s_sid = p->p_pid;
638 sess->s_count = 1;
639 sess->s_ttyvp = NULL;
640 sess->s_ttyp = NULL;
641 bcopy(p->p_session->s_login, sess->s_login,
642 sizeof(sess->s_login));
643 pgrp->pg_session = sess;
644 KASSERT(p == curproc,
645 ("enterpgrp: mksession and p != curproc"));
646 p->p_flags &= ~P_CONTROLT;
647 LIST_INSERT_HEAD(&allsessn[n], sess, s_list);
648 lwkt_reltoken(&p->p_token);
649 } else {
650 lwkt_gettoken(&p->p_token);
651 pgrp->pg_session = p->p_session;
652 sess_hold(pgrp->pg_session);
653 lwkt_reltoken(&p->p_token);
654 }
655 LIST_INSERT_HEAD(&allpgrps[n], pgrp, pg_list);
656
657 lwkt_reltoken(&proc_tokens[n]);
658 } else if (pgrp == p->p_pgrp) {
659 pgrel(pgrp);
660 goto done;
661 } /* else pgfind() referenced the pgrp */
662
663 lwkt_gettoken(&pgrp->pg_token);
664 lwkt_gettoken(&p->p_token);
665
666 /*
667 * Replace p->p_pgrp, handling any races that occur.
668 */
669 while ((opgrp = p->p_pgrp) != NULL) {
670 pgref(opgrp);
671 lwkt_gettoken(&opgrp->pg_token);
672 if (opgrp != p->p_pgrp) {
673 lwkt_reltoken(&opgrp->pg_token);
674 pgrel(opgrp);
675 continue;
676 }
677 LIST_REMOVE(p, p_pglist);
678 break;
679 }
680 p->p_pgrp = pgrp;
681 LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
682
683 /*
684 * Adjust eligibility of affected pgrps to participate in job control.
685 * Increment eligibility counts before decrementing, otherwise we
686 * could reach 0 spuriously during the first call.
687 */
688 fixjobc(p, pgrp, 1);
689 if (opgrp) {
690 fixjobc(p, opgrp, 0);
691 lwkt_reltoken(&opgrp->pg_token);
692 pgrel(opgrp); /* manual pgref */
693 pgrel(opgrp); /* p->p_pgrp ref */
694 }
695 lwkt_reltoken(&p->p_token);
696 lwkt_reltoken(&pgrp->pg_token);
697 done:
698 error = 0;
699 fatal:
700 return (error);
701 }
702
703 /*
704 * Remove process from process group
705 *
706 * No requirements.
707 */
708 int
709 leavepgrp(struct proc *p)
710 {
711 struct pgrp *pg = p->p_pgrp;
712
713 lwkt_gettoken(&p->p_token);
714 while ((pg = p->p_pgrp) != NULL) {
715 pgref(pg);
716 lwkt_gettoken(&pg->pg_token);
717 if (p->p_pgrp != pg) {
718 lwkt_reltoken(&pg->pg_token);
719 pgrel(pg);
720 continue;
721 }
722 p->p_pgrp = NULL;
723 LIST_REMOVE(p, p_pglist);
724 lwkt_reltoken(&pg->pg_token);
725 pgrel(pg); /* manual pgref */
726 pgrel(pg); /* p->p_pgrp ref */
727 break;
728 }
729 lwkt_reltoken(&p->p_token);
730
731 return (0);
732 }
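/*
 * Editor's illustrative sketch (not part of the original source): how a
 * setsid()-style path might use enterpgrp().  Passing mksess=1 creates a
 * new session and process group keyed by the caller's pid; enterpgrp()
 * itself asserts that the caller is not already a session leader, and the
 * real system call performs additional checks before getting here.
 */
#if 0	/* example only */
static int
example_setsid(struct proc *p)
{
	/* p must be curproc when mksess != 0 */
	return (enterpgrp(p, p->p_pid, 1));
}
#endif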
733
734 /*
735 * Adjust the ref count on a session structure. When the ref count falls to
736 * zero the tty is disassociated from the session and the session structure
737  * is freed. Note that tty association is not itself ref-counted.
738 *
739 * No requirements.
740 */
741 void
742 sess_hold(struct session *sp)
743 {
744 atomic_add_int(&sp->s_count, 1);
745 }
746
747 /*
748 * No requirements.
749 */
750 void
751 sess_rele(struct session *sess)
752 {
753 struct tty *tp;
754 int count;
755 int n;
756
757 n = SESS_HASH(sess->s_sid);
758 for (;;) {
759 count = sess->s_count;
760 cpu_ccfence();
761 KKASSERT(count > 0);
762 if (count == 1) {
763 lwkt_gettoken(&tty_token);
764 lwkt_gettoken(&proc_tokens[n]);
765 if (atomic_cmpset_int(&sess->s_count, 1, 0))
766 break;
767 lwkt_reltoken(&proc_tokens[n]);
768 lwkt_reltoken(&tty_token);
769 /* retry */
770 } else {
771 if (atomic_cmpset_int(&sess->s_count, count, count - 1))
772 return;
773 /* retry */
774 }
775 }
776
777 /*
778 * Successful 1->0 transition and tty_token is held.
779 */
780 LIST_REMOVE(sess, s_list);
781
782 if (sess->s_ttyp && sess->s_ttyp->t_session) {
783 #ifdef TTY_DO_FULL_CLOSE
784 /* FULL CLOSE, see ttyclearsession() */
785 KKASSERT(sess->s_ttyp->t_session == sess);
786 sess->s_ttyp->t_session = NULL;
787 #else
788 /* HALF CLOSE, see ttyclearsession() */
789 if (sess->s_ttyp->t_session == sess)
790 sess->s_ttyp->t_session = NULL;
791 #endif
792 }
793 if ((tp = sess->s_ttyp) != NULL) {
794 sess->s_ttyp = NULL;
795 ttyunhold(tp);
796 }
797 lwkt_reltoken(&proc_tokens[n]);
798 lwkt_reltoken(&tty_token);
799
800 kfree(sess, M_SESSION);
801 }
802
803 /*
804 * Adjust pgrp jobc counters when specified process changes process group.
805 * We count the number of processes in each process group that "qualify"
806 * the group for terminal job control (those with a parent in a different
807 * process group of the same session). If that count reaches zero, the
808 * process group becomes orphaned. Check both the specified process'
809 * process group and that of its children.
810 * entering == 0 => p is leaving specified group.
811 * entering == 1 => p is entering specified group.
812 *
813 * No requirements.
814 */
815 void
816 fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
817 {
818 struct pgrp *hispgrp;
819 struct session *mysession;
820 struct proc *np;
821
822 /*
823 * Check p's parent to see whether p qualifies its own process
824 * group; if so, adjust count for p's process group.
825 */
826 lwkt_gettoken(&p->p_token); /* p_children scan */
827 lwkt_gettoken(&pgrp->pg_token);
828
829 mysession = pgrp->pg_session;
830 if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
831 hispgrp->pg_session == mysession) {
832 if (entering)
833 pgrp->pg_jobc++;
834 else if (--pgrp->pg_jobc == 0)
835 orphanpg(pgrp);
836 }
837
838 /*
839 * Check this process' children to see whether they qualify
840 * their process groups; if so, adjust counts for children's
841 * process groups.
842 */
843 LIST_FOREACH(np, &p->p_children, p_sibling) {
844 PHOLD(np);
845 lwkt_gettoken(&np->p_token);
846 if ((hispgrp = np->p_pgrp) != pgrp &&
847 hispgrp->pg_session == mysession &&
848 np->p_stat != SZOMB) {
849 pgref(hispgrp);
850 lwkt_gettoken(&hispgrp->pg_token);
851 if (entering)
852 hispgrp->pg_jobc++;
853 else if (--hispgrp->pg_jobc == 0)
854 orphanpg(hispgrp);
855 lwkt_reltoken(&hispgrp->pg_token);
856 pgrel(hispgrp);
857 }
858 lwkt_reltoken(&np->p_token);
859 PRELE(np);
860 }
861 KKASSERT(pgrp->pg_refs > 0);
862 lwkt_reltoken(&pgrp->pg_token);
863 lwkt_reltoken(&p->p_token);
864 }
865
866 /*
867 * A process group has become orphaned;
868 * if there are any stopped processes in the group,
869  * hang up all processes in that group.
870 *
871 * The caller must hold pg_token.
872 */
873 static void
874 orphanpg(struct pgrp *pg)
875 {
876 struct proc *p;
877
878 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
879 if (p->p_stat == SSTOP) {
880 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
881 ksignal(p, SIGHUP);
882 ksignal(p, SIGCONT);
883 }
884 return;
885 }
886 }
887 }
888
889 /*
890 * Add a new process to the allproc list and the PID hash. This
891 * also assigns a pid to the new process.
892 *
893 * No requirements.
894 */
895 void
896 proc_add_allproc(struct proc *p)
897 {
898 int random_offset;
899
900 if ((random_offset = randompid) != 0) {
901 read_random(&random_offset, sizeof(random_offset));
902 random_offset = (random_offset & 0x7FFFFFFF) % randompid;
903 }
904 proc_makepid(p, random_offset);
905 }
906
907 /*
908 * Calculate a new process pid. This function is integrated into
909  * proc_add_allproc() to guarantee that the new pid is not reused before
910 * the new process can be added to the allproc list.
911 *
912 * p_pid is assigned and the process is added to the allproc hash table
913 */
914 static
915 void
916 proc_makepid(struct proc *p, int random_offset)
917 {
918 static pid_t nextpid; /* heuristic, allowed to race */
919 struct pgrp *pg;
920 struct proc *ps;
921 struct session *sess;
922 pid_t base;
923 int n;
924
925 	/*
926 	 * Calculate a hash index and find an unused process id, looping as
927 	 * needed.  Collisions step by ALLPROC_HSIZE, keeping the same hash index.
928 	 */
929 if (random_offset)
930 atomic_add_int(&nextpid, random_offset);
931 retry:
932 base = atomic_fetchadd_int(&nextpid, 1) + 1;
933 if (base >= PID_MAX) {
934 base = base % PID_MAX;
935 if (base < 100)
936 base += 100;
937 }
938 n = ALLPROC_HASH(base);
939 lwkt_gettoken(&proc_tokens[n]);
940
941 LIST_FOREACH(ps, &allprocs[n], p_list) {
942 if (ps->p_pid == base) {
943 base += ALLPROC_HSIZE;
944 if (base >= PID_MAX) {
945 lwkt_reltoken(&proc_tokens[n]);
946 goto retry;
947 }
948 }
949 }
950 LIST_FOREACH(pg, &allpgrps[n], pg_list) {
951 if (pg->pg_id == base) {
952 base += ALLPROC_HSIZE;
953 if (base >= PID_MAX) {
954 lwkt_reltoken(&proc_tokens[n]);
955 goto retry;
956 }
957 }
958 }
959 LIST_FOREACH(sess, &allsessn[n], s_list) {
960 if (sess->s_sid == base) {
961 base += ALLPROC_HSIZE;
962 if (base >= PID_MAX) {
963 lwkt_reltoken(&proc_tokens[n]);
964 goto retry;
965 }
966 }
967 }
968
969 /*
970 * Assign the pid and insert the process.
971 */
972 p->p_pid = base;
973 LIST_INSERT_HEAD(&allprocs[n], p, p_list);
974 lwkt_reltoken(&proc_tokens[n]);
975 }
976
977 /*
978 * Called from exit1 to place the process into a zombie state.
979 * The process is removed from the pid hash and p_stat is set
980 * to SZOMB. Normal pfind[n]() calls will not find it any more.
981 *
982 * Caller must hold p->p_token. We are required to wait until p_lock
983 * becomes zero before we can manipulate the list, allowing allproc
984 * scans to guarantee consistency during a list scan.
985 */
986 void
987 proc_move_allproc_zombie(struct proc *p)
988 {
989 int n;
990
991 n = ALLPROC_HASH(p->p_pid);
992 PSTALL(p, "reap1", 0);
993 lwkt_gettoken(&proc_tokens[n]);
994
995 PSTALL(p, "reap1a", 0);
996 p->p_stat = SZOMB;
997
998 lwkt_reltoken(&proc_tokens[n]);
999 dsched_exit_proc(p);
1000 }
1001
1002 /*
1003 * This routine is called from kern_wait() and will remove the process
1004 * from the zombie list and the sibling list. This routine will block
1005  * if someone has a lock on the process (p_lock).
1006 *
1007 * Caller must hold p->p_token. We are required to wait until p_lock
1008 * becomes zero before we can manipulate the list, allowing allproc
1009 * scans to guarantee consistency during a list scan.
1010 */
1011 void
1012 proc_remove_zombie(struct proc *p)
1013 {
1014 int n;
1015
1016 n = ALLPROC_HASH(p->p_pid);
1017
1018 PSTALL(p, "reap2", 0);
1019 lwkt_gettoken(&proc_tokens[n]);
1020 PSTALL(p, "reap2a", 0);
1021 	LIST_REMOVE(p, p_list);		/* remove from master list */
1022 LIST_REMOVE(p, p_sibling); /* and from sibling list */
1023 p->p_pptr = NULL;
1024 lwkt_reltoken(&proc_tokens[n]);
1025 }
1026
1027 /*
1028 * Handle various requirements prior to returning to usermode. Called from
1029 * platform trap and system call code.
1030 */
1031 void
1032 lwpuserret(struct lwp *lp)
1033 {
1034 struct proc *p = lp->lwp_proc;
1035
1036 if (lp->lwp_mpflags & LWP_MP_VNLRU) {
1037 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
1038 allocvnode_gc();
1039 }
1040 if (lp->lwp_mpflags & LWP_MP_WEXIT) {
1041 lwkt_gettoken(&p->p_token);
1042 lwp_exit(0, NULL);
1043 lwkt_reltoken(&p->p_token); /* NOT REACHED */
1044 }
1045 }
1046
1047 /*
1048 * Kernel threads run from user processes can also accumulate deferred
1049 * actions which need to be acted upon. Callers include:
1050 *
1051 * nfsd - Can allocate lots of vnodes
1052 */
1053 void
1054 lwpkthreaddeferred(void)
1055 {
1056 struct lwp *lp = curthread->td_lwp;
1057
1058 if (lp) {
1059 if (lp->lwp_mpflags & LWP_MP_VNLRU) {
1060 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
1061 allocvnode_gc();
1062 }
1063 }
1064 }
1065
1066 /*
1067 * Scan all processes on the allproc list. The process is automatically
1068 * held for the callback. A return value of -1 terminates the loop.
1069 * Zombie procs are skipped.
1070 *
1071 * The callback is made with the process held and proc_token held.
1072 *
1073 * We limit the scan to the number of processes as-of the start of
1074 * the scan so as not to get caught up in an endless loop if new processes
1075 * are created more quickly than we can scan the old ones. Add a little
1076 * slop to try to catch edge cases since nprocs can race.
1077 *
1078 * No requirements.
1079 */
1080 void
1081 allproc_scan(int (*callback)(struct proc *, void *), void *data)
1082 {
1083 int limit = nprocs + ncpus;
1084 struct proc *p;
1085 int r;
1086 int n;
1087
1088 /*
1089 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
1090 * process from being removed from the allproc list or the zombproc
1091 * list.
1092 */
1093 for (n = 0; n < ALLPROC_HSIZE; ++n) {
1094 if (LIST_FIRST(&allprocs[n]) == NULL)
1095 continue;
1096 lwkt_gettoken(&proc_tokens[n]);
1097 LIST_FOREACH(p, &allprocs[n], p_list) {
1098 if (p->p_stat == SZOMB)
1099 continue;
1100 PHOLD(p);
1101 r = callback(p, data);
1102 PRELE(p);
1103 if (r < 0)
1104 break;
1105 if (--limit < 0)
1106 break;
1107 }
1108 lwkt_reltoken(&proc_tokens[n]);
1109
1110 /*
1111 * Check if asked to stop early
1112 */
1113 if (p)
1114 break;
1115 }
1116 }
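/*
 * Editor's illustrative sketch (not part of the original source): a
 * callback suitable for allproc_scan().  It runs with the process held
 * and the per-hash proc token held; returning -1 would terminate the
 * scan early.  Names are hypothetical.
 */
#if 0	/* example only */
static int
example_count_stopped(struct proc *p, void *data)
{
	int *countp = data;

	if (p->p_stat == SSTOP)
		++*countp;
	return (0);		/* keep scanning */
}

/* usage:  int n = 0;  allproc_scan(example_count_stopped, &n); */
#endif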
1117
1118 /*
1119 * Scan all lwps of processes on the allproc list. The lwp is automatically
1120 * held for the callback. A return value of -1 terminates the loop.
1121 *
1122  * The callback is made with the process and lwp both held, and proc_token held.
1123 *
1124 * No requirements.
1125 */
1126 void
1127 alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
1128 {
1129 struct proc *p;
1130 struct lwp *lp;
1131 int r = 0;
1132 int n;
1133
1134 for (n = 0; n < ALLPROC_HSIZE; ++n) {
1135 if (LIST_FIRST(&allprocs[n]) == NULL)
1136 continue;
1137 lwkt_gettoken(&proc_tokens[n]);
1138 LIST_FOREACH(p, &allprocs[n], p_list) {
1139 if (p->p_stat == SZOMB)
1140 continue;
1141 PHOLD(p);
1142 lwkt_gettoken(&p->p_token);
1143 FOREACH_LWP_IN_PROC(lp, p) {
1144 LWPHOLD(lp);
1145 r = callback(lp, data);
1146 LWPRELE(lp);
1147 }
1148 lwkt_reltoken(&p->p_token);
1149 PRELE(p);
1150 if (r < 0)
1151 break;
1152 }
1153 lwkt_reltoken(&proc_tokens[n]);
1154
1155 /*
1156 * Asked to exit early
1157 */
1158 if (p)
1159 break;
1160 }
1161 }
1162
1163 /*
1164 * Scan all processes on the zombproc list. The process is automatically
1165 * held for the callback. A return value of -1 terminates the loop.
1166 *
1167 * No requirements.
1168  * The callback is made with the process held and proc_token held.
1169 */
1170 void
1171 zombproc_scan(int (*callback)(struct proc *, void *), void *data)
1172 {
1173 struct proc *p;
1174 int r;
1175 int n;
1176
1177 /*
1178 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
1179 * process from being removed from the allproc list or the zombproc
1180 * list.
1181 */
1182 for (n = 0; n < ALLPROC_HSIZE; ++n) {
1183 if (LIST_FIRST(&allprocs[n]) == NULL)
1184 continue;
1185 lwkt_gettoken(&proc_tokens[n]);
1186 LIST_FOREACH(p, &allprocs[n], p_list) {
1187 if (p->p_stat != SZOMB)
1188 continue;
1189 PHOLD(p);
1190 r = callback(p, data);
1191 PRELE(p);
1192 if (r < 0)
1193 break;
1194 }
1195 lwkt_reltoken(&proc_tokens[n]);
1196
1197 /*
1198 * Check if asked to stop early
1199 */
1200 if (p)
1201 break;
1202 }
1203 }
1204
1205 #include "opt_ddb.h"
1206 #ifdef DDB
1207 #include <ddb/ddb.h>
1208
1209 /*
1210 * Debugging only
1211 */
1212 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
1213 {
1214 struct pgrp *pgrp;
1215 struct proc *p;
1216 int i;
1217
1218 for (i = 0; i < ALLPROC_HSIZE; ++i) {
1219 if (LIST_EMPTY(&allpgrps[i]))
1220 continue;
1221 kprintf("\tindx %d\n", i);
1222 LIST_FOREACH(pgrp, &allpgrps[i], pg_list) {
1223 kprintf("\tpgrp %p, pgid %ld, sess %p, "
1224 "sesscnt %d, mem %p\n",
1225 (void *)pgrp, (long)pgrp->pg_id,
1226 (void *)pgrp->pg_session,
1227 pgrp->pg_session->s_count,
1228 (void *)LIST_FIRST(&pgrp->pg_members));
1229 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1230 kprintf("\t\tpid %ld addr %p pgrp %p\n",
1231 (long)p->p_pid, (void *)p,
1232 (void *)p->p_pgrp);
1233 }
1234 }
1235 }
1236 }
1237 #endif /* DDB */
1238
1239 /*
1240 * The caller must hold proc_token.
1241 */
1242 static int
1243 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
1244 {
1245 struct kinfo_proc ki;
1246 struct lwp *lp;
1247 int skp = 0, had_output = 0;
1248 int error;
1249
1250 bzero(&ki, sizeof(ki));
1251 lwkt_gettoken_shared(&p->p_token);
1252 fill_kinfo_proc(p, &ki);
1253 if ((flags & KERN_PROC_FLAG_LWP) == 0)
1254 skp = 1;
1255 error = 0;
1256 FOREACH_LWP_IN_PROC(lp, p) {
1257 LWPHOLD(lp);
1258 fill_kinfo_lwp(lp, &ki.kp_lwp);
1259 had_output = 1;
1260 error = SYSCTL_OUT(req, &ki, sizeof(ki));
1261 LWPRELE(lp);
1262 if (error)
1263 break;
1264 if (skp)
1265 break;
1266 }
1267 lwkt_reltoken(&p->p_token);
1268 /* We need to output at least the proc, even if there is no lwp. */
1269 if (had_output == 0) {
1270 error = SYSCTL_OUT(req, &ki, sizeof(ki));
1271 }
1272 return (error);
1273 }
1274
1275 /*
1276 * The caller must hold proc_token.
1277 */
1278 static int
1279 sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
1280 {
1281 struct kinfo_proc ki;
1282 int error;
1283
1284 fill_kinfo_proc_kthread(td, &ki);
1285 error = SYSCTL_OUT(req, &ki, sizeof(ki));
1286 if (error)
1287 return error;
1288 return(0);
1289 }
1290
1291 /*
1292 * No requirements.
1293 */
1294 static int
1295 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
1296 {
1297 int *name = (int *)arg1;
1298 int oid = oidp->oid_number;
1299 u_int namelen = arg2;
1300 struct proc *p;
1301 struct thread *td;
1302 struct thread *marker;
1303 int flags = 0;
1304 int error = 0;
1305 int n;
1306 int origcpu;
1307 struct ucred *cr1 = curproc->p_ucred;
1308
1309 flags = oid & KERN_PROC_FLAGMASK;
1310 oid &= ~KERN_PROC_FLAGMASK;
1311
1312 if ((oid == KERN_PROC_ALL && namelen != 0) ||
1313 (oid != KERN_PROC_ALL && namelen != 1)) {
1314 return (EINVAL);
1315 }
1316
1317 /*
1318 * proc_token protects the allproc list and PHOLD() prevents the
1319 * process from being removed from the allproc list or the zombproc
1320 * list.
1321 */
1322 if (oid == KERN_PROC_PID) {
1323 p = pfind((pid_t)name[0]);
1324 if (p) {
1325 if (PRISON_CHECK(cr1, p->p_ucred))
1326 error = sysctl_out_proc(p, req, flags);
1327 PRELE(p);
1328 }
1329 goto post_threads;
1330 }
1331 p = NULL;
1332
1333 if (!req->oldptr) {
1334 /* overestimate by 5 procs */
1335 error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
1336 if (error)
1337 goto post_threads;
1338 }
1339
1340 for (n = 0; n < ALLPROC_HSIZE; ++n) {
1341 if (LIST_EMPTY(&allprocs[n]))
1342 continue;
1343 lwkt_gettoken_shared(&proc_tokens[n]);
1344 LIST_FOREACH(p, &allprocs[n], p_list) {
1345 /*
1346 * Show a user only their processes.
1347 */
1348 if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
1349 continue;
1350 /*
1351 * Skip embryonic processes.
1352 */
1353 if (p->p_stat == SIDL)
1354 continue;
1355 /*
1356 * TODO - make more efficient (see notes below).
1357 * do by session.
1358 */
1359 switch (oid) {
1360 case KERN_PROC_PGRP:
1361 /* could do this by traversing pgrp */
1362 if (p->p_pgrp == NULL ||
1363 p->p_pgrp->pg_id != (pid_t)name[0])
1364 continue;
1365 break;
1366
1367 case KERN_PROC_TTY:
1368 if ((p->p_flags & P_CONTROLT) == 0 ||
1369 p->p_session == NULL ||
1370 p->p_session->s_ttyp == NULL ||
1371 dev2udev(p->p_session->s_ttyp->t_dev) !=
1372 (udev_t)name[0])
1373 continue;
1374 break;
1375
1376 case KERN_PROC_UID:
1377 if (p->p_ucred == NULL ||
1378 p->p_ucred->cr_uid != (uid_t)name[0])
1379 continue;
1380 break;
1381
1382 case KERN_PROC_RUID:
1383 if (p->p_ucred == NULL ||
1384 p->p_ucred->cr_ruid != (uid_t)name[0])
1385 continue;
1386 break;
1387 }
1388
1389 if (!PRISON_CHECK(cr1, p->p_ucred))
1390 continue;
1391 PHOLD(p);
1392 error = sysctl_out_proc(p, req, flags);
1393 PRELE(p);
1394 if (error) {
1395 lwkt_reltoken(&proc_tokens[n]);
1396 goto post_threads;
1397 }
1398 }
1399 lwkt_reltoken(&proc_tokens[n]);
1400 }
1401
1402 /*
1403 * Iterate over all active cpus and scan their thread list. Start
1404 * with the next logical cpu and end with our original cpu. We
1405 * migrate our own thread to each target cpu in order to safely scan
1406 * its thread list. In the last loop we migrate back to our original
1407 * cpu.
1408 */
1409 origcpu = mycpu->gd_cpuid;
1410 if (!ps_showallthreads || jailed(cr1))
1411 goto post_threads;
1412
1413 marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
1414 marker->td_flags = TDF_MARKER;
1415 error = 0;
1416
1417 for (n = 1; n <= ncpus; ++n) {
1418 globaldata_t rgd;
1419 int nid;
1420
1421 nid = (origcpu + n) % ncpus;
1422 if ((smp_active_mask & CPUMASK(nid)) == 0)
1423 continue;
1424 rgd = globaldata_find(nid);
1425 lwkt_setcpu_self(rgd);
1426
1427 crit_enter();
1428 TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);
1429
1430 while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
1431 TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
1432 TAILQ_INSERT_BEFORE(td, marker, td_allq);
1433 if (td->td_flags & TDF_MARKER)
1434 continue;
1435 if (td->td_proc)
1436 continue;
1437
1438 lwkt_hold(td);
1439 crit_exit();
1440
1441 switch (oid) {
1442 case KERN_PROC_PGRP:
1443 case KERN_PROC_TTY:
1444 case KERN_PROC_UID:
1445 case KERN_PROC_RUID:
1446 break;
1447 default:
1448 error = sysctl_out_proc_kthread(td, req);
1449 break;
1450 }
1451 lwkt_rele(td);
1452 crit_enter();
1453 if (error)
1454 break;
1455 }
1456 TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
1457 crit_exit();
1458
1459 if (error)
1460 break;
1461 }
1462
1463 /*
1464 * Userland scheduler expects us to return on the same cpu we
1465 * started on.
1466 */
1467 if (mycpu->gd_cpuid != origcpu)
1468 lwkt_setcpu_self(globaldata_find(origcpu));
1469
1470 kfree(marker, M_TEMP);
1471
1472 post_threads:
1473 return (error);
1474 }
1475
1476 /*
1477 * This sysctl allows a process to retrieve the argument list or process
1478 * title for another process without groping around in the address space
1479  * of the other process. It also allows a process to set its own "process
1480  * title" to a string of its own choice.
1481 *
1482 * No requirements.
1483 */
1484 static int
1485 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1486 {
1487 int *name = (int*) arg1;
1488 u_int namelen = arg2;
1489 struct proc *p;
1490 struct pargs *opa;
1491 struct pargs *pa;
1492 int error = 0;
1493 struct ucred *cr1 = curproc->p_ucred;
1494
1495 if (namelen != 1)
1496 return (EINVAL);
1497
1498 p = pfind((pid_t)name[0]);
1499 if (p == NULL)
1500 goto done;
1501 lwkt_gettoken(&p->p_token);
1502
1503 if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
1504 goto done;
1505
1506 if (req->newptr && curproc != p) {
1507 error = EPERM;
1508 goto done;
1509 }
1510 if (req->oldptr && (pa = p->p_args) != NULL) {
1511 refcount_acquire(&pa->ar_ref);
1512 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1513 if (refcount_release(&pa->ar_ref))
1514 kfree(pa, M_PARGS);
1515 }
1516 if (req->newptr == NULL)
1517 goto done;
1518
1519 if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
1520 goto done;
1521 }
1522
1523 pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
1524 refcount_init(&pa->ar_ref, 1);
1525 pa->ar_length = req->newlen;
1526 error = SYSCTL_IN(req, pa->ar_args, req->newlen);
1527 if (error) {
1528 kfree(pa, M_PARGS);
1529 goto done;
1530 }
1531
1532
1533 /*
1534 * Replace p_args with the new pa. p_args may have previously
1535 * been NULL.
1536 */
1537 opa = p->p_args;
1538 p->p_args = pa;
1539
1540 if (opa) {
1541 KKASSERT(opa->ar_ref > 0);
1542 if (refcount_release(&opa->ar_ref)) {
1543 kfree(opa, M_PARGS);
1544 /* opa = NULL; */
1545 }
1546 }
1547 done:
1548 if (p) {
1549 lwkt_reltoken(&p->p_token);
1550 PRELE(p);
1551 }
1552 return (error);
1553 }
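/*
 * Editor's illustrative sketch (not part of the original source): how a
 * userland program might read another process's argument list through
 * this handler.  The MIB is { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid }
 * and the result is a sequence of NUL-separated strings.  Error handling
 * is abbreviated.
 */
#if 0	/* userland example only */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

static void
print_args(pid_t pid)
{
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, (int)pid };
	char buf[4096];
	size_t len = sizeof(buf);
	size_t i;

	if (sysctl(mib, 4, buf, &len, NULL, 0) == 0) {
		for (i = 0; i < len; i += strlen(&buf[i]) + 1)
			printf("%s ", &buf[i]);
		printf("\n");
	}
}
#endif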
1554
1555 static int
1556 sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
1557 {
1558 int *name = (int*) arg1;
1559 u_int namelen = arg2;
1560 struct proc *p;
1561 int error = 0;
1562 char *fullpath, *freepath;
1563 struct ucred *cr1 = curproc->p_ucred;
1564
1565 if (namelen != 1)
1566 return (EINVAL);
1567
1568 p = pfind((pid_t)name[0]);
1569 if (p == NULL)
1570 goto done;
1571 lwkt_gettoken_shared(&p->p_token);
1572
1573 /*
1574 * If we are not allowed to see other args, we certainly shouldn't
1575 * get the cwd either. Also check the usual trespassing.
1576 */
1577 if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
1578 goto done;
1579
1580 if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
1581 struct nchandle nch;
1582
1583 cache_copy(&p->p_fd->fd_ncdir, &nch);
1584 error = cache_fullpath(p, &nch, NULL,
1585 &fullpath, &freepath, 0);
1586 cache_drop(&nch);
1587 if (error)
1588 goto done;
1589 error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
1590 kfree(freepath, M_TEMP);
1591 }
1592
1593 done:
1594 if (p) {
1595 lwkt_reltoken(&p->p_token);
1596 PRELE(p);
1597 }
1598 return (error);
1599 }
1600
1601 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");
1602
1603 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1604 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1605
1606 SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1607 sysctl_kern_proc, "Process table");
1608
1609 SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1610 sysctl_kern_proc, "Process table");
1611
1612 SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1613 sysctl_kern_proc, "Process table");
1614
1615 SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1616 sysctl_kern_proc, "Process table");
1617
1618 SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1619 sysctl_kern_proc, "Process table");
1620
1621 SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp, CTLFLAG_RD,
1622 sysctl_kern_proc, "Process table");
1623
1624 SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp, CTLFLAG_RD,
1625 sysctl_kern_proc, "Process table");
1626
1627 SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp, CTLFLAG_RD,
1628 sysctl_kern_proc, "Process table");
1629
1630 SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp, CTLFLAG_RD,
1631 sysctl_kern_proc, "Process table");
1632
1633 SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp, CTLFLAG_RD,
1634 sysctl_kern_proc, "Process table");
1635
1636 SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp, CTLFLAG_RD,
1637 sysctl_kern_proc, "Process table");
1638
1639 SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
1640 sysctl_kern_proc_args, "Process argument list");
1641
1642 SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
1643 	sysctl_kern_proc_cwd, "Process current working directory");