/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/kern/kern_resource.c 216593 2010-12-20 17:08:22Z jhb $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
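/*
 * Illustrative example (not part of the original source): with a table of
 * 64 buckets, uihash is 63, so UIHASH(1001) selects bucket 1001 & 63 == 41.
 * Masking with (size - 1) works because hashinit() always allocates a
 * power-of-two number of buckets.
 */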

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			/* Do not bother to check PRS_NEW processes */
			if (p->p_state == PRS_NEW)
				continue;
			PROC_LOCK(p);
			if (p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}
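
/*
 * Illustrative note (not part of the original source): the value is
 * clamped to [PRIO_MIN, PRIO_MAX] rather than rejected, so a
 * setpriority(PRIO_PROCESS, 0, 100) request quietly becomes nice 20.
 * Lowering the nice value (raising priority) requires
 * PRIV_SCHED_SETPRIORITY; an unprivileged process at nice 5 asking for
 * nice 0 gets EACCES.
 */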

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	/*
	 * Although a lwpid is unique, only the current process is
	 * supported, since there is no efficient way to look up an LWP yet.
	 */
	p = td->td_proc;
	PROC_LOCK(p);

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			pri_to_rtp(td1, &rtp);
		else
			error = ESRCH;
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			error = rtp_to_pri(&rtp, td1);
		else
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid is specified; if one is,
		 * report the highest priority in the process.  There isn't
		 * much more you can do as there is only room to return a
		 * single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idle priority, there is
		 * a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need
		 * (and the idleprio process can't run due to a CPU-bound
		 * normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our thread;
		 * if we are operating on another process, apply the change
		 * to every thread in that process.  Specifying our own pid,
		 * rather than leaving it zero, selects the latter behavior.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
	} else
		thread_unlock(td);
	return (0);
}
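
/*
 * Illustrative mapping example (not part of the original source): an
 * rtprio of { RTP_PRIO_REALTIME, 0 } becomes kernel priority
 * PRI_MIN_REALTIME, the strongest realtime user priority, while
 * { RTP_PRIO_IDLE, RTP_PRIO_MAX } becomes PRI_MIN_IDLE + RTP_PRIO_MAX,
 * the weakest idle priority.  pri_to_rtp() below performs the inverse
 * translation by subtracting the same per-class base from
 * td_base_user_pri.
 */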

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including some
	 * important ones for uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}
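
/*
 * Illustrative note (not part of the original source): once the soft CPU
 * limit is crossed, the "p_cpulimit += 5" above advances the trigger by
 * five seconds of CPU time, so a process that ignores SIGXCPU keeps
 * receiving it roughly every five CPU seconds until rlim_max is reached
 * and killproc() fires.
 */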

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (td->td_proc->p_sysent->sv_fixlimit != NULL)
			td->td_proc->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (td->td_proc->p_sysent->sv_fixlimit != NULL)
		td->td_proc->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * The stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more of it accessible; if it is going down, make
		 * the newly excluded region inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}
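
/*
 * Worked example for the RLIMIT_STACK adjustment above (illustrative,
 * not part of the original source): raising rlim_cur from 8MB to 16MB
 * with the user stack top at sv_usrstack computes addr = sv_usrstack -
 * 16MB and size = 8MB, so vm_map_protect() re-enables access to the 8MB
 * that lie just below the previously accessible region.  Lowering the
 * limit runs the same arithmetic with prot = VM_PROT_NONE to revoke
 * access instead.
 */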

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	u_int64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero. */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert. /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
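
/*
 * Worked example for the bucketing above (illustrative, not part of the
 * original source): with ut = 3 user ticks, st = 1 system tick, it = 0,
 * and tu = 1000000 usec of total runtime, tt = 4, so uu = (1000000 * 3)
 * / 4 = 750000 usec and su = 250000 usec.  The runtime is apportioned
 * among user/system/interrupt time in proportion to the statclock tick
 * counts, with the stored rux_uu/rux_su values keeping each bucket
 * monotonic across calls.
 */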

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		ruxagg(p, td);
		PROC_SUNLOCK(p);
		thread_lock(td);
		*rup = td->td_ru;
		calcru1(p, &td->td_rux, &rup->ru_utime, &rup->ru_stime);
		thread_unlock(td);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch() and a calcru() together.
 * Consumers can safely assume that calcru() is executed only once
 * rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}
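
/*
 * Illustrative note (not part of the original source): the copy-on-write
 * protocol is visible in kern_setrlimit() above: allocate a fresh plimit
 * with lim_alloc(), lim_copy() the shared contents into it while holding
 * the proc lock, modify only the private copy, point p->p_limit at it,
 * and lim_free() the old reference.  Other processes still sharing the
 * old structure are unaffected.
 */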

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Copy the entire rlimit structure for the system limit specified by
 * 'which' into the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the given uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
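			/*
			 * Illustrative note (not part of the original
			 * source): the count starts at 0 here because the
			 * uihold() at the end of this function takes the
			 * caller's reference; for a newly created entry
			 * that hold is the first reference.
			 */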
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   While the count stays above 1, drop it with a lock-free
 *   atomic_cmpset_int() and return without touching the hash.
 * Suboptimal case:
 *   If the release may drop the count to 0, take the hash write
 *   lock first, then release the reference and, if it did reach 0,
 *   unhash and free the structure.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}
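
/*
 * Illustrative trace (not part of the original source): with ui_ref == 3,
 * uifree() succeeds in the lock-free path, leaving ui_ref == 2 without
 * taking uihashtbl_lock.  With ui_ref == 1, the cmpset path is skipped,
 * the hash is write-locked, refcount_release() drops the count to 0, and
 * the entry is unhashed and freed.  If another thread slips in a uihold()
 * in between, refcount_release() returns 0 and uifree() just unlocks.
 */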

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
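
/*
 * Worked example of the optimistic add/roll-back pattern above
 * (illustrative, not part of the original source): with ui_proccnt == 40
 * and max == 40, a chgproccnt(uip, 1, 40) call has atomic_fetchadd_long()
 * return 40, so 40 + 1 > 40 triggers the rollback subtraction and the
 * function returns 0 (limit hit).  The same pattern is used by
 * chgsbsize() and chgptscnt() below.
 */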

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}