/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: releng/5.0/sys/kern/kern_resource.c 104964 2002-10-12 05:32:24Z jeff $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static int donice(struct thread *td, struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
/*
 * MPSAFE
 */
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error = 0;
	struct ksegrp *kg;

	mtx_lock(&Giant);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_ksegrp->kg_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0) {
				FOREACH_KSEGRP_IN_PROC(p, kg) {
					if (kg->kg_nice < low)
						low = kg->kg_nice;
				}
			}
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				FOREACH_KSEGRP_IN_PROC(p, kg) {
					if (kg->kg_nice < low)
						low = kg->kg_nice;
				}
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				FOREACH_KSEGRP_IN_PROC(p, kg) {
					if (kg->kg_nice < low)
						low = kg->kg_nice;
				}
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	mtx_unlock(&Giant);
	return (error);
}
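
/*
 * Illustrative userland sketch (not part of this file, not compiled):
 * the syscall above reports the lowest (most favourable) nice value it
 * finds in td_retval[0].  Because PRIO_MIN is negative, a return value
 * of -1 from getpriority(2) is ambiguous, so callers must clear errno
 * before the call and test it afterwards.
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int
print_own_nice(void)
{
	int prio;

	errno = 0;
	prio = getpriority(PRIO_PROCESS, 0);	/* who == 0 selects the caller */
	if (prio == -1 && errno != 0)
		return (-1);			/* real error, e.g. ESRCH */
	printf("current nice: %d\n", prio);
	return (0);
}
#endif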

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setpriority(td, uap)
	struct thread *td;
	register struct setpriority_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	int found = 0, error = 0;

	mtx_lock(&Giant);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Set "nice" for a process.  Doesn't really understand threaded processes
 * well but does try.  Has the unfortunate side effect of making all the
 * NICE values for a process's ksegrps the same.  This suggests that NICE
 * values should be stored as a process nice and deltas for the ksegrps
 * (but that is not done yet).
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;
	int low = PRIO_MAX + 1;
	struct ksegrp *kg;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	/*
	 * Only the superuser may lower nice below the lowest nice
	 * currently in use by the process, e.g. nices of 4, 3 and 2
	 * allow a nice of 3 but not 1.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_nice < low)
			low = kg->kg_nice;
	}
	if (n < low && suser(td))
		return (EACCES);
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_nice(kg, n);
	}
	return (0);
}
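
/*
 * Worked example of the policy above (an illustrative sketch, not
 * compiled): with ksegrp nice values of 4, 3 and 2, "low" is 2, so an
 * unprivileged caller may set any nice >= 2 (after clamping to the
 * PRIO_MIN..PRIO_MAX range), while a request for 1 fails with EACCES
 * unless the caller passes the suser() check.  The values below are
 * hypothetical and only demonstrate the checks donice() performs.
 */
#if 0
static int
donice_policy_example(struct thread *td)
{
	int low = 2;			/* lowest kg_nice found in the proc */
	int n = 1;			/* requested nice value */

	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < low && suser(td))	/* 1 < 2 and caller not superuser... */
		return (EACCES);	/* ...so the request is refused */
	return (0);			/* otherwise sched_nice() would run */
}
#endif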

/* rtprio system call */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif

/*
 * Set realtime priority
 */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
rtprio(td, uap)
	struct thread *td;
	register struct rtprio_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	struct rtprio rtp;
	int error, cierror = 0;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));

	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		pri_to_rtp(FIRST_KSEGRP_IN_PROC(p), &rtp);
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;
		/* disallow setting rtprio in most cases if not superuser */
		if (suser(td) != 0) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
			if (rtp.type != RTP_PRIO_NORMAL) {
				error = EPERM;
				break;
			}
		}
		mtx_lock_spin(&sched_lock);
		error = rtp_to_pri(&rtp, FIRST_KSEGRP_IN_PROC(p));
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
{

	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}
	kg->kg_pri_class = rtp->type;
	if (curthread->td_ksegrp == kg) {
		curthread->td_base_pri = kg->kg_user_pri;
		curthread->td_priority = kg->kg_user_pri;	/* XXX dubious */
	}
	return (0);
}

void
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
{

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = kg->kg_pri_class;
}
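
/*
 * Sketch of the class/priority mapping implemented by rtp_to_pri() and
 * pri_to_rtp() above (illustrative only, not compiled): an rtprio of
 * {RTP_PRIO_REALTIME, 5} maps to kg_user_pri = PRI_MIN_REALTIME + 5,
 * and converting back subtracts the same per-class base, so the round
 * trip is the identity as long as rtp->prio <= RTP_PRIO_MAX.
 */
#if 0
static void
rtprio_roundtrip_example(struct ksegrp *kg)
{
	struct rtprio in, out;

	in.type = RTP_PRIO_REALTIME;
	in.prio = 5;
	mtx_lock_spin(&sched_lock);
	if (rtp_to_pri(&in, kg) == 0) {		/* sets kg_user_pri/class */
		pri_to_rtp(kg, &out);		/* recovers the rtprio */
		KASSERT(out.type == in.type && out.prio == in.prio,
		    ("rtprio round trip mismatch"));
	}
	mtx_unlock_spin(&sched_lock);
}
#endif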

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	mtx_lock(&Giant);
	error = dosetrlimit(td, uap->which, &lim);
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct proc *p = td->td_proc;
	struct orlimit olim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	mtx_lock(&Giant);
	olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
	if (olim.rlim_cur == -1)
		olim.rlim_cur = 0x7fffffff;
	olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
	if (olim.rlim_max == -1)
		olim.rlim_max = 0x7fffffff;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	mtx_unlock(&Giant);
	return (error);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof (struct rlimit))))
		return (error);
	mtx_lock(&Giant);
	error = dosetrlimit(td, uap->which, &alim);
	mtx_unlock(&Giant);
	return (error);
}

int
dosetrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct proc *p = td->td_proc;
	register struct rlimit *alimp;
	int error;

	GIANT_REQUIRED;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);
	alimp = &p->p_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_cred(td->td_ucred, PRISON_ROOT)))
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_CPU:
		mtx_lock_spin(&sched_lock);
		p->p_cpulimit = limp->rlim_cur;
		mtx_unlock_spin(&sched_lock);
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is
		 * raised, make more of the stack accessible; if it is
		 * lowered, make the excess inaccessible again.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    alimp->rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	return (0);
}
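
/*
 * Worked example of the RLIMIT_STACK adjustment above (illustrative
 * sketch, not compiled; the 8MB/16MB figures are hypothetical): raising
 * rlim_cur from 8MB to 16MB re-enables access to the 8MB immediately
 * below the previously accessible region, i.e. the range
 * [sv_usrstack - 16MB, sv_usrstack - 8MB), page-aligned with
 * trunc_page()/round_page() before vm_map_protect() is called.
 */
#if 0
static void
stack_grow_example(struct proc *p)
{
	vm_offset_t addr;
	vm_size_t size;
	rlim_t oldcur = 8 * 1024 * 1024;	/* hypothetical old rlim_cur */
	rlim_t newcur = 16 * 1024 * 1024;	/* hypothetical new rlim_cur */

	size = newcur - oldcur;			/* 8MB becomes accessible */
	addr = p->p_sysent->sv_usrstack - newcur;
	addr = trunc_page(addr);
	size = round_page(size);
	(void) vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
	    p->p_sysent->sv_stackprot, FALSE);
}
#endif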

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	int error;
	struct proc *p = td->td_proc;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	mtx_lock(&Giant);
	error = copyout(&p->p_rlimit[uap->which], uap->rlp,
	    sizeof (struct rlimit));
	mtx_unlock(&Giant);
	return (error);
}
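
/*
 * Illustrative userland sketch (not part of this file, not compiled):
 * the usual pattern is to read the current limits with getrlimit(2),
 * raise the soft limit up to the hard limit, and write the pair back
 * with setrlimit(2); dosetrlimit() above only demands privilege when a
 * new value would exceed the old hard limit.
 */
#if 0
#include <sys/resource.h>

int
raise_nofile_soft_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
		return (-1);
	rl.rlim_cur = rl.rlim_max;	/* soft limit up to the hard limit */
	return (setrlimit(RLIMIT_NOFILE, &rl));
}
#endif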

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
	u_int64_t uut = 0, sut = 0, iut = 0;
	int s;
	struct timeval tv;
	struct bintime bt;
	struct kse *ke;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	/* XXX: why spl-protect ?  worst case is an off-by-one report */

	FOREACH_KSEGRP_IN_PROC(p, kg) {
		/* we could accumulate per ksegrp and per process here */
		FOREACH_KSE_IN_GROUP(kg, ke) {
			s = splstatclock();
			ut = ke->ke_uticks;
			st = ke->ke_sticks;
			it = ke->ke_iticks;
			splx(s);

			tt = ut + st + it;
			if (tt == 0) {
				st = 1;
				tt = 1;
			}

			if (ke == curthread->td_kse) {
				/*
				 * Adjust for the current time slice.  This is
				 * actually fairly important since the error
				 * here is on the order of a time quantum,
				 * which is much greater than the sampling
				 * error.
				 * XXXKSE use a different test due to threads
				 * on other processors also being 'current'.
				 */
				binuptime(&bt);
				bintime_sub(&bt, PCPU_PTR(switchtime));
				bintime_add(&bt, &p->p_runtime);
			} else {
				bt = p->p_runtime;
			}
			bintime2timeval(&bt, &tv);
			tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
			ptu = ke->ke_uu + ke->ke_su + ke->ke_iu;
			if (tu < ptu || (int64_t)tu < 0) {
				/* XXX no %qd in kernel.  Truncate. */
				printf("calcru: negative time of %ld usec for pid %d (%s)\n",
				    (long)tu, p->p_pid, p->p_comm);
				tu = ptu;
			}

			/* Subdivide tu. */
			uu = (tu * ut) / tt;
			su = (tu * st) / tt;
			iu = tu - uu - su;

			/* Enforce monotonicity. */
			if (uu < ke->ke_uu || su < ke->ke_su ||
			    iu < ke->ke_iu) {
				if (uu < ke->ke_uu)
					uu = ke->ke_uu;
				else if (uu + ke->ke_su + ke->ke_iu > tu)
					uu = tu - ke->ke_su - ke->ke_iu;
				if (st == 0)
					su = ke->ke_su;
				else {
					su = ((tu - uu) * st) / (st + it);
					if (su < ke->ke_su)
						su = ke->ke_su;
					else if (uu + su + ke->ke_iu > tu)
						su = tu - uu - ke->ke_iu;
				}
				KASSERT(uu + su + ke->ke_iu <= tu,
				    ("calcru: monotonisation botch 1"));
				iu = tu - uu - su;
				KASSERT(iu >= ke->ke_iu,
				    ("calcru: monotonisation botch 2"));
			}
			ke->ke_uu = uu;
			ke->ke_su = su;
			ke->ke_iu = iu;
			uut += uu;
			sut += su;
			iut += iu;

		} /* end kse loop */
	} /* end kseg loop */
	up->tv_sec = uut / 1000000;
	up->tv_usec = uut % 1000000;
	sp->tv_sec = sut / 1000000;
	sp->tv_usec = sut % 1000000;
	if (ip != NULL) {
		ip->tv_sec = iut / 1000000;
		ip->tv_usec = iut % 1000000;
	}
}
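
/*
 * Worked example of the subdivision above (illustrative sketch with
 * made-up numbers, not compiled): with ut=600, st=300, it=100 statclock
 * ticks (tt=1000) and a measured running time of tu=2000000 usec, the
 * split is uu=1200000 and su=600000, and iu = tu - uu - su = 200000, so
 * any rounding loss lands in the interrupt bucket and uu+su+iu == tu
 * always holds exactly.
 */
#if 0
static void
calcru_split_example(void)
{
	u_int64_t ut = 600, st = 300, it = 100;	/* per-category ticks */
	u_int64_t tt = ut + st + it;		/* 1000 ticks total */
	u_int64_t tu = 2000000;			/* 2 seconds in usec */
	u_int64_t uu, su, iu;

	uu = (tu * ut) / tt;			/* 1200000 usec user */
	su = (tu * st) / tt;			/*  600000 usec system */
	iu = tu - uu - su;			/*  200000 usec interrupt */
	KASSERT(uu + su + iu == tu, ("calcru example: lost time"));
}
#endif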

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct proc *p = td->td_proc;
	register struct rusage *rup;
	int error = 0;

	mtx_lock(&Giant);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		mtx_lock_spin(&sched_lock);
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		mtx_unlock_spin(&sched_lock);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		rup = NULL;
		error = EINVAL;
		break;
	}
	mtx_unlock(&Giant);
	if (error == 0) {
		error = copyout(rup, uap->rusage, sizeof (struct rusage));
	}
	return (error);
}

void
ruadd(ru, ru2)
	register struct rusage *ru, *ru2;
{
	register long *ip, *ip2;
	register int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *copy;

	MALLOC(copy, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
	copy->p_lflags = 0;
	copy->p_refcnt = 1;
	return (copy);
}
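
/*
 * Sketch of the intended copy-on-write usage (an assumption drawn from
 * the comment above and from the dosetrlimit() code path; illustrative
 * only, not compiled): fork shares the parent's plimit by bumping
 * p_refcnt, and a limit change copies the structure first while it is
 * still shared and PL_SHAREMOD is not set.
 */
#if 0
static void
plimit_share_example(struct proc *parent, struct proc *child)
{
	/* At fork time: share, don't copy (hypothetical fork-side code). */
	parent->p_limit->p_refcnt++;
	child->p_limit = parent->p_limit;

	/* At the first limit change on a shared structure: copy, then modify. */
	if (child->p_limit->p_refcnt > 1 &&
	    (child->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		child->p_limit->p_refcnt--;
		child->p_limit = limcopy(child->p_limit);
	}
}
#endif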

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}
/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		struct uidinfo *old_uip;

		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone else created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* someone else beat us to it */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc();
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If lowering the refcount results in a need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);

	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped
	 * the initial lock we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			/* XXX no %qd in kernel.  Truncate. */
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, (long)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		FREE(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}
/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct	uidinfo	*uip;
	int	diff;
	int	max;
{

	UIDINFO_LOCK(uip);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct	uidinfo	*uip;
	u_int	*hiwat;
	u_int	to;
	rlim_t	max;
{
	rlim_t new;
	int s;

	s = splnet();
	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* don't allow them to exceed max, but allow subtraction */
	if (to > *hiwat && new > max) {
		splx(s);
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	if (uip->ui_sbsize < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	splx(s);
	UIDINFO_UNLOCK(uip);
	return (1);
}
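
/*
 * Sketch of the uidinfo lifecycle the routines above implement
 * (illustrative only, not compiled; the EAGAIN return and limit value
 * are hypothetical choices by this example): a consumer looks the
 * structure up with uifind(), which returns it referenced, charges
 * resources against it with chgproccnt()/chgsbsize(), and drops the
 * reference with uifree() when done.
 */
#if 0
static int
uidinfo_usage_example(uid_t uid, int maxprocs)
{
	struct uidinfo *uip;

	uip = uifind(uid);			/* returned referenced */
	if (chgproccnt(uip, 1, maxprocs) == 0) {
		uifree(uip);			/* over the limit: undo */
		return (EAGAIN);
	}
	/* ... later, when the charged process goes away ... */
	(void) chgproccnt(uip, -1, 0);		/* uncharge; max 0 = no limit */
	uifree(uip);				/* drop our reference */
	return (0);
}
#endif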