/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/kern/kern_resource.c 121608 2003-10-27 07:15:47Z jeff $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static int	donice(struct thread *td, struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
/*
 * MPSAFE
 */
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error = 0;
	struct ksegrp *kg;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_ksegrp->kg_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0) {
				FOREACH_KSEGRP_IN_PROC(p, kg) {
					if (kg->kg_nice < low)
						low = kg->kg_nice;
				}
			}
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				FOREACH_KSEGRP_IN_PROC(p, kg) {
					if (kg->kg_nice < low)
						low = kg->kg_nice;
				}
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				FOREACH_KSEGRP_IN_PROC(p, kg) {
					if (kg->kg_nice < low)
						low = kg->kg_nice;
				}
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
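
/*
 * Illustrative only: because -1 is a legal nice value, a userland
 * caller of getpriority(2) must clear errno before the call to
 * distinguish an error return from a valid result.  A minimal sketch,
 * not part of the kernel build:
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

static int
example_getpriority(pid_t pid)
{
	int prio;

	errno = 0;
	prio = getpriority(PRIO_PROCESS, pid);
	if (prio == -1 && errno != 0) {
		perror("getpriority");
		return (-1);
	}
	printf("nice value: %d\n", prio);
	return (prio);
}
#endif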

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setpriority(td, uap)
	struct thread *td;
	register struct setpriority_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	int found = 0, error = 0;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a process.  This doesn't really understand threaded
 * processes well, but it does try.  It has the unfortunate side effect
 * of making all of a process's ksegrps share the same nice value,
 * which suggests that nice values should be stored as a per-process
 * nice plus per-ksegrp deltas (but that is not done yet).
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;
	int low = PRIO_MAX + 1;
	struct ksegrp *kg;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	/*
	 * Only the superuser may lower the nice value below the lowest
	 * nice currently in use by the process; e.g. with nice values
	 * of 4, 3 and 2, anyone may renice to 3 but only the superuser
	 * may renice to 1.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_nice < low)
			low = kg->kg_nice;
	}
	if (n < low && suser(td))
		return (EACCES);
	mtx_lock_spin(&sched_lock);
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_nice(kg, n);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/* rtprio system call */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif

/*
 * Set realtime priority.
 */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
rtprio(td, uap)
	struct thread *td;
	register struct rtprio_args *uap;
{
	struct proc *curp = td->td_proc;
	register struct proc *p;
	struct rtprio rtp;
	int error, cierror = 0;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));

	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		mtx_lock_spin(&sched_lock);
		pri_to_rtp(FIRST_KSEGRP_IN_PROC(p), &rtp);
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;
		/* Disallow setting rtprio in most cases if not superuser. */
		if (suser(td) != 0) {
			/* Can't set someone else's priority. */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.  Realtime priority
			 * has to be restricted for reasons which should be
			 * obvious.  However, for idle priority, there is a
			 * potential for system deadlock if an idleprio
			 * process gains a lock on a resource that other
			 * processes need (and the idleprio process can't run
			 * due to a CPU-bound normal process).  Fix me!  XXX
			 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
			if (rtp.type != RTP_PRIO_NORMAL) {
				error = EPERM;
				break;
			}
		}
		mtx_lock_spin(&sched_lock);
		error = rtp_to_pri(&rtp, FIRST_KSEGRP_IN_PROC(p));
		mtx_unlock_spin(&sched_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
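
/*
 * Illustrative only: a userland sketch of the rtprio(2) interface
 * handled above, not part of the kernel build.  Per the check above,
 * RTP_SET with any type other than RTP_PRIO_NORMAL requires superuser
 * privilege.
 */
#if 0
#include <sys/types.h>
#include <sys/rtprio.h>
#include <err.h>

static void
example_rtprio(void)
{
	struct rtprio rtp;

	/* Query the current process's scheduling class and priority. */
	if (rtprio(RTP_LOOKUP, 0, &rtp) == -1)
		err(1, "rtprio lookup");

	/* Request idle-class scheduling for ourselves (superuser only). */
	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;
	if (rtprio(RTP_SET, 0, &rtp) == -1)
		err(1, "rtprio set");
}
#endif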

int
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}
	sched_class(kg, rtp->type);
	if (curthread->td_ksegrp == kg) {
		curthread->td_base_pri = kg->kg_user_pri;
		sched_prio(curthread, kg->kg_user_pri);	/* XXX dubious */
	}
	return (0);
}

void
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
{

	mtx_assert(&sched_lock, MA_OWNED);
	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = kg->kg_pri_class;
}
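
/*
 * Note that the two conversions above are inverses over the user
 * priority range: rtp_to_pri() with type RTP_PRIO_REALTIME and prio 5
 * sets kg_user_pri to PRI_MIN_REALTIME + 5, and pri_to_rtp() on that
 * ksegrp recovers type RTP_PRIO_REALTIME and prio 5.
 */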

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	mtx_lock(&Giant);
	error = dosetrlimit(td, uap->which, &lim);
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct proc *p = td->td_proc;
	struct orlimit olim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	mtx_lock(&Giant);
	olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
	if (olim.rlim_cur == -1)
		olim.rlim_cur = 0x7fffffff;
	olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
	if (olim.rlim_max == -1)
		olim.rlim_max = 0x7fffffff;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	mtx_unlock(&Giant);
	return (error);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof (struct rlimit))))
		return (error);
	mtx_lock(&Giant);
	error = dosetrlimit(td, uap->which, &alim);
	mtx_unlock(&Giant);
	return (error);
}

int
dosetrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct proc *p = td->td_proc;
	register struct rlimit *alimp;
	int error;

	GIANT_REQUIRED;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);
	alimp = &p->p_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_cred(td->td_ucred, PRISON_ROOT)))
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_CPU:
		mtx_lock_spin(&sched_lock);
		p->p_cpulimit = limp->rlim_cur;
		mtx_unlock_spin(&sched_lock);
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * The stack is allocated to its maximum size at exec time,
		 * with only "rlim_cur" bytes accessible.  If the stack
		 * limit is going up, make more bytes accessible; if it is
		 * going down, make those bytes inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    alimp->rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	return (0);
}
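
/*
 * Illustrative only: a userland sketch of the semantics implemented
 * above, not part of the kernel build.  Raising either limit above the
 * current hard limit fails without superuser privilege, while lowering
 * the soft limit always succeeds.
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <err.h>

static void
example_setrlimit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
		err(1, "getrlimit");
	rl.rlim_cur = rl.rlim_max;	/* raise soft limit to the hard limit */
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
		err(1, "setrlimit");
}
#endif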

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	int error;
	struct proc *p = td->td_proc;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	mtx_lock(&Giant);
	error = copyout(&p->p_rlimit[uap->which], uap->rlp,
	    sizeof (struct rlimit));
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
	struct timeval tv;
	struct bintime bt;

	mtx_assert(&sched_lock, MA_OWNED);
	/* XXX: why spl-protect?  worst case is an off-by-one report */

	ut = p->p_uticks;
	st = p->p_sticks;
	it = p->p_iticks;

	tt = ut + st + it;
	if (tt == 0) {
		st = 1;
		tt = 1;
	}

	if (curthread->td_proc == p) {
		/*
		 * Adjust for the current time slice.  This is actually fairly
		 * important since the error here is on the order of a time
		 * quantum, which is much greater than the sampling error.
		 * XXXKSE use a different test due to threads on other
		 * processors also being 'current'.
		 */
		binuptime(&bt);
		bintime_sub(&bt, PCPU_PTR(switchtime));
		bintime_add(&bt, &p->p_runtime);
	} else {
		bt = p->p_runtime;
	}
	bintime2timeval(&bt, &tv);
	tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
	ptu = p->p_uu + p->p_su + p->p_iu;
	if (tu < ptu || (int64_t)tu < 0) {
		/* XXX no %qd in kernel.  Truncate. */
		printf("calcru: negative time of %ld usec for pid %d (%s)\n",
		    (long)tu, p->p_pid, p->p_comm);
		tu = ptu;
	}

	/* Subdivide tu. */
	uu = (tu * ut) / tt;
	su = (tu * st) / tt;
	iu = tu - uu - su;

	/* Enforce monotonicity. */
	if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
		if (uu < p->p_uu)
			uu = p->p_uu;
		else if (uu + p->p_su + p->p_iu > tu)
			uu = tu - p->p_su - p->p_iu;
		if (st == 0)
			su = p->p_su;
		else {
			su = ((tu - uu) * st) / (st + it);
			if (su < p->p_su)
				su = p->p_su;
			else if (uu + su + p->p_iu > tu)
				su = tu - uu - p->p_iu;
		}
		KASSERT(uu + su + p->p_iu <= tu,
		    ("calcru: monotonisation botch 1"));
		iu = tu - uu - su;
		KASSERT(iu >= p->p_iu,
		    ("calcru: monotonisation botch 2"));
	}
	p->p_uu = uu;
	p->p_su = su;
	p->p_iu = iu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
	if (ip != NULL) {
		ip->tv_sec = iu / 1000000;
		ip->tv_usec = iu % 1000000;
	}
}
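
/*
 * For example, with ut = 3, st = 1, it = 0 statclock ticks and a
 * measured runtime of tu = 1000000 usec, the subdivision above yields
 * uu = (1000000 * 3) / 4 = 750000 usec of user time,
 * su = (1000000 * 1) / 4 = 250000 usec of system time, and
 * iu = tu - uu - su = 0 usec of interrupt time.  The tick counts only
 * apportion tu; the absolute time comes from p->p_runtime.
 */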

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct proc *p = td->td_proc;
	register struct rusage *rup;
	int error = 0;

	mtx_lock(&Giant);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		mtx_lock_spin(&sched_lock);
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		mtx_unlock_spin(&sched_lock);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		rup = NULL;
		error = EINVAL;
		break;
	}
	mtx_unlock(&Giant);
	if (error == 0) {
		/* XXX Unlocked access to p_stats->p_ru or p_cru. */
		error = copyout(rup, uap->rusage, sizeof (struct rusage));
	}
	return (error);
}

void
ruadd(ru, ru2)
	register struct rusage *ru, *ru2;
{
	register long *ip, *ip2;
	register int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
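	/*
	 * The remaining rusage fields, ru_first (ru_ixrss) through
	 * ru_last (ru_nivcsw), form a contiguous array of longs, so the
	 * loop below sums them pairwise as a flat array.
	 */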
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *copy;

	MALLOC(copy, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
	copy->p_refcnt = 1;
	return (copy);
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}

/*
 * Look up a uidinfo struct for the given uid.
 * The caller must hold uihashtbl_mtx.
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	mtx_assert(&uihashtbl_mtx, MA_OWNED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Returns with a reference held on the uidinfo struct returned;
 * uifree() should be called when that reference is released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *uip;

	mtx_lock(&uihashtbl_mtx);
	uip = uilookup(uid);
	if (uip == NULL) {
		struct uidinfo *old_uip;

		mtx_unlock(&uihashtbl_mtx);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		mtx_lock(&uihashtbl_mtx);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
			uip->ui_uid = uid;
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	mtx_unlock(&uihashtbl_mtx);
	return (uip);
}
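
/*
 * Illustrative only: the expected reference discipline for uifind()
 * and uifree().  A hypothetical sketch, not a verbatim caller from
 * elsewhere in the kernel:
 */
#if 0
	struct uidinfo *uip;

	uip = uifind(uid);		/* returns with a reference held */
	if (!chgproccnt(uip, 1, maxprocperuid)) {
		uifree(uip);		/* over the per-uid process limit */
		return (EAGAIN);
	}
	/* ... later, when the process exits ... */
	(void)chgproccnt(uip, -1, 0);
	uifree(uip);			/* drop the reference */
#endif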

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	UIDINFO_LOCK(uip);
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure, which means
 * we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, release the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{

	/* Prepare for optimal case. */
	UIDINFO_LOCK(uip);

	if (--uip->ui_ref != 0) {
		UIDINFO_UNLOCK(uip);
		return;
	}

	/* Prepare for suboptimal case. */
	uip->ui_ref++;
	UIDINFO_UNLOCK(uip);
	mtx_lock(&uihashtbl_mtx);
	UIDINFO_LOCK(uip);

	/*
	 * We must subtract one from the count again because we backed out
	 * our initial subtraction before dropping the lock.
	 * Since another thread may have added a reference after we dropped
	 * the initial lock we have to test for zero again.
	 */
	if (--uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		mtx_unlock(&uihashtbl_mtx);
		if (uip->ui_sbsize != 0)
			/* XXX no %qd in kernel.  Truncate. */
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, (long)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		UIDINFO_UNLOCK(uip);
		FREE(uip, M_UIDINFO);
		return;
	}

	mtx_unlock(&uihashtbl_mtx);
	UIDINFO_UNLOCK(uip);
}

/*
 * Change the count of processes a given user is using.  When 'max'
 * is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct uidinfo *uip;
	int diff;
	int max;
{

	UIDINFO_LOCK(uip);
	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	UIDINFO_UNLOCK(uip);
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct uidinfo *uip;
	u_int *hiwat;
	u_int to;
	rlim_t max;
{
	rlim_t new;
	int s;

	s = splnet();
	UIDINFO_LOCK(uip);
	new = uip->ui_sbsize + to - *hiwat;
	/* Don't allow them to exceed max, but allow subtraction. */
	if (to > *hiwat && new > max) {
		splx(s);
		UIDINFO_UNLOCK(uip);
		return (0);
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	if (uip->ui_sbsize < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	splx(s);
	UIDINFO_UNLOCK(uip);
	return (1);
}
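
/*
 * Illustrative only: a hypothetical caller pattern for chgsbsize(),
 * charging a change in a socket buffer's high-water mark against the
 * owning user's total ("sb", "cc" and "sbsize_limit" are assumed
 * names, not definitions from this file):
 */
#if 0
	if (chgsbsize(uip, &sb->sb_hiwat, cc, sbsize_limit) == 0)
		return (ENOBUFS);	/* would exceed the per-uid sbsize limit */
#endif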