1 /*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
39 * $FreeBSD$
40 */
41
42 #include "opt_compat.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysproto.h>
47 #include <sys/file.h>
48 #include <sys/kernel.h>
49 #include <sys/resourcevar.h>
50 #include <sys/malloc.h>
51 #include <sys/proc.h>
52 #include <sys/time.h>
53
54 #include <vm/vm.h>
55 #include <vm/vm_param.h>
56 #include <sys/lock.h>
57 #include <vm/pmap.h>
58 #include <vm/vm_map.h>
59
/* Internal helper for setpriority(): permission check + nice update. */
static int donice __P((struct proc *curp, struct proc *chgp, int n));
/* dosetrlimit non-static: Needed by SysVR4 emulator */
int dosetrlimit __P((struct proc *p, u_int which, struct rlimit *limp));

/* Per-uid resource accounting: malloc type, hash table, and helpers. */
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate __P((uid_t uid));
static struct uidinfo	*uilookup __P((uid_t uid));
71
72 /*
73 * Resource controls and accounting.
74 */
75
76 #ifndef _SYS_SYSPROTO_H_
77 struct getpriority_args {
78 int which;
79 int who;
80 };
81 #endif
82 int
83 getpriority(curp, uap)
84 struct proc *curp;
85 register struct getpriority_args *uap;
86 {
87 register struct proc *p;
88 register int low = PRIO_MAX + 1;
89
90 switch (uap->which) {
91
92 case PRIO_PROCESS:
93 if (uap->who == 0)
94 p = curp;
95 else
96 p = pfind(uap->who);
97 if (p == 0)
98 break;
99 if (!PRISON_CHECK(curp, p))
100 break;
101 low = p->p_nice;
102 break;
103
104 case PRIO_PGRP: {
105 register struct pgrp *pg;
106
107 if (uap->who == 0)
108 pg = curp->p_pgrp;
109 else if ((pg = pgfind(uap->who)) == NULL)
110 break;
111 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
112 if ((PRISON_CHECK(curp, p) && p->p_nice < low))
113 low = p->p_nice;
114 }
115 break;
116 }
117
118 case PRIO_USER:
119 if (uap->who == 0)
120 uap->who = curp->p_ucred->cr_uid;
121 LIST_FOREACH(p, &allproc, p_list)
122 if (PRISON_CHECK(curp, p) &&
123 p->p_ucred->cr_uid == uap->who &&
124 p->p_nice < low)
125 low = p->p_nice;
126 break;
127
128 default:
129 return (EINVAL);
130 }
131 if (low == PRIO_MAX + 1)
132 return (ESRCH);
133 curp->p_retval[0] = low;
134 return (0);
135 }
136
137 #ifndef _SYS_SYSPROTO_H_
138 struct setpriority_args {
139 int which;
140 int who;
141 int prio;
142 };
143 #endif
144 /* ARGSUSED */
145 int
146 setpriority(curp, uap)
147 struct proc *curp;
148 register struct setpriority_args *uap;
149 {
150 register struct proc *p;
151 int found = 0, error = 0;
152
153 switch (uap->which) {
154
155 case PRIO_PROCESS:
156 if (uap->who == 0)
157 p = curp;
158 else
159 p = pfind(uap->who);
160 if (p == 0)
161 break;
162 if (!PRISON_CHECK(curp, p))
163 break;
164 error = donice(curp, p, uap->prio);
165 found++;
166 break;
167
168 case PRIO_PGRP: {
169 register struct pgrp *pg;
170
171 if (uap->who == 0)
172 pg = curp->p_pgrp;
173 else if ((pg = pgfind(uap->who)) == NULL)
174 break;
175 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
176 if (PRISON_CHECK(curp, p)) {
177 error = donice(curp, p, uap->prio);
178 found++;
179 }
180 }
181 break;
182 }
183
184 case PRIO_USER:
185 if (uap->who == 0)
186 uap->who = curp->p_ucred->cr_uid;
187 LIST_FOREACH(p, &allproc, p_list)
188 if (p->p_ucred->cr_uid == uap->who &&
189 PRISON_CHECK(curp, p)) {
190 error = donice(curp, p, uap->prio);
191 found++;
192 }
193 break;
194
195 default:
196 return (EINVAL);
197 }
198 if (found == 0)
199 return (ESRCH);
200 return (error);
201 }
202
203 static int
204 donice(curp, chgp, n)
205 register struct proc *curp, *chgp;
206 register int n;
207 {
208 register struct pcred *pcred = curp->p_cred;
209 int s;
210
211 if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
212 pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
213 pcred->p_ruid != chgp->p_ucred->cr_uid)
214 return (EPERM);
215 if (n > PRIO_MAX)
216 n = PRIO_MAX;
217 if (n < PRIO_MIN)
218 n = PRIO_MIN;
219 if (n < chgp->p_nice && suser(curp))
220 return (EACCES);
221 chgp->p_nice = n;
222 s = splstatclock();
223 (void)resetpriority(chgp);
224 if (chgp->p_priority >= PUSER)
225 chgp->p_priority = chgp->p_usrpri;
226 splx(s);
227 return (0);
228 }
229
/* rtprio system call */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;	/* RTP_LOOKUP or RTP_SET */
	pid_t		pid;		/* target process; 0 = self */
	struct rtprio	*rtp;		/* user buffer read (SET) / written (LOOKUP) */
};
#endif

/*
 * Set realtime priority
 */

/* ARGSUSED */
int
rtprio(curp, uap)
	struct proc *curp;
	register struct rtprio_args *uap;
{
	register struct proc *p;
	register struct pcred *pcred = curp->p_cred;
	struct rtprio rtp;
	int error;

	/* Fetch the request up front; RTP_LOOKUP ignores its contents. */
	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == 0)
		return (ESRCH);

	switch (uap->function) {
	case RTP_LOOKUP:
		/* Report the target's current realtime priority. */
		return (copyout(&p->p_rtprio, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		/* Must be root or share an effective/real uid with target. */
		if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
		    pcred->pc_ucred->cr_uid != p->p_ucred->cr_uid &&
		    pcred->p_ruid != p->p_ucred->cr_uid)
			return (EPERM);
		/* disallow setting rtprio in most cases if not superuser */
		if (suser(curp)) {
			/* can't set someone else's */
			if (uap->pid)
				return (EPERM);
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
#if 0
 			if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
			if (rtp.type != RTP_PRIO_NORMAL)
				return (EPERM);
		}
		/* Validate the requested class and priority level. */
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX)
				return (EINVAL);
			p->p_rtprio = rtp;
			return (0);
		default:
			return (EINVAL);
		}

	default:
		return (EINVAL);
	}
}
312
313 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
314 #ifndef _SYS_SYSPROTO_H_
315 struct osetrlimit_args {
316 u_int which;
317 struct orlimit *rlp;
318 };
319 #endif
320 /* ARGSUSED */
321 int
322 osetrlimit(p, uap)
323 struct proc *p;
324 register struct osetrlimit_args *uap;
325 {
326 struct orlimit olim;
327 struct rlimit lim;
328 int error;
329
330 if ((error =
331 copyin((caddr_t)uap->rlp, (caddr_t)&olim, sizeof(struct orlimit))))
332 return (error);
333 lim.rlim_cur = olim.rlim_cur;
334 lim.rlim_max = olim.rlim_max;
335 return (dosetrlimit(p, uap->which, &lim));
336 }
337
338 #ifndef _SYS_SYSPROTO_H_
339 struct ogetrlimit_args {
340 u_int which;
341 struct orlimit *rlp;
342 };
343 #endif
344 /* ARGSUSED */
345 int
346 ogetrlimit(p, uap)
347 struct proc *p;
348 register struct ogetrlimit_args *uap;
349 {
350 struct orlimit olim;
351
352 if (uap->which >= RLIM_NLIMITS)
353 return (EINVAL);
354 olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
355 if (olim.rlim_cur == -1)
356 olim.rlim_cur = 0x7fffffff;
357 olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
358 if (olim.rlim_max == -1)
359 olim.rlim_max = 0x7fffffff;
360 return (copyout((caddr_t)&olim, (caddr_t)uap->rlp, sizeof(olim)));
361 }
362 #endif /* COMPAT_43 || COMPAT_SUNOS */
363
364 #ifndef _SYS_SYSPROTO_H_
365 struct __setrlimit_args {
366 u_int which;
367 struct rlimit *rlp;
368 };
369 #endif
370 /* ARGSUSED */
371 int
372 setrlimit(p, uap)
373 struct proc *p;
374 register struct __setrlimit_args *uap;
375 {
376 struct rlimit alim;
377 int error;
378
379 if ((error =
380 copyin((caddr_t)uap->rlp, (caddr_t)&alim, sizeof (struct rlimit))))
381 return (error);
382 return (dosetrlimit(p, uap->which, &alim));
383 }
384
/*
 * Validate and install the new resource limit *limp for resource
 * `which' in process p, applying per-resource clamps and side
 * effects.  Returns 0 or an errno.  Non-static because the SysVR4
 * emulator calls it directly (see prototype above).
 */
int
dosetrlimit(p, which, limp)
	struct proc *p;
	u_int which;
	struct rlimit *limp;
{
	register struct rlimit *alimp;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);
	alimp = &p->p_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	/* Raising either value above the current hard limit needs root. */
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_xxx(0, p, PRISON_ROOT)))
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	/*
	 * Limits are shared copy-on-write after fork; take a private
	 * copy before modifying if others still reference this one.
	 */
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];	/* p_limit changed; refetch */
	}

	switch (which) {

	case RLIMIT_CPU:
		/* p_cpulimit is kept in microseconds; guard the multiply. */
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			p->p_limit->p_cpulimit = RLIM_INFINITY;
		else
			p->p_limit->p_cpulimit =
			    (rlim_t)1000000 * limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		/* Clamp to the system-wide maximum data segment size. */
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
					      addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		/* A process limit below 1 would be nonsensical. */
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	*alimp = *limp;
	return (0);
}
487
488 #ifndef _SYS_SYSPROTO_H_
489 struct __getrlimit_args {
490 u_int which;
491 struct rlimit *rlp;
492 };
493 #endif
494 /* ARGSUSED */
495 int
496 getrlimit(p, uap)
497 struct proc *p;
498 register struct __getrlimit_args *uap;
499 {
500
501 if (uap->which >= RLIM_NLIMITS)
502 return (EINVAL);
503 return (copyout((caddr_t)&p->p_rlimit[uap->which], (caddr_t)uap->rlp,
504 sizeof (struct rlimit)));
505 }
506
507 /*
508 * Transform the running time and tick information in proc p into user,
509 * system, and interrupt time usage.
510 */
511 void
512 calcru(p, up, sp, ip)
513 struct proc *p;
514 struct timeval *up;
515 struct timeval *sp;
516 struct timeval *ip;
517 {
518 struct timeval tv;
519 /* {user, system, interrupt, total} {ticks, usec}; previous tu: */
520 u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
521 int problemcase, s;
522
523 /* XXX: why spl-protect ? worst case is an off-by-one report */
524 s = splstatclock();
525 ut = p->p_uticks;
526 st = p->p_sticks;
527 it = p->p_iticks;
528 splx(s);
529
530 tt = ut + st + it;
531 if (tt == 0) {
532 st = 1;
533 tt = 1;
534 }
535
536 tu = p->p_runtime;
537 problemcase = 0;
538 if (p == curproc) {
539 /*
540 * Adjust for the current time slice. This is actually fairly
541 * important since the error here is on the order of a time
542 * quantum, which is much greater than the sampling error.
543 */
544 microuptime(&tv);
545 if (timevalcmp(&tv, &switchtime, <))
546 printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
547 switchtime.tv_sec, switchtime.tv_usec,
548 tv.tv_sec, tv.tv_usec);
549 else
550 tu += (tv.tv_usec - switchtime.tv_usec) +
551 (tv.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
552 } else if (p->p_stat == SRUN || p->p_stat == SZOMB) {
553 /*
554 * XXX: this case should add the difference between
555 * the current time and the switch time as above,
556 * but the switch time is inaccessible, so we can't
557 * do the adjustment and will end up with a wrong
558 * runtime. A previous call with a different
559 * curthread may have obtained a (right or wrong)
560 * runtime that is in advance of ours. Just set a
561 * flag to avoid warning about this known problem.
562 *
563 * In the SRUN case, the inaccessibility is due to
564 * the switch time being in the PCPU info for a
565 * different CPU. In the SZOMB case, it is caused
566 * by the relevant switch time going away in exit1()
567 * and neglecting to use it to update p_runtime there.
568 */
569 problemcase = 1;
570 }
571 ptu = p->p_uu + p->p_su + p->p_iu;
572 if (tu < ptu) {
573 if (!problemcase)
574 printf(
575 "calcru: runtime went backwards from %qu usec to %qu usec for pid %d (%s)\n",
576 (unsigned long long)ptu, (unsigned long long)tu,
577 p->p_pid, p->p_comm);
578 }
579 if ((int64_t)tu < 0) {
580 printf("calcru: negative runtime of %qd usec for pid %d (%s)\n",
581 (long long)tu, p->p_pid, p->p_comm);
582 tu = ptu;
583 }
584
585 /* Subdivide tu. */
586 uu = (tu * ut) / tt;
587 su = (tu * st) / tt;
588 iu = tu - uu - su;
589
590 /* Enforce monotonicity. */
591 if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
592 if (uu < p->p_uu)
593 uu = p->p_uu;
594 else if (uu + p->p_su + p->p_iu > tu)
595 uu = tu - p->p_su - p->p_iu;
596 if (st == 0)
597 su = p->p_su;
598 else {
599 su = ((tu - uu) * st) / (st + it);
600 if (su < p->p_su)
601 su = p->p_su;
602 else if (uu + su + p->p_iu > tu)
603 su = tu - uu - p->p_iu;
604 }
605 KASSERT(uu + su + p->p_iu <= tu,
606 ("calcru: monotonisation botch 1"));
607 iu = tu - uu - su;
608 KASSERT(iu >= p->p_iu,
609 ("calcru: monotonisation botch 2"));
610 }
611 p->p_uu = uu;
612 p->p_su = su;
613 p->p_iu = iu;
614
615 up->tv_sec = uu / 1000000;
616 up->tv_usec = uu % 1000000;
617 sp->tv_sec = su / 1000000;
618 sp->tv_usec = su % 1000000;
619 if (ip != NULL) {
620 ip->tv_sec = iu / 1000000;
621 ip->tv_usec = iu % 1000000;
622 }
623 }
624
625 #ifndef _SYS_SYSPROTO_H_
626 struct getrusage_args {
627 int who;
628 struct rusage *rusage;
629 };
630 #endif
631 /* ARGSUSED */
632 int
633 getrusage(p, uap)
634 register struct proc *p;
635 register struct getrusage_args *uap;
636 {
637 register struct rusage *rup;
638
639 switch (uap->who) {
640
641 case RUSAGE_SELF:
642 rup = &p->p_stats->p_ru;
643 calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
644 break;
645
646 case RUSAGE_CHILDREN:
647 rup = &p->p_stats->p_cru;
648 break;
649
650 default:
651 return (EINVAL);
652 }
653 return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
654 sizeof (struct rusage)));
655 }
656
657 void
658 ruadd(ru, ru2)
659 register struct rusage *ru, *ru2;
660 {
661 register long *ip, *ip2;
662 register int i;
663
664 timevaladd(&ru->ru_utime, &ru2->ru_utime);
665 timevaladd(&ru->ru_stime, &ru2->ru_stime);
666 if (ru->ru_maxrss < ru2->ru_maxrss)
667 ru->ru_maxrss = ru2->ru_maxrss;
668 ip = &ru->ru_first; ip2 = &ru2->ru_first;
669 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
670 *ip++ += *ip2++;
671 }
672
/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *copy;

	MALLOC(copy, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	/*
	 * Copies a full struct plimit's worth of bytes starting at the
	 * rlimit array, so any fields that live past pl_rlimit (e.g.
	 * the CPU limit) are carried over too; the flag and refcount
	 * fields are then reset for the new private copy below.
	 * NOTE(review): assumes pl_rlimit is the first member of
	 * struct plimit -- confirm against <sys/resourcevar.h>.
	 */
	bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
	copy->p_lflags = 0;
	copy->p_refcnt = 1;
	return (copy);
}
691
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{
	/* Roughly one hash bucket per 16 processes; uihash gets size-1. */
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
702
703 static struct uidinfo *
704 uilookup(uid)
705 uid_t uid;
706 {
707 struct uihashhead *uipp;
708 struct uidinfo *uip;
709
710 uipp = UIHASH(uid);
711 LIST_FOREACH(uip, uipp, ui_hash)
712 if (uip->ui_uid == uid)
713 break;
714
715 return (uip);
716 }
717
718 static struct uidinfo *
719 uicreate(uid)
720 uid_t uid;
721 {
722 struct uidinfo *uip, *norace;
723
724 MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_NOWAIT);
725 if (uip == NULL) {
726 MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
727 /*
728 * if we M_WAITOK we must look afterwards or risk
729 * redundant entries
730 */
731 norace = uilookup(uid);
732 if (norace != NULL) {
733 FREE(uip, M_UIDINFO);
734 return (norace);
735 }
736 }
737 LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
738 uip->ui_uid = uid;
739 uip->ui_proccnt = 0;
740 uip->ui_sbsize = 0;
741 uip->ui_ref = 0;
742 return (uip);
743 }
744
745 struct uidinfo *
746 uifind(uid)
747 uid_t uid;
748 {
749 struct uidinfo *uip;
750
751 uip = uilookup(uid);
752 if (uip == NULL)
753 uip = uicreate(uid);
754 uip->ui_ref++;
755 return (uip);
756 }
757
758 int
759 uifree(uip)
760 struct uidinfo *uip;
761 {
762
763 if (--uip->ui_ref == 0) {
764 if (uip->ui_sbsize != 0)
765 /* XXX no %qd in kernel. Truncate. */
766 printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
767 uip->ui_uid, (long)uip->ui_sbsize);
768 if (uip->ui_proccnt != 0)
769 printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
770 uip->ui_uid, uip->ui_proccnt);
771 LIST_REMOVE(uip, ui_hash);
772 FREE(uip, M_UIDINFO);
773 return (1);
774 }
775 return (0);
776 }
777
778 /*
779 * Change the count associated with number of processes
780 * a given user is using. When 'max' is 0, don't enforce a limit
781 */
782 int
783 chgproccnt(uip, diff, max)
784 struct uidinfo *uip;
785 int diff;
786 int max;
787 {
788 /* don't allow them to exceed max, but allow subtraction */
789 if (diff > 0 && uip->ui_proccnt + diff > max && max != 0)
790 return (0);
791 uip->ui_proccnt += diff;
792 if (uip->ui_proccnt < 0)
793 printf("negative proccnt for uid = %d\n", uip->ui_uid);
794 return (1);
795 }
796
797 /*
798 * Change the total socket buffer size a user has used.
799 */
800 int
801 chgsbsize(uip, hiwat, to, max)
802 struct uidinfo *uip;
803 u_long *hiwat;
804 u_long to;
805 rlim_t max;
806 {
807 rlim_t new;
808 int s;
809
810 s = splnet();
811 new = uip->ui_sbsize + to - *hiwat;
812 /* don't allow them to exceed max, but allow subtraction */
813 if (to > *hiwat && new > max) {
814 splx(s);
815 return (0);
816 }
817 uip->ui_sbsize = new;
818 *hiwat = to;
819 if (uip->ui_sbsize < 0)
820 printf("negative sbsize for uid = %d\n", uip->ui_uid);
821 splx(s);
822 return (1);
823 }
Cache object: 373ef7d36aaadb9a4c0033397b31f3d1
|