/*	$OpenBSD: kern_resource.c,v 1.76 2022/11/17 18:53:13 deraadt Exp $	*/
/*	$NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/signalvar.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

/* Resource usage check interval in msec */
#define RUCHECK_INTERVAL	1000

/* SIGXCPU interval in seconds of process runtime */
#define SIGXCPU_INTERVAL	5

struct plimit	*lim_copy(struct plimit *);
struct plimit	*lim_write_begin(void);
void		 lim_write_commit(struct plimit *);

void	tuagg_sub(struct tusage *, struct proc *);

/*
 * Patchable maximum data and stack limits.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Serializes resource limit updates.
 * This lock has to be held together with ps_mtx when updating
 * the process' ps_limit.
 */
struct rwlock rlimit_lock = RWLOCK_INITIALIZER("rlimitlk");

/*
 * Resource controls and accounting.
 */

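/*
 * getpriority(2): return the lowest nice value (i.e. the highest
 * scheduling priority) among the processes selected by `which' and `who'.
 */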
int
sys_getpriority(struct proc *curp, void *v, register_t *retval)
{
        struct sys_getpriority_args /* {
                syscallarg(int) which;
                syscallarg(id_t) who;
        } */ *uap = v;
        struct process *pr;
        int low = NZERO + PRIO_MAX + 1;

        switch (SCARG(uap, which)) {

        case PRIO_PROCESS:
                if (SCARG(uap, who) == 0)
                        pr = curp->p_p;
                else
                        pr = prfind(SCARG(uap, who));
                if (pr == NULL)
                        break;
                if (pr->ps_nice < low)
                        low = pr->ps_nice;
                break;

        case PRIO_PGRP: {
                struct pgrp *pg;

                if (SCARG(uap, who) == 0)
                        pg = curp->p_p->ps_pgrp;
                else if ((pg = pgfind(SCARG(uap, who))) == NULL)
                        break;
                LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
                        if (pr->ps_nice < low)
                                low = pr->ps_nice;
                break;
        }

        case PRIO_USER:
                if (SCARG(uap, who) == 0)
                        SCARG(uap, who) = curp->p_ucred->cr_uid;
                LIST_FOREACH(pr, &allprocess, ps_list)
                        if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
                            pr->ps_nice < low)
                                low = pr->ps_nice;
                break;

        default:
                return (EINVAL);
        }
        if (low == NZERO + PRIO_MAX + 1)
                return (ESRCH);
        *retval = low - NZERO;
        return (0);
}

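/*
 * setpriority(2): apply a new nice value to every process selected by
 * `which' and `who'.  ESRCH is returned if nothing matched, otherwise
 * the last error from donice().
 */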
int
sys_setpriority(struct proc *curp, void *v, register_t *retval)
{
        struct sys_setpriority_args /* {
                syscallarg(int) which;
                syscallarg(id_t) who;
                syscallarg(int) prio;
        } */ *uap = v;
        struct process *pr;
        int found = 0, error = 0;

        switch (SCARG(uap, which)) {

        case PRIO_PROCESS:
                if (SCARG(uap, who) == 0)
                        pr = curp->p_p;
                else
                        pr = prfind(SCARG(uap, who));
                if (pr == NULL)
                        break;
                error = donice(curp, pr, SCARG(uap, prio));
                found = 1;
                break;

        case PRIO_PGRP: {
                struct pgrp *pg;

                if (SCARG(uap, who) == 0)
                        pg = curp->p_p->ps_pgrp;
                else if ((pg = pgfind(SCARG(uap, who))) == NULL)
                        break;
                LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
                        error = donice(curp, pr, SCARG(uap, prio));
                        found = 1;
                }
                break;
        }

        case PRIO_USER:
                if (SCARG(uap, who) == 0)
                        SCARG(uap, who) = curp->p_ucred->cr_uid;
                LIST_FOREACH(pr, &allprocess, ps_list)
                        if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
                                error = donice(curp, pr, SCARG(uap, prio));
                                found = 1;
                        }
                break;

        default:
                return (EINVAL);
        }
        if (!found)
                return (ESRCH);
        return (error);
}

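/*
 * Change the nice value of the process `chgpr'.  The caller must be
 * root or have a uid matching the target's effective uid; lowering the
 * nice value additionally requires root.  The value is clamped to
 * [PRIO_MIN, PRIO_MAX] and every thread of the process is rescheduled.
 */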
int
donice(struct proc *curp, struct process *chgpr, int n)
{
        struct ucred *ucred = curp->p_ucred;
        struct proc *p;
        int s;

        if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
            ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
            ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        n += NZERO;
        if (n < chgpr->ps_nice && suser(curp))
                return (EACCES);
        chgpr->ps_nice = n;
        SCHED_LOCK(s);
        TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
                setpriority(p, p->p_estcpu, n);
        }
        SCHED_UNLOCK(s);
        return (0);
}

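/*
 * setrlimit(2): copy the new limit in from userland and let
 * dosetrlimit() validate and install it.
 */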
int
sys_setrlimit(struct proc *p, void *v, register_t *retval)
{
        struct sys_setrlimit_args /* {
                syscallarg(int) which;
                syscallarg(const struct rlimit *) rlp;
        } */ *uap = v;
        struct rlimit alim;
        int error;

        error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
            sizeof (struct rlimit));
        if (error)
                return (error);
#ifdef KTRACE
        if (KTRPOINT(p, KTR_STRUCT))
                ktrrlimit(p, &alim);
#endif
        return (dosetrlimit(p, SCARG(uap, which), &alim));
}

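/*
 * Install a new resource limit for the calling process.  Raising the
 * hard limit requires root, both limits are clamped to the system
 * maximums, and RLIMIT_STACK changes adjust the protection of the
 * already reserved stack region.
 */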
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
        struct rlimit *alimp;
        struct plimit *limit;
        rlim_t maxlim;
        int error;

        if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
                return (EINVAL);

        rw_enter_write(&rlimit_lock);

        alimp = &p->p_p->ps_limit->pl_rlimit[which];
        if (limp->rlim_max > alimp->rlim_max) {
                if ((error = suser(p)) != 0) {
                        rw_exit_write(&rlimit_lock);
                        return (error);
                }
        }

        /* Get exclusive write access to the limit structure. */
        limit = lim_write_begin();
        alimp = &limit->pl_rlimit[which];

        switch (which) {
        case RLIMIT_DATA:
                maxlim = maxdmap;
                break;
        case RLIMIT_STACK:
                maxlim = maxsmap;
                break;
        case RLIMIT_NOFILE:
                maxlim = maxfiles;
                break;
        case RLIMIT_NPROC:
                maxlim = maxprocess;
                break;
        default:
                maxlim = RLIM_INFINITY;
                break;
        }

        if (limp->rlim_max > maxlim)
                limp->rlim_max = maxlim;
        if (limp->rlim_cur > limp->rlim_max)
                limp->rlim_cur = limp->rlim_max;

        if (which == RLIMIT_CPU && limp->rlim_cur != RLIM_INFINITY &&
            alimp->rlim_cur == RLIM_INFINITY)
                timeout_add_msec(&p->p_p->ps_rucheck_to, RUCHECK_INTERVAL);

        if (which == RLIMIT_STACK) {
                /*
                 * Stack is allocated to the max at exec time with only
                 * "rlim_cur" bytes accessible.  If stack limit is going
                 * up make more accessible, if going down make inaccessible.
                 */
                if (limp->rlim_cur != alimp->rlim_cur) {
                        vaddr_t addr;
                        vsize_t size;
                        vm_prot_t prot;
                        struct vmspace *vm = p->p_vmspace;

                        if (limp->rlim_cur > alimp->rlim_cur) {
                                prot = PROT_READ | PROT_WRITE;
                                size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
                                addr = (vaddr_t)vm->vm_maxsaddr +
                                    alimp->rlim_cur;
#else
                                addr = (vaddr_t)vm->vm_minsaddr -
                                    limp->rlim_cur;
#endif
                        } else {
                                prot = PROT_NONE;
                                size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
                                addr = (vaddr_t)vm->vm_maxsaddr +
                                    limp->rlim_cur;
#else
                                addr = (vaddr_t)vm->vm_minsaddr -
                                    alimp->rlim_cur;
#endif
                        }
                        addr = trunc_page(addr);
                        size = round_page(size);
                        KERNEL_LOCK();
                        (void) uvm_map_protect(&vm->vm_map, addr,
                            addr+size, prot, UVM_ET_STACK, FALSE, FALSE);
                        KERNEL_UNLOCK();
                }
        }

        *alimp = *limp;

        lim_write_commit(limit);
        rw_exit_write(&rlimit_lock);

        return (0);
}

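/*
 * getrlimit(2): copy the requested resource limit out to userland.
 */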
int
sys_getrlimit(struct proc *p, void *v, register_t *retval)
{
        struct sys_getrlimit_args /* {
                syscallarg(int) which;
                syscallarg(struct rlimit *) rlp;
        } */ *uap = v;
        struct plimit *limit;
        struct rlimit alimp;
        int error;

        if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
                return (EINVAL);
        limit = lim_read_enter();
        alimp = limit->pl_rlimit[SCARG(uap, which)];
        lim_read_leave(limit);
        error = copyout(&alimp, SCARG(uap, rlp), sizeof(struct rlimit));
#ifdef KTRACE
        if (error == 0 && KTRPOINT(p, KTR_STRUCT))
                ktrrlimit(p, &alimp);
#endif
        return (error);
}

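/*
 * Add a thread's accumulated run time and tick counts into the given
 * tusage structure.
 */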
void
tuagg_sub(struct tusage *tup, struct proc *p)
{
        timespecadd(&tup->tu_runtime, &p->p_rtime, &tup->tu_runtime);
        tup->tu_uticks += p->p_uticks;
        tup->tu_sticks += p->p_sticks;
        tup->tu_iticks += p->p_iticks;
}

/*
 * Aggregate a single thread's immediate time counts into the running
 * totals for the thread and process
 */
void
tuagg_unlocked(struct process *pr, struct proc *p)
{
        tuagg_sub(&pr->ps_tu, p);
        tuagg_sub(&p->p_tu, p);
        timespecclear(&p->p_rtime);
        p->p_uticks = 0;
        p->p_sticks = 0;
        p->p_iticks = 0;
}

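/*
 * Like tuagg_unlocked(), but takes the scheduler lock around the
 * aggregation.
 */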
void
tuagg(struct process *pr, struct proc *p)
{
        int s;

        SCHED_LOCK(s);
        tuagg_unlocked(pr, p);
        SCHED_UNLOCK(s);
}

/*
 * Transform the running time and tick information in a struct tusage
 * into user, system, and interrupt time usage.
 */
void
calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
    struct timespec *ip)
{
        u_quad_t st, ut, it;
        int freq;

        st = tup->tu_sticks;
        ut = tup->tu_uticks;
        it = tup->tu_iticks;

        if (st + ut + it == 0) {
                timespecclear(up);
                timespecclear(sp);
                if (ip != NULL)
                        timespecclear(ip);
                return;
        }

        freq = stathz ? stathz : hz;

        st = st * 1000000000 / freq;
        sp->tv_sec = st / 1000000000;
        sp->tv_nsec = st % 1000000000;
        ut = ut * 1000000000 / freq;
        up->tv_sec = ut / 1000000000;
        up->tv_nsec = ut % 1000000000;
        if (ip != NULL) {
                it = it * 1000000000 / freq;
                ip->tv_sec = it / 1000000000;
                ip->tv_nsec = it % 1000000000;
        }
}

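/*
 * Wrapper around calctsru() that returns struct timeval instead of
 * struct timespec.
 */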
void
calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
    struct timeval *ip)
{
        struct timespec u, s, i;

        calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
        TIMESPEC_TO_TIMEVAL(up, &u);
        TIMESPEC_TO_TIMEVAL(sp, &s);
        if (ip != NULL)
                TIMESPEC_TO_TIMEVAL(ip, &i);
}

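/*
 * getrusage(2): gather resource usage with dogetrusage() and copy the
 * result out to userland.
 */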
int
sys_getrusage(struct proc *p, void *v, register_t *retval)
{
        struct sys_getrusage_args /* {
                syscallarg(int) who;
                syscallarg(struct rusage *) rusage;
        } */ *uap = v;
        struct rusage ru;
        int error;

        error = dogetrusage(p, SCARG(uap, who), &ru);
        if (error == 0) {
                error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
                if (error == 0 && KTRPOINT(p, KTR_STRUCT))
                        ktrrusage(p, &ru);
#endif
        }
        return (error);
}

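/*
 * Fill in a struct rusage for the calling thread, its whole process
 * (dead and living threads), or the process' terminated children,
 * depending on `who'.
 */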
int
dogetrusage(struct proc *p, int who, struct rusage *rup)
{
        struct process *pr = p->p_p;
        struct proc *q;

        switch (who) {

        case RUSAGE_SELF:
                /* start with the sum of dead threads, if any */
                if (pr->ps_ru != NULL)
                        *rup = *pr->ps_ru;
                else
                        memset(rup, 0, sizeof(*rup));

                /* add on all living threads */
                TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
                        ruadd(rup, &q->p_ru);
                        tuagg(pr, q);
                }

                calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
                break;

        case RUSAGE_THREAD:
                *rup = p->p_ru;
                calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
                break;

        case RUSAGE_CHILDREN:
                *rup = pr->ps_cru;
                break;

        default:
                return (EINVAL);
        }
        return (0);
}

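/*
 * Fold the resource usage in ru2 into ru: sum the time and counter
 * fields and keep the larger of the two maxrss values.
 */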
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
        timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

/*
 * Check if the process exceeds its cpu resource allocation.
 * If over max, kill it.
 */
void
rucheck(void *arg)
{
        struct rlimit rlim;
        struct process *pr = arg;
        time_t runtime;
        int s;

        KERNEL_ASSERT_LOCKED();

        SCHED_LOCK(s);
        runtime = pr->ps_tu.tu_runtime.tv_sec;
        SCHED_UNLOCK(s);

        mtx_enter(&pr->ps_mtx);
        rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU];
        mtx_leave(&pr->ps_mtx);

        if ((rlim_t)runtime >= rlim.rlim_cur) {
                if ((rlim_t)runtime >= rlim.rlim_max) {
                        prsignal(pr, SIGKILL);
                } else if (runtime >= pr->ps_nextxcpu) {
                        prsignal(pr, SIGXCPU);
                        pr->ps_nextxcpu = runtime + SIGXCPU_INTERVAL;
                }
        }

        timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);
}

struct pool plimit_pool;

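/*
 * Initialize the plimit pool and set up the initial resource limits
 * for process 0; most limits start out as RLIM_INFINITY.
 */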
void
lim_startup(struct plimit *limit0)
{
        rlim_t lim;
        int i;

        pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_MPFLOOR,
            PR_WAITOK, "plimitpl", NULL);

        for (i = 0; i < nitems(limit0->pl_rlimit); i++)
                limit0->pl_rlimit[i].rlim_cur =
                    limit0->pl_rlimit[i].rlim_max = RLIM_INFINITY;
        limit0->pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
        limit0->pl_rlimit[RLIMIT_NOFILE].rlim_max = MIN(NOFILE_MAX,
            (maxfiles - NOFILE > NOFILE) ? maxfiles - NOFILE : NOFILE);
        limit0->pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC;
        lim = ptoa(uvmexp.free);
        limit0->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
        lim = ptoa(64*1024);            /* Default to very low */
        limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
        limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
        refcnt_init(&limit0->pl_refcnt);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
        struct plimit *newlim;

        newlim = pool_get(&plimit_pool, PR_WAITOK);
        memcpy(newlim->pl_rlimit, lim->pl_rlimit,
            sizeof(struct rlimit) * RLIM_NLIMITS);
        refcnt_init(&newlim->pl_refcnt);
        return (newlim);
}

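/*
 * Drop a reference to a plimit structure; the structure is returned
 * to the pool when the last reference goes away.
 */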
void
lim_free(struct plimit *lim)
{
        if (refcnt_rele(&lim->pl_refcnt) == 0)
                return;
        pool_put(&plimit_pool, lim);
}

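/*
 * Let a newly forked child share its parent's limit structure and
 * arm the child's CPU-limit check timeout if an RLIMIT_CPU limit is
 * in effect.
 */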
void
lim_fork(struct process *parent, struct process *child)
{
        struct plimit *limit;

        mtx_enter(&parent->ps_mtx);
        limit = parent->ps_limit;
        refcnt_take(&limit->pl_refcnt);
        mtx_leave(&parent->ps_mtx);

        child->ps_limit = limit;

        if (limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
                timeout_add_msec(&child->ps_rucheck_to, RUCHECK_INTERVAL);
}

/*
 * Return an exclusive write reference to the process' resource limit structure.
 * The caller has to release the structure by calling lim_write_commit().
 *
 * This invalidates any plimit read reference held by the calling thread.
 */
struct plimit *
lim_write_begin(void)
{
        struct plimit *limit;
        struct proc *p = curproc;

        rw_assert_wrlock(&rlimit_lock);

        if (p->p_limit != NULL)
                lim_free(p->p_limit);
        p->p_limit = NULL;

        /*
         * It is safe to access ps_limit here without holding ps_mtx
         * because rlimit_lock excludes other writers.
         */

        limit = p->p_p->ps_limit;
        if (P_HASSIBLING(p) || refcnt_shared(&limit->pl_refcnt))
                limit = lim_copy(limit);

        return (limit);
}

/*
 * Finish exclusive write access to the plimit structure.
 * This makes the structure visible to other threads in the process.
 */
void
lim_write_commit(struct plimit *limit)
{
        struct plimit *olimit;
        struct proc *p = curproc;

        rw_assert_wrlock(&rlimit_lock);

        if (limit != p->p_p->ps_limit) {
                mtx_enter(&p->p_p->ps_mtx);
                olimit = p->p_p->ps_limit;
                p->p_p->ps_limit = limit;
                mtx_leave(&p->p_p->ps_mtx);

                lim_free(olimit);
        }
}

/*
 * Begin read access to the process' resource limit structure.
 * The access has to be finished by calling lim_read_leave().
 *
 * Sections denoted by lim_read_enter() and lim_read_leave() cannot nest.
 */
struct plimit *
lim_read_enter(void)
{
        struct plimit *limit;
        struct proc *p = curproc;
        struct process *pr = p->p_p;

        /*
         * This thread might not observe the latest value of ps_limit
         * if another thread updated the limits very recently on another CPU.
         * However, the anomaly should disappear quickly, especially if
         * there is any synchronization activity between the threads (or
         * the CPUs).
         */

        limit = p->p_limit;
        if (limit != pr->ps_limit) {
                mtx_enter(&pr->ps_mtx);
                limit = pr->ps_limit;
                refcnt_take(&limit->pl_refcnt);
                mtx_leave(&pr->ps_mtx);
                if (p->p_limit != NULL)
                        lim_free(p->p_limit);
                p->p_limit = limit;
        }
        KASSERT(limit != NULL);
        return (limit);
}

/*
 * Get the value of the resource limit in given process.
 */
rlim_t
lim_cur_proc(struct proc *p, int which)
{
        struct process *pr = p->p_p;
        rlim_t val;

        KASSERT(which >= 0 && which < RLIM_NLIMITS);

        mtx_enter(&pr->ps_mtx);
        val = pr->ps_limit->pl_rlimit[which].rlim_cur;
        mtx_leave(&pr->ps_mtx);
        return (val);
}