/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/kern/kern_resource.c 247905 2013-03-07 02:53:29Z ian $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
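
/*
 * Illustrative userland sketch (not compiled into the kernel): driving the
 * two syscalls above through their libc wrappers.  Prototypes come from
 * <sys/resource.h>; who == 0 means "the calling process", matching the
 * uap->who == 0 cases handled above.
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	int prio;

	/* getpriority() can legitimately return -1, so check errno. */
	errno = 0;
	prio = getpriority(PRIO_PROCESS, 0);
	if (prio == -1 && errno != 0)
		return (1);

	/* Raising the nice value needs no privilege; lowering it does. */
	if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
		perror("setpriority");
	return (0);
}
#endif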

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look up thread in current process */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there is
		 * a potential for system deadlock if an idleprio process gains
		 * a lock on a resource that other processes need (and the
		 * idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it is
		 * still conceivable that a program with low priority will never
		 * get to run.  In short, allowing this feature might make it
		 * easier to lock a resource indefinitely, but it is not the
		 * only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
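
/*
 * Illustrative userland sketch (not compiled into the kernel): demoting the
 * calling thread to idle class via rtprio_thread(2).  The RTP_* constants
 * come from <sys/rtprio.h>; lwpid 0 selects the calling thread, mirroring
 * the uap->lwpid == 0 case above.  For unprivileged users this succeeds
 * only when the security.bsd.unprivileged_idprio sysctl is nonzero.
 */
#if 0
#include <sys/types.h>
#include <sys/rtprio.h>
#include <stdio.h>

int
main(void)
{
	struct rtprio rtp;

	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;	/* least favorable idle priority */
	if (rtprio_thread(RTP_SET, 0, &rtp) == -1)
		perror("rtprio_thread");
	return (0);
}
#endif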

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td /* curthread */, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    !unprivileged_idprio)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread; if we are operating on another process,
		 * set all the threads in that process.  Specifying
		 * our own pid (rather than zero) takes the latter path.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}
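
/*
 * Illustrative userland sketch (not compiled into the kernel): the behavior
 * lim_cb() implements, as seen from a process.  Once accumulated CPU time
 * passes the soft RLIMIT_CPU the process receives SIGXCPU (and the kernel
 * quietly advances the checkpoint by 5 seconds, per the p_cpulimit += 5
 * above); passing the hard limit gets the process killed.
 */
#if 0
#include <sys/resource.h>
#include <signal.h>
#include <stdio.h>

static void
onxcpu(int sig)
{
	/* Deliberately empty: just observe the signal. */
	(void)sig;
}

int
main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 5 };	/* seconds */

	signal(SIGXCPU, onxcpu);
	if (setrlimit(RLIMIT_CPU, &rl) == -1)
		perror("setrlimit");
	for (;;)
		;	/* spin: SIGXCPU at ~1s of CPU, kill at the 5s hard max */
	return (0);
}
#endif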

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more accessible; if going down, make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}
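
/*
 * Worked example of the RLIMIT_STACK adjustment above, with illustrative
 * numbers (a hypothetical sv_usrstack of 0xc0000000; real values depend on
 * the ABI).  Growing the limit from 8MB to 16MB computes
 *
 *	size = 16MB - 8MB = 8MB
 *	addr = 0xc0000000 - 16MB = 0xbf000000
 *
 * so the range [0xbf000000, 0xbf800000) directly below the previously
 * usable region gains sv_stackprot.  Shrinking from 16MB back to 8MB
 * computes the same range and maps it VM_PROT_NONE again.
 */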

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}
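
/*
 * Illustrative userland sketch (not compiled into the kernel): raising the
 * soft RLIMIT_NOFILE up to the hard limit through the syscalls above.  No
 * privilege is needed as long as neither value exceeds the hard maximum.
 */
#if 0
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
		return (1);
	rl.rlim_cur = rl.rlim_max;	/* soft limit up to hard max */
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
		perror("setrlimit");
	printf("nofile: cur=%jd max=%jd\n",
	    (intmax_t)rl.rlim_cur, (intmax_t)rl.rlim_max);
	return (0);
}
#endif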

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
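
/*
 * Worked example of the proration in calcru1(), with illustrative numbers:
 * given ut = 600 user ticks, st = 300 system ticks, it = 100 interrupt
 * ticks (so tt = 1000) and tu = 2000000 usec of total runtime,
 *
 *	uu = (2000000 * 600) / 1000 = 1200000 usec of user time
 *	su = (2000000 * 300) / 1000 =  600000 usec of system time
 *
 * i.e. runtime is apportioned by the tick ratio; the 200000 usec
 * corresponding to interrupt ticks is not reported in up/sp.
 */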

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_SUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}
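
/*
 * Illustrative userland sketch (not compiled into the kernel): the three
 * "who" values handled by kern_getrusage() above.  RUSAGE_THREAD is a
 * FreeBSD extension that reports only the calling thread, via the
 * rufetchtd() path.
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("self: %ld voluntary ctx switches\n", ru.ru_nvcsw);
	if (getrusage(RUSAGE_CHILDREN, &ru) == 0)
		printf("children: %ld.%06ld user sec\n",
		    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	if (getrusage(RUSAGE_THREAD, &ru) == 0)
		printf("thread: %ld.%06ld user sec\n",
		    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	return (0);
}
#endif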

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
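
/*
 * Note on the pointer walk above: it relies on <sys/resource.h> defining
 * ru_first and ru_last as aliases for the first (ru_ixrss) and last
 * (ru_nivcsw) of the contiguous long-typed fields of struct rusage, so the
 * summable counters can be treated as a flat array.  Everything between
 * those two fields accumulates additively; ru_maxrss is the one field that
 * takes a maximum instead, which is why it is handled separately.
 */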

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru is executed only
 * after the rufetch has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1));
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		racct_create(&uip->ui_racct);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			racct_destroy(&uip->ui_racct);
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in a need to free, bump the count
 *   back up, drop the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		racct_destroy(&uip->ui_racct);
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}
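
/*
 * Minimal sketch of the opportunistic release pattern used above, distilled
 * out of the uidinfo specifics.  The obj type and the lock/remove_from_hash/
 * destroy helpers are hypothetical, for illustration only; the point is
 * that the hash lock is taken only when the 1 -> 0 transition is possible.
 */
#if 0
void
obj_release(struct obj *o)
{
	int old;

	/* Fast path: clearly not the last reference; no hash lock needed. */
	old = o->refcnt;
	if (old > 1 && atomic_cmpset_int(&o->refcnt, old, old - 1))
		return;

	/* Slow path: may be the last reference; serialize with lookups. */
	lock(&hash_lock);
	if (refcount_release(&o->refcnt)) {
		remove_from_hash(o);
		unlock(&hash_lock);
		destroy(o);
		return;
	}
	unlock(&hash_lock);	/* a lookup raced in a new reference */
}
#endif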

void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
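
/*
 * Note on the lockless pattern above (also used by chgsbsize() and
 * chgptscnt() below): atomic_fetchadd_long() returns the value prior to
 * the addition, so "old + diff > max" tests the post-increment count.  On
 * failure the increment is rolled back, which means the counter can
 * transiently overshoot max as seen by other threads; that small race is
 * tolerated in exchange for not taking a lock on every update.
 */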

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}