FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_resource.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      @(#)kern_resource.c     8.5 (Berkeley) 1/21/94
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD: releng/6.0/sys/kern/kern_resource.c 146879 2005-06-01 17:52:51Z alc $");
   39 
   40 #include "opt_compat.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/sysproto.h>
   45 #include <sys/file.h>
   46 #include <sys/kernel.h>
   47 #include <sys/lock.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mutex.h>
   50 #include <sys/proc.h>
   51 #include <sys/resourcevar.h>
   52 #include <sys/sched.h>
   53 #include <sys/sx.h>
   54 #include <sys/syscallsubr.h>
   55 #include <sys/sysent.h>
   56 #include <sys/time.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/vm_param.h>
   60 #include <vm/pmap.h>
   61 #include <vm/vm_map.h>
   62 
   63 
   64 static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
   65 static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
   66 #define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
   67 static struct mtx uihashtbl_mtx;
   68 static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
   69 static u_long uihash;           /* size of hash table - 1 */
   70 
   71 static void     calcru1(struct proc *p, struct rusage_ext *ruxp,
   72                     struct timeval *up, struct timeval *sp);
   73 static int      donice(struct thread *td, struct proc *chgp, int n);
   74 static struct uidinfo *uilookup(uid_t uid);
   75 
   76 /*
   77  * Resource controls and accounting.
   78  */
   79 
   80 #ifndef _SYS_SYSPROTO_H_
   81 struct getpriority_args {
   82         int     which;
   83         int     who;
   84 };
   85 #endif
   86 /*
   87  * MPSAFE
   88  */
   89 int
   90 getpriority(td, uap)
   91         struct thread *td;
   92         register struct getpriority_args *uap;
   93 {
   94         struct proc *p;
   95         struct pgrp *pg;
   96         int error, low;
   97 
   98         error = 0;
   99         low = PRIO_MAX + 1;
  100         switch (uap->which) {
  101 
  102         case PRIO_PROCESS:
  103                 if (uap->who == 0)
  104                         low = td->td_proc->p_nice;
  105                 else {
  106                         p = pfind(uap->who);
  107                         if (p == NULL)
  108                                 break;
  109                         if (p_cansee(td, p) == 0)
  110                                 low = p->p_nice;
  111                         PROC_UNLOCK(p);
  112                 }
  113                 break;
  114 
  115         case PRIO_PGRP:
  116                 sx_slock(&proctree_lock);
  117                 if (uap->who == 0) {
  118                         pg = td->td_proc->p_pgrp;
  119                         PGRP_LOCK(pg);
  120                 } else {
  121                         pg = pgfind(uap->who);
  122                         if (pg == NULL) {
  123                                 sx_sunlock(&proctree_lock);
  124                                 break;
  125                         }
  126                 }
  127                 sx_sunlock(&proctree_lock);
  128                 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
  129                         PROC_LOCK(p);
  130                         if (!p_cansee(td, p)) {
  131                                 if (p->p_nice < low)
  132                                         low = p->p_nice;
  133                         }
  134                         PROC_UNLOCK(p);
  135                 }
  136                 PGRP_UNLOCK(pg);
  137                 break;
  138 
  139         case PRIO_USER:
  140                 if (uap->who == 0)
  141                         uap->who = td->td_ucred->cr_uid;
  142                 sx_slock(&allproc_lock);
  143                 LIST_FOREACH(p, &allproc, p_list) {
  144                         PROC_LOCK(p);
  145                         if (!p_cansee(td, p) &&
  146                             p->p_ucred->cr_uid == uap->who) {
  147                                 if (p->p_nice < low)
  148                                         low = p->p_nice;
  149                         }
  150                         PROC_UNLOCK(p);
  151                 }
  152                 sx_sunlock(&allproc_lock);
  153                 break;
  154 
  155         default:
  156                 error = EINVAL;
  157                 break;
  158         }
  159         if (low == PRIO_MAX + 1 && error == 0)
  160                 error = ESRCH;
  161         td->td_retval[0] = low;
  162         return (error);
  163 }
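
/*
 * Illustrative userland sketch (not part of this file).  Because the
 * kernel hands the nice value back in td_retval[0], getpriority(2)
 * may legitimately return -1, so a caller must clear errno first to
 * distinguish that from an error.
 */
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int
print_own_nice(void)
{
        int prio;

        errno = 0;                              /* -1 is a valid nice value */
        prio = getpriority(PRIO_PROCESS, 0);    /* 0 == current process */
        if (prio == -1 && errno != 0) {
                perror("getpriority");
                return (-1);
        }
        printf("current nice: %d\n", prio);
        return (0);
}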
  164 
  165 #ifndef _SYS_SYSPROTO_H_
  166 struct setpriority_args {
  167         int     which;
  168         int     who;
  169         int     prio;
  170 };
  171 #endif
  172 /*
  173  * MPSAFE
  174  */
  175 int
  176 setpriority(td, uap)
  177         struct thread *td;
  178         struct setpriority_args *uap;
  179 {
  180         struct proc *curp, *p;
  181         struct pgrp *pg;
  182         int found = 0, error = 0;
  183 
  184         curp = td->td_proc;
  185         switch (uap->which) {
  186         case PRIO_PROCESS:
  187                 if (uap->who == 0) {
  188                         PROC_LOCK(curp);
  189                         error = donice(td, curp, uap->prio);
  190                         PROC_UNLOCK(curp);
  191                 } else {
  192                         p = pfind(uap->who);
   193                         if (p == NULL)
  194                                 break;
  195                         if (p_cansee(td, p) == 0)
  196                                 error = donice(td, p, uap->prio);
  197                         PROC_UNLOCK(p);
  198                 }
  199                 found++;
  200                 break;
  201 
  202         case PRIO_PGRP:
  203                 sx_slock(&proctree_lock);
  204                 if (uap->who == 0) {
  205                         pg = curp->p_pgrp;
  206                         PGRP_LOCK(pg);
  207                 } else {
  208                         pg = pgfind(uap->who);
  209                         if (pg == NULL) {
  210                                 sx_sunlock(&proctree_lock);
  211                                 break;
  212                         }
  213                 }
  214                 sx_sunlock(&proctree_lock);
  215                 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
  216                         PROC_LOCK(p);
  217                         if (!p_cansee(td, p)) {
  218                                 error = donice(td, p, uap->prio);
  219                                 found++;
  220                         }
  221                         PROC_UNLOCK(p);
  222                 }
  223                 PGRP_UNLOCK(pg);
  224                 break;
  225 
  226         case PRIO_USER:
  227                 if (uap->who == 0)
  228                         uap->who = td->td_ucred->cr_uid;
  229                 sx_slock(&allproc_lock);
  230                 FOREACH_PROC_IN_SYSTEM(p) {
  231                         PROC_LOCK(p);
  232                         if (p->p_ucred->cr_uid == uap->who &&
  233                             !p_cansee(td, p)) {
  234                                 error = donice(td, p, uap->prio);
  235                                 found++;
  236                         }
  237                         PROC_UNLOCK(p);
  238                 }
  239                 sx_sunlock(&allproc_lock);
  240                 break;
  241 
  242         default:
  243                 error = EINVAL;
  244                 break;
  245         }
  246         if (found == 0 && error == 0)
  247                 error = ESRCH;
  248         return (error);
  249 }
  250 
  251 /*
  252  * Set "nice" for a (whole) process.
  253  */
  254 static int
  255 donice(struct thread *td, struct proc *p, int n)
  256 {
  257         int error;
  258 
  259         PROC_LOCK_ASSERT(p, MA_OWNED);
  260         if ((error = p_cansched(td, p)))
  261                 return (error);
  262         if (n > PRIO_MAX)
  263                 n = PRIO_MAX;
  264         if (n < PRIO_MIN)
  265                 n = PRIO_MIN;
  266         if (n < p->p_nice && suser(td) != 0)
  267                 return (EACCES);
  268         mtx_lock_spin(&sched_lock);
  269         sched_nice(p, n);
  270         mtx_unlock_spin(&sched_lock);
  271         return (0);
  272 }
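
/*
 * Illustrative userland sketch (not part of this file).  Anyone may
 * raise their nice value, but per the suser() check in donice() an
 * ordinary user who tries to lower the value gets EACCES.
 */
#include <sys/resource.h>
#include <err.h>

void
be_nicer(void)
{
        /* Raising nice (less favorable scheduling) needs no privilege. */
        if (setpriority(PRIO_PROCESS, 0, 10) != 0)
                err(1, "setpriority");  /* EACCES when lowering w/o root */
}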
  273 
  274 /*
  275  * Set realtime priority.
  276  *
  277  * MPSAFE
  278  */
  279 #ifndef _SYS_SYSPROTO_H_
  280 struct rtprio_args {
  281         int             function;
  282         pid_t           pid;
  283         struct rtprio   *rtp;
  284 };
  285 #endif
  286 
  287 int
  288 rtprio(td, uap)
  289         struct thread *td;              /* curthread */
  290         register struct rtprio_args *uap;
  291 {
  292         struct proc *curp;
  293         struct proc *p;
  294         struct ksegrp *kg;
  295         struct rtprio rtp;
  296         int cierror, error;
  297 
  298         /* Perform copyin before acquiring locks if needed. */
  299         if (uap->function == RTP_SET)
  300                 cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
  301         else
  302                 cierror = 0;
  303 
  304         curp = td->td_proc;
  305         if (uap->pid == 0) {
  306                 p = curp;
  307                 PROC_LOCK(p);
  308         } else {
  309                 p = pfind(uap->pid);
  310                 if (p == NULL)
  311                         return (ESRCH);
  312         }
  313 
  314         switch (uap->function) {
  315         case RTP_LOOKUP:
  316                 if ((error = p_cansee(td, p)))
  317                         break;
  318                 mtx_lock_spin(&sched_lock);
  319                 /*
  320                  * Return OUR priority if no pid specified,
  321                  * or if one is, report the highest priority
   322                  * in the process.  There isn't much more we can do, as
   323                  * there is only room to return a single priority.
   324                  * XXXKSE: we may need a new interface to report
   325                  * priorities of multiple system-scope threads.
  326                  * Note: specifying our own pid is not the same
  327                  * as leaving it zero.
  328                  */
  329                 if (uap->pid == 0) {
  330                         pri_to_rtp(td->td_ksegrp, &rtp);
  331                 } else {
  332                         struct rtprio rtp2;
  333 
  334                         rtp.type = RTP_PRIO_IDLE;
  335                         rtp.prio = RTP_PRIO_MAX;
  336                         FOREACH_KSEGRP_IN_PROC(p, kg) {
  337                                 pri_to_rtp(kg, &rtp2);
  338                                 if (rtp2.type <  rtp.type ||
  339                                     (rtp2.type == rtp.type &&
  340                                     rtp2.prio < rtp.prio)) {
  341                                         rtp.type = rtp2.type;
  342                                         rtp.prio = rtp2.prio;
  343                                 }
  344                         }
  345                 }
  346                 mtx_unlock_spin(&sched_lock);
  347                 PROC_UNLOCK(p);
  348                 return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
  349         case RTP_SET:
  350                 if ((error = p_cansched(td, p)) || (error = cierror))
  351                         break;
  352 
  353                 /* Disallow setting rtprio in most cases if not superuser. */
  354                 if (suser(td) != 0) {
  355                         /* can't set someone else's */
  356                         if (uap->pid) {
  357                                 error = EPERM;
  358                                 break;
  359                         }
  360                         /* can't set realtime priority */
  361 /*
   362  * Realtime priority has to be restricted for reasons that should be
  363  * obvious.  However, for idle priority, there is a potential for
  364  * system deadlock if an idleprio process gains a lock on a resource
  365  * that other processes need (and the idleprio process can't run
  366  * due to a CPU-bound normal process).  Fix me!  XXX
  367  */
  368 #if 0
  369                         if (RTP_PRIO_IS_REALTIME(rtp.type)) {
  370 #else
  371                         if (rtp.type != RTP_PRIO_NORMAL) {
  372 #endif
  373                                 error = EPERM;
  374                                 break;
  375                         }
  376                 }
  377 
  378                 /*
   379                  * If we are setting our own priority, set just our
   380                  * KSEGRP, but if we are targeting another process,
   381                  * do all the KSEGRPs in that process.  Specifying
   382                  * our own pid counts as the latter case.
  383                  */
  384                 mtx_lock_spin(&sched_lock);
  385                 if (uap->pid == 0) {
  386                         error = rtp_to_pri(&rtp, td->td_ksegrp);
  387                 } else {
  388                         FOREACH_KSEGRP_IN_PROC(p, kg) {
  389                                 if ((error = rtp_to_pri(&rtp, kg)) != 0) {
  390                                         break;
  391                                 }
  392                         }
  393                 }
  394                 mtx_unlock_spin(&sched_lock);
  395                 break;
  396         default:
  397                 error = EINVAL;
  398                 break;
  399         }
  400         PROC_UNLOCK(p);
  401         return (error);
  402 }
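
/*
 * Illustrative userland sketch (not part of this file).  rtprio(2)
 * with pid 0 operates on the calling process; in this version any
 * class other than RTP_PRIO_NORMAL requires superuser, per the
 * check above.
 */
#include <sys/types.h>
#include <sys/rtprio.h>
#include <err.h>

void
make_self_idle_class(void)
{
        struct rtprio rtp;

        rtp.type = RTP_PRIO_IDLE;       /* idle scheduling class */
        rtp.prio = RTP_PRIO_MAX;        /* least favorable priority */
        if (rtprio(RTP_SET, 0, &rtp) != 0)
                err(1, "rtprio");
}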
  403 
  404 int
  405 rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
  406 {
  407 
  408         mtx_assert(&sched_lock, MA_OWNED);
  409         if (rtp->prio > RTP_PRIO_MAX)
  410                 return (EINVAL);
  411         switch (RTP_PRIO_BASE(rtp->type)) {
  412         case RTP_PRIO_REALTIME:
  413                 kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
  414                 break;
  415         case RTP_PRIO_NORMAL:
  416                 kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
  417                 break;
  418         case RTP_PRIO_IDLE:
  419                 kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
  420                 break;
  421         default:
  422                 return (EINVAL);
  423         }
  424         sched_class(kg, rtp->type);
  425         if (curthread->td_ksegrp == kg) {
  426                 sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
  427         }
  428         return (0);
  429 }
  430 
  431 void
  432 pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
  433 {
  434 
  435         mtx_assert(&sched_lock, MA_OWNED);
  436         switch (PRI_BASE(kg->kg_pri_class)) {
  437         case PRI_REALTIME:
  438                 rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
  439                 break;
  440         case PRI_TIMESHARE:
  441                 rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
  442                 break;
  443         case PRI_IDLE:
  444                 rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
  445                 break;
  446         default:
  447                 break;
  448         }
  449         rtp->type = kg->kg_pri_class;
  450 }
  451 
  452 #if defined(COMPAT_43)
  453 #ifndef _SYS_SYSPROTO_H_
  454 struct osetrlimit_args {
  455         u_int   which;
  456         struct  orlimit *rlp;
  457 };
  458 #endif
  459 /*
  460  * MPSAFE
  461  */
  462 int
  463 osetrlimit(td, uap)
  464         struct thread *td;
  465         register struct osetrlimit_args *uap;
  466 {
  467         struct orlimit olim;
  468         struct rlimit lim;
  469         int error;
  470 
  471         if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
  472                 return (error);
  473         lim.rlim_cur = olim.rlim_cur;
  474         lim.rlim_max = olim.rlim_max;
  475         error = kern_setrlimit(td, uap->which, &lim);
  476         return (error);
  477 }
  478 
  479 #ifndef _SYS_SYSPROTO_H_
  480 struct ogetrlimit_args {
  481         u_int   which;
  482         struct  orlimit *rlp;
  483 };
  484 #endif
  485 /*
  486  * MPSAFE
  487  */
  488 int
  489 ogetrlimit(td, uap)
  490         struct thread *td;
  491         register struct ogetrlimit_args *uap;
  492 {
  493         struct orlimit olim;
  494         struct rlimit rl;
  495         struct proc *p;
  496         int error;
  497 
  498         if (uap->which >= RLIM_NLIMITS)
  499                 return (EINVAL);
  500         p = td->td_proc;
  501         PROC_LOCK(p);
  502         lim_rlimit(p, uap->which, &rl);
  503         PROC_UNLOCK(p);
  504 
  505         /*
  506          * XXX would be more correct to convert only RLIM_INFINITY to the
  507          * old RLIM_INFINITY and fail with EOVERFLOW for other larger
  508          * values.  Most 64->32 and 32->16 conversions, including not
   509          * unimportant ones of uids, are even more broken than what we
  510          * do here (they blindly truncate).  We don't do this correctly
  511          * here since we have little experience with EOVERFLOW yet.
  512          * Elsewhere, getuid() can't fail...
  513          */
  514         olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
  515         olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
  516         error = copyout(&olim, uap->rlp, sizeof(olim));
  517         return (error);
  518 }
  519 #endif /* COMPAT_43 */
  520 
  521 #ifndef _SYS_SYSPROTO_H_
  522 struct __setrlimit_args {
  523         u_int   which;
  524         struct  rlimit *rlp;
  525 };
  526 #endif
  527 /*
  528  * MPSAFE
  529  */
  530 int
  531 setrlimit(td, uap)
  532         struct thread *td;
  533         register struct __setrlimit_args *uap;
  534 {
  535         struct rlimit alim;
  536         int error;
  537 
  538         if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
  539                 return (error);
  540         error = kern_setrlimit(td, uap->which, &alim);
  541         return (error);
  542 }
  543 
  544 int
  545 kern_setrlimit(td, which, limp)
  546         struct thread *td;
  547         u_int which;
  548         struct rlimit *limp;
  549 {
  550         struct plimit *newlim, *oldlim;
  551         struct proc *p;
  552         register struct rlimit *alimp;
  553         rlim_t oldssiz;
  554         int error;
  555 
  556         if (which >= RLIM_NLIMITS)
  557                 return (EINVAL);
  558 
  559         /*
  560          * Preserve historical bugs by treating negative limits as unsigned.
  561          */
  562         if (limp->rlim_cur < 0)
  563                 limp->rlim_cur = RLIM_INFINITY;
  564         if (limp->rlim_max < 0)
  565                 limp->rlim_max = RLIM_INFINITY;
  566 
  567         oldssiz = 0;
  568         p = td->td_proc;
  569         newlim = lim_alloc();
  570         PROC_LOCK(p);
  571         oldlim = p->p_limit;
  572         alimp = &oldlim->pl_rlimit[which];
  573         if (limp->rlim_cur > alimp->rlim_max ||
  574             limp->rlim_max > alimp->rlim_max)
  575                 if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL))) {
  576                         PROC_UNLOCK(p);
  577                         lim_free(newlim);
  578                         return (error);
  579                 }
  580         if (limp->rlim_cur > limp->rlim_max)
  581                 limp->rlim_cur = limp->rlim_max;
  582         lim_copy(newlim, oldlim);
  583         alimp = &newlim->pl_rlimit[which];
  584 
  585         switch (which) {
  586 
  587         case RLIMIT_CPU:
  588                 mtx_lock_spin(&sched_lock);
  589                 p->p_cpulimit = limp->rlim_cur;
  590                 mtx_unlock_spin(&sched_lock);
  591                 break;
  592         case RLIMIT_DATA:
  593                 if (limp->rlim_cur > maxdsiz)
  594                         limp->rlim_cur = maxdsiz;
  595                 if (limp->rlim_max > maxdsiz)
  596                         limp->rlim_max = maxdsiz;
  597                 break;
  598 
  599         case RLIMIT_STACK:
  600                 if (limp->rlim_cur > maxssiz)
  601                         limp->rlim_cur = maxssiz;
  602                 if (limp->rlim_max > maxssiz)
  603                         limp->rlim_max = maxssiz;
  604                 oldssiz = alimp->rlim_cur;
  605                 break;
  606 
  607         case RLIMIT_NOFILE:
  608                 if (limp->rlim_cur > maxfilesperproc)
  609                         limp->rlim_cur = maxfilesperproc;
  610                 if (limp->rlim_max > maxfilesperproc)
  611                         limp->rlim_max = maxfilesperproc;
  612                 break;
  613 
  614         case RLIMIT_NPROC:
  615                 if (limp->rlim_cur > maxprocperuid)
  616                         limp->rlim_cur = maxprocperuid;
  617                 if (limp->rlim_max > maxprocperuid)
  618                         limp->rlim_max = maxprocperuid;
  619                 if (limp->rlim_cur < 1)
  620                         limp->rlim_cur = 1;
  621                 if (limp->rlim_max < 1)
  622                         limp->rlim_max = 1;
  623                 break;
  624         }
  625         *alimp = *limp;
  626         p->p_limit = newlim;
  627         PROC_UNLOCK(p);
  628         lim_free(oldlim);
  629 
  630         if (which == RLIMIT_STACK) {
  631                 /*
  632                  * Stack is allocated to the max at exec time with only
   633                  * "rlim_cur" bytes accessible.  If the stack limit is going
   634                  * up, make more accessible; if going down, make inaccessible.
  635                  */
  636                 if (limp->rlim_cur != oldssiz) {
  637                         vm_offset_t addr;
  638                         vm_size_t size;
  639                         vm_prot_t prot;
  640 
  641                         if (limp->rlim_cur > oldssiz) {
  642                                 prot = p->p_sysent->sv_stackprot;
  643                                 size = limp->rlim_cur - oldssiz;
  644                                 addr = p->p_sysent->sv_usrstack -
  645                                     limp->rlim_cur;
  646                         } else {
  647                                 prot = VM_PROT_NONE;
  648                                 size = oldssiz - limp->rlim_cur;
  649                                 addr = p->p_sysent->sv_usrstack - oldssiz;
  650                         }
  651                         addr = trunc_page(addr);
  652                         size = round_page(size);
  653                         (void)vm_map_protect(&p->p_vmspace->vm_map,
  654                             addr, addr + size, prot, FALSE);
  655                 }
  656         }
  657         return (0);
  658 }
  659 
  660 #ifndef _SYS_SYSPROTO_H_
  661 struct __getrlimit_args {
  662         u_int   which;
  663         struct  rlimit *rlp;
  664 };
  665 #endif
  666 /*
  667  * MPSAFE
  668  */
  669 /* ARGSUSED */
  670 int
  671 getrlimit(td, uap)
  672         struct thread *td;
  673         register struct __getrlimit_args *uap;
  674 {
  675         struct rlimit rlim;
  676         struct proc *p;
  677         int error;
  678 
  679         if (uap->which >= RLIM_NLIMITS)
  680                 return (EINVAL);
  681         p = td->td_proc;
  682         PROC_LOCK(p);
  683         lim_rlimit(p, uap->which, &rlim);
  684         PROC_UNLOCK(p);
  685         error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
  686         return (error);
  687 }
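
/*
 * Illustrative userland sketch (not part of this file).  The usual
 * read-modify-write pattern for setrlimit(2): raising the soft limit
 * up to the hard limit needs no privilege, while raising either limit
 * above rlim_max trips the suser_cred() check in kern_setrlimit().
 */
#include <sys/resource.h>
#include <err.h>

void
raise_nofile(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
                err(1, "getrlimit");
        rl.rlim_cur = rl.rlim_max;      /* soft may rise to hard */
        if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
                err(1, "setrlimit");
}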
  688 
  689 /*
  690  * Transform the running time and tick information in proc p into user,
  691  * system, and interrupt time usage.
  692  */
  693 void
  694 calcru(p, up, sp)
  695         struct proc *p;
  696         struct timeval *up;
  697         struct timeval *sp;
  698 {
  699         struct bintime bt;
  700         struct rusage_ext rux;
  701         struct thread *td;
  702         int bt_valid;
  703 
  704         PROC_LOCK_ASSERT(p, MA_OWNED);
  705         mtx_assert(&sched_lock, MA_NOTOWNED);
  706         bt_valid = 0;
  707         mtx_lock_spin(&sched_lock);
  708         rux = p->p_rux;
  709         FOREACH_THREAD_IN_PROC(p, td) {
  710                 if (TD_IS_RUNNING(td)) {
  711                         /*
  712                          * Adjust for the current time slice.  This is
  713                          * actually fairly important since the error here is
  714                          * on the order of a time quantum which is much
  715                          * greater than the precision of binuptime().
  716                          */
  717                         KASSERT(td->td_oncpu != NOCPU,
  718                             ("%s: running thread has no CPU", __func__));
  719                         if (!bt_valid) {
  720                                 binuptime(&bt);
  721                                 bt_valid = 1;
  722                         }
  723                         bintime_add(&rux.rux_runtime, &bt);
  724                         bintime_sub(&rux.rux_runtime,
  725                             &pcpu_find(td->td_oncpu)->pc_switchtime);
  726                 }
  727         }
  728         mtx_unlock_spin(&sched_lock);
  729         calcru1(p, &rux, up, sp);
  730         p->p_rux.rux_uu = rux.rux_uu;
  731         p->p_rux.rux_su = rux.rux_su;
  732         p->p_rux.rux_iu = rux.rux_iu;
  733 }
  734 
  735 void
  736 calccru(p, up, sp)
  737         struct proc *p;
  738         struct timeval *up;
  739         struct timeval *sp;
  740 {
  741 
  742         PROC_LOCK_ASSERT(p, MA_OWNED);
  743         calcru1(p, &p->p_crux, up, sp);
  744 }
  745 
  746 static void
  747 calcru1(p, ruxp, up, sp)
  748         struct proc *p;
  749         struct rusage_ext *ruxp;
  750         struct timeval *up;
  751         struct timeval *sp;
  752 {
  753         struct timeval tv;
  754         /* {user, system, interrupt, total} {ticks, usec}; previous tu: */
  755         u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
  756 
  757         ut = ruxp->rux_uticks;
  758         st = ruxp->rux_sticks;
  759         it = ruxp->rux_iticks;
  760         tt = ut + st + it;
  761         if (tt == 0) {
  762                 st = 1;
  763                 tt = 1;
  764         }
  765         bintime2timeval(&ruxp->rux_runtime, &tv);
  766         tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
  767         ptu = ruxp->rux_uu + ruxp->rux_su + ruxp->rux_iu;
  768         if (tu < ptu) {
  769                 printf(
  770 "calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n",
  771                     (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm);
  772                 tu = ptu;
  773         }
  774         if ((int64_t)tu < 0) {
  775                 printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
  776                     (intmax_t)tu, p->p_pid, p->p_comm);
  777                 tu = ptu;
  778         }
  779 
  780         /* Subdivide tu. */
  781         uu = (tu * ut) / tt;
  782         su = (tu * st) / tt;
  783         iu = tu - uu - su;
  784 
  785         /* Enforce monotonicity. */
  786         if (uu < ruxp->rux_uu || su < ruxp->rux_su || iu < ruxp->rux_iu) {
  787                 if (uu < ruxp->rux_uu)
  788                         uu = ruxp->rux_uu;
  789                 else if (uu + ruxp->rux_su + ruxp->rux_iu > tu)
  790                         uu = tu - ruxp->rux_su - ruxp->rux_iu;
  791                 if (st == 0)
  792                         su = ruxp->rux_su;
  793                 else {
  794                         su = ((tu - uu) * st) / (st + it);
  795                         if (su < ruxp->rux_su)
  796                                 su = ruxp->rux_su;
  797                         else if (uu + su + ruxp->rux_iu > tu)
  798                                 su = tu - uu - ruxp->rux_iu;
  799                 }
  800                 KASSERT(uu + su + ruxp->rux_iu <= tu,
  801                     ("calcru: monotonisation botch 1"));
  802                 iu = tu - uu - su;
  803                 KASSERT(iu >= ruxp->rux_iu,
  804                     ("calcru: monotonisation botch 2"));
  805         }
  806         ruxp->rux_uu = uu;
  807         ruxp->rux_su = su;
  808         ruxp->rux_iu = iu;
  809 
  810         up->tv_sec = uu / 1000000;
  811         up->tv_usec = uu % 1000000;
  812         sp->tv_sec = su / 1000000;
  813         sp->tv_usec = su % 1000000;
  814 }
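
/*
 * Worked example of the subdivision above (not part of this file):
 * with ut = 30 user ticks, st = 10 system ticks and it = 0 interrupt
 * ticks (so tt = 40), and a measured runtime of tu = 4,000,000 usec,
 * the split is
 *
 *      uu = (4000000 * 30) / 40 = 3000000 usec of user time,
 *      su = (4000000 * 10) / 40 = 1000000 usec of system time,
 *      iu = 4000000 - 3000000 - 1000000 = 0 usec of interrupt time.
 *
 * The statclock tick counts only apportion the precise bintime-derived
 * total; they are never used as a time source themselves.
 */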
  815 
  816 #ifndef _SYS_SYSPROTO_H_
  817 struct getrusage_args {
  818         int     who;
  819         struct  rusage *rusage;
  820 };
  821 #endif
  822 /*
  823  * MPSAFE
  824  */
  825 int
  826 getrusage(td, uap)
  827         register struct thread *td;
  828         register struct getrusage_args *uap;
  829 {
  830         struct rusage ru;
  831         int error;
  832 
  833         error = kern_getrusage(td, uap->who, &ru);
  834         if (error == 0)
  835                 error = copyout(&ru, uap->rusage, sizeof(struct rusage));
  836         return (error);
  837 }
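
/*
 * Illustrative userland sketch (not part of this file).  RUSAGE_SELF
 * reports our own usage via calcru(); RUSAGE_CHILDREN reports the
 * totals accumulated from waited-for children.
 */
#include <sys/time.h>
#include <sys/resource.h>
#include <err.h>
#include <stdio.h>

void
show_cpu_usage(void)
{
        struct rusage ru;

        if (getrusage(RUSAGE_SELF, &ru) != 0)
                err(1, "getrusage");
        printf("user %ld.%06lds system %ld.%06lds\n",
            (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
            (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
}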
  838 
  839 int
  840 kern_getrusage(td, who, rup)
  841         struct thread *td;
  842         int who;
  843         struct rusage *rup;
  844 {
  845         struct proc *p;
  846 
  847         p = td->td_proc;
  848         PROC_LOCK(p);
  849         switch (who) {
  850 
  851         case RUSAGE_SELF:
  852                 *rup = p->p_stats->p_ru;
  853                 calcru(p, &rup->ru_utime, &rup->ru_stime);
  854                 break;
  855 
  856         case RUSAGE_CHILDREN:
  857                 *rup = p->p_stats->p_cru;
  858                 calccru(p, &rup->ru_utime, &rup->ru_stime);
  859                 break;
  860 
  861         default:
  862                 PROC_UNLOCK(p);
  863                 return (EINVAL);
  864         }
  865         PROC_UNLOCK(p);
  866         return (0);
  867 }
  868 
  869 void
  870 ruadd(ru, rux, ru2, rux2)
  871         struct rusage *ru;
  872         struct rusage_ext *rux;
  873         struct rusage *ru2;
  874         struct rusage_ext *rux2;
  875 {
  876         register long *ip, *ip2;
  877         register int i;
  878 
  879         bintime_add(&rux->rux_runtime, &rux2->rux_runtime);
  880         rux->rux_uticks += rux2->rux_uticks;
  881         rux->rux_sticks += rux2->rux_sticks;
  882         rux->rux_iticks += rux2->rux_iticks;
  883         rux->rux_uu += rux2->rux_uu;
  884         rux->rux_su += rux2->rux_su;
  885         rux->rux_iu += rux2->rux_iu;
  886         if (ru->ru_maxrss < ru2->ru_maxrss)
  887                 ru->ru_maxrss = ru2->ru_maxrss;
  888         ip = &ru->ru_first;
  889         ip2 = &ru2->ru_first;
  890         for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
  891                 *ip++ += *ip2++;
  892 }
  893 
  894 /*
  895  * Allocate a new resource limits structure and initialize its
  896  * reference count and mutex pointer.
  897  */
  898 struct plimit *
  899 lim_alloc()
  900 {
  901         struct plimit *limp;
  902 
  903         limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
  904         limp->pl_refcnt = 1;
  905         limp->pl_mtx = mtx_pool_alloc(mtxpool_sleep);
  906         return (limp);
  907 }
  908 
  909 struct plimit *
  910 lim_hold(limp)
  911         struct plimit *limp;
  912 {
  913 
  914         LIM_LOCK(limp);
  915         limp->pl_refcnt++;
  916         LIM_UNLOCK(limp);
  917         return (limp);
  918 }
  919 
  920 void
  921 lim_free(limp)
  922         struct plimit *limp;
  923 {
  924 
  925         LIM_LOCK(limp);
  926         KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
  927         if (--limp->pl_refcnt == 0) {
  928                 LIM_UNLOCK(limp);
  929                 free((void *)limp, M_PLIMIT);
  930                 return;
  931         }
  932         LIM_UNLOCK(limp);
  933 }
  934 
  935 /*
  936  * Make a copy of the plimit structure.
  937  * We share these structures copy-on-write after fork.
  938  */
  939 void
  940 lim_copy(dst, src)
  941         struct plimit *dst, *src;
  942 {
  943 
  944         KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
  945         bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
  946 }
  947 
  948 /*
  949  * Return the hard limit for a particular system resource.  The
  950  * which parameter specifies the index into the rlimit array.
  951  */
  952 rlim_t
  953 lim_max(struct proc *p, int which)
  954 {
  955         struct rlimit rl;
  956 
  957         lim_rlimit(p, which, &rl);
  958         return (rl.rlim_max);
  959 }
  960 
  961 /*
  962  * Return the current (soft) limit for a particular system resource.
   963  * The which parameter specifies the index into the rlimit array.
  964  */
  965 rlim_t
  966 lim_cur(struct proc *p, int which)
  967 {
  968         struct rlimit rl;
  969 
  970         lim_rlimit(p, which, &rl);
  971         return (rl.rlim_cur);
  972 }
  973 
  974 /*
  975  * Return a copy of the entire rlimit structure for the system limit
  976  * specified by 'which' in the rlimit structure pointed to by 'rlp'.
  977  */
  978 void
  979 lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
  980 {
  981 
  982         PROC_LOCK_ASSERT(p, MA_OWNED);
  983         KASSERT(which >= 0 && which < RLIM_NLIMITS,
  984             ("request for invalid resource limit"));
  985         *rlp = p->p_limit->pl_rlimit[which];
  986 }
  987 
  988 /*
  989  * Find the uidinfo structure for a uid.  This structure is used to
  990  * track the total resource consumption (process count, socket buffer
  991  * size, etc.) for the uid and impose limits.
  992  */
  993 void
  994 uihashinit()
  995 {
  996 
  997         uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
  998         mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
  999 }
 1000 
 1001 /*
 1002  * Look up a uidinfo struct for the parameter uid.
 1003  * uihashtbl_mtx must be locked.
 1004  */
 1005 static struct uidinfo *
 1006 uilookup(uid)
 1007         uid_t uid;
 1008 {
 1009         struct uihashhead *uipp;
 1010         struct uidinfo *uip;
 1011 
 1012         mtx_assert(&uihashtbl_mtx, MA_OWNED);
 1013         uipp = UIHASH(uid);
 1014         LIST_FOREACH(uip, uipp, ui_hash)
 1015                 if (uip->ui_uid == uid)
 1016                         break;
 1017 
 1018         return (uip);
 1019 }
 1020 
 1021 /*
 1022  * Find or allocate a struct uidinfo for a particular uid.
 1023  * Increase refcount on uidinfo struct returned.
 1024  * uifree() should be called on a struct uidinfo when released.
 1025  */
 1026 struct uidinfo *
 1027 uifind(uid)
 1028         uid_t uid;
 1029 {
 1030         struct uidinfo *old_uip, *uip;
 1031 
 1032         mtx_lock(&uihashtbl_mtx);
 1033         uip = uilookup(uid);
 1034         if (uip == NULL) {
 1035                 mtx_unlock(&uihashtbl_mtx);
 1036                 uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
 1037                 mtx_lock(&uihashtbl_mtx);
 1038                 /*
 1039                  * There's a chance someone created our uidinfo while we
 1040                  * were in malloc and not holding the lock, so we have to
 1041                  * make sure we don't insert a duplicate uidinfo.
 1042                  */
 1043                 if ((old_uip = uilookup(uid)) != NULL) {
 1044                         /* Someone else beat us to it. */
 1045                         free(uip, M_UIDINFO);
 1046                         uip = old_uip;
 1047                 } else {
 1048                         uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
 1049                         uip->ui_uid = uid;
 1050                         LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
 1051                 }
 1052         }
 1053         uihold(uip);
 1054         mtx_unlock(&uihashtbl_mtx);
 1055         return (uip);
 1056 }
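
/*
 * Minimal generic sketch of the pattern above (not part of this file;
 * struct obj, M_OBJ, table_mtx, table_lookup() and table_head() are
 * hypothetical stand-ins).  Since M_WAITOK allocations may sleep, the
 * hash lock is dropped around malloc() and the lookup is repeated
 * before inserting, in case another thread won the race meanwhile.
 */
struct obj *
obj_find_or_create(int key)
{
        struct obj *o, *new;

        mtx_lock(&table_mtx);
        o = table_lookup(key);
        if (o == NULL) {
                mtx_unlock(&table_mtx);         /* malloc() may sleep */
                new = malloc(sizeof(*new), M_OBJ, M_WAITOK | M_ZERO);
                mtx_lock(&table_mtx);
                o = table_lookup(key);          /* re-check for a race */
                if (o != NULL) {
                        free(new, M_OBJ);       /* loser frees its copy */
                } else {
                        new->key = key;
                        LIST_INSERT_HEAD(table_head(key), new, link);
                        o = new;
                }
        }
        mtx_unlock(&table_mtx);
        return (o);
}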
 1057 
 1058 /*
 1059  * Place another refcount on a uidinfo struct.
 1060  */
 1061 void
 1062 uihold(uip)
 1063         struct uidinfo *uip;
 1064 {
 1065 
 1066         UIDINFO_LOCK(uip);
 1067         uip->ui_ref++;
 1068         UIDINFO_UNLOCK(uip);
 1069 }
 1070 
 1071 /*-
 1072  * Since uidinfo structs have a long lifetime, we use an
 1073  * opportunistic refcounting scheme to avoid locking the lookup hash
 1074  * for each release.
 1075  *
 1076  * If the refcount hits 0, we need to free the structure,
 1077  * which means we need to lock the hash.
 1078  * Optimal case:
 1079  *   After locking the struct and lowering the refcount, if we find
 1080  *   that we don't need to free, simply unlock and return.
 1081  * Suboptimal case:
 1082  *   If refcount lowering results in need to free, bump the count
  1083  *   back up, drop the lock and acquire the locks in the proper
 1084  *   order to try again.
 1085  */
 1086 void
 1087 uifree(uip)
 1088         struct uidinfo *uip;
 1089 {
 1090 
 1091         /* Prepare for optimal case. */
 1092         UIDINFO_LOCK(uip);
 1093 
 1094         if (--uip->ui_ref != 0) {
 1095                 UIDINFO_UNLOCK(uip);
 1096                 return;
 1097         }
 1098 
 1099         /* Prepare for suboptimal case. */
 1100         uip->ui_ref++;
 1101         UIDINFO_UNLOCK(uip);
 1102         mtx_lock(&uihashtbl_mtx);
 1103         UIDINFO_LOCK(uip);
 1104 
 1105         /*
 1106          * We must subtract one from the count again because we backed out
 1107          * our initial subtraction before dropping the lock.
 1108          * Since another thread may have added a reference after we dropped the
 1109          * initial lock we have to test for zero again.
 1110          */
 1111         if (--uip->ui_ref == 0) {
 1112                 LIST_REMOVE(uip, ui_hash);
 1113                 mtx_unlock(&uihashtbl_mtx);
 1114                 if (uip->ui_sbsize != 0)
 1115                         printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
 1116                             uip->ui_uid, (intmax_t)uip->ui_sbsize);
 1117                 if (uip->ui_proccnt != 0)
 1118                         printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
 1119                             uip->ui_uid, uip->ui_proccnt);
 1120                 UIDINFO_UNLOCK(uip);
 1121                 FREE(uip, M_UIDINFO);
 1122                 return;
 1123         }
 1124 
 1125         mtx_unlock(&uihashtbl_mtx);
 1126         UIDINFO_UNLOCK(uip);
 1127 }
 1128 
 1129 /*
 1130  * Change the count associated with number of processes
 1131  * a given user is using.  When 'max' is 0, don't enforce a limit
 1132  */
 1133 int
 1134 chgproccnt(uip, diff, max)
 1135         struct  uidinfo *uip;
 1136         int     diff;
 1137         int     max;
 1138 {
 1139 
 1140         UIDINFO_LOCK(uip);
 1141         /* Don't allow them to exceed max, but allow subtraction. */
 1142         if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
 1143                 UIDINFO_UNLOCK(uip);
 1144                 return (0);
 1145         }
 1146         uip->ui_proccnt += diff;
 1147         if (uip->ui_proccnt < 0)
 1148                 printf("negative proccnt for uid = %d\n", uip->ui_uid);
 1149         UIDINFO_UNLOCK(uip);
 1150         return (1);
 1151 }
 1152 
 1153 /*
 1154  * Change the total socket buffer size a user has used.
 1155  */
 1156 int
 1157 chgsbsize(uip, hiwat, to, max)
 1158         struct  uidinfo *uip;
 1159         u_int  *hiwat;
 1160         u_int   to;
 1161         rlim_t  max;
 1162 {
 1163         rlim_t new;
 1164 
 1165         UIDINFO_LOCK(uip);
 1166         new = uip->ui_sbsize + to - *hiwat;
 1167         /* Don't allow them to exceed max, but allow subtraction. */
 1168         if (to > *hiwat && new > max) {
 1169                 UIDINFO_UNLOCK(uip);
 1170                 return (0);
 1171         }
 1172         uip->ui_sbsize = new;
 1173         UIDINFO_UNLOCK(uip);
 1174         *hiwat = to;
 1175         if (new < 0)
 1176                 printf("negative sbsize for uid = %d\n", uip->ui_uid);
 1177         return (1);
 1178 }
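
/*
 * Worked example (not part of this file): assume ui_sbsize is
 * currently 8192 and a socket wants to grow its high-water mark
 * from *hiwat = 8192 to to = 65536.  Then
 *
 *      new = 8192 + 65536 - 8192 = 65536;
 *
 * the change is refused only if the request grows the buffer and
 * new exceeds the caller-supplied max (typically derived from
 * RLIMIT_SBSIZE).  Shrinking requests (to <= *hiwat) are always
 * allowed, so buffers can always be released.
 */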
