FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_resource.c

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_resource.c     8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static int donice(struct thread *td, struct proc *chgp, int n);

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;           /* size of hash table - 1 */

static struct uidinfo   *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
        int     which;
        int     who;
};
#endif
/*
 * MPSAFE
 */
int
getpriority(td, uap)
        struct thread *td;
        register struct getpriority_args *uap;
{
        struct proc *p;
        int error, low;

        error = 0;
        low = PRIO_MAX + 1;
        switch (uap->which) {

        case PRIO_PROCESS:
                if (uap->who == 0)
                        low = td->td_proc->p_nice;
                else {
                        p = pfind(uap->who);
                        if (p == NULL)
                                break;
                        if (p_cansee(td, p) == 0) {
                                low = p->p_nice;
                        }
                        PROC_UNLOCK(p);
                }
                break;

        case PRIO_PGRP: {
                register struct pgrp *pg;

                sx_slock(&proctree_lock);
                if (uap->who == 0) {
                        pg = td->td_proc->p_pgrp;
                        PGRP_LOCK(pg);
                } else {
                        pg = pgfind(uap->who);
                        if (pg == NULL) {
                                sx_sunlock(&proctree_lock);
                                break;
                        }
                }
                sx_sunlock(&proctree_lock);
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        PROC_LOCK(p);
                        if (!p_cansee(td, p)) {
                                if (p->p_nice < low)
                                        low = p->p_nice;
                        }
                        PROC_UNLOCK(p);
                }
                PGRP_UNLOCK(pg);
                break;
        }

        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = td->td_ucred->cr_uid;
                sx_slock(&allproc_lock);
                LIST_FOREACH(p, &allproc, p_list) {
                        PROC_LOCK(p);
                        if (!p_cansee(td, p) &&
                            p->p_ucred->cr_uid == uap->who) {
                                if (p->p_nice < low)
                                        low = p->p_nice;
                        }
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
                break;

        default:
                error = EINVAL;
                break;
        }
        if (low == PRIO_MAX + 1 && error == 0)
                error = ESRCH;
        td->td_retval[0] = low;
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
        int     which;
        int     who;
        int     prio;
};
#endif
/*
 * MPSAFE
 */
int
setpriority(td, uap)
        struct thread *td;
        register struct setpriority_args *uap;
{
        struct proc *curp;
        register struct proc *p;
        int found = 0, error = 0;

        curp = td->td_proc;
        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        PROC_LOCK(curp);
                        error = donice(td, curp, uap->prio);
                        PROC_UNLOCK(curp);
                } else {
                        p = pfind(uap->who);
                        if (p == NULL)
                                break;
                        if (p_cansee(td, p) == 0)
                                error = donice(td, p, uap->prio);
                        PROC_UNLOCK(p);
                }
                found++;
                break;

        case PRIO_PGRP: {
                register struct pgrp *pg;

                sx_slock(&proctree_lock);
                if (uap->who == 0) {
                        pg = curp->p_pgrp;
                        PGRP_LOCK(pg);
                } else {
                        pg = pgfind(uap->who);
                        if (pg == NULL) {
                                sx_sunlock(&proctree_lock);
                                break;
                        }
                }
                sx_sunlock(&proctree_lock);
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        PROC_LOCK(p);
                        if (!p_cansee(td, p)) {
                                error = donice(td, p, uap->prio);
                                found++;
                        }
                        PROC_UNLOCK(p);
                }
                PGRP_UNLOCK(pg);
                break;
        }

        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = td->td_ucred->cr_uid;
                sx_slock(&allproc_lock);
                FOREACH_PROC_IN_SYSTEM(p) {
                        /* Do not bother to check PRS_NEW processes. */
                        if (p->p_state == PRS_NEW)
                                continue;
                        PROC_LOCK(p);
                        if (p->p_ucred->cr_uid == uap->who &&
                            !p_cansee(td, p)) {
                                error = donice(td, p, uap->prio);
                                found++;
                        }
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
                break;

        default:
                error = EINVAL;
                break;
        }
        if (found == 0 && error == 0)
                error = ESRCH;
        return (error);
}
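
/*
 * Illustrative userland sketch (not part of this file) showing how the
 * two syscalls above are driven.  Because getpriority(2) may
 * legitimately return -1, a caller must clear errno first to tell an
 * error apart from a nice value of -1:
 *
 *      #include <sys/resource.h>
 *      #include <errno.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              int prio;
 *
 *              errno = 0;
 *              prio = getpriority(PRIO_PROCESS, 0);
 *              if (prio == -1 && errno != 0)
 *                      perror("getpriority");
 *              if (setpriority(PRIO_PROCESS, 0, prio + 1) == -1)
 *                      perror("setpriority");
 *              return (0);
 *      }
 */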

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
        int error;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((error = p_cansched(td, p)))
                return (error);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        if (n < p->p_nice && suser(td) != 0)
                return (EACCES);
        mtx_lock_spin(&sched_lock);
        sched_nice(p, n);
        mtx_unlock_spin(&sched_lock);
        return (0);
}
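
/*
 * For reference: donice() clamps nice values to [PRIO_MIN, PRIO_MAX]
 * (-20..20).  Smaller values mean higher scheduling priority, which is
 * why moving a process below its current nice value is reserved for
 * the superuser.
 */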

/*
 * Set realtime priority.
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
        int             function;
        pid_t           pid;
        struct rtprio   *rtp;
};
#endif

int
rtprio(td, uap)
        struct thread *td;              /* curthread */
        register struct rtprio_args *uap;
{
        struct proc *curp;
        struct proc *p;
        struct ksegrp *kg;
        struct rtprio rtp;
        int cierror, error;

        /* Perform copyin before acquiring locks if needed. */
        if (uap->function == RTP_SET)
                cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        else
                cierror = 0;

        curp = td->td_proc;
        if (uap->pid == 0) {
                p = curp;
                PROC_LOCK(p);
        } else {
                p = pfind(uap->pid);
                if (p == NULL)
                        return (ESRCH);
        }

        switch (uap->function) {
        case RTP_LOOKUP:
                if ((error = p_cansee(td, p)))
                        break;
                mtx_lock_spin(&sched_lock);
                /*
                 * Return OUR priority if no pid is specified; if one is,
                 * report the highest priority in the process.  There isn't
                 * much more we can do, as there is only room to return a
                 * single priority.
                 * XXXKSE  Maybe we need a new interface to report the
                 * priorities of multiple system scope threads.
                 * Note: specifying our own pid is not the same as leaving
                 * it zero.
                 */
                if (uap->pid == 0) {
                        pri_to_rtp(td->td_ksegrp, &rtp);
                } else {
                        struct rtprio rtp2;

                        rtp.type = RTP_PRIO_IDLE;
                        rtp.prio = RTP_PRIO_MAX;
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                pri_to_rtp(kg, &rtp2);
                                if ((rtp2.type < rtp.type) ||
                                    ((rtp2.type == rtp.type) &&
                                     (rtp2.prio < rtp.prio))) {
                                        rtp.type = rtp2.type;
                                        rtp.prio = rtp2.prio;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
        case RTP_SET:
                if ((error = p_cansched(td, p)) || (error = cierror))
                        break;
                /* Disallow setting rtprio in most cases if not superuser. */
                if (suser(td) != 0) {
                        /* Can't set someone else's priority. */
                        if (uap->pid) {
                                error = EPERM;
                                break;
                        }
                        /*
                         * Can't set realtime priority.
                         *
                         * Realtime priority has to be restricted for
                         * reasons which should be obvious.  However, for
                         * idle priority, there is a potential for system
                         * deadlock if an idleprio process gains a lock on
                         * a resource that other processes need (and the
                         * idleprio process can't run due to a CPU-bound
                         * normal process).  Fix me!  XXX
                         */
#if 0
                        if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
                        if (rtp.type != RTP_PRIO_NORMAL) {
                                error = EPERM;
                                break;
                        }
                }
                mtx_lock_spin(&sched_lock);
                /*
                 * If we are setting our own priority, set just our
                 * KSEGRP, but if we are doing another process, do all
                 * the groups on that process.  If we specify our own
                 * pid we do the latter.
                 */
                if (uap->pid == 0) {
                        error = rtp_to_pri(&rtp, td->td_ksegrp);
                } else {
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if ((error = rtp_to_pri(&rtp, kg)) != 0) {
                                        break;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
                break;
        default:
                error = EINVAL;
                break;
        }
        PROC_UNLOCK(p);
        return (error);
}
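
/*
 * Illustrative userland sketch (not part of this file): moving the
 * current process into the realtime class at the highest realtime
 * priority (0).  Per the checks above, this requires superuser
 * privilege.
 *
 *      #include <sys/types.h>
 *      #include <sys/rtprio.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              struct rtprio rtp;
 *
 *              rtp.type = RTP_PRIO_REALTIME;
 *              rtp.prio = 0;
 *              if (rtprio(RTP_SET, 0, &rtp) == -1)
 *                      perror("rtprio");
 *              return (0);
 *      }
 */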

int
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
{

        mtx_assert(&sched_lock, MA_OWNED);
        if (rtp->prio > RTP_PRIO_MAX)
                return (EINVAL);
        switch (RTP_PRIO_BASE(rtp->type)) {
        case RTP_PRIO_REALTIME:
                kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
                break;
        case RTP_PRIO_NORMAL:
                kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
                break;
        case RTP_PRIO_IDLE:
                kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
                break;
        default:
                return (EINVAL);
        }
        sched_class(kg, rtp->type);
        if (curthread->td_ksegrp == kg) {
                curthread->td_base_pri = kg->kg_user_pri;
                sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
        }
        return (0);
}
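
/*
 * A worked example of the mapping above and its inverse below: an
 * rtprio of { RTP_PRIO_REALTIME, 5 } maps to the global priority
 * PRI_MIN_REALTIME + 5, and pri_to_rtp() recovers prio 5 by
 * subtracting PRI_MIN_REALTIME back out.  Each scheduling class thus
 * occupies a contiguous band of the global priority space, with the
 * rtprio value acting as an offset into its band.
 */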

void
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
{

        mtx_assert(&sched_lock, MA_OWNED);
        switch (PRI_BASE(kg->kg_pri_class)) {
        case PRI_REALTIME:
                rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
                break;
        case PRI_TIMESHARE:
                rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
                break;
        case PRI_IDLE:
                rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
                break;
        default:
                break;
        }
        rtp->type = kg->kg_pri_class;
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
        u_int   which;
        struct  orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
int
osetrlimit(td, uap)
        struct thread *td;
        register struct osetrlimit_args *uap;
{
        struct orlimit olim;
        struct rlimit lim;
        int error;

        if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
                return (error);
        lim.rlim_cur = olim.rlim_cur;
        lim.rlim_max = olim.rlim_max;
        error = kern_setrlimit(td, uap->which, &lim);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
        u_int   which;
        struct  orlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
int
ogetrlimit(td, uap)
        struct thread *td;
        register struct ogetrlimit_args *uap;
{
        struct orlimit olim;
        struct rlimit rl;
        struct proc *p;
        int error;

        if (uap->which >= RLIM_NLIMITS)
                return (EINVAL);
        p = td->td_proc;
        PROC_LOCK(p);
        lim_rlimit(p, uap->which, &rl);
        PROC_UNLOCK(p);

        /*
         * XXX it would be more correct to convert only RLIM_INFINITY to
         * the old RLIM_INFINITY and fail with EOVERFLOW for other larger
         * values.  Most 64->32 and 32->16 conversions, including not
         * unimportant ones of uids, are even more broken than what we
         * do here (they blindly truncate).  We don't do this correctly
         * here since we have little experience with EOVERFLOW yet.
         * Elsewhere, getuid() can't fail...
         */
        olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
        olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
        error = copyout(&olim, uap->rlp, sizeof(olim));
        return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
        u_int   which;
        struct  rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
int
setrlimit(td, uap)
        struct thread *td;
        register struct __setrlimit_args *uap;
{
        struct rlimit alim;
        int error;

        if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
                return (error);
        error = kern_setrlimit(td, uap->which, &alim);
        return (error);
}

int
kern_setrlimit(td, which, limp)
        struct thread *td;
        u_int which;
        struct rlimit *limp;
{
        struct plimit *newlim, *oldlim;
        struct proc *p;
        register struct rlimit *alimp;
        rlim_t oldssiz;
        int error;

        if (which >= RLIM_NLIMITS)
                return (EINVAL);

        /*
         * Preserve historical bugs by treating negative limits as unsigned.
         */
        if (limp->rlim_cur < 0)
                limp->rlim_cur = RLIM_INFINITY;
        if (limp->rlim_max < 0)
                limp->rlim_max = RLIM_INFINITY;

        oldssiz = 0;
        p = td->td_proc;
        newlim = lim_alloc();
        PROC_LOCK(p);
        oldlim = p->p_limit;
        alimp = &oldlim->pl_rlimit[which];
        if (limp->rlim_cur > alimp->rlim_max ||
            limp->rlim_max > alimp->rlim_max) {
                if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL))) {
                        PROC_UNLOCK(p);
                        lim_free(newlim);
                        return (error);
                }
        }
        if (limp->rlim_cur > limp->rlim_max)
                limp->rlim_cur = limp->rlim_max;
        lim_copy(newlim, oldlim);
        alimp = &newlim->pl_rlimit[which];

        switch (which) {

        case RLIMIT_CPU:
                mtx_lock_spin(&sched_lock);
                p->p_cpulimit = limp->rlim_cur;
                mtx_unlock_spin(&sched_lock);
                break;

        case RLIMIT_DATA:
                if (limp->rlim_cur > maxdsiz)
                        limp->rlim_cur = maxdsiz;
                if (limp->rlim_max > maxdsiz)
                        limp->rlim_max = maxdsiz;
                break;

        case RLIMIT_STACK:
                if (limp->rlim_cur > maxssiz)
                        limp->rlim_cur = maxssiz;
                if (limp->rlim_max > maxssiz)
                        limp->rlim_max = maxssiz;
                oldssiz = alimp->rlim_cur;
                break;

        case RLIMIT_NOFILE:
                if (limp->rlim_cur > maxfilesperproc)
                        limp->rlim_cur = maxfilesperproc;
                if (limp->rlim_max > maxfilesperproc)
                        limp->rlim_max = maxfilesperproc;
                break;

        case RLIMIT_NPROC:
                if (limp->rlim_cur > maxprocperuid)
                        limp->rlim_cur = maxprocperuid;
                if (limp->rlim_max > maxprocperuid)
                        limp->rlim_max = maxprocperuid;
                if (limp->rlim_cur < 1)
                        limp->rlim_cur = 1;
                if (limp->rlim_max < 1)
                        limp->rlim_max = 1;
                break;
        }
        *alimp = *limp;
        p->p_limit = newlim;
        PROC_UNLOCK(p);
        lim_free(oldlim);

        if (which == RLIMIT_STACK) {
                /*
                 * Stack is allocated to the max at exec time with only
                 * "rlim_cur" bytes accessible.  If the stack limit is going
                 * up, make more accessible; if going down, make inaccessible.
                 */
                if (limp->rlim_cur != oldssiz) {
                        vm_offset_t addr;
                        vm_size_t size;
                        vm_prot_t prot;

                        mtx_lock(&Giant);
                        if (limp->rlim_cur > oldssiz) {
                                prot = p->p_sysent->sv_stackprot;
                                size = limp->rlim_cur - oldssiz;
                                addr = p->p_sysent->sv_usrstack -
                                    limp->rlim_cur;
                        } else {
                                prot = VM_PROT_NONE;
                                size = oldssiz - limp->rlim_cur;
                                addr = p->p_sysent->sv_usrstack -
                                    oldssiz;
                        }
                        addr = trunc_page(addr);
                        size = round_page(size);
                        (void) vm_map_protect(&p->p_vmspace->vm_map,
                                              addr, addr + size, prot, FALSE);
                        mtx_unlock(&Giant);
                }
        }
        return (0);
}
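
/*
 * Illustrative userland sketch (not part of this file): raising the
 * soft file descriptor limit to the hard limit.  Note that
 * kern_setrlimit() above still clamps both values to maxfilesperproc.
 *
 *      #include <sys/resource.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              struct rlimit rl;
 *
 *              if (getrlimit(RLIMIT_NOFILE, &rl) == -1) {
 *                      perror("getrlimit");
 *                      return (1);
 *              }
 *              rl.rlim_cur = rl.rlim_max;
 *              if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
 *                      perror("setrlimit");
 *              return (0);
 *      }
 */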

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
        u_int   which;
        struct  rlimit *rlp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrlimit(td, uap)
        struct thread *td;
        register struct __getrlimit_args *uap;
{
        struct rlimit rlim;
        struct proc *p;
        int error;

        if (uap->which >= RLIM_NLIMITS)
                return (EINVAL);
        p = td->td_proc;
        PROC_LOCK(p);
        lim_rlimit(p, uap->which, &rlim);
        PROC_UNLOCK(p);
        error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
        return (error);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(p, up, sp, ip)
        struct proc *p;
        struct timeval *up;
        struct timeval *sp;
        struct timeval *ip;
{
        struct bintime bt, rt;
        struct timeval tv;
        struct thread *td;
        /* {user, system, interrupt, total} {ticks, usec}; previous tu: */
        u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
        int problemcase;

        mtx_assert(&sched_lock, MA_OWNED);
        /* XXX: why spl-protect?  Worst case is an off-by-one report. */

        ut = p->p_uticks;
        st = p->p_sticks;
        it = p->p_iticks;

        tt = ut + st + it;
        if (tt == 0) {
                st = 1;
                tt = 1;
        }
        rt = p->p_runtime;
        problemcase = 0;
        FOREACH_THREAD_IN_PROC(p, td) {
                /*
                 * Adjust for the current time slice.  This is actually fairly
                 * important since the error here is on the order of a time
                 * quantum, which is much greater than the sampling error.
                 */
                if (td == curthread) {
                        binuptime(&bt);
                        bintime_sub(&bt, PCPU_PTR(switchtime));
                        bintime_add(&rt, &bt);
                } else if (TD_IS_RUNNING(td)) {
                        /*
                         * XXX: this case should add the difference between
                         * the current time and the switch time as above,
                         * but the switch time is inaccessible, so we can't
                         * do the adjustment and will end up with a wrong
                         * runtime.  A previous call with a different
                         * curthread may have obtained a (right or wrong)
                         * runtime that is in advance of ours.  Just set a
                         * flag to avoid warning about this known problem.
                         */
                        problemcase = 1;
                }
        }
        bintime2timeval(&rt, &tv);
        tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
        ptu = p->p_uu + p->p_su + p->p_iu;
        if (tu < ptu) {
                if (!problemcase)
                        printf(
"calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n",
                            (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm);
                tu = ptu;
        }
        if ((int64_t)tu < 0) {
                printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
                    (intmax_t)tu, p->p_pid, p->p_comm);
                tu = ptu;
        }

        /* Subdivide tu. */
        uu = (tu * ut) / tt;
        su = (tu * st) / tt;
        iu = tu - uu - su;

        /* Enforce monotonicity. */
        if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
                if (uu < p->p_uu)
                        uu = p->p_uu;
                else if (uu + p->p_su + p->p_iu > tu)
                        uu = tu - p->p_su - p->p_iu;
                if (st == 0)
                        su = p->p_su;
                else {
                        su = ((tu - uu) * st) / (st + it);
                        if (su < p->p_su)
                                su = p->p_su;
                        else if (uu + su + p->p_iu > tu)
                                su = tu - uu - p->p_iu;
                }
                KASSERT(uu + su + p->p_iu <= tu,
                    ("calcru: monotonisation botch 1"));
                iu = tu - uu - su;
                KASSERT(iu >= p->p_iu,
                    ("calcru: monotonisation botch 2"));
        }
        p->p_uu = uu;
        p->p_su = su;
        p->p_iu = iu;

        up->tv_sec = uu / 1000000;
        up->tv_usec = uu % 1000000;
        sp->tv_sec = su / 1000000;
        sp->tv_usec = su % 1000000;
        if (ip != NULL) {
                ip->tv_sec = iu / 1000000;
                ip->tv_usec = iu % 1000000;
        }
}
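
/*
 * A worked example of the subdivision in calcru(): with ut = 2, st = 1
 * and it = 0 (so tt = 3) and a measured total of tu = 3000000 usec,
 * the split is uu = (3000000 * 2) / 3 = 2000000 usec and
 * su = (3000000 * 1) / 3 = 1000000 usec, leaving iu = 0.  The ticks
 * only apportion the precisely measured total runtime; they are not a
 * time source themselves.
 */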

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
        int     who;
        struct  rusage *rusage;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getrusage(td, uap)
        register struct thread *td;
        register struct getrusage_args *uap;
{
        struct rusage ru;
        struct proc *p;

        p = td->td_proc;
        switch (uap->who) {

        case RUSAGE_SELF:
                mtx_lock(&Giant);
                mtx_lock_spin(&sched_lock);
                calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime,
                    NULL);
                mtx_unlock_spin(&sched_lock);
                ru = p->p_stats->p_ru;
                mtx_unlock(&Giant);
                break;

        case RUSAGE_CHILDREN:
                mtx_lock(&Giant);
                ru = p->p_stats->p_cru;
                mtx_unlock(&Giant);
                break;

        default:
                return (EINVAL);
        }
        return (copyout(&ru, uap->rusage, sizeof(struct rusage)));
}
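
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side: fetching and printing our own user and system time.
 *
 *      #include <sys/resource.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              struct rusage ru;
 *
 *              if (getrusage(RUSAGE_SELF, &ru) == -1) {
 *                      perror("getrusage");
 *                      return (1);
 *              }
 *              printf("user %ld.%06lds sys %ld.%06lds\n",
 *                  (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
 *                  (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
 *              return (0);
 *      }
 */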

void
ruadd(ru, ru2)
        register struct rusage *ru, *ru2;
{
        register long *ip, *ip2;
        register int i;

        timevaladd(&ru->ru_utime, &ru2->ru_utime);
        timevaladd(&ru->ru_stime, &ru2->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
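        /*
         * The remaining rusage fields are consecutive longs bracketed
         * by the ru_first and ru_last members of struct rusage; walk
         * the two structures in parallel and accumulate.
         */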
        ip = &ru->ru_first;
        ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc()
{
        struct plimit *limp;

        limp = (struct plimit *)malloc(sizeof(struct plimit), M_PLIMIT,
            M_WAITOK);
        limp->pl_refcnt = 1;
        limp->pl_mtx = mtx_pool_alloc(mtxpool_sleep);
        return (limp);
}

struct plimit *
lim_hold(limp)
        struct plimit *limp;
{

        LIM_LOCK(limp);
        limp->pl_refcnt++;
        LIM_UNLOCK(limp);
        return (limp);
}

void
lim_free(limp)
        struct plimit *limp;
{

        LIM_LOCK(limp);
        KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
        if (--limp->pl_refcnt == 0) {
                LIM_UNLOCK(limp);
                free((void *)limp, M_PLIMIT);
                return;
        }
        LIM_UNLOCK(limp);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(dst, src)
        struct plimit *dst, *src;
{

        KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
        bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
        struct rlimit rl;

        lim_rlimit(p, which, &rl);
        return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
        struct rlimit rl;

        lim_rlimit(p, which, &rl);
        return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(which >= 0 && which < RLIM_NLIMITS,
            ("request for invalid resource limit"));
        *rlp = p->p_limit->pl_rlimit[which];
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit()
{

        uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
        mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_mtx must be locked.
 */
static struct uidinfo *
uilookup(uid)
        uid_t uid;
{
        struct uihashhead *uipp;
        struct uidinfo *uip;

        mtx_assert(&uihashtbl_mtx, MA_OWNED);
        uipp = UIHASH(uid);
        LIST_FOREACH(uip, uipp, ui_hash)
                if (uip->ui_uid == uid)
                        break;

        return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
        uid_t uid;
{
        struct uidinfo *old_uip, *uip;

        mtx_lock(&uihashtbl_mtx);
        uip = uilookup(uid);
        if (uip == NULL) {
                mtx_unlock(&uihashtbl_mtx);
                uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
                mtx_lock(&uihashtbl_mtx);
                /*
                 * There's a chance someone created our uidinfo while we
                 * were in malloc and not holding the lock, so we have to
                 * make sure we don't insert a duplicate uidinfo.
                 */
                if ((old_uip = uilookup(uid)) != NULL) {
                        /* Someone else beat us to it. */
                        free(uip, M_UIDINFO);
                        uip = old_uip;
                } else {
                        uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
                        uip->ui_uid = uid;
                        LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
                }
        }
        uihold(uip);
        mtx_unlock(&uihashtbl_mtx);
        return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
        struct uidinfo *uip;
{

        UIDINFO_LOCK(uip);
        uip->ui_ref++;
        UIDINFO_UNLOCK(uip);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If lowering the refcount shows that we do need to free, bump
 *   the count back up, release the lock, and acquire the locks in
 *   the proper order to try again.
 */
void
uifree(uip)
        struct uidinfo *uip;
{

        /* Prepare for optimal case. */
        UIDINFO_LOCK(uip);

        if (--uip->ui_ref != 0) {
                UIDINFO_UNLOCK(uip);
                return;
        }

        /* Prepare for suboptimal case. */
        uip->ui_ref++;
        UIDINFO_UNLOCK(uip);
        mtx_lock(&uihashtbl_mtx);
        UIDINFO_LOCK(uip);

        /*
         * We must subtract one from the count again because we backed out
         * our initial subtraction before dropping the lock.
         * Since another thread may have added a reference after we dropped
         * the initial lock, we have to test for zero again.
         */
        if (--uip->ui_ref == 0) {
                LIST_REMOVE(uip, ui_hash);
                mtx_unlock(&uihashtbl_mtx);
                if (uip->ui_sbsize != 0)
                        printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
                            uip->ui_uid, (intmax_t)uip->ui_sbsize);
                if (uip->ui_proccnt != 0)
                        printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
                            uip->ui_uid, uip->ui_proccnt);
                UIDINFO_UNLOCK(uip);
                FREE(uip, M_UIDINFO);
                return;
        }

        mtx_unlock(&uihashtbl_mtx);
        UIDINFO_UNLOCK(uip);
}

/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
        struct  uidinfo *uip;
        int     diff;
        int     max;
{

        UIDINFO_LOCK(uip);
        /* Don't allow them to exceed max, but allow subtraction. */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
                UIDINFO_UNLOCK(uip);
                return (0);
        }
        uip->ui_proccnt += diff;
        if (uip->ui_proccnt < 0)
                printf("negative proccnt for uid = %d\n", uip->ui_uid);
        UIDINFO_UNLOCK(uip);
        return (1);
}
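
/*
 * Illustrative sketch of the intended use (the real call sites live in
 * the fork and exit paths; this is a simplification):
 *
 *      On fork:
 *              if (!chgproccnt(uip, 1, lim_cur(p, RLIMIT_NPROC)))
 *                      fail with EAGAIN
 *      On exit:
 *              (void)chgproccnt(uip, -1, 0);
 *
 * A max of 0 disables the check, so decrements always succeed.
 */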

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
        struct  uidinfo *uip;
        u_int  *hiwat;
        u_int   to;
        rlim_t  max;
{
        rlim_t new;

        UIDINFO_LOCK(uip);
        new = uip->ui_sbsize + to - *hiwat;
        /* Don't allow them to exceed max, but allow subtraction. */
        if (to > *hiwat && new > max) {
                UIDINFO_UNLOCK(uip);
                return (0);
        }
        uip->ui_sbsize = new;
        UIDINFO_UNLOCK(uip);
        *hiwat = to;
        if (new < 0)
                printf("negative sbsize for uid = %d\n", uip->ui_uid);
        return (1);
}
