FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_racct.c
1 /*-
2 * Copyright (c) 2010 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Edward Tomasz Napierala under sponsorship
6 * from the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/10.2/sys/kern/kern_racct.c 286326 2015-08-05 16:58:04Z trasz $
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_racct.c 286326 2015-08-05 16:58:04Z trasz $");
34
35 #include "opt_kdtrace.h"
36 #include "opt_sched.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/eventhandler.h>
41 #include <sys/jail.h>
42 #include <sys/kernel.h>
43 #include <sys/kthread.h>
44 #include <sys/lock.h>
45 #include <sys/loginclass.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/racct.h>
50 #include <sys/resourcevar.h>
51 #include <sys/sbuf.h>
52 #include <sys/sched.h>
53 #include <sys/sdt.h>
54 #include <sys/smp.h>
55 #include <sys/sx.h>
56 #include <sys/sysctl.h>
57 #include <sys/sysent.h>
58 #include <sys/sysproto.h>
59 #include <sys/umtx.h>
60 #include <machine/smp.h>
61
62 #ifdef RCTL
63 #include <sys/rctl.h>
64 #endif
65
66 #ifdef RACCT
67
FEATURE(racct, "Resource Accounting");

/*
 * Do not block processes that have their %cpu usage <= pcpu_threshold.
 */
static int pcpu_threshold = 1;
/*
 * Master on/off switch; read-only at runtime (CTLFLAG_RDTUN), settable
 * only as a loader tunable.  NOTE(review): declared int but exported via
 * SYSCTL_UINT -- confirm the signedness mismatch is intentional.
 */
#ifdef RACCT_DEFAULT_TO_DISABLED
int racct_enable = 0;
#else
int racct_enable = 1;
#endif

SYSCTL_NODE(_kern, OID_AUTO, racct, CTLFLAG_RW, 0, "Resource Accounting");
TUNABLE_INT("kern.racct.enable", &racct_enable);
SYSCTL_UINT(_kern_racct, OID_AUTO, enable, CTLFLAG_RDTUN, &racct_enable,
    0, "Enable RACCT/RCTL");
SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
    0, "Processes with higher %cpu usage than this value can be throttled.");

/*
 * How many seconds it takes to use the scheduler %cpu calculations.  When a
 * process starts, we compute its %cpu usage by dividing its runtime by the
 * process wall clock time.  After RACCT_PCPU_SECS pass, we use the value
 * provided by the scheduler.
 */
#define RACCT_PCPU_SECS		3

/* Single global lock protecting every racct container in the system. */
static struct mtx racct_lock;
MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);

/* UMA zone from which all struct racct allocations are made. */
static uma_zone_t racct_zone;

/* Forward declarations for helpers used before their definitions. */
static void racct_sub_racct(struct racct *dest, const struct racct *src);
static void racct_sub_cred_locked(struct ucred *cred, int resource,
		uint64_t amount);
static void racct_add_cred_locked(struct ucred *cred, int resource,
		uint64_t amount);

/* DTrace SDT probes for usage changes and racct lifecycle events. */
SDT_PROVIDER_DEFINE(racct);
SDT_PROBE_DEFINE3(racct, kernel, rusage, add, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add__failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add__cred, "struct ucred *",
    "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, add__force, "struct proc *",
    "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, set, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, set__failure,
    "struct proc *", "int", "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, sub, "struct proc *", "int",
    "uint64_t");
SDT_PROBE_DEFINE3(racct, kernel, rusage, sub__cred, "struct ucred *",
    "int", "uint64_t");
SDT_PROBE_DEFINE1(racct, kernel, racct, create, "struct racct *");
SDT_PROBE_DEFINE1(racct, kernel, racct, destroy, "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, join, "struct racct *",
    "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, join__failure,
    "struct racct *", "struct racct *");
SDT_PROBE_DEFINE2(racct, kernel, racct, leave, "struct racct *",
    "struct racct *");

/*
 * Attribute flags for each resource, indexed by RACCT_* resource number.
 * The flags (RACCT_RECLAIMABLE, RACCT_INHERITABLE, RACCT_DENIABLE,
 * RACCT_SLOPPY, RACCT_DECAYING, RACCT_IN_MILLIONS) control how each
 * resource is accounted and enforced; see sys/racct.h for their meaning.
 */
int racct_types[] = {
	[RACCT_CPU] =
		RACCT_IN_MILLIONS,
	[RACCT_DATA] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_STACK] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_CORE] =
		RACCT_DENIABLE,
	[RACCT_RSS] =
		RACCT_RECLAIMABLE,
	[RACCT_MEMLOCK] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE,
	[RACCT_NPROC] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE,
	[RACCT_NOFILE] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_VMEM] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_NPTS] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_SWAP] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NTHR] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE,
	[RACCT_MSGQQUEUED] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_MSGQSIZE] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NMSGQ] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NSEM] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_NSEMOP] =
		RACCT_RECLAIMABLE | RACCT_INHERITABLE | RACCT_DENIABLE,
	[RACCT_NSHM] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_SHMSIZE] =
		RACCT_RECLAIMABLE | RACCT_DENIABLE | RACCT_SLOPPY,
	[RACCT_WALLCLOCK] =
		RACCT_IN_MILLIONS,
	[RACCT_PCTCPU] =
		RACCT_DECAYING | RACCT_DENIABLE | RACCT_IN_MILLIONS };
175
/*
 * Fixed-point weight used to exponentially decay the per-credential
 * containers of RACCT_DECAYING resources: each decay step keeps 30%
 * of the previous value (scaled by FSCALE).
 */
static const fixpt_t RACCT_DECAY_FACTOR = 0.3 * FSCALE;

#ifdef SCHED_4BSD
/*
 * Contains intermediate values for %cpu calculations to avoid using floating
 * point in the kernel.
 * ccpu_exp[k] = FSCALE * (ccpu/FSCALE)^k = FSCALE * exp(-k/20)
 * It is needed only for the 4BSD scheduler, because in ULE, the ccpu equals to
 * zero so the calculations are more straightforward.
 */
fixpt_t ccpu_exp[] = {
	[0] = FSCALE * 1,
	[1] = FSCALE * 0.95122942450071400909,
	[2] = FSCALE * 0.90483741803595957316,
	[3] = FSCALE * 0.86070797642505780722,
	[4] = FSCALE * 0.81873075307798185866,
	[5] = FSCALE * 0.77880078307140486824,
	[6] = FSCALE * 0.74081822068171786606,
	[7] = FSCALE * 0.70468808971871343435,
	[8] = FSCALE * 0.67032004603563930074,
	[9] = FSCALE * 0.63762815162177329314,
	[10] = FSCALE * 0.60653065971263342360,
	[11] = FSCALE * 0.57694981038048669531,
	[12] = FSCALE * 0.54881163609402643262,
	[13] = FSCALE * 0.52204577676101604789,
	[14] = FSCALE * 0.49658530379140951470,
	[15] = FSCALE * 0.47236655274101470713,
	[16] = FSCALE * 0.44932896411722159143,
	[17] = FSCALE * 0.42741493194872666992,
	[18] = FSCALE * 0.40656965974059911188,
	[19] = FSCALE * 0.38674102345450120691,
	[20] = FSCALE * 0.36787944117144232159,
	[21] = FSCALE * 0.34993774911115535467,
	[22] = FSCALE * 0.33287108369807955328,
	[23] = FSCALE * 0.31663676937905321821,
	[24] = FSCALE * 0.30119421191220209664,
	[25] = FSCALE * 0.28650479686019010032,
	[26] = FSCALE * 0.27253179303401260312,
	[27] = FSCALE * 0.25924026064589150757,
	[28] = FSCALE * 0.24659696394160647693,
	[29] = FSCALE * 0.23457028809379765313,
	[30] = FSCALE * 0.22313016014842982893,
	[31] = FSCALE * 0.21224797382674305771,
	[32] = FSCALE * 0.20189651799465540848,
	[33] = FSCALE * 0.19204990862075411423,
	[34] = FSCALE * 0.18268352405273465022,
	[35] = FSCALE * 0.17377394345044512668,
	[36] = FSCALE * 0.16529888822158653829,
	[37] = FSCALE * 0.15723716631362761621,
	[38] = FSCALE * 0.14956861922263505264,
	[39] = FSCALE * 0.14227407158651357185,
	[40] = FSCALE * 0.13533528323661269189,
	[41] = FSCALE * 0.12873490358780421886,
	[42] = FSCALE * 0.12245642825298191021,
	[43] = FSCALE * 0.11648415777349695786,
	[44] = FSCALE * 0.11080315836233388333,
	[45] = FSCALE * 0.10539922456186433678,
	[46] = FSCALE * 0.10025884372280373372,
	[47] = FSCALE * 0.09536916221554961888,
	[48] = FSCALE * 0.09071795328941250337,
	[49] = FSCALE * 0.08629358649937051097,
	[50] = FSCALE * 0.08208499862389879516,
	[51] = FSCALE * 0.07808166600115315231,
	[52] = FSCALE * 0.07427357821433388042,
	[53] = FSCALE * 0.07065121306042958674,
	[54] = FSCALE * 0.06720551273974976512,
	[55] = FSCALE * 0.06392786120670757270,
	[56] = FSCALE * 0.06081006262521796499,
	[57] = FSCALE * 0.05784432087483846296,
	[58] = FSCALE * 0.05502322005640722902,
	[59] = FSCALE * 0.05233970594843239308,
	[60] = FSCALE * 0.04978706836786394297,
	[61] = FSCALE * 0.04735892439114092119,
	[62] = FSCALE * 0.04504920239355780606,
	[63] = FSCALE * 0.04285212686704017991,
	[64] = FSCALE * 0.04076220397836621516,
	[65] = FSCALE * 0.03877420783172200988,
	[66] = FSCALE * 0.03688316740124000544,
	[67] = FSCALE * 0.03508435410084502588,
	[68] = FSCALE * 0.03337326996032607948,
	[69] = FSCALE * 0.03174563637806794323,
	[70] = FSCALE * 0.03019738342231850073,
	[71] = FSCALE * 0.02872463965423942912,
	[72] = FSCALE * 0.02732372244729256080,
	[73] = FSCALE * 0.02599112877875534358,
	[74] = FSCALE * 0.02472352647033939120,
	[75] = FSCALE * 0.02351774585600910823,
	[76] = FSCALE * 0.02237077185616559577,
	[77] = FSCALE * 0.02127973643837716938,
	[78] = FSCALE * 0.02024191144580438847,
	[79] = FSCALE * 0.01925470177538692429,
	[80] = FSCALE * 0.01831563888873418029,
	[81] = FSCALE * 0.01742237463949351138,
	[82] = FSCALE * 0.01657267540176124754,
	[83] = FSCALE * 0.01576441648485449082,
	[84] = FSCALE * 0.01499557682047770621,
	[85] = FSCALE * 0.01426423390899925527,
	[86] = FSCALE * 0.01356855901220093175,
	[87] = FSCALE * 0.01290681258047986886,
	[88] = FSCALE * 0.01227733990306844117,
	[89] = FSCALE * 0.01167856697039544521,
	[90] = FSCALE * 0.01110899653824230649,
	[91] = FSCALE * 0.01056720438385265337,
	[92] = FSCALE * 0.01005183574463358164,
	[93] = FSCALE * 0.00956160193054350793,
	[94] = FSCALE * 0.00909527710169581709,
	[95] = FSCALE * 0.00865169520312063417,
	[96] = FSCALE * 0.00822974704902002884,
	[97] = FSCALE * 0.00782837754922577143,
	[98] = FSCALE * 0.00744658307092434051,
	[99] = FSCALE * 0.00708340892905212004,
	[100] = FSCALE * 0.00673794699908546709,
	[101] = FSCALE * 0.00640933344625638184,
	[102] = FSCALE * 0.00609674656551563610,
	[103] = FSCALE * 0.00579940472684214321,
	[104] = FSCALE * 0.00551656442076077241,
	[105] = FSCALE * 0.00524751839918138427,
	[106] = FSCALE * 0.00499159390691021621,
	[107] = FSCALE * 0.00474815099941147558,
	[108] = FSCALE * 0.00451658094261266798,
	[109] = FSCALE * 0.00429630469075234057,
	[110] = FSCALE * 0.00408677143846406699,
};
#endif

/* Highest valid index into ccpu_exp[] (swapped-in time, in seconds). */
#define CCPU_EXP_MAX		110
302
/*
 * This function is analogical to the getpcpu() function in the ps(1) command.
 * They should both calculate in the same way so that the racct %cpu
 * calculations are consistent with the values showed by the ps(1) tool.
 * The calculations are more complex in the 4BSD scheduler because of the value
 * of the ccpu variable.  In ULE it is defined to be zero which saves us some
 * work.
 *
 * 'pcpu' is the caller's own %cpu estimate, used verbatim until the
 * process has been swapped in for at least RACCT_PCPU_SECS seconds.
 * The return value is a percentage scaled by 1000000 (the same scaling
 * the callers use when computing 'pcpu'; cf. RACCT_IN_MILLIONS).
 */
static uint64_t
racct_getpcpu(struct proc *p, u_int pcpu)
{
	u_int swtime;
#ifdef SCHED_4BSD
	fixpt_t pctcpu, pctcpu_next;
#endif
#ifdef SMP
	struct pcpu *pc;
	int found;
#endif
	fixpt_t p_pctcpu;
	struct thread *td;

	ASSERT_RACCT_ENABLED();

	/*
	 * If the process is swapped out, we count its %cpu usage as zero.
	 * This behaviour is consistent with the userland ps(1) tool.
	 */
	if ((p->p_flag & P_INMEM) == 0)
		return (0);
	/* Seconds since the process was last swapped in. */
	swtime = (ticks - p->p_swtick) / hz;

	/*
	 * For short-lived processes, the sched_pctcpu() returns small
	 * values even for cpu intensive processes.  Therefore we use
	 * our own estimate in this case.
	 */
	if (swtime < RACCT_PCPU_SECS)
		return (pcpu);

	/* Sum the per-thread %cpu over all threads of the process. */
	p_pctcpu = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		/* Do not count the current CPU's idle thread. */
		if (td == PCPU_GET(idlethread))
			continue;
#ifdef SMP
		/* Skip the idle threads of every other CPU as well. */
		found = 0;
		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
			if (td == pc->pc_idlethread) {
				found = 1;
				break;
			}
		}
		if (found)
			continue;
#endif
		thread_lock(td);
#ifdef SCHED_4BSD
		pctcpu = sched_pctcpu(td);
		/* Count also the yet unfinished second. */
		pctcpu_next = (pctcpu * ccpu_exp[1]) >> FSHIFT;
		pctcpu_next += sched_pctcpu_delta(td);
		p_pctcpu += max(pctcpu, pctcpu_next);
#else
		/*
		 * In ULE the %cpu statistics are updated on every
		 * sched_pctcpu() call.  So special calculations to
		 * account for the latest (unfinished) second are
		 * not needed.
		 */
		p_pctcpu += sched_pctcpu(td);
#endif
		thread_unlock(td);
	}

#ifdef SCHED_4BSD
	/*
	 * Undo the exponential decay the 4BSD scheduler applies to its
	 * %cpu figures by scaling with 1 / (1 - ccpu^swtime); beyond
	 * CCPU_EXP_MAX seconds the correction is negligible.
	 */
	if (swtime <= CCPU_EXP_MAX)
		return ((100 * (uint64_t)p_pctcpu * 1000000) /
		    (FSCALE - ccpu_exp[swtime]));
#endif

	return ((100 * (uint64_t)p_pctcpu * 1000000) / FSCALE);
}
385
386 static void
387 racct_add_racct(struct racct *dest, const struct racct *src)
388 {
389 int i;
390
391 ASSERT_RACCT_ENABLED();
392 mtx_assert(&racct_lock, MA_OWNED);
393
394 /*
395 * Update resource usage in dest.
396 */
397 for (i = 0; i <= RACCT_MAX; i++) {
398 KASSERT(dest->r_resources[i] >= 0,
399 ("%s: resource %d propagation meltdown: dest < 0",
400 __func__, i));
401 KASSERT(src->r_resources[i] >= 0,
402 ("%s: resource %d propagation meltdown: src < 0",
403 __func__, i));
404 dest->r_resources[i] += src->r_resources[i];
405 }
406 }
407
408 static void
409 racct_sub_racct(struct racct *dest, const struct racct *src)
410 {
411 int i;
412
413 ASSERT_RACCT_ENABLED();
414 mtx_assert(&racct_lock, MA_OWNED);
415
416 /*
417 * Update resource usage in dest.
418 */
419 for (i = 0; i <= RACCT_MAX; i++) {
420 if (!RACCT_IS_SLOPPY(i) && !RACCT_IS_DECAYING(i)) {
421 KASSERT(dest->r_resources[i] >= 0,
422 ("%s: resource %d propagation meltdown: dest < 0",
423 __func__, i));
424 KASSERT(src->r_resources[i] >= 0,
425 ("%s: resource %d propagation meltdown: src < 0",
426 __func__, i));
427 KASSERT(src->r_resources[i] <= dest->r_resources[i],
428 ("%s: resource %d propagation meltdown: src > dest",
429 __func__, i));
430 }
431 if (RACCT_CAN_DROP(i)) {
432 dest->r_resources[i] -= src->r_resources[i];
433 if (dest->r_resources[i] < 0) {
434 KASSERT(RACCT_IS_SLOPPY(i) ||
435 RACCT_IS_DECAYING(i),
436 ("%s: resource %d usage < 0", __func__, i));
437 dest->r_resources[i] = 0;
438 }
439 }
440 }
441 }
442
443 void
444 racct_create(struct racct **racctp)
445 {
446
447 if (!racct_enable)
448 return;
449
450 SDT_PROBE(racct, kernel, racct, create, racctp, 0, 0, 0, 0);
451
452 KASSERT(*racctp == NULL, ("racct already allocated"));
453
454 *racctp = uma_zalloc(racct_zone, M_WAITOK | M_ZERO);
455 }
456
457 static void
458 racct_destroy_locked(struct racct **racctp)
459 {
460 int i;
461 struct racct *racct;
462
463 ASSERT_RACCT_ENABLED();
464
465 SDT_PROBE(racct, kernel, racct, destroy, racctp, 0, 0, 0, 0);
466
467 mtx_assert(&racct_lock, MA_OWNED);
468 KASSERT(racctp != NULL, ("NULL racctp"));
469 KASSERT(*racctp != NULL, ("NULL racct"));
470
471 racct = *racctp;
472
473 for (i = 0; i <= RACCT_MAX; i++) {
474 if (RACCT_IS_SLOPPY(i))
475 continue;
476 if (!RACCT_IS_RECLAIMABLE(i))
477 continue;
478 KASSERT(racct->r_resources[i] == 0,
479 ("destroying non-empty racct: "
480 "%ju allocated for resource %d\n",
481 racct->r_resources[i], i));
482 }
483 uma_zfree(racct_zone, racct);
484 *racctp = NULL;
485 }
486
487 void
488 racct_destroy(struct racct **racct)
489 {
490
491 if (!racct_enable)
492 return;
493
494 mtx_lock(&racct_lock);
495 racct_destroy_locked(racct);
496 mtx_unlock(&racct_lock);
497 }
498
499 /*
500 * Increase consumption of 'resource' by 'amount' for 'racct'
501 * and all its parents. Differently from other cases, 'amount' here
502 * may be less than zero.
503 */
504 static void
505 racct_alloc_resource(struct racct *racct, int resource,
506 uint64_t amount)
507 {
508
509 ASSERT_RACCT_ENABLED();
510 mtx_assert(&racct_lock, MA_OWNED);
511 KASSERT(racct != NULL, ("NULL racct"));
512
513 racct->r_resources[resource] += amount;
514 if (racct->r_resources[resource] < 0) {
515 KASSERT(RACCT_IS_SLOPPY(resource) || RACCT_IS_DECAYING(resource),
516 ("%s: resource %d usage < 0", __func__, resource));
517 racct->r_resources[resource] = 0;
518 }
519
520 /*
521 * There are some cases where the racct %cpu resource would grow
522 * beyond 100%.
523 * For example in racct_proc_exit() we add the process %cpu usage
524 * to the ucred racct containers. If too many processes terminated
525 * in a short time span, the ucred %cpu resource could grow too much.
526 * Also, the 4BSD scheduler sometimes returns for a thread more than
527 * 100% cpu usage. So we set a boundary here to 100%.
528 */
529 if ((resource == RACCT_PCTCPU) &&
530 (racct->r_resources[RACCT_PCTCPU] > 100 * 1000000))
531 racct->r_resources[RACCT_PCTCPU] = 100 * 1000000;
532 }
533
534 static int
535 racct_add_locked(struct proc *p, int resource, uint64_t amount)
536 {
537 #ifdef RCTL
538 int error;
539 #endif
540
541 ASSERT_RACCT_ENABLED();
542
543 SDT_PROBE(racct, kernel, rusage, add, p, resource, amount, 0, 0);
544
545 /*
546 * We need proc lock to dereference p->p_ucred.
547 */
548 PROC_LOCK_ASSERT(p, MA_OWNED);
549
550 #ifdef RCTL
551 error = rctl_enforce(p, resource, amount);
552 if (error && RACCT_IS_DENIABLE(resource)) {
553 SDT_PROBE(racct, kernel, rusage, add__failure, p, resource,
554 amount, 0, 0);
555 return (error);
556 }
557 #endif
558 racct_alloc_resource(p->p_racct, resource, amount);
559 racct_add_cred_locked(p->p_ucred, resource, amount);
560
561 return (0);
562 }
563
564 /*
565 * Increase allocation of 'resource' by 'amount' for process 'p'.
566 * Return 0 if it's below limits, or errno, if it's not.
567 */
568 int
569 racct_add(struct proc *p, int resource, uint64_t amount)
570 {
571 int error;
572
573 if (!racct_enable)
574 return (0);
575
576 mtx_lock(&racct_lock);
577 error = racct_add_locked(p, resource, amount);
578 mtx_unlock(&racct_lock);
579 return (error);
580 }
581
582 static void
583 racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount)
584 {
585 struct prison *pr;
586
587 ASSERT_RACCT_ENABLED();
588
589 SDT_PROBE(racct, kernel, rusage, add__cred, cred, resource, amount,
590 0, 0);
591
592 racct_alloc_resource(cred->cr_ruidinfo->ui_racct, resource, amount);
593 for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
594 racct_alloc_resource(pr->pr_prison_racct->prr_racct, resource,
595 amount);
596 racct_alloc_resource(cred->cr_loginclass->lc_racct, resource, amount);
597 }
598
599 /*
600 * Increase allocation of 'resource' by 'amount' for credential 'cred'.
601 * Doesn't check for limits and never fails.
602 *
603 * XXX: Shouldn't this ever return an error?
604 */
605 void
606 racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
607 {
608
609 if (!racct_enable)
610 return;
611
612 mtx_lock(&racct_lock);
613 racct_add_cred_locked(cred, resource, amount);
614 mtx_unlock(&racct_lock);
615 }
616
617 /*
618 * Increase allocation of 'resource' by 'amount' for process 'p'.
619 * Doesn't check for limits and never fails.
620 */
621 void
622 racct_add_force(struct proc *p, int resource, uint64_t amount)
623 {
624
625 if (!racct_enable)
626 return;
627
628 SDT_PROBE(racct, kernel, rusage, add__force, p, resource, amount, 0, 0);
629
630 /*
631 * We need proc lock to dereference p->p_ucred.
632 */
633 PROC_LOCK_ASSERT(p, MA_OWNED);
634
635 mtx_lock(&racct_lock);
636 racct_alloc_resource(p->p_racct, resource, amount);
637 mtx_unlock(&racct_lock);
638 racct_add_cred(p->p_ucred, resource, amount);
639 }
640
/*
 * Set the usage of 'resource' for 'p' to 'amount', enforcing RCTL limits
 * only when usage would grow.  Returns 0 on success or an errno when a
 * deniable limit would be exceeded.  Caller holds the proc lock and the
 * racct lock.
 */
static int
racct_set_locked(struct proc *p, int resource, uint64_t amount)
{
	int64_t old_amount, decayed_amount;
	int64_t diff_proc, diff_cred;
#ifdef RCTL
	int error;
#endif

	ASSERT_RACCT_ENABLED();

	SDT_PROBE(racct, kernel, rusage, set, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);

	old_amount = p->p_racct->r_resources[resource];
	/*
	 * The diffs may be negative.
	 */
	diff_proc = amount - old_amount;
	if (RACCT_IS_DECAYING(resource)) {
		/*
		 * Resources in per-credential racct containers may decay.
		 * If this is the case, we need to calculate the difference
		 * between the new amount and the proportional value of the
		 * old amount that has decayed in the ucred racct containers.
		 */
		decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
		diff_cred = amount - decayed_amount;
	} else
		diff_cred = diff_proc;
#ifdef notyet
	KASSERT(diff_proc >= 0 || RACCT_CAN_DROP(resource),
	    ("%s: usage of non-droppable resource %d dropping", __func__,
	     resource));
#endif
#ifdef RCTL
	/* Only growth is subject to limit enforcement; shrinking is free. */
	if (diff_proc > 0) {
		error = rctl_enforce(p, resource, diff_proc);
		if (error && RACCT_IS_DENIABLE(resource)) {
			SDT_PROBE(racct, kernel, rusage, set__failure, p,
			    resource, amount, 0, 0);
			return (error);
		}
	}
#endif
	racct_alloc_resource(p->p_racct, resource, diff_proc);
	/* Propagate the (possibly decay-adjusted) delta to the ucred side. */
	if (diff_cred > 0)
		racct_add_cred_locked(p->p_ucred, resource, diff_cred);
	else if (diff_cred < 0)
		racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);

	return (0);
}
698
699 /*
700 * Set allocation of 'resource' to 'amount' for process 'p'.
701 * Return 0 if it's below limits, or errno, if it's not.
702 *
703 * Note that decreasing the allocation always returns 0,
704 * even if it's above the limit.
705 */
706 int
707 racct_set(struct proc *p, int resource, uint64_t amount)
708 {
709 int error;
710
711 if (!racct_enable)
712 return (0);
713
714 mtx_lock(&racct_lock);
715 error = racct_set_locked(p, resource, amount);
716 mtx_unlock(&racct_lock);
717 return (error);
718 }
719
720 static void
721 racct_set_force_locked(struct proc *p, int resource, uint64_t amount)
722 {
723 int64_t old_amount, decayed_amount;
724 int64_t diff_proc, diff_cred;
725
726 ASSERT_RACCT_ENABLED();
727
728 SDT_PROBE(racct, kernel, rusage, set, p, resource, amount, 0, 0);
729
730 /*
731 * We need proc lock to dereference p->p_ucred.
732 */
733 PROC_LOCK_ASSERT(p, MA_OWNED);
734
735 old_amount = p->p_racct->r_resources[resource];
736 /*
737 * The diffs may be negative.
738 */
739 diff_proc = amount - old_amount;
740 if (RACCT_IS_DECAYING(resource)) {
741 /*
742 * Resources in per-credential racct containers may decay.
743 * If this is the case, we need to calculate the difference
744 * between the new amount and the proportional value of the
745 * old amount that has decayed in the ucred racct containers.
746 */
747 decayed_amount = old_amount * RACCT_DECAY_FACTOR / FSCALE;
748 diff_cred = amount - decayed_amount;
749 } else
750 diff_cred = diff_proc;
751
752 racct_alloc_resource(p->p_racct, resource, diff_proc);
753 if (diff_cred > 0)
754 racct_add_cred_locked(p->p_ucred, resource, diff_cred);
755 else if (diff_cred < 0)
756 racct_sub_cred_locked(p->p_ucred, resource, -diff_cred);
757 }
758
759 void
760 racct_set_force(struct proc *p, int resource, uint64_t amount)
761 {
762
763 if (!racct_enable)
764 return;
765
766 mtx_lock(&racct_lock);
767 racct_set_force_locked(p, resource, amount);
768 mtx_unlock(&racct_lock);
769 }
770
771 /*
772 * Returns amount of 'resource' the process 'p' can keep allocated.
773 * Allocating more than that would be denied, unless the resource
774 * is marked undeniable. Amount of already allocated resource does
775 * not matter.
776 */
777 uint64_t
778 racct_get_limit(struct proc *p, int resource)
779 {
780
781 if (!racct_enable)
782 return (UINT64_MAX);
783
784 #ifdef RCTL
785 return (rctl_get_limit(p, resource));
786 #else
787 return (UINT64_MAX);
788 #endif
789 }
790
791 /*
792 * Returns amount of 'resource' the process 'p' can keep allocated.
793 * Allocating more than that would be denied, unless the resource
794 * is marked undeniable. Amount of already allocated resource does
795 * matter.
796 */
797 uint64_t
798 racct_get_available(struct proc *p, int resource)
799 {
800
801 if (!racct_enable)
802 return (UINT64_MAX);
803
804 #ifdef RCTL
805 return (rctl_get_available(p, resource));
806 #else
807 return (UINT64_MAX);
808 #endif
809 }
810
811 /*
812 * Returns amount of the %cpu resource that process 'p' can add to its %cpu
813 * utilization. Adding more than that would lead to the process being
814 * throttled.
815 */
816 static int64_t
817 racct_pcpu_available(struct proc *p)
818 {
819
820 ASSERT_RACCT_ENABLED();
821
822 #ifdef RCTL
823 return (rctl_pcpu_available(p));
824 #else
825 return (INT64_MAX);
826 #endif
827 }
828
/*
 * Decrease allocation of 'resource' by 'amount' for process 'p'.
 */
void
racct_sub(struct proc *p, int resource, uint64_t amount)
{

	if (!racct_enable)
		return;

	SDT_PROBE(racct, kernel, rusage, sub, p, resource, amount, 0, 0);

	/*
	 * We need proc lock to dereference p->p_ucred.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/* Usage of non-droppable resources can never decrease. */
	KASSERT(RACCT_CAN_DROP(resource),
	    ("%s: called for non-droppable resource %d", __func__, resource));

	mtx_lock(&racct_lock);
	/* Underflow here would indicate unbalanced add/sub accounting. */
	KASSERT(amount <= p->p_racct->r_resources[resource],
	    ("%s: freeing %ju of resource %d, which is more "
	    "than allocated %jd for %s (pid %d)", __func__, amount, resource,
	    (intmax_t)p->p_racct->r_resources[resource], p->p_comm, p->p_pid));

	racct_alloc_resource(p->p_racct, resource, -amount);
	racct_sub_cred_locked(p->p_ucred, resource, amount);
	mtx_unlock(&racct_lock);
}
858
859 static void
860 racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount)
861 {
862 struct prison *pr;
863
864 ASSERT_RACCT_ENABLED();
865
866 SDT_PROBE(racct, kernel, rusage, sub__cred, cred, resource, amount,
867 0, 0);
868
869 #ifdef notyet
870 KASSERT(RACCT_CAN_DROP(resource),
871 ("%s: called for resource %d which can not drop", __func__,
872 resource));
873 #endif
874
875 racct_alloc_resource(cred->cr_ruidinfo->ui_racct, resource, -amount);
876 for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent)
877 racct_alloc_resource(pr->pr_prison_racct->prr_racct, resource,
878 -amount);
879 racct_alloc_resource(cred->cr_loginclass->lc_racct, resource, -amount);
880 }
881
882 /*
883 * Decrease allocation of 'resource' by 'amount' for credential 'cred'.
884 */
885 void
886 racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
887 {
888
889 if (!racct_enable)
890 return;
891
892 mtx_lock(&racct_lock);
893 racct_sub_cred_locked(cred, resource, amount);
894 mtx_unlock(&racct_lock);
895 }
896
/*
 * Inherit resource usage information from the parent process.
 * Returns 0 on success, or a non-zero value when an RCTL rule denies
 * the fork; on failure any partial accounting is undone.
 */
int
racct_proc_fork(struct proc *parent, struct proc *child)
{
	int i, error = 0;

	if (!racct_enable)
		return (0);

	/*
	 * Create racct for the child process.
	 */
	racct_create(&child->p_racct);

	/* Lock parent before child; racct lock is innermost. */
	PROC_LOCK(parent);
	PROC_LOCK(child);
	mtx_lock(&racct_lock);

#ifdef RCTL
	/* Attach the parent's RCTL rules to the child first. */
	error = rctl_proc_fork(parent, child);
	if (error != 0)
		goto out;
#endif

	/* Init process cpu time. */
	child->p_prev_runtime = 0;
	child->p_throttled = 0;

	/*
	 * Inherit resource usage.
	 */
	for (i = 0; i <= RACCT_MAX; i++) {
		if (parent->p_racct->r_resources[i] == 0 ||
		    !RACCT_IS_INHERITABLE(i))
			continue;

		error = racct_set_locked(child, i,
		    parent->p_racct->r_resources[i]);
		if (error != 0)
			goto out;
	}

	/* Charge the new process and its first thread. */
	error = racct_add_locked(child, RACCT_NPROC, 1);
	/*
	 * NOTE(review): '+=' sums two errno values here; callers appear to
	 * only test for non-zero, but the combined value is not a
	 * meaningful errno if both calls fail -- confirm this is intended.
	 */
	error += racct_add_locked(child, RACCT_NTHR, 1);

out:
	mtx_unlock(&racct_lock);
	PROC_UNLOCK(child);
	PROC_UNLOCK(parent);

	/* Roll back whatever was charged to the child on failure. */
	if (error != 0)
		racct_proc_exit(child);

	return (error);
}
954
/*
 * Called at the end of fork1(), to handle rules that require the process
 * to be fully initialized.
 */
void
racct_proc_fork_done(struct proc *child)
{

#ifdef RCTL
	if (!racct_enable)
		return;

	PROC_LOCK(child);
	mtx_lock(&racct_lock);
	/*
	 * Re-run enforcement with a zero delta -- presumably so that any
	 * rule already exceeded by the NPROC/NTHR charges made in
	 * racct_proc_fork() takes effect on the now fully constructed
	 * process; confirm against rctl_enforce().
	 */
	rctl_enforce(child, RACCT_NPROC, 0);
	rctl_enforce(child, RACCT_NTHR, 0);
	mtx_unlock(&racct_lock);
	PROC_UNLOCK(child);
#endif
}
975
/*
 * Final accounting for an exiting process: record its total cpu time,
 * fold its %cpu into the credential containers, release all reclaimable
 * resources and destroy its racct.
 */
void
racct_proc_exit(struct proc *p)
{
	int i;
	uint64_t runtime;
	struct timeval wallclock;
	uint64_t pct_estimate, pct;

	if (!racct_enable)
		return;

	PROC_LOCK(p);
	/*
	 * We don't need to calculate rux, proc_reap() has already done this.
	 */
	runtime = cputick2usec(p->p_rux.rux_runtime);
#ifdef notyet
	KASSERT(runtime >= p->p_prev_runtime, ("runtime < p_prev_runtime"));
#else
	/* Clamp instead of asserting until the invariant is guaranteed. */
	if (runtime < p->p_prev_runtime)
		runtime = p->p_prev_runtime;
#endif
	/* Estimate %cpu as runtime / wall clock time, scaled by 100 * 1e6. */
	microuptime(&wallclock);
	timevalsub(&wallclock, &p->p_stats->p_start);
	if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
		pct_estimate = (1000000 * runtime * 100) /
		    ((uint64_t)wallclock.tv_sec * 1000000 +
		    wallclock.tv_usec);
	} else
		pct_estimate = 0;
	pct = racct_getpcpu(p, pct_estimate);

	mtx_lock(&racct_lock);
	racct_set_locked(p, RACCT_CPU, runtime);
	/* %cpu is credited to the creds so it can decay there later. */
	racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);

	/* Zero out every reclaimable resource still charged to the proc. */
	for (i = 0; i <= RACCT_MAX; i++) {
		if (p->p_racct->r_resources[i] == 0)
			continue;
		if (!RACCT_IS_RECLAIMABLE(i))
			continue;
		racct_set_locked(p, i, 0);
	}

	mtx_unlock(&racct_lock);
	PROC_UNLOCK(p);

#ifdef RCTL
	rctl_racct_release(p->p_racct);
#endif
	racct_destroy(&p->p_racct);
}
1028
/*
 * Called after credentials change, to move resource utilisation
 * between raccts.
 */
void
racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
    struct ucred *newcred)
{
	struct uidinfo *olduip, *newuip;
	struct loginclass *oldlc, *newlc;
	struct prison *oldpr, *newpr, *pr;

	if (!racct_enable)
		return;

	PROC_LOCK_ASSERT(p, MA_NOTOWNED);

	newuip = newcred->cr_ruidinfo;
	olduip = oldcred->cr_ruidinfo;
	newlc = newcred->cr_loginclass;
	oldlc = oldcred->cr_loginclass;
	newpr = newcred->cr_prison;
	oldpr = oldcred->cr_prison;

	/*
	 * For each container that actually changed, subtract the process's
	 * usage from the old one and add it to the new one, all within a
	 * single critical section.
	 */
	mtx_lock(&racct_lock);
	if (newuip != olduip) {
		racct_sub_racct(olduip->ui_racct, p->p_racct);
		racct_add_racct(newuip->ui_racct, p->p_racct);
	}
	if (newlc != oldlc) {
		racct_sub_racct(oldlc->lc_racct, p->p_racct);
		racct_add_racct(newlc->lc_racct, p->p_racct);
	}
	if (newpr != oldpr) {
		/* Jails are hierarchical; walk both ancestor chains. */
		for (pr = oldpr; pr != NULL; pr = pr->pr_parent)
			racct_sub_racct(pr->pr_prison_racct->prr_racct,
			    p->p_racct);
		for (pr = newpr; pr != NULL; pr = pr->pr_parent)
			racct_add_racct(pr->pr_prison_racct->prr_racct,
			    p->p_racct);
	}
	mtx_unlock(&racct_lock);

#ifdef RCTL
	rctl_proc_ucred_changed(p, newcred);
#endif
}
1076
1077 void
1078 racct_move(struct racct *dest, struct racct *src)
1079 {
1080
1081 ASSERT_RACCT_ENABLED();
1082
1083 mtx_lock(&racct_lock);
1084
1085 racct_add_racct(dest, src);
1086 racct_sub_racct(src, src);
1087
1088 mtx_unlock(&racct_lock);
1089 }
1090
/*
 * Mark the process as exceeding its %cpu allotment and ask each of its
 * runnable threads to yield the CPU as soon as possible.  The flag is
 * cleared again by racct_proc_wakeup().
 */
static void
racct_proc_throttle(struct proc *p)
{
	struct thread *td;
#ifdef SMP
	int cpuid;
#endif

	ASSERT_RACCT_ENABLED();
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * Do not block kernel processes.  Also do not block processes with
	 * low %cpu utilization to improve interactivity.
	 */
	if (((p->p_flag & (P_SYSTEM | P_KTHREAD)) != 0) ||
	    (p->p_racct->r_resources[RACCT_PCTCPU] <= pcpu_threshold))
		return;
	p->p_throttled = 1;

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		switch (td->td_state) {
		case TDS_RUNQ:
			/*
			 * If the thread is on the scheduler run-queue, we can
			 * not just remove it from there.  So we set the flag
			 * TDF_NEEDRESCHED for the thread, so that once it is
			 * running, it is taken off the cpu as soon as possible.
			 */
			td->td_flags |= TDF_NEEDRESCHED;
			break;
		case TDS_RUNNING:
			/*
			 * If the thread is running, we request a context
			 * switch for it by setting the TDF_NEEDRESCHED flag.
			 */
			td->td_flags |= TDF_NEEDRESCHED;
#ifdef SMP
			/*
			 * Interrupt the remote CPU so it notices the
			 * reschedule request promptly; the current thread
			 * will pass through AST on its own.
			 */
			cpuid = td->td_oncpu;
			if ((cpuid != NOCPU) && (td != curthread))
				ipi_cpu(cpuid, IPI_AST);
#endif
			break;
		default:
			/*
			 * Sleeping or otherwise inactive threads are left
			 * alone; presumably they are throttled elsewhere
			 * once they become runnable -- TODO confirm.
			 */
			break;
		}
		thread_unlock(td);
	}
}
1141
1142 static void
1143 racct_proc_wakeup(struct proc *p)
1144 {
1145
1146 ASSERT_RACCT_ENABLED();
1147
1148 PROC_LOCK_ASSERT(p, MA_OWNED);
1149
1150 if (p->p_throttled) {
1151 p->p_throttled = 0;
1152 wakeup(p->p_racct);
1153 }
1154 }
1155
1156 static void
1157 racct_decay_resource(struct racct *racct, void * res, void* dummy)
1158 {
1159 int resource;
1160 int64_t r_old, r_new;
1161
1162 ASSERT_RACCT_ENABLED();
1163
1164 resource = *(int *)res;
1165 r_old = racct->r_resources[resource];
1166
1167 /* If there is nothing to decay, just exit. */
1168 if (r_old <= 0)
1169 return;
1170
1171 mtx_lock(&racct_lock);
1172 r_new = r_old * RACCT_DECAY_FACTOR / FSCALE;
1173 racct->r_resources[resource] = r_new;
1174 mtx_unlock(&racct_lock);
1175 }
1176
/*
 * Decay the given resource in every racct container in the system:
 * per-uid, per-loginclass and per-jail.  Called periodically from
 * racctd() so that %cpu usage ages out over time.
 */
static void
racct_decay(int resource)
{

	ASSERT_RACCT_ENABLED();

	ui_racct_foreach(racct_decay_resource, &resource, NULL);
	loginclass_racct_foreach(racct_decay_resource, &resource, NULL);
	prison_racct_foreach(racct_decay_resource, &resource, NULL);
}
1187
/*
 * Main loop of the resource accounting kernel daemon.  Once a second it
 * decays the %cpu accounting, refreshes per-process CPU/wallclock usage,
 * and throttles (or wakes up) processes that exceed their %cpu limits.
 */
static void
racctd(void)
{
	struct thread *td;
	struct proc *p;
	struct timeval wallclock;
	uint64_t runtime;
	uint64_t pct, pct_estimate;

	ASSERT_RACCT_ENABLED();

	for (;;) {
		racct_decay(RACCT_PCTCPU);

		sx_slock(&allproc_lock);

		/* Zombies accrue no further %cpu; zero their accounting. */
		LIST_FOREACH(p, &zombproc, p_list) {
			PROC_LOCK(p);
			racct_set(p, RACCT_PCTCPU, 0);
			PROC_UNLOCK(p);
		}

		/* First pass: refresh usage numbers for every live process. */
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL) {
				PROC_UNLOCK(p);
				continue;
			}

			/* Wallclock time since the process started. */
			microuptime(&wallclock);
			timevalsub(&wallclock, &p->p_stats->p_start);
			/* Aggregate per-thread runtimes into the process. */
			PROC_SLOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				ruxagg(p, td);
			runtime = cputick2usec(p->p_rux.rux_runtime);
			PROC_SUNLOCK(p);
#ifdef notyet
			KASSERT(runtime >= p->p_prev_runtime,
			    ("runtime < p_prev_runtime"));
#else
			/* Runtime must never appear to go backwards. */
			if (runtime < p->p_prev_runtime)
				runtime = p->p_prev_runtime;
#endif
			p->p_prev_runtime = runtime;
			/*
			 * %cpu estimate = runtime / wallclock, scaled to
			 * percent.  NOTE(review): 1000000 * runtime * 100
			 * can overflow uint64_t once runtime exceeds roughly
			 * 51 hours of CPU time -- TODO confirm upstream fix.
			 */
			if (wallclock.tv_sec > 0 || wallclock.tv_usec > 0) {
				pct_estimate = (1000000 * runtime * 100) /
				    ((uint64_t)wallclock.tv_sec * 1000000 +
				    wallclock.tv_usec);
			} else
				pct_estimate = 0;
			pct = racct_getpcpu(p, pct_estimate);
			mtx_lock(&racct_lock);
			/* "force" because PCTCPU is not limit-checked here. */
			racct_set_force_locked(p, RACCT_PCTCPU, pct);
			racct_set_locked(p, RACCT_CPU, runtime);
			racct_set_locked(p, RACCT_WALLCLOCK,
			    (uint64_t)wallclock.tv_sec * 1000000 +
			    wallclock.tv_usec);
			mtx_unlock(&racct_lock);
			PROC_UNLOCK(p);
		}

		/*
		 * To ensure that processes are throttled in a fair way, we need
		 * to iterate over all processes again and check the limits
		 * for %cpu resource only after ucred racct containers have been
		 * properly filled.
		 */
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL) {
				PROC_UNLOCK(p);
				continue;
			}

			if (racct_pcpu_available(p) <= 0)
				racct_proc_throttle(p);
			else if (p->p_throttled)
				racct_proc_wakeup(p);
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		/* Sleep for one second before the next accounting pass. */
		pause("-", hz);
	}
}
1272
1273 static struct kproc_desc racctd_kp = {
1274 "racctd",
1275 racctd,
1276 NULL
1277 };
1278
1279 static void
1280 racctd_init(void)
1281 {
1282 if (!racct_enable)
1283 return;
1284
1285 kproc_start(&racctd_kp);
1286 }
1287 SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, racctd_init, NULL);
1288
/* Set up the racct UMA zone and the accounting container for prison0. */
static void
racct_init(void)
{
	if (!racct_enable)
		return;

	/* Backing zone for struct racct allocations. */
	racct_zone = uma_zcreate("racct", sizeof(struct racct),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * XXX: Move this somewhere.
	 */
	prison0.pr_prison_racct = prison_racct_find("");
}
1302 SYSINIT(racct, SI_SUB_RACCT, SI_ORDER_FIRST, racct_init, NULL);
1303
1304 #else /* !RACCT */
1305
/* Stub for kernels built without "options RACCT": always succeeds. */
int
racct_add(struct proc *p, int resource, uint64_t amount)
{

	return (0);
}
1312
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
{
}
1317
1318 void
1319 racct_add_force(struct proc *p, int resource, uint64_t amount)
1320 {
1321
1322 return;
1323 }
1324
/* Stub for kernels built without "options RACCT": always succeeds. */
int
racct_set(struct proc *p, int resource, uint64_t amount)
{

	return (0);
}
1331
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_set_force(struct proc *p, int resource, uint64_t amount)
{
}
1336
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_sub(struct proc *p, int resource, uint64_t amount)
{
}
1341
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
{
}
1346
/* Stub for kernels built without "options RACCT": report no limit. */
uint64_t
racct_get_limit(struct proc *p, int resource)
{

	return (UINT64_MAX);
}
1353
/* Stub for kernels built without "options RACCT": report unlimited. */
uint64_t
racct_get_available(struct proc *p, int resource)
{

	return (UINT64_MAX);
}
1360
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_create(struct racct **racctp)
{
}
1365
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_destroy(struct racct **racctp)
{
}
1370
/* Stub for kernels built without "options RACCT": always succeeds. */
int
racct_proc_fork(struct proc *parent, struct proc *child)
{

	return (0);
}
1377
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_proc_fork_done(struct proc *child)
{
}
1382
/* Stub for kernels built without "options RACCT": no-op. */
void
racct_proc_exit(struct proc *p)
{
}
1387
1388 #endif /* !RACCT */
Cache object: b38b4d2e7979cf305b45eccb5b2796b1
|