sys/vm/vm_pagequeue.h
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

#ifndef	_VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;	/* Protects the page list and count. */
	struct pglist	pq_pl;		/* The queue of pages. */
	int		pq_cnt;		/* Number of pages in the queue. */
	const char	* const pq_name; /* Queue name, set at initialization. */
	uint64_t	pq_pdpages;	/* Pages scanned by the page daemon. */
} __aligned(CACHE_LINE_SIZE);

/*
 * VM_BATCHQUEUE_SIZE is apparently chosen so that the padded structure
 * below fills a whole number of cache lines: the page pointer array plus
 * the count pad out to 512 bytes on LP64 and 64 bytes on ILP32.
 */
#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *       ---
 *        |
 *        |-> vmd_inactive_target (~3%)
 *        |   - The active queue scan target is given by
 *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *        |
 *        |
 *        |-> vmd_free_target (~2%)
 *        |   - Target for page reclamation.
 *        |
 *        |-> vmd_pageout_wakeup_thresh (~1.8%)
 *        |   - Threshold for waking up the page daemon.
 *        |
 *        |
 *        |-> vmd_free_min (~0.5%)
 *        |   - First low memory threshold.
 *        |   - Causes per-CPU caching to be lazily disabled in UMA.
 *        |   - vm_wait() sleeps below this threshold.
 *        |
 *        |-> vmd_free_severe (~0.25%)
 *        |   - Second low memory threshold.
 *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *        |     writes.
 *        |
 *        |-> vmd_free_reserved (~0.13%)
 *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *        |-> vmd_pageout_free_min (32 + 2 pages)
 *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *        |-> vmd_interrupt_free_min (2 pages)
 *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *       ---
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the free
 * target.  It wakes up periodically (every 100ms) to input the current free
 * page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used to
 * determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.  (An illustrative sketch of this
 * control loop follows the structure definition below.)
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps ensure
 * that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that it
 * can respond promptly to a sudden free page shortage.  In particular, the page
 * daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on the
 * number of clean pages freed by the page daemon since the last laundering.  If
 * the page daemon fails to meet its scan target (i.e., the PID controller
 * output) because of a shortage of clean inactive pages, the laundry thread
 * attempts to launder enough pages to meet the free page target.
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation can
 * claim any free page.  This priority is used in the pmap layer when attempting
 * to allocate a page for the kernel page tables; in such cases an allocation
 * failure will usually result in a kernel panic.  The system priority is used
 * for most other kernel memory allocations, for instance by UMA's slab
 * allocator or the buffer cache.  Such allocations will fail if the free count
 * is below interrupt_free_min.  All other allocations occur at the normal
 * priority, which is typically used for allocation of user pages, for instance
 * in the page fault handler or when allocating page table pages or pv_entry
 * structures for user pmaps.  Such allocations fail if the free count is below
 * the free_reserved threshold.  (A sketch of this mapping from priority to
 * threshold follows the vm_domain_allocate() prototype below.)
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count rises
 * above the free_min threshold; the page daemon and laundry threads are given
 * priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where the
 * free page count is above the free_min threshold.  This means that given the
 * choice between two NUMA domains, one above the free_min threshold and one
 * below, the former will be used to satisfy the allocation request regardless
 * of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure (i.e.,
 * vmd_free_count < vmd_free_target).  This allows kernel subsystems to register
 * for notifications of free page shortages, upon which they may shrink their
 * caches.  Following a vm_lowmem event, UMA's caches are pruned to ensure that
 * they do not contain an excess of unused memory.  When a domain is below the
 * free_min threshold, UMA limits the population of per-CPU caches.  When a
 * domain falls below the free_severe threshold, UMA's caches are completely
 * drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
 * last-ditch attempt to free up some pages.  Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict and
 *     may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make progress
 *     while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	boolean_t vmd_oom;		/* An OOM kill has been voted for. */
	u_int vmd_inactive_threads;	/* Threads for the inactive scan. */
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	blockcount_t vmd_inactive_running; /* Number of inactive threads. */
	blockcount_t vmd_inactive_starting; /* Number of threads started. */
	volatile u_int vmd_addl_shortage; /* Shortage accumulator. */
	volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
	volatile u_int vmd_inactive_us;	/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;		/* Consecutive scans without progress. */
	int vmd_last_active_scan;	/* Time of the last active queue scan. */
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh; /* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);
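
/*
 * Illustrative sketch (not part of the original header): the shape of the
 * free page regulation loop described in the comment above.  It assumes the
 * pidctrl_daemon() interface from <sys/pidctrl.h> and that vmd_pid was
 * initialized with vmd_free_target as its setpoint; the helper names are
 * hypothetical.  The real loop lives in vm_pageout.c and also handles
 * laundering, OOM voting, and multi-threaded inactive scans.
 */
#if 0
static void
vm_pageout_regulate_sketch(struct vm_domain *vmd)
{
	int shortage;

	for (;;) {
		/* Sleep ~100ms, or until vm_domain_allocate() wakes us. */
		vm_pageout_sleep_sketch(vmd);

		/*
		 * Feed the current free count to the PID controller; the
		 * output is the number of pages to try to reclaim this pass.
		 */
		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
		if (shortage > 0)
			vm_pageout_scan_sketch(vmd, shortage);
	}
}
#endif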

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(d)					\
	    mtx_assert(vm_domain_free_lockptr((d)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(d)				\
	    mtx_assert(vm_domain_free_lockptr((d)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(d)				\
	    mtx_assert(vm_domain_pageout_lockptr((d)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(d)				\
	    mtx_assert(vm_domain_pageout_lockptr((d)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

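/*
 * Illustrative sketch (not part of the original header): the canonical
 * pattern for page queue manipulation.  Queue membership and pq_cnt are
 * only modified while holding pq_mutex, via the macros above; the helper
 * name is hypothetical.
 */
#if 0
static void
vm_pagequeue_remove_locked_sketch(struct vm_pagequeue *pq, vm_page_t m)
{

	vm_pagequeue_lock(pq);
	vm_pagequeue_remove(pq, m);	/* Unlinks m and decrements pq_cnt. */
	vm_pagequeue_unlock(pq);
}
#endif
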
static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

/*
 * Insert the page into the batch queue if there is room.  Returns the number
 * of slots that were free before the insertion; a return value of zero means
 * the queue was full and the page was not inserted.
 */
static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0)
		bq->bq_pa[bq->bq_cnt++] = m;
	return (slots_free);
}

/*
 * Remove and return the most recently inserted page, or NULL if the batch
 * queue is empty.
 */
static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}

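/*
 * Illustrative sketch (not part of the original header): how a batch queue
 * can amortize page queue lock acquisitions.  Pages are staged lock-free in
 * a vm_batchqueue and then flushed to the real queue in a single critical
 * section.  The helper name is hypothetical.
 */
#if 0
static void
vm_batchqueue_flush_sketch(struct vm_pagequeue *pq, struct vm_batchqueue *bq)
{
	vm_page_t m;

	vm_pagequeue_lock(pq);
	while ((m = vm_batchqueue_pop(bq)) != NULL) {
		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_inc(pq);
	}
	vm_pagequeue_unlock(pq);
}
#endif
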
void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

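/*
 * Illustrative sketch (not part of the original header): the mapping from
 * allocation priority to free page threshold described in the comment at
 * the top of this file.  The helper name is hypothetical; the real
 * vm_domain_allocate() also updates vmd_free_count atomically and lets the
 * page daemon dig deeper into the free page reserve.
 */
#if 0
static bool
vm_domain_may_allocate_sketch(struct vm_domain *vmd, int req, int npages)
{
	u_int limit;

	switch (req & VM_ALLOC_CLASS_MASK) {
	case VM_ALLOC_INTERRUPT:
		limit = 0;		/* May take any free page. */
		break;
	case VM_ALLOC_SYSTEM:
		limit = vmd->vmd_interrupt_free_min;
		break;
	default:			/* VM_ALLOC_NORMAL */
		limit = vmd->vmd_free_reserved;
		break;
	}
	return (vmd->vmd_free_count >= (u_int)npages + limit);
}
#endif
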
/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up to reach the free page
 * target.  A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

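/*
 * Illustrative sketch (not part of the original header): the back-pressure
 * pattern described in the "Free memory shortages" comment above.  A thread
 * that cannot tolerate allocation failure sleeps in vm_wait() and retries;
 * vm_domain_freecnt_inc() is what eventually wakes it once the free count
 * rises back above free_min.  The helper name is hypothetical.
 */
#if 0
static vm_page_t
vm_page_alloc_retry_sketch(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
		/* Drop the object lock before sleeping. */
		VM_OBJECT_WUNLOCK(object);
		vm_wait(object);
		VM_OBJECT_WLOCK(object);
	}
	VM_OBJECT_WUNLOCK(object);
	return (m);
}
#endif
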
#endif	/* _KERNEL */
#endif	/* !_VM_PAGEQUEUE_ */