1 /* $NetBSD: uvm_pdpolicy_clock.c,v 1.5 2006/11/01 10:18:27 yamt Exp $ */
2 /* NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $ */
3
4 /*
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * Copyright (c) 1991, 1993, The Regents of the University of California.
7 *
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * The Mach Operating System project at Carnegie-Mellon University.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by Charles D. Cranor,
24 * Washington University, the University of California, Berkeley and
25 * its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 * may be used to endorse or promote products derived from this software
28 * without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * @(#)vm_pageout.c 8.5 (Berkeley) 2/14/94
43 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
44 *
45 *
46 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
47 * All rights reserved.
48 *
49 * Permission to use, copy, modify and distribute this software and
50 * its documentation is hereby granted, provided that both the copyright
51 * notice and this permission notice appear in all copies of the
52 * software, derivative works or modified versions, and any portions
53 * thereof, and that both notices appear in supporting documentation.
54 *
55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58 *
59 * Carnegie Mellon requests users of this software to return to
60 *
61 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
62 * School of Computer Science
63 * Carnegie Mellon University
64 * Pittsburgh PA 15213-3890
65 *
66 * any improvements or extensions that they make and grant Carnegie the
67 * rights to redistribute these changes.
68 */
69
70 #if defined(PDSIM)
71
72 #include "pdsim.h"
73
74 #else /* defined(PDSIM) */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.5 2006/11/01 10:18:27 yamt Exp $");
78
79 #include <sys/param.h>
80 #include <sys/proc.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83
84 #include <uvm/uvm.h>
85 #include <uvm/uvm_pdpolicy.h>
86 #include <uvm/uvm_pdpolicy_impl.h>
87
88 #endif /* defined(PDSIM) */
89
/* per-page queue-membership flags, stored in pg->pqflags */
#define	PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define	PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */

/* default target percentage of pageable memory kept on the inactive queue */
#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

/*
 * global state of the clock page-replacement policy.
 * the pctparam members are tunable percentages (see uvmpdpol_sysctlsetup).
 */
struct uvmpdpol_globalstate {
	struct pglist s_activeq;	/* allocated pages, in use */
	struct pglist s_inactiveq;	/* pages between the clock hands */
	int s_active;			/* number of pages on s_activeq */
	int s_inactive;			/* number of pages on s_inactiveq */
	int s_inactarg;			/* target size of the inactive queue */
	struct uvm_pctparam s_anonmin;	/* min % memory for anonymous pages */
	struct uvm_pctparam s_filemin;	/* min % memory for cached file pages */
	struct uvm_pctparam s_execmin;	/* min % memory for executable pages */
	struct uvm_pctparam s_anonmax;	/* max % memory for anonymous pages */
	struct uvm_pctparam s_filemax;	/* max % memory for cached file pages */
	struct uvm_pctparam s_execmax;	/* max % memory for executable pages */
	struct uvm_pctparam s_inactivepct; /* inactive / (active + inactive) */
};

/*
 * per-scan state carried across uvmpdpol_selectvictim() calls;
 * reset by uvmpdpol_scaninit().
 */
struct uvmpdpol_scanstate {
	boolean_t ss_first;		/* TRUE to (re)start from queue head */
	boolean_t ss_anonreact, ss_filereact, ss_execreact;
					/* reactivate this type, don't free */
	struct vm_page *ss_nextpg;	/* next inactive page to examine */
};

static struct uvmpdpol_globalstate pdpol_state;
static struct uvmpdpol_scanstate pdpol_scanstate;

/* counters for pages reactivated to enforce the per-type minimums */
PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)
124
125 static void
126 clock_tune(void)
127 {
128 struct uvmpdpol_globalstate *s = &pdpol_state;
129
130 s->s_inactarg =
131 s->s_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
132 s->s_active + s->s_inactive);
133 if (s->s_inactarg <= uvmexp.freetarg) {
134 s->s_inactarg = uvmexp.freetarg + 1;
135 }
136 }
137
138 void
139 uvmpdpol_scaninit(void)
140 {
141 struct uvmpdpol_globalstate *s = &pdpol_state;
142 struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
143 int t;
144 boolean_t anonunder, fileunder, execunder;
145 boolean_t anonover, fileover, execover;
146 boolean_t anonreact, filereact, execreact;
147
148 /*
149 * decide which types of pages we want to reactivate instead of freeing
150 * to keep usage within the minimum and maximum usage limits.
151 */
152
153 t = s->s_active + s->s_inactive + uvmexp.free;
154 anonunder = uvmexp.anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
155 fileunder = uvmexp.filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
156 execunder = uvmexp.execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
157 anonover = uvmexp.anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
158 fileover = uvmexp.filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
159 execover = uvmexp.execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
160 anonreact = anonunder || (!anonover && (fileover || execover));
161 filereact = fileunder || (!fileover && (anonover || execover));
162 execreact = execunder || (!execover && (anonover || fileover));
163 if (filereact && execreact && (anonreact || uvm_swapisfull())) {
164 anonreact = filereact = execreact = FALSE;
165 }
166 ss->ss_anonreact = anonreact;
167 ss->ss_filereact = filereact;
168 ss->ss_execreact = execreact;
169
170 ss->ss_first = TRUE;
171 }
172
/*
 * uvmpdpol_selectvictim: return the next inactive page that may be
 * reclaimed, or NULL when the inactive queue has been exhausted.
 *
 * pages that are referenced, or whose type is protected by the
 * reactivation decisions made in uvmpdpol_scaninit(), are moved back
 * to the active queue and skipped.
 *
 * => called with the page queues locked, repeatedly after scaninit().
 */
struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	struct vm_page *pg;

	UVM_LOCK_ASSERT_PAGEQ();

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		if (ss->ss_first) {
			/* first call after scaninit: start at the head */
			pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
			ss->ss_first = FALSE;
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				/*
				 * the saved page left the inactive queue
				 * since the last call; restart at the head.
				 */
				pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
			}
		}
		if (pg == NULL) {
			break;
		}
		/* remember our place before pg can be moved off the queue */
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq);

		uvmexp.pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			uvmexp.pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		/*
		 * NOTE(review): relies on short-circuit: UVM_OBJ_IS_AOBJ(uobj)
		 * is only evaluated when anon == NULL; presumably uobj is
		 * non-NULL in that case — confirm against page invariants.
		 */
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		/* pg is an eligible victim */
		break;
	}

	return pg;
}
245
/*
 * uvmpdpol_balancequeue: walk the active queue, deactivating pages
 * until the inactive target is met and, while swap is short, trying
 * to free swap slots from swap-backed pages.
 *
 * => swap_shortage: number of swap slots the caller wants released.
 * => called with the page queues locked, after the inactive scan.
 */
void
uvmpdpol_balancequeue(int swap_shortage)
{
	int inactive_shortage;
	struct vm_page *p, *nextpg;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = pdpol_state.s_inactarg - pdpol_state.s_inactive;
	for (p = TAILQ_FIRST(&pdpol_state.s_activeq);
	    p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	    p = nextpg) {
		/* save the successor: deactivation unlinks p from this queue */
		nextpg = TAILQ_NEXT(p, pageq);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

		if (swap_shortage > 0 && (p->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(p)) {
				swap_shortage--;
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			pmap_clear_reference(p);
			uvmpdpol_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
	}
}
286
287 void
288 uvmpdpol_pagedeactivate(struct vm_page *pg)
289 {
290
291 UVM_LOCK_ASSERT_PAGEQ();
292 if (pg->pqflags & PQ_ACTIVE) {
293 TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq);
294 pg->pqflags &= ~PQ_ACTIVE;
295 KASSERT(pdpol_state.s_active > 0);
296 pdpol_state.s_active--;
297 }
298 if ((pg->pqflags & PQ_INACTIVE) == 0) {
299 KASSERT(pg->wire_count == 0);
300 TAILQ_INSERT_TAIL(&pdpol_state.s_inactiveq, pg, pageq);
301 pg->pqflags |= PQ_INACTIVE;
302 pdpol_state.s_inactive++;
303 }
304 }
305
306 void
307 uvmpdpol_pageactivate(struct vm_page *pg)
308 {
309
310 uvmpdpol_pagedequeue(pg);
311 TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pageq);
312 pg->pqflags |= PQ_ACTIVE;
313 pdpol_state.s_active++;
314 }
315
316 void
317 uvmpdpol_pagedequeue(struct vm_page *pg)
318 {
319
320 if (pg->pqflags & PQ_ACTIVE) {
321 UVM_LOCK_ASSERT_PAGEQ();
322 TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq);
323 pg->pqflags &= ~PQ_ACTIVE;
324 KASSERT(pdpol_state.s_active > 0);
325 pdpol_state.s_active--;
326 } else if (pg->pqflags & PQ_INACTIVE) {
327 UVM_LOCK_ASSERT_PAGEQ();
328 TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pageq);
329 pg->pqflags &= ~PQ_INACTIVE;
330 KASSERT(pdpol_state.s_inactive > 0);
331 pdpol_state.s_inactive--;
332 }
333 }
334
/*
 * uvmpdpol_pageenqueue: add a newly managed page to the policy queues.
 * under the clock policy new pages simply start out active.
 */
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}
341
/*
 * uvmpdpol_anfree: hook called when an anon is freed.
 * the clock policy keeps no per-anon state, so this is a no-op.
 */
void
uvmpdpol_anfree(struct vm_anon *an)
{
}
346
347 boolean_t
348 uvmpdpol_pageisqueued_p(struct vm_page *pg)
349 {
350
351 return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
352 }
353
354 void
355 uvmpdpol_estimatepageable(int *active, int *inactive)
356 {
357
358 if (active) {
359 *active = pdpol_state.s_active;
360 }
361 if (inactive) {
362 *inactive = pdpol_state.s_inactive;
363 }
364 }
365
366 #if !defined(PDSIM)
367 static int
368 min_check(struct uvm_pctparam *pct, int t)
369 {
370 struct uvmpdpol_globalstate *s = &pdpol_state;
371 int total = t;
372
373 if (pct != &s->s_anonmin) {
374 total += uvm_pctparam_get(&s->s_anonmin);
375 }
376 if (pct != &s->s_filemin) {
377 total += uvm_pctparam_get(&s->s_filemin);
378 }
379 if (pct != &s->s_execmin) {
380 total += uvm_pctparam_get(&s->s_execmin);
381 }
382 if (total > 95) {
383 return EINVAL;
384 }
385 return 0;
386 }
387 #endif /* !defined(PDSIM) */
388
389 void
390 uvmpdpol_init(void)
391 {
392 struct uvmpdpol_globalstate *s = &pdpol_state;
393
394 TAILQ_INIT(&s->s_activeq);
395 TAILQ_INIT(&s->s_inactiveq);
396 uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
397 uvm_pctparam_init(&s->s_anonmin, 10, min_check);
398 uvm_pctparam_init(&s->s_filemin, 10, min_check);
399 uvm_pctparam_init(&s->s_execmin, 5, min_check);
400 uvm_pctparam_init(&s->s_anonmax, 80, NULL);
401 uvm_pctparam_init(&s->s_filemax, 50, NULL);
402 uvm_pctparam_init(&s->s_execmax, 30, NULL);
403 }
404
/*
 * uvmpdpol_reinit: re-initialization hook (e.g. after memory hotplug
 * elsewhere in UVM); the clock policy has nothing to recompute here.
 */
void
uvmpdpol_reinit(void)
{
}
409
410 boolean_t
411 uvmpdpol_needsscan_p(void)
412 {
413
414 return pdpol_state.s_inactive < pdpol_state.s_inactarg;
415 }
416
/*
 * uvmpdpol_tune: recompute policy targets; called when memory totals
 * or tunables change.  delegates to clock_tune().
 */
void
uvmpdpol_tune(void)
{

	clock_tune();
}
423
424 #if !defined(PDSIM)
425
426 #include <sys/sysctl.h> /* XXX SYSCTL_DESCR */
427
428 void
429 uvmpdpol_sysctlsetup(void)
430 {
431 struct uvmpdpol_globalstate *s = &pdpol_state;
432
433 uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
434 SYSCTL_DESCR("Percentage of physical memory reserved "
435 "for anonymous application data"));
436 uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
437 SYSCTL_DESCR("Percentage of physical memory reserved "
438 "for cached executable data"));
439 uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
440 SYSCTL_DESCR("Percentage of physical memory reserved "
441 "for cached file data"));
442
443 uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
444 SYSCTL_DESCR("Percentage of physical memory which will "
445 "be reclaimed from other usage for "
446 "anonymous application data"));
447 uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
448 SYSCTL_DESCR("Percentage of physical memory which will "
449 "be reclaimed from other usage for cached "
450 "file data"));
451 uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
452 SYSCTL_DESCR("Percentage of physical memory which will "
453 "be reclaimed from other usage for cached "
454 "executable data"));
455
456 uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
457 SYSCTL_DESCR("Percentage of inactive queue of "
458 "the entire (active + inactive) queue"));
459 }
460
461 #endif /* !defined(PDSIM) */
462
463 #if defined(PDSIM)
/*
 * pdsim_dump: debugging hook for the pagedaemon simulator build;
 * currently an empty stub.
 */
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
471 #endif /* defined(PDSIM) */
/* Cache object: 6765ed891d291530a079bfb0b690d8df */