sys/vm/vm_pageq.c
/*-
 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/vm/vm_pageq.c 147217 2005-06-10 03:33:36Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

struct vpgqueues vm_page_queues[PQ_COUNT];

void
vm_pageq_init(void)
{
        int i;

        for (i = 0; i < PQ_L2_SIZE; i++) {
                vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
        }
        for (i = 0; i < PQ_L2_SIZE; i++) {
                vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
        }
        vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
        vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
        vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

        for (i = 0; i < PQ_COUNT; i++) {
                TAILQ_INIT(&vm_page_queues[i].pl);
        }
}

void
vm_pageq_requeue(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *vpq;

        if (queue != PQ_NONE) {
                vpq = &vm_page_queues[queue];
                TAILQ_REMOVE(&vpq->pl, m, pageq);
                TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
        }
}

/*
 *      vm_pageq_enqueue:
 *
 *      Add the given page to the tail of the specified queue and
 *      update the queue's counters.
 */
void
vm_pageq_enqueue(int queue, vm_page_t m)
{
        struct vpgqueues *vpq;

        vpq = &vm_page_queues[queue];
        m->queue = queue;
        TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
        ++*vpq->cnt;
        ++vpq->lcnt;
}
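
/*
 * A minimal, standalone userland sketch (not part of vm_pageq.c) of the
 * bookkeeping that vm_pageq_init() and vm_pageq_enqueue() perform above:
 * several colored sub-queues alias one shared counter through a pointer
 * (the cnt field) while each queue keeps its own local count (lcnt).
 * All toy_* names and the TOY_NCOLORS constant are hypothetical and exist
 * only for this illustration.
 */
#include <sys/queue.h>
#include <stdio.h>

#define TOY_NCOLORS     4                       /* stands in for PQ_L2_SIZE */

struct toy_page {
        int color;
        TAILQ_ENTRY(toy_page) pageq;
};

struct toy_vpgqueues {
        TAILQ_HEAD(, toy_page) pl;
        int *cnt;                               /* shared counter, as in struct vpgqueues */
        int lcnt;                               /* per-queue count */
};

static int toy_free_count;
static struct toy_vpgqueues toy_queues[TOY_NCOLORS];

static void
toy_enqueue(int queue, struct toy_page *m)
{
        struct toy_vpgqueues *vpq = &toy_queues[queue];

        TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
        ++*vpq->cnt;                            /* bumps the shared count */
        ++vpq->lcnt;                            /* bumps only this queue's count */
}

int
main(void)
{
        struct toy_page pages[8];
        int i;

        for (i = 0; i < TOY_NCOLORS; i++) {
                TAILQ_INIT(&toy_queues[i].pl);
                toy_queues[i].cnt = &toy_free_count;    /* every color shares it */
        }
        for (i = 0; i < 8; i++) {
                pages[i].color = i % TOY_NCOLORS;
                toy_enqueue(pages[i].color, &pages[i]);
        }
        /* Prints "shared count 8, color 0 count 2". */
        printf("shared count %d, color 0 count %d\n",
            toy_free_count, toy_queues[0].lcnt);
        return (0);
}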

/*
 *      vm_pageq_add_new_page:
 *
 *      Add a new page to the freelist for use by the system.
 */
vm_page_t
vm_pageq_add_new_page(vm_paddr_t pa)
{
        vm_paddr_t bad;
        vm_page_t m;
        char *cp, *list, *pos;

        GIANT_REQUIRED;

        /*
         * See if a physical address in this page has been listed
         * in the blacklist tunable.  Entries in the tunable are
         * separated by spaces or commas.  If an invalid integer is
         * encountered then the rest of the string is skipped.
         */
        if (testenv("vm.blacklist")) {
                list = getenv("vm.blacklist");
                for (pos = list; *pos != '\0'; pos = cp) {
                        bad = strtoq(pos, &cp, 0);
                        if (*cp != '\0') {
                                if (*cp == ' ' || *cp == ',') {
                                        cp++;
                                        if (cp == pos)
                                                continue;
                                } else
                                        break;
                        }
                        if (pa == trunc_page(bad)) {
                                printf("Skipping page with pa 0x%jx\n",
                                    (uintmax_t)pa);
                                freeenv(list);
                                return (NULL);
                        }
                }
                freeenv(list);
        }

        ++cnt.v_page_count;
        m = PHYS_TO_VM_PAGE(pa);
        m->phys_addr = pa;
        m->flags = 0;
        m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
        pmap_page_init(m);
        vm_pageq_enqueue(m->pc + PQ_FREE, m);
        return (m);
}
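
/*
 * A standalone userland sketch (not kernel code) of the vm.blacklist
 * parsing rules described above: entries are physical addresses separated
 * by spaces or commas, and parsing stops once a token cannot be read as an
 * integer.  It uses strtoull() in place of the kernel's strtoq() and a
 * hard-coded 4 KB page size; toy_blacklisted() and the TOY_* names are
 * hypothetical, and the loop is a simplified reading of the one above
 * rather than a line-for-line copy.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TOY_PAGE_MASK   ((uintmax_t)0xfff)      /* assume 4 KB pages */

static int
toy_blacklisted(const char *list, uintmax_t pa)
{
        const char *pos;
        char *cp;
        uintmax_t bad;

        for (pos = list; *pos != '\0'; pos = cp) {
                bad = strtoull(pos, &cp, 0);
                if (cp == pos)                  /* not an integer: give up */
                        break;
                if (*cp == ' ' || *cp == ',')   /* step over the separator */
                        cp++;
                if ((bad & ~TOY_PAGE_MASK) == pa)       /* trunc_page(bad) */
                        return (1);
        }
        return (0);
}

int
main(void)
{
        const char *list = "0x1000,0x3000 0x7000";

        /* Prints "1 0": 0x3000 is blacklisted, 0x2000 is not. */
        printf("%d %d\n",
            toy_blacklisted(list, 0x3000),
            toy_blacklisted(list, 0x2000));
        return (0);
}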

/*
 *      vm_pageq_remove_nowakeup:
 *
 *      vm_page_unqueue() without any wakeup.
 *
 *      The queue containing the given page must be locked.
 *      This routine may not block.
 */
void
vm_pageq_remove_nowakeup(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *pq;

        if (queue != PQ_NONE) {
                pq = &vm_page_queues[queue];
                m->queue = PQ_NONE;
                TAILQ_REMOVE(&pq->pl, m, pageq);
                (*pq->cnt)--;
                pq->lcnt--;
        }
}

/*
 *      vm_pageq_remove:
 *
 *      Remove a page from its queue.
 *
 *      The queue containing the given page must be locked.
 *      This routine may not block.
 */
void
vm_pageq_remove(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *pq;

        if (queue != PQ_NONE) {
                m->queue = PQ_NONE;
                pq = &vm_page_queues[queue];
                TAILQ_REMOVE(&pq->pl, m, pageq);
                (*pq->cnt)--;
                pq->lcnt--;
                if ((queue - m->pc) == PQ_CACHE) {
                        if (vm_paging_needed())
                                pagedaemon_wakeup();
                }
        }
}

#if PQ_L2_SIZE > 1

/*
 *      _vm_pageq_find:
 *
 *      Find a page on the specified queue with color optimization.
 *
 *      The page coloring optimization attempts to locate a page
 *      that does not overload other nearby pages in the object in
 *      the cpu's L2 cache.  We need this optimization because cpu
 *      caches tend to be physical caches, while object spaces tend
 *      to be virtual.
 *
 *      The specified queue must be locked.
 *      This routine may not block.
 *
 *      This routine may only be called from the vm_pageq_find()
 *      function in this file.
 */
static __inline vm_page_t
_vm_pageq_find(int basequeue, int index)
{
        int i;
        vm_page_t m = NULL;
        struct vpgqueues *pq;

        pq = &vm_page_queues[basequeue];

        /*
         * Note that for the first loop, index+i and index-i wind up at the
         * same place.  Even though this is not totally optimal, we've already
         * blown it by missing the cache case so we do not care.
         */
        for (i = PQ_L2_SIZE / 2; i > 0; --i) {
                if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
                        break;

                if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
                        break;
        }
        return (m);
}
#endif /* PQ_L2_SIZE > 1 */
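
/*
 * A standalone userland sketch (not kernel code) of the coloring logic
 * above.  A page's color is derived from its physical address, exactly as
 * m->pc is set in vm_pageq_add_new_page(), and after vm_pageq_find() has
 * tried the preferred color, the fallback loop in _vm_pageq_find() probes
 * the remaining colors starting at the one furthest from the preferred
 * color (where index+i and index-i coincide) and working inward, wrapping
 * with the same mask.  The TOY_* constants (4 KB pages, 32 colors) are
 * assumed values chosen only for the illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT  12                      /* assume 4 KB pages */
#define TOY_L2_SIZE     32                      /* stands in for PQ_L2_SIZE */
#define TOY_L2_MASK     (TOY_L2_SIZE - 1)

int
main(void)
{
        uint64_t pa = 0x12345000;
        int index, i;

        /* The color this page would get, as in m->pc. */
        index = (int)((pa >> TOY_PAGE_SHIFT) & TOY_L2_MASK);
        printf("preferred color %d, fallback order:", index);

        /* The probe order used by the loop in _vm_pageq_find(). */
        for (i = TOY_L2_SIZE / 2; i > 0; --i) {
                printf(" %d", (index + i) & TOY_L2_MASK);
                printf(" %d", (index - i) & TOY_L2_MASK);
        }
        printf("\n");
        return (0);
}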

vm_page_t
vm_pageq_find(int basequeue, int index, boolean_t prefer_zero)
{
        vm_page_t m;

#if PQ_L2_SIZE > 1
        if (prefer_zero) {
                m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
        } else {
                m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
        }
        if (m == NULL) {
                m = _vm_pageq_find(basequeue, index);
        }
#else
        if (prefer_zero) {
                m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
        } else {
                m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
        }
#endif
        return (m);
}