sys/vm/vm_pageq.c
/*
 * (c)Copyright 1998, Matthew Dillon.  Terms for use and redistribution
 * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
 *
 * $FreeBSD: releng/5.0/sys/vm/vm_pageq.c 100396 2002-07-20 05:06:20Z alc $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

struct vpgqueues vm_page_queues[PQ_COUNT];

/*
 * vm_pageq_init:
 *
 *	Initialize the page queue array: point each queue at its global
 *	page counter and initialize each queue's page list.
 */
void
vm_pageq_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	/* Held pages are charged against the active page counter. */
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	for (i = 0; i < PQ_COUNT; i++) {
		TAILQ_INIT(&vm_page_queues[i].pl);
	}
}
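
/*
 * Example: the free and cache queues are really arrays of PQ_L2_SIZE
 * per-color queues.  Assuming PQ_L2_SIZE is 64, a page of color 5
 * lives on vm_page_queues[PQ_FREE + 5] while free and on
 * vm_page_queues[PQ_CACHE + 5] while cached; all 64 free color queues
 * share the single v_free_count counter initialized above, while each
 * queue keeps its own local count in lcnt.
 */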

/*
 * vm_pageq_aquire:
 *
 *	Return the queue structure for the given queue index, or NULL
 *	when the index is PQ_NONE.
 */
static __inline struct vpgqueues *
vm_pageq_aquire(int queue)
{
	struct vpgqueues *vpq = NULL;

	if (queue != PQ_NONE) {
		vpq = &vm_page_queues[queue];
	}
	return (vpq);
}

/*
 * vm_pageq_requeue:
 *
 *	Move a page to the tail of its current queue.  A page that is
 *	on no queue (PQ_NONE) is ignored.
 */
void
vm_pageq_requeue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *vpq;

	vpq = vm_pageq_aquire(queue);
	if (vpq != NULL) {
		TAILQ_REMOVE(&vpq->pl, m, pageq);
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	}
}

/*
 * vm_pageq_enqueue:
 *
 *	Place the page at the tail of the specified queue and update the
 *	queue's global and local page counters.
 */
void
vm_pageq_enqueue(int queue, vm_page_t m)
{
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[queue];
	m->queue = queue;
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	++*vpq->cnt;
	++vpq->lcnt;
}

/*
 * vm_pageq_add_new_page:
 *
 *	Add a new page to the freelist for use by the system.
 *	Must be called at splhigh().
 */
vm_page_t
vm_pageq_add_new_page(vm_offset_t pa)
{
	vm_page_t m;

	GIANT_REQUIRED;

	++cnt.v_page_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	vm_pageq_enqueue(m->pc + PQ_FREE, m);
	return (m);
}
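
/*
 * Worked example of the color computation above, assuming a 4K page
 * size (PAGE_SHIFT == 12) and PQ_L2_MASK == 0x3f (PQ_L2_SIZE == 64):
 * for pa == 0x12345000, pa >> PAGE_SHIFT == 0x12345 and
 * 0x12345 & 0x3f == 0x05, so the page gets color 5 and is enqueued on
 * vm_page_queues[PQ_FREE + 5].
 */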

/*
 * vm_pageq_remove_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_pageq_remove_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 * vm_pageq_remove:
 *
 *	Remove a page from its queue.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_pageq_remove(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	GIANT_REQUIRED;
	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		/*
		 * Removing a page from the cache shrinks the pool the
		 * pagedaemon maintains, so wake it if it has work to do.
		 */
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

#if PQ_L2_SIZE > 1

/*
 * _vm_pageq_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page
 *	that does not overload other nearby pages in the object in
 *	the cpu's L1 or L2 caches.  We need this optimization because
 *	cpu caches tend to be physical caches, while object spaces tend
 *	to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from vm_pageq_find(), below.
 */
static __inline vm_page_t
_vm_pageq_find(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return (m);
}
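
/*
 * Search-order example, assuming PQ_L2_SIZE == 8: the caller has
 * already checked the exact color "index", so the loop above probes
 * the remaining colors from the farthest out inward:
 *
 *	i == 4:  (index+4) and (index-4) alias to the same queue
 *	i == 3:  index+3, then index-3
 *	i == 2:  index+2, then index-2
 *	i == 1:  index+1, then index-1
 *
 * The first nonempty queue terminates the search.
 */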
#endif /* PQ_L2_SIZE > 1 */

/*
 * vm_pageq_find:
 *
 *	Locate a page on the given base queue, preferring the requested
 *	color and falling back to nearby colors when PQ_L2_SIZE > 1.
 *	When prefer_zero is TRUE the search starts from the tail of the
 *	list, where zeroed pages accumulate.
 */
vm_page_t
vm_pageq_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

#if PQ_L2_SIZE > 1
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	}
	if (m == NULL) {
		m = _vm_pageq_find(basequeue, index);
	}
#else
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
	}
#endif
	return (m);
}
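
/*
 * Hypothetical caller sketch (not part of this file): an allocator
 * wanting a pre-zeroed free page of a particular color might, at
 * splvm(), do something like
 *
 *	m = vm_pageq_find(PQ_FREE, color & PQ_L2_MASK, TRUE);
 *	if (m != NULL)
 *		vm_pageq_remove_nowakeup(m);
 *
 * where "color" is the desired page color derived from the object
 * offset.
 */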