FreeBSD/Linux Kernel Cross Reference
sys/vm/phys_pager.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static int default_phys_pager_getpages(vm_object_t object, vm_page_t *m,
    int count, int *rbehind, int *rahead);
static int default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last);
static boolean_t default_phys_pager_haspage(vm_object_t object,
    vm_pindex_t pindex, int *before, int *after);
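/*
 * Default method table, used by phys_pager_alloc() when a caller does
 * not supply specialized ops.  The optional ctor/dtor hooks are left
 * NULL here.
 */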
struct phys_pager_ops default_phys_pg_ops = {
	.phys_pg_getpages = default_phys_pager_getpages,
	.phys_pg_populate = default_phys_pager_populate,
	.phys_pg_haspage = default_phys_pager_haspage,
	.phys_pg_ctor = NULL,
	.phys_pg_dtor = NULL,
};

static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

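/*
 * Create an OBJT_PHYS object of the requested size or, for a non-NULL
 * handle, find the existing object for that handle, growing it if the
 * new request is larger.  Since vm_object_allocate() may sleep, the
 * list mutex is dropped across the allocation and the lookup is redone
 * afterwards; if another thread won the race, its object is reused and
 * the extra one is released.  The ctor hook runs only for fresh objects.
 */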
vm_object_t
phys_pager_allocate(void *handle, struct phys_pager_ops *ops, void *data,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;
	bool init;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);
	init = true;

	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with another thread while
				 * allocating the object.
				 */
				if (pindex > object->size)
					object->size = pindex;
				init = false;
			} else {
				object = object1;
				object1 = NULL;
				object->handle = handle;
				object->un_pager.phys.ops = ops;
				object->un_pager.phys.data_ptr = data;
				if (ops->phys_pg_populate != NULL)
					vm_object_set_flag(object, OBJ_POPULATE);
				TAILQ_INSERT_TAIL(&phys_pager_object_list,
				    object, pager_object_list);
			}
		} else {
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		vm_object_deallocate(object1);
	} else {
		object = vm_object_allocate(OBJT_PHYS, pindex);
		object->un_pager.phys.ops = ops;
		object->un_pager.phys.data_ptr = data;
		if (ops->phys_pg_populate != NULL)
			vm_object_set_flag(object, OBJ_POPULATE);
	}
	if (init && ops->phys_pg_ctor != NULL)
		ops->phys_pg_ctor(object, prot, foff, cred);

	return (object);
}

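/*
 * pgo_alloc entry point: allocate with the default method table.
 */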
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *ucred)
{
	return (phys_pager_allocate(handle, &default_phys_pg_ops, NULL,
	    size, prot, foff, ucred));
}

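/*
 * Tear down a phys pager object.  Named objects are unlinked from the
 * global list; the object write lock is dropped around taking
 * phys_pager_mtx, presumably to keep the lock order consistent with
 * phys_pager_allocate().  The dtor hook runs after the type changes
 * to OBJT_DEAD.
 */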
static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
		VM_OBJECT_WLOCK(object);
	}
	object->type = OBJT_DEAD;
	if (object->un_pager.phys.ops->phys_pg_dtor != NULL)
		object->un_pager.phys.ops->phys_pg_dtor(object);
	object->handle = NULL;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
default_phys_pager_getpages(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead)
{
	int i;

	for (i = 0; i < count; i++) {
		if (vm_page_none_valid(m[i])) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			vm_page_valid(m[i]);
		}
		KASSERT(vm_page_all_valid(m[i]),
		    ("phys_pager_getpages: partially valid page %p", m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("phys_pager_getpages: dirty page %p", m[i]));
	}
	if (rbehind)
		*rbehind = 0;
	if (rahead)
		*rahead = 0;
	return (VM_PAGER_OK);
}

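/*
 * The pgo_getpages, pgo_populate, and pgo_haspage entry points below
 * are thin wrappers that dispatch through the per-object method table.
 */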
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	return (object->un_pager.phys.ops->phys_pg_getpages(object, m,
	    count, rbehind, rahead));
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define	PHYSCLUSTER	1024
#endif
static int phys_pager_cluster = PHYSCLUSTER;
SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
    &phys_pager_cluster, 0,
    "prefault window size for phys pager");

/*
 * Max hint to vm_page_alloc() about the further allocation needs
 * inside the phys_pager_populate() loop.  The number of bits used to
 * implement VM_ALLOC_COUNT() determines the hard limit on this value.
 * That limit is currently 65535.
 */
#define	PHYSALLOC	16

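/*
 * Populate every page in the cluster-aligned window around pidx,
 * clamped to the object size and to the [*first, *last] range permitted
 * by the fault handler.  The window actually filled is returned in
 * *first and *last.  VM_ALLOC_COUNT() passes a hint about how many more
 * pages the loop will need, capped at PHYSALLOC.
 */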
static int
default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
    vm_pindex_t *last)
{
	vm_page_t m;
	vm_pindex_t base, end, i;
	int ahead;

	base = rounddown(pidx, phys_pager_cluster);
	end = base + phys_pager_cluster - 1;
	if (end >= object->size)
		end = object->size - 1;
	if (*first > base)
		base = *first;
	if (end > *last)
		end = *last;
	*first = base;
	*last = end;

	for (i = base; i <= end; i++) {
		ahead = MIN(end - i, PHYSALLOC);
		m = vm_page_grab(object, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
		if (!vm_page_all_valid(m))
			vm_page_zero_invalid(m, TRUE);
		KASSERT(m->dirty == 0,
		    ("phys_pager_populate: dirty page %p", m));
	}
	return (VM_PAGER_OK);
}

static int
phys_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	return (object->un_pager.phys.ops->phys_pg_populate(object, pidx,
	    fault_type, max_prot, first, last));
}

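/*
 * A phys object's pages are never written back, so this entry point
 * must never be reached; a call here indicates a bug.
 */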
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

	panic("phys_pager_putpage called");
}

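/*
 * Report the whole prefault cluster around pindex as available:
 * *before and *after are the distances to the cluster edges.
 */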
static boolean_t
default_phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	vm_pindex_t base, end;

	base = rounddown(pindex, phys_pager_cluster);
	end = base + phys_pager_cluster - 1;
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}

static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	return (object->un_pager.phys.ops->phys_pg_haspage(object, pindex,
	    before, after));
}

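/*
 * Pager method table for OBJT_PHYS objects, referenced from the global
 * pager table in vm_pager.c.
 */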
struct pagerops physpagerops = {
	.pgo_init = phys_pager_init,
	.pgo_alloc = phys_pager_alloc,
	.pgo_dealloc = phys_pager_dealloc,
	.pgo_getpages = phys_pager_getpages,
	.pgo_putpages = phys_pager_putpages,
	.pgo_haspage = phys_pager_haspage,
	.pgo_populate = phys_pager_populate,
};