FreeBSD/Linux Kernel Cross Reference
sys/vm/device_pager.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);
static int dev_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t, vm_pindex_t *first, vm_pindex_t *last);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

const struct pagerops devicepagerops = {
	.pgo_kvme_type = KVME_TYPE_DEVICE,
	.pgo_init = dev_pager_init,
	.pgo_alloc = dev_pager_alloc,
	.pgo_dealloc = dev_pager_dealloc,
	.pgo_getpages = dev_pager_getpages,
	.pgo_putpages = dev_pager_putpages,
	.pgo_haspage = dev_pager_haspage,
};

const struct pagerops mgtdevicepagerops = {
	.pgo_kvme_type = KVME_TYPE_MGTDEVICE,
	.pgo_alloc = dev_pager_alloc,
	.pgo_dealloc = dev_pager_dealloc,
	.pgo_getpages = dev_pager_getpages,
	.pgo_putpages = dev_pager_putpages,
	.pgo_haspage = dev_pager_haspage,
	.pgo_populate = dev_pager_populate,
};

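/*
 * Note the split above: unmanaged (OBJT_DEVICE) pagers keep their
 * fictitious pages on the per-object devp_pglist and free them in
 * dev_pager_dealloc(), while managed (OBJT_MGTDEVICE) pagers leave page
 * lifetime to the pmap layer and may supply a pgo_populate method.  Only
 * devicepagerops carries pgo_init; both pager types share the object
 * list and mutex initialized there.
 */
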
static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static const struct cdev_pager_ops old_dev_pager_ops = {
	.cdev_pg_ctor = old_dev_pager_ctor,
	.cdev_pg_dtor = old_dev_pager_dtor,
	.cdev_pg_fault = old_dev_pager_fault
};

static void
dev_pager_init(void)
{

	TAILQ_INIT(&dev_pager_object_list);
	mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
}

vm_object_t
cdev_pager_lookup(void *handle)
{
	vm_object_t object;

	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	mtx_unlock(&dev_pager_mtx);
	return (object);
}

vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp,
    const struct cdev_pager_ops *ops, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;
	u_short color;

	if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
		return (NULL);
	KASSERT(tp == OBJT_MGTDEVICE || ops->cdev_pg_populate == NULL,
	    ("populate on unmanaged device pager"));

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	/*
	 * Treat the mmap(2) file offset as an unsigned value for a
	 * device mapping.  This, in effect, allows a user to pass all
	 * possible off_t values as the mapping cookie to the driver.  At
	 * this point, we know that both foff and size are a multiple
	 * of the page size.  Do a check to avoid wrap.
	 */
	size = round_page(size);
	pindex = OFF_TO_IDX(foff) + OFF_TO_IDX(size);
	if (pindex > OBJ_MAX_SIZE || pindex < OFF_TO_IDX(foff) ||
	    pindex < OFF_TO_IDX(size))
		return (NULL);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);
	mtx_lock(&dev_pager_mtx);

	/*
	 * Look up pager, creating as necessary.
	 */
	object1 = NULL;
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.  Initialize
		 * the object's pg_color based upon the physical address of the
		 * device's memory.
		 */
		mtx_unlock(&dev_pager_mtx);
		object1 = vm_object_allocate(tp, pindex);
		object1->flags |= OBJ_COLORED;
		object1->pg_color = color;
		object1->handle = handle;
		object1->un_pager.devp.ops = ops;
		object1->un_pager.devp.dev = handle;
		TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
		mtx_lock(&dev_pager_mtx);
		object = vm_pager_object_lookup(&dev_pager_object_list, handle);
		if (object != NULL) {
			/*
			 * We raced with another thread while allocating
			 * the object.
			 */
			if (pindex > object->size)
				object->size = pindex;
			KASSERT(object->type == tp,
			    ("Inconsistent device pager type %p %d",
			    object, tp));
			KASSERT(object->un_pager.devp.ops == ops,
			    ("Inconsistent devops %p %p", object, ops));
		} else {
			object = object1;
			object1 = NULL;
			object->handle = handle;
			TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
			    pager_object_list);
			if (ops->cdev_pg_populate != NULL)
				vm_object_set_flag(object, OBJ_POPULATE);
		}
	} else {
		if (pindex > object->size)
			object->size = pindex;
		KASSERT(object->type == tp,
		    ("Inconsistent device pager type %p %d", object, tp));
	}
	mtx_unlock(&dev_pager_mtx);
	if (object1 != NULL) {
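		/*
		 * We lost the race: another thread inserted an object for
		 * this handle first.  Repoint the extra object's handle at
		 * itself so no lookup can match it, link it on the list so
		 * that dev_pager_dealloc() can unlink it, then drop the
		 * last reference to destroy it.
		 */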
		object1->handle = object1;
		mtx_lock(&dev_pager_mtx);
		TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
		    pager_object_list);
		mtx_unlock(&dev_pager_mtx);
		vm_object_deallocate(object1);
	}
	return (object);
}

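/*
 * Illustrative sketch (not part of this file): how a driver's
 * d_mmap_single routine might create a managed pager object with its own
 * ops via cdev_pager_allocate().  The "foo_" names and FOO_SIZE are
 * hypothetical assumptions, not real kernel symbols; the #if 0 keeps the
 * sketch out of any build.
 */
#if 0
static const struct cdev_pager_ops foo_pager_ops = {
	.cdev_pg_ctor = foo_pg_ctor,
	.cdev_pg_dtor = foo_pg_dtor,
	.cdev_pg_populate = foo_pg_populate,
};

static int
foo_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
	vm_object_t obj;

	if (*offset + size > FOO_SIZE)
		return (EINVAL);
	/* The softc serves as the pager handle; ctor/dtor receive it. */
	obj = cdev_pager_allocate(cdev->si_drv1, OBJT_MGTDEVICE,
	    &foo_pager_ops, size, nprot, *offset, curthread->td_ucred);
	if (obj == NULL)
		return (ENXIO);
	*objp = obj;
	return (0);
}
#endif
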
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{

	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
	    size, prot, foff, cred));
}

void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type == OBJT_MGTDEVICE) {
		KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
		pmap_remove_all(m);
		(void)vm_page_remove(m);
	} else if (object->type == OBJT_DEVICE)
		dev_pager_free_page(object, m);
}

static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->type == OBJT_DEVICE &&
	    (m->oflags & VPO_UNMANAGED) != 0),
	    ("Managed device or page obj %p m %p", object, m));
	TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, plinks.q);
	vm_page_putfake(m);
}

static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;

	VM_OBJECT_WUNLOCK(object);
	object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);

	mtx_lock(&dev_pager_mtx);
	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
	mtx_unlock(&dev_pager_mtx);
	VM_OBJECT_WLOCK(object);

	if (object->type == OBJT_DEVICE) {
		/*
		 * Free up our fake pages.
		 */
		while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist))
		    != NULL) {
			if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
				continue;

			dev_pager_free_page(object, m);
		}
	}
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

static int
dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{
	int error;

	/* Since our haspage reports zero after/before, the count is 1. */
	KASSERT(count == 1, ("%s: count %d", __func__, count));
	if (object->un_pager.devp.ops->cdev_pg_fault == NULL)
		return (VM_PAGER_FAIL);
	VM_OBJECT_WLOCK(object);
	error = object->un_pager.devp.ops->cdev_pg_fault(object,
	    IDX_TO_OFF(ma[0]->pindex), PROT_READ, &ma[0]);

	VM_OBJECT_ASSERT_WLOCKED(object);

	if (error == VM_PAGER_OK) {
		KASSERT((object->type == OBJT_DEVICE &&
		    (ma[0]->oflags & VPO_UNMANAGED) != 0) ||
		    (object->type == OBJT_MGTDEVICE &&
		    (ma[0]->oflags & VPO_UNMANAGED) == 0),
		    ("Wrong page type %p %p", ma[0], object));
		if (object->type == OBJT_DEVICE) {
			TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
			    ma[0], plinks.q);
		}
		if (rbehind)
			*rbehind = 0;
		if (rahead)
			*rahead = 0;
	}
	VM_OBJECT_WUNLOCK(object);

	return (error);
}

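/*
 * Illustrative sketch (not part of this file): a minimal cdev_pg_fault
 * handler for an unmanaged pager, patterned after old_dev_pager_fault()
 * below.  FOO_PHYS_BASE, FOO_SIZE, and the "foo_" name are hypothetical.
 * The handler is entered, and must return, with the object write-locked.
 */
#if 0
static int
foo_pg_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	vm_page_t page;
	vm_paddr_t paddr;

	if (offset >= FOO_SIZE)
		return (VM_PAGER_FAIL);
	paddr = FOO_PHYS_BASE + offset;
	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/* Retarget the existing fake page at the new address. */
		page = *mres;
		vm_page_updatefake(page, paddr, VM_MEMATTR_UNCACHEABLE);
	} else {
		/* Swap a fresh fake page in for the caller's placeholder. */
		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WLOCK(object);
		vm_page_replace(page, object, (*mres)->pindex, *mres);
		*mres = page;
	}
	vm_page_valid(page);
	return (VM_PAGER_OK);
}
#endif
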
static int
dev_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->un_pager.devp.ops->cdev_pg_populate == NULL)
		return (VM_PAGER_FAIL);
	return (object->un_pager.devp.ops->cdev_pg_populate(object, pidx,
	    fault_type, max_prot, first, last));
}

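/*
 * Illustrative sketch (not part of this file): a single-page
 * cdev_pg_populate handler for a managed pager.  It assumes the device
 * aperture [FOO_PHYS_BASE, FOO_PHYS_BASE + FOO_SIZE) was registered with
 * vm_phys_fictitious_reg_range() at attach time; all "FOO"/"foo" names
 * are hypothetical.  Pages in [*first, *last] must be left busied and
 * valid for the fault code.
 */
#if 0
static int
foo_pg_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	vm_page_t m;

	if (pidx >= OFF_TO_IDX(FOO_SIZE))
		return (VM_PAGER_FAIL);
	m = PHYS_TO_VM_PAGE(FOO_PHYS_BASE + IDX_TO_OFF(pidx));
	/* Loop like dev_pager_dealloc() does if the busy acquire fails. */
	while (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
		continue;
	if (m->object == NULL && vm_page_insert(m, object, pidx) != 0) {
		vm_page_xunbusy(m);
		return (VM_PAGER_FAIL);
	}
	vm_page_valid(m);
	*first = *last = pidx;
	return (VM_PAGER_OK);
}
#endif
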
static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t m_paddr, page;
	struct cdev *dev;
	struct cdevsw *csw;
	struct file *fpop;
	struct thread *td;
	vm_memattr_t memattr, memattr1;
	int ref, ret;

	memattr = object->memattr;

	VM_OBJECT_WUNLOCK(object);

	dev = object->handle;
	csw = dev_refthread(dev, &ref);
	if (csw == NULL) {
		VM_OBJECT_WLOCK(object);
		return (VM_PAGER_FAIL);
	}
	td = curthread;
	fpop = td->td_fpop;
	td->td_fpop = NULL;
	ret = csw->d_mmap(dev, offset, &paddr, prot, &memattr);
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	if (ret != 0) {
		printf(
	    "WARNING: dev_pager_getpage: map function returns error %d\n",
		    ret);
		VM_OBJECT_WLOCK(object);
		return (VM_PAGER_FAIL);
	}

	/* If "paddr" is a real page, perform a sanity check on "memattr". */
	if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
	    (memattr1 = pmap_page_get_memattr(m_paddr)) != memattr) {
		/*
		 * For the /dev/mem d_mmap routine to return the
		 * correct memattr, pmap_page_get_memattr() needs to
		 * be called, which we do here.
		 */
		if ((csw->d_flags & D_MEM) == 0) {
			printf("WARNING: Device driver %s has set "
			    "\"memattr\" inconsistently (drv %u pmap %u).\n",
			    csw->d_name, memattr, memattr1);
		}
		memattr = memattr1;
	}
	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		VM_OBJECT_WLOCK(object);
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all of the original pages.
		 */
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_replace(page, object, (*mres)->pindex, *mres);
		*mres = page;
	}
	vm_page_valid(page);
	return (VM_PAGER_OK);
}

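/*
 * Illustrative sketch (not part of this file): the kind of d_mmap routine
 * that old_dev_pager_fault() invokes above, for a hypothetical driver
 * exposing a contiguous physical window.  FOO_PHYS_BASE and FOO_SIZE are
 * assumed constants, not real kernel symbols.
 */
#if 0
static int
foo_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{

	if (offset >= FOO_SIZE)
		return (EINVAL);
	*paddr = FOO_PHYS_BASE + offset;
	*memattr = VM_MEMATTR_UNCACHEABLE;	/* device registers */
	return (0);
}
#endif
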
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{

	panic("dev_pager_putpage called");
}

static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

	if (before != NULL)
		*before = 0;
	if (after != NULL)
		*after = 0;
	return (TRUE);
}

static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct cdev *dev;
	struct cdevsw *csw;
	vm_memattr_t dummy;
	vm_ooffset_t off;
	vm_paddr_t paddr;
	unsigned int npages;
	int ref;

	/*
	 * Make sure this device can be mapped.
	 */
	dev = handle;
	csw = dev_refthread(dev, &ref);
	if (csw == NULL)
		return (ENXIO);

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	paddr = 0; /* Make paddr initialized for the case of size == 0. */
	for (off = foff; npages--; off += PAGE_SIZE) {
		if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
			dev_relthread(dev, ref);
			return (EINVAL);
		}
	}

	dev_ref(dev);
	dev_relthread(dev, ref);
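	/*
	 * Derive the pager color from the physical address of the last
	 * page probed above: "off" has advanced one page past it, so the
	 * difference below aligns the object's page coloring (and thus
	 * any superpage mappings) with the device memory itself.
	 */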
	*color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
	return (0);
}

static void
old_dev_pager_dtor(void *handle)
{

	dev_rel(handle);
}