FreeBSD/Linux Kernel Cross Reference
sys/vm/device_pager.c
/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/vm/device_pager.c 254182 2013-08-10 17:36:42Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
    boolean_t, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *,
    int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

struct pagerops devicepagerops = {
	.pgo_init = dev_pager_init,
	.pgo_alloc = dev_pager_alloc,
	.pgo_dealloc = dev_pager_dealloc,
	.pgo_getpages = dev_pager_getpages,
	.pgo_putpages = dev_pager_putpages,
	.pgo_haspage = dev_pager_haspage,
};

/*
 * OBJT_MGTDEVICE objects share the same methods, minus pgo_init (the
 * object list and its mutex are initialized once through devicepagerops);
 * their pages are managed pages and are not tracked on devp_pglist.
 */
struct pagerops mgtdevicepagerops = {
	.pgo_alloc = dev_pager_alloc,
	.pgo_dealloc = dev_pager_dealloc,
	.pgo_getpages = dev_pager_getpages,
	.pgo_putpages = dev_pager_putpages,
	.pgo_haspage = dev_pager_haspage,
};

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static struct cdev_pager_ops old_dev_pager_ops = {
	.cdev_pg_ctor = old_dev_pager_ctor,
	.cdev_pg_dtor = old_dev_pager_dtor,
	.cdev_pg_fault = old_dev_pager_fault
};

static void
dev_pager_init(void)
{
	TAILQ_INIT(&dev_pager_object_list);
	mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
}

vm_object_t
cdev_pager_lookup(void *handle)
{
	vm_object_t object;

	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	mtx_unlock(&dev_pager_mtx);
	return (object);
}

vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;
	u_short color;

	if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
		return (NULL);

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);
	pindex = OFF_TO_IDX(foff + size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);
	mtx_lock(&dev_pager_mtx);

	/*
	 * Look up pager, creating as necessary.
	 */
	object1 = NULL;
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.  Initialize
		 * the object's pg_color based upon the physical address of the
		 * device's memory.
		 */
		mtx_unlock(&dev_pager_mtx);
		object1 = vm_object_allocate(tp, pindex);
		object1->flags |= OBJ_COLORED;
		object1->pg_color = color;
		object1->handle = handle;
		object1->un_pager.devp.ops = ops;
		object1->un_pager.devp.dev = handle;
		TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
		mtx_lock(&dev_pager_mtx);
		object = vm_pager_object_lookup(&dev_pager_object_list, handle);
		if (object != NULL) {
			/*
			 * We raced with another thread while allocating the
			 * object.
			 */
			if (pindex > object->size)
				object->size = pindex;
		} else {
			object = object1;
			object1 = NULL;
			object->handle = handle;
			TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
			    pager_object_list);
			KASSERT(object->type == tp,
			    ("Inconsistent device pager type %p %d", object, tp));
		}
	} else {
		if (pindex > object->size)
			object->size = pindex;
	}
	mtx_unlock(&dev_pager_mtx);
	if (object1 != NULL) {
		object1->handle = object1;
		mtx_lock(&dev_pager_mtx);
		TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
		    pager_object_list);
		mtx_unlock(&dev_pager_mtx);
		vm_object_deallocate(object1);
	}
	return (object);
}
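
/*
 * Illustrative sketch (not part of the original file): a driver that
 * manages its own pages can hand cdev_pager_allocate() the OBJT_MGTDEVICE
 * type and its own cdev_pager_ops, typically from a d_mmap_single
 * callback.  All xxx_* names below are hypothetical; the callback
 * signatures follow the cdev_pager_ops prototypes used in this file.
 */
#if 0
static int	xxx_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		    vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void	xxx_pg_dtor(void *handle);
static int	xxx_pg_fault(vm_object_t object, vm_ooffset_t offset,
		    int prot, vm_page_t *mres);

static struct cdev_pager_ops xxx_pager_ops = {
	.cdev_pg_ctor = xxx_pg_ctor,
	.cdev_pg_dtor = xxx_pg_dtor,
	.cdev_pg_fault = xxx_pg_fault
};

static int
xxx_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	vm_object_t obj;

	/* One pager object per device; the cdev itself is the handle. */
	obj = cdev_pager_allocate(cdev, OBJT_MGTDEVICE, &xxx_pager_ops,
	    size, nprot, *offset, curthread->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*object = obj;
	return (0);
}
#endif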

static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{

	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
	    size, prot, foff, cred));
}

void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type == OBJT_MGTDEVICE) {
		KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
		pmap_remove_all(m);
		vm_page_lock(m);
		vm_page_remove(m);
		vm_page_unlock(m);
	} else if (object->type == OBJT_DEVICE)
		dev_pager_free_page(object, m);
}

static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->type == OBJT_DEVICE &&
	    (m->oflags & VPO_UNMANAGED) != 0),
	    ("Managed device or page obj %p m %p", object, m));
	TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, plinks.q);
	vm_page_putfake(m);
}

static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;

	VM_OBJECT_WUNLOCK(object);
	object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);

	mtx_lock(&dev_pager_mtx);
	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
	mtx_unlock(&dev_pager_mtx);
	VM_OBJECT_WLOCK(object);

	if (object->type == OBJT_DEVICE) {
		/*
		 * Free up our fake pages.
		 */
		while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist))
		    != NULL)
			dev_pager_free_page(object, m);
	}
}

static int
dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
	int error, i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = object->un_pager.devp.ops->cdev_pg_fault(object,
	    IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The fault handler serviced only the requested page; free the
	 * other pages that were passed in.
	 */
	for (i = 0; i < count; i++) {
		if (i != reqpage) {
			vm_page_lock(ma[i]);
			vm_page_free(ma[i]);
			vm_page_unlock(ma[i]);
		}
	}

	if (error == VM_PAGER_OK) {
		KASSERT((object->type == OBJT_DEVICE &&
		    (ma[reqpage]->oflags & VPO_UNMANAGED) != 0) ||
		    (object->type == OBJT_MGTDEVICE &&
		    (ma[reqpage]->oflags & VPO_UNMANAGED) == 0),
		    ("Wrong page type %p %p", ma[reqpage], object));
		if (object->type == OBJT_DEVICE) {
			TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
			    ma[reqpage], plinks.q);
		}
	}

	return (error);
}

static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	vm_pindex_t pidx;
	vm_paddr_t paddr;
	vm_page_t m_paddr, page;
	struct cdev *dev;
	struct cdevsw *csw;
	struct file *fpop;
	struct thread *td;
	vm_memattr_t memattr;
	int ref, ret;

	pidx = OFF_TO_IDX(offset);
	memattr = object->memattr;

	VM_OBJECT_WUNLOCK(object);

	dev = object->handle;
	csw = dev_refthread(dev, &ref);
	if (csw == NULL) {
		VM_OBJECT_WLOCK(object);
		return (VM_PAGER_FAIL);
	}
	td = curthread;
	fpop = td->td_fpop;
	td->td_fpop = NULL;
	ret = csw->d_mmap(dev, offset, &paddr, prot, &memattr);
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	if (ret != 0) {
		printf(
		    "WARNING: dev_pager_getpage: map function returns error %d\n",
		    ret);
		VM_OBJECT_WLOCK(object);
		return (VM_PAGER_FAIL);
	}

	/* If "paddr" is a real page, perform a sanity check on "memattr". */
	if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
	    pmap_page_get_memattr(m_paddr) != memattr) {
		memattr = pmap_page_get_memattr(m_paddr);
		printf(
		    "WARNING: A device driver has set \"memattr\" inconsistently.\n");
	}
	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		VM_OBJECT_WLOCK(object);
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all of the original pages.
		 */
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		if (vm_page_replace(page, object, (*mres)->pindex) != *mres)
			panic("old_dev_pager_fault: invalid page replacement");
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
		*mres = page;
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}
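
/*
 * Illustrative sketch (not part of the original file): the d_mmap
 * callback consulted above only translates a byte offset within the
 * device into a physical address; the pager builds the fake page.  The
 * xxx_* names, xxx_phys_base, and XXX_MEM_SIZE are hypothetical.
 */
#if 0
#define	XXX_MEM_SIZE	(4UL * 1024 * 1024)	/* hypothetical window size */
static vm_paddr_t xxx_phys_base;		/* hypothetical memory base */

static int
xxx_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{

	if (offset >= XXX_MEM_SIZE)
		return (EINVAL);
	*paddr = xxx_phys_base + offset;
	/* A driver may also override *memattr, e.g. VM_MEMATTR_UNCACHEABLE. */
	return (0);
}

static struct cdevsw xxx_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "xxx",
	.d_mmap = xxx_mmap,
};
#endif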

static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{

	panic("dev_pager_putpage called");
}

static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

	if (before != NULL)
		*before = 0;
	if (after != NULL)
		*after = 0;
	return (TRUE);
}

static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct cdev *dev;
	struct cdevsw *csw;
	vm_memattr_t dummy;
	vm_ooffset_t off;
	vm_paddr_t paddr;
	unsigned int npages;
	int ref;

	/*
	 * Make sure this device can be mapped.
	 */
	dev = handle;
	csw = dev_refthread(dev, &ref);
	if (csw == NULL)
		return (ENXIO);

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	for (off = foff; npages--; off += PAGE_SIZE) {
		if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
			dev_relthread(dev, ref);
			return (EINVAL);
		}
	}

	dev_ref(dev);
	dev_relthread(dev, ref);
	/*
	 * After the loop, "paddr" and "off - PAGE_SIZE" both refer to the
	 * last page probed, so the difference of their page indices is the
	 * physical page number corresponding to object index 0 (assuming
	 * physically contiguous device memory).
	 */
	*color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
	return (0);
}

static void
old_dev_pager_dtor(void *handle)
{

	dev_rel(handle);
}