sys/uvm/uvm_pager.c
/*	$NetBSD: uvm_pager.c,v 1.92.14.1 2010/11/21 18:09:00 riz Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.92.14.1 2010/11/21 18:09:00 riz Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

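/*
 * Illustrative note (not from the original file): since opt_pagermap.h
 * is included above, the 16MB default can be overridden from a kernel
 * config; the option spelling is assumed to be something like
 *
 *	options 	PAGER_MAP_SIZE="(32*1024*1024)"
 *
 * see the defparam in the sys/conf files for the authoritative form.
 */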
size_t pager_map_size = PAGER_MAP_SIZE;

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};
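/*
 * (orientation note, not in the original file: aobj_pager backs
 * anonymous-memory uvm objects, uvm_deviceops device mappings,
 * uvm_vnodeops vnode/file pages, and ubc_pager the kernel's UBC
 * file-i/o windows; see the respective uvm source files.)
 */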

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;
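/*
 * (descriptive note, not in the original file: emergva is a single
 * MAXPHYS-sized piece of KVA reserved for the pagedaemon.  if
 * pager_map fills up while the pagedaemon itself needs a mapping to
 * clean pages, waiting for space could deadlock, so it falls back to
 * this private mapping, serialized by the emerginuse flag.)
 */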

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* the shift rounds MAXPHYS down to whole pages */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
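/*
 * Illustrative usage sketch (not part of the original file): a pager
 * typically brackets device i/o with these two calls, roughly
 *
 *	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WAITOK);
 *	bp->b_data = (void *)kva;
 *	... start the i/o and handle or await completion ...
 *	uvm_pagermapout(kva, npages);
 *
 * pps/npages/bp stand for the caller's array of PG_BUSY pages and its
 * buf; only the function and flag names are taken from this file.
 */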

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the low-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	pmap_update(pmap_kernel());

	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
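/*
 * (descriptive note, not in the original file: the two wakeup() calls
 * above pair with the UVM_UNLOCK_AND_WAIT calls in uvm_pagermapin --
 * one sleep channel for pager_map itself and one for the emergency
 * mapping.)
 */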

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => the buffer is private so need not be locked here
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_error != 0) {
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
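/*
 * Sketch of the nested-buf convention assumed above (illustrative,
 * not from the original file): a caller splitting one large i/o sets
 * up each child buf roughly as
 *
 *	cbp->b_private = mbp;			hypothetical names
 *	cbp->b_iodone = uvm_aio_biodone1;
 *
 * with the master's b_resid preset to the total byte count, so the
 * child that completes last drives biodone() on the master.
 */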

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}
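
/*
 * uvm_aio_aiodone_pages: post-i/o cleanup for a cluster of pages.
 *
 * (descriptive header added here; not in the original file.)
 * => pages arrive still PG_BUSY and are unbusied or released below.
 * => object (non-swap) pages all share one vmobjlock; swap pages
 *    lock each page's object or anon individually.
 */
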
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC BC_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_cflags & BC_NOCACHE) && bioopsp != NULL)
		(*bioopsp->io_pageiodone)(bp);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}