FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_pager.c
/*	$NetBSD: uvm_pager.c,v 1.78 2006/09/15 15:51:13 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.78 2006/09/15 15:51:13 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
        &aobj_pager,
        &uvm_deviceops,
        &uvm_vnodeops,
        &ubc_pager,
};
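
/*
 * Illustrative sketch (not from the original file): each entry above is
 * a pager's operations vector.  In this era of UVM the vector looks
 * roughly as below; pgo_init is the only hook this file calls directly,
 * and the authoritative definition lives in uvm/uvm_pager.h:
 *
 *	struct uvm_pagerops {
 *		void	(*pgo_init)(void);
 *		void	(*pgo_reference)(struct uvm_object *);
 *		void	(*pgo_detach)(struct uvm_object *);
 *		int	(*pgo_fault)(struct uvm_faultinfo *, vaddr_t,
 *			    struct vm_page **, int, int, vm_prot_t, int);
 *		int	(*pgo_get)(struct uvm_object *, voff_t,
 *			    struct vm_page **, int *, int, vm_prot_t,
 *			    int, int);
 *		int	(*pgo_put)(struct uvm_object *, voff_t, voff_t, int);
 *	};
 */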

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
struct simplelock pager_map_wanted_lock;
boolean_t pager_map_wanted;		/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
        u_int lcv;
        vaddr_t sva, eva;

        /*
         * init pager map
         */

        sva = 0;
        pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
            FALSE, NULL);
        simple_lock_init(&pager_map_wanted_lock);
        pager_map_wanted = FALSE;
        emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
            UVM_KMF_VAONLY);
#if defined(DEBUG)
        if (emergva == 0)
                panic("emergva");
#endif
        emerginuse = FALSE;

        /*
         * init ASYNC I/O queue
         */

        TAILQ_INIT(&uvm.aio_done);

        /*
         * call pager init functions
         */
        for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
            lcv++) {
                if (uvmpagerops[lcv]->pgo_init)
                        uvmpagerops[lcv]->pgo_init();
        }
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
        vsize_t size;
        vaddr_t kva;
        vaddr_t cva;
        struct vm_page *pp;
        vm_prot_t prot;
        const boolean_t pdaemon = curproc == uvm.pagedaemon_proc;
        UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

        /*
         * compute protection.  outgoing I/O only needs read
         * access to the page, whereas incoming needs read/write.
         */

        prot = VM_PROT_READ;
        if (flags & UVMPAGER_MAPIN_READ)
                prot |= VM_PROT_WRITE;

ReStart:
        size = npages << PAGE_SHIFT;
        kva = 0;			/* let system choose VA */

        if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
                if (pdaemon) {
                        simple_lock(&pager_map_wanted_lock);
                        if (emerginuse) {
                                UVM_UNLOCK_AND_WAIT(&emergva,
                                    &pager_map_wanted_lock, FALSE,
                                    "emergva", 0);
                                goto ReStart;
                        }
                        emerginuse = TRUE;
                        simple_unlock(&pager_map_wanted_lock);
                        kva = emergva;
                        /* MAXPHYS >> PAGE_SHIFT rounds down to whole pages */
                        KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
                        goto enter;
                }
                if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
                        UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
                        return(0);
                }
                simple_lock(&pager_map_wanted_lock);
                pager_map_wanted = TRUE;
                UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
                    "pager_map", 0);
                goto ReStart;
        }

enter:
        /* got it */
        for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
                pp = *pps++;
                KASSERT(pp);
                KASSERT(pp->flags & PG_BUSY);
                pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
        }
        pmap_update(vm_map_pmap(pager_map));

        UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
        return(kva);
}
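
/*
 * Illustrative sketch (hypothetical usage, not from the original file):
 * a pager needing a kernel mapping for device I/O brackets the transfer
 * with uvm_pagermapin()/uvm_pagermapout().  All pages must be PG_BUSY,
 * and UVMPAGER_MAPIN_READ means incoming I/O, i.e. a writable mapping:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_READ);
 *	if (kva == 0)
 *		return EAGAIN;	   (only possible without _WAITOK)
 *	bp->b_data = (void *)kva;
 *	... start the I/O, e.g. VOP_STRATEGY(vp, bp) ...
 *
 * the matching uvm_pagermapout() happens at iodone time; see
 * uvm_aio_aiodone() below, which unmaps bp->b_data after the I/O.
 */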

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the low-level pmap mappings by hand and then remove the
 * map entry itself, waking up anyone waiting for space.
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
        vsize_t size = npages << PAGE_SHIFT;
        struct vm_map_entry *entries;
        UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

        /*
         * duplicate uvm_unmap, but add in pager_map_wanted handling.
         */

        pmap_kremove(kva, npages << PAGE_SHIFT);
        if (kva == emergva) {
                simple_lock(&pager_map_wanted_lock);
                emerginuse = FALSE;
                wakeup(&emergva);
                simple_unlock(&pager_map_wanted_lock);
                return;
        }

        vm_map_lock(pager_map);
        uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
        simple_lock(&pager_map_wanted_lock);
        if (pager_map_wanted) {
                pager_map_wanted = FALSE;
                wakeup(pager_map);
        }
        simple_unlock(&pager_map_wanted_lock);
        vm_map_unlock(pager_map);
        if (entries)
                uvm_unmap_detach(entries, 0);
        pmap_update(pmap_kernel());
        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(struct buf *bp)
{
        struct buf *mbp = bp->b_private;

        KASSERT(mbp != bp);
        if (bp->b_flags & B_ERROR) {
                mbp->b_flags |= B_ERROR;
                mbp->b_error = bp->b_error;
        }
        mbp->b_resid -= bp->b_bcount;
        putiobuf(bp);
        if (mbp->b_resid == 0) {
                biodone(mbp);
        }
}
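
/*
 * Illustrative sketch (hypothetical, not from the original file): a
 * caller splitting one logical transfer across several sub-bufs sets
 * the master buf's b_resid to the total byte count and points each
 * child at the master, so the child completions counted off by
 * uvm_aio_biodone1() above eventually biodone() the master:
 *
 *	mbp->b_bcount = mbp->b_resid = totalbytes;
 *	for each contiguous on-disk chunk {
 *		bp = getiobuf();
 *		bp->b_data = ...;  bp->b_bcount = ...;  bp->b_blkno = ...;
 *		bp->b_private = mbp;
 *		bp->b_iodone = uvm_aio_biodone1;
 *		VOP_STRATEGY(vp, bp);
 *	}
 */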

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
        /* reset b_iodone for when this is a single-buf i/o. */
        bp->b_iodone = uvm_aio_aiodone;

        simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
        TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
        wakeup(&uvm.aiodoned);
        simple_unlock(&uvm.aiodoned_lock);
}
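
/*
 * Illustrative sketch (hypothetical, not from the original file): the
 * consumer of uvm.aio_done is the aiodoned kernel thread, which sleeps
 * on &uvm.aiodoned, dequeues completed bufs, and runs their b_iodone
 * hooks (i.e. uvm_aio_aiodone) in thread context, roughly:
 *
 *	for (;;) {
 *		simple_lock(&uvm.aiodoned_lock);
 *		if ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL) {
 *			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
 *			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
 *			continue;
 *		}
 *		TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
 *		simple_unlock(&uvm.aiodoned_lock);
 *		(*bp->b_iodone)(bp);
 *	}
 */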

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
        int npages = bp->b_bufsize >> PAGE_SHIFT;
        struct vm_page *pg, *pgs[npages];
        struct uvm_object *uobj;
        struct simplelock *slock;
        int s, i, error, swslot;
        boolean_t write, swap;
        UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

        error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
        write = (bp->b_flags & B_READ) == 0;
        /* XXXUBC B_NOCACHE is for swap pager, should be done differently */
        if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
                (*bioops.io_pageiodone)(bp);
        }

        uobj = NULL;
        for (i = 0; i < npages; i++) {
                pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
                UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
        }
        uvm_pagermapout((vaddr_t)bp->b_data, npages);

        swslot = 0;
        slock = NULL;
        pg = pgs[0];
        swap = (pg->uanon != NULL && pg->uobject == NULL) ||
                (pg->pqflags & PQ_AOBJ) != 0;
        if (!swap) {
                uobj = pg->uobject;
                slock = &uobj->vmobjlock;
                simple_lock(slock);
                uvm_lock_pageq();
        } else {
#if defined(VMSWAP)
                if (error) {
                        if (pg->uobject != NULL) {
                                swslot = uao_find_swslot(pg->uobject,
                                    pg->offset >> PAGE_SHIFT);
                        } else {
                                KASSERT(pg->uanon != NULL);
                                swslot = pg->uanon->an_swslot;
                        }
                        KASSERT(swslot);
                }
#else /* defined(VMSWAP) */
                panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
        }
        for (i = 0; i < npages; i++) {
                pg = pgs[i];
                KASSERT(swap || pg->uobject == uobj);
                UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
                /*
                 * for swap i/os, lock each page's object (or anon)
                 * individually since each page may need a different lock.
                 */

                if (swap) {
                        if (pg->uobject != NULL) {
                                slock = &pg->uobject->vmobjlock;
                        } else {
                                slock = &pg->uanon->an_lock;
                        }
                        simple_lock(slock);
                        uvm_lock_pageq();
                }
#endif /* defined(VMSWAP) */

                /*
                 * process errors.  for reads, just mark the page to be freed.
                 * for writes, if the error was ENOMEM, we assume this was
                 * a transient failure so we mark the page dirty so that
                 * we'll try to write it again later.  for all other write
                 * errors, we assume the error is permanent, thus the data
                 * in the page is lost.  bummer.
                 */

                if (error) {
                        int slot;
                        if (!write) {
                                pg->flags |= PG_RELEASED;
                                continue;
                        } else if (error == ENOMEM) {
                                if (pg->flags & PG_PAGEOUT) {
                                        pg->flags &= ~PG_PAGEOUT;
                                        uvmexp.paging--;
                                }
                                pg->flags &= ~PG_CLEAN;
                                uvm_pageactivate(pg);
                                slot = 0;
                        } else
                                slot = SWSLOT_BAD;

#if defined(VMSWAP)
                        if (swap) {
                                if (pg->uobject != NULL) {
                                        int oldslot;
                                        oldslot = uao_set_swslot(pg->uobject,
                                            pg->offset >> PAGE_SHIFT, slot);
                                        KASSERT(oldslot == swslot + i);
                                } else {
                                        KASSERT(pg->uanon->an_swslot ==
                                            swslot + i);
                                        pg->uanon->an_swslot = slot;
                                }
                        }
#endif /* defined(VMSWAP) */
                }

                /*
                 * if the page is PG_FAKE, this must have been a read to
                 * initialize the page.  clear PG_FAKE and activate the page.
                 * we must also clear the pmap "modified" flag since it may
                 * still be set from the page's previous identity.
                 */

                if (pg->flags & PG_FAKE) {
                        KASSERT(!write);
                        pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
                        pg->pqflags |= PQ_READAHEAD;
                        uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
                        KASSERT((pg->flags & PG_CLEAN) != 0);
                        uvm_pageenqueue(pg);
                        pmap_clear_modify(pg);
                }

                /*
                 * do accounting for pagedaemon i/o and arrange to free
                 * the pages instead of just unbusying them.
                 */

                if (pg->flags & PG_PAGEOUT) {
                        pg->flags &= ~PG_PAGEOUT;
                        uvmexp.paging--;
                        uvmexp.pdfreed++;
                        pg->flags |= PG_RELEASED;
                }

#if defined(VMSWAP)
                /*
                 * for swap pages, unlock everything for this page now.
                 */

                if (swap) {
                        if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
                            (pg->flags & PG_RELEASED) != 0) {
                                uvm_unlock_pageq();
                                uvm_anon_release(pg->uanon);
                        } else {
                                uvm_page_unbusy(&pg, 1);
                                uvm_unlock_pageq();
                                simple_unlock(slock);
                        }
                }
#endif /* defined(VMSWAP) */
        }
        if (!swap) {
                uvm_page_unbusy(pgs, npages);
                uvm_unlock_pageq();
                simple_unlock(slock);
        } else {
#if defined(VMSWAP)
                KASSERT(write);

                /* these pages are now only in swap. */
                simple_lock(&uvm.swap_data_lock);
                KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
                if (error != ENOMEM)
                        uvmexp.swpgonly += npages;
                simple_unlock(&uvm.swap_data_lock);
                if (error) {
                        if (error != ENOMEM)
                                uvm_swap_markbad(swslot, npages);
                        else
                                uvm_swap_free(swslot, npages);
                }
                uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
        }
        s = splbio();
        if (write && (bp->b_flags & B_AGE) != 0) {
                vwakeup(bp);
        }
        putiobuf(bp);
        splx(s);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
        struct vm_page *pg;
        paddr_t pa;
        boolean_t rv;

        rv = pmap_extract(pmap_kernel(), kva, &pa);
        KASSERT(rv);
        pg = PHYS_TO_VM_PAGE(pa);
        KASSERT(pg != NULL);
        return (pg);
}
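
/*
 * Illustrative sketch (hypothetical, not from the original file):
 * uvm_pageratop() is the inverse of uvm_pagermapin() for any page of a
 * mapped run, which is how uvm_aio_aiodone() above recovers its page
 * pointers from bp->b_data:
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	for (i = 0; i < npages; i++)
 *		KASSERT(uvm_pageratop(kva + (i << PAGE_SHIFT)) == pgs[i]);
 *	uvm_pagermapout(kva, npages);
 */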