FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pager.c
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static vm_object_t dead_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
	vm_ooffset_t));
static void dead_pager_putpages __P((vm_object_t, vm_page_t *, int, int, int *));
static boolean_t dead_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
static void dead_pager_dealloc __P((vm_object_t));

static int
dead_pager_getpages(obj, ma, count, req)
	vm_object_t obj;
	vm_page_t *ma;
	int count;
	int req;
{
	return VM_PAGER_FAIL;
}

static vm_object_t
dead_pager_alloc(handle, size, prot, off)
	void *handle;
	vm_ooffset_t size;
	vm_prot_t prot;
	vm_ooffset_t off;
{
	return NULL;
}

static void
dead_pager_putpages(object, m, count, flags, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int flags;
	int *rtvals;
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

static boolean_t
dead_pager_haspage(object, pindex, prev, next)
	vm_object_t object;
	vm_pindex_t pindex;
	int *prev;
	int *next;
{
	if (prev)
		*prev = 0;
	if (next)
		*next = 0;
	return FALSE;
}

static void
dead_pager_dealloc(object)
	vm_object_t object;
{
	return;
}

static struct pagerops deadpagerops = {
	NULL,			/* pgo_init */
	dead_pager_alloc,	/* pgo_alloc */
	dead_pager_dealloc,	/* pgo_dealloc */
	dead_pager_getpages,	/* pgo_getpages */
	dead_pager_putpages,	/* pgo_putpages */
	dead_pager_haspage,	/* pgo_haspage */
	NULL			/* pgo_pageunswapped */
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

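/*
 * The vm_pager_get_pages()/put_pages()/has_page() wrappers (inlined in
 * vm/vm_pager.h, see the list further below) dispatch through pagertab
 * by object type.  A minimal sketch of that dispatch pattern, mirroring
 * what vm_pager_deallocate() below does for pgo_dealloc:
 *
 *	(*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
 */
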
/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.  That worst
 * case works out to 64 * 64KB = 4MB, so the 8MB default leaves headroom.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;	/* swap buffers kva */

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off)
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}

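/*
 * Usage sketch (hypothetical caller; the vnode 'vp' and the size and
 * protection arguments are illustrative only): create a vnode-backed
 * object, remembering that the type-specific allocator may fail:
 *
 *	vm_object_t object;
 *
 *	object = vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL, 0);
 *	if (object == NULL)
 *		(handle the allocation failure)
 */
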
/*
 * Dissociate the pager from an object: hand the object to its
 * type-specific deallocation routine.
 */
void
vm_pager_deallocate(object)
	vm_object_t object;
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_strategy:
 *
 *	Called with no specific spl.  Executes the object's strategy
 *	routine directly, failing the buf with ENXIO if the pager
 *	provides none.
 */

void
vm_pager_strategy(vm_object_t object, struct buf *bp)
{
	if (pagertab[object->type]->pgo_strategy) {
		(*pagertab[object->type]->pgo_strategy)(object, bp);
	} else {
		bp->b_flags |= B_ERROR;
		bp->b_error = ENXIO;
		biodone(bp);
	}
}

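/*
 * Usage sketch (hypothetical caller): the buf is prepared (b_blkno,
 * b_bcount, data buffer) before being handed over, and completion is
 * reported through biodone() on either path above:
 *
 *	vm_pager_strategy(object, bp);
 */
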
/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

#if 0
/*
 * vm_pager_sync:
 *
 *	Called by the pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

#endif

/*
 * Map a single page into pager_map kernel virtual address space,
 * sleeping in kmem_alloc_wait() until KVA becomes available.
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

/*
 * Undo vm_pager_map_page(): remove the mapping and release the KVA,
 * waking up any waiters.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}

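/*
 * Usage sketch (hypothetical caller): the pair above provides a
 * short-lived kernel mapping of a page 'm', e.g. to zero it:
 *
 *	vm_offset_t kva;
 *
 *	kva = vm_pager_map_page(m);
 *	bzero((caddr_t)kva, PAGE_SIZE);
 *	vm_pager_unmap_page(kva);
 */
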
/*
 * Look up a VM object on the given pager list by its backing handle.
 */
vm_object_t
vm_pager_object_lookup(pg_list, handle)
	register struct pagerlst *pg_list;
	void *handle;
{
	register vm_object_t object;

	for (object = TAILQ_FIRST(pg_list); object != NULL; object = TAILQ_NEXT(object, pager_object_list))
		if (object->handle == handle)
			return (object);
	return (NULL);
}

/*
 * initialize a physical buffer
 */

static void
initpbuf(struct buf *bp)
{
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = QUEUE_NONE;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_xflags = 0;
	bp->b_flags = 0;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * allocate a physical buffer
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(pfreecnt)
	int *pfreecnt;
{
	int s;
	struct buf *bp;

	s = splvm();

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0) {
				tsleep(pfreecnt, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		tsleep(&bswneeded, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;
	splx(s);

	initpbuf(bp);
	return bp;
}

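/*
 * Usage sketch (hypothetical subsystem; 'mysub_pbuf_freecnt' is an
 * invented name): keep a private quota as described above, set once
 * nswbuf is known, and always pair getpbuf() with relpbuf() on the
 * same counter.  trypbuf() below is the non-blocking variant.
 *
 *	static int mysub_pbuf_freecnt = -1;	(set to nswbuf / 2 at init)
 *
 *	bp = getpbuf(&mysub_pbuf_freecnt);	(may sleep)
 *	(set up and issue the I/O)
 *	relpbuf(bp, &mysub_pbuf_freecnt);
 */
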
/*
 * allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 */
struct buf *
trypbuf(pfreecnt)
	int *pfreecnt;
{
	int s;
	struct buf *bp;

	s = splvm();
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);

	--*pfreecnt;

	splx(s);

	initpbuf(bp);

	return bp;
}

/*
 * release a physical buffer
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(bp, pfreecnt)
	struct buf *bp;
	int *pfreecnt;
{
	int s;

	s = splvm();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	if (bp->b_vp)
		pbrelvp(bp);

	BUF_UNLOCK(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	splx(s);
}

/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */

/*
 * vm_pager_chain_iodone:
 *
 *	I/O completion routine for a child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be
 *	lost.
 */

static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct buf *bp;

	if ((bp = nbp->b_chain.parent) != NULL) {
		if (nbp->b_flags & B_ERROR) {
			bp->b_flags |= B_ERROR;
			bp->b_error = nbp->b_error;
		} else if (nbp->b_resid != 0) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		} else {
			bp->b_resid -= nbp->b_bcount;
		}
		nbp->b_chain.parent = NULL;
		--bp->b_chain.count;
		if (bp->b_flags & B_WANT) {
			bp->b_flags &= ~B_WANT;
			wakeup(bp);
		}
		if (!bp->b_chain.count && (bp->b_xflags & BX_AUTOCHAINDONE)) {
			bp->b_xflags &= ~BX_AUTOCHAINDONE;
			if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
				bp->b_flags |= B_ERROR;
				bp->b_error = EINVAL;
			}
			biodone(bp);
		}
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 * getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, the parent buffer is woken up if it is waiting
 *	(B_WANT), and errors are automatically propagated to the parent.
 *
 *	Since these are brand new buffers, we do not have to clear B_INVAL
 *	and B_ERROR because they are already clear.
 */

struct buf *
getchainbuf(struct buf *bp, struct vnode *vp, int flags)
{
	struct buf *nbp = getpbuf(NULL);

	nbp->b_chain.parent = bp;
	++bp->b_chain.count;

	if (bp->b_chain.count > 4)
		waitchainbuf(bp, 4, 0);

	nbp->b_flags = B_CALL | (bp->b_flags & B_ORDERED) | flags;
	nbp->b_rcred = nbp->b_wcred = proc0.p_ucred;
	nbp->b_iodone = vm_pager_chain_iodone;

	crhold(nbp->b_rcred);
	crhold(nbp->b_wcred);

	if (vp)
		pbgetvp(vp, nbp);
	return(nbp);
}

/*
 * flushchainbuf:
 *
 *	Start I/O on a child buffer, or complete it immediately via
 *	biodone() if it carries no data.
 */
void
flushchainbuf(struct buf *nbp)
{
	if (nbp->b_bcount) {
		nbp->b_bufsize = nbp->b_bcount;
		if ((nbp->b_flags & B_READ) == 0)
			nbp->b_dirtyend = nbp->b_bcount;
		BUF_KERNPROC(nbp);
		VOP_STRATEGY(nbp->b_vp, nbp);
	} else {
		biodone(nbp);
	}
}

/*
 * waitchainbuf:
 *
 *	Sleep until at most 'count' children remain outstanding on the
 *	parent, then optionally (done != 0) terminate the parent with
 *	biodone().
 */
void
waitchainbuf(struct buf *bp, int count, int done)
{
	int s;

	s = splbio();
	while (bp->b_chain.count > count) {
		bp->b_flags |= B_WANT;
		tsleep(bp, PRIBIO + 4, "bpchain", 0);
	}
	if (done) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		}
		biodone(bp);
	}
	splx(s);
}

/*
 * autochaindone:
 *
 *	Terminate the parent immediately if no children remain, otherwise
 *	arrange for the last completing child to call biodone() on it.
 */
void
autochaindone(struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_chain.count == 0)
		biodone(bp);
	else
		bp->b_xflags |= BX_AUTOCHAINDONE;
	splx(s);
}

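/*
 * Usage sketch (hypothetical caller) tying the chaining primitives
 * together: split a parent bp into child I/Os, then either wait
 * synchronously or let the last completing child finish the parent:
 *
 *	while (work remains) {
 *		nbp = getchainbuf(bp, vp, B_ASYNC);
 *		(fill in nbp: b_data, b_blkno, b_bcount)
 *		flushchainbuf(nbp);
 *	}
 *	waitchainbuf(bp, 0, 1);		(synchronous: wait, then biodone)
 *	-or-
 *	autochaindone(bp);		(asynchronous completion)
 */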