FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pager.c
1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 /*
62 * Paging space routine stubs. Emulates a matchmaker-like interface
63 * for builtin pagers.
64 */
65
66 #include <sys/cdefs.h>
67 __FBSDID("$FreeBSD: releng/9.0/sys/vm/vm_pager.c 217529 2011-01-18 04:54:43Z alc $");
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/vnode.h>
73 #include <sys/bio.h>
74 #include <sys/buf.h>
75 #include <sys/ucred.h>
76 #include <sys/malloc.h>
77
78 #include <vm/vm.h>
79 #include <vm/vm_param.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_pager.h>
83 #include <vm/vm_extern.h>
84
85 int cluster_pbuf_freecnt = -1; /* unlimited to begin with */
86
87 static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
88 static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
89 vm_ooffset_t, struct ucred *);
90 static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
91 static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
92 static void dead_pager_dealloc(vm_object_t);
93
94 static int
95 dead_pager_getpages(obj, ma, count, req)
96 vm_object_t obj;
97 vm_page_t *ma;
98 int count;
99 int req;
100 {
101 return VM_PAGER_FAIL;
102 }
103
104 static vm_object_t
105 dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
106 vm_ooffset_t off, struct ucred *cred)
107 {
108 return NULL;
109 }
110
111 static void
112 dead_pager_putpages(object, m, count, flags, rtvals)
113 vm_object_t object;
114 vm_page_t *m;
115 int count;
116 int flags;
117 int *rtvals;
118 {
119 int i;
120
121 for (i = 0; i < count; i++) {
122 rtvals[i] = VM_PAGER_AGAIN;
123 }
124 }
125
126 static boolean_t
127 dead_pager_haspage(object, pindex, prev, next)
128 vm_object_t object;
129 vm_pindex_t pindex;
130 int *prev;
131 int *next;
132 {
133 if (prev)
134 *prev = 0;
135 if (next)
136 *next = 0;
137 return FALSE;
138 }
139
140 static void
141 dead_pager_dealloc(object)
142 vm_object_t object;
143 {
144 return;
145 }
146
147 static struct pagerops deadpagerops = {
148 .pgo_alloc = dead_pager_alloc,
149 .pgo_dealloc = dead_pager_dealloc,
150 .pgo_getpages = dead_pager_getpages,
151 .pgo_putpages = dead_pager_putpages,
152 .pgo_haspage = dead_pager_haspage,
153 };
154
155 struct pagerops *pagertab[] = {
156 &defaultpagerops, /* OBJT_DEFAULT */
157 &swappagerops, /* OBJT_SWAP */
158 &vnodepagerops, /* OBJT_VNODE */
159 &devicepagerops, /* OBJT_DEVICE */
160 &physpagerops, /* OBJT_PHYS */
161 &deadpagerops, /* OBJT_DEAD */
162 &sgpagerops /* OBJT_SG */
163 };
164
165 static const int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
166
167 /*
168 * Kernel address space for mapping pages.
169 * Used by pagers where KVAs are needed for IO.
170 *
171 * XXX needs to be large enough to support the number of pending async
172 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
173 * (MAXPHYS == 64k) if you want to get the most efficiency.
174 */
175 vm_map_t pager_map;
176 static int bswneeded;
177 static vm_offset_t swapbkva; /* swap buffers kva */
178 struct mtx pbuf_mtx;
179 static TAILQ_HEAD(swqueue, buf) bswlist;
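As a rough worked example, taking the figures quoted in the comment above at face value: 64 pending cleaning requests times a 64 KB swap cluster comes to 4 MB of pager KVA; the swap-buffer region actually carved out of pager_map in vm_pager_bufferinit() below is nswbuf * MAXPHYS bytes.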
180
181 void
182 vm_pager_init()
183 {
184 struct pagerops **pgops;
185
186 TAILQ_INIT(&bswlist);
187 /*
188 * Initialize known pagers
189 */
190 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
191 if (pgops && ((*pgops)->pgo_init != NULL))
192 (*(*pgops)->pgo_init) ();
193 }
194
195 void
196 vm_pager_bufferinit()
197 {
198 struct buf *bp;
199 int i;
200
201 mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
202 bp = swbuf;
203 /*
204 * Now set up swap and physical I/O buffer headers.
205 */
206 for (i = 0; i < nswbuf; i++, bp++) {
207 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
208 BUF_LOCKINIT(bp);
209 LIST_INIT(&bp->b_dep);
210 bp->b_rcred = bp->b_wcred = NOCRED;
211 bp->b_xflags = 0;
212 }
213
214 cluster_pbuf_freecnt = nswbuf / 2;
215 vnode_pbuf_freecnt = nswbuf / 2 + 1;
216
217 swapbkva = kmem_alloc_nofault(pager_map, nswbuf * MAXPHYS);
218 if (!swapbkva)
219 panic("Not enough pager_map VM space for physical buffers");
220 }
221
222 /*
223 * Allocate an instance of a pager of the given type.
224 * Size, protection and offset parameters are passed in for pagers that
225 * need to perform page-level validation (e.g. the device pager).
226 */
227 vm_object_t
228 vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
229 vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
230 {
231 vm_object_t ret;
232 struct pagerops *ops;
233
234 ops = pagertab[type];
235 if (ops)
236 ret = (*ops->pgo_alloc) (handle, size, prot, off, cred);
237 else
238 ret = NULL;
239 return (ret);
240 }
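For illustration only (not part of vm_pager.c): a caller wanting an anonymous, swap-backed object can go through this hook, roughly the pattern md(4) uses. The helper name and its page-count parameter below are invented for the sketch; the credential is simply the current thread's.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

/* Hypothetical helper: create an anonymous, swap-backed object. */
static vm_object_t
example_swap_object(vm_pindex_t npages)
{

	/* OBJT_SWAP with a NULL handle yields an unnamed object. */
	return (vm_pager_allocate(OBJT_SWAP, NULL,
	    (vm_ooffset_t)npages << PAGE_SHIFT, VM_PROT_DEFAULT,
	    (vm_ooffset_t)0, curthread->td_ucred));
}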
241
242 /*
243 * The object must be locked.
244 */
245 void
246 vm_pager_deallocate(object)
247 vm_object_t object;
248 {
249
250 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
251 (*pagertab[object->type]->pgo_dealloc) (object);
252 }
253
254 /*
255 * vm_pager_get_pages() - inline, see vm/vm_pager.h
256 * vm_pager_put_pages() - inline, see vm/vm_pager.h
257 * vm_pager_has_page() - inline, see vm/vm_pager.h
258 */
259
260 /*
261 * Search the specified pager object list for an object with the
262 * specified handle. If an object with the specified handle is found,
263 * increase its reference count and return it. Otherwise, return NULL.
264 *
265 * The pager object list must be locked.
266 */
267 vm_object_t
268 vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
269 {
270 vm_object_t object;
271
272 TAILQ_FOREACH(object, pg_list, pager_object_list) {
273 VM_OBJECT_LOCK(object);
274 if (object->handle == handle &&
275 (object->flags & OBJ_DEAD) == 0) {
276 vm_object_reference_locked(object);
277 VM_OBJECT_UNLOCK(object);
278 break;
279 }
280 VM_OBJECT_UNLOCK(object);
281 }
282 return (object);
283 }
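An illustrative sketch of the calling convention; the list, mutex, and helper names are invented, standing in for a real pager's private object list (the device pager keeps one, for example). The list lock must be held across the lookup, and a non-NULL result comes back with an extra reference that the caller eventually drops with vm_object_deallocate().

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

static struct pagerlst example_pager_object_list =
    TAILQ_HEAD_INITIALIZER(example_pager_object_list);
static struct mtx example_pager_list_mtx;
MTX_SYSINIT(example_pager_list, &example_pager_list_mtx,
    "example pager list", MTX_DEF);

/* Hypothetical helper: find (and reference) the object for 'handle'. */
static vm_object_t
example_lookup(void *handle)
{
	vm_object_t object;

	mtx_lock(&example_pager_list_mtx);	/* list must stay locked */
	object = vm_pager_object_lookup(&example_pager_object_list, handle);
	mtx_unlock(&example_pager_list_mtx);
	return (object);			/* referenced, or NULL */
}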
284
285 /*
286 * initialize a physical buffer
287 */
288
289 /*
290 * XXX This probably belongs in vfs_bio.c
291 */
292 static void
293 initpbuf(struct buf *bp)
294 {
295 KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
296 KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
297 bp->b_rcred = NOCRED;
298 bp->b_wcred = NOCRED;
299 bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */
300 bp->b_saveaddr = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
301 bp->b_data = bp->b_saveaddr;
302 bp->b_kvabase = bp->b_saveaddr;
303 bp->b_kvasize = MAXPHYS;
304 bp->b_xflags = 0;
305 bp->b_flags = 0;
306 bp->b_ioflags = 0;
307 bp->b_iodone = NULL;
308 bp->b_error = 0;
309 BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
310 }
311
312 /*
313 * allocate a physical buffer
314 *
315 * There are a limited number (nswbuf) of physical buffers. We need
316 * to make sure that no single subsystem is able to hog all of them,
317 * so each subsystem implements a counter which is typically initialized
318 * to 1/2 nswbuf. getpbuf() decrements this counter in allocation and
319 * increments it on release, and blocks if the counter hits zero. A
320 * subsystem may initialize the counter to -1 to disable the feature,
321 * but it must still be sure to match up all uses of getpbuf() with
322 * relpbuf() using the same variable.
323 *
324 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
325 * relatively soon when the rest of the subsystems get smart about it. XXX
326 */
327 struct buf *
328 getpbuf(int *pfreecnt)
329 {
330 struct buf *bp;
331
332 mtx_lock(&pbuf_mtx);
333
334 for (;;) {
335 if (pfreecnt) {
336 while (*pfreecnt == 0) {
337 msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
338 }
339 }
340
341 /* get a bp from the swap buffer header pool */
342 if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
343 break;
344
345 bswneeded = 1;
346 msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
347 /* loop in case someone else grabbed one */
348 }
349 TAILQ_REMOVE(&bswlist, bp, b_freelist);
350 if (pfreecnt)
351 --*pfreecnt;
352 mtx_unlock(&pbuf_mtx);
353
354 initpbuf(bp);
355 return bp;
356 }
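A minimal sketch of the counter protocol described above, assuming a hypothetical subsystem with its own limit: the same variable is handed to getpbuf() and relpbuf(), and would normally be set to a fraction of nswbuf at initialization time, as vm_pager_bufferinit() does for cluster_pbuf_freecnt.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>

/* Hypothetical limit; set to e.g. nswbuf / 2 at subsystem init. */
static int example_pbuf_freecnt = -1;	/* -1 disables the limit */

static void
example_pbuf_io(void)
{
	struct buf *bp;

	bp = getpbuf(&example_pbuf_freecnt);	/* may sleep in "wswbuf0" */
	/* ... set up b_iocmd, b_data, b_bcount and perform the I/O ... */
	relpbuf(bp, &example_pbuf_freecnt);	/* wakes any waiter */
}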
357
358 /*
359 * allocate a physical buffer, if one is available.
360 *
361 * Note that there is no NULL hack here - all subsystems using this
362 * call understand how to use pfreecnt.
363 */
364 struct buf *
365 trypbuf(int *pfreecnt)
366 {
367 struct buf *bp;
368
369 mtx_lock(&pbuf_mtx);
370 if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
371 mtx_unlock(&pbuf_mtx);
372 return NULL;
373 }
374 TAILQ_REMOVE(&bswlist, bp, b_freelist);
375
376 --*pfreecnt;
377
378 mtx_unlock(&pbuf_mtx);
379
380 initpbuf(bp);
381
382 return bp;
383 }
384
385 /*
386 * release a physical buffer
387 *
388 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
389 * relatively soon when the rest of the subsystems get smart about it. XXX
390 */
391 void
392 relpbuf(struct buf *bp, int *pfreecnt)
393 {
394
395 if (bp->b_rcred != NOCRED) {
396 crfree(bp->b_rcred);
397 bp->b_rcred = NOCRED;
398 }
399 if (bp->b_wcred != NOCRED) {
400 crfree(bp->b_wcred);
401 bp->b_wcred = NOCRED;
402 }
403
404 KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
405 KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));
406
407 BUF_UNLOCK(bp);
408
409 mtx_lock(&pbuf_mtx);
410 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
411
412 if (bswneeded) {
413 bswneeded = 0;
414 wakeup(&bswneeded);
415 }
416 if (pfreecnt) {
417 if (++*pfreecnt == 1)
418 wakeup(pfreecnt);
419 }
420 mtx_unlock(&pbuf_mtx);
421 }
422
423 /*
424 * Associate a p-buffer with a vnode.
425 *
426 * Also sets B_PAGING flag to indicate that vnode is not fully associated
427 * with the buffer. i.e. the bp has not been linked into the vnode or
428 * ref-counted.
429 */
430 void
431 pbgetvp(struct vnode *vp, struct buf *bp)
432 {
433
434 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
435 KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));
436
437 bp->b_vp = vp;
438 bp->b_flags |= B_PAGING;
439 bp->b_bufobj = &vp->v_bufobj;
440 }
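Illustrative only: a hypothetical paging-I/O path pairing pbgetvp() with pbrelvp() around the actual transfer; the buffer setup and I/O submission are elided since they depend on the consumer.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/* Hypothetical sketch: borrow a pbuf, attach it to 'vp', then undo it. */
static void
example_paging_io(struct vnode *vp, int *pfreecnt)
{
	struct buf *bp;

	bp = getpbuf(pfreecnt);
	pbgetvp(vp, bp);	/* sets b_vp, b_bufobj and B_PAGING */
	/* ... fill in the buffer and issue the read or write here ... */
	pbrelvp(bp);		/* must be undone before relpbuf() */
	relpbuf(bp, pfreecnt);
}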
441
442 /*
443 * Associate a p-buffer with a bufobj.
444 *
445 * Also sets B_PAGING flag to indicate that the bufobj is not fully
446 * associated with the buffer, i.e. the bp has not been linked into the
447 * bufobj or ref-counted.
448 */
449 void
450 pbgetbo(struct bufobj *bo, struct buf *bp)
451 {
452
453 KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
454 KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));
455
456 bp->b_flags |= B_PAGING;
457 bp->b_bufobj = bo;
458 }
459
460 /*
461 * Disassociate a p-buffer from a vnode.
462 */
463 void
464 pbrelvp(struct buf *bp)
465 {
466
467 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
468 KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
469
470 /* XXX REMOVE ME */
471 BO_LOCK(bp->b_bufobj);
472 if (TAILQ_NEXT(bp, b_bobufs) != NULL) {
473 panic(
474 "relpbuf(): b_vp was probably reassignbuf()d %p %x",
475 bp,
476 (int)bp->b_flags
477 );
478 }
479 BO_UNLOCK(bp->b_bufobj);
480 bp->b_vp = NULL;
481 bp->b_bufobj = NULL;
482 bp->b_flags &= ~B_PAGING;
483 }
484
485 /*
486 * Disassociate a p-buffer from a bufobj.
487 */
488 void
489 pbrelbo(struct buf *bp)
490 {
491
492 KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
493 KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
494
495 /* XXX REMOVE ME */
496 BO_LOCK(bp->b_bufobj);
497 if (TAILQ_NEXT(bp, b_bobufs) != NULL) {
498 panic(
499 "relpbuf(): b_vp was probably reassignbuf()d %p %x",
500 bp,
501 (int)bp->b_flags
502 );
503 }
504 BO_UNLOCK(bp->b_bufobj);
505 bp->b_bufobj = NULL;
506 bp->b_flags &= ~B_PAGING;
507 }