FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pager.c
1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 /*
62 * Paging space routine stubs. Emulates a matchmaker-like interface
63 * for builtin pagers.
64 */
65
66 #include <sys/cdefs.h>
67 __FBSDID("$FreeBSD: releng/10.0/sys/vm/vm_pager.c 252330 2013-06-28 03:51:20Z jeff $");
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/vnode.h>
73 #include <sys/bio.h>
74 #include <sys/buf.h>
75 #include <sys/ucred.h>
76 #include <sys/malloc.h>
77 #include <sys/rwlock.h>
78
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_pager.h>
85 #include <vm/vm_extern.h>
86
87 int cluster_pbuf_freecnt = -1; /* unlimited to begin with */
88
89 static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
90 static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
91 vm_ooffset_t, struct ucred *);
92 static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
93 static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
94 static void dead_pager_dealloc(vm_object_t);
95
96 static int
97 dead_pager_getpages(obj, ma, count, req)
98 vm_object_t obj;
99 vm_page_t *ma;
100 int count;
101 int req;
102 {
103 return VM_PAGER_FAIL;
104 }
105
106 static vm_object_t
107 dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
108 vm_ooffset_t off, struct ucred *cred)
109 {
110 return NULL;
111 }
112
113 static void
114 dead_pager_putpages(object, m, count, flags, rtvals)
115 vm_object_t object;
116 vm_page_t *m;
117 int count;
118 int flags;
119 int *rtvals;
120 {
121 int i;
122
123 for (i = 0; i < count; i++) {
124 rtvals[i] = VM_PAGER_AGAIN;
125 }
126 }
127
128 static boolean_t
129 dead_pager_haspage(object, pindex, prev, next)
130 vm_object_t object;
131 vm_pindex_t pindex;
132 int *prev;
133 int *next;
134 {
135 if (prev)
136 *prev = 0;
137 if (next)
138 *next = 0;
139 return FALSE;
140 }
141
142 static void
143 dead_pager_dealloc(object)
144 vm_object_t object;
145 {
146 return;
147 }
148
149 static struct pagerops deadpagerops = {
150 .pgo_alloc = dead_pager_alloc,
151 .pgo_dealloc = dead_pager_dealloc,
152 .pgo_getpages = dead_pager_getpages,
153 .pgo_putpages = dead_pager_putpages,
154 .pgo_haspage = dead_pager_haspage,
155 };
156
157 struct pagerops *pagertab[] = {
158 &defaultpagerops, /* OBJT_DEFAULT */
159 &swappagerops, /* OBJT_SWAP */
160 &vnodepagerops, /* OBJT_VNODE */
161 &devicepagerops, /* OBJT_DEVICE */
162 &physpagerops, /* OBJT_PHYS */
163 &deadpagerops, /* OBJT_DEAD */
164 &sgpagerops, /* OBJT_SG */
165 &mgtdevicepagerops, /* OBJT_MGTDEVICE */
166 };
167
168 static const int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
169
170 /*
171 * Kernel address space for mapping pages.
172 * Used by pagers where KVAs are needed for IO.
173 *
174 * XXX needs to be large enough to support the number of pending async
175 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
176 * (MAXPHYS == 64k) if you want to get the most efficiency.
177 */
178 struct mtx_padalign pbuf_mtx;
179 static TAILQ_HEAD(swqueue, buf) bswlist;
180 static int bswneeded;
181 vm_offset_t swapbkva; /* swap buffers kva */
182
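/*
 * Worked example of the sizing note above, using only the values quoted
 * there (NPENDINGIO == 64 pending async cleaning requests, MAXPHYS == 64k
 * per swap cluster); the real reservation is sized from nswbuf at boot,
 * so this is a back-of-the-envelope sketch rather than the exact formula:
 *
 *	64 requests * 64 KB per cluster = 4 MB of pbuf KVA
 */
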
183 void
184 vm_pager_init()
185 {
186 struct pagerops **pgops;
187
188 TAILQ_INIT(&bswlist);
189 /*
190 * Initialize known pagers
191 */
192 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
193 if ((*pgops)->pgo_init != NULL)
194 (*(*pgops)->pgo_init) ();
195 }
196
197 void
198 vm_pager_bufferinit()
199 {
200 struct buf *bp;
201 int i;
202
203 mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
204 bp = swbuf;
205 /*
206 * Now set up swap and physical I/O buffer headers.
207 */
208 for (i = 0; i < nswbuf; i++, bp++) {
209 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
210 BUF_LOCKINIT(bp);
211 LIST_INIT(&bp->b_dep);
212 bp->b_rcred = bp->b_wcred = NOCRED;
213 bp->b_xflags = 0;
214 }
215
216 cluster_pbuf_freecnt = nswbuf / 2;
217 vnode_pbuf_freecnt = nswbuf / 2 + 1;
218 }
219
220 /*
221 * Allocate an instance of a pager of the given type.
222 * Size, protection and offset parameters are passed in for pagers that
223 * need to perform page-level validation (e.g. the device pager).
224 */
225 vm_object_t
226 vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
227 vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
228 {
229 vm_object_t ret;
230 struct pagerops *ops;
231
232 ops = pagertab[type];
233 if (ops)
234 ret = (*ops->pgo_alloc) (handle, size, prot, off, cred);
235 else
236 ret = NULL;
237 return (ret);
238 }
239
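/*
 * Usage sketch for vm_pager_allocate(); the helper below is hypothetical
 * and not part of this file.  It creates an anonymous, swap-backed object
 * of "npages" pages through the dispatch table above.  The reference it
 * returns is normally dropped with vm_object_deallocate(); pgo_dealloc
 * itself runs during object termination with the object write-locked,
 * which is what vm_pager_deallocate() below asserts.
 */
static vm_object_t
example_anon_object(vm_pindex_t npages)
{

	return (vm_pager_allocate(OBJT_SWAP, NULL, IDX_TO_OFF(npages),
	    VM_PROT_DEFAULT, 0, NULL));
}
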
240 /*
241 * The object must be locked.
242 */
243 void
244 vm_pager_deallocate(object)
245 vm_object_t object;
246 {
247
248 VM_OBJECT_ASSERT_WLOCKED(object);
249 (*pagertab[object->type]->pgo_dealloc) (object);
250 }
251
252 /*
253 * vm_pager_get_pages() - inline, see vm/vm_pager.h
254 * vm_pager_put_pages() - inline, see vm/vm_pager.h
255 * vm_pager_has_page() - inline, see vm/vm_pager.h
256 */
257
258 /*
259 * Search the specified pager object list for an object with the
260 * specified handle. If an object with the specified handle is found,
261 * increase its reference count and return it. Otherwise, return NULL.
262 *
263 * The pager object list must be locked.
264 */
265 vm_object_t
266 vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
267 {
268 vm_object_t object;
269
270 TAILQ_FOREACH(object, pg_list, pager_object_list) {
271 if (object->handle == handle) {
272 VM_OBJECT_WLOCK(object);
273 if ((object->flags & OBJ_DEAD) == 0) {
274 vm_object_reference_locked(object);
275 VM_OBJECT_WUNLOCK(object);
276 break;
277 }
278 VM_OBJECT_WUNLOCK(object);
279 }
280 }
281 return (object);
282 }
283
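/*
 * Usage sketch: a pager that tracks its objects by handle searches its
 * list under the pager's own lock, as the comment above requires.  The
 * helper and its parameters are hypothetical; real callers such as the
 * device and swap pagers pass their private pagerlst and lock.
 */
static vm_object_t
example_pager_lookup(struct pagerlst *list, struct mtx *lock, void *handle)
{
	vm_object_t object;

	mtx_lock(lock);
	object = vm_pager_object_lookup(list, handle);
	mtx_unlock(lock);
	return (object);	/* referenced on success, NULL otherwise */
}
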
284 /*
285 * initialize a physical buffer
286 */
287
288 /*
289 * XXX This probably belongs in vfs_bio.c
290 */
291 static void
292 initpbuf(struct buf *bp)
293 {
294 KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
295 KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
296 bp->b_rcred = NOCRED;
297 bp->b_wcred = NOCRED;
298 bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */
299 bp->b_saveaddr = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
300 bp->b_data = bp->b_saveaddr;
301 bp->b_kvabase = bp->b_saveaddr;
302 bp->b_kvasize = MAXPHYS;
303 bp->b_xflags = 0;
304 bp->b_flags = 0;
305 bp->b_ioflags = 0;
306 bp->b_iodone = NULL;
307 bp->b_error = 0;
308 BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
309 }
310
311 /*
312 * allocate a physical buffer
313 *
314 * There are a limited number (nswbuf) of physical buffers. We need
315 * to make sure that no single subsystem is able to hog all of them,
316 * so each subsystem implements a counter which is typically initialized
317 * to 1/2 nswbuf. getpbuf() decrements this counter in allocation and
318 * increments it on release, and blocks if the counter hits zero. A
319 * subsystem may initialize the counter to -1 to disable the feature,
320 * but it must still be sure to match up all uses of getpbuf() with
321 * relpbuf() using the same variable.
322 *
323 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
324 * relatively soon when the rest of the subsystems get smart about it. XXX
325 */
326 struct buf *
327 getpbuf(int *pfreecnt)
328 {
329 struct buf *bp;
330
331 mtx_lock(&pbuf_mtx);
332
333 for (;;) {
334 if (pfreecnt) {
335 while (*pfreecnt == 0) {
336 msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
337 }
338 }
339
340 /* get a bp from the swap buffer header pool */
341 if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
342 break;
343
344 bswneeded = 1;
345 msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
346 /* loop in case someone else grabbed one */
347 }
348 TAILQ_REMOVE(&bswlist, bp, b_freelist);
349 if (pfreecnt)
350 --*pfreecnt;
351 mtx_unlock(&pbuf_mtx);
352
353 initpbuf(bp);
354 return bp;
355 }
356
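/*
 * Sketch of the counter protocol described above.  A subsystem keeps a
 * private counter, typically set to nswbuf / 2 at initialization (or -1
 * to opt out of the limit), and passes the same variable to every
 * getpbuf()/relpbuf() pair.  The counter and helper below are
 * hypothetical.
 */
static int example_pbuf_freecnt = -1;	/* would be set to nswbuf / 2 at init */

static void
example_pbuf_io(void)
{
	struct buf *bp;

	bp = getpbuf(&example_pbuf_freecnt);	/* may sleep until a pbuf is free */
	/* ... fill bp->b_data (MAXPHYS bytes of KVA) and issue the I/O ... */
	relpbuf(bp, &example_pbuf_freecnt);	/* must pass the same counter */
}
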
357 /*
358 * allocate a physical buffer, if one is available.
359 *
360 * Note that there is no NULL hack here - all subsystems using this
361 * call understand how to use pfreecnt.
362 */
363 struct buf *
364 trypbuf(int *pfreecnt)
365 {
366 struct buf *bp;
367
368 mtx_lock(&pbuf_mtx);
369 if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
370 mtx_unlock(&pbuf_mtx);
371 return NULL;
372 }
373 TAILQ_REMOVE(&bswlist, bp, b_freelist);
374
375 --*pfreecnt;
376
377 mtx_unlock(&pbuf_mtx);
378
379 initpbuf(bp);
380
381 return bp;
382 }
383
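/*
 * Sketch of the non-blocking variant: trypbuf() returns NULL instead of
 * sleeping, so callers need a fallback path, and unlike getpbuf() the
 * pfreecnt argument must not be NULL.  The helper is hypothetical.
 */
static int
example_try_pbuf(int *pfreecnt)
{
	struct buf *bp;

	if ((bp = trypbuf(pfreecnt)) == NULL)
		return (0);		/* no pbuf available right now */
	/* ... use bp ... */
	relpbuf(bp, pfreecnt);
	return (1);
}
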
384 /*
385 * release a physical buffer
386 *
387 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
388 * relatively soon when the rest of the subsystems get smart about it. XXX
389 */
390 void
391 relpbuf(struct buf *bp, int *pfreecnt)
392 {
393
394 if (bp->b_rcred != NOCRED) {
395 crfree(bp->b_rcred);
396 bp->b_rcred = NOCRED;
397 }
398 if (bp->b_wcred != NOCRED) {
399 crfree(bp->b_wcred);
400 bp->b_wcred = NOCRED;
401 }
402
403 KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
404 KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));
405
406 BUF_UNLOCK(bp);
407
408 mtx_lock(&pbuf_mtx);
409 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
410
411 if (bswneeded) {
412 bswneeded = 0;
413 wakeup(&bswneeded);
414 }
415 if (pfreecnt) {
416 if (++*pfreecnt == 1)
417 wakeup(pfreecnt);
418 }
419 mtx_unlock(&pbuf_mtx);
420 }
421
422 /*
423 * Associate a p-buffer with a vnode.
424 *
425 * Also sets B_PAGING flag to indicate that vnode is not fully associated
426 * with the buffer. i.e. the bp has not been linked into the vnode or
427 * ref-counted.
428 */
429 void
430 pbgetvp(struct vnode *vp, struct buf *bp)
431 {
432
433 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
434 KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));
435
436 bp->b_vp = vp;
437 bp->b_flags |= B_PAGING;
438 bp->b_bufobj = &vp->v_bufobj;
439 }
440
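/*
 * Sketch of the association lifetime: a pager borrows a pbuf, ties it to
 * the vnode for the duration of one paging I/O and severs the link again
 * before releasing the buffer, in the style of the vnode pager.  The
 * helper is hypothetical and omits the actual request setup.
 */
static void
example_paging_io(struct vnode *vp, int *pfreecnt)
{
	struct buf *bp;

	bp = getpbuf(pfreecnt);
	pbgetvp(vp, bp);		/* sets b_vp, b_bufobj and B_PAGING */
	/* ... set up b_iocmd/b_blkno, issue the request, wait for it ... */
	pbrelvp(bp);			/* undo the association */
	relpbuf(bp, pfreecnt);
}
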
441 /*
442 * Associate a p-buffer with a bufobj.
443 *
444 * Also sets B_PAGING flag to indicate that the bufobj is not fully
445 * associated with the buffer, i.e. the bp has not been linked into the
446 * bufobj or ref-counted.
447 */
448 void
449 pbgetbo(struct bufobj *bo, struct buf *bp)
450 {
451
452 KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
453 KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));
454
455 bp->b_flags |= B_PAGING;
456 bp->b_bufobj = bo;
457 }
458
459 /*
460 * Disassociate a p-buffer from a vnode.
461 */
462 void
463 pbrelvp(struct buf *bp)
464 {
465
466 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
467 KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
468 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
469 ("pbrelvp: pager buf on vnode list."));
470
471 bp->b_vp = NULL;
472 bp->b_bufobj = NULL;
473 bp->b_flags &= ~B_PAGING;
474 }
475
476 /*
477 * Disassociate a p-buffer from a bufobj.
478 */
479 void
480 pbrelbo(struct buf *bp)
481 {
482
483 KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
484 KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
485 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
486 ("pbrelbo: pager buf on vnode list."));
487
488 bp->b_bufobj = NULL;
489 bp->b_flags &= ~B_PAGING;
490 }