sys/vm/vm_pager.c
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/vm/vm_pager.c 326403 2017-11-30 15:48:35Z pfg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

struct buf *swbuf;

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

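/*
 * The "dead" pager backs OBJT_DEAD objects, i.e. objects whose real
 * pager has already been torn down: page-ins fail, page-outs report
 * VM_PAGER_AGAIN, and no page is ever reported resident, so stray
 * operations on half-destroyed objects are absorbed harmlessly.
 */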
static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{

}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};
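
/*
 * All pager operations are dispatched through pagertab, indexed by the
 * object's type.  As an illustrative sketch (hypothetical wrapper, not
 * part of the original file), a haspage query reduces to an indirect
 * call:
 *
 *	boolean_t
 *	example_pager_haspage(vm_object_t object, vm_pindex_t pindex)
 *	{
 *		return ((*pagertab[object->type]->pgo_haspage)(object,
 *		    pindex, NULL, NULL));
 *	}
 *
 * The real inline wrappers in vm/vm_pager.h follow this same pattern.
 */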

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
struct mtx_padalign __exclusive_cache_line pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[nitems(pagertab)]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}
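
/*
 * Note that the three per-subsystem limits set above deliberately sum to
 * more than nswbuf: each counter caps a single consumer at roughly half
 * the pool, while the shared bswlist enforces the true global limit.
 */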

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}
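
/*
 * Illustrative sketch (hypothetical caller): the vnode pager, for
 * example, passes the vnode itself as the handle when creating the
 * object that backs a mapped file:
 *
 *	vm_object_t object;
 *
 *	object = vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL,
 *	    0, cred);
 *	if (object == NULL)
 *		return (ENOMEM);	(hypothetical error handling)
 */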

/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		if (m[i] == bogus_page)
			continue;
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If pager has replaced a page, assert that it had
		 * updated the array.
		 */
		KASSERT(m[i] == vm_page_lookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}
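
/*
 * Illustrative sketch (hypothetical synchronous caller): the contract
 * asserted by vm_pager_assert_in() means a caller must pass exclusively
 * busied, invalid pages and check the return code, e.g.:
 *
 *	vm_page_t m;
 *	int rv;
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	if (m->valid != VM_PAGE_BITS_ALL) {
 *		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 *		if (rv != VM_PAGER_OK) {
 *			(free the page and report an error)
 *		}
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 *
 * On VM_PAGER_OK the requested page is fully valid and still busied.
 */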

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}
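
/*
 * Illustrative sketch (hypothetical callback): the iodone argument has
 * the pgo_getpages_iodone_t shape declared in vm/vm_pager.h and runs
 * when the asynchronous read completes:
 *
 *	static void
 *	example_iodone(void *arg, vm_page_t *m, int count, int error)
 *	{
 *		(wake up or finish the request recorded in arg)
 *	}
 *
 *	vm_pager_get_pages_async(object, ma, count, NULL, NULL,
 *	    example_iodone, arg);
 */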

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
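
/*
 * Illustrative sketch (hypothetical pager): pagers that share one object
 * per handle, such as the device pager, perform the lookup under their
 * list lock and only allocate on a miss:
 *
 *	mtx_lock(&example_pager_mtx);
 *	object = vm_pager_object_lookup(&example_pager_object_list,
 *	    handle);
 *	if (object == NULL) {
 *		(allocate a new object and insert it into the list)
 *	}
 *	mtx_unlock(&example_pager_mtx);
 */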

/*
 * initialize a physical buffer
 */

/*
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{

	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
	buf_track(bp, __func__);
}
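
/*
 * The KVA assignment above is positional: pbuf i, i.e. &swbuf[i], always
 * maps the fixed range
 *
 *	[swapbkva + i * MAXPHYS, swapbkva + (i + 1) * MAXPHYS)
 *
 * so no KVA allocation is needed at I/O time; each of the nswbuf buffers
 * owns a permanent MAXPHYS-sized window into the space at swapbkva.
 */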

/*
 * allocate a physical buffer
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter in allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0) {
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}
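
/*
 * Illustrative sketch (hypothetical subsystem): a private counter bounds
 * how many pbufs one subsystem may hold, and the same variable must be
 * passed to both calls or the accounting drifts:
 *
 *	static int example_pbuf_freecnt = -1;	(set to nswbuf / 2 at init)
 *
 *	bp = getpbuf(&example_pbuf_freecnt);
 *	(perform the I/O with bp)
 *	relpbuf(bp, &example_pbuf_freecnt);
 */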

/*
 * allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}

/*
 * release a physical buffer
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	buf_track(bp, __func__);
	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e., the bp has not been linked into
 * the vnode or ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e., the bp has not been linked into
 * the bufobj or ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
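
/*
 * Illustrative sketch (hypothetical paging I/O path): the helpers above
 * bracket a paging write through a vnode, pairing association with
 * disassociation before the pbuf is released:
 *
 *	bp = getpbuf(&example_pbuf_freecnt);
 *	pbgetvp(vp, bp);
 *	bp->b_iocmd = BIO_WRITE;
 *	(set up b_data, b_bcount and b_blkno, issue the I/O, then wait)
 *	pbrelvp(bp);
 *	relpbuf(bp, &example_pbuf_freecnt);
 */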