FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1998 Matthew Dillon,
5 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1990 University of Utah.
7 * Copyright (c) 1982, 1986, 1989, 1993
8 * The Regents of the University of California. All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * the Systems Programming Group of the University of Utah Computer
12 * Science Department.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the University of
25 * California, Berkeley and its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 * may be used to endorse or promote products derived from this software
28 * without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * New Swap System
43 * Matthew Dillon
44 *
45 * Radix Bitmap 'blists'.
46 *
47 * - The new swapper uses the new radix bitmap code. This should scale
48 * to arbitrarily small or arbitrarily large swap spaces and an almost
49 * arbitrary degree of fragmentation.
50 *
51 * Features:
52 *
53 * - on the fly reallocation of swap during putpages. The new system
54 * does not try to keep previously allocated swap blocks for dirty
55 * pages.
56 *
57 * - on the fly deallocation of swap
58 *
59 * - No more garbage collection required. Unnecessarily allocated swap
60 * blocks only exist for dirty vm_page_t's now and these are already
61 * cycled (in a high-load system) by the pager. We also do on-the-fly
62 * removal of invalidated swap blocks when a page is destroyed
63 * or renamed.
64 *
65 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
66 *
67 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
68 * @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
69 */
70
71 #include <sys/cdefs.h>
72 __FBSDID("$FreeBSD$");
73
74 #include "opt_swap.h"
75 #include "opt_vm.h"
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/conf.h>
80 #include <sys/kernel.h>
81 #include <sys/priv.h>
82 #include <sys/proc.h>
83 #include <sys/bio.h>
84 #include <sys/buf.h>
85 #include <sys/disk.h>
86 #include <sys/disklabel.h>
87 #include <sys/fcntl.h>
88 #include <sys/mount.h>
89 #include <sys/namei.h>
90 #include <sys/vnode.h>
91 #include <sys/malloc.h>
92 #include <sys/pctrie.h>
93 #include <sys/racct.h>
94 #include <sys/resource.h>
95 #include <sys/resourcevar.h>
96 #include <sys/rwlock.h>
97 #include <sys/sbuf.h>
98 #include <sys/sysctl.h>
99 #include <sys/sysproto.h>
100 #include <sys/blist.h>
101 #include <sys/lock.h>
102 #include <sys/sx.h>
103 #include <sys/vmmeter.h>
104
105 #include <security/mac/mac_framework.h>
106
107 #include <vm/vm.h>
108 #include <vm/pmap.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_kern.h>
111 #include <vm/vm_object.h>
112 #include <vm/vm_page.h>
113 #include <vm/vm_pager.h>
114 #include <vm/vm_pageout.h>
115 #include <vm/vm_param.h>
116 #include <vm/swap_pager.h>
117 #include <vm/vm_extern.h>
118 #include <vm/uma.h>
119
120 #include <geom/geom.h>
121
122 /*
123 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
124 * The 64-page limit is due to the radix code (kern/subr_blist.c).
125 */
126 #ifndef MAX_PAGEOUT_CLUSTER
127 #define MAX_PAGEOUT_CLUSTER 32
128 #endif
129
130 #if !defined(SWB_NPAGES)
131 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER
132 #endif
133
134 #define SWAP_META_PAGES PCTRIE_COUNT
135
136 /*
137 * A swblk structure maps each page index within a
138 * SWAP_META_PAGES-aligned and sized range to the address of an
139 * on-disk swap block (or SWAPBLK_NONE). The collection of these
140 * mappings for an entire vm object is implemented as a pc-trie.
141 */
142 struct swblk {
143 vm_pindex_t p;
144 daddr_t d[SWAP_META_PAGES];
145 };
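/*
 * Lookup note: a page index is rounded down to a multiple of
 * SWAP_META_PAGES to locate the owning swblk (recorded in sb->p), and
 * the remainder selects the slot within d[]; see swp_pager_meta_build().
 */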
146
147 static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
148 static struct mtx sw_dev_mtx;
149 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
150 static struct swdevt *swdevhd; /* Allocate from here next */
151 static int nswapdev; /* Number of swap devices */
152 int swap_pager_avail;
153 static struct sx swdev_syscall_lock; /* serialize swap(on|off) */
154
155 static __exclusive_cache_line u_long swap_reserved;
156 static u_long swap_total;
157 static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);
158 SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
159 &swap_reserved, 0, sysctl_page_shift, "A",
160 "Amount of swap storage needed to back all allocated anonymous memory.");
161 SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
162 &swap_total, 0, sysctl_page_shift, "A",
163 "Total amount of available swap storage.");
164
165 static int overcommit = 0;
166 SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &overcommit, 0,
167 "Configure virtual memory overcommit behavior. See tuning(7) "
168 "for details.");
169 static unsigned long swzone;
170 SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
171 "Actual size of swap metadata zone");
172 static unsigned long swap_maxpages;
173 SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
174 "Maximum amount of swap supported");
175
176 /* bits from overcommit */
177 #define SWAP_RESERVE_FORCE_ON (1 << 0)
178 #define SWAP_RESERVE_RLIMIT_ON (1 << 1)
179 #define SWAP_RESERVE_ALLOW_NONWIRED (1 << 2)
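/*
 * As implemented by swap_reserve_by_cred() below: with
 * SWAP_RESERVE_FORCE_ON clear, reservations always succeed (full
 * overcommit).  With it set, the request must fit within swap_total,
 * plus the count of pages that are neither wired nor part of the free
 * reserve when SWAP_RESERVE_ALLOW_NONWIRED is also set, unless the
 * thread holds PRIV_VM_SWAP_NOQUOTA.  SWAP_RESERVE_RLIMIT_ON
 * additionally enforces the per-uid RLIMIT_SWAP limit, subject to
 * PRIV_VM_SWAP_NORLIMIT.
 */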
180
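/* Sysctl handler: report a counter maintained in pages as a byte count. */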
181 static int
182 sysctl_page_shift(SYSCTL_HANDLER_ARGS)
183 {
184 uint64_t newval;
185 u_long value = *(u_long *)arg1;
186
187 newval = ((uint64_t)value) << PAGE_SHIFT;
188 return (sysctl_handle_64(oidp, &newval, 0, req));
189 }
190
191 int
192 swap_reserve(vm_ooffset_t incr)
193 {
194
195 return (swap_reserve_by_cred(incr, curthread->td_ucred));
196 }
197
198 int
199 swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
200 {
201 u_long r, s, prev, pincr;
202 int res, error;
203 static int curfail;
204 static struct timeval lastfail;
205 struct uidinfo *uip;
206
207 uip = cred->cr_ruidinfo;
208
209 KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
210 (uintmax_t)incr));
211
212 #ifdef RACCT
213 if (racct_enable) {
214 PROC_LOCK(curproc);
215 error = racct_add(curproc, RACCT_SWAP, incr);
216 PROC_UNLOCK(curproc);
217 if (error != 0)
218 return (0);
219 }
220 #endif
221
222 pincr = atop(incr);
223 res = 0;
224 prev = atomic_fetchadd_long(&swap_reserved, pincr);
225 r = prev + pincr;
226 if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
227 s = vm_cnt.v_page_count - vm_cnt.v_free_reserved -
228 vm_wire_count();
229 } else
230 s = 0;
231 s += swap_total;
232 if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
233 (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
234 res = 1;
235 } else {
236 prev = atomic_fetchadd_long(&swap_reserved, -pincr);
237 if (prev < pincr)
238 panic("swap_reserved < incr on overcommit fail");
239 }
240 if (res) {
241 prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
242 if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
243 prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
244 priv_check(curthread, PRIV_VM_SWAP_NORLIMIT)) {
245 res = 0;
246 prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
247 if (prev < pincr)
248 panic("uip->ui_vmsize < incr on overcommit fail");
249 }
250 }
251 if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
252 printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
253 uip->ui_uid, curproc->p_pid, incr);
254 }
255
256 #ifdef RACCT
257 if (racct_enable && !res) {
258 PROC_LOCK(curproc);
259 racct_sub(curproc, RACCT_SWAP, incr);
260 PROC_UNLOCK(curproc);
261 }
262 #endif
263
264 return (res);
265 }
266
267 void
268 swap_reserve_force(vm_ooffset_t incr)
269 {
270 struct uidinfo *uip;
271 u_long pincr;
272
273 KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
274 (uintmax_t)incr));
275
276 PROC_LOCK(curproc);
277 #ifdef RACCT
278 if (racct_enable)
279 racct_add_force(curproc, RACCT_SWAP, incr);
280 #endif
281 pincr = atop(incr);
282 atomic_add_long(&swap_reserved, pincr);
283 uip = curproc->p_ucred->cr_ruidinfo;
284 atomic_add_long(&uip->ui_vmsize, pincr);
285 PROC_UNLOCK(curproc);
286 }
287
288 void
289 swap_release(vm_ooffset_t decr)
290 {
291 struct ucred *cred;
292
293 PROC_LOCK(curproc);
294 cred = curproc->p_ucred;
295 swap_release_by_cred(decr, cred);
296 PROC_UNLOCK(curproc);
297 }
298
299 void
300 swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
301 {
302 u_long prev, pdecr;
303 struct uidinfo *uip;
304
305 uip = cred->cr_ruidinfo;
306
307 KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK", __func__,
308 (uintmax_t)decr));
309
310 pdecr = atop(decr);
311 prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
312 if (prev < pdecr)
313 panic("swap_reserved < decr");
314
315 prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
316 if (prev < pdecr)
317 printf("negative vmsize for uid = %d\n", uip->ui_uid);
318 #ifdef RACCT
319 if (racct_enable)
320 racct_sub_cred(cred, RACCT_SWAP, decr);
321 #endif
322 }
323
324 #define SWM_POP 0x01 /* pop (remove) swap block from metadata */
325
326 static int swap_pager_full = 2; /* swap space exhaustion (task killing) */
327 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
328 static int nsw_rcount; /* free read buffers */
329 static int nsw_wcount_sync; /* limit write buffers / synchronous */
330 static int nsw_wcount_async; /* limit write buffers / asynchronous */
331 static int nsw_wcount_async_max;/* assigned maximum */
332 static int nsw_cluster_max; /* maximum VOP I/O allowed */
333
334 static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
335 SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
336 CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
337 "Maximum running async swap ops");
338 static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
339 SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
340 CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
341 "Swap Fragmentation Info");
342
343 static struct sx sw_alloc_sx;
344
345 /*
346 * "named" and "unnamed" anon region objects. Try to reduce the overhead
347 * of searching a named list by hashing it just a little.
348 */
349
350 #define NOBJLISTS 8
351
352 #define NOBJLIST(handle) \
353 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
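/*
 * The handle pointer is shifted right to discard its low-order bits,
 * which are usually identical because of allocation alignment, before
 * being masked into one of the NOBJLISTS buckets.
 */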
354
355 static struct pagerlst swap_pager_object_list[NOBJLISTS];
356 static uma_zone_t swblk_zone;
357 static uma_zone_t swpctrie_zone;
358
359 /*
360 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
361 * calls hooked from other parts of the VM system and do not appear here.
362 * (see vm/swap_pager.h).
363 */
364 static vm_object_t
365 swap_pager_alloc(void *handle, vm_ooffset_t size,
366 vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
367 static void swap_pager_dealloc(vm_object_t object);
368 static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
369 int *);
370 static int swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
371 int *, pgo_getpages_iodone_t, void *);
372 static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
373 static boolean_t
374 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
375 static void swap_pager_init(void);
376 static void swap_pager_unswapped(vm_page_t);
377 static void swap_pager_swapoff(struct swdevt *sp);
378 static void swap_pager_update_writecount(vm_object_t object,
379 vm_offset_t start, vm_offset_t end);
380 static void swap_pager_release_writecount(vm_object_t object,
381 vm_offset_t start, vm_offset_t end);
382
383 struct pagerops swappagerops = {
384 .pgo_init = swap_pager_init, /* early system initialization of pager */
385 .pgo_alloc = swap_pager_alloc, /* allocate an OBJT_SWAP object */
386 .pgo_dealloc = swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
387 .pgo_getpages = swap_pager_getpages, /* pagein */
388 .pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
389 .pgo_putpages = swap_pager_putpages, /* pageout */
390 .pgo_haspage = swap_pager_haspage, /* get backing store status for page */
391 .pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
392 .pgo_update_writecount = swap_pager_update_writecount,
393 .pgo_release_writecount = swap_pager_release_writecount,
394 };
395
396 /*
397 * swap_*() routines are externally accessible. swp_*() routines are
398 * internal.
399 */
400 static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
401 static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
402
403 SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
404 "Maximum size of a swap block in pages");
405
406 static void swp_sizecheck(void);
407 static void swp_pager_async_iodone(struct buf *bp);
408 static bool swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
409 static int swapongeom(struct vnode *);
410 static int swaponvp(struct thread *, struct vnode *, u_long);
411 static int swapoff_one(struct swdevt *sp, struct ucred *cred);
412
413 /*
414 * Swap bitmap functions
415 */
416 static void swp_pager_freeswapspace(daddr_t blk, daddr_t npages);
417 static daddr_t swp_pager_getswapspace(int npages);
418
419 /*
420 * Metadata functions
421 */
422 static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
423 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
424 static void swp_pager_meta_free_all(vm_object_t);
425 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
426
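/*
 * Free-range accumulation: callers that release many swap blocks seed a
 * (start, count) pair with swp_pager_init_freerange(), feed blocks one
 * at a time to swp_pager_update_freerange(), which either extends the
 * current run or flushes it and starts a new one, and finally flush the
 * last run with an explicit swp_pager_freeswapspace() call.  This
 * batches contiguous blocks into single blist_free() operations.
 */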
427 static void
428 swp_pager_init_freerange(daddr_t *start, daddr_t *num)
429 {
430
431 *start = SWAPBLK_NONE;
432 *num = 0;
433 }
434
435 static void
436 swp_pager_update_freerange(daddr_t *start, daddr_t *num, daddr_t addr)
437 {
438
439 if (*start + *num == addr) {
440 (*num)++;
441 } else {
442 swp_pager_freeswapspace(*start, *num);
443 *start = addr;
444 *num = 1;
445 }
446 }
447
448 static void *
449 swblk_trie_alloc(struct pctrie *ptree)
450 {
451
452 return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
453 M_USE_RESERVE : 0)));
454 }
455
456 static void
457 swblk_trie_free(struct pctrie *ptree, void *node)
458 {
459
460 uma_zfree(swpctrie_zone, node);
461 }
462
463 PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);
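/*
 * PCTRIE_DEFINE() generates the SWAP_PCTRIE_INSERT(), SWAP_PCTRIE_LOOKUP()
 * and SWAP_PCTRIE_LOOKUP_GE() helpers (among others) used below, keyed on
 * the swblk's 'p' field and backed by the allocator routines above.
 */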
464
465 /*
466 * SWP_SIZECHECK() - update swap_pager_full indication
467 *
468 * update the swap_pager_almost_full indication and warn when we are
469 * about to run out of swap space, using lowat/hiwat hysteresis.
470 *
471 * Clear swap_pager_full ( task killing ) indication when lowat is met.
472 *
473 * No restrictions on call
474 * This routine may not block.
475 */
476 static void
477 swp_sizecheck(void)
478 {
479
480 if (swap_pager_avail < nswap_lowat) {
481 if (swap_pager_almost_full == 0) {
482 printf("swap_pager: out of swap space\n");
483 swap_pager_almost_full = 1;
484 }
485 } else {
486 swap_pager_full = 0;
487 if (swap_pager_avail > nswap_hiwat)
488 swap_pager_almost_full = 0;
489 }
490 }
491
492 /*
493 * SWAP_PAGER_INIT() - initialize the swap pager!
494 *
495 * Expected to be started from system init. NOTE: This code is run
496 * before much else so be careful what you depend on. Most of the VM
497 * system has yet to be initialized at this point.
498 */
499 static void
500 swap_pager_init(void)
501 {
502 /*
503 * Initialize object lists
504 */
505 int i;
506
507 for (i = 0; i < NOBJLISTS; ++i)
508 TAILQ_INIT(&swap_pager_object_list[i]);
509 mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
510 sx_init(&sw_alloc_sx, "swspsx");
511 sx_init(&swdev_syscall_lock, "swsysc");
512 }
513
514 /*
515 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
516 *
517 * Expected to be started from pageout process once, prior to entering
518 * its main loop.
519 */
520 void
521 swap_pager_swap_init(void)
522 {
523 unsigned long n, n2;
524
525 /*
526 * Number of in-transit swap bp operations. Don't
527 * exhaust the pbufs completely. Make sure we
528 * initialize workable values (0 will work for hysteresis
529 * but it isn't very efficient).
530 *
531 * The nsw_cluster_max is constrained by the bp->b_pages[]
532 * array (MAXPHYS/PAGE_SIZE) and our locally defined
533 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
534 * constrained by the swap device interleave stripe size.
535 *
536 * Currently we hardwire nsw_wcount_async to 4. This limit is
537 * designed to prevent other I/O from having high latencies due to
538 * our pageout I/O. The value 4 works well for one or two active swap
539 * devices but is probably a little low if you have more. Even so,
540 * a higher value would probably generate only a limited improvement
541 * with three or four active swap devices since the system does not
542 * typically have to pageout at extreme bandwidths. We will want
543 * at least 2 per swap device, and 4 is a pretty good value if you
544 * have one NFS swap device due to the command/ack latency over NFS.
545 * So it all works out pretty well.
546 */
547 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
548
549 mtx_lock(&pbuf_mtx);
550 nsw_rcount = (nswbuf + 1) / 2;
551 nsw_wcount_sync = (nswbuf + 3) / 4;
552 nsw_wcount_async = 4;
553 nsw_wcount_async_max = nsw_wcount_async;
554 mtx_unlock(&pbuf_mtx);
555
556 /*
557 * Initialize our zone, taking the user's requested size or
558 * estimating the number we need based on the number of pages
559 * in the system.
560 */
561 n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
562 vm_cnt.v_page_count / 2;
563 swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
564 pctrie_zone_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
565 if (swpctrie_zone == NULL)
566 panic("failed to create swap pctrie zone.");
567 swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
568 NULL, NULL, _Alignof(struct swblk) - 1, UMA_ZONE_VM);
569 if (swblk_zone == NULL)
570 panic("failed to create swap blk zone.");
571 n2 = n;
572 do {
573 if (uma_zone_reserve_kva(swblk_zone, n))
574 break;
575 /*
576 * if the allocation failed, try a zone two thirds the
577 * size of the previous attempt.
578 */
579 n -= ((n + 2) / 3);
580 } while (n > 0);
581
582 /*
583 * Often uma_zone_reserve_kva() cannot reserve exactly the
584 * requested size. Account for the difference when
585 * calculating swap_maxpages.
586 */
587 n = uma_zone_get_max(swblk_zone);
588
589 if (n < n2)
590 printf("Swap blk zone entries changed from %lu to %lu.\n",
591 n2, n);
592 /* absolute maximum we can handle assuming 100% efficiency */
593 swap_maxpages = n * SWAP_META_PAGES;
594 swzone = n * sizeof(struct swblk);
595 if (!uma_zone_reserve_kva(swpctrie_zone, n))
596 printf("Cannot reserve swap pctrie zone, "
597 "reduce kern.maxswzone.\n");
598 }
599
600 static vm_object_t
601 swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
602 vm_ooffset_t offset)
603 {
604 vm_object_t object;
605
606 if (cred != NULL) {
607 if (!swap_reserve_by_cred(size, cred))
608 return (NULL);
609 crhold(cred);
610 }
611
612 /*
613 * The un_pager.swp.swp_blks trie is initialized by
614 * vm_object_allocate() to ensure the correct order of
615 * visibility to other threads.
616 */
617 object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
618 PAGE_MASK + size));
619
620 object->un_pager.swp.writemappings = 0;
621 object->handle = handle;
622 if (cred != NULL) {
623 object->cred = cred;
624 object->charge = size;
625 }
626 return (object);
627 }
628
629 /*
630 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
631 * its metadata structures.
632 *
633 * This routine is called from the mmap and fork code to create a new
634 * OBJT_SWAP object.
635 *
636 * This routine must ensure that no live duplicate is created for
637 * the named object request, which is protected against by
638 * holding the sw_alloc_sx lock in case handle != NULL.
639 */
640 static vm_object_t
641 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
642 vm_ooffset_t offset, struct ucred *cred)
643 {
644 vm_object_t object;
645
646 if (handle != NULL) {
647 /*
648 * Reference existing named region or allocate new one. There
649 * should not be a race here against swp_pager_meta_build()
650 * as called from vm_page_remove() with regard to the lookup
651 * of the handle.
652 */
653 sx_xlock(&sw_alloc_sx);
654 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
655 if (object == NULL) {
656 object = swap_pager_alloc_init(handle, cred, size,
657 offset);
658 if (object != NULL) {
659 TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
660 object, pager_object_list);
661 }
662 }
663 sx_xunlock(&sw_alloc_sx);
664 } else {
665 object = swap_pager_alloc_init(handle, cred, size, offset);
666 }
667 return (object);
668 }
669
670 /*
671 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
672 *
673 * The swap backing for the object is destroyed. The code is
674 * designed such that we can reinstantiate it later, but this
675 * routine is typically called only when the entire object is
676 * about to be destroyed.
677 *
678 * The object must be locked.
679 */
680 static void
681 swap_pager_dealloc(vm_object_t object)
682 {
683
684 VM_OBJECT_ASSERT_WLOCKED(object);
685 KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));
686
687 /*
688 * Remove from list right away so lookups will fail if we block for
689 * pageout completion.
690 */
691 if (object->handle != NULL) {
692 VM_OBJECT_WUNLOCK(object);
693 sx_xlock(&sw_alloc_sx);
694 TAILQ_REMOVE(NOBJLIST(object->handle), object,
695 pager_object_list);
696 sx_xunlock(&sw_alloc_sx);
697 VM_OBJECT_WLOCK(object);
698 }
699
700 vm_object_pip_wait(object, "swpdea");
701
702 /*
703 * Free all remaining metadata. We only bother to free it from
704 * the swap meta data. We do not attempt to free swapblk's still
705 * associated with vm_page_t's for this object. We do not care
706 * if paging is still in progress on some objects.
707 */
708 swp_pager_meta_free_all(object);
709 object->handle = NULL;
710 object->type = OBJT_DEAD;
711 }
712
713 /************************************************************************
714 * SWAP PAGER BITMAP ROUTINES *
715 ************************************************************************/
716
717 /*
718 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
719 *
720 * Allocate swap for the requested number of pages. The starting
721 * swap block number (a page index) is returned or SWAPBLK_NONE
722 * if the allocation failed.
723 *
724 * Also has the side effect of advising that somebody made a mistake
725 * when they configured swap and didn't configure enough.
726 *
727 * This routine may not sleep.
728 *
729 * We allocate in round-robin fashion from the configured devices.
730 */
731 static daddr_t
732 swp_pager_getswapspace(int npages)
733 {
734 daddr_t blk;
735 struct swdevt *sp;
736 int i;
737
738 blk = SWAPBLK_NONE;
739 mtx_lock(&sw_dev_mtx);
740 sp = swdevhd;
741 for (i = 0; i < nswapdev; i++) {
742 if (sp == NULL)
743 sp = TAILQ_FIRST(&swtailq);
744 if (!(sp->sw_flags & SW_CLOSING)) {
745 blk = blist_alloc(sp->sw_blist, npages);
746 if (blk != SWAPBLK_NONE) {
747 blk += sp->sw_first;
748 sp->sw_used += npages;
749 swap_pager_avail -= npages;
750 swp_sizecheck();
751 swdevhd = TAILQ_NEXT(sp, sw_list);
752 goto done;
753 }
754 }
755 sp = TAILQ_NEXT(sp, sw_list);
756 }
757 if (swap_pager_full != 2) {
758 printf("swap_pager_getswapspace(%d): failed\n", npages);
759 swap_pager_full = 2;
760 swap_pager_almost_full = 1;
761 }
762 swdevhd = NULL;
763 done:
764 mtx_unlock(&sw_dev_mtx);
765 return (blk);
766 }
767
768 static int
769 swp_pager_isondev(daddr_t blk, struct swdevt *sp)
770 {
771
772 return (blk >= sp->sw_first && blk < sp->sw_end);
773 }
774
775 static void
776 swp_pager_strategy(struct buf *bp)
777 {
778 struct swdevt *sp;
779
780 mtx_lock(&sw_dev_mtx);
781 TAILQ_FOREACH(sp, &swtailq, sw_list) {
782 if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
783 mtx_unlock(&sw_dev_mtx);
784 if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
785 unmapped_buf_allowed) {
786 bp->b_data = unmapped_buf;
787 bp->b_offset = 0;
788 } else {
789 pmap_qenter((vm_offset_t)bp->b_data,
790 &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
791 }
792 sp->sw_strategy(bp, sp);
793 return;
794 }
795 }
796 panic("Swapdev not found");
797 }
798
799
800 /*
801 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
802 *
803 * This routine returns the specified swap blocks back to the bitmap.
804 *
805 * This routine may not sleep.
806 */
807 static void
808 swp_pager_freeswapspace(daddr_t blk, daddr_t npages)
809 {
810 struct swdevt *sp;
811
812 if (npages == 0)
813 return;
814 mtx_lock(&sw_dev_mtx);
815 TAILQ_FOREACH(sp, &swtailq, sw_list) {
816 if (blk >= sp->sw_first && blk < sp->sw_end) {
817 sp->sw_used -= npages;
818 /*
819 * If we are attempting to stop swapping on
820 * this device, we don't want to mark any
821 * blocks free lest they be reused.
822 */
823 if ((sp->sw_flags & SW_CLOSING) == 0) {
824 blist_free(sp->sw_blist, blk - sp->sw_first,
825 npages);
826 swap_pager_avail += npages;
827 swp_sizecheck();
828 }
829 mtx_unlock(&sw_dev_mtx);
830 return;
831 }
832 }
833 panic("Swapdev not found");
834 }
835
836 /*
837 * SYSCTL_SWAP_FRAGMENTATION() - produce raw swap space stats
838 */
839 static int
840 sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
841 {
842 struct sbuf sbuf;
843 struct swdevt *sp;
844 const char *devname;
845 int error;
846
847 error = sysctl_wire_old_buffer(req, 0);
848 if (error != 0)
849 return (error);
850 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
851 mtx_lock(&sw_dev_mtx);
852 TAILQ_FOREACH(sp, &swtailq, sw_list) {
853 if (vn_isdisk(sp->sw_vp, NULL))
854 devname = devtoname(sp->sw_vp->v_rdev);
855 else
856 devname = "[file]";
857 sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
858 blist_stats(sp->sw_blist, &sbuf);
859 }
860 mtx_unlock(&sw_dev_mtx);
861 error = sbuf_finish(&sbuf);
862 sbuf_delete(&sbuf);
863 return (error);
864 }
865
866 /*
867 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
868 * range within an object.
869 *
870 * This is a globally accessible routine.
871 *
872 * This routine removes swapblk assignments from swap metadata.
873 *
874 * The external callers of this routine typically have already destroyed
875 * or renamed vm_page_t's associated with this range in the object so
876 * we should be ok.
877 *
878 * The object must be locked.
879 */
880 void
881 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
882 {
883
884 swp_pager_meta_free(object, start, size);
885 }
886
887 /*
888 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
889 *
890 * Assigns swap blocks to the specified range within the object. The
891 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
892 *
893 * Returns 0 on success, -1 on failure.
894 */
895 int
896 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
897 {
898 int n = 0;
899 daddr_t blk = SWAPBLK_NONE;
900 vm_pindex_t beg = start; /* save start index */
901 daddr_t addr, n_free, s_free;
902
903 swp_pager_init_freerange(&s_free, &n_free);
904 VM_OBJECT_WLOCK(object);
905 while (size) {
906 if (n == 0) {
907 n = BLIST_MAX_ALLOC;
908 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
909 n >>= 1;
910 if (n == 0) {
911 swp_pager_meta_free(object, beg, start - beg);
912 VM_OBJECT_WUNLOCK(object);
913 return (-1);
914 }
915 }
916 }
917 addr = swp_pager_meta_build(object, start, blk);
918 if (addr != SWAPBLK_NONE)
919 swp_pager_update_freerange(&s_free, &n_free, addr);
920 --size;
921 ++start;
922 ++blk;
923 --n;
924 }
925 swp_pager_freeswapspace(s_free, n_free);
926 swp_pager_meta_free(object, start, n);
927 VM_OBJECT_WUNLOCK(object);
928 return (0);
929 }
930
931 /*
932 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
933 * and destroy the source.
934 *
935 * Copy any valid swapblks from the source to the destination. In
936 * cases where both the source and destination have a valid swapblk,
937 * we keep the destination's.
938 *
939 * This routine is allowed to sleep. It may sleep allocating metadata
940 * indirectly through swp_pager_meta_build() or if paging is still in
941 * progress on the source.
942 *
943 * The source object contains no vm_page_t's (which is just as well).
944 *
945 * The source object is of type OBJT_SWAP.
946 *
947 * The source and destination objects must be locked.
948 * Both object locks may temporarily be released.
949 */
950 void
951 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
952 vm_pindex_t offset, int destroysource)
953 {
954 vm_pindex_t i;
955 daddr_t dstaddr, n_free, s_free, srcaddr;
956
957 VM_OBJECT_ASSERT_WLOCKED(srcobject);
958 VM_OBJECT_ASSERT_WLOCKED(dstobject);
959
960 /*
961 * If destroysource is set, we remove the source object from the
962 * swap_pager internal queue now.
963 */
964 if (destroysource && srcobject->handle != NULL) {
965 vm_object_pip_add(srcobject, 1);
966 VM_OBJECT_WUNLOCK(srcobject);
967 vm_object_pip_add(dstobject, 1);
968 VM_OBJECT_WUNLOCK(dstobject);
969 sx_xlock(&sw_alloc_sx);
970 TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
971 pager_object_list);
972 sx_xunlock(&sw_alloc_sx);
973 VM_OBJECT_WLOCK(dstobject);
974 vm_object_pip_wakeup(dstobject);
975 VM_OBJECT_WLOCK(srcobject);
976 vm_object_pip_wakeup(srcobject);
977 }
978
979 /*
980 * Transfer source to destination.
981 */
982 swp_pager_init_freerange(&s_free, &n_free);
983 for (i = 0; i < dstobject->size; ++i) {
984 srcaddr = swp_pager_meta_ctl(srcobject, i + offset, SWM_POP);
985 if (srcaddr == SWAPBLK_NONE)
986 continue;
987 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
988 if (dstaddr != SWAPBLK_NONE) {
989 /*
990 * Destination has valid swapblk or it is represented
991 * by a resident page. We destroy the source block.
992 */
993 swp_pager_update_freerange(&s_free, &n_free, srcaddr);
994 continue;
995 }
996
997 /*
998 * Destination has no swapblk and is not resident,
999 * copy source.
1000 *
1001 * swp_pager_meta_build() can sleep.
1002 */
1003 vm_object_pip_add(srcobject, 1);
1004 VM_OBJECT_WUNLOCK(srcobject);
1005 vm_object_pip_add(dstobject, 1);
1006 dstaddr = swp_pager_meta_build(dstobject, i, srcaddr);
1007 KASSERT(dstaddr == SWAPBLK_NONE,
1008 ("Unexpected destination swapblk"));
1009 vm_object_pip_wakeup(dstobject);
1010 VM_OBJECT_WLOCK(srcobject);
1011 vm_object_pip_wakeup(srcobject);
1012 }
1013 swp_pager_freeswapspace(s_free, n_free);
1014
1015 /*
1016 * Free left over swap blocks in source.
1017 *
1018 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
1019 * double-remove the object from the swap queues.
1020 */
1021 if (destroysource) {
1022 swp_pager_meta_free_all(srcobject);
1023 /*
1024 * Reverting the type is not necessary, the caller is going
1025 * to destroy srcobject directly, but I'm doing it here
1026 * for consistency since we've removed the object from its
1027 * queues.
1028 */
1029 srcobject->type = OBJT_DEFAULT;
1030 }
1031 }
1032
1033 /*
1034 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
1035 * the requested page.
1036 *
1037 * We determine whether good backing store exists for the requested
1038 * page and return TRUE if it does, FALSE if it doesn't.
1039 *
1040 * If TRUE, we also try to determine how much valid, contiguous backing
1041 * store exists before and after the requested page.
1042 */
1043 static boolean_t
1044 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
1045 int *after)
1046 {
1047 daddr_t blk, blk0;
1048 int i;
1049
1050 VM_OBJECT_ASSERT_LOCKED(object);
1051
1052 /*
1053 * do we have good backing store at the requested index ?
1054 */
1055 blk0 = swp_pager_meta_ctl(object, pindex, 0);
1056 if (blk0 == SWAPBLK_NONE) {
1057 if (before)
1058 *before = 0;
1059 if (after)
1060 *after = 0;
1061 return (FALSE);
1062 }
1063
1064 /*
1065 * find backwards-looking contiguous good backing store
1066 */
1067 if (before != NULL) {
1068 for (i = 1; i < SWB_NPAGES; i++) {
1069 if (i > pindex)
1070 break;
1071 blk = swp_pager_meta_ctl(object, pindex - i, 0);
1072 if (blk != blk0 - i)
1073 break;
1074 }
1075 *before = i - 1;
1076 }
1077
1078 /*
1079 * find forward-looking contiguous good backing store
1080 */
1081 if (after != NULL) {
1082 for (i = 1; i < SWB_NPAGES; i++) {
1083 blk = swp_pager_meta_ctl(object, pindex + i, 0);
1084 if (blk != blk0 + i)
1085 break;
1086 }
1087 *after = i - 1;
1088 }
1089 return (TRUE);
1090 }
1091
1092 /*
1093 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
1094 *
1095 * This removes any associated swap backing store, whether valid or
1096 * not, from the page.
1097 *
1098 * This routine is typically called when a page is made dirty, at
1099 * which point any associated swap can be freed. MADV_FREE also
1100 * calls us in a special-case situation.
1101 *
1102 * NOTE!!! If the page is clean and the swap was valid, the caller
1103 * should make the page dirty before calling this routine. This routine
1104 * does NOT change the m->dirty status of the page. Also: MADV_FREE
1105 * depends on it.
1106 *
1107 * This routine may not sleep.
1108 *
1109 * The object containing the page must be locked.
1110 */
1111 static void
1112 swap_pager_unswapped(vm_page_t m)
1113 {
1114 daddr_t srcaddr;
1115
1116 srcaddr = swp_pager_meta_ctl(m->object, m->pindex, SWM_POP);
1117 if (srcaddr != SWAPBLK_NONE)
1118 swp_pager_freeswapspace(srcaddr, 1);
1119 }
1120
1121 /*
1122 * swap_pager_getpages() - bring pages in from swap
1123 *
1124 * Attempt to page in the pages in array "ma" of length "count". The
1125 * caller may optionally specify that additional pages preceding and
1126 * succeeding the specified range be paged in. The number of such pages
1127 * is returned in the "rbehind" and "rahead" parameters, and they will
1128 * be in the inactive queue upon return.
1129 *
1130 * The pages in "ma" must be busied and will remain busied upon return.
1131 */
1132 static int
1133 swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
1134 int *rahead)
1135 {
1136 struct buf *bp;
1137 vm_page_t bm, mpred, msucc, p;
1138 vm_pindex_t pindex;
1139 daddr_t blk;
1140 int i, maxahead, maxbehind, reqcount;
1141
1142 reqcount = count;
1143
1144 /*
1145 * Determine the final number of read-behind pages and
1146 * allocate them BEFORE releasing the object lock. Otherwise,
1147 * there can be a problematic race with vm_object_split().
1148 * Specifically, vm_object_split() might first transfer pages
1149 * that precede ma[0] in the current object to a new object,
1150 * and then this function incorrectly recreates those pages as
1151 * read-behind pages in the current object.
1152 */
1153 if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
1154 return (VM_PAGER_FAIL);
1155
1156 /*
1157 * Clip the readahead and readbehind ranges to exclude resident pages.
1158 */
1159 if (rahead != NULL) {
1160 KASSERT(reqcount - 1 <= maxahead,
1161 ("page count %d extends beyond swap block", reqcount));
1162 *rahead = imin(*rahead, maxahead - (reqcount - 1));
1163 pindex = ma[reqcount - 1]->pindex;
1164 msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
1165 if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
1166 *rahead = msucc->pindex - pindex - 1;
1167 }
1168 if (rbehind != NULL) {
1169 *rbehind = imin(*rbehind, maxbehind);
1170 pindex = ma[0]->pindex;
1171 mpred = TAILQ_PREV(ma[0], pglist, listq);
1172 if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
1173 *rbehind = pindex - mpred->pindex - 1;
1174 }
1175
1176 bm = ma[0];
1177 for (i = 0; i < count; i++)
1178 ma[i]->oflags |= VPO_SWAPINPROG;
1179
1180 /*
1181 * Allocate readahead and readbehind pages.
1182 */
1183 if (rbehind != NULL) {
1184 for (i = 1; i <= *rbehind; i++) {
1185 p = vm_page_alloc(object, ma[0]->pindex - i,
1186 VM_ALLOC_NORMAL);
1187 if (p == NULL)
1188 break;
1189 p->oflags |= VPO_SWAPINPROG;
1190 bm = p;
1191 }
1192 *rbehind = i - 1;
1193 }
1194 if (rahead != NULL) {
1195 for (i = 0; i < *rahead; i++) {
1196 p = vm_page_alloc(object,
1197 ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
1198 if (p == NULL)
1199 break;
1200 p->oflags |= VPO_SWAPINPROG;
1201 }
1202 *rahead = i;
1203 }
1204 if (rbehind != NULL)
1205 count += *rbehind;
1206 if (rahead != NULL)
1207 count += *rahead;
1208
1209 vm_object_pip_add(object, count);
1210
1211 pindex = bm->pindex;
1212 blk = swp_pager_meta_ctl(object, pindex, 0);
1213 KASSERT(blk != SWAPBLK_NONE,
1214 ("no swap blocking containing %p(%jx)", object, (uintmax_t)pindex));
1215
1216 VM_OBJECT_WUNLOCK(object);
1217 bp = getpbuf(&nsw_rcount);
1218 /* Pages cannot leave the object while busy. */
1219 for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
1220 MPASS(p->pindex == bm->pindex + i);
1221 bp->b_pages[i] = p;
1222 }
1223
1224 bp->b_flags |= B_PAGING;
1225 bp->b_iocmd = BIO_READ;
1226 bp->b_iodone = swp_pager_async_iodone;
1227 bp->b_rcred = crhold(thread0.td_ucred);
1228 bp->b_wcred = crhold(thread0.td_ucred);
1229 bp->b_blkno = blk;
1230 bp->b_bcount = PAGE_SIZE * count;
1231 bp->b_bufsize = PAGE_SIZE * count;
1232 bp->b_npages = count;
1233 bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
1234 bp->b_pgafter = rahead != NULL ? *rahead : 0;
1235
1236 VM_CNT_INC(v_swapin);
1237 VM_CNT_ADD(v_swappgsin, count);
1238
1239 /*
1240 * perform the I/O. NOTE!!! bp cannot be considered valid after
1241 * this point because we automatically release it on completion.
1242 * Instead, we look at the one page we are interested in which we
1243 * still hold a lock on even through the I/O completion.
1244 *
1245 * The other pages in our ma[] array are also released on completion,
1246 * so we cannot assume they are valid anymore either.
1247 *
1248 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1249 */
1250 BUF_KERNPROC(bp);
1251 swp_pager_strategy(bp);
1252
1253 /*
1254 * Wait for the pages we want to complete. VPO_SWAPINPROG is always
1255 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1256 * is set in the metadata for each page in the request.
1257 */
1258 VM_OBJECT_WLOCK(object);
1259 while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
1260 ma[0]->oflags |= VPO_SWAPSLEEP;
1261 VM_CNT_INC(v_intrans);
1262 if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
1263 "swread", hz * 20)) {
1264 printf(
1265 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1266 bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
1267 }
1268 }
1269
1270 /*
1271 * If we had an unrecoverable read error pages will not be valid.
1272 */
1273 for (i = 0; i < reqcount; i++)
1274 if (ma[i]->valid != VM_PAGE_BITS_ALL)
1275 return (VM_PAGER_ERROR);
1276
1277 return (VM_PAGER_OK);
1278
1279 /*
1280 * A final note: in a low swap situation, we cannot deallocate swap
1281 * and mark a page dirty here because the caller is likely to mark
1282 * the page clean when we return, causing the page to possibly revert
1283 * to all-zero's later.
1284 */
1285 }
1286
1287 /*
1288 * swap_pager_getpages_async():
1289 *
1290 * Right now this is emulation of asynchronous operation on top of
1291 * swap_pager_getpages().
1292 */
1293 static int
1294 swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
1295 int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
1296 {
1297 int r, error;
1298
1299 r = swap_pager_getpages(object, ma, count, rbehind, rahead);
1300 VM_OBJECT_WUNLOCK(object);
1301 switch (r) {
1302 case VM_PAGER_OK:
1303 error = 0;
1304 break;
1305 case VM_PAGER_ERROR:
1306 error = EIO;
1307 break;
1308 case VM_PAGER_FAIL:
1309 error = EINVAL;
1310 break;
1311 default:
1312 panic("unhandled swap_pager_getpages() error %d", r);
1313 }
1314 (iodone)(arg, ma, count, error);
1315 VM_OBJECT_WLOCK(object);
1316
1317 return (r);
1318 }
1319
1320 /*
1321 * swap_pager_putpages:
1322 *
1323 * Assign swap (if necessary) and initiate I/O on the specified pages.
1324 *
1325 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1326 * are automatically converted to SWAP objects.
1327 *
1328 * In a low memory situation we may block in VOP_STRATEGY(), but the new
1329 * vm_page reservation system coupled with properly written VFS devices
1330 * should ensure that no low-memory deadlock occurs. This is an area
1331 * which needs work.
1332 *
1333 * The parent has N vm_object_pip_add() references prior to
1334 * calling us and will remove references for rtvals[] that are
1335 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1336 * completion.
1337 *
1338 * The parent has soft-busy'd the pages it passes us and will unbusy
1339 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1340 * We need to unbusy the rest on I/O completion.
1341 */
1342 static void
1343 swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
1344 int flags, int *rtvals)
1345 {
1346 int i, n;
1347 boolean_t sync;
1348 daddr_t addr, n_free, s_free;
1349
1350 swp_pager_init_freerange(&s_free, &n_free);
1351 if (count && ma[0]->object != object) {
1352 panic("swap_pager_putpages: object mismatch %p/%p",
1353 object,
1354 ma[0]->object
1355 );
1356 }
1357
1358 /*
1359 * Step 1
1360 *
1361 * Turn object into OBJT_SWAP
1362 * check for bogus sysops
1363 * force sync if not pageout process
1364 */
1365 if (object->type != OBJT_SWAP) {
1366 addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1367 KASSERT(addr == SWAPBLK_NONE,
1368 ("unexpected object swap block"));
1369 }
1370 VM_OBJECT_WUNLOCK(object);
1371
1372 n = 0;
1373 if (curproc != pageproc)
1374 sync = TRUE;
1375 else
1376 sync = (flags & VM_PAGER_PUT_SYNC) != 0;
1377
1378 /*
1379 * Step 2
1380 *
1381 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1382 * The page is left dirty until the pageout operation completes
1383 * successfully.
1384 */
1385 for (i = 0; i < count; i += n) {
1386 int j;
1387 struct buf *bp;
1388 daddr_t blk;
1389
1390 /*
1391 * Maximum I/O size is limited by a number of factors.
1392 */
1393 n = min(BLIST_MAX_ALLOC, count - i);
1394 n = min(n, nsw_cluster_max);
1395
1396 /*
1397 * Get biggest block of swap we can. If we fail, fall
1398 * back and try to allocate a smaller block. Don't go
1399 * overboard trying to allocate space if it would overly
1400 * fragment swap.
1401 */
1402 while (
1403 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1404 n > 4
1405 ) {
1406 n >>= 1;
1407 }
1408 if (blk == SWAPBLK_NONE) {
1409 for (j = 0; j < n; ++j)
1410 rtvals[i+j] = VM_PAGER_FAIL;
1411 continue;
1412 }
1413
1414 /*
1415 * All I/O parameters have been satisfied, build the I/O
1416 * request and assign the swap space.
1417 */
1418 if (sync == TRUE) {
1419 bp = getpbuf(&nsw_wcount_sync);
1420 } else {
1421 bp = getpbuf(&nsw_wcount_async);
1422 bp->b_flags = B_ASYNC;
1423 }
1424 bp->b_flags |= B_PAGING;
1425 bp->b_iocmd = BIO_WRITE;
1426
1427 bp->b_rcred = crhold(thread0.td_ucred);
1428 bp->b_wcred = crhold(thread0.td_ucred);
1429 bp->b_bcount = PAGE_SIZE * n;
1430 bp->b_bufsize = PAGE_SIZE * n;
1431 bp->b_blkno = blk;
1432
1433 VM_OBJECT_WLOCK(object);
1434 for (j = 0; j < n; ++j) {
1435 vm_page_t mreq = ma[i+j];
1436
1437 addr = swp_pager_meta_build(mreq->object, mreq->pindex,
1438 blk + j);
1439 if (addr != SWAPBLK_NONE)
1440 swp_pager_update_freerange(&s_free, &n_free,
1441 addr);
1442 MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
1443 mreq->oflags |= VPO_SWAPINPROG;
1444 bp->b_pages[j] = mreq;
1445 }
1446 VM_OBJECT_WUNLOCK(object);
1447 bp->b_npages = n;
1448 /*
1449 * Must set dirty range for NFS to work.
1450 */
1451 bp->b_dirtyoff = 0;
1452 bp->b_dirtyend = bp->b_bcount;
1453
1454 VM_CNT_INC(v_swapout);
1455 VM_CNT_ADD(v_swappgsout, bp->b_npages);
1456
1457 /*
1458 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
1459 * can call the async completion routine at the end of a
1460 * synchronous I/O operation. Otherwise, our caller would
1461 * perform duplicate unbusy and wakeup operations on the page
1462 * and object, respectively.
1463 */
1464 for (j = 0; j < n; j++)
1465 rtvals[i + j] = VM_PAGER_PEND;
1466
1467 /*
1468 * asynchronous
1469 *
1470 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1471 */
1472 if (sync == FALSE) {
1473 bp->b_iodone = swp_pager_async_iodone;
1474 BUF_KERNPROC(bp);
1475 swp_pager_strategy(bp);
1476 continue;
1477 }
1478
1479 /*
1480 * synchronous
1481 *
1482 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1483 */
1484 bp->b_iodone = bdone;
1485 swp_pager_strategy(bp);
1486
1487 /*
1488 * Wait for the sync I/O to complete.
1489 */
1490 bwait(bp, PVM, "swwrt");
1491
1492 /*
1493 * Now that we are through with the bp, we can call the
1494 * normal async completion, which frees everything up.
1495 */
1496 swp_pager_async_iodone(bp);
1497 }
1498 VM_OBJECT_WLOCK(object);
1499 swp_pager_freeswapspace(s_free, n_free);
1500 }
1501
1502 /*
1503 * swp_pager_async_iodone:
1504 *
1505 * Completion routine for asynchronous reads and writes from/to swap.
1506 * Also called manually by synchronous code to finish up a bp.
1507 *
1508 * This routine may not sleep.
1509 */
1510 static void
1511 swp_pager_async_iodone(struct buf *bp)
1512 {
1513 int i;
1514 vm_object_t object = NULL;
1515
1516 /*
1517 * report error
1518 */
1519 if (bp->b_ioflags & BIO_ERROR) {
1520 printf(
1521 "swap_pager: I/O error - %s failed; blkno %ld,"
1522 "size %ld, error %d\n",
1523 ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1524 (long)bp->b_blkno,
1525 (long)bp->b_bcount,
1526 bp->b_error
1527 );
1528 }
1529
1530 /*
1531 * remove the mapping for kernel virtual
1532 */
1533 if (buf_mapped(bp))
1534 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1535 else
1536 bp->b_data = bp->b_kvabase;
1537
1538 if (bp->b_npages) {
1539 object = bp->b_pages[0]->object;
1540 VM_OBJECT_WLOCK(object);
1541 }
1542
1543 /*
1544 * cleanup pages. If an error occurs writing to swap, we are in
1545 * very serious trouble. If it happens to be a disk error, though,
1546 * we may be able to recover by reassigning the swap later on. So
1547 * in this case we remove the m->swapblk assignment for the page
1548 * but do not free it in the rlist. The erroneous block(s) are thus
1549 * never reallocated as swap. Redirty the page and continue.
1550 */
1551 for (i = 0; i < bp->b_npages; ++i) {
1552 vm_page_t m = bp->b_pages[i];
1553
1554 m->oflags &= ~VPO_SWAPINPROG;
1555 if (m->oflags & VPO_SWAPSLEEP) {
1556 m->oflags &= ~VPO_SWAPSLEEP;
1557 wakeup(&object->paging_in_progress);
1558 }
1559
1560 if (bp->b_ioflags & BIO_ERROR) {
1561 /*
1562 * If an error occurs I'd love to throw the swapblk
1563 * away without freeing it back to swapspace, so it
1564 * can never be used again. But I can't from an
1565 * interrupt.
1566 */
1567 if (bp->b_iocmd == BIO_READ) {
1568 /*
1569 * NOTE: for reads, m->dirty will probably
1570 * be overridden by the original caller of
1571 * getpages so don't play cute tricks here.
1572 */
1573 m->valid = 0;
1574 } else {
1575 /*
1576 * If a write error occurs, reactivate page
1577 * so it doesn't clog the inactive list,
1578 * then finish the I/O.
1579 */
1580 MPASS(m->dirty == VM_PAGE_BITS_ALL);
1581 vm_page_lock(m);
1582 vm_page_activate(m);
1583 vm_page_unlock(m);
1584 vm_page_sunbusy(m);
1585 }
1586 } else if (bp->b_iocmd == BIO_READ) {
1587 /*
1588 * NOTE: for reads, m->dirty will probably be
1589 * overridden by the original caller of getpages so
1590 * we cannot set them in order to free the underlying
1591 * swap in a low-swap situation. I don't think we'd
1592 * want to do that anyway, but it was an optimization
1593 * that existed in the old swapper for a time before
1594 * it got ripped out due to precisely this problem.
1595 */
1596 KASSERT(!pmap_page_is_mapped(m),
1597 ("swp_pager_async_iodone: page %p is mapped", m));
1598 KASSERT(m->dirty == 0,
1599 ("swp_pager_async_iodone: page %p is dirty", m));
1600
1601 m->valid = VM_PAGE_BITS_ALL;
1602 if (i < bp->b_pgbefore ||
1603 i >= bp->b_npages - bp->b_pgafter)
1604 vm_page_readahead_finish(m);
1605 } else {
1606 /*
1607 * For write success, clear the dirty
1608 * status, then finish the I/O ( which decrements the
1609 * busy count and possibly wakes waiters up ).
1610 * A page is only written to swap after a period of
1611 * inactivity. Therefore, we do not expect it to be
1612 * reused.
1613 */
1614 KASSERT(!pmap_page_is_write_mapped(m),
1615 ("swp_pager_async_iodone: page %p is not write"
1616 " protected", m));
1617 vm_page_undirty(m);
1618 vm_page_lock(m);
1619 vm_page_deactivate_noreuse(m);
1620 vm_page_unlock(m);
1621 vm_page_sunbusy(m);
1622 }
1623 }
1624
1625 /*
1626 * adjust pip. NOTE: the original parent may still have its own
1627 * pip refs on the object.
1628 */
1629 if (object != NULL) {
1630 vm_object_pip_wakeupn(object, bp->b_npages);
1631 VM_OBJECT_WUNLOCK(object);
1632 }
1633
1634 /*
1635 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1636 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1637 * trigger a KASSERT in relpbuf().
1638 */
1639 if (bp->b_vp) {
1640 bp->b_vp = NULL;
1641 bp->b_bufobj = NULL;
1642 }
1643 /*
1644 * release the physical I/O buffer
1645 */
1646 relpbuf(
1647 bp,
1648 ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1649 ((bp->b_flags & B_ASYNC) ?
1650 &nsw_wcount_async :
1651 &nsw_wcount_sync
1652 )
1653 )
1654 );
1655 }
1656
1657 int
1658 swap_pager_nswapdev(void)
1659 {
1660
1661 return (nswapdev);
1662 }
1663
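/*
 * Mark a resident, exclusive-busy page dirty, drop the busy lock, and
 * discard its swap block so it no longer references the device being
 * swapped off.
 */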
1664 static void
1665 swp_pager_force_dirty(vm_page_t m)
1666 {
1667
1668 vm_page_dirty(m);
1669 #ifdef INVARIANTS
1670 vm_page_lock(m);
1671 if (!vm_page_wired(m) && m->queue == PQ_NONE)
1672 panic("page %p is neither wired nor queued", m);
1673 vm_page_unlock(m);
1674 #endif
1675 vm_page_xunbusy(m);
1676 swap_pager_unswapped(m);
1677 }
1678
1679 static void
1680 swp_pager_force_launder(vm_page_t m)
1681 {
1682
1683 vm_page_dirty(m);
1684 vm_page_lock(m);
1685 vm_page_launder(m);
1686 vm_page_unlock(m);
1687 vm_page_xunbusy(m);
1688 swap_pager_unswapped(m);
1689 }
1690
1691 /*
1692 * SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
1693 *
1694 * This routine dissociates pages starting at the given index within an
1695 * object from their backing store, paging them in if they do not reside
1696 * in memory. Pages that are paged in are marked dirty and placed in the
1697 * laundry queue. Pages are marked dirty because they no longer have
1698 * backing store. They are placed in the laundry queue because they have
1699 * not been accessed recently. Otherwise, they would already reside in
1700 * memory.
1701 */
1702 static void
1703 swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
1704 {
1705 vm_page_t ma[npages];
1706 int i, j;
1707
1708 KASSERT(npages > 0, ("%s: No pages", __func__));
1709 KASSERT(npages <= MAXPHYS / PAGE_SIZE,
1710 ("%s: Too many pages: %d", __func__, npages));
1711 vm_object_pip_add(object, npages);
1712 vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
1713 for (i = j = 0;; i++) {
1714 /* Count nonresident pages, to page-in all at once. */
1715 if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
1716 continue;
1717 if (j < i) {
1718 /* Page-in nonresident pages. Mark for laundering. */
1719 if (swap_pager_getpages(object, &ma[j], i - j, NULL,
1720 NULL) != VM_PAGER_OK)
1721 panic("%s: read from swap failed", __func__);
1722 do {
1723 swp_pager_force_launder(ma[j]);
1724 } while (++j < i);
1725 }
1726 if (i == npages)
1727 break;
1728 /* Mark dirty a resident page. */
1729 swp_pager_force_dirty(ma[j++]);
1730 }
1731 vm_object_pip_wakeupn(object, npages);
1732 }
1733
1734 /*
1735 * swap_pager_swapoff_object:
1736 *
1737 * Page in all of the pages that have been paged out for an object
1738 * to a swap device.
1739 */
1740 static void
1741 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
1742 {
1743 struct swblk *sb;
1744 vm_pindex_t pi, s_pindex;
1745 daddr_t blk, n_blks, s_blk;
1746 int i;
1747
1748 n_blks = 0;
1749 for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1750 &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
1751 for (i = 0; i < SWAP_META_PAGES; i++) {
1752 blk = sb->d[i];
1753 if (!swp_pager_isondev(blk, sp))
1754 blk = SWAPBLK_NONE;
1755
1756 /*
1757 * If there are no blocks/pages accumulated, start a new
1758 * accumulation here.
1759 */
1760 if (n_blks == 0) {
1761 if (blk != SWAPBLK_NONE) {
1762 s_blk = blk;
1763 s_pindex = sb->p + i;
1764 n_blks = 1;
1765 }
1766 continue;
1767 }
1768
1769 /*
1770 * If the accumulation can be extended without breaking
1771 * the sequence of consecutive blocks and pages that
1772 * swp_pager_force_pagein() depends on, do so.
1773 */
1774 if (n_blks < MAXPHYS / PAGE_SIZE &&
1775 s_blk + n_blks == blk &&
1776 s_pindex + n_blks == sb->p + i) {
1777 ++n_blks;
1778 continue;
1779 }
1780
1781 /*
1782 * The sequence of consecutive blocks and pages cannot
1783 * be extended, so page them all in here. Then,
1784 * because doing so involves releasing and reacquiring
1785 * a lock that protects the swap block pctrie, do not
1786 * rely on the current swap block. Break this loop and
1787 * re-fetch the same pindex from the pctrie again.
1788 */
1789 swp_pager_force_pagein(object, s_pindex, n_blks);
1790 n_blks = 0;
1791 break;
1792 }
1793 if (i == SWAP_META_PAGES)
1794 pi = sb->p + SWAP_META_PAGES;
1795 }
1796 if (n_blks > 0)
1797 swp_pager_force_pagein(object, s_pindex, n_blks);
1798 }
1799
1800 /*
1801 * swap_pager_swapoff:
1802 *
1803 * Page in all of the pages that have been paged out to the
1804 * given device. The corresponding blocks in the bitmap must be
1805 * marked as allocated and the device must be flagged SW_CLOSING.
1806 * There may be no processes swapped out to the device.
1807 *
1808 * This routine may block.
1809 */
1810 static void
1811 swap_pager_swapoff(struct swdevt *sp)
1812 {
1813 vm_object_t object;
1814 int retries;
1815
1816 sx_assert(&swdev_syscall_lock, SA_XLOCKED);
1817
1818 retries = 0;
1819 full_rescan:
1820 mtx_lock(&vm_object_list_mtx);
1821 TAILQ_FOREACH(object, &vm_object_list, object_list) {
1822 if (object->type != OBJT_SWAP)
1823 continue;
1824 mtx_unlock(&vm_object_list_mtx);
1825 /* Depends on type-stability. */
1826 VM_OBJECT_WLOCK(object);
1827
1828 /*
1829 * Dead objects are eventually terminated on their own.
1830 */
1831 if ((object->flags & OBJ_DEAD) != 0)
1832 goto next_obj;
1833
1834 /*
1835 * Sync with fences placed after pctrie
1836 * initialization. We must not access pctrie below
1837 * unless we checked that our object is swap and not
1838 * dead.
1839 */
1840 atomic_thread_fence_acq();
1841 if (object->type != OBJT_SWAP)
1842 goto next_obj;
1843
1844 swap_pager_swapoff_object(sp, object);
1845 next_obj:
1846 VM_OBJECT_WUNLOCK(object);
1847 mtx_lock(&vm_object_list_mtx);
1848 }
1849 mtx_unlock(&vm_object_list_mtx);
1850
1851 if (sp->sw_used) {
1852 /*
1853 * Objects may be locked or paging to the device being
1854 * removed, so we will miss their pages and need to
1855 * make another pass. We have marked this device as
1856 * SW_CLOSING, so the activity should finish soon.
1857 */
1858 retries++;
1859 if (retries > 100) {
1860 panic("swapoff: failed to locate %d swap blocks",
1861 sp->sw_used);
1862 }
1863 pause("swpoff", hz / 20);
1864 goto full_rescan;
1865 }
1866 EVENTHANDLER_INVOKE(swapoff, sp);
1867 }
1868
1869 /************************************************************************
1870 * SWAP META DATA *
1871 ************************************************************************
1872 *
1873 * These routines manipulate the swap metadata stored in the
1874 * OBJT_SWAP object.
1875 *
1876  *	Swap metadata is implemented as a per-object radix tree (pctrie)
1877  *	of struct swblk, rooted at un_pager.swp.swp_blks; each swblk maps
1878  *	SWAP_META_PAGES consecutive page indices to swap block addresses.
1879 */
1880
1881 /*
1882 * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
1883 */
1884 static bool
1885 swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
1886 {
1887 int i;
1888
1889 MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
1890 for (i = start; i < limit; i++) {
1891 if (sb->d[i] != SWAPBLK_NONE)
1892 return (false);
1893 }
1894 return (true);
1895 }
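/*
 * Usage note (from the callers visible below): swp_pager_swblk_empty() is
 * used both to decide whether a whole swblk can be returned to swblk_zone
 * (start == 0, limit == SWAP_META_PAGES) and, in swp_pager_meta_free(), to
 * check only the slots outside the range being freed ([0, start) and
 * [limit, SWAP_META_PAGES)).
 */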
1896
1897 /*
1898 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
1899 *
1900 * We first convert the object to a swap object if it is a default
1901 * object.
1902 *
1903 * The specified swapblk is added to the object's swap metadata. If
1904 * the swapblk is not valid, it is freed instead. Any previously
1905 * assigned swapblk is returned.
1906 */
1907 static daddr_t
1908 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
1909 {
1910 static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
1911 struct swblk *sb, *sb1;
1912 vm_pindex_t modpi, rdpi;
1913 daddr_t prev_swapblk;
1914 int error, i;
1915
1916 VM_OBJECT_ASSERT_WLOCKED(object);
1917
1918 /*
1919 * Convert default object to swap object if necessary
1920 */
1921 if (object->type != OBJT_SWAP) {
1922 pctrie_init(&object->un_pager.swp.swp_blks);
1923
1924 /*
1925 * Ensure that swap_pager_swapoff()'s iteration over
1926 * object_list does not see a garbage pctrie.
1927 */
1928 atomic_thread_fence_rel();
1929
1930 object->type = OBJT_SWAP;
1931 object->un_pager.swp.writemappings = 0;
1932 KASSERT(object->handle == NULL, ("default pager with handle"));
1933 }
1934
1935 rdpi = rounddown(pindex, SWAP_META_PAGES);
1936 sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
1937 if (sb == NULL) {
1938 if (swapblk == SWAPBLK_NONE)
1939 return (SWAPBLK_NONE);
1940 for (;;) {
1941 sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
1942 pageproc ? M_USE_RESERVE : 0));
1943 if (sb != NULL) {
1944 sb->p = rdpi;
1945 for (i = 0; i < SWAP_META_PAGES; i++)
1946 sb->d[i] = SWAPBLK_NONE;
1947 if (atomic_cmpset_int(&swblk_zone_exhausted,
1948 1, 0))
1949 printf("swblk zone ok\n");
1950 break;
1951 }
1952 VM_OBJECT_WUNLOCK(object);
1953 if (uma_zone_exhausted(swblk_zone)) {
1954 if (atomic_cmpset_int(&swblk_zone_exhausted,
1955 0, 1))
1956 printf("swap blk zone exhausted, "
1957 "increase kern.maxswzone\n");
1958 vm_pageout_oom(VM_OOM_SWAPZ);
1959 pause("swzonxb", 10);
1960 } else
1961 uma_zwait(swblk_zone);
1962 VM_OBJECT_WLOCK(object);
1963 sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1964 rdpi);
1965 if (sb != NULL)
1966 /*
1967 * Somebody swapped out a nearby page,
1968 * allocating swblk at the rdpi index,
1969 * while we dropped the object lock.
1970 */
1971 goto allocated;
1972 }
1973 for (;;) {
1974 error = SWAP_PCTRIE_INSERT(
1975 &object->un_pager.swp.swp_blks, sb);
1976 if (error == 0) {
1977 if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1978 1, 0))
1979 printf("swpctrie zone ok\n");
1980 break;
1981 }
1982 VM_OBJECT_WUNLOCK(object);
1983 if (uma_zone_exhausted(swpctrie_zone)) {
1984 if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1985 0, 1))
1986 printf("swap pctrie zone exhausted, "
1987 "increase kern.maxswzone\n");
1988 vm_pageout_oom(VM_OOM_SWAPZ);
1989 pause("swzonxp", 10);
1990 } else
1991 uma_zwait(swpctrie_zone);
1992 VM_OBJECT_WLOCK(object);
1993 sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1994 rdpi);
1995 if (sb1 != NULL) {
1996 uma_zfree(swblk_zone, sb);
1997 sb = sb1;
1998 goto allocated;
1999 }
2000 }
2001 }
2002 allocated:
2003 MPASS(sb->p == rdpi);
2004
2005 modpi = pindex % SWAP_META_PAGES;
2006 /* Return prior contents of metadata. */
2007 prev_swapblk = sb->d[modpi];
2008 /* Enter block into metadata. */
2009 sb->d[modpi] = swapblk;
2010
2011 /*
2012 	 * Free the swblk if we end up with an empty page run.
2013 */
2014 if (swapblk == SWAPBLK_NONE &&
2015 swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
2016 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, rdpi);
2017 uma_zfree(swblk_zone, sb);
2018 }
2019 return (prev_swapblk);
2020 }
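/*
 * Hedged usage sketch (not part of the original file): a caller that assigns
 * a new block typically releases whatever the page index previously mapped,
 * along the lines of:
 *
 *	daddr_t prev;
 *
 *	prev = swp_pager_meta_build(object, pindex, blk);
 *	if (prev != SWAPBLK_NONE)
 *		swp_pager_update_freerange(&s_free, &n_free, prev);
 *
 * Passing SWAPBLK_NONE as swapblk instead clears the assignment, and the
 * swblk itself is recycled once every one of its slots is SWAPBLK_NONE.
 */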
2021
2022 /*
2023 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2024 *
2025 * The requested range of blocks is freed, with any associated swap
2026 * returned to the swap bitmap.
2027 *
2028 * This routine will free swap metadata structures as they are cleaned
2029 * out. This routine does *NOT* operate on swap metadata associated
2030 * with resident pages.
2031 */
2032 static void
2033 swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
2034 {
2035 struct swblk *sb;
2036 daddr_t n_free, s_free;
2037 vm_pindex_t last;
2038 int i, limit, start;
2039
2040 VM_OBJECT_ASSERT_WLOCKED(object);
2041 if (object->type != OBJT_SWAP || count == 0)
2042 return;
2043
2044 swp_pager_init_freerange(&s_free, &n_free);
2045 last = pindex + count;
2046 for (;;) {
2047 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2048 rounddown(pindex, SWAP_META_PAGES));
2049 if (sb == NULL || sb->p >= last)
2050 break;
2051 start = pindex > sb->p ? pindex - sb->p : 0;
2052 limit = last - sb->p < SWAP_META_PAGES ? last - sb->p :
2053 SWAP_META_PAGES;
2054 for (i = start; i < limit; i++) {
2055 if (sb->d[i] == SWAPBLK_NONE)
2056 continue;
2057 swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
2058 sb->d[i] = SWAPBLK_NONE;
2059 }
2060 pindex = sb->p + SWAP_META_PAGES;
2061 if (swp_pager_swblk_empty(sb, 0, start) &&
2062 swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
2063 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
2064 sb->p);
2065 uma_zfree(swblk_zone, sb);
2066 }
2067 }
2068 swp_pager_freeswapspace(s_free, n_free);
2069 }
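/*
 * A hedged note on the free-range pair used above: s_free/n_free accumulate a
 * run of consecutive device blocks (started by swp_pager_init_freerange() and
 * extended by swp_pager_update_freerange()) so that swp_pager_freeswapspace()
 * can hand them back to the blist in batches rather than one block at a time.
 */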
2070
2071 /*
2072 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2073 *
2074 * This routine locates and destroys all swap metadata associated with
2075 * an object.
2076 */
2077 static void
2078 swp_pager_meta_free_all(vm_object_t object)
2079 {
2080 struct swblk *sb;
2081 daddr_t n_free, s_free;
2082 vm_pindex_t pindex;
2083 int i;
2084
2085 VM_OBJECT_ASSERT_WLOCKED(object);
2086 if (object->type != OBJT_SWAP)
2087 return;
2088
2089 swp_pager_init_freerange(&s_free, &n_free);
2090 for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
2091 &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
2092 pindex = sb->p + SWAP_META_PAGES;
2093 for (i = 0; i < SWAP_META_PAGES; i++) {
2094 if (sb->d[i] == SWAPBLK_NONE)
2095 continue;
2096 swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
2097 }
2098 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
2099 uma_zfree(swblk_zone, sb);
2100 }
2101 swp_pager_freeswapspace(s_free, n_free);
2102 }
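/*
 * Hedged note: swp_pager_meta_free_all() is presumably reached from the
 * pager's dealloc path when an OBJT_SWAP object is destroyed; it walks the
 * pctrie from index 0, returns every assigned device block to the swap
 * bitmap, and frees each swblk back to swblk_zone.
 */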
2103
2104 /*
2105 * SWP_PAGER_METACTL() - misc control of swap meta data.
2106 *
2107  *	This routine looks up or removes swapblk assignments in the swap
2108  *	metadata.  It returns the swapblk being looked up or popped, or
2109  *	SWAPBLK_NONE if no valid block was assigned.
2110 *
2111 * When acting on a busy resident page and paging is in progress, we
2112 * have to wait until paging is complete but otherwise can act on the
2113 * busy page.
2114 *
2115 * SWM_POP remove from meta data but do not free it
2116 */
2117 static daddr_t
2118 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
2119 {
2120 struct swblk *sb;
2121 daddr_t r1;
2122
2123 if ((flags & SWM_POP) != 0)
2124 VM_OBJECT_ASSERT_WLOCKED(object);
2125 else
2126 VM_OBJECT_ASSERT_LOCKED(object);
2127
2128 /*
2129 * The meta data only exists if the object is OBJT_SWAP
2130 * and even then might not be allocated yet.
2131 */
2132 if (object->type != OBJT_SWAP)
2133 return (SWAPBLK_NONE);
2134
2135 sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2136 rounddown(pindex, SWAP_META_PAGES));
2137 if (sb == NULL)
2138 return (SWAPBLK_NONE);
2139 r1 = sb->d[pindex % SWAP_META_PAGES];
2140 if (r1 == SWAPBLK_NONE)
2141 return (SWAPBLK_NONE);
2142 if ((flags & SWM_POP) != 0) {
2143 sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
2144 if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
2145 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
2146 rounddown(pindex, SWAP_META_PAGES));
2147 uma_zfree(swblk_zone, sb);
2148 }
2149 }
2150 return (r1);
2151 }
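/*
 * Usage note: with flags == 0 this is a pure lookup and only requires the
 * object to be read-locked; with SWM_POP the assignment is removed from the
 * metadata (write lock required) but the device blocks are not released here,
 * which is presumably left to the caller, e.g. via swp_pager_freeswapspace().
 */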
2152
2153 /*
2154 * Returns the least page index which is greater than or equal to the
2155 * parameter pindex and for which there is a swap block allocated.
2156 * Returns object's size if the object's type is not swap or if there
2157 * are no allocated swap blocks for the object after the requested
2158 * pindex.
2159 */
2160 vm_pindex_t
2161 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
2162 {
2163 struct swblk *sb;
2164 int i;
2165
2166 VM_OBJECT_ASSERT_LOCKED(object);
2167 if (object->type != OBJT_SWAP)
2168 return (object->size);
2169
2170 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2171 rounddown(pindex, SWAP_META_PAGES));
2172 if (sb == NULL)
2173 return (object->size);
2174 if (sb->p < pindex) {
2175 for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
2176 if (sb->d[i] != SWAPBLK_NONE)
2177 return (sb->p + i);
2178 }
2179 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2180 roundup(pindex, SWAP_META_PAGES));
2181 if (sb == NULL)
2182 return (object->size);
2183 }
2184 for (i = 0; i < SWAP_META_PAGES; i++) {
2185 if (sb->d[i] != SWAPBLK_NONE)
2186 return (sb->p + i);
2187 }
2188
2189 /*
2190 * We get here if a swblk is present in the trie but it
2191 * doesn't map any blocks.
2192 */
2193 MPASS(0);
2194 return (object->size);
2195 }
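/*
 * Hedged usage sketch (not part of the original file): iterating every page
 * index of an object that has a swap block assigned, with the object lock
 * held throughout:
 *
 *	vm_pindex_t pi;
 *
 *	for (pi = swap_pager_find_least(object, 0); pi < object->size;
 *	    pi = swap_pager_find_least(object, pi + 1)) {
 *		... pi has swap assigned ...
 *	}
 */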
2196
2197 /*
2198 * System call swapon(name) enables swapping on device name,
2199 * which must be in the swdevsw. Return EBUSY
2200 * if already swapping on this device.
2201 */
2202 #ifndef _SYS_SYSPROTO_H_
2203 struct swapon_args {
2204 char *name;
2205 };
2206 #endif
2207
2208 /*
2209 * MPSAFE
2210 */
2211 /* ARGSUSED */
2212 int
2213 sys_swapon(struct thread *td, struct swapon_args *uap)
2214 {
2215 struct vattr attr;
2216 struct vnode *vp;
2217 struct nameidata nd;
2218 int error;
2219
2220 error = priv_check(td, PRIV_SWAPON);
2221 if (error)
2222 return (error);
2223
2224 sx_xlock(&swdev_syscall_lock);
2225
2226 /*
2227 * Swap metadata may not fit in the KVM if we have physical
2228 * memory of >1GB.
2229 */
2230 if (swblk_zone == NULL) {
2231 error = ENOMEM;
2232 goto done;
2233 }
2234
2235 NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
2236 uap->name, td);
2237 error = namei(&nd);
2238 if (error)
2239 goto done;
2240
2241 NDFREE(&nd, NDF_ONLY_PNBUF);
2242 vp = nd.ni_vp;
2243
2244 if (vn_isdisk(vp, &error)) {
2245 error = swapongeom(vp);
2246 } else if (vp->v_type == VREG &&
2247 (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2248 (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2249 /*
2250 * Allow direct swapping to NFS regular files in the same
2251 * way that nfs_mountroot() sets up diskless swapping.
2252 */
2253 error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2254 }
2255
2256 if (error)
2257 vrele(vp);
2258 done:
2259 sx_xunlock(&swdev_syscall_lock);
2260 return (error);
2261 }
2262
2263 /*
2264 * Check that the total amount of swap currently configured does not
2265 * exceed half the theoretical maximum. If it does, print a warning
2266 * message.
2267 */
2268 static void
2269 swapon_check_swzone(void)
2270 {
2271
2272 /* recommend using no more than half that amount */
2273 if (swap_total > swap_maxpages / 2) {
2274 printf("warning: total configured swap (%lu pages) "
2275 "exceeds maximum recommended amount (%lu pages).\n",
2276 swap_total, swap_maxpages / 2);
2277 printf("warning: increase kern.maxswzone "
2278 "or reduce amount of swap.\n");
2279 }
2280 }
2281
2282 static void
2283 swaponsomething(struct vnode *vp, void *id, u_long nblks,
2284 sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2285 {
2286 struct swdevt *sp, *tsp;
2287 daddr_t dvbase;
2288 u_long mblocks;
2289
2290 /*
2291 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2292 * First chop nblks off to page-align it, then convert.
2293 *
2294 * sw->sw_nblks is in page-sized chunks now too.
2295 */
2296 nblks &= ~(ctodb(1) - 1);
2297 nblks = dbtoc(nblks);
2298
2299 /*
2300 * If we go beyond this, we get overflows in the radix
2301 * tree bitmap code.
2302 */
2303 mblocks = 0x40000000 / BLIST_META_RADIX;
2304 if (nblks > mblocks) {
2305 printf(
2306 "WARNING: reducing swap size to maximum of %luMB per unit\n",
2307 mblocks / 1024 / 1024 * PAGE_SIZE);
2308 nblks = mblocks;
2309 }
2310
2311 sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2312 sp->sw_vp = vp;
2313 sp->sw_id = id;
2314 sp->sw_dev = dev;
2315 sp->sw_nblks = nblks;
2316 sp->sw_used = 0;
2317 sp->sw_strategy = strategy;
2318 sp->sw_close = close;
2319 sp->sw_flags = flags;
2320
2321 sp->sw_blist = blist_create(nblks, M_WAITOK);
2322 /*
2323 * Do not free the first blocks in order to avoid overwriting
2324 	 * any BSD disklabel at the front of the partition.
2325 */
2326 blist_free(sp->sw_blist, howmany(BBSIZE, PAGE_SIZE),
2327 nblks - howmany(BBSIZE, PAGE_SIZE));
2328
2329 dvbase = 0;
2330 mtx_lock(&sw_dev_mtx);
2331 TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2332 if (tsp->sw_end >= dvbase) {
2333 /*
2334 * We put one uncovered page between the devices
2335 * in order to definitively prevent any cross-device
2336 * I/O requests
2337 */
2338 dvbase = tsp->sw_end + 1;
2339 }
2340 }
2341 sp->sw_first = dvbase;
2342 sp->sw_end = dvbase + nblks;
2343 TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2344 nswapdev++;
2345 swap_pager_avail += nblks - howmany(BBSIZE, PAGE_SIZE);
2346 swap_total += nblks;
2347 swapon_check_swzone();
2348 swp_sizecheck();
2349 mtx_unlock(&sw_dev_mtx);
2350 EVENTHANDLER_INVOKE(swapon, sp);
2351 }
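/*
 * A hedged arithmetic example for the conversion at the top of
 * swaponsomething(): assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512,
 * ctodb(1) == 8, so "nblks &= ~(ctodb(1) - 1)" rounds the sector count down
 * to a whole number of pages and dbtoc(nblks) then divides by 8 to get pages.
 * With the historical BBSIZE of 8192 bytes, the first howmany(BBSIZE,
 * PAGE_SIZE) == 2 pages are left allocated so a disklabel at the start of the
 * partition is never overwritten.
 */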
2352
2353 /*
2354 * SYSCALL: swapoff(devname)
2355 *
2356 * Disable swapping on the given device.
2357 *
2358 * XXX: Badly designed system call: it should use a device index
2359 * rather than filename as specification. We keep sw_vp around
2360 * only to make this work.
2361 */
2362 #ifndef _SYS_SYSPROTO_H_
2363 struct swapoff_args {
2364 char *name;
2365 };
2366 #endif
2367
2368 /*
2369 * MPSAFE
2370 */
2371 /* ARGSUSED */
2372 int
2373 sys_swapoff(struct thread *td, struct swapoff_args *uap)
2374 {
2375 struct vnode *vp;
2376 struct nameidata nd;
2377 struct swdevt *sp;
2378 int error;
2379
2380 error = priv_check(td, PRIV_SWAPOFF);
2381 if (error)
2382 return (error);
2383
2384 sx_xlock(&swdev_syscall_lock);
2385
2386 NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2387 td);
2388 error = namei(&nd);
2389 if (error)
2390 goto done;
2391 NDFREE(&nd, NDF_ONLY_PNBUF);
2392 vp = nd.ni_vp;
2393
2394 mtx_lock(&sw_dev_mtx);
2395 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2396 if (sp->sw_vp == vp)
2397 break;
2398 }
2399 mtx_unlock(&sw_dev_mtx);
2400 if (sp == NULL) {
2401 error = EINVAL;
2402 goto done;
2403 }
2404 error = swapoff_one(sp, td->td_ucred);
2405 done:
2406 sx_xunlock(&swdev_syscall_lock);
2407 return (error);
2408 }
2409
2410 static int
2411 swapoff_one(struct swdevt *sp, struct ucred *cred)
2412 {
2413 u_long nblks;
2414 #ifdef MAC
2415 int error;
2416 #endif
2417
2418 sx_assert(&swdev_syscall_lock, SA_XLOCKED);
2419 #ifdef MAC
2420 (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
2421 error = mac_system_check_swapoff(cred, sp->sw_vp);
2422 (void) VOP_UNLOCK(sp->sw_vp, 0);
2423 if (error != 0)
2424 return (error);
2425 #endif
2426 nblks = sp->sw_nblks;
2427
2428 /*
2429 * We can turn off this swap device safely only if the
2430 * available virtual memory in the system will fit the amount
2431 * of data we will have to page back in, plus an epsilon so
2432 * the system doesn't become critically low on swap space.
2433 */
2434 if (vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
2435 return (ENOMEM);
2436
2437 /*
2438 * Prevent further allocations on this device.
2439 */
2440 mtx_lock(&sw_dev_mtx);
2441 sp->sw_flags |= SW_CLOSING;
2442 swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
2443 swap_total -= nblks;
2444 mtx_unlock(&sw_dev_mtx);
2445
2446 /*
2447 * Page in the contents of the device and close it.
2448 */
2449 swap_pager_swapoff(sp);
2450
2451 sp->sw_close(curthread, sp);
2452 mtx_lock(&sw_dev_mtx);
2453 sp->sw_id = NULL;
2454 TAILQ_REMOVE(&swtailq, sp, sw_list);
2455 nswapdev--;
2456 if (nswapdev == 0) {
2457 swap_pager_full = 2;
2458 swap_pager_almost_full = 1;
2459 }
2460 if (swdevhd == sp)
2461 swdevhd = NULL;
2462 mtx_unlock(&sw_dev_mtx);
2463 blist_destroy(sp->sw_blist);
2464 free(sp, M_VMPGDATA);
2465 return (0);
2466 }
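/*
 * Hedged worked example for the check above: turning off a device holding
 * 1 GiB of swap (262144 pages at 4 KiB each) succeeds only if
 * vm_free_count() + swap_pager_avail is at least 262144 + nswap_lowat pages;
 * otherwise swapoff_one() returns ENOMEM before marking the device
 * SW_CLOSING.
 */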
2467
2468 void
2469 swapoff_all(void)
2470 {
2471 struct swdevt *sp, *spt;
2472 const char *devname;
2473 int error;
2474
2475 sx_xlock(&swdev_syscall_lock);
2476
2477 mtx_lock(&sw_dev_mtx);
2478 TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
2479 mtx_unlock(&sw_dev_mtx);
2480 if (vn_isdisk(sp->sw_vp, NULL))
2481 devname = devtoname(sp->sw_vp->v_rdev);
2482 else
2483 devname = "[file]";
2484 error = swapoff_one(sp, thread0.td_ucred);
2485 if (error != 0) {
2486 printf("Cannot remove swap device %s (error=%d), "
2487 "skipping.\n", devname, error);
2488 } else if (bootverbose) {
2489 printf("Swap device %s removed.\n", devname);
2490 }
2491 mtx_lock(&sw_dev_mtx);
2492 }
2493 mtx_unlock(&sw_dev_mtx);
2494
2495 sx_xunlock(&swdev_syscall_lock);
2496 }
2497
2498 void
2499 swap_pager_status(int *total, int *used)
2500 {
2501 struct swdevt *sp;
2502
2503 *total = 0;
2504 *used = 0;
2505 mtx_lock(&sw_dev_mtx);
2506 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2507 *total += sp->sw_nblks;
2508 *used += sp->sw_used;
2509 }
2510 mtx_unlock(&sw_dev_mtx);
2511 }
2512
2513 int
2514 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2515 {
2516 struct swdevt *sp;
2517 const char *tmp_devname;
2518 int error, n;
2519
2520 n = 0;
2521 error = ENOENT;
2522 mtx_lock(&sw_dev_mtx);
2523 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2524 if (n != name) {
2525 n++;
2526 continue;
2527 }
2528 xs->xsw_version = XSWDEV_VERSION;
2529 xs->xsw_dev = sp->sw_dev;
2530 xs->xsw_flags = sp->sw_flags;
2531 xs->xsw_nblks = sp->sw_nblks;
2532 xs->xsw_used = sp->sw_used;
2533 if (devname != NULL) {
2534 if (vn_isdisk(sp->sw_vp, NULL))
2535 tmp_devname = devtoname(sp->sw_vp->v_rdev);
2536 else
2537 tmp_devname = "[file]";
2538 strncpy(devname, tmp_devname, len);
2539 }
2540 error = 0;
2541 break;
2542 }
2543 mtx_unlock(&sw_dev_mtx);
2544 return (error);
2545 }
2546
2547 #if defined(COMPAT_FREEBSD11)
2548 #define XSWDEV_VERSION_11 1
2549 struct xswdev11 {
2550 u_int xsw_version;
2551 uint32_t xsw_dev;
2552 int xsw_flags;
2553 int xsw_nblks;
2554 int xsw_used;
2555 };
2556 #endif
2557
2558 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2559 struct xswdev32 {
2560 u_int xsw_version;
2561 u_int xsw_dev1, xsw_dev2;
2562 int xsw_flags;
2563 int xsw_nblks;
2564 int xsw_used;
2565 };
2566 #endif
2567
2568 static int
2569 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2570 {
2571 struct xswdev xs;
2572 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2573 struct xswdev32 xs32;
2574 #endif
2575 #if defined(COMPAT_FREEBSD11)
2576 struct xswdev11 xs11;
2577 #endif
2578 int error;
2579
2580 if (arg2 != 1) /* name length */
2581 return (EINVAL);
2582 error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2583 if (error != 0)
2584 return (error);
2585 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2586 if (req->oldlen == sizeof(xs32)) {
2587 xs32.xsw_version = XSWDEV_VERSION;
2588 xs32.xsw_dev1 = xs.xsw_dev;
2589 xs32.xsw_dev2 = xs.xsw_dev >> 32;
2590 xs32.xsw_flags = xs.xsw_flags;
2591 xs32.xsw_nblks = xs.xsw_nblks;
2592 xs32.xsw_used = xs.xsw_used;
2593 error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
2594 return (error);
2595 }
2596 #endif
2597 #if defined(COMPAT_FREEBSD11)
2598 if (req->oldlen == sizeof(xs11)) {
2599 xs11.xsw_version = XSWDEV_VERSION_11;
2600 xs11.xsw_dev = xs.xsw_dev; /* truncation */
2601 xs11.xsw_flags = xs.xsw_flags;
2602 xs11.xsw_nblks = xs.xsw_nblks;
2603 xs11.xsw_used = xs.xsw_used;
2604 error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
2605 return (error);
2606 }
2607 #endif
2608 error = SYSCTL_OUT(req, &xs, sizeof(xs));
2609 return (error);
2610 }
2611
2612 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2613 "Number of swap devices");
2614 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
2615 sysctl_vm_swap_info,
2616 "Swap statistics by device");
2617
2618 /*
2619  * Count the approximate swap usage in pages for a vmspace.  Swap
2620  * blocks that are shadowed or not yet copied on write are not counted.
2621 * The map must be locked.
2622 */
2623 long
2624 vmspace_swap_count(struct vmspace *vmspace)
2625 {
2626 vm_map_t map;
2627 vm_map_entry_t cur;
2628 vm_object_t object;
2629 struct swblk *sb;
2630 vm_pindex_t e, pi;
2631 long count;
2632 int i;
2633
2634 map = &vmspace->vm_map;
2635 count = 0;
2636
2637 for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2638 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2639 continue;
2640 object = cur->object.vm_object;
2641 if (object == NULL || object->type != OBJT_SWAP)
2642 continue;
2643 VM_OBJECT_RLOCK(object);
2644 if (object->type != OBJT_SWAP)
2645 goto unlock;
2646 pi = OFF_TO_IDX(cur->offset);
2647 e = pi + OFF_TO_IDX(cur->end - cur->start);
2648 for (;; pi = sb->p + SWAP_META_PAGES) {
2649 sb = SWAP_PCTRIE_LOOKUP_GE(
2650 &object->un_pager.swp.swp_blks, pi);
2651 if (sb == NULL || sb->p >= e)
2652 break;
2653 for (i = 0; i < SWAP_META_PAGES; i++) {
2654 if (sb->p + i < e &&
2655 sb->d[i] != SWAPBLK_NONE)
2656 count++;
2657 }
2658 }
2659 unlock:
2660 VM_OBJECT_RUNLOCK(object);
2661 }
2662 return (count);
2663 }
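/*
 * Note: the count is approximate by design.  Only the top-level object of
 * each map entry is examined, so blocks still held by backing (shadowed)
 * objects are not included, and the "sb->p + i < e" clip keeps partially
 * covered swblks from over-counting past the end of the entry.
 */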
2664
2665 /*
2666 * GEOM backend
2667 *
2668 * Swapping onto disk devices.
2669 *
2670 */
2671
2672 static g_orphan_t swapgeom_orphan;
2673
2674 static struct g_class g_swap_class = {
2675 .name = "SWAP",
2676 .version = G_VERSION,
2677 .orphan = swapgeom_orphan,
2678 };
2679
2680 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2681
2682
2683 static void
2684 swapgeom_close_ev(void *arg, int flags)
2685 {
2686 struct g_consumer *cp;
2687
2688 cp = arg;
2689 g_access(cp, -1, -1, 0);
2690 g_detach(cp);
2691 g_destroy_consumer(cp);
2692 }
2693
2694 /*
2695 * Add a reference to the g_consumer for an inflight transaction.
2696 */
2697 static void
2698 swapgeom_acquire(struct g_consumer *cp)
2699 {
2700
2701 mtx_assert(&sw_dev_mtx, MA_OWNED);
2702 cp->index++;
2703 }
2704
2705 /*
2706 * Remove a reference from the g_consumer. Post a close event if all
2707 * references go away, since the function might be called from the
2708 * biodone context.
2709 */
2710 static void
2711 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
2712 {
2713
2714 mtx_assert(&sw_dev_mtx, MA_OWNED);
2715 cp->index--;
2716 if (cp->index == 0) {
2717 if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
2718 sp->sw_id = NULL;
2719 }
2720 }
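/*
 * A note on the cp->index reference count used by the two helpers above:
 * the consumer starts at 1 (see swapongeom_locked(), "plus one for being
 * active"), every in-flight bio takes a reference via swapgeom_acquire(),
 * and the last swapgeom_release() queues swapgeom_close_ev() with
 * g_post_event() because it may run from biodone context, where GEOM
 * topology changes are not allowed.
 */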
2721
2722 static void
2723 swapgeom_done(struct bio *bp2)
2724 {
2725 struct swdevt *sp;
2726 struct buf *bp;
2727 struct g_consumer *cp;
2728
2729 bp = bp2->bio_caller2;
2730 cp = bp2->bio_from;
2731 bp->b_ioflags = bp2->bio_flags;
2732 if (bp2->bio_error)
2733 bp->b_ioflags |= BIO_ERROR;
2734 bp->b_resid = bp->b_bcount - bp2->bio_completed;
2735 bp->b_error = bp2->bio_error;
2736 bufdone(bp);
2737 sp = bp2->bio_caller1;
2738 mtx_lock(&sw_dev_mtx);
2739 swapgeom_release(cp, sp);
2740 mtx_unlock(&sw_dev_mtx);
2741 g_destroy_bio(bp2);
2742 }
2743
2744 static void
2745 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2746 {
2747 struct bio *bio;
2748 struct g_consumer *cp;
2749
2750 mtx_lock(&sw_dev_mtx);
2751 cp = sp->sw_id;
2752 if (cp == NULL) {
2753 mtx_unlock(&sw_dev_mtx);
2754 bp->b_error = ENXIO;
2755 bp->b_ioflags |= BIO_ERROR;
2756 bufdone(bp);
2757 return;
2758 }
2759 swapgeom_acquire(cp);
2760 mtx_unlock(&sw_dev_mtx);
2761 if (bp->b_iocmd == BIO_WRITE)
2762 bio = g_new_bio();
2763 else
2764 bio = g_alloc_bio();
2765 if (bio == NULL) {
2766 mtx_lock(&sw_dev_mtx);
2767 swapgeom_release(cp, sp);
2768 mtx_unlock(&sw_dev_mtx);
2769 bp->b_error = ENOMEM;
2770 bp->b_ioflags |= BIO_ERROR;
2771 bufdone(bp);
2772 return;
2773 }
2774
2775 bio->bio_caller1 = sp;
2776 bio->bio_caller2 = bp;
2777 bio->bio_cmd = bp->b_iocmd;
2778 bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2779 bio->bio_length = bp->b_bcount;
2780 bio->bio_done = swapgeom_done;
2781 if (!buf_mapped(bp)) {
2782 bio->bio_ma = bp->b_pages;
2783 bio->bio_data = unmapped_buf;
2784 bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
2785 bio->bio_ma_n = bp->b_npages;
2786 bio->bio_flags |= BIO_UNMAPPED;
2787 } else {
2788 bio->bio_data = bp->b_data;
2789 bio->bio_ma = NULL;
2790 }
2791 g_io_request(bio, cp);
2792 return;
2793 }
2794
2795 static void
2796 swapgeom_orphan(struct g_consumer *cp)
2797 {
2798 struct swdevt *sp;
2799 int destroy;
2800
2801 mtx_lock(&sw_dev_mtx);
2802 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2803 if (sp->sw_id == cp) {
2804 sp->sw_flags |= SW_CLOSING;
2805 break;
2806 }
2807 }
2808 /*
2809 	 * Drop the reference we were created with.  Do it directly, since we're
2810 	 * in a special context where we don't have to queue the call to
2811 * swapgeom_close_ev().
2812 */
2813 cp->index--;
2814 destroy = ((sp != NULL) && (cp->index == 0));
2815 if (destroy)
2816 sp->sw_id = NULL;
2817 mtx_unlock(&sw_dev_mtx);
2818 if (destroy)
2819 swapgeom_close_ev(cp, 0);
2820 }
2821
2822 static void
2823 swapgeom_close(struct thread *td, struct swdevt *sw)
2824 {
2825 struct g_consumer *cp;
2826
2827 mtx_lock(&sw_dev_mtx);
2828 cp = sw->sw_id;
2829 sw->sw_id = NULL;
2830 mtx_unlock(&sw_dev_mtx);
2831
2832 /*
2833 * swapgeom_close() may be called from the biodone context,
2834 * where we cannot perform topology changes. Delegate the
2835 * work to the events thread.
2836 */
2837 if (cp != NULL)
2838 g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2839 }
2840
2841 static int
2842 swapongeom_locked(struct cdev *dev, struct vnode *vp)
2843 {
2844 struct g_provider *pp;
2845 struct g_consumer *cp;
2846 static struct g_geom *gp;
2847 struct swdevt *sp;
2848 u_long nblks;
2849 int error;
2850
2851 pp = g_dev_getprovider(dev);
2852 if (pp == NULL)
2853 return (ENODEV);
2854 mtx_lock(&sw_dev_mtx);
2855 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2856 cp = sp->sw_id;
2857 if (cp != NULL && cp->provider == pp) {
2858 mtx_unlock(&sw_dev_mtx);
2859 return (EBUSY);
2860 }
2861 }
2862 mtx_unlock(&sw_dev_mtx);
2863 if (gp == NULL)
2864 gp = g_new_geomf(&g_swap_class, "swap");
2865 cp = g_new_consumer(gp);
2866 cp->index = 1; /* Number of active I/Os, plus one for being active. */
2867 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2868 g_attach(cp, pp);
2869 /*
2870 * XXX: Every time you think you can improve the margin for
2871 * footshooting, somebody depends on the ability to do so:
2872 * savecore(8) wants to write to our swapdev so we cannot
2873 * set an exclusive count :-(
2874 */
2875 error = g_access(cp, 1, 1, 0);
2876 if (error != 0) {
2877 g_detach(cp);
2878 g_destroy_consumer(cp);
2879 return (error);
2880 }
2881 nblks = pp->mediasize / DEV_BSIZE;
2882 swaponsomething(vp, cp, nblks, swapgeom_strategy,
2883 swapgeom_close, dev2udev(dev),
2884 (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
2885 return (0);
2886 }
2887
2888 static int
2889 swapongeom(struct vnode *vp)
2890 {
2891 int error;
2892
2893 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2894 if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
2895 error = ENOENT;
2896 } else {
2897 g_topology_lock();
2898 error = swapongeom_locked(vp->v_rdev, vp);
2899 g_topology_unlock();
2900 }
2901 VOP_UNLOCK(vp, 0);
2902 return (error);
2903 }
2904
2905 /*
2906 * VNODE backend
2907 *
2908 * This is used mainly for network filesystem (read: probably only tested
2909 * with NFS) swapfiles.
2910 *
2911 */
2912
2913 static void
2914 swapdev_strategy(struct buf *bp, struct swdevt *sp)
2915 {
2916 struct vnode *vp2;
2917
2918 bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2919
2920 vp2 = sp->sw_id;
2921 vhold(vp2);
2922 if (bp->b_iocmd == BIO_WRITE) {
2923 if (bp->b_bufobj)
2924 bufobj_wdrop(bp->b_bufobj);
2925 bufobj_wref(&vp2->v_bufobj);
2926 }
2927 if (bp->b_bufobj != &vp2->v_bufobj)
2928 bp->b_bufobj = &vp2->v_bufobj;
2929 bp->b_vp = vp2;
2930 bp->b_iooffset = dbtob(bp->b_blkno);
2931 bstrategy(bp);
2932 return;
2933 }
2934
2935 static void
2936 swapdev_close(struct thread *td, struct swdevt *sp)
2937 {
2938
2939 VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2940 vrele(sp->sw_vp);
2941 }
2942
2943
2944 static int
2945 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2946 {
2947 struct swdevt *sp;
2948 int error;
2949
2950 if (nblks == 0)
2951 return (ENXIO);
2952 mtx_lock(&sw_dev_mtx);
2953 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2954 if (sp->sw_id == vp) {
2955 mtx_unlock(&sw_dev_mtx);
2956 return (EBUSY);
2957 }
2958 }
2959 mtx_unlock(&sw_dev_mtx);
2960
2961 (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2962 #ifdef MAC
2963 error = mac_system_check_swapon(td->td_ucred, vp);
2964 if (error == 0)
2965 #endif
2966 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
2967 (void) VOP_UNLOCK(vp, 0);
2968 if (error)
2969 return (error);
2970
2971 swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2972 NODEV, 0);
2973 return (0);
2974 }
2975
2976 static int
2977 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
2978 {
2979 int error, new, n;
2980
2981 new = nsw_wcount_async_max;
2982 error = sysctl_handle_int(oidp, &new, 0, req);
2983 if (error != 0 || req->newptr == NULL)
2984 return (error);
2985
2986 if (new > nswbuf / 2 || new < 1)
2987 return (EINVAL);
2988
2989 mtx_lock(&pbuf_mtx);
2990 while (nsw_wcount_async_max != new) {
2991 /*
2992 * Adjust difference. If the current async count is too low,
2993 		 * we will need to squeeze our update slowly in.  Sleep with a
2994 * higher priority than getpbuf() to finish faster.
2995 */
2996 n = new - nsw_wcount_async_max;
2997 if (nsw_wcount_async + n >= 0) {
2998 nsw_wcount_async += n;
2999 nsw_wcount_async_max += n;
3000 wakeup(&nsw_wcount_async);
3001 } else {
3002 nsw_wcount_async_max -= nsw_wcount_async;
3003 nsw_wcount_async = 0;
3004 msleep(&nsw_wcount_async, &pbuf_mtx, PSWP,
3005 "swpsysctl", 0);
3006 }
3007 }
3008 mtx_unlock(&pbuf_mtx);
3009
3010 return (0);
3011 }
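/*
 * Hedged note: this handler presumably backs a vm.swap_async_max sysctl
 * declared elsewhere in this file.  Lowering it (for example,
 * "sysctl vm.swap_async_max=4") shrinks the pool of asynchronous swap write
 * buffers; the loop above sleeps until enough outstanding writes drain that
 * nsw_wcount_async can absorb the reduction.
 */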
3012
3013 static void
3014 swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
3015 vm_offset_t end)
3016 {
3017
3018 VM_OBJECT_WLOCK(object);
3019 KASSERT((object->flags & OBJ_NOSPLIT) != 0,
3020 ("Splittable object with writecount"));
3021 object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
3022 VM_OBJECT_WUNLOCK(object);
3023 }
3024
3025 static void
3026 swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
3027 vm_offset_t end)
3028 {
3029
3030 VM_OBJECT_WLOCK(object);
3031 KASSERT((object->flags & OBJ_NOSPLIT) != 0,
3032 ("Splittable object with writecount"));
3033 object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
3034 VM_OBJECT_WUNLOCK(object);
3035 }