FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c
1 /*-
2 * Copyright (c) 1998 Matthew Dillon,
3 * Copyright (c) 1994 John S. Dyson
4 * Copyright (c) 1990 University of Utah.
5 * Copyright (c) 1982, 1986, 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * New Swap System
41 * Matthew Dillon
42 *
43 * Radix Bitmap 'blists'.
44 *
45 * - The new swapper uses the new radix bitmap code. This should scale
46 * to arbitrarily small or arbitrarily large swap spaces and an almost
47 * arbitrary degree of fragmentation.
48 *
49 * Features:
50 *
51 * - on the fly reallocation of swap during putpages. The new system
52 * does not try to keep previously allocated swap blocks for dirty
53 * pages.
54 *
55 * - on the fly deallocation of swap
56 *
57 * - No more garbage collection required. Unnecessarily allocated swap
58 * blocks only exist for dirty vm_page_t's now and these are already
59 * cycled (in a high-load system) by the pager. We also do on-the-fly
60 * removal of invalidated swap blocks when a page is destroyed
61 * or renamed.
62 *
63 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64 *
65 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
66 * @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
67 */
68
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
71
72 #include "opt_swap.h"
73 #include "opt_vm.h"
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/conf.h>
78 #include <sys/kernel.h>
79 #include <sys/priv.h>
80 #include <sys/proc.h>
81 #include <sys/bio.h>
82 #include <sys/buf.h>
83 #include <sys/disk.h>
84 #include <sys/fcntl.h>
85 #include <sys/mount.h>
86 #include <sys/namei.h>
87 #include <sys/vnode.h>
88 #include <sys/malloc.h>
89 #include <sys/pctrie.h>
90 #include <sys/racct.h>
91 #include <sys/resource.h>
92 #include <sys/resourcevar.h>
93 #include <sys/rwlock.h>
94 #include <sys/sbuf.h>
95 #include <sys/sysctl.h>
96 #include <sys/sysproto.h>
97 #include <sys/blist.h>
98 #include <sys/lock.h>
99 #include <sys/sx.h>
100 #include <sys/vmmeter.h>
101
102 #include <security/mac/mac_framework.h>
103
104 #include <vm/vm.h>
105 #include <vm/pmap.h>
106 #include <vm/vm_map.h>
107 #include <vm/vm_kern.h>
108 #include <vm/vm_object.h>
109 #include <vm/vm_page.h>
110 #include <vm/vm_pager.h>
111 #include <vm/vm_pageout.h>
112 #include <vm/vm_param.h>
113 #include <vm/swap_pager.h>
114 #include <vm/vm_extern.h>
115 #include <vm/uma.h>
116
117 #include <geom/geom.h>
118
119 /*
120 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
121 * The 64-page limit is due to the radix code (kern/subr_blist.c).
122 */
123 #ifndef MAX_PAGEOUT_CLUSTER
124 #define MAX_PAGEOUT_CLUSTER 32
125 #endif
126
127 #if !defined(SWB_NPAGES)
128 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER
129 #endif
130
131 #define SWAP_META_PAGES PCTRIE_COUNT
132
133 /*
134 * A swblk structure maps each page index within a
135 * SWAP_META_PAGES-aligned and sized range to the address of an
136 * on-disk swap block (or SWAPBLK_NONE). The collection of these
137 * mappings for an entire vm object is implemented as a pc-trie.
138 */
139 struct swblk {
140 vm_pindex_t p;
141 daddr_t d[SWAP_META_PAGES];
142 };
143
144 static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
145 static struct mtx sw_dev_mtx;
146 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
147 static struct swdevt *swdevhd; /* Allocate from here next */
148 static int nswapdev; /* Number of swap devices */
149 int swap_pager_avail;
150 static struct sx swdev_syscall_lock; /* serialize swap(on|off) */
151
152 static vm_ooffset_t swap_total;
153 SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0,
154 "Total amount of available swap storage.");
155 static vm_ooffset_t swap_reserved;
156 SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0,
157 "Amount of swap storage needed to back all allocated anonymous memory.");
158 static int overcommit = 0;
159 SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &overcommit, 0,
160 "Configure virtual memory overcommit behavior. See tuning(7) "
161 "for details.");
162 static unsigned long swzone;
163 SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
164 "Actual size of swap metadata zone");
165 static unsigned long swap_maxpages;
166 SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
167 "Maximum amount of swap supported");
168
169 /* bits from overcommit */
170 #define SWAP_RESERVE_FORCE_ON (1 << 0)
171 #define SWAP_RESERVE_RLIMIT_ON (1 << 1)
172 #define SWAP_RESERVE_ALLOW_NONWIRED (1 << 2)
173
174 int
175 swap_reserve(vm_ooffset_t incr)
176 {
177
178 return (swap_reserve_by_cred(incr, curthread->td_ucred));
179 }
180
181 int
182 swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
183 {
184 vm_ooffset_t r, s;
185 int res, error;
186 static int curfail;
187 static struct timeval lastfail;
188 struct uidinfo *uip;
189
190 uip = cred->cr_ruidinfo;
191
192 if (incr & PAGE_MASK)
193 panic("swap_reserve: & PAGE_MASK");
194
195 #ifdef RACCT
196 if (racct_enable) {
197 PROC_LOCK(curproc);
198 error = racct_add(curproc, RACCT_SWAP, incr);
199 PROC_UNLOCK(curproc);
200 if (error != 0)
201 return (0);
202 }
203 #endif
204
205 res = 0;
206 mtx_lock(&sw_dev_mtx);
207 r = swap_reserved + incr;
208 if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
209 s = vm_cnt.v_page_count - vm_cnt.v_free_reserved - vm_cnt.v_wire_count;
210 s *= PAGE_SIZE;
211 } else
212 s = 0;
213 s += swap_total;
214 if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
215 (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
216 res = 1;
217 swap_reserved = r;
218 }
219 mtx_unlock(&sw_dev_mtx);
220
221 if (res) {
222 UIDINFO_VMSIZE_LOCK(uip);
223 if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
224 uip->ui_vmsize + incr > lim_cur(curthread, RLIMIT_SWAP) &&
225 priv_check(curthread, PRIV_VM_SWAP_NORLIMIT))
226 res = 0;
227 else
228 uip->ui_vmsize += incr;
229 UIDINFO_VMSIZE_UNLOCK(uip);
230 if (!res) {
231 mtx_lock(&sw_dev_mtx);
232 swap_reserved -= incr;
233 mtx_unlock(&sw_dev_mtx);
234 }
235 }
236 if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
237 printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
238 uip->ui_uid, curproc->p_pid, incr);
239 }
240
241 #ifdef RACCT
242 if (!res) {
243 PROC_LOCK(curproc);
244 racct_sub(curproc, RACCT_SWAP, incr);
245 PROC_UNLOCK(curproc);
246 }
247 #endif
248
249 return (res);
250 }
251
252 void
253 swap_reserve_force(vm_ooffset_t incr)
254 {
255 struct uidinfo *uip;
256
257 mtx_lock(&sw_dev_mtx);
258 swap_reserved += incr;
259 mtx_unlock(&sw_dev_mtx);
260
261 #ifdef RACCT
262 PROC_LOCK(curproc);
263 racct_add_force(curproc, RACCT_SWAP, incr);
264 PROC_UNLOCK(curproc);
265 #endif
266
267 uip = curthread->td_ucred->cr_ruidinfo;
268 PROC_LOCK(curproc);
269 UIDINFO_VMSIZE_LOCK(uip);
270 uip->ui_vmsize += incr;
271 UIDINFO_VMSIZE_UNLOCK(uip);
272 PROC_UNLOCK(curproc);
273 }
274
275 void
276 swap_release(vm_ooffset_t decr)
277 {
278 struct ucred *cred;
279
280 PROC_LOCK(curproc);
281 cred = curthread->td_ucred;
282 swap_release_by_cred(decr, cred);
283 PROC_UNLOCK(curproc);
284 }
285
286 void
287 swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
288 {
289 struct uidinfo *uip;
290
291 uip = cred->cr_ruidinfo;
292
293 if (decr & PAGE_MASK)
294 panic("swap_release: & PAGE_MASK");
295
296 mtx_lock(&sw_dev_mtx);
297 if (swap_reserved < decr)
298 panic("swap_reserved < decr");
299 swap_reserved -= decr;
300 mtx_unlock(&sw_dev_mtx);
301
302 UIDINFO_VMSIZE_LOCK(uip);
303 if (uip->ui_vmsize < decr)
304 printf("negative vmsize for uid = %d\n", uip->ui_uid);
305 uip->ui_vmsize -= decr;
306 UIDINFO_VMSIZE_UNLOCK(uip);
307
308 racct_sub_cred(cred, RACCT_SWAP, decr);
309 }
310
311 #define SWM_FREE 0x02 /* free, period */
312 #define SWM_POP 0x04 /* pop out */
313
314 static int swap_pager_full = 2; /* swap space exhaustion (task killing) */
315 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
316 static int nsw_rcount; /* free read buffers */
317 static int nsw_wcount_sync; /* limit write buffers / synchronous */
318 static int nsw_wcount_async; /* limit write buffers / asynchronous */
319 static int nsw_wcount_async_max;/* assigned maximum */
320 static int nsw_cluster_max; /* maximum VOP I/O allowed */
321
322 static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
323 SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
324 CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
325 "Maximum running async swap ops");
326 static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
327 SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
328 CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
329 "Swap Fragmentation Info");
330
331 static struct sx sw_alloc_sx;
332
333 /*
334 * "named" and "unnamed" anon region objects. Try to reduce the overhead
335 * of searching a named list by hashing it just a little.
336 */
337
338 #define NOBJLISTS 8
339
340 #define NOBJLIST(handle) \
341 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
342
343 static struct pagerlst swap_pager_object_list[NOBJLISTS];
344 static uma_zone_t swblk_zone;
345 static uma_zone_t swpctrie_zone;
346
347 /*
348 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
349 * calls hooked from other parts of the VM system and do not appear here.
350 * (see vm/swap_pager.h).
351 */
352 static vm_object_t
353 swap_pager_alloc(void *handle, vm_ooffset_t size,
354 vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
355 static void swap_pager_dealloc(vm_object_t object);
356 static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
357 int *);
358 static int swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
359 int *, pgo_getpages_iodone_t, void *);
360 static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
361 static boolean_t
362 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
363 static void swap_pager_init(void);
364 static void swap_pager_unswapped(vm_page_t);
365 static void swap_pager_swapoff(struct swdevt *sp);
366
367 struct pagerops swappagerops = {
368 .pgo_init = swap_pager_init, /* early system initialization of pager */
369 .pgo_alloc = swap_pager_alloc, /* allocate an OBJT_SWAP object */
370 .pgo_dealloc = swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
371 .pgo_getpages = swap_pager_getpages, /* pagein */
372 .pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
373 .pgo_putpages = swap_pager_putpages, /* pageout */
374 .pgo_haspage = swap_pager_haspage, /* get backing store status for page */
375 .pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
376 };
377
378 /*
379 * swap_*() routines are externally accessible. swp_*() routines are
380 * internal.
381 */
382 static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
383 static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
384
385 SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
386 "Maximum size of a swap block in pages");
387
388 static void swp_sizecheck(void);
389 static void swp_pager_async_iodone(struct buf *bp);
390 static int swapongeom(struct vnode *);
391 static int swaponvp(struct thread *, struct vnode *, u_long);
392 static int swapoff_one(struct swdevt *sp, struct ucred *cred);
393
394 /*
395 * Swap bitmap functions
396 */
397 static void swp_pager_freeswapspace(daddr_t blk, int npages);
398 static daddr_t swp_pager_getswapspace(int npages);
399
400 /*
401 * Metadata functions
402 */
403 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
404 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
405 static void swp_pager_meta_free_all(vm_object_t);
406 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
407
408 static void *
409 swblk_trie_alloc(struct pctrie *ptree)
410 {
411
412 return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
413 M_USE_RESERVE : 0)));
414 }
415
416 static void
417 swblk_trie_free(struct pctrie *ptree, void *node)
418 {
419
420 uma_zfree(swpctrie_zone, node);
421 }
422
423 PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);
424
425 /*
426 * SWP_SIZECHECK() - update swap_pager_full indication
427 *
428 * update the swap_pager_almost_full indication and warn when we are
429 * about to run out of swap space, using lowat/hiwat hysteresis.
430 *
431 * Clear swap_pager_full ( task killing ) indication when lowat is met.
432 *
433 * No restrictions on call
434 * This routine may not block.
435 */
436 static void
437 swp_sizecheck(void)
438 {
439
440 if (swap_pager_avail < nswap_lowat) {
441 if (swap_pager_almost_full == 0) {
442 printf("swap_pager: out of swap space\n");
443 swap_pager_almost_full = 1;
444 }
445 } else {
446 swap_pager_full = 0;
447 if (swap_pager_avail > nswap_hiwat)
448 swap_pager_almost_full = 0;
449 }
450 }
451
452 /*
453 * SWAP_PAGER_INIT() - initialize the swap pager!
454 *
455 * Expected to be started from system init. NOTE: This code is run
456 * before much else so be careful what you depend on. Most of the VM
457 * system has yet to be initialized at this point.
458 */
459 static void
460 swap_pager_init(void)
461 {
462 /*
463 * Initialize object lists
464 */
465 int i;
466
467 for (i = 0; i < NOBJLISTS; ++i)
468 TAILQ_INIT(&swap_pager_object_list[i]);
469 mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
470 sx_init(&sw_alloc_sx, "swspsx");
471 sx_init(&swdev_syscall_lock, "swsysc");
472 }
473
474 /*
475 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
476 *
477 * Expected to be started from pageout process once, prior to entering
478 * its main loop.
479 */
480 void
481 swap_pager_swap_init(void)
482 {
483 unsigned long n, n2;
484
485 /*
486 * Number of in-transit swap bp operations. Don't
487 * exhaust the pbufs completely. Make sure we
488 * initialize workable values (0 will work for hysteresis
489 * but it isn't very efficient).
490 *
491 * The nsw_cluster_max is constrained by the bp->b_pages[]
492 * array (MAXPHYS/PAGE_SIZE) and our locally defined
493 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
494 * constrained by the swap device interleave stripe size.
495 *
496 * Currently we hardwire nsw_wcount_async to 4. This limit is
497 * designed to prevent other I/O from having high latencies due to
498 * our pageout I/O. The value 4 works well for one or two active swap
499 * devices but is probably a little low if you have more. Even so,
500 * a higher value would probably generate only a limited improvement
501 * with three or four active swap devices since the system does not
502 * typically have to pageout at extreme bandwidths. We will want
503 * at least 2 per swap devices, and 4 is a pretty good value if you
504 * have one NFS swap device due to the command/ack latency over NFS.
505 * So it all works out pretty well.
506 */
507 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
508
509 mtx_lock(&pbuf_mtx);
510 nsw_rcount = (nswbuf + 1) / 2;
511 nsw_wcount_sync = (nswbuf + 3) / 4;
512 nsw_wcount_async = 4;
513 nsw_wcount_async_max = nsw_wcount_async;
514 mtx_unlock(&pbuf_mtx);
515
516 /*
517 * Initialize our zone, taking the user's requested size or
518 * estimating the number we need based on the number of pages
519 * in the system.
520 */
521 n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
522 vm_cnt.v_page_count / 2;
523 swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
524 pctrie_zone_init, NULL, UMA_ALIGN_PTR,
525 UMA_ZONE_NOFREE | UMA_ZONE_VM);
526 if (swpctrie_zone == NULL)
527 panic("failed to create swap pctrie zone.");
528 swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
529 NULL, NULL, _Alignof(struct swblk) - 1,
530 UMA_ZONE_NOFREE | UMA_ZONE_VM);
531 if (swblk_zone == NULL)
532 panic("failed to create swap blk zone.");
533 n2 = n;
534 do {
535 if (uma_zone_reserve_kva(swblk_zone, n))
536 break;
537 /*
538 * if the allocation failed, try a zone two thirds the
539 * size of the previous attempt.
540 */
541 n -= ((n + 2) / 3);
542 } while (n > 0);
543
544 /*
545 * Often uma_zone_reserve_kva() cannot reserve exactly the
546 * requested size. Account for the difference when
547 * calculating swap_maxpages.
548 */
549 n = uma_zone_get_max(swblk_zone);
550
551 if (n < n2)
552 printf("Swap blk zone entries changed from %lu to %lu.\n",
553 n2, n);
554 swap_maxpages = n * SWAP_META_PAGES;
555 swzone = n * sizeof(struct swblk);
556 if (!uma_zone_reserve_kva(swpctrie_zone, n))
557 printf("Cannot reserve swap pctrie zone, "
558 "reduce kern.maxswzone.\n");
559 }
560
561 static vm_object_t
562 swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
563 vm_ooffset_t offset)
564 {
565 vm_object_t object;
566
567 if (cred != NULL) {
568 if (!swap_reserve_by_cred(size, cred))
569 return (NULL);
570 crhold(cred);
571 }
572
573 /*
574 * The un_pager.swp.swp_blks trie is initialized by
575 * vm_object_allocate() to ensure the correct order of
576 * visibility to other threads.
577 */
578 object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
579 PAGE_MASK + size));
580
581 object->handle = handle;
582 if (cred != NULL) {
583 object->cred = cred;
584 object->charge = size;
585 }
586 return (object);
587 }
588
589 /*
590 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
591 * its metadata structures.
592 *
593 * This routine is called from the mmap and fork code to create a new
594 * OBJT_SWAP object.
595 *
596 * This routine must ensure that no live duplicate is created for
597 * the named object request, which is protected against by
598 * holding the sw_alloc_sx lock in case handle != NULL.
599 */
600 static vm_object_t
601 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
602 vm_ooffset_t offset, struct ucred *cred)
603 {
604 vm_object_t object;
605
606 if (handle != NULL) {
607 /*
608 * Reference existing named region or allocate new one. There
609 * should not be a race here against swp_pager_meta_build()
610 * as called from vm_page_remove() in regards to the lookup
611 * of the handle.
612 */
613 sx_xlock(&sw_alloc_sx);
614 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
615 if (object == NULL) {
616 object = swap_pager_alloc_init(handle, cred, size,
617 offset);
618 if (object != NULL) {
619 TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
620 object, pager_object_list);
621 }
622 }
623 sx_xunlock(&sw_alloc_sx);
624 } else {
625 object = swap_pager_alloc_init(handle, cred, size, offset);
626 }
627 return (object);
628 }
629
630 /*
631 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
632 *
633 * The swap backing for the object is destroyed. The code is
634 * designed such that we can reinstantiate it later, but this
635 * routine is typically called only when the entire object is
636 * about to be destroyed.
637 *
638 * The object must be locked.
639 */
640 static void
641 swap_pager_dealloc(vm_object_t object)
642 {
643
644 VM_OBJECT_ASSERT_WLOCKED(object);
645 KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));
646
647 /*
648 * Remove from list right away so lookups will fail if we block for
649 * pageout completion.
650 */
651 if (object->handle != NULL) {
652 VM_OBJECT_WUNLOCK(object);
653 sx_xlock(&sw_alloc_sx);
654 TAILQ_REMOVE(NOBJLIST(object->handle), object,
655 pager_object_list);
656 sx_xunlock(&sw_alloc_sx);
657 VM_OBJECT_WLOCK(object);
658 }
659
660 vm_object_pip_wait(object, "swpdea");
661
662 /*
663 * Free all remaining metadata. We only bother to free it from
664 * the swap meta data. We do not attempt to free swapblk's still
665 * associated with vm_page_t's for this object. We do not care
666 * if paging is still in progress on some objects.
667 */
668 swp_pager_meta_free_all(object);
669 object->handle = NULL;
670 object->type = OBJT_DEAD;
671 }
672
673 /************************************************************************
674 * SWAP PAGER BITMAP ROUTINES *
675 ************************************************************************/
676
677 /*
678 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
679 *
680 * Allocate swap for the requested number of pages. The starting
681 * swap block number (a page index) is returned or SWAPBLK_NONE
682 * if the allocation failed.
683 *
684 * Also has the side effect of advising that somebody made a mistake
685 * when they configured swap and didn't configure enough.
686 *
687 * This routine may not sleep.
688 *
689 * We allocate in round-robin fashion from the configured devices.
690 */
691 static daddr_t
692 swp_pager_getswapspace(int npages)
693 {
694 daddr_t blk;
695 struct swdevt *sp;
696 int i;
697
698 blk = SWAPBLK_NONE;
699 mtx_lock(&sw_dev_mtx);
700 sp = swdevhd;
701 for (i = 0; i < nswapdev; i++) {
702 if (sp == NULL)
703 sp = TAILQ_FIRST(&swtailq);
704 if (!(sp->sw_flags & SW_CLOSING)) {
705 blk = blist_alloc(sp->sw_blist, npages);
706 if (blk != SWAPBLK_NONE) {
707 blk += sp->sw_first;
708 sp->sw_used += npages;
709 swap_pager_avail -= npages;
710 swp_sizecheck();
711 swdevhd = TAILQ_NEXT(sp, sw_list);
712 goto done;
713 }
714 }
715 sp = TAILQ_NEXT(sp, sw_list);
716 }
717 if (swap_pager_full != 2) {
718 printf("swap_pager_getswapspace(%d): failed\n", npages);
719 swap_pager_full = 2;
720 swap_pager_almost_full = 1;
721 }
722 swdevhd = NULL;
723 done:
724 mtx_unlock(&sw_dev_mtx);
725 return (blk);
726 }
727
728 static int
729 swp_pager_isondev(daddr_t blk, struct swdevt *sp)
730 {
731
732 return (blk >= sp->sw_first && blk < sp->sw_end);
733 }
734
735 static void
736 swp_pager_strategy(struct buf *bp)
737 {
738 struct swdevt *sp;
739
740 mtx_lock(&sw_dev_mtx);
741 TAILQ_FOREACH(sp, &swtailq, sw_list) {
742 if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
743 mtx_unlock(&sw_dev_mtx);
744 if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
745 unmapped_buf_allowed) {
746 bp->b_data = unmapped_buf;
747 bp->b_offset = 0;
748 } else {
749 pmap_qenter((vm_offset_t)bp->b_data,
750 &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
751 }
752 sp->sw_strategy(bp, sp);
753 return;
754 }
755 }
756 panic("Swapdev not found");
757 }
758
759
760 /*
761 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
762 *
763 * This routine returns the specified swap blocks back to the bitmap.
764 *
765 * This routine may not sleep.
766 */
767 static void
768 swp_pager_freeswapspace(daddr_t blk, int npages)
769 {
770 struct swdevt *sp;
771
772 mtx_lock(&sw_dev_mtx);
773 TAILQ_FOREACH(sp, &swtailq, sw_list) {
774 if (blk >= sp->sw_first && blk < sp->sw_end) {
775 sp->sw_used -= npages;
776 /*
777 * If we are attempting to stop swapping on
778 * this device, we don't want to mark any
779 * blocks free lest they be reused.
780 */
781 if ((sp->sw_flags & SW_CLOSING) == 0) {
782 blist_free(sp->sw_blist, blk - sp->sw_first,
783 npages);
784 swap_pager_avail += npages;
785 swp_sizecheck();
786 }
787 mtx_unlock(&sw_dev_mtx);
788 return;
789 }
790 }
791 panic("Swapdev not found");
792 }
793
794 /*
795 * SYSCTL_SWAP_FRAGMENTATION() - produce raw swap space stats
796 */
797 static int
798 sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
799 {
800 struct sbuf sbuf;
801 struct swdevt *sp;
802 const char *devname;
803 int error;
804
805 error = sysctl_wire_old_buffer(req, 0);
806 if (error != 0)
807 return (error);
808 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
809 mtx_lock(&sw_dev_mtx);
810 TAILQ_FOREACH(sp, &swtailq, sw_list) {
811 if (vn_isdisk(sp->sw_vp, NULL))
812 devname = devtoname(sp->sw_vp->v_rdev);
813 else
814 devname = "[file]";
815 sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
816 blist_stats(sp->sw_blist, &sbuf);
817 }
818 mtx_unlock(&sw_dev_mtx);
819 error = sbuf_finish(&sbuf);
820 sbuf_delete(&sbuf);
821 return (error);
822 }
823
824 /*
825 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
826 * range within an object.
827 *
828 * This is a globally accessible routine.
829 *
830 * This routine removes swapblk assignments from swap metadata.
831 *
832 * The external callers of this routine typically have already destroyed
833 * or renamed vm_page_t's associated with this range in the object so
834 * we should be ok.
835 *
836 * The object must be locked.
837 */
838 void
839 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
840 {
841
842 swp_pager_meta_free(object, start, size);
843 }
844
845 /*
846 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
847 *
848 * Assigns swap blocks to the specified range within the object. The
849 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
850 *
851 * Returns 0 on success, -1 on failure.
852 */
853 int
854 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
855 {
856 int n = 0;
857 daddr_t blk = SWAPBLK_NONE;
858 vm_pindex_t beg = start; /* save start index */
859
860 VM_OBJECT_WLOCK(object);
861 while (size) {
862 if (n == 0) {
863 n = BLIST_MAX_ALLOC;
864 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
865 n >>= 1;
866 if (n == 0) {
867 swp_pager_meta_free(object, beg, start - beg);
868 VM_OBJECT_WUNLOCK(object);
869 return (-1);
870 }
871 }
872 }
873 swp_pager_meta_build(object, start, blk);
874 --size;
875 ++start;
876 ++blk;
877 --n;
878 }
879 swp_pager_meta_free(object, start, n);
880 VM_OBJECT_WUNLOCK(object);
881 return (0);
882 }
883
884 /*
885 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
886 * and destroy the source.
887 *
888 * Copy any valid swapblks from the source to the destination. In
889 * cases where both the source and destination have a valid swapblk,
890 * we keep the destination's.
891 *
892 * This routine is allowed to sleep. It may sleep allocating metadata
893 * indirectly through swp_pager_meta_build() or if paging is still in
894 * progress on the source.
895 *
896 * The source object contains no vm_page_t's (which is just as well)
897 *
898 * The source object is of type OBJT_SWAP.
899 *
900 * The source and destination objects must be locked.
901 * Both object locks may temporarily be released.
902 */
903 void
904 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
905 vm_pindex_t offset, int destroysource)
906 {
907 vm_pindex_t i;
908
909 VM_OBJECT_ASSERT_WLOCKED(srcobject);
910 VM_OBJECT_ASSERT_WLOCKED(dstobject);
911
912 /*
913 * If destroysource is set, we remove the source object from the
914 * swap_pager internal queue now.
915 */
916 if (destroysource && srcobject->handle != NULL) {
917 vm_object_pip_add(srcobject, 1);
918 VM_OBJECT_WUNLOCK(srcobject);
919 vm_object_pip_add(dstobject, 1);
920 VM_OBJECT_WUNLOCK(dstobject);
921 sx_xlock(&sw_alloc_sx);
922 TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
923 pager_object_list);
924 sx_xunlock(&sw_alloc_sx);
925 VM_OBJECT_WLOCK(dstobject);
926 vm_object_pip_wakeup(dstobject);
927 VM_OBJECT_WLOCK(srcobject);
928 vm_object_pip_wakeup(srcobject);
929 }
930
931 /*
932 * transfer source to destination.
933 */
934 for (i = 0; i < dstobject->size; ++i) {
935 daddr_t dstaddr;
936
937 /*
938 * Locate (without changing) the swapblk on the destination,
939 * unless it is invalid in which case free it silently, or
940 * if the destination is a resident page, in which case the
941 * source is thrown away.
942 */
943 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
944
945 if (dstaddr == SWAPBLK_NONE) {
946 /*
947 * Destination has no swapblk and is not resident,
948 * copy source.
949 */
950 daddr_t srcaddr;
951
952 srcaddr = swp_pager_meta_ctl(
953 srcobject,
954 i + offset,
955 SWM_POP
956 );
957
958 if (srcaddr != SWAPBLK_NONE) {
959 /*
960 * swp_pager_meta_build() can sleep.
961 */
962 vm_object_pip_add(srcobject, 1);
963 VM_OBJECT_WUNLOCK(srcobject);
964 vm_object_pip_add(dstobject, 1);
965 swp_pager_meta_build(dstobject, i, srcaddr);
966 vm_object_pip_wakeup(dstobject);
967 VM_OBJECT_WLOCK(srcobject);
968 vm_object_pip_wakeup(srcobject);
969 }
970 } else {
971 /*
972 * Destination has valid swapblk or it is represented
973 * by a resident page. We destroy the sourceblock.
974 */
975
976 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
977 }
978 }
979
980 /*
981 * Free left over swap blocks in source.
982 *
983 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
984 * double-remove the object from the swap queues.
985 */
986 if (destroysource) {
987 swp_pager_meta_free_all(srcobject);
988 /*
989 * Reverting the type is not necessary, the caller is going
990 * to destroy srcobject directly, but I'm doing it here
991 * for consistency since we've removed the object from its
992 * queues.
993 */
994 srcobject->type = OBJT_DEFAULT;
995 }
996 }
997
998 /*
999 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
1000 * the requested page.
1001 *
1002 * We determine whether good backing store exists for the requested
1003 * page and return TRUE if it does, FALSE if it doesn't.
1004 *
1005 * If TRUE, we also try to determine how much valid, contiguous backing
1006 * store exists before and after the requested page.
1007 */
1008 static boolean_t
1009 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
1010 int *after)
1011 {
1012 daddr_t blk, blk0;
1013 int i;
1014
1015 VM_OBJECT_ASSERT_LOCKED(object);
1016
1017 /*
1018 * do we have good backing store at the requested index ?
1019 */
1020 blk0 = swp_pager_meta_ctl(object, pindex, 0);
1021 if (blk0 == SWAPBLK_NONE) {
1022 if (before)
1023 *before = 0;
1024 if (after)
1025 *after = 0;
1026 return (FALSE);
1027 }
1028
1029 /*
1030 * find backwards-looking contiguous good backing store
1031 */
1032 if (before != NULL) {
1033 for (i = 1; i < SWB_NPAGES; i++) {
1034 if (i > pindex)
1035 break;
1036 blk = swp_pager_meta_ctl(object, pindex - i, 0);
1037 if (blk != blk0 - i)
1038 break;
1039 }
1040 *before = i - 1;
1041 }
1042
1043 /*
1044 * find forward-looking contiguous good backing store
1045 */
1046 if (after != NULL) {
1047 for (i = 1; i < SWB_NPAGES; i++) {
1048 blk = swp_pager_meta_ctl(object, pindex + i, 0);
1049 if (blk != blk0 + i)
1050 break;
1051 }
1052 *after = i - 1;
1053 }
1054 return (TRUE);
1055 }
1056
1057 /*
1058 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
1059 *
1060 * This removes any associated swap backing store, whether valid or
1061 * not, from the page.
1062 *
1063 * This routine is typically called when a page is made dirty, at
1064 * which point any associated swap can be freed. MADV_FREE also
1065 * calls us in a special-case situation
1066 *
1067 * NOTE!!! If the page is clean and the swap was valid, the caller
1068 * should make the page dirty before calling this routine. This routine
1069 * does NOT change the m->dirty status of the page. Also: MADV_FREE
1070 * depends on it.
1071 *
1072 * This routine may not sleep.
1073 *
1074 * The object containing the page must be locked.
1075 */
1076 static void
1077 swap_pager_unswapped(vm_page_t m)
1078 {
1079
1080 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
1081 }
1082
1083 /*
1084 * swap_pager_getpages() - bring pages in from swap
1085 *
1086 * Attempt to page in the pages in array "ma" of length "count". The
1087 * caller may optionally specify that additional pages preceding and
1088 * succeeding the specified range be paged in. The number of such pages
1089 * is returned in the "rbehind" and "rahead" parameters, and they will
1090 * be in the inactive queue upon return.
1091 *
1092 * The pages in "ma" must be busied and will remain busied upon return.
1093 */
1094 static int
1095 swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
1096 int *rahead)
1097 {
1098 struct buf *bp;
1099 vm_page_t bm, mpred, msucc, p;
1100 vm_pindex_t pindex;
1101 daddr_t blk;
1102 int i, maxahead, maxbehind, reqcount;
1103
1104 reqcount = count;
1105
1106 /*
1107 * Determine the final number of read-behind pages and
1108 * allocate them BEFORE releasing the object lock. Otherwise,
1109 * there can be a problematic race with vm_object_split().
1110 * Specifically, vm_object_split() might first transfer pages
1111 * that precede ma[0] in the current object to a new object,
1112 * and then this function incorrectly recreates those pages as
1113 * read-behind pages in the current object.
1114 */
1115 if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
1116 return (VM_PAGER_FAIL);
1117
1118 /*
1119 * Clip the readahead and readbehind ranges to exclude resident pages.
1120 */
1121 if (rahead != NULL) {
1122 KASSERT(reqcount - 1 <= maxahead,
1123 ("page count %d extends beyond swap block", reqcount));
1124 *rahead = imin(*rahead, maxahead - (reqcount - 1));
1125 pindex = ma[reqcount - 1]->pindex;
1126 msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
1127 if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
1128 *rahead = msucc->pindex - pindex - 1;
1129 }
1130 if (rbehind != NULL) {
1131 *rbehind = imin(*rbehind, maxbehind);
1132 pindex = ma[0]->pindex;
1133 mpred = TAILQ_PREV(ma[0], pglist, listq);
1134 if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
1135 *rbehind = pindex - mpred->pindex - 1;
1136 }
1137
1138 bm = ma[0];
1139 for (i = 0; i < count; i++)
1140 ma[i]->oflags |= VPO_SWAPINPROG;
1141
1142 /*
1143 * Allocate readahead and readbehind pages.
1144 */
1145 if (rbehind != NULL) {
1146 for (i = 1; i <= *rbehind; i++) {
1147 p = vm_page_alloc(object, ma[0]->pindex - i,
1148 VM_ALLOC_NORMAL);
1149 if (p == NULL)
1150 break;
1151 p->oflags |= VPO_SWAPINPROG;
1152 bm = p;
1153 }
1154 *rbehind = i - 1;
1155 }
1156 if (rahead != NULL) {
1157 for (i = 0; i < *rahead; i++) {
1158 p = vm_page_alloc(object,
1159 ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
1160 if (p == NULL)
1161 break;
1162 p->oflags |= VPO_SWAPINPROG;
1163 }
1164 *rahead = i;
1165 }
1166 if (rbehind != NULL)
1167 count += *rbehind;
1168 if (rahead != NULL)
1169 count += *rahead;
1170
1171 vm_object_pip_add(object, count);
1172
1173 pindex = bm->pindex;
1174 blk = swp_pager_meta_ctl(object, pindex, 0);
1175 KASSERT(blk != SWAPBLK_NONE,
1176 ("no swap blocking containing %p(%jx)", object, (uintmax_t)pindex));
1177
1178 VM_OBJECT_WUNLOCK(object);
1179 bp = getpbuf(&nsw_rcount);
1180 /* Pages cannot leave the object while busy. */
1181 for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
1182 MPASS(p->pindex == bm->pindex + i);
1183 bp->b_pages[i] = p;
1184 }
1185
1186 bp->b_flags |= B_PAGING;
1187 bp->b_iocmd = BIO_READ;
1188 bp->b_iodone = swp_pager_async_iodone;
1189 bp->b_rcred = crhold(thread0.td_ucred);
1190 bp->b_wcred = crhold(thread0.td_ucred);
1191 bp->b_blkno = blk;
1192 bp->b_bcount = PAGE_SIZE * count;
1193 bp->b_bufsize = PAGE_SIZE * count;
1194 bp->b_npages = count;
1195 bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
1196 bp->b_pgafter = rahead != NULL ? *rahead : 0;
1197
1198 PCPU_INC(cnt.v_swapin);
1199 PCPU_ADD(cnt.v_swappgsin, count);
1200
1201 /*
1202 * perform the I/O. NOTE!!! bp cannot be considered valid after
1203 * this point because we automatically release it on completion.
1204 * Instead, we look at the one page we are interested in which we
1205 * still hold a lock on even through the I/O completion.
1206 *
1207 * The other pages in our ma[] array are also released on completion,
1208 * so we cannot assume they are valid anymore either.
1209 *
1210 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1211 */
1212 BUF_KERNPROC(bp);
1213 swp_pager_strategy(bp);
1214
1215 /*
1216 * Wait for the pages we want to complete. VPO_SWAPINPROG is always
1217 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1218 * is set in the metadata for each page in the request.
1219 */
1220 VM_OBJECT_WLOCK(object);
1221 while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
1222 ma[0]->oflags |= VPO_SWAPSLEEP;
1223 PCPU_INC(cnt.v_intrans);
1224 if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
1225 "swread", hz * 20)) {
1226 printf(
1227 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1228 bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
1229 }
1230 }
1231
1232 /*
1233 * If we had an unrecoverable read error pages will not be valid.
1234 */
1235 for (i = 0; i < reqcount; i++)
1236 if (ma[i]->valid != VM_PAGE_BITS_ALL)
1237 return (VM_PAGER_ERROR);
1238
1239 return (VM_PAGER_OK);
1240
1241 /*
1242 * A final note: in a low swap situation, we cannot deallocate swap
1243 * and mark a page dirty here because the caller is likely to mark
1244 * the page clean when we return, causing the page to possibly revert
1245 * to all-zero's later.
1246 */
1247 }
1248
1249 /*
1250 * swap_pager_getpages_async():
1251 *
1252 * Right now this is emulation of asynchronous operation on top of
1253 * swap_pager_getpages().
1254 */
1255 static int
1256 swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
1257 int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
1258 {
1259 int r, error;
1260
1261 r = swap_pager_getpages(object, ma, count, rbehind, rahead);
1262 VM_OBJECT_WUNLOCK(object);
1263 switch (r) {
1264 case VM_PAGER_OK:
1265 error = 0;
1266 break;
1267 case VM_PAGER_ERROR:
1268 error = EIO;
1269 break;
1270 case VM_PAGER_FAIL:
1271 error = EINVAL;
1272 break;
1273 default:
1274 panic("unhandled swap_pager_getpages() error %d", r);
1275 }
1276 (iodone)(arg, ma, count, error);
1277 VM_OBJECT_WLOCK(object);
1278
1279 return (r);
1280 }
1281
1282 /*
1283 * swap_pager_putpages:
1284 *
1285 * Assign swap (if necessary) and initiate I/O on the specified pages.
1286 *
1287 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1288 * are automatically converted to SWAP objects.
1289 *
1290 * In a low memory situation we may block in VOP_STRATEGY(), but the new
1291 * vm_page reservation system coupled with properly written VFS devices
1292 * should ensure that no low-memory deadlock occurs. This is an area
1293 * which needs work.
1294 *
1295 * The parent has N vm_object_pip_add() references prior to
1296 * calling us and will remove references for rtvals[] that are
1297 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1298 * completion.
1299 *
1300 * The parent has soft-busy'd the pages it passes us and will unbusy
1301 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return.
1302 * We need to unbusy the rest on I/O completion.
1303 */
1304 static void
1305 swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
1306 int flags, int *rtvals)
1307 {
1308 int i, n;
1309 boolean_t sync;
1310
1311 if (count && ma[0]->object != object) {
1312 panic("swap_pager_putpages: object mismatch %p/%p",
1313 object,
1314 ma[0]->object
1315 );
1316 }
1317
1318 /*
1319 * Step 1
1320 *
1321 * Turn object into OBJT_SWAP
1322 * check for bogus sysops
1323 * force sync if not pageout process
1324 */
1325 if (object->type != OBJT_SWAP)
1326 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1327 VM_OBJECT_WUNLOCK(object);
1328
1329 n = 0;
1330 if (curproc != pageproc)
1331 sync = TRUE;
1332 else
1333 sync = (flags & VM_PAGER_PUT_SYNC) != 0;
1334
1335 /*
1336 * Step 2
1337 *
1338 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1339 * The page is left dirty until the pageout operation completes
1340 * successfully.
1341 */
1342 for (i = 0; i < count; i += n) {
1343 int j;
1344 struct buf *bp;
1345 daddr_t blk;
1346
1347 /*
1348 * Maximum I/O size is limited by a number of factors.
1349 */
1350 n = min(BLIST_MAX_ALLOC, count - i);
1351 n = min(n, nsw_cluster_max);
1352
1353 /*
1354 * Get biggest block of swap we can. If we fail, fall
1355 * back and try to allocate a smaller block. Don't go
1356 * overboard trying to allocate space if it would overly
1357 * fragment swap.
1358 */
1359 while (
1360 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1361 n > 4
1362 ) {
1363 n >>= 1;
1364 }
1365 if (blk == SWAPBLK_NONE) {
1366 for (j = 0; j < n; ++j)
1367 rtvals[i+j] = VM_PAGER_FAIL;
1368 continue;
1369 }
1370
1371 /*
1372 * All I/O parameters have been satisfied, build the I/O
1373 * request and assign the swap space.
1374 */
1375 if (sync == TRUE) {
1376 bp = getpbuf(&nsw_wcount_sync);
1377 } else {
1378 bp = getpbuf(&nsw_wcount_async);
1379 bp->b_flags = B_ASYNC;
1380 }
1381 bp->b_flags |= B_PAGING;
1382 bp->b_iocmd = BIO_WRITE;
1383
1384 bp->b_rcred = crhold(thread0.td_ucred);
1385 bp->b_wcred = crhold(thread0.td_ucred);
1386 bp->b_bcount = PAGE_SIZE * n;
1387 bp->b_bufsize = PAGE_SIZE * n;
1388 bp->b_blkno = blk;
1389
1390 VM_OBJECT_WLOCK(object);
1391 for (j = 0; j < n; ++j) {
1392 vm_page_t mreq = ma[i+j];
1393
1394 swp_pager_meta_build(
1395 mreq->object,
1396 mreq->pindex,
1397 blk + j
1398 );
1399 MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
1400 mreq->oflags |= VPO_SWAPINPROG;
1401 bp->b_pages[j] = mreq;
1402 }
1403 VM_OBJECT_WUNLOCK(object);
1404 bp->b_npages = n;
1405 /*
1406 * Must set dirty range for NFS to work.
1407 */
1408 bp->b_dirtyoff = 0;
1409 bp->b_dirtyend = bp->b_bcount;
1410
1411 PCPU_INC(cnt.v_swapout);
1412 PCPU_ADD(cnt.v_swappgsout, bp->b_npages);
1413
1414 /*
1415 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
1416 * can call the async completion routine at the end of a
1417 * synchronous I/O operation. Otherwise, our caller would
1418 * perform duplicate unbusy and wakeup operations on the page
1419 * and object, respectively.
1420 */
1421 for (j = 0; j < n; j++)
1422 rtvals[i + j] = VM_PAGER_PEND;
1423
1424 /*
1425 * asynchronous
1426 *
1427 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1428 */
1429 if (sync == FALSE) {
1430 bp->b_iodone = swp_pager_async_iodone;
1431 BUF_KERNPROC(bp);
1432 swp_pager_strategy(bp);
1433 continue;
1434 }
1435
1436 /*
1437 * synchronous
1438 *
1439 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1440 */
1441 bp->b_iodone = bdone;
1442 swp_pager_strategy(bp);
1443
1444 /*
1445 * Wait for the sync I/O to complete.
1446 */
1447 bwait(bp, PVM, "swwrt");
1448
1449 /*
1450 * Now that we are through with the bp, we can call the
1451 * normal async completion, which frees everything up.
1452 */
1453 swp_pager_async_iodone(bp);
1454 }
1455 VM_OBJECT_WLOCK(object);
1456 }
1457
1458 /*
1459 * swp_pager_async_iodone:
1460 *
1461 * Completion routine for asynchronous reads and writes from/to swap.
1462 * Also called manually by synchronous code to finish up a bp.
1463 *
1464 * This routine may not sleep.
1465 */
1466 static void
1467 swp_pager_async_iodone(struct buf *bp)
1468 {
1469 int i;
1470 vm_object_t object = NULL;
1471
1472 /*
1473 * report error
1474 */
1475 if (bp->b_ioflags & BIO_ERROR) {
1476 printf(
1477 "swap_pager: I/O error - %s failed; blkno %ld,"
1478 "size %ld, error %d\n",
1479 ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1480 (long)bp->b_blkno,
1481 (long)bp->b_bcount,
1482 bp->b_error
1483 );
1484 }
1485
1486 /*
1487 * remove the mapping for kernel virtual
1488 */
1489 if (buf_mapped(bp))
1490 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1491 else
1492 bp->b_data = bp->b_kvabase;
1493
1494 if (bp->b_npages) {
1495 object = bp->b_pages[0]->object;
1496 VM_OBJECT_WLOCK(object);
1497 }
1498
1499 /*
1500 * cleanup pages. If an error occurs writing to swap, we are in
1501 * very serious trouble. If it happens to be a disk error, though,
1502 * we may be able to recover by reassigning the swap later on. So
1503 * in this case we remove the m->swapblk assignment for the page
1504 * but do not free it in the rlist. The errornous block(s) are thus
1505 * never reallocated as swap. Redirty the page and continue.
1506 */
1507 for (i = 0; i < bp->b_npages; ++i) {
1508 vm_page_t m = bp->b_pages[i];
1509
1510 m->oflags &= ~VPO_SWAPINPROG;
1511 if (m->oflags & VPO_SWAPSLEEP) {
1512 m->oflags &= ~VPO_SWAPSLEEP;
1513 wakeup(&object->paging_in_progress);
1514 }
1515
1516 if (bp->b_ioflags & BIO_ERROR) {
1517 /*
1518 * If an error occurs I'd love to throw the swapblk
1519 * away without freeing it back to swapspace, so it
1520 * can never be used again. But I can't from an
1521 * interrupt.
1522 */
1523 if (bp->b_iocmd == BIO_READ) {
1524 /*
1525 * NOTE: for reads, m->dirty will probably
1526 * be overridden by the original caller of
1527 * getpages so don't play cute tricks here.
1528 */
1529 m->valid = 0;
1530 } else {
1531 /*
1532 * If a write error occurs, reactivate page
1533 * so it doesn't clog the inactive list,
1534 * then finish the I/O.
1535 */
1536 MPASS(m->dirty == VM_PAGE_BITS_ALL);
1537 vm_page_lock(m);
1538 vm_page_activate(m);
1539 vm_page_unlock(m);
1540 vm_page_sunbusy(m);
1541 }
1542 } else if (bp->b_iocmd == BIO_READ) {
1543 /*
1544 * NOTE: for reads, m->dirty will probably be
1545 * overridden by the original caller of getpages so
1546 * we cannot set them in order to free the underlying
1547 * swap in a low-swap situation. I don't think we'd
1548 * want to do that anyway, but it was an optimization
1549 * that existed in the old swapper for a time before
1550 * it got ripped out due to precisely this problem.
1551 */
1552 KASSERT(!pmap_page_is_mapped(m),
1553 ("swp_pager_async_iodone: page %p is mapped", m));
1554 KASSERT(m->dirty == 0,
1555 ("swp_pager_async_iodone: page %p is dirty", m));
1556
1557 m->valid = VM_PAGE_BITS_ALL;
1558 if (i < bp->b_pgbefore ||
1559 i >= bp->b_npages - bp->b_pgafter)
1560 vm_page_readahead_finish(m);
1561 } else {
1562 /*
1563 * For write success, clear the dirty
1564 * status, then finish the I/O ( which decrements the
1565 * busy count and possibly wakes waiter's up ).
1566 * A page is only written to swap after a period of
1567 * inactivity. Therefore, we do not expect it to be
1568 * reused.
1569 */
1570 KASSERT(!pmap_page_is_write_mapped(m),
1571 ("swp_pager_async_iodone: page %p is not write"
1572 " protected", m));
1573 vm_page_undirty(m);
1574 vm_page_lock(m);
1575 vm_page_deactivate_noreuse(m);
1576 vm_page_unlock(m);
1577 vm_page_sunbusy(m);
1578 }
1579 }
1580
1581 /*
1582 * adjust pip. NOTE: the original parent may still have its own
1583 * pip refs on the object.
1584 */
1585 if (object != NULL) {
1586 vm_object_pip_wakeupn(object, bp->b_npages);
1587 VM_OBJECT_WUNLOCK(object);
1588 }
1589
1590 /*
1591 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1592 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1593 * trigger a KASSERT in relpbuf().
1594 */
1595 if (bp->b_vp) {
1596 bp->b_vp = NULL;
1597 bp->b_bufobj = NULL;
1598 }
1599 /*
1600 * release the physical I/O buffer
1601 */
1602 relpbuf(
1603 bp,
1604 ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1605 ((bp->b_flags & B_ASYNC) ?
1606 &nsw_wcount_async :
1607 &nsw_wcount_sync
1608 )
1609 )
1610 );
1611 }
1612
1613 /*
1614 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
1615 *
1616 * This routine dissociates the page at the given index within an object
1617 * from its backing store, paging it in if it does not reside in memory.
1618 * If the page is paged in, it is marked dirty and placed in the laundry
1619 * queue. The page is marked dirty because it no longer has backing
1620 * store. It is placed in the laundry queue because it has not been
1621 * accessed recently. Otherwise, it would already reside in memory.
1622 *
1623 * We also attempt to swap in all other pages in the swap block.
1624 * However, we only guarantee that the one at the specified index is
1625 * paged in.
1626 *
1627 * XXX - The code to page the whole block in doesn't work, so we
1628 * revert to the one-by-one behavior for now. Sigh.
1629 */
1630 static inline void
1631 swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
1632 {
1633 vm_page_t m;
1634
1635 vm_object_pip_add(object, 1);
1636 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
1637 if (m->valid == VM_PAGE_BITS_ALL) {
1638 vm_object_pip_wakeup(object);
1639 vm_page_dirty(m);
1640 vm_page_lock(m);
1641 vm_page_activate(m);
1642 vm_page_unlock(m);
1643 vm_page_xunbusy(m);
1644 vm_pager_page_unswapped(m);
1645 return;
1646 }
1647
1648 if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
1649 panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
1650 vm_object_pip_wakeup(object);
1651 vm_page_dirty(m);
1652 vm_page_lock(m);
1653 vm_page_launder(m);
1654 vm_page_unlock(m);
1655 vm_page_xunbusy(m);
1656 vm_pager_page_unswapped(m);
1657 }
1658
1659 /*
1660 * swap_pager_swapoff:
1661 *
1662 * Page in all of the pages that have been paged out to the
1663 * given device. The corresponding blocks in the bitmap must be
1664 * marked as allocated and the device must be flagged SW_CLOSING.
1665 * There may be no processes swapped out to the device.
1666 *
1667 * This routine may block.
1668 */
1669 static void
1670 swap_pager_swapoff(struct swdevt *sp)
1671 {
1672 struct swblk *sb;
1673 vm_object_t object;
1674 vm_pindex_t pi;
1675 int i, retries;
1676
1677 sx_assert(&swdev_syscall_lock, SA_XLOCKED);
1678
1679 retries = 0;
1680 full_rescan:
1681 mtx_lock(&vm_object_list_mtx);
1682 TAILQ_FOREACH(object, &vm_object_list, object_list) {
1683 if (object->type != OBJT_SWAP)
1684 continue;
1685 mtx_unlock(&vm_object_list_mtx);
1686 /* Depends on type-stability. */
1687 VM_OBJECT_WLOCK(object);
1688
1689 /*
1690 * Dead objects are eventually terminated on their own.
1691 */
1692 if ((object->flags & OBJ_DEAD) != 0)
1693 goto next_obj;
1694
1695 /*
1696 * Sync with fences placed after pctrie
1697 * initialization. We must not access pctrie below
1698 * unless we checked that our object is swap and not
1699 * dead.
1700 */
1701 atomic_thread_fence_acq();
1702 if (object->type != OBJT_SWAP)
1703 goto next_obj;
1704
1705 for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1706 &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
1707 pi = sb->p + SWAP_META_PAGES;
1708 for (i = 0; i < SWAP_META_PAGES; i++) {
1709 if (sb->d[i] == SWAPBLK_NONE)
1710 continue;
1711 if (swp_pager_isondev(sb->d[i], sp))
1712 swp_pager_force_pagein(object,
1713 sb->p + i);
1714 }
1715 }
1716 next_obj:
1717 VM_OBJECT_WUNLOCK(object);
1718 mtx_lock(&vm_object_list_mtx);
1719 }
1720 mtx_unlock(&vm_object_list_mtx);
1721
1722 if (sp->sw_used) {
1723 /*
1724 * Objects may be locked or paging to the device being
1725 * removed, so we will miss their pages and need to
1726 * make another pass. We have marked this device as
1727 * SW_CLOSING, so the activity should finish soon.
1728 */
1729 retries++;
1730 if (retries > 100) {
1731 panic("swapoff: failed to locate %d swap blocks",
1732 sp->sw_used);
1733 }
1734 pause("swpoff", hz / 20);
1735 goto full_rescan;
1736 }
1737 }
1738
1739 /************************************************************************
1740 * SWAP META DATA *
1741 ************************************************************************
1742 *
1743 * These routines manipulate the swap metadata stored in the
1744 * OBJT_SWAP object.
1745 *
1746 * Swap metadata is implemented with a global hash and not directly
1747 * linked into the object. Instead the object simply contains
1748 * appropriate tracking counters.
1749 */
1750
1751 /*
1752 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
1753 *
1754 * We first convert the object to a swap object if it is a default
1755 * object.
1756 *
1757 * The specified swapblk is added to the object's swap metadata. If
1758 * the swapblk is not valid, it is freed instead. Any previously
1759 * assigned swapblk is freed.
1760 */
1761 static void
1762 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
1763 {
1764 static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
1765 struct swblk *sb, *sb1;
1766 vm_pindex_t modpi, rdpi;
1767 int error, i;
1768
1769 VM_OBJECT_ASSERT_WLOCKED(object);
1770
1771 /*
1772 * Convert default object to swap object if necessary
1773 */
1774 if (object->type != OBJT_SWAP) {
1775 pctrie_init(&object->un_pager.swp.swp_blks);
1776
1777 /*
1778 * Ensure that swap_pager_swapoff()'s iteration over
1779 * object_list does not see a garbage pctrie.
1780 */
1781 atomic_thread_fence_rel();
1782
1783 object->type = OBJT_SWAP;
1784 KASSERT(object->handle == NULL, ("default pager with handle"));
1785 }
1786
1787 rdpi = rounddown(pindex, SWAP_META_PAGES);
1788 sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
1789 if (sb == NULL) {
1790 if (swapblk == SWAPBLK_NONE)
1791 return;
1792 for (;;) {
1793 sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
1794 pageproc ? M_USE_RESERVE : 0));
1795 if (sb != NULL) {
1796 sb->p = rdpi;
1797 for (i = 0; i < SWAP_META_PAGES; i++)
1798 sb->d[i] = SWAPBLK_NONE;
1799 if (atomic_cmpset_int(&swblk_zone_exhausted,
1800 1, 0))
1801 printf("swblk zone ok\n");
1802 break;
1803 }
1804 VM_OBJECT_WUNLOCK(object);
1805 if (uma_zone_exhausted(swblk_zone)) {
1806 if (atomic_cmpset_int(&swblk_zone_exhausted,
1807 0, 1))
1808 printf("swap blk zone exhausted, "
1809 "increase kern.maxswzone\n");
1810 vm_pageout_oom(VM_OOM_SWAPZ);
1811 pause("swzonxb", 10);
1812 } else
1813 uma_zwait(swblk_zone);
1814 VM_OBJECT_WLOCK(object);
1815 sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1816 rdpi);
1817 if (sb != NULL)
1818 /*
1819 * Somebody swapped out a nearby page,
1820 * allocating swblk at the rdpi index,
1821 * while we dropped the object lock.
1822 */
1823 goto allocated;
1824 }
1825 for (;;) {
1826 error = SWAP_PCTRIE_INSERT(
1827 &object->un_pager.swp.swp_blks, sb);
1828 if (error == 0) {
1829 if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1830 1, 0))
1831 printf("swpctrie zone ok\n");
1832 break;
1833 }
1834 VM_OBJECT_WUNLOCK(object);
1835 if (uma_zone_exhausted(swpctrie_zone)) {
1836 if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1837 0, 1))
1838 printf("swap pctrie zone exhausted, "
1839 "increase kern.maxswzone\n");
1840 vm_pageout_oom(VM_OOM_SWAPZ);
1841 pause("swzonxp", 10);
1842 } else
1843 uma_zwait(swpctrie_zone);
1844 VM_OBJECT_WLOCK(object);
1845 sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1846 rdpi);
1847 if (sb1 != NULL) {
1848 uma_zfree(swblk_zone, sb);
1849 sb = sb1;
1850 goto allocated;
1851 }
1852 }
1853 }
1854 allocated:
1855 MPASS(sb->p == rdpi);
1856
1857 modpi = pindex % SWAP_META_PAGES;
1858 /* Delete prior contents of metadata. */
1859 if (sb->d[modpi] != SWAPBLK_NONE)
1860 swp_pager_freeswapspace(sb->d[modpi], 1);
1861 /* Enter block into metadata. */
1862 sb->d[modpi] = swapblk;
1863
1864 /*
1865 * Free the swblk if we end up with the empty page run.
1866 */
1867 if (swapblk == SWAPBLK_NONE) {
1868 for (i = 0; i < SWAP_META_PAGES; i++) {
1869 if (sb->d[i] != SWAPBLK_NONE)
1870 break;
1871 }
1872 if (i == SWAP_META_PAGES) {
1873 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
1874 rdpi);
1875 uma_zfree(swblk_zone, sb);
1876 }
1877 }
1878 }
1879
1880 /*
1881 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1882 *
1883 * The requested range of blocks is freed, with any associated swap
1884 * returned to the swap bitmap.
1885 *
1886 * This routine will free swap metadata structures as they are cleaned
1887 * out. This routine does *NOT* operate on swap metadata associated
1888 * with resident pages.
1889 */
1890 static void
1891 swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
1892 {
1893 struct swblk *sb;
1894 vm_pindex_t last;
1895 int i;
1896 bool empty;
1897
1898 VM_OBJECT_ASSERT_WLOCKED(object);
1899 if (object->type != OBJT_SWAP || count == 0)
1900 return;
1901
1902 last = pindex + count - 1;
1903 for (;;) {
1904 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
1905 rounddown(pindex, SWAP_META_PAGES));
1906 if (sb == NULL || sb->p > last)
1907 break;
1908 empty = true;
1909 for (i = 0; i < SWAP_META_PAGES; i++) {
1910 if (sb->d[i] == SWAPBLK_NONE)
1911 continue;
1912 if (pindex <= sb->p + i && sb->p + i <= last) {
1913 swp_pager_freeswapspace(sb->d[i], 1);
1914 sb->d[i] = SWAPBLK_NONE;
1915 } else
1916 empty = false;
1917 }
1918 pindex = sb->p + SWAP_META_PAGES;
1919 if (empty) {
1920 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
1921 sb->p);
1922 uma_zfree(swblk_zone, sb);
1923 }
1924 }
1925 }
1926
1927 /*
1928 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1929 *
1930 * This routine locates and destroys all swap metadata associated with
1931 * an object.
1932 */
1933 static void
1934 swp_pager_meta_free_all(vm_object_t object)
1935 {
1936 struct swblk *sb;
1937 vm_pindex_t pindex;
1938 int i;
1939
1940 VM_OBJECT_ASSERT_WLOCKED(object);
1941 if (object->type != OBJT_SWAP)
1942 return;
1943
1944 for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1945 &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
1946 pindex = sb->p + SWAP_META_PAGES;
1947 for (i = 0; i < SWAP_META_PAGES; i++) {
1948 if (sb->d[i] != SWAPBLK_NONE)
1949 swp_pager_freeswapspace(sb->d[i], 1);
1950 }
1951 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
1952 uma_zfree(swblk_zone, sb);
1953 }
1954 }
1955
1956 /*
1957 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
1958 *
1959 * This routine is capable of looking up, popping, or freeing
1960 * swapblk assignments in the swap meta data or in the vm_page_t.
1961 * The routine typically returns the swapblk being looked-up, or popped,
1962 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1963 * was invalid. This routine will automatically free any invalid
1964 * meta-data swapblks.
1965 *
1966 * When acting on a busy resident page and paging is in progress, we
1967 * have to wait until paging is complete but otherwise can act on the
1968 * busy page.
1969 *
1970 * SWM_FREE remove and free swap block from metadata
1971 * SWM_POP remove from meta data but do not free.. pop it out
1972 */
1973 static daddr_t
1974 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
1975 {
1976 struct swblk *sb;
1977 daddr_t r1;
1978 int i;
1979
1980 if ((flags & (SWM_FREE | SWM_POP)) != 0)
1981 VM_OBJECT_ASSERT_WLOCKED(object);
1982 else
1983 VM_OBJECT_ASSERT_LOCKED(object);
1984
1985 /*
1986 * The meta data only exists if the object is OBJT_SWAP
1987 * and even then might not be allocated yet.
1988 */
1989 if (object->type != OBJT_SWAP)
1990 return (SWAPBLK_NONE);
1991
1992 sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1993 rounddown(pindex, SWAP_META_PAGES));
1994 if (sb == NULL)
1995 return (SWAPBLK_NONE);
1996 r1 = sb->d[pindex % SWAP_META_PAGES];
1997 if (r1 == SWAPBLK_NONE)
1998 return (SWAPBLK_NONE);
1999 if ((flags & (SWM_FREE | SWM_POP)) != 0) {
2000 sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
2001 for (i = 0; i < SWAP_META_PAGES; i++) {
2002 if (sb->d[i] != SWAPBLK_NONE)
2003 break;
2004 }
2005 if (i == SWAP_META_PAGES) {
2006 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
2007 rounddown(pindex, SWAP_META_PAGES));
2008 uma_zfree(swblk_zone, sb);
2009 }
2010 }
2011 if ((flags & SWM_FREE) != 0) {
2012 swp_pager_freeswapspace(r1, 1);
2013 r1 = SWAPBLK_NONE;
2014 }
2015 return (r1);
2016 }
2017
2018 /*
2019 * Returns the least page index which is greater than or equal to the
2020 * parameter pindex and for which there is a swap block allocated.
2021 * Returns object's size if the object's type is not swap or if there
2022 * are no allocated swap blocks for the object after the requested
2023 * pindex.
2024 */
2025 vm_pindex_t
2026 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
2027 {
2028 struct swblk *sb;
2029 int i;
2030
2031 VM_OBJECT_ASSERT_LOCKED(object);
2032 if (object->type != OBJT_SWAP)
2033 return (object->size);
2034
2035 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2036 rounddown(pindex, SWAP_META_PAGES));
2037 if (sb == NULL)
2038 return (object->size);
2039 if (sb->p < pindex) {
2040 for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
2041 if (sb->d[i] != SWAPBLK_NONE)
2042 return (sb->p + i);
2043 }
2044 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2045 roundup(pindex, SWAP_META_PAGES));
2046 if (sb == NULL)
2047 return (object->size);
2048 }
2049 for (i = 0; i < SWAP_META_PAGES; i++) {
2050 if (sb->d[i] != SWAPBLK_NONE)
2051 return (sb->p + i);
2052 }
2053
2054 /*
2055 * We get here if a swblk is present in the trie but it
2056 * doesn't map any blocks.
2057 */
2058 MPASS(0);
2059 return (object->size);
2060 }
2061
2062 /*
2063 * System call swapon(name) enables swapping on device name,
2064 * which must be in the swdevsw. Return EBUSY
2065 * if already swapping on this device.
2066 */
2067 #ifndef _SYS_SYSPROTO_H_
2068 struct swapon_args {
2069 char *name;
2070 };
2071 #endif
2072
2073 /*
2074 * MPSAFE
2075 */
2076 /* ARGSUSED */
2077 int
2078 sys_swapon(struct thread *td, struct swapon_args *uap)
2079 {
2080 struct vattr attr;
2081 struct vnode *vp;
2082 struct nameidata nd;
2083 int error;
2084
2085 error = priv_check(td, PRIV_SWAPON);
2086 if (error)
2087 return (error);
2088
2089 sx_xlock(&swdev_syscall_lock);
2090
2091 /*
2092 * Swap metadata may not fit in the KVM if we have physical
2093 * memory of >1GB.
2094 */
2095 if (swblk_zone == NULL) {
2096 error = ENOMEM;
2097 goto done;
2098 }
2099
2100 NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
2101 uap->name, td);
2102 error = namei(&nd);
2103 if (error)
2104 goto done;
2105
2106 NDFREE(&nd, NDF_ONLY_PNBUF);
2107 vp = nd.ni_vp;
2108
2109 if (vn_isdisk(vp, &error)) {
2110 error = swapongeom(vp);
2111 } else if (vp->v_type == VREG &&
2112 (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2113 (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2114 /*
2115 * Allow direct swapping to NFS regular files in the same
2116 * way that nfs_mountroot() sets up diskless swapping.
2117 */
2118 error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2119 }
2120
2121 if (error)
2122 vrele(vp);
2123 done:
2124 sx_xunlock(&swdev_syscall_lock);
2125 return (error);
2126 }
2127
2128 /*
2129 * Check that the total amount of swap currently configured does not
2130 * exceed half the theoretical maximum. If it does, print a warning
2131 * message.
2132 */
2133 static void
2134 swapon_check_swzone(void)
2135 {
2136 unsigned long maxpages, npages;
2137
2138 npages = swap_total / PAGE_SIZE;
2139 /* absolute maximum we can handle assuming 100% efficiency */
2140 maxpages = uma_zone_get_max(swblk_zone) * SWAP_META_PAGES;
2141
2142 /* recommend using no more than half that amount */
2143 if (npages > maxpages / 2) {
2144 printf("warning: total configured swap (%lu pages) "
2145 "exceeds maximum recommended amount (%lu pages).\n",
2146 npages, maxpages / 2);
2147 printf("warning: increase kern.maxswzone "
2148 "or reduce amount of swap.\n");
2149 }
2150 }
2151
2152 static void
2153 swaponsomething(struct vnode *vp, void *id, u_long nblks,
2154 sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2155 {
2156 struct swdevt *sp, *tsp;
2157 swblk_t dvbase;
2158 u_long mblocks;
2159
2160 /*
2161 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2162 * First chop nblks off to page-align it, then convert.
2163 *
2164 * sw->sw_nblks is in page-sized chunks now too.
2165 */
2166 nblks &= ~(ctodb(1) - 1);
2167 nblks = dbtoc(nblks);
2168
2169 /*
2170 * If we go beyond this, we get overflows in the radix
2171 * tree bitmap code.
2172 */
2173 mblocks = 0x40000000 / BLIST_META_RADIX;
2174 if (nblks > mblocks) {
2175 printf(
2176 "WARNING: reducing swap size to maximum of %luMB per unit\n",
2177 mblocks / 1024 / 1024 * PAGE_SIZE);
2178 nblks = mblocks;
2179 }
2180
2181 sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2182 sp->sw_vp = vp;
2183 sp->sw_id = id;
2184 sp->sw_dev = dev;
2185 sp->sw_nblks = nblks;
2186 sp->sw_used = 0;
2187 sp->sw_strategy = strategy;
2188 sp->sw_close = close;
2189 sp->sw_flags = flags;
2190
2191 sp->sw_blist = blist_create(nblks, M_WAITOK);
2192 /*
2193 * Do not free the first two block in order to avoid overwriting
2194 * any bsd label at the front of the partition
2195 */
2196 blist_free(sp->sw_blist, 2, nblks - 2);
2197
2198 dvbase = 0;
2199 mtx_lock(&sw_dev_mtx);
2200 TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2201 if (tsp->sw_end >= dvbase) {
2202 /*
2203 * We put one uncovered page between the devices
2204 * in order to definitively prevent any cross-device
2205 * I/O requests
2206 */
2207 dvbase = tsp->sw_end + 1;
2208 }
2209 }
2210 sp->sw_first = dvbase;
2211 sp->sw_end = dvbase + nblks;
2212 TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2213 nswapdev++;
2214 swap_pager_avail += nblks - 2;
2215 swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
2216 swapon_check_swzone();
2217 swp_sizecheck();
2218 mtx_unlock(&sw_dev_mtx);
2219 }
2220
2221 /*
2222 * SYSCALL: swapoff(devname)
2223 *
2224 * Disable swapping on the given device.
2225 *
2226 * XXX: Badly designed system call: it should use a device index
2227 * rather than filename as specification. We keep sw_vp around
2228 * only to make this work.
2229 */
2230 #ifndef _SYS_SYSPROTO_H_
2231 struct swapoff_args {
2232 char *name;
2233 };
2234 #endif
2235
2236 /*
2237 * MPSAFE
2238 */
2239 /* ARGSUSED */
2240 int
2241 sys_swapoff(struct thread *td, struct swapoff_args *uap)
2242 {
2243 struct vnode *vp;
2244 struct nameidata nd;
2245 struct swdevt *sp;
2246 int error;
2247
2248 error = priv_check(td, PRIV_SWAPOFF);
2249 if (error)
2250 return (error);
2251
2252 sx_xlock(&swdev_syscall_lock);
2253
2254 NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2255 td);
2256 error = namei(&nd);
2257 if (error)
2258 goto done;
2259 NDFREE(&nd, NDF_ONLY_PNBUF);
2260 vp = nd.ni_vp;
2261
2262 mtx_lock(&sw_dev_mtx);
2263 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2264 if (sp->sw_vp == vp)
2265 break;
2266 }
2267 mtx_unlock(&sw_dev_mtx);
2268 if (sp == NULL) {
2269 error = EINVAL;
2270 goto done;
2271 }
2272 error = swapoff_one(sp, td->td_ucred);
2273 done:
2274 sx_xunlock(&swdev_syscall_lock);
2275 return (error);
2276 }
2277
2278 static int
2279 swapoff_one(struct swdevt *sp, struct ucred *cred)
2280 {
2281 u_long nblks;
2282 #ifdef MAC
2283 int error;
2284 #endif
2285
2286 sx_assert(&swdev_syscall_lock, SA_XLOCKED);
2287 #ifdef MAC
2288 (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
2289 error = mac_system_check_swapoff(cred, sp->sw_vp);
2290 (void) VOP_UNLOCK(sp->sw_vp, 0);
2291 if (error != 0)
2292 return (error);
2293 #endif
2294 nblks = sp->sw_nblks;
2295
2296 /*
2297 * We can turn off this swap device safely only if the
2298 * available virtual memory in the system will fit the amount
2299 * of data we will have to page back in, plus an epsilon so
2300 * the system doesn't become critically low on swap space.
2301 */
2302 if (vm_cnt.v_free_count + swap_pager_avail < nblks + nswap_lowat)
2303 return (ENOMEM);
2304
2305 /*
2306 * Prevent further allocations on this device.
2307 */
2308 mtx_lock(&sw_dev_mtx);
2309 sp->sw_flags |= SW_CLOSING;
2310 swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
2311 swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
2312 mtx_unlock(&sw_dev_mtx);
2313
2314 /*
2315 * Page in the contents of the device and close it.
2316 */
2317 swap_pager_swapoff(sp);
2318
2319 sp->sw_close(curthread, sp);
2320 mtx_lock(&sw_dev_mtx);
2321 sp->sw_id = NULL;
2322 TAILQ_REMOVE(&swtailq, sp, sw_list);
2323 nswapdev--;
2324 if (nswapdev == 0) {
2325 swap_pager_full = 2;
2326 swap_pager_almost_full = 1;
2327 }
2328 if (swdevhd == sp)
2329 swdevhd = NULL;
2330 mtx_unlock(&sw_dev_mtx);
2331 blist_destroy(sp->sw_blist);
2332 free(sp, M_VMPGDATA);
2333 return (0);
2334 }
2335
2336 void
2337 swapoff_all(void)
2338 {
2339 struct swdevt *sp, *spt;
2340 const char *devname;
2341 int error;
2342
2343 sx_xlock(&swdev_syscall_lock);
2344
2345 mtx_lock(&sw_dev_mtx);
2346 TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
2347 mtx_unlock(&sw_dev_mtx);
2348 if (vn_isdisk(sp->sw_vp, NULL))
2349 devname = devtoname(sp->sw_vp->v_rdev);
2350 else
2351 devname = "[file]";
2352 error = swapoff_one(sp, thread0.td_ucred);
2353 if (error != 0) {
2354 printf("Cannot remove swap device %s (error=%d), "
2355 "skipping.\n", devname, error);
2356 } else if (bootverbose) {
2357 printf("Swap device %s removed.\n", devname);
2358 }
2359 mtx_lock(&sw_dev_mtx);
2360 }
2361 mtx_unlock(&sw_dev_mtx);
2362
2363 sx_xunlock(&swdev_syscall_lock);
2364 }
2365
2366 void
2367 swap_pager_status(int *total, int *used)
2368 {
2369 struct swdevt *sp;
2370
2371 *total = 0;
2372 *used = 0;
2373 mtx_lock(&sw_dev_mtx);
2374 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2375 *total += sp->sw_nblks;
2376 *used += sp->sw_used;
2377 }
2378 mtx_unlock(&sw_dev_mtx);
2379 }
2380
2381 int
2382 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2383 {
2384 struct swdevt *sp;
2385 const char *tmp_devname;
2386 int error, n;
2387
2388 n = 0;
2389 error = ENOENT;
2390 mtx_lock(&sw_dev_mtx);
2391 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2392 if (n != name) {
2393 n++;
2394 continue;
2395 }
2396 xs->xsw_version = XSWDEV_VERSION;
2397 xs->xsw_dev = sp->sw_dev;
2398 xs->xsw_flags = sp->sw_flags;
2399 xs->xsw_nblks = sp->sw_nblks;
2400 xs->xsw_used = sp->sw_used;
2401 if (devname != NULL) {
2402 if (vn_isdisk(sp->sw_vp, NULL))
2403 tmp_devname = devtoname(sp->sw_vp->v_rdev);
2404 else
2405 tmp_devname = "[file]";
2406 strncpy(devname, tmp_devname, len);
2407 }
2408 error = 0;
2409 break;
2410 }
2411 mtx_unlock(&sw_dev_mtx);
2412 return (error);
2413 }
2414
2415 static int
2416 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2417 {
2418 struct xswdev xs;
2419 int error;
2420
2421 if (arg2 != 1) /* name length */
2422 return (EINVAL);
2423 error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2424 if (error != 0)
2425 return (error);
2426 error = SYSCTL_OUT(req, &xs, sizeof(xs));
2427 return (error);
2428 }
2429
2430 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2431 "Number of swap devices");
2432 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
2433 sysctl_vm_swap_info,
2434 "Swap statistics by device");
2435
2436 /*
2437 * Count the approximate swap usage in pages for a vmspace. The
2438 * shadowed or not yet copied on write swap blocks are not accounted.
2439 * The map must be locked.
2440 */
2441 long
2442 vmspace_swap_count(struct vmspace *vmspace)
2443 {
2444 vm_map_t map;
2445 vm_map_entry_t cur;
2446 vm_object_t object;
2447 struct swblk *sb;
2448 vm_pindex_t e, pi;
2449 long count;
2450 int i;
2451
2452 map = &vmspace->vm_map;
2453 count = 0;
2454
2455 for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2456 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2457 continue;
2458 object = cur->object.vm_object;
2459 if (object == NULL || object->type != OBJT_SWAP)
2460 continue;
2461 VM_OBJECT_RLOCK(object);
2462 if (object->type != OBJT_SWAP)
2463 goto unlock;
2464 pi = OFF_TO_IDX(cur->offset);
2465 e = pi + OFF_TO_IDX(cur->end - cur->start);
2466 for (;; pi = sb->p + SWAP_META_PAGES) {
2467 sb = SWAP_PCTRIE_LOOKUP_GE(
2468 &object->un_pager.swp.swp_blks, pi);
2469 if (sb == NULL || sb->p >= e)
2470 break;
2471 for (i = 0; i < SWAP_META_PAGES; i++) {
2472 if (sb->p + i < e &&
2473 sb->d[i] != SWAPBLK_NONE)
2474 count++;
2475 }
2476 }
2477 unlock:
2478 VM_OBJECT_RUNLOCK(object);
2479 }
2480 return (count);
2481 }
2482
2483 /*
2484 * GEOM backend
2485 *
2486 * Swapping onto disk devices.
2487 *
2488 */
2489
2490 static g_orphan_t swapgeom_orphan;
2491
2492 static struct g_class g_swap_class = {
2493 .name = "SWAP",
2494 .version = G_VERSION,
2495 .orphan = swapgeom_orphan,
2496 };
2497
2498 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2499
2500
2501 static void
2502 swapgeom_close_ev(void *arg, int flags)
2503 {
2504 struct g_consumer *cp;
2505
2506 cp = arg;
2507 g_access(cp, -1, -1, 0);
2508 g_detach(cp);
2509 g_destroy_consumer(cp);
2510 }
2511
2512 /*
2513 * Add a reference to the g_consumer for an inflight transaction.
2514 */
2515 static void
2516 swapgeom_acquire(struct g_consumer *cp)
2517 {
2518
2519 mtx_assert(&sw_dev_mtx, MA_OWNED);
2520 cp->index++;
2521 }
2522
2523 /*
2524 * Remove a reference from the g_consumer. Post a close event if all
2525 * references go away, since the function might be called from the
2526 * biodone context.
2527 */
2528 static void
2529 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
2530 {
2531
2532 mtx_assert(&sw_dev_mtx, MA_OWNED);
2533 cp->index--;
2534 if (cp->index == 0) {
2535 if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
2536 sp->sw_id = NULL;
2537 }
2538 }
2539
2540 static void
2541 swapgeom_done(struct bio *bp2)
2542 {
2543 struct swdevt *sp;
2544 struct buf *bp;
2545 struct g_consumer *cp;
2546
2547 bp = bp2->bio_caller2;
2548 cp = bp2->bio_from;
2549 bp->b_ioflags = bp2->bio_flags;
2550 if (bp2->bio_error)
2551 bp->b_ioflags |= BIO_ERROR;
2552 bp->b_resid = bp->b_bcount - bp2->bio_completed;
2553 bp->b_error = bp2->bio_error;
2554 bufdone(bp);
2555 sp = bp2->bio_caller1;
2556 mtx_lock(&sw_dev_mtx);
2557 swapgeom_release(cp, sp);
2558 mtx_unlock(&sw_dev_mtx);
2559 g_destroy_bio(bp2);
2560 }
2561
2562 static void
2563 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2564 {
2565 struct bio *bio;
2566 struct g_consumer *cp;
2567
2568 mtx_lock(&sw_dev_mtx);
2569 cp = sp->sw_id;
2570 if (cp == NULL) {
2571 mtx_unlock(&sw_dev_mtx);
2572 bp->b_error = ENXIO;
2573 bp->b_ioflags |= BIO_ERROR;
2574 bufdone(bp);
2575 return;
2576 }
2577 swapgeom_acquire(cp);
2578 mtx_unlock(&sw_dev_mtx);
2579 if (bp->b_iocmd == BIO_WRITE)
2580 bio = g_new_bio();
2581 else
2582 bio = g_alloc_bio();
2583 if (bio == NULL) {
2584 mtx_lock(&sw_dev_mtx);
2585 swapgeom_release(cp, sp);
2586 mtx_unlock(&sw_dev_mtx);
2587 bp->b_error = ENOMEM;
2588 bp->b_ioflags |= BIO_ERROR;
2589 bufdone(bp);
2590 return;
2591 }
2592
2593 bio->bio_caller1 = sp;
2594 bio->bio_caller2 = bp;
2595 bio->bio_cmd = bp->b_iocmd;
2596 bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2597 bio->bio_length = bp->b_bcount;
2598 bio->bio_done = swapgeom_done;
2599 if (!buf_mapped(bp)) {
2600 bio->bio_ma = bp->b_pages;
2601 bio->bio_data = unmapped_buf;
2602 bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
2603 bio->bio_ma_n = bp->b_npages;
2604 bio->bio_flags |= BIO_UNMAPPED;
2605 } else {
2606 bio->bio_data = bp->b_data;
2607 bio->bio_ma = NULL;
2608 }
2609 g_io_request(bio, cp);
2610 return;
2611 }
2612
2613 static void
2614 swapgeom_orphan(struct g_consumer *cp)
2615 {
2616 struct swdevt *sp;
2617 int destroy;
2618
2619 mtx_lock(&sw_dev_mtx);
2620 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2621 if (sp->sw_id == cp) {
2622 sp->sw_flags |= SW_CLOSING;
2623 break;
2624 }
2625 }
2626 /*
2627 * Drop reference we were created with. Do directly since we're in a
2628 * special context where we don't have to queue the call to
2629 * swapgeom_close_ev().
2630 */
2631 cp->index--;
2632 destroy = ((sp != NULL) && (cp->index == 0));
2633 if (destroy)
2634 sp->sw_id = NULL;
2635 mtx_unlock(&sw_dev_mtx);
2636 if (destroy)
2637 swapgeom_close_ev(cp, 0);
2638 }
2639
2640 static void
2641 swapgeom_close(struct thread *td, struct swdevt *sw)
2642 {
2643 struct g_consumer *cp;
2644
2645 mtx_lock(&sw_dev_mtx);
2646 cp = sw->sw_id;
2647 sw->sw_id = NULL;
2648 mtx_unlock(&sw_dev_mtx);
2649
2650 /*
2651 * swapgeom_close() may be called from the biodone context,
2652 * where we cannot perform topology changes. Delegate the
2653 * work to the events thread.
2654 */
2655 if (cp != NULL)
2656 g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2657 }
2658
2659 static int
2660 swapongeom_locked(struct cdev *dev, struct vnode *vp)
2661 {
2662 struct g_provider *pp;
2663 struct g_consumer *cp;
2664 static struct g_geom *gp;
2665 struct swdevt *sp;
2666 u_long nblks;
2667 int error;
2668
2669 pp = g_dev_getprovider(dev);
2670 if (pp == NULL)
2671 return (ENODEV);
2672 mtx_lock(&sw_dev_mtx);
2673 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2674 cp = sp->sw_id;
2675 if (cp != NULL && cp->provider == pp) {
2676 mtx_unlock(&sw_dev_mtx);
2677 return (EBUSY);
2678 }
2679 }
2680 mtx_unlock(&sw_dev_mtx);
2681 if (gp == NULL)
2682 gp = g_new_geomf(&g_swap_class, "swap");
2683 cp = g_new_consumer(gp);
2684 cp->index = 1; /* Number of active I/Os, plus one for being active. */
2685 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2686 g_attach(cp, pp);
2687 /*
2688 * XXX: Every time you think you can improve the margin for
2689 * footshooting, somebody depends on the ability to do so:
2690 * savecore(8) wants to write to our swapdev so we cannot
2691 * set an exclusive count :-(
2692 */
2693 error = g_access(cp, 1, 1, 0);
2694 if (error != 0) {
2695 g_detach(cp);
2696 g_destroy_consumer(cp);
2697 return (error);
2698 }
2699 nblks = pp->mediasize / DEV_BSIZE;
2700 swaponsomething(vp, cp, nblks, swapgeom_strategy,
2701 swapgeom_close, dev2udev(dev),
2702 (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
2703 return (0);
2704 }
2705
2706 static int
2707 swapongeom(struct vnode *vp)
2708 {
2709 int error;
2710
2711 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2712 if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
2713 error = ENOENT;
2714 } else {
2715 g_topology_lock();
2716 error = swapongeom_locked(vp->v_rdev, vp);
2717 g_topology_unlock();
2718 }
2719 VOP_UNLOCK(vp, 0);
2720 return (error);
2721 }
2722
2723 /*
2724 * VNODE backend
2725 *
2726 * This is used mainly for network filesystem (read: probably only tested
2727 * with NFS) swapfiles.
2728 *
2729 */
2730
2731 static void
2732 swapdev_strategy(struct buf *bp, struct swdevt *sp)
2733 {
2734 struct vnode *vp2;
2735
2736 bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2737
2738 vp2 = sp->sw_id;
2739 vhold(vp2);
2740 if (bp->b_iocmd == BIO_WRITE) {
2741 if (bp->b_bufobj)
2742 bufobj_wdrop(bp->b_bufobj);
2743 bufobj_wref(&vp2->v_bufobj);
2744 }
2745 if (bp->b_bufobj != &vp2->v_bufobj)
2746 bp->b_bufobj = &vp2->v_bufobj;
2747 bp->b_vp = vp2;
2748 bp->b_iooffset = dbtob(bp->b_blkno);
2749 bstrategy(bp);
2750 return;
2751 }
2752
2753 static void
2754 swapdev_close(struct thread *td, struct swdevt *sp)
2755 {
2756
2757 VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2758 vrele(sp->sw_vp);
2759 }
2760
2761
2762 static int
2763 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2764 {
2765 struct swdevt *sp;
2766 int error;
2767
2768 if (nblks == 0)
2769 return (ENXIO);
2770 mtx_lock(&sw_dev_mtx);
2771 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2772 if (sp->sw_id == vp) {
2773 mtx_unlock(&sw_dev_mtx);
2774 return (EBUSY);
2775 }
2776 }
2777 mtx_unlock(&sw_dev_mtx);
2778
2779 (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2780 #ifdef MAC
2781 error = mac_system_check_swapon(td->td_ucred, vp);
2782 if (error == 0)
2783 #endif
2784 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
2785 (void) VOP_UNLOCK(vp, 0);
2786 if (error)
2787 return (error);
2788
2789 swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2790 NODEV, 0);
2791 return (0);
2792 }
2793
2794 static int
2795 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
2796 {
2797 int error, new, n;
2798
2799 new = nsw_wcount_async_max;
2800 error = sysctl_handle_int(oidp, &new, 0, req);
2801 if (error != 0 || req->newptr == NULL)
2802 return (error);
2803
2804 if (new > nswbuf / 2 || new < 1)
2805 return (EINVAL);
2806
2807 mtx_lock(&pbuf_mtx);
2808 while (nsw_wcount_async_max != new) {
2809 /*
2810 * Adjust difference. If the current async count is too low,
2811 * we will need to sqeeze our update slowly in. Sleep with a
2812 * higher priority than getpbuf() to finish faster.
2813 */
2814 n = new - nsw_wcount_async_max;
2815 if (nsw_wcount_async + n >= 0) {
2816 nsw_wcount_async += n;
2817 nsw_wcount_async_max += n;
2818 wakeup(&nsw_wcount_async);
2819 } else {
2820 nsw_wcount_async_max -= nsw_wcount_async;
2821 nsw_wcount_async = 0;
2822 msleep(&nsw_wcount_async, &pbuf_mtx, PSWP,
2823 "swpsysctl", 0);
2824 }
2825 }
2826 mtx_unlock(&pbuf_mtx);
2827
2828 return (0);
2829 }
Cache object: 73a9359ad5ad05b842739f6d5066fcf8
|