FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c
1 /*
2 * Copyright (c) 1998 Matthew Dillon,
3 * Copyright (c) 1994 John S. Dyson
4 * Copyright (c) 1990 University of Utah.
5 * Copyright (c) 1982, 1986, 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * New Swap System
41 * Matthew Dillon
42 *
43 * Radix Bitmap 'blists'.
44 *
45 * - The new swapper uses the new radix bitmap code. This should scale
46 * to arbitrarily small or arbitrarily large swap spaces and an almost
47 * arbitrary degree of fragmentation.
48 *
49 * Features:
50 *
51 * - on the fly reallocation of swap during putpages. The new system
52 * does not try to keep previously allocated swap blocks for dirty
53 * pages.
54 *
55 * - on the fly deallocation of swap
56 *
57 * - No more garbage collection required. Unnecessarily allocated swap
58 * blocks only exist for dirty vm_page_t's now and these are already
59 * cycled (in a high-load system) by the pager. We also do on-the-fly
60 * removal of invalidated swap blocks when a page is destroyed
61 * or renamed.
62 *
63 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64 *
65 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
66 * @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
67 */
68
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD: releng/5.2/sys/vm/swap_pager.c 121854 2003-11-01 08:57:26Z alc $");
71
72 #include "opt_mac.h"
73 #include "opt_swap.h"
74 #include "opt_vm.h"
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/conf.h>
79 #include <sys/kernel.h>
80 #include <sys/proc.h>
81 #include <sys/bio.h>
82 #include <sys/buf.h>
83 #include <sys/disk.h>
84 #include <sys/fcntl.h>
85 #include <sys/mount.h>
86 #include <sys/namei.h>
87 #include <sys/vnode.h>
88 #include <sys/mac.h>
89 #include <sys/malloc.h>
90 #include <sys/sysctl.h>
91 #include <sys/sysproto.h>
92 #include <sys/blist.h>
93 #include <sys/lock.h>
94 #include <sys/sx.h>
95 #include <sys/vmmeter.h>
96
97 #include <vm/vm.h>
98 #include <vm/pmap.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pager.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_param.h>
106 #include <vm/swap_pager.h>
107 #include <vm/vm_extern.h>
108 #include <vm/uma.h>
109
110 #include <geom/geom.h>
111
112 /*
113 * SWB_NPAGES must be a power of 2. It may be set to 1, 2, 4, 8, or 16
114 * pages per allocation. We recommend you stick with the default of 8.
115 * The 16-page limit is due to the radix code (kern/subr_blist.c).
116 */
117 #ifndef MAX_PAGEOUT_CLUSTER
118 #define MAX_PAGEOUT_CLUSTER 16
119 #endif
120
121 #if !defined(SWB_NPAGES)
122 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER
123 #endif
124
125 /*
126 * Piecemeal swap metadata structure. Swap is stored in a radix tree.
127 *
128 * If SWB_NPAGES is 8 and sizeof(char *) == sizeof(daddr_t), our radix
129 * is basically 8. Assuming PAGE_SIZE == 4096, one tree level represents
130 * 32K worth of data, two levels represent 256K, three levels represent
131 * 2 MBytes. This is acceptable.
132 *
133 * Overall memory utilization is about the same as the old swap structure.
134 */
135 #define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t))
136 #define SWAP_META_PAGES (SWB_NPAGES * 2)
137 #define SWAP_META_MASK (SWAP_META_PAGES - 1)
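/*
 * Worked example (illustrative only): with SWB_NPAGES == 8, the defines
 * above give SWAP_META_PAGES == 16 and SWAP_META_MASK == 0x0f, so page
 * index 37 lives in the swblock whose base index is 32 (37 & ~0x0f),
 * at slot 5 (37 & 0x0f) of its swb_pages[] array.
 */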
138
139 typedef int32_t swblk_t; /*
140 * swap offset. This is the type used to
141 * address the "virtual swap device" and
142 * therefore the maximum swap space is
143 * 2^31 pages (the index is a signed 32-bit value).
144 */
145
146 struct swdevt;
147 typedef void sw_strategy_t(struct buf *bp, struct swdevt *sw);
148 typedef void sw_close_t(struct thread *td, struct swdevt *sw);
149
150 /*
151 * Swap device table
152 */
153 struct swdevt {
154 int sw_flags;
155 int sw_nblks;
156 int sw_used;
157 udev_t sw_udev;
158 struct vnode *sw_vp;
159 void *sw_id;
160 swblk_t sw_first;
161 swblk_t sw_end;
162 struct blist *sw_blist;
163 TAILQ_ENTRY(swdevt) sw_list;
164 sw_strategy_t *sw_strategy;
165 sw_close_t *sw_close;
166 };
167
168 #define SW_CLOSING 0x04
169
170 struct swblock {
171 struct swblock *swb_hnext;
172 vm_object_t swb_object;
173 vm_pindex_t swb_index;
174 int swb_count;
175 daddr_t swb_pages[SWAP_META_PAGES];
176 };
177
178 static struct mtx sw_dev_mtx;
179 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
180 static struct swdevt *swdevhd; /* Allocate from here next */
181 static int nswapdev; /* Number of swap devices */
182 int swap_pager_avail;
183 static int swdev_syscall_active = 0; /* serialize swap(on|off) */
184
185 static void swapdev_strategy(struct buf *, struct swdevt *sw);
186
187 #define SWM_FREE 0x02 /* free, period */
188 #define SWM_POP 0x04 /* pop out */
189
190 int swap_pager_full; /* swap space exhaustion (task killing) */
191 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
192 static int nsw_rcount; /* free read buffers */
193 static int nsw_wcount_sync; /* limit write buffers / synchronous */
194 static int nsw_wcount_async; /* limit write buffers / asynchronous */
195 static int nsw_wcount_async_max;/* assigned maximum */
196 static int nsw_cluster_max; /* maximum VOP I/O allowed */
197
198 static struct swblock **swhash;
199 static int swhash_mask;
200 static struct mtx swhash_mtx;
201
202 static int swap_async_max = 4; /* maximum in-progress async I/O's */
203 static struct sx sw_alloc_sx;
204
205
206 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
207 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
208
209 /*
210 * "named" and "unnamed" anon region objects. Try to reduce the overhead
211 * of searching a named list by hashing it just a little.
212 */
213
214 #define NOBJLISTS 8
215
216 #define NOBJLIST(handle) \
217 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
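/*
 * For example, an arbitrary (illustrative) handle value of 0xc0a1b2c0
 * yields ((0xc0a1b2c0 >> 4) & 7) == 4, i.e. the object is hashed onto
 * swap_pager_object_list[4].
 */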
218
219 static struct mtx sw_alloc_mtx; /* protect list manipulation */
220 static struct pagerlst swap_pager_object_list[NOBJLISTS];
221 static struct pagerlst swap_pager_un_object_list;
222 static uma_zone_t swap_zone;
223
224 /*
225 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
226 * calls hooked from other parts of the VM system and do not appear here.
227 * (see vm/swap_pager.h).
228 */
229 static vm_object_t
230 swap_pager_alloc(void *handle, vm_ooffset_t size,
231 vm_prot_t prot, vm_ooffset_t offset);
232 static void swap_pager_dealloc(vm_object_t object);
233 static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
234 static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
235 static boolean_t
236 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
237 static void swap_pager_init(void);
238 static void swap_pager_unswapped(vm_page_t);
239 static void swap_pager_swapoff(struct swdevt *sp, int *sw_used);
240
241 struct pagerops swappagerops = {
242 .pgo_init = swap_pager_init, /* early system initialization of pager */
243 .pgo_alloc = swap_pager_alloc, /* allocate an OBJT_SWAP object */
244 .pgo_dealloc = swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
245 .pgo_getpages = swap_pager_getpages, /* pagein */
246 .pgo_putpages = swap_pager_putpages, /* pageout */
247 .pgo_haspage = swap_pager_haspage, /* get backing store status for page */
248 .pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
249 };
250
251 /*
252 * dmmax is in page-sized chunks with the new swap system. It was
253 * dev-bsized chunks in the old. dmmax is always a power of 2.
254 *
255 * swap_*() routines are externally accessible. swp_*() routines are
256 * internal.
257 */
258 static int dmmax;
259 static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
260 static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
261
262 SYSCTL_INT(_vm, OID_AUTO, dmmax,
263 CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
264
265 static void swp_sizecheck(void);
266 static void swp_pager_sync_iodone(struct buf *bp);
267 static void swp_pager_async_iodone(struct buf *bp);
268 static int swapongeom(struct thread *, struct vnode *);
269 static int swaponvp(struct thread *, struct vnode *, u_long);
270
271 /*
272 * Swap bitmap functions
273 */
274 static void swp_pager_freeswapspace(daddr_t blk, int npages);
275 static daddr_t swp_pager_getswapspace(int npages);
276
277 /*
278 * Metadata functions
279 */
280 static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
281 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
282 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
283 static void swp_pager_meta_free_all(vm_object_t);
284 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
285
286 /*
287 * SWP_SIZECHECK() - update swap_pager_full indication
288 *
289 * update the swap_pager_almost_full indication and warn when we are
290 * about to run out of swap space, using lowat/hiwat hysteresis.
291 *
292 * Clear swap_pager_full ( task killing ) indication when lowat is met.
293 *
294 * No restrictions on call
295 * This routine may not block.
296 * This routine must be called at splvm()
297 */
298 static void
299 swp_sizecheck(void)
300 {
301
302 if (swap_pager_avail < nswap_lowat) {
303 if (swap_pager_almost_full == 0) {
304 printf("swap_pager: out of swap space\n");
305 swap_pager_almost_full = 1;
306 }
307 } else {
308 swap_pager_full = 0;
309 if (swap_pager_avail > nswap_hiwat)
310 swap_pager_almost_full = 0;
311 }
312 }
313
314 /*
315 * SWP_PAGER_HASH() - hash swap meta data
316 *
317 * This is a helper function which hashes the swapblk given
318 * the object and page index. It returns a pointer to a pointer
319 * to the swblock entry, or a pointer to a NULL pointer if it
320 * could not find a swapblk.
321 *
322 * This routine must be called at splvm().
323 */
324 static struct swblock **
325 swp_pager_hash(vm_object_t object, vm_pindex_t index)
326 {
327 struct swblock **pswap;
328 struct swblock *swap;
329
330 index &= ~(vm_pindex_t)SWAP_META_MASK;
331 pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
332 while ((swap = *pswap) != NULL) {
333 if (swap->swb_object == object &&
334 swap->swb_index == index
335 ) {
336 break;
337 }
338 pswap = &swap->swb_hnext;
339 }
340 return (pswap);
341 }
342
343 /*
344 * SWAP_PAGER_INIT() - initialize the swap pager!
345 *
346 * Expected to be started from system init. NOTE: This code is run
347 * before much else so be careful what you depend on. Most of the VM
348 * system has yet to be initialized at this point.
349 */
350 static void
351 swap_pager_init(void)
352 {
353 /*
354 * Initialize object lists
355 */
356 int i;
357
358 for (i = 0; i < NOBJLISTS; ++i)
359 TAILQ_INIT(&swap_pager_object_list[i]);
360 TAILQ_INIT(&swap_pager_un_object_list);
361 mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
362 mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
363
364 /*
365 * Device Stripe, in PAGE_SIZE'd blocks
366 */
367 dmmax = SWB_NPAGES * 2;
368 }
369
370 /*
371 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
372 *
373 * Expected to be started from pageout process once, prior to entering
374 * its main loop.
375 */
376 void
377 swap_pager_swap_init(void)
378 {
379 int n, n2;
380
381 /*
382 * Number of in-transit swap bp operations. Don't
383 * exhaust the pbufs completely. Make sure we
384 * initialize workable values (0 will work for hysteresis
385 * but it isn't very efficient).
386 *
387 * The nsw_cluster_max is constrained by the bp->b_pages[]
388 * array (MAXPHYS/PAGE_SIZE) and our locally defined
389 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
390 * constrained by the swap device interleave stripe size.
391 *
392 * Currently we hardwire nsw_wcount_async to 4. This limit is
393 * designed to prevent other I/O from having high latencies due to
394 * our pageout I/O. The value 4 works well for one or two active swap
395 * devices but is probably a little low if you have more. Even so,
396 * a higher value would probably generate only a limited improvement
397 * with three or four active swap devices since the system does not
398 * typically have to pageout at extreme bandwidths. We will want
399 * at least 2 per swap device, and 4 is a pretty good value if you
400 * have one NFS swap device due to the command/ack latency over NFS.
401 * So it all works out pretty well.
402 */
403 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
404
405 mtx_lock(&pbuf_mtx);
406 nsw_rcount = (nswbuf + 1) / 2;
407 nsw_wcount_sync = (nswbuf + 3) / 4;
408 nsw_wcount_async = 4;
409 nsw_wcount_async_max = nsw_wcount_async;
410 mtx_unlock(&pbuf_mtx);
411
412 /*
413 * Initialize our zone. Right now I'm just guessing on the number
414 * we need based on the number of pages in the system. Each swblock
415 * can hold 16 pages, so this is probably overkill. This reservation
416 * is typically limited to around 32MB by default.
417 */
418 n = cnt.v_page_count / 2;
419 if (maxswzone && n > maxswzone / sizeof(struct swblock))
420 n = maxswzone / sizeof(struct swblock);
421 n2 = n;
422 swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
423 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
424 do {
425 if (uma_zone_set_obj(swap_zone, NULL, n))
426 break;
427 /*
428 * if the allocation failed, try a zone two thirds the
429 * size of the previous attempt.
430 */
431 n -= ((n + 2) / 3);
432 } while (n > 0);
433 if (swap_zone == NULL)
434 panic("failed to create swap_zone.");
435 if (n2 != n)
436 printf("Swap zone entries reduced from %d to %d.\n", n2, n);
437 n2 = n;
438
439 /*
440 * Initialize our meta-data hash table. The swapper does not need to
441 * be quite as efficient as the VM system, so we do not use an
442 * oversized hash table.
443 *
444 * n: size of hash table, must be power of 2
445 * swhash_mask: hash table index mask
446 */
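	/*
	 * For example, if the zone sizing above left n2 at 16384 swblock
	 * entries, the loop below doubles n until it reaches n2 / 8,
	 * giving a 2048-bucket table and swhash_mask == 0x7ff.
	 */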
447 for (n = 1; n < n2 / 8; n *= 2)
448 ;
449 swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
450 swhash_mask = n - 1;
451 mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
452 }
453
454 /*
455 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
456 * its metadata structures.
457 *
458 * This routine is called from the mmap and fork code to create a new
459 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object
460 * and then converting it with swp_pager_meta_build().
461 *
462 * This routine may block in vm_object_allocate() and create a named
463 * object lookup race, so we must interlock. We must also run at
464 * splvm() for the object lookup to handle races with interrupts, but
465 * we do not have to maintain splvm() in between the lookup and the
466 * add because (I believe) it is not possible to attempt to create
467 * a new swap object w/handle when a default object with that handle
468 * already exists.
469 *
470 * MPSAFE
471 */
472 static vm_object_t
473 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
474 vm_ooffset_t offset)
475 {
476 vm_object_t object;
477
478 mtx_lock(&Giant);
479 if (handle) {
480 /*
481 * Reference existing named region or allocate new one. There
482 * should not be a race here against swp_pager_meta_build()
483 * as called from vm_page_remove() in regards to the lookup
484 * of the handle.
485 */
486 sx_xlock(&sw_alloc_sx);
487 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
488
489 if (object != NULL) {
490 vm_object_reference(object);
491 } else {
492 object = vm_object_allocate(OBJT_DEFAULT,
493 OFF_TO_IDX(offset + PAGE_MASK + size));
494 object->handle = handle;
495
496 VM_OBJECT_LOCK(object);
497 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
498 VM_OBJECT_UNLOCK(object);
499 }
500 sx_xunlock(&sw_alloc_sx);
501 } else {
502 object = vm_object_allocate(OBJT_DEFAULT,
503 OFF_TO_IDX(offset + PAGE_MASK + size));
504
505 VM_OBJECT_LOCK(object);
506 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
507 VM_OBJECT_UNLOCK(object);
508 }
509 mtx_unlock(&Giant);
510 return (object);
511 }
512
513 /*
514 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
515 *
516 * The swap backing for the object is destroyed. The code is
517 * designed such that we can reinstantiate it later, but this
518 * routine is typically called only when the entire object is
519 * about to be destroyed.
520 *
521 * This routine used to block, but no longer does.
522 *
523 * The object must be locked or unreferenceable.
524 */
525 static void
526 swap_pager_dealloc(vm_object_t object)
527 {
528 int s;
529
530 /*
531 * Remove from list right away so lookups will fail if we block for
532 * pageout completion.
533 */
534 mtx_lock(&sw_alloc_mtx);
535 if (object->handle == NULL) {
536 TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
537 } else {
538 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
539 }
540 mtx_unlock(&sw_alloc_mtx);
541
542 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
543 vm_object_pip_wait(object, "swpdea");
544
545 /*
546 * Free all remaining metadata. We only bother to free it from
547 * the swap meta data. We do not attempt to free swapblk's still
548 * associated with vm_page_t's for this object. We do not care
549 * if paging is still in progress on some objects.
550 */
551 s = splvm();
552 swp_pager_meta_free_all(object);
553 splx(s);
554 }
555
556 /************************************************************************
557 * SWAP PAGER BITMAP ROUTINES *
558 ************************************************************************/
559
560 /*
561 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
562 *
563 * Allocate swap for the requested number of pages. The starting
564 * swap block number (a page index) is returned or SWAPBLK_NONE
565 * if the allocation failed.
566 *
567 * Also has the side effect of advising that somebody made a mistake
568 * when they configured swap and didn't configure enough.
569 *
570 * Must be called at splvm() to avoid races with bitmap frees from
571 * vm_page_remove() aka swap_pager_page_removed().
572 *
573 * This routine may not block
574 * This routine must be called at splvm().
575 *
576 * We allocate in round-robin fashion from the configured devices.
577 */
578 static daddr_t
579 swp_pager_getswapspace(int npages)
580 {
581 daddr_t blk;
582 struct swdevt *sp;
583 int i;
584
585 blk = SWAPBLK_NONE;
586 mtx_lock(&sw_dev_mtx);
587 sp = swdevhd;
588 for (i = 0; i < nswapdev; i++) {
589 if (sp == NULL)
590 sp = TAILQ_FIRST(&swtailq);
591 if (!(sp->sw_flags & SW_CLOSING)) {
592 blk = blist_alloc(sp->sw_blist, npages);
593 if (blk != SWAPBLK_NONE) {
594 blk += sp->sw_first;
595 sp->sw_used += npages;
596 swap_pager_avail -= npages;
597 swp_sizecheck();
598 swdevhd = TAILQ_NEXT(sp, sw_list);
599 goto done;
600 }
601 }
602 sp = TAILQ_NEXT(sp, sw_list);
603 }
604 if (swap_pager_full != 2) {
605 printf("swap_pager_getswapspace(%d): failed\n", npages);
606 swap_pager_full = 2;
607 swap_pager_almost_full = 1;
608 }
609 swdevhd = NULL;
610 done:
611 mtx_unlock(&sw_dev_mtx);
612 return (blk);
613 }
614
615 static struct swdevt *
616 swp_pager_find_dev(daddr_t blk)
617 {
618 struct swdevt *sp;
619
620 mtx_lock(&sw_dev_mtx);
621 TAILQ_FOREACH(sp, &swtailq, sw_list) {
622 if (blk >= sp->sw_first && blk < sp->sw_end) {
623 mtx_unlock(&sw_dev_mtx);
624 return (sp);
625 }
626 }
627 panic("Swapdev not found");
628 }
629
630 static void
631 swp_pager_strategy(struct buf *bp)
632 {
633 struct swdevt *sp;
634
635 mtx_lock(&sw_dev_mtx);
636 TAILQ_FOREACH(sp, &swtailq, sw_list) {
637 if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
638 mtx_unlock(&sw_dev_mtx);
639 sp->sw_strategy(bp, sp);
640 return;
641 }
642 }
643 panic("Swapdev not found");
644 }
645
646
647 /*
648 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
649 *
650 * This routine returns the specified swap blocks back to the bitmap.
651 *
652 * Note: This routine may not block (it could in the old swap code),
653 * and through the use of the new blist routines it does not block.
654 *
655 * We must be called at splvm() to avoid races with bitmap frees from
656 * vm_page_remove() aka swap_pager_page_removed().
657 *
658 * This routine may not block
659 * This routine must be called at splvm().
660 */
661 static void
662 swp_pager_freeswapspace(daddr_t blk, int npages)
663 {
664 struct swdevt *sp;
665
666 mtx_lock(&sw_dev_mtx);
667 TAILQ_FOREACH(sp, &swtailq, sw_list) {
668 if (blk >= sp->sw_first && blk < sp->sw_end) {
669 sp->sw_used -= npages;
670 /*
671 * If we are attempting to stop swapping on
672 * this device, we don't want to mark any
673 * blocks free lest they be reused.
674 */
675 if ((sp->sw_flags & SW_CLOSING) == 0) {
676 blist_free(sp->sw_blist, blk - sp->sw_first,
677 npages);
678 swap_pager_avail += npages;
679 swp_sizecheck();
680 }
681 mtx_unlock(&sw_dev_mtx);
682 return;
683 }
684 }
685 panic("Swapdev not found");
686 }
687
688 /*
689 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
690 * range within an object.
691 *
692 * This is a globally accessible routine.
693 *
694 * This routine removes swapblk assignments from swap metadata.
695 *
696 * The external callers of this routine typically have already destroyed
697 * or renamed vm_page_t's associated with this range in the object so
698 * we should be ok.
699 *
700 * This routine may be called at any spl. We up our spl to splvm temporarily
701 * in order to perform the metadata removal.
702 */
703 void
704 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
705 {
706 int s = splvm();
707
708 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
709 swp_pager_meta_free(object, start, size);
710 splx(s);
711 }
712
713 /*
714 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
715 *
716 * Assigns swap blocks to the specified range within the object. The
717 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
718 *
719 * Returns 0 on success, -1 on failure.
720 */
721 int
722 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
723 {
724 int s;
725 int n = 0;
726 daddr_t blk = SWAPBLK_NONE;
727 vm_pindex_t beg = start; /* save start index */
728
729 s = splvm();
730 VM_OBJECT_LOCK(object);
731 while (size) {
732 if (n == 0) {
733 n = BLIST_MAX_ALLOC;
734 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
735 n >>= 1;
736 if (n == 0) {
737 swp_pager_meta_free(object, beg, start - beg);
738 VM_OBJECT_UNLOCK(object);
739 splx(s);
740 return (-1);
741 }
742 }
743 }
744 swp_pager_meta_build(object, start, blk);
745 --size;
746 ++start;
747 ++blk;
748 --n;
749 }
750 swp_pager_meta_free(object, start, n);
751 VM_OBJECT_UNLOCK(object);
752 splx(s);
753 return (0);
754 }
755
756 /*
757 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
758 * and destroy the source.
759 *
760 * Copy any valid swapblks from the source to the destination. In
761 * cases where both the source and destination have a valid swapblk,
762 * we keep the destination's.
763 *
764 * This routine is allowed to block. It may block allocating metadata
765 * indirectly through swp_pager_meta_build() or if paging is still in
766 * progress on the source.
767 *
768 * This routine can be called at any spl
769 *
770 * XXX vm_object_collapse() kinda expects us not to block because we
771 * supposedly do not need to allocate memory, but for the moment we
772 * *may* have to get a little memory from the zone allocator, but
773 * it is taken from the interrupt memory. We should be ok.
774 *
775 * The source object contains no vm_page_t's (which is just as well)
776 *
777 * The source object is of type OBJT_SWAP.
778 *
779 * The source and destination objects must be locked or
780 * inaccessible (XXX are they ?)
781 */
782 void
783 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
784 vm_pindex_t offset, int destroysource)
785 {
786 vm_pindex_t i;
787 int s;
788
789 VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
790 VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
791
792 s = splvm();
793 /*
794 * If destroysource is set, we remove the source object from the
795 * swap_pager internal queue now.
796 */
797 if (destroysource) {
798 mtx_lock(&sw_alloc_mtx);
799 if (srcobject->handle == NULL) {
800 TAILQ_REMOVE(
801 &swap_pager_un_object_list,
802 srcobject,
803 pager_object_list
804 );
805 } else {
806 TAILQ_REMOVE(
807 NOBJLIST(srcobject->handle),
808 srcobject,
809 pager_object_list
810 );
811 }
812 mtx_unlock(&sw_alloc_mtx);
813 }
814
815 /*
816 * transfer source to destination.
817 */
818 for (i = 0; i < dstobject->size; ++i) {
819 daddr_t dstaddr;
820
821 /*
822 * Locate (without changing) the swapblk on the destination,
823 * unless it is invalid in which case free it silently, or
824 * if the destination is a resident page, in which case the
825 * source is thrown away.
826 */
827 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
828
829 if (dstaddr == SWAPBLK_NONE) {
830 /*
831 * Destination has no swapblk and is not resident,
832 * copy source.
833 */
834 daddr_t srcaddr;
835
836 srcaddr = swp_pager_meta_ctl(
837 srcobject,
838 i + offset,
839 SWM_POP
840 );
841
842 if (srcaddr != SWAPBLK_NONE) {
843 /*
844 * swp_pager_meta_build() can sleep.
845 */
846 vm_object_pip_add(srcobject, 1);
847 VM_OBJECT_UNLOCK(srcobject);
848 vm_object_pip_add(dstobject, 1);
849 swp_pager_meta_build(dstobject, i, srcaddr);
850 vm_object_pip_wakeup(dstobject);
851 VM_OBJECT_LOCK(srcobject);
852 vm_object_pip_wakeup(srcobject);
853 }
854 } else {
855 /*
856 * Destination has valid swapblk or it is represented
857 * by a resident page. We destroy the sourceblock.
858 */
859
860 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
861 }
862 }
863
864 /*
865 * Free left over swap blocks in source.
866 *
867 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
868 * double-remove the object from the swap queues.
869 */
870 if (destroysource) {
871 swp_pager_meta_free_all(srcobject);
872 /*
873 * Reverting the type is not necessary, the caller is going
874 * to destroy srcobject directly, but I'm doing it here
875 * for consistency since we've removed the object from its
876 * queues.
877 */
878 srcobject->type = OBJT_DEFAULT;
879 }
880 splx(s);
881 }
882
883 /*
884 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
885 * the requested page.
886 *
887 * We determine whether good backing store exists for the requested
888 * page and return TRUE if it does, FALSE if it doesn't.
889 *
890 * If TRUE, we also try to determine how much valid, contiguous backing
891 * store exists before and after the requested page within a reasonable
892 * distance. We do not try to restrict it to the swap device stripe
893 * (that is handled in getpages/putpages). It probably isn't worth
894 * doing here.
895 */
896 static boolean_t
897 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
898 {
899 daddr_t blk0;
900 int s;
901
902 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
903 /*
904 * do we have good backing store at the requested index ?
905 */
906 s = splvm();
907 blk0 = swp_pager_meta_ctl(object, pindex, 0);
908
909 if (blk0 == SWAPBLK_NONE) {
910 splx(s);
911 if (before)
912 *before = 0;
913 if (after)
914 *after = 0;
915 return (FALSE);
916 }
917
918 /*
919 * find backwards-looking contiguous good backing store
920 */
921 if (before != NULL) {
922 int i;
923
924 for (i = 1; i < (SWB_NPAGES/2); ++i) {
925 daddr_t blk;
926
927 if (i > pindex)
928 break;
929 blk = swp_pager_meta_ctl(object, pindex - i, 0);
930 if (blk != blk0 - i)
931 break;
932 }
933 *before = (i - 1);
934 }
935
936 /*
937 * find forward-looking contiguous good backing store
938 */
939 if (after != NULL) {
940 int i;
941
942 for (i = 1; i < (SWB_NPAGES/2); ++i) {
943 daddr_t blk;
944
945 blk = swp_pager_meta_ctl(object, pindex + i, 0);
946 if (blk != blk0 + i)
947 break;
948 }
949 *after = (i - 1);
950 }
951 splx(s);
952 return (TRUE);
953 }
954
955 /*
956 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
957 *
958 * This removes any associated swap backing store, whether valid or
959 * not, from the page.
960 *
961 * This routine is typically called when a page is made dirty, at
962 * which point any associated swap can be freed. MADV_FREE also
963 * calls us in a special-case situation.
964 *
965 * NOTE!!! If the page is clean and the swap was valid, the caller
966 * should make the page dirty before calling this routine. This routine
967 * does NOT change the m->dirty status of the page. Also: MADV_FREE
968 * depends on it.
969 *
970 * This routine may not block
971 * This routine must be called at splvm()
972 */
973 static void
974 swap_pager_unswapped(vm_page_t m)
975 {
976
977 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
978 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
979 }
980
981 /*
982 * SWAP_PAGER_GETPAGES() - bring pages in from swap
983 *
984 * Attempt to retrieve (m, count) pages from backing store, but make
985 * sure we retrieve at least m[reqpage]. We try to load in as large
986 * a chunk surrounding m[reqpage] as is contiguous in swap and which
987 * belongs to the same object.
988 *
989 * The code is designed for asynchronous operation and
990 * immediate-notification of 'reqpage' but tends not to be
991 * used that way. Please do not optimize-out this algorithmic
992 * feature, I intend to improve on it in the future.
993 *
994 * The parent has a single vm_object_pip_add() reference prior to
995 * calling us and we should return with the same.
996 *
997 * The parent has BUSY'd the pages. We should return with 'm'
998 * left busy, but the others adjusted.
999 */
1000 static int
1001 swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
1002 {
1003 struct buf *bp;
1004 vm_page_t mreq;
1005 int s;
1006 int i;
1007 int j;
1008 daddr_t blk;
1009
1010 mreq = m[reqpage];
1011
1012 KASSERT(mreq->object == object,
1013 ("swap_pager_getpages: object mismatch %p/%p",
1014 object, mreq->object));
1015
1016 /*
1017 * Calculate range to retrieve. The pages have already been assigned
1018 * their swapblks. We require a *contiguous* range that does not
1019 * span devices; if we do not supply one, bad things
1020 * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1021 * loops are set up such that the case(s) are handled implicitly.
1022 *
1023 * The swp_*() calls must be made at splvm(). vm_page_free() does
1024 * not need to be, but it will go a little faster if it is.
1025 */
1026 s = splvm();
1027 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1028
1029 for (i = reqpage - 1; i >= 0; --i) {
1030 daddr_t iblk;
1031
1032 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1033 if (blk != iblk + (reqpage - i))
1034 break;
1035 }
1036 ++i;
1037
1038 for (j = reqpage + 1; j < count; ++j) {
1039 daddr_t jblk;
1040
1041 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1042 if (blk != jblk - (j - reqpage))
1043 break;
1044 }
1045
1046 /*
1047 * free pages outside our collection range. Note: we never free
1048 * mreq, it must remain busy throughout.
1049 */
1050 vm_page_lock_queues();
1051 {
1052 int k;
1053
1054 for (k = 0; k < i; ++k)
1055 vm_page_free(m[k]);
1056 for (k = j; k < count; ++k)
1057 vm_page_free(m[k]);
1058 }
1059 vm_page_unlock_queues();
1060 splx(s);
1061
1062
1063 /*
1064 * Return VM_PAGER_FAIL if we have nothing to do. Return mreq
1065 * still busy, but the others unbusied.
1066 */
1067 if (blk == SWAPBLK_NONE)
1068 return (VM_PAGER_FAIL);
1069
1070 /*
1071 * Getpbuf() can sleep.
1072 */
1073 VM_OBJECT_UNLOCK(object);
1074 /*
1075 * Get a swap buffer header to perform the IO
1076 */
1077 bp = getpbuf(&nsw_rcount);
1078 bp->b_flags |= B_PAGING;
1079
1080 /*
1081 * map our page(s) into kva for input
1082 */
1083 pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i);
1084
1085 bp->b_iocmd = BIO_READ;
1086 bp->b_iodone = swp_pager_async_iodone;
1087 bp->b_rcred = crhold(thread0.td_ucred);
1088 bp->b_wcred = crhold(thread0.td_ucred);
1089 bp->b_blkno = blk - (reqpage - i);
1090 bp->b_bcount = PAGE_SIZE * (j - i);
1091 bp->b_bufsize = PAGE_SIZE * (j - i);
1092 bp->b_pager.pg_reqpage = reqpage - i;
1093
1094 VM_OBJECT_LOCK(object);
1095 vm_page_lock_queues();
1096 {
1097 int k;
1098
1099 for (k = i; k < j; ++k) {
1100 bp->b_pages[k - i] = m[k];
1101 vm_page_flag_set(m[k], PG_SWAPINPROG);
1102 }
1103 }
1104 vm_page_unlock_queues();
1105 VM_OBJECT_UNLOCK(object);
1106 bp->b_npages = j - i;
1107
1108 cnt.v_swapin++;
1109 cnt.v_swappgsin += bp->b_npages;
1110
1111 /*
1112 * We still hold the lock on mreq, and our automatic completion routine
1113 * does not remove it.
1114 */
1115 VM_OBJECT_LOCK(mreq->object);
1116 vm_object_pip_add(mreq->object, bp->b_npages);
1117 VM_OBJECT_UNLOCK(mreq->object);
1118
1119 /*
1120 * perform the I/O. NOTE!!! bp cannot be considered valid after
1121 * this point because we automatically release it on completion.
1122 * Instead, we look at the one page we are interested in which we
1123 * still hold a lock on even through the I/O completion.
1124 *
1125 * The other pages in our m[] array are also released on completion,
1126 * so we cannot assume they are valid anymore either.
1127 *
1128 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1129 */
1130 BUF_KERNPROC(bp);
1131 swp_pager_strategy(bp);
1132
1133 /*
1134 * wait for the page we want to complete. PG_SWAPINPROG is always
1135 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1136 * is set in the meta-data.
1137 */
1138 s = splvm();
1139 vm_page_lock_queues();
1140 while ((mreq->flags & PG_SWAPINPROG) != 0) {
1141 vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1142 cnt.v_intrans++;
1143 if (msleep(mreq, &vm_page_queue_mtx, PSWP, "swread", hz*20)) {
1144 printf(
1145 "swap_pager: indefinite wait buffer: device:"
1146 " %s, blkno: %ld, size: %ld\n",
1147 devtoname(bp->b_dev), (long)bp->b_blkno,
1148 bp->b_bcount
1149 );
1150 }
1151 }
1152 vm_page_unlock_queues();
1153 splx(s);
1154
1155 VM_OBJECT_LOCK(mreq->object);
1156 /*
1157 * mreq is left busied after completion, but all the other pages
1158 * are freed. If we had an unrecoverable read error the page will
1159 * not be valid.
1160 */
1161 if (mreq->valid != VM_PAGE_BITS_ALL) {
1162 return (VM_PAGER_ERROR);
1163 } else {
1164 return (VM_PAGER_OK);
1165 }
1166
1167 /*
1168 * A final note: in a low swap situation, we cannot deallocate swap
1169 * and mark a page dirty here because the caller is likely to mark
1170 * the page clean when we return, causing the page to possibly revert
1171 * to all-zero's later.
1172 */
1173 }
1174
1175 /*
1176 * swap_pager_putpages:
1177 *
1178 * Assign swap (if necessary) and initiate I/O on the specified pages.
1179 *
1180 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1181 * are automatically converted to SWAP objects.
1182 *
1183 * In a low memory situation we may block in VOP_STRATEGY(), but the new
1184 * vm_page reservation system coupled with properly written VFS devices
1185 * should ensure that no low-memory deadlock occurs. This is an area
1186 * which needs work.
1187 *
1188 * The parent has N vm_object_pip_add() references prior to
1189 * calling us and will remove references for rtvals[] that are
1190 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1191 * completion.
1192 *
1193 * The parent has soft-busy'd the pages it passes us and will unbusy
1194 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1195 * We need to unbusy the rest on I/O completion.
1196 */
1197 void
1198 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1199 boolean_t sync, int *rtvals)
1200 {
1201 int i;
1202 int n = 0;
1203
1204 GIANT_REQUIRED;
1205 if (count && m[0]->object != object) {
1206 panic("swap_pager_putpages: object mismatch %p/%p",
1207 object,
1208 m[0]->object
1209 );
1210 }
1211
1212 /*
1213 * Step 1
1214 *
1215 * Turn object into OBJT_SWAP
1216 * check for bogus sysops
1217 * force sync if not pageout process
1218 */
1219 if (object->type != OBJT_SWAP)
1220 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1221 VM_OBJECT_UNLOCK(object);
1222
1223 if (curproc != pageproc)
1224 sync = TRUE;
1225
1226 /*
1227 * Step 2
1228 *
1229 * Update nsw parameters from swap_async_max sysctl values.
1230 * Do not let the sysop crash the machine with bogus numbers.
1231 */
1232 mtx_lock(&pbuf_mtx);
1233 if (swap_async_max != nsw_wcount_async_max) {
1234 int n;
1235 int s;
1236
1237 /*
1238 * limit range
1239 */
1240 if ((n = swap_async_max) > nswbuf / 2)
1241 n = nswbuf / 2;
1242 if (n < 1)
1243 n = 1;
1244 swap_async_max = n;
1245
1246 /*
1247 * Adjust difference ( if possible ). If the current async
1248 * count is too low, we may not be able to make the adjustment
1249 * at this time.
1250 */
1251 s = splvm();
1252 n -= nsw_wcount_async_max;
1253 if (nsw_wcount_async + n >= 0) {
1254 nsw_wcount_async += n;
1255 nsw_wcount_async_max += n;
1256 wakeup(&nsw_wcount_async);
1257 }
1258 splx(s);
1259 }
1260 mtx_unlock(&pbuf_mtx);
1261
1262 /*
1263 * Step 3
1264 *
1265 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1266 * The page is left dirty until the pageout operation completes
1267 * successfully.
1268 */
1269 for (i = 0; i < count; i += n) {
1270 int s;
1271 int j;
1272 struct buf *bp;
1273 daddr_t blk;
1274
1275 /*
1276 * Maximum I/O size is limited by a number of factors.
1277 */
1278 n = min(BLIST_MAX_ALLOC, count - i);
1279 n = min(n, nsw_cluster_max);
1280
1281 s = splvm();
1282
1283 /*
1284 * Get biggest block of swap we can. If we fail, fall
1285 * back and try to allocate a smaller block. Don't go
1286 * overboard trying to allocate space if it would overly
1287 * fragment swap.
1288 */
1289 while (
1290 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1291 n > 4
1292 ) {
1293 n >>= 1;
1294 }
1295 if (blk == SWAPBLK_NONE) {
1296 for (j = 0; j < n; ++j)
1297 rtvals[i+j] = VM_PAGER_FAIL;
1298 splx(s);
1299 continue;
1300 }
1301
1302 /*
1303 * All I/O parameters have been satisfied, build the I/O
1304 * request and assign the swap space.
1305 */
1306 if (sync == TRUE) {
1307 bp = getpbuf(&nsw_wcount_sync);
1308 } else {
1309 bp = getpbuf(&nsw_wcount_async);
1310 bp->b_flags = B_ASYNC;
1311 }
1312 bp->b_flags |= B_PAGING;
1313 bp->b_iocmd = BIO_WRITE;
1314
1315 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1316
1317 bp->b_rcred = crhold(thread0.td_ucred);
1318 bp->b_wcred = crhold(thread0.td_ucred);
1319 bp->b_bcount = PAGE_SIZE * n;
1320 bp->b_bufsize = PAGE_SIZE * n;
1321 bp->b_blkno = blk;
1322
1323 VM_OBJECT_LOCK(object);
1324 for (j = 0; j < n; ++j) {
1325 vm_page_t mreq = m[i+j];
1326
1327 swp_pager_meta_build(
1328 mreq->object,
1329 mreq->pindex,
1330 blk + j
1331 );
1332 vm_page_dirty(mreq);
1333 rtvals[i+j] = VM_PAGER_OK;
1334
1335 vm_page_lock_queues();
1336 vm_page_flag_set(mreq, PG_SWAPINPROG);
1337 vm_page_unlock_queues();
1338 bp->b_pages[j] = mreq;
1339 }
1340 VM_OBJECT_UNLOCK(object);
1341 bp->b_npages = n;
1342 /*
1343 * Must set dirty range for NFS to work.
1344 */
1345 bp->b_dirtyoff = 0;
1346 bp->b_dirtyend = bp->b_bcount;
1347
1348 cnt.v_swapout++;
1349 cnt.v_swappgsout += bp->b_npages;
1350
1351 splx(s);
1352
1353 /*
1354 * asynchronous
1355 *
1356 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1357 */
1358 if (sync == FALSE) {
1359 bp->b_iodone = swp_pager_async_iodone;
1360 BUF_KERNPROC(bp);
1361 swp_pager_strategy(bp);
1362
1363 for (j = 0; j < n; ++j)
1364 rtvals[i+j] = VM_PAGER_PEND;
1365 /* restart outer loop */
1366 continue;
1367 }
1368
1369 /*
1370 * synchronous
1371 *
1372 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1373 */
1374 bp->b_iodone = swp_pager_sync_iodone;
1375 swp_pager_strategy(bp);
1376
1377 /*
1378 * Wait for the sync I/O to complete, then update rtvals.
1379 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1380 * our async completion routine at the end, thus avoiding a
1381 * double-free.
1382 */
1383 s = splbio();
1384 while ((bp->b_flags & B_DONE) == 0) {
1385 tsleep(bp, PVM, "swwrt", 0);
1386 }
1387 for (j = 0; j < n; ++j)
1388 rtvals[i+j] = VM_PAGER_PEND;
1389 /*
1390 * Now that we are through with the bp, we can call the
1391 * normal async completion, which frees everything up.
1392 */
1393 swp_pager_async_iodone(bp);
1394 splx(s);
1395 }
1396 VM_OBJECT_LOCK(object);
1397 }
1398
1399 /*
1400 * swp_pager_sync_iodone:
1401 *
1402 * Completion routine for synchronous reads and writes from/to swap.
1403 * We just mark the bp as complete and wake up anyone waiting on it.
1404 *
1405 * This routine may not block. This routine is called at splbio() or better.
1406 */
1407 static void
1408 swp_pager_sync_iodone(struct buf *bp)
1409 {
1410 bp->b_flags |= B_DONE;
1411 bp->b_flags &= ~B_ASYNC;
1412 wakeup(bp);
1413 }
1414
1415 /*
1416 * swp_pager_async_iodone:
1417 *
1418 * Completion routine for asynchronous reads and writes from/to swap.
1419 * Also called manually by synchronous code to finish up a bp.
1420 *
1421 * For READ operations, the pages are PG_BUSY'd. For WRITE operations,
1422 * the pages are vm_page_t->busy'd. For READ operations, we clear
1423 * PG_BUSY (unbusy) on all pages except the 'main' request page. For
1424 * WRITE operations, we drop ->busy (unbusy) on all pages ( we can do
1425 * this because we marked them all VM_PAGER_PEND on return from putpages ).
1426 *
1427 * This routine may not block.
1428 * This routine is called at splbio() or better
1429 *
1430 * We up ourselves to splvm() as required for various vm_page related
1431 * calls.
1432 */
1433 static void
1434 swp_pager_async_iodone(struct buf *bp)
1435 {
1436 int s;
1437 int i;
1438 vm_object_t object = NULL;
1439
1440 GIANT_REQUIRED;
1441 bp->b_flags |= B_DONE;
1442
1443 /*
1444 * report error
1445 */
1446 if (bp->b_ioflags & BIO_ERROR) {
1447 printf(
1448 "swap_pager: I/O error - %s failed; blkno %ld, "
1449 "size %ld, error %d\n",
1450 ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1451 (long)bp->b_blkno,
1452 (long)bp->b_bcount,
1453 bp->b_error
1454 );
1455 }
1456
1457 /*
1458 * set object, raise to splvm().
1459 */
1460 s = splvm();
1461
1462 /*
1463 * remove the mapping for kernel virtual
1464 */
1465 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1466
1467 if (bp->b_npages) {
1468 object = bp->b_pages[0]->object;
1469 VM_OBJECT_LOCK(object);
1470 }
1471 vm_page_lock_queues();
1472 /*
1473 * cleanup pages. If an error occurs writing to swap, we are in
1474 * very serious trouble. If it happens to be a disk error, though,
1475 * we may be able to recover by reassigning the swap later on. So
1476 * in this case we remove the m->swapblk assignment for the page
1477 * but do not free it in the rlist. The erroneous block(s) are thus
1478 * never reallocated as swap. Redirty the page and continue.
1479 */
1480 for (i = 0; i < bp->b_npages; ++i) {
1481 vm_page_t m = bp->b_pages[i];
1482
1483 vm_page_flag_clear(m, PG_SWAPINPROG);
1484
1485 if (bp->b_ioflags & BIO_ERROR) {
1486 /*
1487 * If an error occurs I'd love to throw the swapblk
1488 * away without freeing it back to swapspace, so it
1489 * can never be used again. But I can't from an
1490 * interrupt.
1491 */
1492 if (bp->b_iocmd == BIO_READ) {
1493 /*
1494 * When reading, reqpage needs to stay
1495 * locked for the parent, but all other
1496 * pages can be freed. We still want to
1497 * wakeup the parent waiting on the page,
1498 * though. ( also: pg_reqpage can be -1 and
1499 * not match anything ).
1500 *
1501 * We have to wake specifically requested pages
1502 * up too because we cleared PG_SWAPINPROG and
1503 * someone may be waiting for that.
1504 *
1505 * NOTE: for reads, m->dirty will probably
1506 * be overridden by the original caller of
1507 * getpages so don't play cute tricks here.
1508 *
1509 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1510 * AS THIS MESSES WITH object->memq, and it is
1511 * not legal to mess with object->memq from an
1512 * interrupt.
1513 */
1514 m->valid = 0;
1515 vm_page_flag_clear(m, PG_ZERO);
1516 if (i != bp->b_pager.pg_reqpage)
1517 vm_page_free(m);
1518 else
1519 vm_page_flash(m);
1520 /*
1521 * If i == bp->b_pager.pg_reqpage, do not wake
1522 * the page up. The caller needs to.
1523 */
1524 } else {
1525 /*
1526 * If a write error occurs, reactivate page
1527 * so it doesn't clog the inactive list,
1528 * then finish the I/O.
1529 */
1530 vm_page_dirty(m);
1531 vm_page_activate(m);
1532 vm_page_io_finish(m);
1533 }
1534 } else if (bp->b_iocmd == BIO_READ) {
1535 /*
1536 * For read success, clear dirty bits. Nobody should
1537 * have this page mapped but don't take any chances,
1538 * make sure the pmap modify bits are also cleared.
1539 *
1540 * NOTE: for reads, m->dirty will probably be
1541 * overridden by the original caller of getpages so
1542 * we cannot set them in order to free the underlying
1543 * swap in a low-swap situation. I don't think we'd
1544 * want to do that anyway, but it was an optimization
1545 * that existed in the old swapper for a time before
1546 * it got ripped out due to precisely this problem.
1547 *
1548 * clear PG_ZERO in page.
1549 *
1550 * If not the requested page then deactivate it.
1551 *
1552 * Note that the requested page, reqpage, is left
1553 * busied, but we still have to wake it up. The
1554 * other pages are released (unbusied) by
1555 * vm_page_wakeup(). We do not set reqpage's
1556 * valid bits here, it is up to the caller.
1557 */
1558 pmap_clear_modify(m);
1559 m->valid = VM_PAGE_BITS_ALL;
1560 vm_page_undirty(m);
1561 vm_page_flag_clear(m, PG_ZERO);
1562
1563 /*
1564 * We have to wake specifically requested pages
1565 * up too because we cleared PG_SWAPINPROG and
1566 * the caller in getpages could be waiting for it. However,
1567 * be sure not to unbusy the specifically requested
1568 * page - getpages expects it to be
1569 * left busy.
1570 */
1571 if (i != bp->b_pager.pg_reqpage) {
1572 vm_page_deactivate(m);
1573 vm_page_wakeup(m);
1574 } else {
1575 vm_page_flash(m);
1576 }
1577 } else {
1578 /*
1579 * For write success, clear the modify and dirty
1580 * status, then finish the I/O ( which decrements the
1581 * busy count and possibly wakes waiter's up ).
1582 */
1583 pmap_clear_modify(m);
1584 vm_page_undirty(m);
1585 vm_page_io_finish(m);
1586 if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1587 pmap_page_protect(m, VM_PROT_READ);
1588 }
1589 }
1590 vm_page_unlock_queues();
1591
1592 /*
1593 * adjust pip. NOTE: the original parent may still have its own
1594 * pip refs on the object.
1595 */
1596 if (object != NULL) {
1597 vm_object_pip_wakeupn(object, bp->b_npages);
1598 VM_OBJECT_UNLOCK(object);
1599 }
1600
1601 /*
1602 * release the physical I/O buffer
1603 */
1604 relpbuf(
1605 bp,
1606 ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1607 ((bp->b_flags & B_ASYNC) ?
1608 &nsw_wcount_async :
1609 &nsw_wcount_sync
1610 )
1611 )
1612 );
1613 splx(s);
1614 }
1615
1616 /*
1617 * swap_pager_isswapped:
1618 *
1619 * Return 1 if at least one page in the given object is paged
1620 * out to the given swap device.
1621 *
1622 * This routine may not block.
1623 */
1624 int
1625 swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
1626 {
1627 daddr_t index = 0;
1628 int bcount;
1629 int i;
1630
1631 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1632 for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
1633 struct swblock *swap;
1634
1635 mtx_lock(&swhash_mtx);
1636 if ((swap = *swp_pager_hash(object, index)) != NULL) {
1637 for (i = 0; i < SWAP_META_PAGES; ++i) {
1638 daddr_t v = swap->swb_pages[i];
1639 if (v == SWAPBLK_NONE)
1640 continue;
1641 if (swp_pager_find_dev(v) == sp) {
1642 mtx_unlock(&swhash_mtx);
1643 return 1;
1644 }
1645 }
1646 }
1647 mtx_unlock(&swhash_mtx);
1648 index += SWAP_META_PAGES;
1649 if (index > 0x20000000)
1650 panic("swap_pager_isswapped: failed to locate all swap meta blocks");
1651 }
1652 return 0;
1653 }
1654
1655 /*
1656 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
1657 *
1658 * This routine dissociates the page at the given index within a
1659 * swap block from its backing store, paging it in if necessary.
1660 * If the page is paged in, it is placed in the inactive queue,
1661 * since it had its backing store ripped out from under it.
1662 * We also attempt to swap in all other pages in the swap block,
1663 * but we only guarantee that the one at the specified index is
1664 * paged in.
1665 *
1666 * XXX - The code to page the whole block in doesn't work, so we
1667 * revert to the one-by-one behavior for now. Sigh.
1668 */
1669 static __inline void
1670 swp_pager_force_pagein(struct swblock *swap, int idx)
1671 {
1672 vm_object_t object;
1673 vm_page_t m;
1674 vm_pindex_t pindex;
1675
1676 object = swap->swb_object;
1677 pindex = swap->swb_index;
1678 mtx_unlock(&swhash_mtx);
1679
1680 VM_OBJECT_LOCK(object);
1681 vm_object_pip_add(object, 1);
1682 m = vm_page_grab(object, pindex + idx, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
1683 if (m->valid == VM_PAGE_BITS_ALL) {
1684 vm_object_pip_subtract(object, 1);
1685 vm_page_lock_queues();
1686 vm_page_activate(m);
1687 vm_page_dirty(m);
1688 vm_page_wakeup(m);
1689 vm_page_unlock_queues();
1690 vm_pager_page_unswapped(m);
1691 VM_OBJECT_UNLOCK(object);
1692 return;
1693 }
1694
1695 if (swap_pager_getpages(object, &m, 1, 0) !=
1696 VM_PAGER_OK)
1697 panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
1698 vm_object_pip_subtract(object, 1);
1699 vm_page_lock_queues();
1700 vm_page_dirty(m);
1701 vm_page_dontneed(m);
1702 vm_page_wakeup(m);
1703 vm_page_unlock_queues();
1704 vm_pager_page_unswapped(m);
1705 VM_OBJECT_UNLOCK(object);
1706 }
1707
1708
1709 /*
1710 * swap_pager_swapoff:
1711 *
1712 * Page in all of the pages that have been paged out to the
1713 * given device. The corresponding blocks in the bitmap must be
1714 * marked as allocated and the device must be flagged SW_CLOSING.
1715 * There may be no processes swapped out to the device.
1716 *
1717 * The sw_used parameter points to the field in the swdev structure
1718 * that contains a count of the number of blocks still allocated
1719 * on the device. If we encounter objects with a nonzero pip count
1720 * in our scan, we use this number to determine if we're really done.
1721 *
1722 * This routine may block.
1723 */
1724 static void
1725 swap_pager_swapoff(struct swdevt *sp, int *sw_used)
1726 {
1727 struct swblock **pswap;
1728 struct swblock *swap;
1729 vm_object_t waitobj;
1730 daddr_t v;
1731 int i, j;
1732
1733 GIANT_REQUIRED;
1734
1735 full_rescan:
1736 waitobj = NULL;
1737 for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
1738 restart:
1739 pswap = &swhash[i];
1740 mtx_lock(&swhash_mtx);
1741 while ((swap = *pswap) != NULL) {
1742 for (j = 0; j < SWAP_META_PAGES; ++j) {
1743 v = swap->swb_pages[j];
1744 if (v != SWAPBLK_NONE &&
1745 swp_pager_find_dev(v) == sp)
1746 break;
1747 }
1748 if (j < SWAP_META_PAGES) {
1749 swp_pager_force_pagein(swap, j);
1750 goto restart;
1751 } else if (swap->swb_object->paging_in_progress) {
1752 if (!waitobj)
1753 waitobj = swap->swb_object;
1754 }
1755 pswap = &swap->swb_hnext;
1756 }
1757 mtx_unlock(&swhash_mtx);
1758 }
1759 if (waitobj && *sw_used) {
1760 /*
1761 * We wait on an arbitrary object to clock our rescans
1762 * to the rate of paging completion.
1763 */
1764 VM_OBJECT_LOCK(waitobj);
1765 vm_object_pip_wait(waitobj, "swpoff");
1766 VM_OBJECT_UNLOCK(waitobj);
1767 goto full_rescan;
1768 }
1769 if (*sw_used)
1770 panic("swapoff: failed to locate %d swap blocks", *sw_used);
1771 }
1772
1773 /************************************************************************
1774 * SWAP META DATA *
1775 ************************************************************************
1776 *
1777 * These routines manipulate the swap metadata stored in the
1778 * OBJT_SWAP object. All swp_*() routines must be called at
1779 * splvm() because swap can be freed up by the low level vm_page
1780 * code which might be called from interrupts beyond what splbio() covers.
1781 *
1782 * Swap metadata is implemented with a global hash and not directly
1783 * linked into the object. Instead the object simply contains
1784 * appropriate tracking counters.
1785 */
1786
1787 /*
1788 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
1789 *
1790 * We first convert the object to a swap object if it is a default
1791 * object.
1792 *
1793 * The specified swapblk is added to the object's swap metadata. If
1794 * the swapblk is not valid, it is freed instead. Any previously
1795 * assigned swapblk is freed.
1796 *
1797 * This routine must be called at splvm(), except when used to convert
1798 * an OBJT_DEFAULT object into an OBJT_SWAP object.
1799 */
1800 static void
1801 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
1802 {
1803 struct swblock *swap;
1804 struct swblock **pswap;
1805 int idx;
1806
1807 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1808 /*
1809 * Convert default object to swap object if necessary
1810 */
1811 if (object->type != OBJT_SWAP) {
1812 object->type = OBJT_SWAP;
1813 object->un_pager.swp.swp_bcount = 0;
1814
1815 mtx_lock(&sw_alloc_mtx);
1816 if (object->handle != NULL) {
1817 TAILQ_INSERT_TAIL(
1818 NOBJLIST(object->handle),
1819 object,
1820 pager_object_list
1821 );
1822 } else {
1823 TAILQ_INSERT_TAIL(
1824 &swap_pager_un_object_list,
1825 object,
1826 pager_object_list
1827 );
1828 }
1829 mtx_unlock(&sw_alloc_mtx);
1830 }
1831
1832 /*
1833 * Locate the hash entry. If not found, create one, but if we aren't
1834 * adding anything just return. If the zone allocation fails we wait
1835 * and, since the hash table may have changed, retry.
1836 */
1837 retry:
1838 mtx_lock(&swhash_mtx);
1839 pswap = swp_pager_hash(object, pindex);
1840
1841 if ((swap = *pswap) == NULL) {
1842 int i;
1843
1844 if (swapblk == SWAPBLK_NONE)
1845 goto done;
1846
1847 swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
1848 if (swap == NULL) {
1849 mtx_unlock(&swhash_mtx);
1850 VM_OBJECT_UNLOCK(object);
1851 VM_WAIT;
1852 VM_OBJECT_LOCK(object);
1853 goto retry;
1854 }
1855
1856 swap->swb_hnext = NULL;
1857 swap->swb_object = object;
1858 swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
1859 swap->swb_count = 0;
1860
1861 ++object->un_pager.swp.swp_bcount;
1862
1863 for (i = 0; i < SWAP_META_PAGES; ++i)
1864 swap->swb_pages[i] = SWAPBLK_NONE;
1865 }
1866
1867 /*
1868 * Delete prior contents of metadata
1869 */
1870 idx = pindex & SWAP_META_MASK;
1871
1872 if (swap->swb_pages[idx] != SWAPBLK_NONE) {
1873 swp_pager_freeswapspace(swap->swb_pages[idx], 1);
1874 --swap->swb_count;
1875 }
1876
1877 /*
1878 * Enter block into metadata
1879 */
1880 swap->swb_pages[idx] = swapblk;
1881 if (swapblk != SWAPBLK_NONE)
1882 ++swap->swb_count;
1883 done:
1884 mtx_unlock(&swhash_mtx);
1885 }
1886
1887 /*
1888 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1889 *
1890 * The requested range of blocks is freed, with any associated swap
1891 * returned to the swap bitmap.
1892 *
1893 * This routine will free swap metadata structures as they are cleaned
1894 * out. This routine does *NOT* operate on swap metadata associated
1895 * with resident pages.
1896 *
1897 * This routine must be called at splvm()
1898 */
1899 static void
1900 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1901 {
1902
1903 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1904 if (object->type != OBJT_SWAP)
1905 return;
1906
1907 while (count > 0) {
1908 struct swblock **pswap;
1909 struct swblock *swap;
1910
1911 mtx_lock(&swhash_mtx);
1912 pswap = swp_pager_hash(object, index);
1913
1914 if ((swap = *pswap) != NULL) {
1915 daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1916
1917 if (v != SWAPBLK_NONE) {
1918 swp_pager_freeswapspace(v, 1);
1919 swap->swb_pages[index & SWAP_META_MASK] =
1920 SWAPBLK_NONE;
1921 if (--swap->swb_count == 0) {
1922 *pswap = swap->swb_hnext;
1923 uma_zfree(swap_zone, swap);
1924 --object->un_pager.swp.swp_bcount;
1925 }
1926 }
1927 --count;
1928 ++index;
1929 } else {
1930 int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1931 count -= n;
1932 index += n;
1933 }
1934 mtx_unlock(&swhash_mtx);
1935 }
1936 }
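Worked example of the skip-ahead in the else branch above, taking SWAP_META_PAGES as 16 for illustration: with index = 37 (so index & SWAP_META_MASK == 5) and count = 100, n = 16 - 5 = 11, leaving index = 48, the start of the next swblock, and count = 89. Pages covered by a missing swblock are thus skipped in one step rather than one page at a time.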
1937
1938 /*
1939 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1940 *
1941 * This routine locates and destroys all swap metadata associated with
1942 * an object.
1943 *
1944 * This routine must be called at splvm()
1945 */
1946 static void
1947 swp_pager_meta_free_all(vm_object_t object)
1948 {
1949 daddr_t index = 0;
1950
1951 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1952 if (object->type != OBJT_SWAP)
1953 return;
1954
1955 while (object->un_pager.swp.swp_bcount) {
1956 struct swblock **pswap;
1957 struct swblock *swap;
1958
1959 mtx_lock(&swhash_mtx);
1960 pswap = swp_pager_hash(object, index);
1961 if ((swap = *pswap) != NULL) {
1962 int i;
1963
1964 for (i = 0; i < SWAP_META_PAGES; ++i) {
1965 daddr_t v = swap->swb_pages[i];
1966 if (v != SWAPBLK_NONE) {
1967 --swap->swb_count;
1968 swp_pager_freeswapspace(v, 1);
1969 }
1970 }
1971 if (swap->swb_count != 0)
1972 panic("swap_pager_meta_free_all: swb_count != 0");
1973 *pswap = swap->swb_hnext;
1974 uma_zfree(swap_zone, swap);
1975 --object->un_pager.swp.swp_bcount;
1976 }
1977 mtx_unlock(&swhash_mtx);
1978 index += SWAP_META_PAGES;
1979 if (index > 0x20000000)
1980 panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1981 }
1982 }
1983
1984 /*
1985 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
1986 *
1987 * This routine is capable of looking up, popping, or freeing
1988 * swapblk assignments in the swap meta data or in the vm_page_t.
1989 * The routine typically returns the swapblk being looked-up, or popped,
1990 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1991 * was invalid. This routine will automatically free any invalid
1992 * meta-data swapblks.
1993 *
1994 * It is not possible to store invalid swapblks in the swap meta data
1995 * (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1996 *
1997 * When acting on a busy resident page and paging is in progress, we
1998 * have to wait until paging is complete but otherwise can act on the
1999 * busy page.
2000 *
2001 * This routine must be called at splvm().
2002 *
2003 * SWM_FREE remove and free swap block from metadata
2004 * SWM_POP remove from meta data but do not free; pop it out
2005 */
2006 static daddr_t
2007 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
2008 {
2009 struct swblock **pswap;
2010 struct swblock *swap;
2011 daddr_t r1;
2012 int idx;
2013
2014 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2015 /*
2016 * The meta data only exists if the object is OBJT_SWAP
2017 * and even then might not be allocated yet.
2018 */
2019 if (object->type != OBJT_SWAP)
2020 return (SWAPBLK_NONE);
2021
2022 r1 = SWAPBLK_NONE;
2023 mtx_lock(&swhash_mtx);
2024 pswap = swp_pager_hash(object, pindex);
2025
2026 if ((swap = *pswap) != NULL) {
2027 idx = pindex & SWAP_META_MASK;
2028 r1 = swap->swb_pages[idx];
2029
2030 if (r1 != SWAPBLK_NONE) {
2031 if (flags & SWM_FREE) {
2032 swp_pager_freeswapspace(r1, 1);
2033 r1 = SWAPBLK_NONE;
2034 }
2035 if (flags & (SWM_FREE|SWM_POP)) {
2036 swap->swb_pages[idx] = SWAPBLK_NONE;
2037 if (--swap->swb_count == 0) {
2038 *pswap = swap->swb_hnext;
2039 uma_zfree(swap_zone, swap);
2040 --object->un_pager.swp.swp_bcount;
2041 }
2042 }
2043 }
2044 }
2045 mtx_unlock(&swhash_mtx);
2046 return (r1);
2047 }
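The three useful flag combinations of the routine above, sketched as call patterns (object lock held by the caller); a call-pattern illustration, not a verbatim excerpt:

	blk = swp_pager_meta_ctl(object, pindex, 0);		/* lookup only */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);	/* remove; caller now owns blk */
	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);	/* remove and free; returns SWAPBLK_NONE */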
2048
2049 /*
2050 * System call swapon(name) enables swapping on device name,
2051 * which must be in the swdevsw. Return EBUSY
2052 * if already swapping on this device.
2053 */
2054 #ifndef _SYS_SYSPROTO_H_
2055 struct swapon_args {
2056 char *name;
2057 };
2058 #endif
2059
2060 /*
2061 * MPSAFE
2062 */
2063 /* ARGSUSED */
2064 int
2065 swapon(struct thread *td, struct swapon_args *uap)
2066 {
2067 struct vattr attr;
2068 struct vnode *vp;
2069 struct nameidata nd;
2070 int error;
2071
2072 mtx_lock(&Giant);
2073 error = suser(td);
2074 if (error)
2075 goto done2;
2076
2077 while (swdev_syscall_active)
2078 tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
2079 swdev_syscall_active = 1;
2080
2081 /*
2082 * Swap metadata may not fit in the KVM if we have physical
2083 * memory of >1GB.
2084 */
2085 if (swap_zone == NULL) {
2086 error = ENOMEM;
2087 goto done;
2088 }
2089
2090 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
2091 error = namei(&nd);
2092 if (error)
2093 goto done;
2094
2095 NDFREE(&nd, NDF_ONLY_PNBUF);
2096 vp = nd.ni_vp;
2097
2098 if (vn_isdisk(vp, &error)) {
2099 error = swapongeom(td, vp);
2100 } else if (vp->v_type == VREG &&
2101 (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2102 (error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) == 0) {
2103 /*
2104 * Allow direct swapping to NFS regular files in the same
2105 * way that nfs_mountroot() sets up diskless swapping.
2106 */
2107 error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2108 }
2109
2110 if (error)
2111 vrele(vp);
2112 done:
2113 swdev_syscall_active = 0;
2114 wakeup_one(&swdev_syscall_active);
2115 done2:
2116 mtx_unlock(&Giant);
2117 return (error);
2118 }
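From user space this path is normally reached through swapon(8); the libc wrapper takes just the pathname. A minimal hedged sketch, assuming the traditional one-argument prototype and its declaration in <unistd.h> (check swapon(2) on the target system), with /dev/ad0s1b as a purely illustrative device:

#include <unistd.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* illustrative device name; the call requires root privileges */
	if (swapon("/dev/ad0s1b") != 0)
		fprintf(stderr, "swapon: %s\n", strerror(errno));
	return (0);
}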
2119
2120 static void
2121 swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strategy, sw_close_t *close, udev_t udev)
2122 {
2123 struct swdevt *sp, *tsp;
2124 swblk_t dvbase;
2125 u_long mblocks;
2126
2127 /*
2128 * If we go beyond this, we get overflows in the radix
2129 * tree bitmap code.
2130 */
2131 mblocks = 0x40000000 / BLIST_META_RADIX;
2132 if (nblks > mblocks) {
2133 printf("WARNING: reducing size to maximum of %lu blocks per swap unit\n",
2134 mblocks);
2135 nblks = mblocks;
2136 }
2137 /*
2138 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2139 * First chop nblks off to page-align it, then convert.
2140 *
2141 * sw->sw_nblks is in page-sized chunks now too.
2142 */
2143 nblks &= ~(ctodb(1) - 1);
2144 nblks = dbtoc(nblks);
2145
2146 sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2147 sp->sw_vp = vp;
2148 sp->sw_id = id;
2149 sp->sw_udev = udev;
2150 sp->sw_flags = 0;
2151 sp->sw_nblks = nblks;
2152 sp->sw_used = 0;
2153 sp->sw_strategy = strategy;
2154 sp->sw_close = close;
2155
2156 sp->sw_blist = blist_create(nblks);
2157 /*
2158 * Do not free the first two blocks in order to avoid overwriting
2159 * any BSD label at the front of the partition.
2160 */
2161 blist_free(sp->sw_blist, 2, nblks - 2);
2162
2163 dvbase = 0;
2164 mtx_lock(&sw_dev_mtx);
2165 TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2166 if (tsp->sw_end >= dvbase) {
2167 /*
2168 * We put one uncovered page between the devices
2169 * in order to definitively prevent any cross-device
2170 * I/O requests
2171 */
2172 dvbase = tsp->sw_end + 1;
2173 }
2174 }
2175 sp->sw_first = dvbase;
2176 sp->sw_end = dvbase + nblks;
2177 TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2178 nswapdev++;
2179 swap_pager_avail += nblks;
2180 swp_sizecheck();
2181 mtx_unlock(&sw_dev_mtx);
2182 }
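A worked example of the size conversion in the routine above, assuming the common DEV_BSIZE = 512 and PAGE_SIZE = 4096 so that ctodb(1) == 8; CTODB/DBTOC below are user-space stand-ins for the kernel's ctodb()/dbtoc() macros:

#include <stdio.h>

#define DEV_BSIZE	512				/* assumed typical values */
#define PAGE_SIZE	4096
#define CTODB(x)	((x) * (PAGE_SIZE / DEV_BSIZE))	/* pages -> disk blocks */
#define DBTOC(x)	((x) / (PAGE_SIZE / DEV_BSIZE))	/* disk blocks -> pages */

int
main(void)
{
	unsigned long nblks = 1000005;		/* device size in DEV_BSIZE blocks */

	nblks &= ~(CTODB(1) - 1);		/* 1000000: rounded down to a page multiple */
	nblks = DBTOC(nblks);			/* 125000 pages */
	printf("%lu pages, %lu usable after reserving 2 for the label\n",
	    nblks, nblks - 2);
	return (0);
}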
2183
2184 /*
2185 * SYSCALL: swapoff(devname)
2186 *
2187 * Disable swapping on the given device.
2188 *
2189 * XXX: Badly designed system call: it should use a device index
2190 * rather than filename as specification. We keep sw_vp around
2191 * only to make this work.
2192 */
2193 #ifndef _SYS_SYSPROTO_H_
2194 struct swapoff_args {
2195 char *name;
2196 };
2197 #endif
2198
2199 /*
2200 * MPSAFE
2201 */
2202 /* ARGSUSED */
2203 int
2204 swapoff(struct thread *td, struct swapoff_args *uap)
2205 {
2206 struct vnode *vp;
2207 struct nameidata nd;
2208 struct swdevt *sp;
2209 u_long nblks, dvbase;
2210 int error;
2211
2212 mtx_lock(&Giant);
2213
2214 error = suser(td);
2215 if (error)
2216 goto done2;
2217
2218 while (swdev_syscall_active)
2219 tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
2220 swdev_syscall_active = 1;
2221
2222 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
2223 error = namei(&nd);
2224 if (error)
2225 goto done;
2226 NDFREE(&nd, NDF_ONLY_PNBUF);
2227 vp = nd.ni_vp;
2228
2229 mtx_lock(&sw_dev_mtx);
2230 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2231 if (sp->sw_vp == vp)
2232 goto found;
2233 }
2234 mtx_unlock(&sw_dev_mtx);
2235 error = EINVAL;
2236 goto done;
2237 found:
2238 mtx_unlock(&sw_dev_mtx);
2239 #ifdef MAC
2240 (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2241 error = mac_check_system_swapoff(td->td_ucred, vp);
2242 (void) VOP_UNLOCK(vp, 0, td);
2243 if (error != 0)
2244 goto done;
2245 #endif
2246
2247 nblks = sp->sw_nblks;
2248
2249 /*
2250 * We can turn off this swap device safely only if the
2251 * available virtual memory in the system will fit the amount
2252 * of data we will have to page back in, plus an epsilon so
2253 * the system doesn't become critically low on swap space.
2254 */
2255 if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
2256 nblks + nswap_lowat) {
2257 error = ENOMEM;
2258 goto done;
2259 }
2260
2261 /*
2262 * Prevent further allocations on this device.
2263 */
2264 mtx_lock(&sw_dev_mtx);
2265 sp->sw_flags |= SW_CLOSING;
2266 for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
2267 swap_pager_avail -= blist_fill(sp->sw_blist,
2268 dvbase, dmmax);
2269 }
2270 mtx_unlock(&sw_dev_mtx);
2271
2272 /*
2273 * Page in the contents of the device and close it.
2274 */
2275 #ifndef NO_SWAPPING
2276 vm_proc_swapin_all(sp);
2277 #endif /* !NO_SWAPPING */
2278 swap_pager_swapoff(sp, &sp->sw_used);
2279
2280 sp->sw_close(td, sp);
2281 sp->sw_id = NULL;
2282 mtx_lock(&sw_dev_mtx);
2283 TAILQ_REMOVE(&swtailq, sp, sw_list);
2284 nswapdev--;
2285 if (swdevhd == sp)
2286 swdevhd = NULL;
2287 mtx_unlock(&sw_dev_mtx);
2288 blist_destroy(sp->sw_blist);
2289 free(sp, M_VMPGDATA);
2290
2291 done:
2292 swdev_syscall_active = 0;
2293 wakeup_one(&swdev_syscall_active);
2294 done2:
2295 mtx_unlock(&Giant);
2296 return (error);
2297 }
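A worked instance of the capacity check in the routine above, with illustrative numbers and nswap_lowat taken as 128: removing a device of nblks = 30000 pages while v_free_count = 10000, v_cache_count = 4000 and swap_pager_avail = 20000 gives 34000 on the left against 30128 on the right, so the swapoff proceeds; with only 5000 free pages the sum drops to 29000 and the call fails with ENOMEM before any pages are brought back in.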
2298
2299 void
2300 swap_pager_status(int *total, int *used)
2301 {
2302 struct swdevt *sp;
2303
2304 *total = 0;
2305 *used = 0;
2306 mtx_lock(&sw_dev_mtx);
2307 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2308 *total += sp->sw_nblks;
2309 *used += sp->sw_used;
2310 }
2311 mtx_unlock(&sw_dev_mtx);
2312 }
2313
2314 static int
2315 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2316 {
2317 int *name = (int *)arg1;
2318 int error, n;
2319 struct xswdev xs;
2320 struct swdevt *sp;
2321
2322 if (arg2 != 1) /* name length */
2323 return (EINVAL);
2324
2325 n = 0;
2326 mtx_lock(&sw_dev_mtx);
2327 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2328 if (n == *name) {
2329 mtx_unlock(&sw_dev_mtx);
2330 xs.xsw_version = XSWDEV_VERSION;
2331 xs.xsw_dev = sp->sw_udev;
2332 xs.xsw_flags = sp->sw_flags;
2333 xs.xsw_nblks = sp->sw_nblks;
2334 xs.xsw_used = sp->sw_used;
2335
2336 error = SYSCTL_OUT(req, &xs, sizeof(xs));
2337 return (error);
2338 }
2339 n++;
2340 }
2341 mtx_unlock(&sw_dev_mtx);
2342 return (ENOENT);
2343 }
2344
2345 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2346 "Number of swap devices");
2347 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
2348 "Swap statistics by device");
2349
2350 /*
2351 * vmspace_swap_count() - count the approximate swap usage in pages for a
2352 * vmspace.
2353 *
2354 * The map must be locked.
2355 *
2356 * Swap usage is determined by taking the proportional swap used by
2357 * VM objects backing the VM map. To make up for fractional losses,
2358 * if the VM object has any swap use at all the associated map entries
2359 * count for at least 1 swap page.
2360 */
2361 int
2362 vmspace_swap_count(struct vmspace *vmspace)
2363 {
2364 vm_map_t map = &vmspace->vm_map;
2365 vm_map_entry_t cur;
2366 int count = 0;
2367
2368 for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2369 vm_object_t object;
2370
2371 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2372 (object = cur->object.vm_object) != NULL) {
2373 VM_OBJECT_LOCK(object);
2374 if (object->type == OBJT_SWAP &&
2375 object->un_pager.swp.swp_bcount != 0) {
2376 int n = (cur->end - cur->start) / PAGE_SIZE;
2377
2378 count += object->un_pager.swp.swp_bcount *
2379 SWAP_META_PAGES * n / object->size + 1;
2380 }
2381 VM_OBJECT_UNLOCK(object);
2382 }
2383 }
2384 return (count);
2385 }
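Worked example of the estimate above, taking SWAP_META_PAGES as 16 for illustration: an object of size 1024 pages with swp_bcount = 4 is credited with roughly 4 * 16 = 64 swapped pages; a map entry covering 256 of those pages (n = 256) therefore adds 4 * 16 * 256 / 1024 + 1 = 17 pages to the count, the trailing +1 ensuring that any object with swap use contributes at least one page.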
2386
2387 /*
2388 * GEOM backend
2389 *
2390 * Swapping onto disk devices.
2391 *
2392 */
2393
2394 static struct g_class g_swap_class = {
2395 .name = "SWAP",
2396 };
2397
2398 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2399
2400
2401 static void
2402 swapgeom_done(struct bio *bp2)
2403 {
2404 struct buf *bp;
2405
2406 bp = bp2->bio_caller2;
2407 if (bp2->bio_error)
2408 bp->b_ioflags |= BIO_ERROR;
2409 mtx_lock(&Giant);
2410 bufdone(bp);
2411 mtx_unlock(&Giant);
2412 g_destroy_bio(bp2);
2413 }
2414
2415 static void
2416 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2417 {
2418 struct bio *bio;
2419 struct g_consumer *cp;
2420
2421 cp = sp->sw_id;
2422 if (cp == NULL) {
2423 bp->b_error = ENXIO;
2424 bp->b_ioflags |= BIO_ERROR;
2425 bufdone(bp);
2426 return;
2427 }
2428 bio = g_clone_bio(&bp->b_io);
2429 bio->bio_caller2 = bp;
2430 bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2431 bio->bio_length = bp->b_bcount;
2432 bio->bio_done = swapgeom_done;
2433 g_io_request(bio, cp);
2434 return;
2435 }
2436
2437 static void
2438 swapgeom_orphan(struct g_consumer *cp)
2439 {
2440 struct swdevt *sp;
2441
2442 mtx_lock(&sw_dev_mtx);
2443 TAILQ_FOREACH(sp, &swtailq, sw_list)
2444 if (sp->sw_id == cp)
2445 sp->sw_id = NULL;
2446 mtx_unlock(&sw_dev_mtx);
2447 }
2448
2449 static void
2450 swapgeom_close_ev(void *arg, int flags)
2451 {
2452 struct g_consumer *cp;
2453
2454 cp = arg;
2455 g_access_rel(cp, -1, -1, 0);
2456 g_detach(cp);
2457 g_destroy_consumer(cp);
2458 }
2459
2460 static void
2461 swapgeom_close(struct thread *td, struct swdevt *sw)
2462 {
2463
2464 /* XXX: direct call when Giant untangled */
2465 g_waitfor_event(swapgeom_close_ev, sw->sw_id, M_WAITOK, NULL);
2466 }
2467
2468
2469 struct swh0h0 {
2470 dev_t dev;
2471 struct vnode *vp;
2472 int error;
2473 };
2474
2475 static void
2476 swapongeom_ev(void *arg, int flags)
2477 {
2478 struct swh0h0 *swh;
2479 struct g_provider *pp;
2480 struct g_consumer *cp;
2481 static struct g_geom *gp;
2482 struct swdevt *sp;
2483 u_long nblks;
2484 int error;
2485
2486 swh = arg;
2487 swh->error = 0;
2488 pp = g_dev_getprovider(swh->dev);
2489 if (pp == NULL) {
2490 swh->error = ENODEV;
2491 return;
2492 }
2493 mtx_lock(&sw_dev_mtx);
2494 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2495 cp = sp->sw_id;
2496 if (cp != NULL && cp->provider == pp) {
2497 mtx_unlock(&sw_dev_mtx);
2498 swh->error = EBUSY;
2499 return;
2500 }
2501 }
2502 mtx_unlock(&sw_dev_mtx);
2503 if (gp == NULL) {
2504 gp = g_new_geomf(&g_swap_class, "swap", NULL);
2505 gp->orphan = swapgeom_orphan;
2506 }
2507 cp = g_new_consumer(gp);
2508 g_attach(cp, pp);
2509 /*
2510 * XXX: Every time you think you can improve the margin for
2511 * footshooting, somebody depends on the ability to do so:
2512 * savecore(8) wants to write to our swapdev so we cannot
2513 * set an exclusive count :-(
2514 */
2515 error = g_access_rel(cp, 1, 1, 0);
2516 if (error) {
2517 g_detach(cp);
2518 g_destroy_consumer(cp);
2519 swh->error = error;
2520 return;
2521 }
2522 nblks = pp->mediasize / DEV_BSIZE;
2523 swaponsomething(swh->vp, cp, nblks, swapgeom_strategy,
2524 swapgeom_close, dev2udev(swh->dev));
2525 swh->error = 0;
2526 return;
2527 }
2528
2529 static int
2530 swapongeom(struct thread *td, struct vnode *vp)
2531 {
2532 int error;
2533 struct swh0h0 swh;
2534
2535 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2536
2537 swh.dev = vp->v_rdev;
2538 swh.vp = vp;
2539 swh.error = 0;
2540 /* XXX: direct call when Giant untangled */
2541 error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL);
2542 if (!error)
2543 error = swh.error;
2544 VOP_UNLOCK(vp, 0, td);
2545 return (error);
2546 }
2547
2548 /*
2549 * VNODE backend
2550 *
2551 * This is used mainly for network filesystem (read: probably only tested
2552 * with NFS) swapfiles.
2553 *
2554 */
2555
2556 static void
2557 swapdev_strategy(struct buf *bp, struct swdevt *sp)
2558 {
2559 int s;
2560 struct vnode *vp, *vp2;
2561
2562 bp->b_dev = NODEV;
2563 bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2564
2565 vp2 = sp->sw_id;
2566 vhold(vp2);
2567 s = splvm();
2568 if (bp->b_iocmd == BIO_WRITE) {
2569 vp = bp->b_vp;
2570 if (vp) {
2571 VI_LOCK(vp);
2572 vp->v_numoutput--;
2573 if ((vp->v_iflag & VI_BWAIT) && vp->v_numoutput <= 0) {
2574 vp->v_iflag &= ~VI_BWAIT;
2575 wakeup(&vp->v_numoutput);
2576 }
2577 VI_UNLOCK(vp);
2578 }
2579 VI_LOCK(vp2);
2580 vp2->v_numoutput++;
2581 VI_UNLOCK(vp2);
2582 }
2583 bp->b_vp = vp2;
2584 splx(s);
2585 bp->b_iooffset = dbtob(bp->b_blkno);
2586 VOP_STRATEGY(vp2, bp);
2587 return;
2588 }
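The two conversions in the routine above take a device-relative page index to a DEV_BSIZE block number and then to a byte offset. Assuming the common DEV_BSIZE = 512 and PAGE_SIZE = 4096, relative page 100 becomes b_blkno = ctodb(100) = 800 and b_iooffset = dbtob(800) = 409600, i.e. exactly 100 pages into the backing vnode.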
2589
2590 static void
2591 swapdev_close(struct thread *td, struct swdevt *sp)
2592 {
2593
2594 VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2595 vrele(sp->sw_vp);
2596 }
2597
2598
2599 static int
2600 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2601 {
2602 struct swdevt *sp;
2603 int error;
2604
2605 if (nblks == 0)
2606 return (ENXIO);
2607 mtx_lock(&sw_dev_mtx);
2608 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2609 if (sp->sw_id == vp) {
2610 mtx_unlock(&sw_dev_mtx);
2611 return (EBUSY);
2612 }
2613 }
2614 mtx_unlock(&sw_dev_mtx);
2615
2616 (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2617 #ifdef MAC
2618 error = mac_check_system_swapon(td->td_ucred, vp);
2619 if (error == 0)
2620 #endif
2621 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, -1);
2622 (void) VOP_UNLOCK(vp, 0, td);
2623 if (error)
2624 return (error);
2625
2626 swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2627 NOUDEV);
2628 return (0);
2629 }