FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c
1 /*-
2 * Copyright (c) 1998 Matthew Dillon,
3 * Copyright (c) 1994 John S. Dyson
4 * Copyright (c) 1990 University of Utah.
5 * Copyright (c) 1982, 1986, 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * New Swap System
41 * Matthew Dillon
42 *
43 * Radix Bitmap 'blists'.
44 *
45 * - The new swapper uses the new radix bitmap code. This should scale
46 * to arbitrarily small or arbitrarily large swap spaces and an almost
47 * arbitrary degree of fragmentation.
48 *
49 * Features:
50 *
51 * - on the fly reallocation of swap during putpages. The new system
52 * does not try to keep previously allocated swap blocks for dirty
53 * pages.
54 *
55 * - on the fly deallocation of swap
56 *
57 * - No more garbage collection required. Unnecessarily allocated swap
58 * blocks only exist for dirty vm_page_t's now and these are already
59 * cycled (in a high-load system) by the pager. We also do on-the-fly
60 * removal of invalidated swap blocks when a page is destroyed
61 * or renamed.
62 *
63 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64 *
65 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
66 * @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
67 */
68
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD: releng/11.0/sys/vm/swap_pager.c 303905 2016-08-10 12:11:11Z kib $");
71
72 #include "opt_swap.h"
73 #include "opt_vm.h"
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/conf.h>
78 #include <sys/kernel.h>
79 #include <sys/priv.h>
80 #include <sys/proc.h>
81 #include <sys/bio.h>
82 #include <sys/buf.h>
83 #include <sys/disk.h>
84 #include <sys/fcntl.h>
85 #include <sys/mount.h>
86 #include <sys/namei.h>
87 #include <sys/vnode.h>
88 #include <sys/malloc.h>
89 #include <sys/racct.h>
90 #include <sys/resource.h>
91 #include <sys/resourcevar.h>
92 #include <sys/rwlock.h>
93 #include <sys/sysctl.h>
94 #include <sys/sysproto.h>
95 #include <sys/blist.h>
96 #include <sys/lock.h>
97 #include <sys/sx.h>
98 #include <sys/vmmeter.h>
99
100 #include <security/mac/mac_framework.h>
101
102 #include <vm/vm.h>
103 #include <vm/pmap.h>
104 #include <vm/vm_map.h>
105 #include <vm/vm_kern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_page.h>
108 #include <vm/vm_pager.h>
109 #include <vm/vm_pageout.h>
110 #include <vm/vm_param.h>
111 #include <vm/swap_pager.h>
112 #include <vm/vm_extern.h>
113 #include <vm/uma.h>
114
115 #include <geom/geom.h>
116
117 /*
118 * SWB_NPAGES must be a power of 2. It may be set to 1, 2, 4, 8, 16
119 * or 32 pages per allocation.
120 * The 32-page limit is due to the radix code (kern/subr_blist.c).
121 */
122 #ifndef MAX_PAGEOUT_CLUSTER
123 #define MAX_PAGEOUT_CLUSTER 16
124 #endif
125
126 #if !defined(SWB_NPAGES)
127 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER
128 #endif
129
130 /*
131 * The swblock structure maps an object and a small, fixed-size range
132 * of page indices to disk addresses within a swap area.
133 * The collection of these mappings is implemented as a hash table.
134 * Unused disk addresses within a swap area are allocated and managed
135 * using a blist.
136 */
137 #define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t))
138 #define SWAP_META_PAGES (SWB_NPAGES * 2)
139 #define SWAP_META_MASK (SWAP_META_PAGES - 1)
140
141 struct swblock {
142 struct swblock *swb_hnext;
143 vm_object_t swb_object;
144 vm_pindex_t swb_index;
145 int swb_count;
146 daddr_t swb_pages[SWAP_META_PAGES];
147 };
148
149 static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
150 static struct mtx sw_dev_mtx;
151 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
152 static struct swdevt *swdevhd; /* Allocate from here next */
153 static int nswapdev; /* Number of swap devices */
154 int swap_pager_avail;
155 static struct sx swdev_syscall_lock; /* serialize swap(on|off) */
156
157 static vm_ooffset_t swap_total;
158 SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0,
159 "Total amount of available swap storage.");
160 static vm_ooffset_t swap_reserved;
161 SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0,
162 "Amount of swap storage needed to back all allocated anonymous memory.");
163 static int overcommit = 0;
164 SYSCTL_INT(_vm, OID_AUTO, overcommit, CTLFLAG_RW, &overcommit, 0,
165 "Configure virtual memory overcommit behavior. See tuning(7) "
166 "for details.");
167 static unsigned long swzone;
168 SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
169 "Actual size of swap metadata zone");
170 static unsigned long swap_maxpages;
171 SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
172 "Maximum amount of swap supported");
173
174 /* bits from overcommit */
175 #define SWAP_RESERVE_FORCE_ON (1 << 0)
176 #define SWAP_RESERVE_RLIMIT_ON (1 << 1)
177 #define SWAP_RESERVE_ALLOW_NONWIRED (1 << 2)
178
179 int
180 swap_reserve(vm_ooffset_t incr)
181 {
182
183 return (swap_reserve_by_cred(incr, curthread->td_ucred));
184 }
185
186 int
187 swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
188 {
189 vm_ooffset_t r, s;
190 int res, error;
191 static int curfail;
192 static struct timeval lastfail;
193 struct uidinfo *uip;
194
195 uip = cred->cr_ruidinfo;
196
197 if (incr & PAGE_MASK)
198 panic("swap_reserve: & PAGE_MASK");
199
200 #ifdef RACCT
201 if (racct_enable) {
202 PROC_LOCK(curproc);
203 error = racct_add(curproc, RACCT_SWAP, incr);
204 PROC_UNLOCK(curproc);
205 if (error != 0)
206 return (0);
207 }
208 #endif
209
210 res = 0;
211 mtx_lock(&sw_dev_mtx);
212 r = swap_reserved + incr;
213 if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
214 s = vm_cnt.v_page_count - vm_cnt.v_free_reserved - vm_cnt.v_wire_count;
215 s *= PAGE_SIZE;
216 } else
217 s = 0;
218 s += swap_total;
219 if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
220 (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
221 res = 1;
222 swap_reserved = r;
223 }
224 mtx_unlock(&sw_dev_mtx);
225
226 if (res) {
227 UIDINFO_VMSIZE_LOCK(uip);
228 if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
229 uip->ui_vmsize + incr > lim_cur(curthread, RLIMIT_SWAP) &&
230 priv_check(curthread, PRIV_VM_SWAP_NORLIMIT))
231 res = 0;
232 else
233 uip->ui_vmsize += incr;
234 UIDINFO_VMSIZE_UNLOCK(uip);
235 if (!res) {
236 mtx_lock(&sw_dev_mtx);
237 swap_reserved -= incr;
238 mtx_unlock(&sw_dev_mtx);
239 }
240 }
241 if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
242 printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
243 uip->ui_uid, curproc->p_pid, incr);
244 }
245
246 #ifdef RACCT
247 if (!res) {
248 PROC_LOCK(curproc);
249 racct_sub(curproc, RACCT_SWAP, incr);
250 PROC_UNLOCK(curproc);
251 }
252 #endif
253
254 return (res);
255 }
256
257 void
258 swap_reserve_force(vm_ooffset_t incr)
259 {
260 struct uidinfo *uip;
261
262 mtx_lock(&sw_dev_mtx);
263 swap_reserved += incr;
264 mtx_unlock(&sw_dev_mtx);
265
266 #ifdef RACCT
267 PROC_LOCK(curproc);
268 racct_add_force(curproc, RACCT_SWAP, incr);
269 PROC_UNLOCK(curproc);
270 #endif
271
272 uip = curthread->td_ucred->cr_ruidinfo;
273 PROC_LOCK(curproc);
274 UIDINFO_VMSIZE_LOCK(uip);
275 uip->ui_vmsize += incr;
276 UIDINFO_VMSIZE_UNLOCK(uip);
277 PROC_UNLOCK(curproc);
278 }
279
280 void
281 swap_release(vm_ooffset_t decr)
282 {
283 struct ucred *cred;
284
285 PROC_LOCK(curproc);
286 cred = curthread->td_ucred;
287 swap_release_by_cred(decr, cred);
288 PROC_UNLOCK(curproc);
289 }
290
291 void
292 swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
293 {
294 struct uidinfo *uip;
295
296 uip = cred->cr_ruidinfo;
297
298 if (decr & PAGE_MASK)
299 panic("swap_release: & PAGE_MASK");
300
301 mtx_lock(&sw_dev_mtx);
302 if (swap_reserved < decr)
303 panic("swap_reserved < decr");
304 swap_reserved -= decr;
305 mtx_unlock(&sw_dev_mtx);
306
307 UIDINFO_VMSIZE_LOCK(uip);
308 if (uip->ui_vmsize < decr)
309 printf("negative vmsize for uid = %d\n", uip->ui_uid);
310 uip->ui_vmsize -= decr;
311 UIDINFO_VMSIZE_UNLOCK(uip);
312
313 racct_sub_cred(cred, RACCT_SWAP, decr);
314 }
315
316 #define SWM_FREE 0x02 /* free, period */
317 #define SWM_POP 0x04 /* pop out */
318
319 int swap_pager_full = 2; /* swap space exhaustion (task killing) */
320 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
321 static int nsw_rcount; /* free read buffers */
322 static int nsw_wcount_sync; /* limit write buffers / synchronous */
323 static int nsw_wcount_async; /* limit write buffers / asynchronous */
324 static int nsw_wcount_async_max;/* assigned maximum */
325 static int nsw_cluster_max; /* maximum VOP I/O allowed */
326
327 static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
328 SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
329 CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
330 "Maximum running async swap ops");
331
332 static struct swblock **swhash;
333 static int swhash_mask;
334 static struct mtx swhash_mtx;
335
336 static struct sx sw_alloc_sx;
337
338 /*
339 * "named" and "unnamed" anon region objects. Try to reduce the overhead
340 * of searching a named list by hashing it just a little.
341 */
342
343 #define NOBJLISTS 8
344
345 #define NOBJLIST(handle) \
346 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
347
348 static struct pagerlst swap_pager_object_list[NOBJLISTS];
349 static uma_zone_t swap_zone;
350
351 /*
352 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
353 * calls hooked from other parts of the VM system and do not appear here.
354 * (see vm/swap_pager.h).
355 */
356 static vm_object_t
357 swap_pager_alloc(void *handle, vm_ooffset_t size,
358 vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
359 static void swap_pager_dealloc(vm_object_t object);
360 static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
361 int *);
362 static int swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
363 int *, pgo_getpages_iodone_t, void *);
364 static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
365 static boolean_t
366 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
367 static void swap_pager_init(void);
368 static void swap_pager_unswapped(vm_page_t);
369 static void swap_pager_swapoff(struct swdevt *sp);
370
371 struct pagerops swappagerops = {
372 .pgo_init = swap_pager_init, /* early system initialization of pager */
373 .pgo_alloc = swap_pager_alloc, /* allocate an OBJT_SWAP object */
374 .pgo_dealloc = swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
375 .pgo_getpages = swap_pager_getpages, /* pagein */
376 .pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
377 .pgo_putpages = swap_pager_putpages, /* pageout */
378 .pgo_haspage = swap_pager_haspage, /* get backing store status for page */
379 .pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
380 };
381
382 /*
383 * dmmax is in page-sized chunks with the new swap system. It was
384 * dev-bsized chunks in the old. dmmax is always a power of 2.
385 *
386 * swap_*() routines are externally accessible. swp_*() routines are
387 * internal.
388 */
389 static int dmmax;
390 static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
391 static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
392
393 SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &dmmax, 0,
394 "Maximum size of a swap block");
395
396 static void swp_sizecheck(void);
397 static void swp_pager_async_iodone(struct buf *bp);
398 static int swapongeom(struct vnode *);
399 static int swaponvp(struct thread *, struct vnode *, u_long);
400 static int swapoff_one(struct swdevt *sp, struct ucred *cred);
401
402 /*
403 * Swap bitmap functions
404 */
405 static void swp_pager_freeswapspace(daddr_t blk, int npages);
406 static daddr_t swp_pager_getswapspace(int npages);
407
408 /*
409 * Metadata functions
410 */
411 static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
412 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
413 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
414 static void swp_pager_meta_free_all(vm_object_t);
415 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
416
417 /*
418 * SWP_SIZECHECK() - update swap_pager_full indication
419 *
420 * update the swap_pager_almost_full indication and warn when we are
421 * about to run out of swap space, using lowat/hiwat hysteresis.
422 *
423 * Clear swap_pager_full ( task killing ) indication when lowat is met.
424 *
425 * No restrictions on call
426 * This routine may not block.
427 */
428 static void
429 swp_sizecheck(void)
430 {
431
432 if (swap_pager_avail < nswap_lowat) {
433 if (swap_pager_almost_full == 0) {
434 printf("swap_pager: out of swap space\n");
435 swap_pager_almost_full = 1;
436 }
437 } else {
438 swap_pager_full = 0;
439 if (swap_pager_avail > nswap_hiwat)
440 swap_pager_almost_full = 0;
441 }
442 }
443
444 /*
445 * SWP_PAGER_HASH() - hash swap meta data
446 *
447 * This is an helper function which hashes the swapblk given
448 * the object and page index. It returns a pointer to a pointer
449 * to the object, or a pointer to a NULL pointer if it could not
450 * find a swapblk.
451 */
452 static struct swblock **
453 swp_pager_hash(vm_object_t object, vm_pindex_t index)
454 {
455 struct swblock **pswap;
456 struct swblock *swap;
457
458 index &= ~(vm_pindex_t)SWAP_META_MASK;
459 pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
460 while ((swap = *pswap) != NULL) {
461 if (swap->swb_object == object &&
462 swap->swb_index == index
463 ) {
464 break;
465 }
466 pswap = &swap->swb_hnext;
467 }
468 return (pswap);
469 }
470
471 /*
472 * SWAP_PAGER_INIT() - initialize the swap pager!
473 *
474 * Expected to be started from system init. NOTE: This code is run
475 * before much else so be careful what you depend on. Most of the VM
476 * system has yet to be initialized at this point.
477 */
478 static void
479 swap_pager_init(void)
480 {
481 /*
482 * Initialize object lists
483 */
484 int i;
485
486 for (i = 0; i < NOBJLISTS; ++i)
487 TAILQ_INIT(&swap_pager_object_list[i]);
488 mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
489 sx_init(&sw_alloc_sx, "swspsx");
490 sx_init(&swdev_syscall_lock, "swsysc");
491
492 /*
493 * Device Stripe, in PAGE_SIZE'd blocks
494 */
495 dmmax = SWB_NPAGES * 2;
496 }
497
498 /*
499 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
500 *
501 * Expected to be started from pageout process once, prior to entering
502 * its main loop.
503 */
504 void
505 swap_pager_swap_init(void)
506 {
507 unsigned long n, n2;
508
509 /*
510 * Number of in-transit swap bp operations. Don't
511 * exhaust the pbufs completely. Make sure we
512 * initialize workable values (0 will work for hysteresis
513 * but it isn't very efficient).
514 *
515 * The nsw_cluster_max is constrained by the bp->b_pages[]
516 * array (MAXPHYS/PAGE_SIZE) and our locally defined
517 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
518 * constrained by the swap device interleave stripe size.
519 *
520 * Currently we hardwire nsw_wcount_async to 4. This limit is
521 * designed to prevent other I/O from having high latencies due to
522 * our pageout I/O. The value 4 works well for one or two active swap
523 * devices but is probably a little low if you have more. Even so,
524 * a higher value would probably generate only a limited improvement
525 * with three or four active swap devices since the system does not
526 * typically have to pageout at extreme bandwidths. We will want
527 * at least 2 per swap devices, and 4 is a pretty good value if you
528 * have one NFS swap device due to the command/ack latency over NFS.
529 * So it all works out pretty well.
530 */
531 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
532
533 mtx_lock(&pbuf_mtx);
534 nsw_rcount = (nswbuf + 1) / 2;
535 nsw_wcount_sync = (nswbuf + 3) / 4;
536 nsw_wcount_async = 4;
537 nsw_wcount_async_max = nsw_wcount_async;
538 mtx_unlock(&pbuf_mtx);
539
540 /*
541 * Initialize our zone. Right now I'm just guessing on the number
542 * we need based on the number of pages in the system. Each swblock
543 * can hold 32 pages, so this is probably overkill. This reservation
544 * is typically limited to around 32MB by default.
545 */
546 n = vm_cnt.v_page_count / 2;
547 if (maxswzone && n > maxswzone / sizeof(struct swblock))
548 n = maxswzone / sizeof(struct swblock);
549 n2 = n;
550 swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
551 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
552 if (swap_zone == NULL)
553 panic("failed to create swap_zone.");
554 do {
555 if (uma_zone_reserve_kva(swap_zone, n))
556 break;
557 /*
558 * if the allocation failed, try a zone two thirds the
559 * size of the previous attempt.
560 */
561 n -= ((n + 2) / 3);
562 } while (n > 0);
563 if (n2 != n)
564 printf("Swap zone entries reduced from %lu to %lu.\n", n2, n);
565 swap_maxpages = n * SWAP_META_PAGES;
566 swzone = n * sizeof(struct swblock);
567 n2 = n;
568
569 /*
570 * Initialize our meta-data hash table. The swapper does not need to
571 * be quite as efficient as the VM system, so we do not use an
572 * oversized hash table.
573 *
574 * n: size of hash table, must be power of 2
575 * swhash_mask: hash table index mask
576 */
577 for (n = 1; n < n2 / 8; n *= 2)
578 ;
579 swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
580 swhash_mask = n - 1;
581 mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
582 }
583
584 static vm_object_t
585 swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
586 vm_ooffset_t offset)
587 {
588 vm_object_t object;
589
590 if (cred != NULL) {
591 if (!swap_reserve_by_cred(size, cred))
592 return (NULL);
593 crhold(cred);
594 }
595 object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
596 PAGE_MASK + size));
597 object->handle = handle;
598 if (cred != NULL) {
599 object->cred = cred;
600 object->charge = size;
601 }
602 object->un_pager.swp.swp_bcount = 0;
603 return (object);
604 }
605
606 /*
607 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
608 * its metadata structures.
609 *
610 * This routine is called from the mmap and fork code to create a new
611 * OBJT_SWAP object.
612 *
613 * This routine must ensure that no live duplicate is created for
614 * the named object request, which is protected against by
615 * holding the sw_alloc_sx lock in case handle != NULL.
616 */
617 static vm_object_t
618 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
619 vm_ooffset_t offset, struct ucred *cred)
620 {
621 vm_object_t object;
622
623 if (handle != NULL) {
624 /*
625 * Reference existing named region or allocate new one. There
626 * should not be a race here against swp_pager_meta_build()
627 * as called from vm_page_remove() in regards to the lookup
628 * of the handle.
629 */
630 sx_xlock(&sw_alloc_sx);
631 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
632 if (object == NULL) {
633 object = swap_pager_alloc_init(handle, cred, size,
634 offset);
635 if (object != NULL) {
636 TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
637 object, pager_object_list);
638 }
639 }
640 sx_xunlock(&sw_alloc_sx);
641 } else {
642 object = swap_pager_alloc_init(handle, cred, size, offset);
643 }
644 return (object);
645 }
646
647 /*
648 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
649 *
650 * The swap backing for the object is destroyed. The code is
651 * designed such that we can reinstantiate it later, but this
652 * routine is typically called only when the entire object is
653 * about to be destroyed.
654 *
655 * The object must be locked.
656 */
657 static void
658 swap_pager_dealloc(vm_object_t object)
659 {
660
661 VM_OBJECT_ASSERT_WLOCKED(object);
662 KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));
663
664 /*
665 * Remove from list right away so lookups will fail if we block for
666 * pageout completion.
667 */
668 if (object->handle != NULL) {
669 VM_OBJECT_WUNLOCK(object);
670 sx_xlock(&sw_alloc_sx);
671 TAILQ_REMOVE(NOBJLIST(object->handle), object,
672 pager_object_list);
673 sx_xunlock(&sw_alloc_sx);
674 VM_OBJECT_WLOCK(object);
675 }
676
677 vm_object_pip_wait(object, "swpdea");
678
679 /*
680 * Free all remaining metadata. We only bother to free it from
681 * the swap meta data. We do not attempt to free swapblk's still
682 * associated with vm_page_t's for this object. We do not care
683 * if paging is still in progress on some objects.
684 */
685 swp_pager_meta_free_all(object);
686 object->handle = NULL;
687 object->type = OBJT_DEAD;
688 }
689
690 /************************************************************************
691 * SWAP PAGER BITMAP ROUTINES *
692 ************************************************************************/
693
694 /*
695 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
696 *
697 * Allocate swap for the requested number of pages. The starting
698 * swap block number (a page index) is returned or SWAPBLK_NONE
699 * if the allocation failed.
700 *
701 * Also has the side effect of advising that somebody made a mistake
702 * when they configured swap and didn't configure enough.
703 *
704 * This routine may not sleep.
705 *
706 * We allocate in round-robin fashion from the configured devices.
707 */
708 static daddr_t
709 swp_pager_getswapspace(int npages)
710 {
711 daddr_t blk;
712 struct swdevt *sp;
713 int i;
714
715 blk = SWAPBLK_NONE;
716 mtx_lock(&sw_dev_mtx);
717 sp = swdevhd;
718 for (i = 0; i < nswapdev; i++) {
719 if (sp == NULL)
720 sp = TAILQ_FIRST(&swtailq);
721 if (!(sp->sw_flags & SW_CLOSING)) {
722 blk = blist_alloc(sp->sw_blist, npages);
723 if (blk != SWAPBLK_NONE) {
724 blk += sp->sw_first;
725 sp->sw_used += npages;
726 swap_pager_avail -= npages;
727 swp_sizecheck();
728 swdevhd = TAILQ_NEXT(sp, sw_list);
729 goto done;
730 }
731 }
732 sp = TAILQ_NEXT(sp, sw_list);
733 }
734 if (swap_pager_full != 2) {
735 printf("swap_pager_getswapspace(%d): failed\n", npages);
736 swap_pager_full = 2;
737 swap_pager_almost_full = 1;
738 }
739 swdevhd = NULL;
740 done:
741 mtx_unlock(&sw_dev_mtx);
742 return (blk);
743 }
744
745 static int
746 swp_pager_isondev(daddr_t blk, struct swdevt *sp)
747 {
748
749 return (blk >= sp->sw_first && blk < sp->sw_end);
750 }
751
752 static void
753 swp_pager_strategy(struct buf *bp)
754 {
755 struct swdevt *sp;
756
757 mtx_lock(&sw_dev_mtx);
758 TAILQ_FOREACH(sp, &swtailq, sw_list) {
759 if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
760 mtx_unlock(&sw_dev_mtx);
761 if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
762 unmapped_buf_allowed) {
763 bp->b_data = unmapped_buf;
764 bp->b_offset = 0;
765 } else {
766 pmap_qenter((vm_offset_t)bp->b_data,
767 &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
768 }
769 sp->sw_strategy(bp, sp);
770 return;
771 }
772 }
773 panic("Swapdev not found");
774 }
775
776
777 /*
778 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
779 *
780 * This routine returns the specified swap blocks back to the bitmap.
781 *
782 * This routine may not sleep.
783 */
784 static void
785 swp_pager_freeswapspace(daddr_t blk, int npages)
786 {
787 struct swdevt *sp;
788
789 mtx_lock(&sw_dev_mtx);
790 TAILQ_FOREACH(sp, &swtailq, sw_list) {
791 if (blk >= sp->sw_first && blk < sp->sw_end) {
792 sp->sw_used -= npages;
793 /*
794 * If we are attempting to stop swapping on
795 * this device, we don't want to mark any
796 * blocks free lest they be reused.
797 */
798 if ((sp->sw_flags & SW_CLOSING) == 0) {
799 blist_free(sp->sw_blist, blk - sp->sw_first,
800 npages);
801 swap_pager_avail += npages;
802 swp_sizecheck();
803 }
804 mtx_unlock(&sw_dev_mtx);
805 return;
806 }
807 }
808 panic("Swapdev not found");
809 }
810
811 /*
812 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
813 * range within an object.
814 *
815 * This is a globally accessible routine.
816 *
817 * This routine removes swapblk assignments from swap metadata.
818 *
819 * The external callers of this routine typically have already destroyed
820 * or renamed vm_page_t's associated with this range in the object so
821 * we should be ok.
822 *
823 * The object must be locked.
824 */
825 void
826 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
827 {
828
829 swp_pager_meta_free(object, start, size);
830 }
831
832 /*
833 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
834 *
835 * Assigns swap blocks to the specified range within the object. The
836 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
837 *
838 * Returns 0 on success, -1 on failure.
839 */
840 int
841 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
842 {
843 int n = 0;
844 daddr_t blk = SWAPBLK_NONE;
845 vm_pindex_t beg = start; /* save start index */
846
847 VM_OBJECT_WLOCK(object);
848 while (size) {
849 if (n == 0) {
850 n = BLIST_MAX_ALLOC;
851 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
852 n >>= 1;
853 if (n == 0) {
854 swp_pager_meta_free(object, beg, start - beg);
855 VM_OBJECT_WUNLOCK(object);
856 return (-1);
857 }
858 }
859 }
860 swp_pager_meta_build(object, start, blk);
861 --size;
862 ++start;
863 ++blk;
864 --n;
865 }
866 swp_pager_meta_free(object, start, n);
867 VM_OBJECT_WUNLOCK(object);
868 return (0);
869 }
870
871 /*
872 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
873 * and destroy the source.
874 *
875 * Copy any valid swapblks from the source to the destination. In
876 * cases where both the source and destination have a valid swapblk,
877 * we keep the destination's.
878 *
879 * This routine is allowed to sleep. It may sleep allocating metadata
880 * indirectly through swp_pager_meta_build() or if paging is still in
881 * progress on the source.
882 *
883 * The source object contains no vm_page_t's (which is just as well)
884 *
885 * The source object is of type OBJT_SWAP.
886 *
887 * The source and destination objects must be locked.
888 * Both object locks may temporarily be released.
889 */
890 void
891 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
892 vm_pindex_t offset, int destroysource)
893 {
894 vm_pindex_t i;
895
896 VM_OBJECT_ASSERT_WLOCKED(srcobject);
897 VM_OBJECT_ASSERT_WLOCKED(dstobject);
898
899 /*
900 * If destroysource is set, we remove the source object from the
901 * swap_pager internal queue now.
902 */
903 if (destroysource && srcobject->handle != NULL) {
904 vm_object_pip_add(srcobject, 1);
905 VM_OBJECT_WUNLOCK(srcobject);
906 vm_object_pip_add(dstobject, 1);
907 VM_OBJECT_WUNLOCK(dstobject);
908 sx_xlock(&sw_alloc_sx);
909 TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
910 pager_object_list);
911 sx_xunlock(&sw_alloc_sx);
912 VM_OBJECT_WLOCK(dstobject);
913 vm_object_pip_wakeup(dstobject);
914 VM_OBJECT_WLOCK(srcobject);
915 vm_object_pip_wakeup(srcobject);
916 }
917
918 /*
919 * transfer source to destination.
920 */
921 for (i = 0; i < dstobject->size; ++i) {
922 daddr_t dstaddr;
923
924 /*
925 * Locate (without changing) the swapblk on the destination,
926 * unless it is invalid in which case free it silently, or
927 * if the destination is a resident page, in which case the
928 * source is thrown away.
929 */
930 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
931
932 if (dstaddr == SWAPBLK_NONE) {
933 /*
934 * Destination has no swapblk and is not resident,
935 * copy source.
936 */
937 daddr_t srcaddr;
938
939 srcaddr = swp_pager_meta_ctl(
940 srcobject,
941 i + offset,
942 SWM_POP
943 );
944
945 if (srcaddr != SWAPBLK_NONE) {
946 /*
947 * swp_pager_meta_build() can sleep.
948 */
949 vm_object_pip_add(srcobject, 1);
950 VM_OBJECT_WUNLOCK(srcobject);
951 vm_object_pip_add(dstobject, 1);
952 swp_pager_meta_build(dstobject, i, srcaddr);
953 vm_object_pip_wakeup(dstobject);
954 VM_OBJECT_WLOCK(srcobject);
955 vm_object_pip_wakeup(srcobject);
956 }
957 } else {
958 /*
959 * Destination has valid swapblk or it is represented
960 * by a resident page. We destroy the sourceblock.
961 */
962
963 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
964 }
965 }
966
967 /*
968 * Free left over swap blocks in source.
969 *
970 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
971 * double-remove the object from the swap queues.
972 */
973 if (destroysource) {
974 swp_pager_meta_free_all(srcobject);
975 /*
976 * Reverting the type is not necessary, the caller is going
977 * to destroy srcobject directly, but I'm doing it here
978 * for consistency since we've removed the object from its
979 * queues.
980 */
981 srcobject->type = OBJT_DEFAULT;
982 }
983 }
984
985 /*
986 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
987 * the requested page.
988 *
989 * We determine whether good backing store exists for the requested
990 * page and return TRUE if it does, FALSE if it doesn't.
991 *
992 * If TRUE, we also try to determine how much valid, contiguous backing
993 * store exists before and after the requested page within a reasonable
994 * distance. We do not try to restrict it to the swap device stripe
995 * (that is handled in getpages/putpages). It probably isn't worth
996 * doing here.
997 */
998 static boolean_t
999 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
1000 {
1001 daddr_t blk0;
1002
1003 VM_OBJECT_ASSERT_LOCKED(object);
1004 /*
1005 * do we have good backing store at the requested index ?
1006 */
1007 blk0 = swp_pager_meta_ctl(object, pindex, 0);
1008
1009 if (blk0 == SWAPBLK_NONE) {
1010 if (before)
1011 *before = 0;
1012 if (after)
1013 *after = 0;
1014 return (FALSE);
1015 }
1016
1017 /*
1018 * find backwards-looking contiguous good backing store
1019 */
1020 if (before != NULL) {
1021 int i;
1022
1023 for (i = 1; i < (SWB_NPAGES/2); ++i) {
1024 daddr_t blk;
1025
1026 if (i > pindex)
1027 break;
1028 blk = swp_pager_meta_ctl(object, pindex - i, 0);
1029 if (blk != blk0 - i)
1030 break;
1031 }
1032 *before = (i - 1);
1033 }
1034
1035 /*
1036 * find forward-looking contiguous good backing store
1037 */
1038 if (after != NULL) {
1039 int i;
1040
1041 for (i = 1; i < (SWB_NPAGES/2); ++i) {
1042 daddr_t blk;
1043
1044 blk = swp_pager_meta_ctl(object, pindex + i, 0);
1045 if (blk != blk0 + i)
1046 break;
1047 }
1048 *after = (i - 1);
1049 }
1050 return (TRUE);
1051 }
1052
1053 /*
1054 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
1055 *
1056 * This removes any associated swap backing store, whether valid or
1057 * not, from the page.
1058 *
1059 * This routine is typically called when a page is made dirty, at
1060 * which point any associated swap can be freed. MADV_FREE also
1061 * calls us in a special-case situation
1062 *
1063 * NOTE!!! If the page is clean and the swap was valid, the caller
1064 * should make the page dirty before calling this routine. This routine
1065 * does NOT change the m->dirty status of the page. Also: MADV_FREE
1066 * depends on it.
1067 *
1068 * This routine may not sleep.
1069 *
1070 * The object containing the page must be locked.
1071 */
1072 static void
1073 swap_pager_unswapped(vm_page_t m)
1074 {
1075
1076 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
1077 }
1078
1079 /*
1080 * SWAP_PAGER_GETPAGES() - bring pages in from swap
1081 *
1082 * Attempt to retrieve (m, count) pages from backing store, but make
1083 * sure we retrieve at least m[reqpage]. We try to load in as large
1084 * a chunk surrounding m[reqpage] as is contiguous in swap and which
1085 * belongs to the same object.
1086 *
1087 * The code is designed for asynchronous operation and
1088 * immediate-notification of 'reqpage' but tends not to be
1089 * used that way. Please do not optimize-out this algorithmic
1090 * feature, I intend to improve on it in the future.
1091 *
1092 * The parent has a single vm_object_pip_add() reference prior to
1093 * calling us and we should return with the same.
1094 *
1095 * The parent has BUSY'd the pages. We should return with 'm'
1096 * left busy, but the others adjusted.
1097 */
1098 static int
1099 swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
1100 int *rahead)
1101 {
1102 struct buf *bp;
1103 daddr_t blk;
1104
1105 /*
1106 * Calculate range to retrieve. The pages have already been assigned
1107 * their swapblks. We require a *contiguous* range but we know it to
1108 * not span devices. If we do not supply it, bad things
1109 * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1110 * loops are set up such that the case(s) are handled implicitly.
1111 *
1112 * The swp_*() calls must be made with the object locked.
1113 */
1114 blk = swp_pager_meta_ctl(m[0]->object, m[0]->pindex, 0);
1115
1116 if (blk == SWAPBLK_NONE)
1117 return (VM_PAGER_FAIL);
1118
1119 #ifdef INVARIANTS
1120 for (int i = 0; i < count; i++)
1121 KASSERT(blk + i ==
1122 swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0),
1123 ("%s: range is not contiguous", __func__));
1124 #endif
1125
1126 /*
1127 * Getpbuf() can sleep.
1128 */
1129 VM_OBJECT_WUNLOCK(object);
1130 /*
1131 * Get a swap buffer header to perform the IO
1132 */
1133 bp = getpbuf(&nsw_rcount);
1134 bp->b_flags |= B_PAGING;
1135
1136 bp->b_iocmd = BIO_READ;
1137 bp->b_iodone = swp_pager_async_iodone;
1138 bp->b_rcred = crhold(thread0.td_ucred);
1139 bp->b_wcred = crhold(thread0.td_ucred);
1140 bp->b_blkno = blk;
1141 bp->b_bcount = PAGE_SIZE * count;
1142 bp->b_bufsize = PAGE_SIZE * count;
1143 bp->b_npages = count;
1144
1145 VM_OBJECT_WLOCK(object);
1146 for (int i = 0; i < count; i++) {
1147 bp->b_pages[i] = m[i];
1148 m[i]->oflags |= VPO_SWAPINPROG;
1149 }
1150
1151 PCPU_INC(cnt.v_swapin);
1152 PCPU_ADD(cnt.v_swappgsin, bp->b_npages);
1153
1154 /*
1155 * We still hold the lock on mreq, and our automatic completion routine
1156 * does not remove it.
1157 */
1158 vm_object_pip_add(object, bp->b_npages);
1159 VM_OBJECT_WUNLOCK(object);
1160
1161 /*
1162 * perform the I/O. NOTE!!! bp cannot be considered valid after
1163 * this point because we automatically release it on completion.
1164 * Instead, we look at the one page we are interested in which we
1165 * still hold a lock on even through the I/O completion.
1166 *
1167 * The other pages in our m[] array are also released on completion,
1168 * so we cannot assume they are valid anymore either.
1169 *
1170 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1171 */
1172 BUF_KERNPROC(bp);
1173 swp_pager_strategy(bp);
1174
1175 /*
1176 * wait for the page we want to complete. VPO_SWAPINPROG is always
1177 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1178 * is set in the meta-data.
1179 */
1180 VM_OBJECT_WLOCK(object);
1181 while ((m[0]->oflags & VPO_SWAPINPROG) != 0) {
1182 m[0]->oflags |= VPO_SWAPSLEEP;
1183 PCPU_INC(cnt.v_intrans);
1184 if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
1185 "swread", hz * 20)) {
1186 printf(
1187 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1188 bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
1189 }
1190 }
1191
1192 /*
1193 * If we had an unrecoverable read error pages will not be valid.
1194 */
1195 for (int i = 0; i < count; i++)
1196 if (m[i]->valid != VM_PAGE_BITS_ALL)
1197 return (VM_PAGER_ERROR);
1198
1199 if (rbehind)
1200 *rbehind = 0;
1201 if (rahead)
1202 *rahead = 0;
1203
1204 return (VM_PAGER_OK);
1205
1206 /*
1207 * A final note: in a low swap situation, we cannot deallocate swap
1208 * and mark a page dirty here because the caller is likely to mark
1209 * the page clean when we return, causing the page to possibly revert
1210 * to all-zero's later.
1211 */
1212 }
1213
1214 /*
1215 * swap_pager_getpages_async():
1216 *
1217 * Right now this is emulation of asynchronous operation on top of
1218 * swap_pager_getpages().
1219 */
1220 static int
1221 swap_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
1222 int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
1223 {
1224 int r, error;
1225
1226 r = swap_pager_getpages(object, m, count, rbehind, rahead);
1227 VM_OBJECT_WUNLOCK(object);
1228 switch (r) {
1229 case VM_PAGER_OK:
1230 error = 0;
1231 break;
1232 case VM_PAGER_ERROR:
1233 error = EIO;
1234 break;
1235 case VM_PAGER_FAIL:
1236 error = EINVAL;
1237 break;
1238 default:
1239 panic("unhandled swap_pager_getpages() error %d", r);
1240 }
1241 (iodone)(arg, m, count, error);
1242 VM_OBJECT_WLOCK(object);
1243
1244 return (r);
1245 }
1246
1247 /*
1248 * swap_pager_putpages:
1249 *
1250 * Assign swap (if necessary) and initiate I/O on the specified pages.
1251 *
1252 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1253 * are automatically converted to SWAP objects.
1254 *
1255 * In a low memory situation we may block in VOP_STRATEGY(), but the new
1256 * vm_page reservation system coupled with properly written VFS devices
1257 * should ensure that no low-memory deadlock occurs. This is an area
1258 * which needs work.
1259 *
1260 * The parent has N vm_object_pip_add() references prior to
1261 * calling us and will remove references for rtvals[] that are
1262 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1263 * completion.
1264 *
1265 * The parent has soft-busy'd the pages it passes us and will unbusy
1266 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return.
1267 * We need to unbusy the rest on I/O completion.
1268 */
1269 static void
1270 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1271 int flags, int *rtvals)
1272 {
1273 int i, n;
1274 boolean_t sync;
1275
1276 if (count && m[0]->object != object) {
1277 panic("swap_pager_putpages: object mismatch %p/%p",
1278 object,
1279 m[0]->object
1280 );
1281 }
1282
1283 /*
1284 * Step 1
1285 *
1286 * Turn object into OBJT_SWAP
1287 * check for bogus sysops
1288 * force sync if not pageout process
1289 */
1290 if (object->type != OBJT_SWAP)
1291 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1292 VM_OBJECT_WUNLOCK(object);
1293
1294 n = 0;
1295 if (curproc != pageproc)
1296 sync = TRUE;
1297 else
1298 sync = (flags & VM_PAGER_PUT_SYNC) != 0;
1299
1300 /*
1301 * Step 2
1302 *
1303 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1304 * The page is left dirty until the pageout operation completes
1305 * successfully.
1306 */
1307 for (i = 0; i < count; i += n) {
1308 int j;
1309 struct buf *bp;
1310 daddr_t blk;
1311
1312 /*
1313 * Maximum I/O size is limited by a number of factors.
1314 */
1315 n = min(BLIST_MAX_ALLOC, count - i);
1316 n = min(n, nsw_cluster_max);
1317
1318 /*
1319 * Get biggest block of swap we can. If we fail, fall
1320 * back and try to allocate a smaller block. Don't go
1321 * overboard trying to allocate space if it would overly
1322 * fragment swap.
1323 */
1324 while (
1325 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1326 n > 4
1327 ) {
1328 n >>= 1;
1329 }
1330 if (blk == SWAPBLK_NONE) {
1331 for (j = 0; j < n; ++j)
1332 rtvals[i+j] = VM_PAGER_FAIL;
1333 continue;
1334 }
1335
1336 /*
1337 * All I/O parameters have been satisfied, build the I/O
1338 * request and assign the swap space.
1339 */
1340 if (sync == TRUE) {
1341 bp = getpbuf(&nsw_wcount_sync);
1342 } else {
1343 bp = getpbuf(&nsw_wcount_async);
1344 bp->b_flags = B_ASYNC;
1345 }
1346 bp->b_flags |= B_PAGING;
1347 bp->b_iocmd = BIO_WRITE;
1348
1349 bp->b_rcred = crhold(thread0.td_ucred);
1350 bp->b_wcred = crhold(thread0.td_ucred);
1351 bp->b_bcount = PAGE_SIZE * n;
1352 bp->b_bufsize = PAGE_SIZE * n;
1353 bp->b_blkno = blk;
1354
1355 VM_OBJECT_WLOCK(object);
1356 for (j = 0; j < n; ++j) {
1357 vm_page_t mreq = m[i+j];
1358
1359 swp_pager_meta_build(
1360 mreq->object,
1361 mreq->pindex,
1362 blk + j
1363 );
1364 vm_page_dirty(mreq);
1365 mreq->oflags |= VPO_SWAPINPROG;
1366 bp->b_pages[j] = mreq;
1367 }
1368 VM_OBJECT_WUNLOCK(object);
1369 bp->b_npages = n;
1370 /*
1371 * Must set dirty range for NFS to work.
1372 */
1373 bp->b_dirtyoff = 0;
1374 bp->b_dirtyend = bp->b_bcount;
1375
1376 PCPU_INC(cnt.v_swapout);
1377 PCPU_ADD(cnt.v_swappgsout, bp->b_npages);
1378
1379 /*
1380 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
1381 * can call the async completion routine at the end of a
1382 * synchronous I/O operation. Otherwise, our caller would
1383 * perform duplicate unbusy and wakeup operations on the page
1384 * and object, respectively.
1385 */
1386 for (j = 0; j < n; j++)
1387 rtvals[i + j] = VM_PAGER_PEND;
1388
1389 /*
1390 * asynchronous
1391 *
1392 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1393 */
1394 if (sync == FALSE) {
1395 bp->b_iodone = swp_pager_async_iodone;
1396 BUF_KERNPROC(bp);
1397 swp_pager_strategy(bp);
1398 continue;
1399 }
1400
1401 /*
1402 * synchronous
1403 *
1404 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1405 */
1406 bp->b_iodone = bdone;
1407 swp_pager_strategy(bp);
1408
1409 /*
1410 * Wait for the sync I/O to complete.
1411 */
1412 bwait(bp, PVM, "swwrt");
1413
1414 /*
1415 * Now that we are through with the bp, we can call the
1416 * normal async completion, which frees everything up.
1417 */
1418 swp_pager_async_iodone(bp);
1419 }
1420 VM_OBJECT_WLOCK(object);
1421 }
1422
1423 /*
1424 * swp_pager_async_iodone:
1425 *
1426 * Completion routine for asynchronous reads and writes from/to swap.
1427 * Also called manually by synchronous code to finish up a bp.
1428 *
1429 * This routine may not sleep.
1430 */
1431 static void
1432 swp_pager_async_iodone(struct buf *bp)
1433 {
1434 int i;
1435 vm_object_t object = NULL;
1436
1437 /*
1438 * report error
1439 */
1440 if (bp->b_ioflags & BIO_ERROR) {
1441 printf(
1442 "swap_pager: I/O error - %s failed; blkno %ld,"
1443 "size %ld, error %d\n",
1444 ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1445 (long)bp->b_blkno,
1446 (long)bp->b_bcount,
1447 bp->b_error
1448 );
1449 }
1450
1451 /*
1452 * remove the mapping for kernel virtual
1453 */
1454 if (buf_mapped(bp))
1455 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1456 else
1457 bp->b_data = bp->b_kvabase;
1458
1459 if (bp->b_npages) {
1460 object = bp->b_pages[0]->object;
1461 VM_OBJECT_WLOCK(object);
1462 }
1463
1464 /*
1465 * cleanup pages. If an error occurs writing to swap, we are in
1466 * very serious trouble. If it happens to be a disk error, though,
1467 * we may be able to recover by reassigning the swap later on. So
1468 * in this case we remove the m->swapblk assignment for the page
1469 * but do not free it in the rlist. The errornous block(s) are thus
1470 * never reallocated as swap. Redirty the page and continue.
1471 */
1472 for (i = 0; i < bp->b_npages; ++i) {
1473 vm_page_t m = bp->b_pages[i];
1474
1475 m->oflags &= ~VPO_SWAPINPROG;
1476 if (m->oflags & VPO_SWAPSLEEP) {
1477 m->oflags &= ~VPO_SWAPSLEEP;
1478 wakeup(&object->paging_in_progress);
1479 }
1480
1481 if (bp->b_ioflags & BIO_ERROR) {
1482 /*
1483 * If an error occurs I'd love to throw the swapblk
1484 * away without freeing it back to swapspace, so it
1485 * can never be used again. But I can't from an
1486 * interrupt.
1487 */
1488 if (bp->b_iocmd == BIO_READ) {
1489 /*
1490 * NOTE: for reads, m->dirty will probably
1491 * be overridden by the original caller of
1492 * getpages so don't play cute tricks here.
1493 */
1494 m->valid = 0;
1495 } else {
1496 /*
1497 * If a write error occurs, reactivate page
1498 * so it doesn't clog the inactive list,
1499 * then finish the I/O.
1500 */
1501 vm_page_dirty(m);
1502 vm_page_lock(m);
1503 vm_page_activate(m);
1504 vm_page_unlock(m);
1505 vm_page_sunbusy(m);
1506 }
1507 } else if (bp->b_iocmd == BIO_READ) {
1508 /*
1509 * NOTE: for reads, m->dirty will probably be
1510 * overridden by the original caller of getpages so
1511 * we cannot set them in order to free the underlying
1512 * swap in a low-swap situation. I don't think we'd
1513 * want to do that anyway, but it was an optimization
1514 * that existed in the old swapper for a time before
1515 * it got ripped out due to precisely this problem.
1516 */
1517 KASSERT(!pmap_page_is_mapped(m),
1518 ("swp_pager_async_iodone: page %p is mapped", m));
1519 KASSERT(m->dirty == 0,
1520 ("swp_pager_async_iodone: page %p is dirty", m));
1521 m->valid = VM_PAGE_BITS_ALL;
1522 } else {
1523 /*
1524 * For write success, clear the dirty
1525 * status, then finish the I/O ( which decrements the
1526 * busy count and possibly wakes waiter's up ).
1527 */
1528 KASSERT(!pmap_page_is_write_mapped(m),
1529 ("swp_pager_async_iodone: page %p is not write"
1530 " protected", m));
1531 vm_page_undirty(m);
1532 vm_page_sunbusy(m);
1533 if (vm_page_count_severe()) {
1534 vm_page_lock(m);
1535 vm_page_try_to_cache(m);
1536 vm_page_unlock(m);
1537 }
1538 }
1539 }
1540
1541 /*
1542 * adjust pip. NOTE: the original parent may still have its own
1543 * pip refs on the object.
1544 */
1545 if (object != NULL) {
1546 vm_object_pip_wakeupn(object, bp->b_npages);
1547 VM_OBJECT_WUNLOCK(object);
1548 }
1549
1550 /*
1551 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1552 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1553 * trigger a KASSERT in relpbuf().
1554 */
1555 if (bp->b_vp) {
1556 bp->b_vp = NULL;
1557 bp->b_bufobj = NULL;
1558 }
1559 /*
1560 * release the physical I/O buffer
1561 */
1562 relpbuf(
1563 bp,
1564 ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1565 ((bp->b_flags & B_ASYNC) ?
1566 &nsw_wcount_async :
1567 &nsw_wcount_sync
1568 )
1569 )
1570 );
1571 }
1572
1573 /*
1574 * swap_pager_isswapped:
1575 *
1576 * Return 1 if at least one page in the given object is paged
1577 * out to the given swap device.
1578 *
1579 * This routine may not sleep.
1580 */
1581 int
1582 swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
1583 {
1584 daddr_t index = 0;
1585 int bcount;
1586 int i;
1587
1588 VM_OBJECT_ASSERT_WLOCKED(object);
1589 if (object->type != OBJT_SWAP)
1590 return (0);
1591
1592 mtx_lock(&swhash_mtx);
1593 for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
1594 struct swblock *swap;
1595
1596 if ((swap = *swp_pager_hash(object, index)) != NULL) {
1597 for (i = 0; i < SWAP_META_PAGES; ++i) {
1598 if (swp_pager_isondev(swap->swb_pages[i], sp)) {
1599 mtx_unlock(&swhash_mtx);
1600 return (1);
1601 }
1602 }
1603 }
1604 index += SWAP_META_PAGES;
1605 }
1606 mtx_unlock(&swhash_mtx);
1607 return (0);
1608 }
1609
1610 /*
1611 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
1612 *
1613 * This routine dissociates the page at the given index within a
1614 * swap block from its backing store, paging it in if necessary.
1615 * If the page is paged in, it is placed in the inactive queue,
1616 * since it had its backing store ripped out from under it.
1617 * We also attempt to swap in all other pages in the swap block,
1618 * we only guarantee that the one at the specified index is
1619 * paged in.
1620 *
1621 * XXX - The code to page the whole block in doesn't work, so we
1622 * revert to the one-by-one behavior for now. Sigh.
1623 */
1624 static inline void
1625 swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
1626 {
1627 vm_page_t m;
1628
1629 vm_object_pip_add(object, 1);
1630 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
1631 if (m->valid == VM_PAGE_BITS_ALL) {
1632 vm_object_pip_wakeup(object);
1633 vm_page_dirty(m);
1634 vm_page_lock(m);
1635 vm_page_activate(m);
1636 vm_page_unlock(m);
1637 vm_page_xunbusy(m);
1638 vm_pager_page_unswapped(m);
1639 return;
1640 }
1641
1642 if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
1643 panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
1644 vm_object_pip_wakeup(object);
1645 vm_page_dirty(m);
1646 vm_page_lock(m);
1647 vm_page_deactivate(m);
1648 vm_page_unlock(m);
1649 vm_page_xunbusy(m);
1650 vm_pager_page_unswapped(m);
1651 }
1652
1653 /*
1654 * swap_pager_swapoff:
1655 *
1656 * Page in all of the pages that have been paged out to the
1657 * given device. The corresponding blocks in the bitmap must be
1658 * marked as allocated and the device must be flagged SW_CLOSING.
1659 * There may be no processes swapped out to the device.
1660 *
1661 * This routine may block.
1662 */
1663 static void
1664 swap_pager_swapoff(struct swdevt *sp)
1665 {
1666 struct swblock *swap;
1667 int i, j, retries;
1668
1669 sx_assert(&swdev_syscall_lock, SA_XLOCKED);
1670
1671 retries = 0;
1672 full_rescan:
1673 mtx_lock(&swhash_mtx);
1674 for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
1675 restart:
1676 for (swap = swhash[i]; swap != NULL; swap = swap->swb_hnext) {
1677 vm_object_t object = swap->swb_object;
1678 vm_pindex_t pindex = swap->swb_index;
1679 for (j = 0; j < SWAP_META_PAGES; ++j) {
1680 if (swp_pager_isondev(swap->swb_pages[j], sp)) {
1681 /* avoid deadlock */
1682 if (!VM_OBJECT_TRYWLOCK(object)) {
1683 break;
1684 } else {
1685 mtx_unlock(&swhash_mtx);
1686 swp_pager_force_pagein(object,
1687 pindex + j);
1688 VM_OBJECT_WUNLOCK(object);
1689 mtx_lock(&swhash_mtx);
1690 goto restart;
1691 }
1692 }
1693 }
1694 }
1695 }
1696 mtx_unlock(&swhash_mtx);
1697 if (sp->sw_used) {
1698 /*
1699 * Objects may be locked or paging to the device being
1700 * removed, so we will miss their pages and need to
1701 * make another pass. We have marked this device as
1702 * SW_CLOSING, so the activity should finish soon.
1703 */
1704 retries++;
1705 if (retries > 100) {
1706 panic("swapoff: failed to locate %d swap blocks",
1707 sp->sw_used);
1708 }
1709 pause("swpoff", hz / 20);
1710 goto full_rescan;
1711 }
1712 }
1713
1714 /************************************************************************
1715 * SWAP META DATA *
1716 ************************************************************************
1717 *
1718 * These routines manipulate the swap metadata stored in the
1719 * OBJT_SWAP object.
1720 *
1721 * Swap metadata is implemented with a global hash and not directly
1722 * linked into the object. Instead the object simply contains
1723 * appropriate tracking counters.
1724 */
1725
1726 /*
1727 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
1728 *
1729 * We first convert the object to a swap object if it is a default
1730 * object.
1731 *
1732 * The specified swapblk is added to the object's swap metadata. If
1733 * the swapblk is not valid, it is freed instead. Any previously
1734 * assigned swapblk is freed.
1735 */
1736 static void
1737 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
1738 {
1739 static volatile int exhausted;
1740 struct swblock *swap;
1741 struct swblock **pswap;
1742 int idx;
1743
1744 VM_OBJECT_ASSERT_WLOCKED(object);
1745 /*
1746 * Convert default object to swap object if necessary
1747 */
1748 if (object->type != OBJT_SWAP) {
1749 object->type = OBJT_SWAP;
1750 object->un_pager.swp.swp_bcount = 0;
1751 KASSERT(object->handle == NULL, ("default pager with handle"));
1752 }
1753
1754 /*
1755 * Locate hash entry. If not found create, but if we aren't adding
1756 * anything just return. If we run out of space in the map we wait
1757 * and, since the hash table may have changed, retry.
1758 */
1759 retry:
1760 mtx_lock(&swhash_mtx);
1761 pswap = swp_pager_hash(object, pindex);
1762
1763 if ((swap = *pswap) == NULL) {
1764 int i;
1765
1766 if (swapblk == SWAPBLK_NONE)
1767 goto done;
1768
1769 swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT |
1770 (curproc == pageproc ? M_USE_RESERVE : 0));
1771 if (swap == NULL) {
1772 mtx_unlock(&swhash_mtx);
1773 VM_OBJECT_WUNLOCK(object);
1774 if (uma_zone_exhausted(swap_zone)) {
1775 if (atomic_cmpset_int(&exhausted, 0, 1))
1776 printf("swap zone exhausted, "
1777 "increase kern.maxswzone\n");
1778 vm_pageout_oom(VM_OOM_SWAPZ);
1779 pause("swzonex", 10);
1780 } else
1781 VM_WAIT;
1782 VM_OBJECT_WLOCK(object);
1783 goto retry;
1784 }
1785
1786 if (atomic_cmpset_int(&exhausted, 1, 0))
1787 printf("swap zone ok\n");
1788
1789 swap->swb_hnext = NULL;
1790 swap->swb_object = object;
1791 swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
1792 swap->swb_count = 0;
1793
1794 ++object->un_pager.swp.swp_bcount;
1795
1796 for (i = 0; i < SWAP_META_PAGES; ++i)
1797 swap->swb_pages[i] = SWAPBLK_NONE;
1798 }
1799
1800 /*
1801 * Delete prior contents of metadata
1802 */
1803 idx = pindex & SWAP_META_MASK;
1804
1805 if (swap->swb_pages[idx] != SWAPBLK_NONE) {
1806 swp_pager_freeswapspace(swap->swb_pages[idx], 1);
1807 --swap->swb_count;
1808 }
1809
1810 /*
1811 * Enter block into metadata
1812 */
1813 swap->swb_pages[idx] = swapblk;
1814 if (swapblk != SWAPBLK_NONE)
1815 ++swap->swb_count;
1816 done:
1817 mtx_unlock(&swhash_mtx);
1818 }
1819
1820 /*
1821 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1822 *
1823 * The requested range of blocks is freed, with any associated swap
1824 * returned to the swap bitmap.
1825 *
1826 * This routine will free swap metadata structures as they are cleaned
1827 * out. This routine does *NOT* operate on swap metadata associated
1828 * with resident pages.
1829 */
1830 static void
1831 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1832 {
1833
1834 VM_OBJECT_ASSERT_LOCKED(object);
1835 if (object->type != OBJT_SWAP)
1836 return;
1837
1838 while (count > 0) {
1839 struct swblock **pswap;
1840 struct swblock *swap;
1841
1842 mtx_lock(&swhash_mtx);
1843 pswap = swp_pager_hash(object, index);
1844
1845 if ((swap = *pswap) != NULL) {
1846 daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1847
1848 if (v != SWAPBLK_NONE) {
1849 swp_pager_freeswapspace(v, 1);
1850 swap->swb_pages[index & SWAP_META_MASK] =
1851 SWAPBLK_NONE;
1852 if (--swap->swb_count == 0) {
1853 *pswap = swap->swb_hnext;
1854 uma_zfree(swap_zone, swap);
1855 --object->un_pager.swp.swp_bcount;
1856 }
1857 }
1858 --count;
1859 ++index;
1860 } else {
1861 int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1862 count -= n;
1863 index += n;
1864 }
1865 mtx_unlock(&swhash_mtx);
1866 }
1867 }
1868
1869 /*
1870 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1871 *
1872 * This routine locates and destroys all swap metadata associated with
1873 * an object.
1874 */
1875 static void
1876 swp_pager_meta_free_all(vm_object_t object)
1877 {
1878 daddr_t index = 0;
1879
1880 VM_OBJECT_ASSERT_WLOCKED(object);
1881 if (object->type != OBJT_SWAP)
1882 return;
1883
1884 while (object->un_pager.swp.swp_bcount) {
1885 struct swblock **pswap;
1886 struct swblock *swap;
1887
1888 mtx_lock(&swhash_mtx);
1889 pswap = swp_pager_hash(object, index);
1890 if ((swap = *pswap) != NULL) {
1891 int i;
1892
1893 for (i = 0; i < SWAP_META_PAGES; ++i) {
1894 daddr_t v = swap->swb_pages[i];
1895 if (v != SWAPBLK_NONE) {
1896 --swap->swb_count;
1897 swp_pager_freeswapspace(v, 1);
1898 }
1899 }
1900 if (swap->swb_count != 0)
1901 panic("swap_pager_meta_free_all: swb_count != 0");
1902 *pswap = swap->swb_hnext;
1903 uma_zfree(swap_zone, swap);
1904 --object->un_pager.swp.swp_bcount;
1905 }
1906 mtx_unlock(&swhash_mtx);
1907 index += SWAP_META_PAGES;
1908 }
1909 }
1910
1911 /*
1912 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
1913 *
1914 * This routine is capable of looking up, popping, or freeing
1915 * swapblk assignments in the swap meta data or in the vm_page_t.
1916 * The routine typically returns the swapblk being looked-up, or popped,
1917 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1918 * was invalid. This routine will automatically free any invalid
1919 * meta-data swapblks.
1920 *
1921 * It is not possible to store invalid swapblks in the swap meta data
1922 * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking.
1923 *
1924 * When acting on a busy resident page and paging is in progress, we
1925 * have to wait until paging is complete but otherwise can act on the
1926 * busy page.
1927 *
1928 * SWM_FREE remove and free swap block from metadata
1929 * SWM_POP remove from meta data but do not free.. pop it out
1930 */
1931 static daddr_t
1932 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
1933 {
1934 struct swblock **pswap;
1935 struct swblock *swap;
1936 daddr_t r1;
1937 int idx;
1938
1939 VM_OBJECT_ASSERT_LOCKED(object);
1940 /*
1941 * The meta data only exists of the object is OBJT_SWAP
1942 * and even then might not be allocated yet.
1943 */
1944 if (object->type != OBJT_SWAP)
1945 return (SWAPBLK_NONE);
1946
1947 r1 = SWAPBLK_NONE;
1948 mtx_lock(&swhash_mtx);
1949 pswap = swp_pager_hash(object, pindex);
1950
1951 if ((swap = *pswap) != NULL) {
1952 idx = pindex & SWAP_META_MASK;
1953 r1 = swap->swb_pages[idx];
1954
1955 if (r1 != SWAPBLK_NONE) {
1956 if (flags & SWM_FREE) {
1957 swp_pager_freeswapspace(r1, 1);
1958 r1 = SWAPBLK_NONE;
1959 }
1960 if (flags & (SWM_FREE|SWM_POP)) {
1961 swap->swb_pages[idx] = SWAPBLK_NONE;
1962 if (--swap->swb_count == 0) {
1963 *pswap = swap->swb_hnext;
1964 uma_zfree(swap_zone, swap);
1965 --object->un_pager.swp.swp_bcount;
1966 }
1967 }
1968 }
1969 }
1970 mtx_unlock(&swhash_mtx);
1971 return (r1);
1972 }
1973
1974 /*
1975 * System call swapon(name) enables swapping on device name,
1976 * which must be in the swdevsw. Return EBUSY
1977 * if already swapping on this device.
1978 */
1979 #ifndef _SYS_SYSPROTO_H_
1980 struct swapon_args {
1981 char *name;
1982 };
1983 #endif
1984
1985 /*
1986 * MPSAFE
1987 */
1988 /* ARGSUSED */
1989 int
1990 sys_swapon(struct thread *td, struct swapon_args *uap)
1991 {
1992 struct vattr attr;
1993 struct vnode *vp;
1994 struct nameidata nd;
1995 int error;
1996
1997 error = priv_check(td, PRIV_SWAPON);
1998 if (error)
1999 return (error);
2000
2001 sx_xlock(&swdev_syscall_lock);
2002
2003 /*
2004 * Swap metadata may not fit in the KVM if we have physical
2005 * memory of >1GB.
2006 */
2007 if (swap_zone == NULL) {
2008 error = ENOMEM;
2009 goto done;
2010 }
2011
2012 NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
2013 uap->name, td);
2014 error = namei(&nd);
2015 if (error)
2016 goto done;
2017
2018 NDFREE(&nd, NDF_ONLY_PNBUF);
2019 vp = nd.ni_vp;
2020
2021 if (vn_isdisk(vp, &error)) {
2022 error = swapongeom(vp);
2023 } else if (vp->v_type == VREG &&
2024 (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2025 (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2026 /*
2027 * Allow direct swapping to NFS regular files in the same
2028 * way that nfs_mountroot() sets up diskless swapping.
2029 */
2030 error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2031 }
2032
2033 if (error)
2034 vrele(vp);
2035 done:
2036 sx_xunlock(&swdev_syscall_lock);
2037 return (error);
2038 }
2039
2040 /*
2041 * Check that the total amount of swap currently configured does not
2042 * exceed half the theoretical maximum. If it does, print a warning
2043 * message and return -1; otherwise, return 0.
2044 */
2045 static int
2046 swapon_check_swzone(unsigned long npages)
2047 {
2048 unsigned long maxpages;
2049
2050 /* absolute maximum we can handle assuming 100% efficiency */
2051 maxpages = uma_zone_get_max(swap_zone) * SWAP_META_PAGES;
2052
2053 /* recommend using no more than half that amount */
2054 if (npages > maxpages / 2) {
2055 printf("warning: total configured swap (%lu pages) "
2056 "exceeds maximum recommended amount (%lu pages).\n",
2057 npages, maxpages / 2);
2058 printf("warning: increase kern.maxswzone "
2059 "or reduce amount of swap.\n");
2060 return (-1);
2061 }
2062 return (0);
2063 }
2064
2065 static void
2066 swaponsomething(struct vnode *vp, void *id, u_long nblks,
2067 sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2068 {
2069 struct swdevt *sp, *tsp;
2070 swblk_t dvbase;
2071 u_long mblocks;
2072
2073 /*
2074 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2075 * First chop nblks off to page-align it, then convert.
2076 *
2077 * sw->sw_nblks is in page-sized chunks now too.
2078 */
2079 nblks &= ~(ctodb(1) - 1);
2080 nblks = dbtoc(nblks);
2081
2082 /*
2083 * If we go beyond this, we get overflows in the radix
2084 * tree bitmap code.
2085 */
2086 mblocks = 0x40000000 / BLIST_META_RADIX;
2087 if (nblks > mblocks) {
2088 printf(
2089 "WARNING: reducing swap size to maximum of %luMB per unit\n",
2090 mblocks / 1024 / 1024 * PAGE_SIZE);
2091 nblks = mblocks;
2092 }
2093
2094 sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2095 sp->sw_vp = vp;
2096 sp->sw_id = id;
2097 sp->sw_dev = dev;
2098 sp->sw_flags = 0;
2099 sp->sw_nblks = nblks;
2100 sp->sw_used = 0;
2101 sp->sw_strategy = strategy;
2102 sp->sw_close = close;
2103 sp->sw_flags = flags;
2104
2105 sp->sw_blist = blist_create(nblks, M_WAITOK);
2106 /*
2107 * Do not free the first two block in order to avoid overwriting
2108 * any bsd label at the front of the partition
2109 */
2110 blist_free(sp->sw_blist, 2, nblks - 2);
2111
2112 dvbase = 0;
2113 mtx_lock(&sw_dev_mtx);
2114 TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2115 if (tsp->sw_end >= dvbase) {
2116 /*
2117 * We put one uncovered page between the devices
2118 * in order to definitively prevent any cross-device
2119 * I/O requests
2120 */
2121 dvbase = tsp->sw_end + 1;
2122 }
2123 }
2124 sp->sw_first = dvbase;
2125 sp->sw_end = dvbase + nblks;
2126 TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2127 nswapdev++;
2128 swap_pager_avail += nblks;
2129 swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
2130 swapon_check_swzone(swap_total / PAGE_SIZE);
2131 swp_sizecheck();
2132 mtx_unlock(&sw_dev_mtx);
2133 }
2134
2135 /*
2136 * SYSCALL: swapoff(devname)
2137 *
2138 * Disable swapping on the given device.
2139 *
2140 * XXX: Badly designed system call: it should use a device index
2141 * rather than filename as specification. We keep sw_vp around
2142 * only to make this work.
2143 */
2144 #ifndef _SYS_SYSPROTO_H_
2145 struct swapoff_args {
2146 char *name;
2147 };
2148 #endif
2149
2150 /*
2151 * MPSAFE
2152 */
2153 /* ARGSUSED */
2154 int
2155 sys_swapoff(struct thread *td, struct swapoff_args *uap)
2156 {
2157 struct vnode *vp;
2158 struct nameidata nd;
2159 struct swdevt *sp;
2160 int error;
2161
2162 error = priv_check(td, PRIV_SWAPOFF);
2163 if (error)
2164 return (error);
2165
2166 sx_xlock(&swdev_syscall_lock);
2167
2168 NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2169 td);
2170 error = namei(&nd);
2171 if (error)
2172 goto done;
2173 NDFREE(&nd, NDF_ONLY_PNBUF);
2174 vp = nd.ni_vp;
2175
2176 mtx_lock(&sw_dev_mtx);
2177 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2178 if (sp->sw_vp == vp)
2179 break;
2180 }
2181 mtx_unlock(&sw_dev_mtx);
2182 if (sp == NULL) {
2183 error = EINVAL;
2184 goto done;
2185 }
2186 error = swapoff_one(sp, td->td_ucred);
2187 done:
2188 sx_xunlock(&swdev_syscall_lock);
2189 return (error);
2190 }
2191
2192 static int
2193 swapoff_one(struct swdevt *sp, struct ucred *cred)
2194 {
2195 u_long nblks, dvbase;
2196 #ifdef MAC
2197 int error;
2198 #endif
2199
2200 sx_assert(&swdev_syscall_lock, SA_XLOCKED);
2201 #ifdef MAC
2202 (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
2203 error = mac_system_check_swapoff(cred, sp->sw_vp);
2204 (void) VOP_UNLOCK(sp->sw_vp, 0);
2205 if (error != 0)
2206 return (error);
2207 #endif
2208 nblks = sp->sw_nblks;
2209
2210 /*
2211 * We can turn off this swap device safely only if the
2212 * available virtual memory in the system will fit the amount
2213 * of data we will have to page back in, plus an epsilon so
2214 * the system doesn't become critically low on swap space.
2215 */
2216 if (vm_cnt.v_free_count + vm_cnt.v_cache_count + swap_pager_avail <
2217 nblks + nswap_lowat) {
2218 return (ENOMEM);
2219 }
2220
2221 /*
2222 * Prevent further allocations on this device.
2223 */
2224 mtx_lock(&sw_dev_mtx);
2225 sp->sw_flags |= SW_CLOSING;
2226 for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
2227 swap_pager_avail -= blist_fill(sp->sw_blist,
2228 dvbase, dmmax);
2229 }
2230 swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
2231 mtx_unlock(&sw_dev_mtx);
2232
2233 /*
2234 * Page in the contents of the device and close it.
2235 */
2236 swap_pager_swapoff(sp);
2237
2238 sp->sw_close(curthread, sp);
2239 mtx_lock(&sw_dev_mtx);
2240 sp->sw_id = NULL;
2241 TAILQ_REMOVE(&swtailq, sp, sw_list);
2242 nswapdev--;
2243 if (nswapdev == 0) {
2244 swap_pager_full = 2;
2245 swap_pager_almost_full = 1;
2246 }
2247 if (swdevhd == sp)
2248 swdevhd = NULL;
2249 mtx_unlock(&sw_dev_mtx);
2250 blist_destroy(sp->sw_blist);
2251 free(sp, M_VMPGDATA);
2252 return (0);
2253 }
2254
2255 void
2256 swapoff_all(void)
2257 {
2258 struct swdevt *sp, *spt;
2259 const char *devname;
2260 int error;
2261
2262 sx_xlock(&swdev_syscall_lock);
2263
2264 mtx_lock(&sw_dev_mtx);
2265 TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
2266 mtx_unlock(&sw_dev_mtx);
2267 if (vn_isdisk(sp->sw_vp, NULL))
2268 devname = devtoname(sp->sw_vp->v_rdev);
2269 else
2270 devname = "[file]";
2271 error = swapoff_one(sp, thread0.td_ucred);
2272 if (error != 0) {
2273 printf("Cannot remove swap device %s (error=%d), "
2274 "skipping.\n", devname, error);
2275 } else if (bootverbose) {
2276 printf("Swap device %s removed.\n", devname);
2277 }
2278 mtx_lock(&sw_dev_mtx);
2279 }
2280 mtx_unlock(&sw_dev_mtx);
2281
2282 sx_xunlock(&swdev_syscall_lock);
2283 }
2284
2285 void
2286 swap_pager_status(int *total, int *used)
2287 {
2288 struct swdevt *sp;
2289
2290 *total = 0;
2291 *used = 0;
2292 mtx_lock(&sw_dev_mtx);
2293 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2294 *total += sp->sw_nblks;
2295 *used += sp->sw_used;
2296 }
2297 mtx_unlock(&sw_dev_mtx);
2298 }
2299
2300 int
2301 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2302 {
2303 struct swdevt *sp;
2304 const char *tmp_devname;
2305 int error, n;
2306
2307 n = 0;
2308 error = ENOENT;
2309 mtx_lock(&sw_dev_mtx);
2310 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2311 if (n != name) {
2312 n++;
2313 continue;
2314 }
2315 xs->xsw_version = XSWDEV_VERSION;
2316 xs->xsw_dev = sp->sw_dev;
2317 xs->xsw_flags = sp->sw_flags;
2318 xs->xsw_nblks = sp->sw_nblks;
2319 xs->xsw_used = sp->sw_used;
2320 if (devname != NULL) {
2321 if (vn_isdisk(sp->sw_vp, NULL))
2322 tmp_devname = devtoname(sp->sw_vp->v_rdev);
2323 else
2324 tmp_devname = "[file]";
2325 strncpy(devname, tmp_devname, len);
2326 }
2327 error = 0;
2328 break;
2329 }
2330 mtx_unlock(&sw_dev_mtx);
2331 return (error);
2332 }
2333
2334 static int
2335 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2336 {
2337 struct xswdev xs;
2338 int error;
2339
2340 if (arg2 != 1) /* name length */
2341 return (EINVAL);
2342 error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2343 if (error != 0)
2344 return (error);
2345 error = SYSCTL_OUT(req, &xs, sizeof(xs));
2346 return (error);
2347 }
2348
2349 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2350 "Number of swap devices");
2351 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
2352 sysctl_vm_swap_info,
2353 "Swap statistics by device");
2354
2355 /*
2356 * vmspace_swap_count() - count the approximate swap usage in pages for a
2357 * vmspace.
2358 *
2359 * The map must be locked.
2360 *
2361 * Swap usage is determined by taking the proportional swap used by
2362 * VM objects backing the VM map. To make up for fractional losses,
2363 * if the VM object has any swap use at all the associated map entries
2364 * count for at least 1 swap page.
2365 */
2366 long
2367 vmspace_swap_count(struct vmspace *vmspace)
2368 {
2369 vm_map_t map;
2370 vm_map_entry_t cur;
2371 vm_object_t object;
2372 long count, n;
2373
2374 map = &vmspace->vm_map;
2375 count = 0;
2376
2377 for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2378 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2379 (object = cur->object.vm_object) != NULL) {
2380 VM_OBJECT_WLOCK(object);
2381 if (object->type == OBJT_SWAP &&
2382 object->un_pager.swp.swp_bcount != 0) {
2383 n = (cur->end - cur->start) / PAGE_SIZE;
2384 count += object->un_pager.swp.swp_bcount *
2385 SWAP_META_PAGES * n / object->size + 1;
2386 }
2387 VM_OBJECT_WUNLOCK(object);
2388 }
2389 }
2390 return (count);
2391 }
2392
2393 /*
2394 * GEOM backend
2395 *
2396 * Swapping onto disk devices.
2397 *
2398 */
2399
2400 static g_orphan_t swapgeom_orphan;
2401
2402 static struct g_class g_swap_class = {
2403 .name = "SWAP",
2404 .version = G_VERSION,
2405 .orphan = swapgeom_orphan,
2406 };
2407
2408 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2409
2410
2411 static void
2412 swapgeom_close_ev(void *arg, int flags)
2413 {
2414 struct g_consumer *cp;
2415
2416 cp = arg;
2417 g_access(cp, -1, -1, 0);
2418 g_detach(cp);
2419 g_destroy_consumer(cp);
2420 }
2421
2422 /*
2423 * Add a reference to the g_consumer for an inflight transaction.
2424 */
2425 static void
2426 swapgeom_acquire(struct g_consumer *cp)
2427 {
2428
2429 mtx_assert(&sw_dev_mtx, MA_OWNED);
2430 cp->index++;
2431 }
2432
2433 /*
2434 * Remove a reference from the g_consumer. Post a close event if all
2435 * references go away, since the function might be called from the
2436 * biodone context.
2437 */
2438 static void
2439 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
2440 {
2441
2442 mtx_assert(&sw_dev_mtx, MA_OWNED);
2443 cp->index--;
2444 if (cp->index == 0) {
2445 if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
2446 sp->sw_id = NULL;
2447 }
2448 }
2449
2450 static void
2451 swapgeom_done(struct bio *bp2)
2452 {
2453 struct swdevt *sp;
2454 struct buf *bp;
2455 struct g_consumer *cp;
2456
2457 bp = bp2->bio_caller2;
2458 cp = bp2->bio_from;
2459 bp->b_ioflags = bp2->bio_flags;
2460 if (bp2->bio_error)
2461 bp->b_ioflags |= BIO_ERROR;
2462 bp->b_resid = bp->b_bcount - bp2->bio_completed;
2463 bp->b_error = bp2->bio_error;
2464 bufdone(bp);
2465 sp = bp2->bio_caller1;
2466 mtx_lock(&sw_dev_mtx);
2467 swapgeom_release(cp, sp);
2468 mtx_unlock(&sw_dev_mtx);
2469 g_destroy_bio(bp2);
2470 }
2471
2472 static void
2473 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2474 {
2475 struct bio *bio;
2476 struct g_consumer *cp;
2477
2478 mtx_lock(&sw_dev_mtx);
2479 cp = sp->sw_id;
2480 if (cp == NULL) {
2481 mtx_unlock(&sw_dev_mtx);
2482 bp->b_error = ENXIO;
2483 bp->b_ioflags |= BIO_ERROR;
2484 bufdone(bp);
2485 return;
2486 }
2487 swapgeom_acquire(cp);
2488 mtx_unlock(&sw_dev_mtx);
2489 if (bp->b_iocmd == BIO_WRITE)
2490 bio = g_new_bio();
2491 else
2492 bio = g_alloc_bio();
2493 if (bio == NULL) {
2494 mtx_lock(&sw_dev_mtx);
2495 swapgeom_release(cp, sp);
2496 mtx_unlock(&sw_dev_mtx);
2497 bp->b_error = ENOMEM;
2498 bp->b_ioflags |= BIO_ERROR;
2499 bufdone(bp);
2500 return;
2501 }
2502
2503 bio->bio_caller1 = sp;
2504 bio->bio_caller2 = bp;
2505 bio->bio_cmd = bp->b_iocmd;
2506 bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2507 bio->bio_length = bp->b_bcount;
2508 bio->bio_done = swapgeom_done;
2509 if (!buf_mapped(bp)) {
2510 bio->bio_ma = bp->b_pages;
2511 bio->bio_data = unmapped_buf;
2512 bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
2513 bio->bio_ma_n = bp->b_npages;
2514 bio->bio_flags |= BIO_UNMAPPED;
2515 } else {
2516 bio->bio_data = bp->b_data;
2517 bio->bio_ma = NULL;
2518 }
2519 g_io_request(bio, cp);
2520 return;
2521 }
2522
2523 static void
2524 swapgeom_orphan(struct g_consumer *cp)
2525 {
2526 struct swdevt *sp;
2527 int destroy;
2528
2529 mtx_lock(&sw_dev_mtx);
2530 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2531 if (sp->sw_id == cp) {
2532 sp->sw_flags |= SW_CLOSING;
2533 break;
2534 }
2535 }
2536 /*
2537 * Drop reference we were created with. Do directly since we're in a
2538 * special context where we don't have to queue the call to
2539 * swapgeom_close_ev().
2540 */
2541 cp->index--;
2542 destroy = ((sp != NULL) && (cp->index == 0));
2543 if (destroy)
2544 sp->sw_id = NULL;
2545 mtx_unlock(&sw_dev_mtx);
2546 if (destroy)
2547 swapgeom_close_ev(cp, 0);
2548 }
2549
2550 static void
2551 swapgeom_close(struct thread *td, struct swdevt *sw)
2552 {
2553 struct g_consumer *cp;
2554
2555 mtx_lock(&sw_dev_mtx);
2556 cp = sw->sw_id;
2557 sw->sw_id = NULL;
2558 mtx_unlock(&sw_dev_mtx);
2559
2560 /*
2561 * swapgeom_close() may be called from the biodone context,
2562 * where we cannot perform topology changes. Delegate the
2563 * work to the events thread.
2564 */
2565 if (cp != NULL)
2566 g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2567 }
2568
2569 static int
2570 swapongeom_locked(struct cdev *dev, struct vnode *vp)
2571 {
2572 struct g_provider *pp;
2573 struct g_consumer *cp;
2574 static struct g_geom *gp;
2575 struct swdevt *sp;
2576 u_long nblks;
2577 int error;
2578
2579 pp = g_dev_getprovider(dev);
2580 if (pp == NULL)
2581 return (ENODEV);
2582 mtx_lock(&sw_dev_mtx);
2583 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2584 cp = sp->sw_id;
2585 if (cp != NULL && cp->provider == pp) {
2586 mtx_unlock(&sw_dev_mtx);
2587 return (EBUSY);
2588 }
2589 }
2590 mtx_unlock(&sw_dev_mtx);
2591 if (gp == NULL)
2592 gp = g_new_geomf(&g_swap_class, "swap");
2593 cp = g_new_consumer(gp);
2594 cp->index = 1; /* Number of active I/Os, plus one for being active. */
2595 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2596 g_attach(cp, pp);
2597 /*
2598 * XXX: Every time you think you can improve the margin for
2599 * footshooting, somebody depends on the ability to do so:
2600 * savecore(8) wants to write to our swapdev so we cannot
2601 * set an exclusive count :-(
2602 */
2603 error = g_access(cp, 1, 1, 0);
2604 if (error != 0) {
2605 g_detach(cp);
2606 g_destroy_consumer(cp);
2607 return (error);
2608 }
2609 nblks = pp->mediasize / DEV_BSIZE;
2610 swaponsomething(vp, cp, nblks, swapgeom_strategy,
2611 swapgeom_close, dev2udev(dev),
2612 (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
2613 return (0);
2614 }
2615
2616 static int
2617 swapongeom(struct vnode *vp)
2618 {
2619 int error;
2620
2621 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2622 if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
2623 error = ENOENT;
2624 } else {
2625 g_topology_lock();
2626 error = swapongeom_locked(vp->v_rdev, vp);
2627 g_topology_unlock();
2628 }
2629 VOP_UNLOCK(vp, 0);
2630 return (error);
2631 }
2632
2633 /*
2634 * VNODE backend
2635 *
2636 * This is used mainly for network filesystem (read: probably only tested
2637 * with NFS) swapfiles.
2638 *
2639 */
2640
2641 static void
2642 swapdev_strategy(struct buf *bp, struct swdevt *sp)
2643 {
2644 struct vnode *vp2;
2645
2646 bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2647
2648 vp2 = sp->sw_id;
2649 vhold(vp2);
2650 if (bp->b_iocmd == BIO_WRITE) {
2651 if (bp->b_bufobj)
2652 bufobj_wdrop(bp->b_bufobj);
2653 bufobj_wref(&vp2->v_bufobj);
2654 }
2655 if (bp->b_bufobj != &vp2->v_bufobj)
2656 bp->b_bufobj = &vp2->v_bufobj;
2657 bp->b_vp = vp2;
2658 bp->b_iooffset = dbtob(bp->b_blkno);
2659 bstrategy(bp);
2660 return;
2661 }
2662
2663 static void
2664 swapdev_close(struct thread *td, struct swdevt *sp)
2665 {
2666
2667 VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2668 vrele(sp->sw_vp);
2669 }
2670
2671
2672 static int
2673 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2674 {
2675 struct swdevt *sp;
2676 int error;
2677
2678 if (nblks == 0)
2679 return (ENXIO);
2680 mtx_lock(&sw_dev_mtx);
2681 TAILQ_FOREACH(sp, &swtailq, sw_list) {
2682 if (sp->sw_id == vp) {
2683 mtx_unlock(&sw_dev_mtx);
2684 return (EBUSY);
2685 }
2686 }
2687 mtx_unlock(&sw_dev_mtx);
2688
2689 (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2690 #ifdef MAC
2691 error = mac_system_check_swapon(td->td_ucred, vp);
2692 if (error == 0)
2693 #endif
2694 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
2695 (void) VOP_UNLOCK(vp, 0);
2696 if (error)
2697 return (error);
2698
2699 swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2700 NODEV, 0);
2701 return (0);
2702 }
2703
2704 static int
2705 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
2706 {
2707 int error, new, n;
2708
2709 new = nsw_wcount_async_max;
2710 error = sysctl_handle_int(oidp, &new, 0, req);
2711 if (error != 0 || req->newptr == NULL)
2712 return (error);
2713
2714 if (new > nswbuf / 2 || new < 1)
2715 return (EINVAL);
2716
2717 mtx_lock(&pbuf_mtx);
2718 while (nsw_wcount_async_max != new) {
2719 /*
2720 * Adjust difference. If the current async count is too low,
2721 * we will need to sqeeze our update slowly in. Sleep with a
2722 * higher priority than getpbuf() to finish faster.
2723 */
2724 n = new - nsw_wcount_async_max;
2725 if (nsw_wcount_async + n >= 0) {
2726 nsw_wcount_async += n;
2727 nsw_wcount_async_max += n;
2728 wakeup(&nsw_wcount_async);
2729 } else {
2730 nsw_wcount_async_max -= nsw_wcount_async;
2731 nsw_wcount_async = 0;
2732 msleep(&nsw_wcount_async, &pbuf_mtx, PSWP,
2733 "swpsysctl", 0);
2734 }
2735 }
2736 mtx_unlock(&pbuf_mtx);
2737
2738 return (0);
2739 }
Cache object: cf99dc9baf2b28ed380160d9bd66e8d1
|