sys/uvm/uvm_swap.c
1 /* $NetBSD: uvm_swap.c,v 1.113.2.2 2006/12/09 11:53:42 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 1995, 1996, 1997 Matthew R. Green
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
31 * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.113.2.2 2006/12/09 11:53:42 bouyer Exp $");
36
37 #include "fs_nfs.h"
38 #include "opt_uvmhist.h"
39 #include "opt_compat_netbsd.h"
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/buf.h>
45 #include <sys/bufq.h>
46 #include <sys/conf.h>
47 #include <sys/proc.h>
48 #include <sys/namei.h>
49 #include <sys/disklabel.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/vnode.h>
54 #include <sys/file.h>
55 #include <sys/vmem.h>
56 #include <sys/blist.h>
57 #include <sys/mount.h>
58 #include <sys/pool.h>
59 #include <sys/sa.h>
60 #include <sys/syscallargs.h>
61 #include <sys/swap.h>
62 #include <sys/kauth.h>
63
64 #include <uvm/uvm.h>
65
66 #include <miscfs/specfs/specdev.h>
67
68 /*
69 * uvm_swap.c: manage configuration and i/o to swap space.
70 */
71
72 /*
73 * swap space is managed in the following way:
74 *
75 * each swap partition or file is described by a "swapdev" structure.
76 * each "swapdev" structure contains a "swapent" structure which contains
77 * information that is passed up to the user (via system calls).
78 *
79 * each swap partition is assigned a "priority" (int) which controls
80 * swap partition usage.
81 *
82 * the system maintains a global data structure describing all swap
83 * partitions/files. there is a sorted LIST of "swappri" structures
84 * which describe "swapdev"'s at that priority. this LIST is headed
85 * by the "swap_priority" global var. each "swappri" contains a
86 * CIRCLEQ of "swapdev" structures at that priority.
87 *
88 * locking:
89 * - swap_syscall_lock (sleep lock): this lock serializes the swapctl
90 * system call and prevents the swap priority list from changing
91 * while we are in the middle of a system call (e.g. SWAP_STATS).
92 * - uvm.swap_data_lock (simple_lock): this lock protects all swap data
93 * structures including the priority list, the swapdev structures,
94 * and the swapmap arena.
95 *
96 * each swap device has the following info:
97 * - swap device in use (could be disabled, preventing future use)
98 * - swap enabled (allows new allocations on swap)
99 * - map info in /dev/drum
100 * - vnode pointer
101 * for swap files only:
102 * - block size
103 * - max byte count in buffer
104 * - buffer
105 *
106 * userland controls and configures swap with the swapctl(2) system call.
107 * the sys_swapctl performs the following operations:
108 * [1] SWAP_NSWAP: returns the number of swap devices currently configured
109 * [2] SWAP_STATS: given a pointer to an array of swapent structures
110 * (passed in via "arg") of a size passed in via "misc" ... we load
111 * the current swap config into the array. The actual work is done
112 * in the uvm_swap_stats(9) function.
113 * [3] SWAP_ON: given a pathname in arg (could be device or file) and a
114 * priority in "misc", start swapping on it.
115 * [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
116 * [5] SWAP_CTL: changes the priority of a swap device (new priority in
117 * "misc")
118 */
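/*
 * [illustrative sketch added by the editor, not part of uvm_swap.c]
 * A minimal userland view of the swapctl(2) interface described above:
 * query the number of configured swap devices with SWAP_NSWAP, then
 * fetch their stats with SWAP_STATS.  Assumes the standard NetBSD
 * userland headers; error handling is abbreviated.
 */
#include <sys/swap.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
print_swap_stats(void)
{
	struct swapent *sep;
	int nswap, i;

	/* [1] SWAP_NSWAP: number of configured swap devices */
	nswap = swapctl(SWAP_NSWAP, NULL, 0);
	if (nswap <= 0)
		return;

	/* [2] SWAP_STATS: load the current config into an array */
	sep = malloc(nswap * sizeof(*sep));
	if (sep == NULL)
		return;
	nswap = swapctl(SWAP_STATS, sep, nswap);
	for (i = 0; i < nswap; i++)
		printf("%s: %d blocks, %d in use, priority %d\n",
		    sep[i].se_path, sep[i].se_nblks,
		    sep[i].se_inuse, sep[i].se_priority);
	free(sep);
}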
119
120 /*
121 * swapdev: describes a single swap partition/file
122 *
123 * note the following should be true:
124 * swd_inuse <= swd_nblks [number of blocks in use is <= total blocks]
125 * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
126 */
127 struct swapdev {
128 struct oswapent swd_ose;
129 #define swd_dev swd_ose.ose_dev /* device id */
130 #define swd_flags swd_ose.ose_flags /* flags:inuse/enable/fake */
131 #define swd_priority swd_ose.ose_priority /* our priority */
132 /* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
133 char *swd_path; /* saved pathname of device */
134 int swd_pathlen; /* length of pathname */
135 int swd_npages; /* #pages we can use */
136 int swd_npginuse; /* #pages in use */
137 int swd_npgbad; /* #pages bad */
138 int swd_drumoffset; /* page0 offset in drum */
139 int swd_drumsize; /* #pages in drum */
140 blist_t swd_blist; /* blist for this swapdev */
141 struct vnode *swd_vp; /* backing vnode */
142 CIRCLEQ_ENTRY(swapdev) swd_next; /* priority circleq */
143
144 int swd_bsize; /* blocksize (bytes) */
145 int swd_maxactive; /* max active i/o reqs */
146 struct bufq_state *swd_tab; /* buffer list */
147 int swd_active; /* number of active buffers */
148 };
149
150 /*
151 * swap device priority entry; the list is kept sorted on `spi_priority'.
152 */
153 struct swappri {
154 int spi_priority; /* priority */
155 CIRCLEQ_HEAD(spi_swapdev, swapdev) spi_swapdev;
156 /* circleq of swapdevs at this priority */
157 LIST_ENTRY(swappri) spi_swappri; /* global list of pri's */
158 };
159
160 /*
161 * The following two structures are used to keep track of data transfers
162 * on swap devices associated with regular files.
163 * NOTE: this code is more or less a copy of vnd.c; we use the same
164 * structure names here to ease porting..
165 */
166 struct vndxfer {
167 struct buf *vx_bp; /* Pointer to parent buffer */
168 struct swapdev *vx_sdp;
169 int vx_error;
170 int vx_pending; /* # of pending aux buffers */
171 int vx_flags;
172 #define VX_BUSY 1
173 #define VX_DEAD 2
174 };
175
176 struct vndbuf {
177 struct buf vb_buf;
178 struct vndxfer *vb_xfer;
179 };
180
181
182 /*
183 * We keep a pool of vndbuf's and vndxfer structures.
184 */
185 POOL_INIT(vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0, "swp vnx", NULL);
186 POOL_INIT(vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd", NULL);
187
188 #define getvndxfer(vnx) do { \
189 int sp = splbio(); \
190 vnx = pool_get(&vndxfer_pool, PR_WAITOK); \
191 splx(sp); \
192 } while (/*CONSTCOND*/ 0)
193
194 #define putvndxfer(vnx) { \
195 pool_put(&vndxfer_pool, (void *)(vnx)); \
196 }
197
198 #define getvndbuf(vbp) do { \
199 int sp = splbio(); \
200 vbp = pool_get(&vndbuf_pool, PR_WAITOK); \
201 splx(sp); \
202 } while (/*CONSTCOND*/ 0)
203
204 #define putvndbuf(vbp) { \
205 pool_put(&vndbuf_pool, (void *)(vbp)); \
206 }
207
208 /*
209 * local variables
210 */
211 MALLOC_DEFINE(M_VMSWAP, "VM swap", "VM swap structures");
212 static vmem_t *swapmap; /* controls the mapping of /dev/drum */
213
214 /* list of all active swap devices [by priority] */
215 LIST_HEAD(swap_priority, swappri);
216 static struct swap_priority swap_priority;
217
218 /* locks */
219 static struct lock swap_syscall_lock;
220
221 /*
222 * prototypes
223 */
224 static struct swapdev *swapdrum_getsdp(int);
225
226 static struct swapdev *swaplist_find(struct vnode *, int);
227 static void swaplist_insert(struct swapdev *,
228 struct swappri *, int);
229 static void swaplist_trim(void);
230
231 static int swap_on(struct lwp *, struct swapdev *);
232 static int swap_off(struct lwp *, struct swapdev *);
233
234 static void uvm_swap_stats_locked(int, struct swapent *, int, register_t *);
235
236 static void sw_reg_strategy(struct swapdev *, struct buf *, int);
237 static void sw_reg_iodone(struct buf *);
238 static void sw_reg_start(struct swapdev *);
239
240 static int uvm_swap_io(struct vm_page **, int, int, int);
241
242 /*
243 * uvm_swap_init: init the swap system data structures and locks
244 *
245 * => called at boot time from init_main.c after the filesystems
246 * are brought up (which happens after uvm_init())
247 */
248 void
249 uvm_swap_init(void)
250 {
251 UVMHIST_FUNC("uvm_swap_init");
252
253 UVMHIST_CALLED(pdhist);
254 /*
255 * first, init the swap list, its counter, and its lock.
256 * then get a handle on the vnode for /dev/drum by using
257 * its dev_t number ("swapdev", from MD conf.c).
258 */
259
260 LIST_INIT(&swap_priority);
261 uvmexp.nswapdev = 0;
262 lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
263 simple_lock_init(&uvm.swap_data_lock);
264
265 if (bdevvp(swapdev, &swapdev_vp))
266 panic("uvm_swap_init: can't get vnode for swap device");
267
268 /*
269 * create swap block resource map to map /dev/drum. the range
270 * from 1 to INT_MAX allows 2 gigablocks of swap space. note
271 * that block 0 is reserved (used to indicate an allocation
272 * failure, or no allocation).
273 */
274 swapmap = vmem_create("swapmap", 1, INT_MAX - 1, 1, NULL, NULL, NULL, 0,
275 VM_NOSLEEP);
276 if (swapmap == 0)
277 panic("uvm_swap_init: extent_create failed");
278
279 /*
280 * done!
281 */
282 UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
283 }
284
285 /*
286 * swaplist functions: functions that operate on the list of swap
287 * devices on the system.
288 */
289
290 /*
291 * swaplist_insert: insert swap device "sdp" into the global list
292 *
293 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
294 * => caller must provide a newly malloc'd swappri structure (we will
295 * FREE it if we don't need it... this is to prevent malloc from blocking
296 * here while adding swap)
297 */
298 static void
299 swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
300 {
301 struct swappri *spp, *pspp;
302 UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
303
304 /*
305 * find entry at or after which to insert the new device.
306 */
307 pspp = NULL;
308 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
309 if (priority <= spp->spi_priority)
310 break;
311 pspp = spp;
312 }
313
314 /*
315 * new priority?
316 */
317 if (spp == NULL || spp->spi_priority != priority) {
318 spp = newspp; /* use newspp! */
319 UVMHIST_LOG(pdhist, "created new swappri = %d",
320 priority, 0, 0, 0);
321
322 spp->spi_priority = priority;
323 CIRCLEQ_INIT(&spp->spi_swapdev);
324
325 if (pspp)
326 LIST_INSERT_AFTER(pspp, spp, spi_swappri);
327 else
328 LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
329 } else {
330 /* we don't need a new priority structure, free it */
331 FREE(newspp, M_VMSWAP);
332 }
333
334 /*
335 * priority found (or created). now insert on the priority's
336 * circleq list and bump the total number of swapdevs.
337 */
338 sdp->swd_priority = priority;
339 CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
340 uvmexp.nswapdev++;
341 }
342
343 /*
344 * swaplist_find: find and optionally remove a swap device from the
345 * global list.
346 *
347 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
348 * => we return the swapdev we found (and removed)
349 */
350 static struct swapdev *
351 swaplist_find(struct vnode *vp, boolean_t remove)
352 {
353 struct swapdev *sdp;
354 struct swappri *spp;
355
356 /*
357 * search the lists for the requested vp
358 */
359
360 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
361 CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
362 if (sdp->swd_vp == vp) {
363 if (remove) {
364 CIRCLEQ_REMOVE(&spp->spi_swapdev,
365 sdp, swd_next);
366 uvmexp.nswapdev--;
367 }
368 return(sdp);
369 }
370 }
371 }
372 return (NULL);
373 }
374
375 /*
376 * swaplist_trim: scan priority list for empty priority entries and kill
377 * them.
378 *
379 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
380 */
381 static void
382 swaplist_trim(void)
383 {
384 struct swappri *spp, *nextspp;
385
386 for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
387 nextspp = LIST_NEXT(spp, spi_swappri);
388 if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
389 (void *)&spp->spi_swapdev)
390 continue;
391 LIST_REMOVE(spp, spi_swappri);
392 free(spp, M_VMSWAP);
393 }
394 }
395
396 /*
397 * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
398 * to the "swapdev" that maps that section of the drum.
399 *
400 * => each swapdev takes one big contig chunk of the drum
401 * => caller must hold uvm.swap_data_lock
402 */
403 static struct swapdev *
404 swapdrum_getsdp(int pgno)
405 {
406 struct swapdev *sdp;
407 struct swappri *spp;
408
409 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
410 CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
411 if (sdp->swd_flags & SWF_FAKE)
412 continue;
413 if (pgno >= sdp->swd_drumoffset &&
414 pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
415 return sdp;
416 }
417 }
418 }
419 return NULL;
420 }
421
422
423 /*
424 * sys_swapctl: main entry point for swapctl(2) system call
425 * [with two helper functions: swap_on and swap_off]
426 */
427 int
428 sys_swapctl(struct lwp *l, void *v, register_t *retval)
429 {
430 struct sys_swapctl_args /* {
431 syscallarg(int) cmd;
432 syscallarg(void *) arg;
433 syscallarg(int) misc;
434 } */ *uap = (struct sys_swapctl_args *)v;
435 struct vnode *vp;
436 struct nameidata nd;
437 struct swappri *spp;
438 struct swapdev *sdp;
439 struct swapent *sep;
440 #define SWAP_PATH_MAX (PATH_MAX + 1)
441 char *userpath;
442 size_t len;
443 int error, misc;
444 int priority;
445 UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
446
447 misc = SCARG(uap, misc);
448
449 /*
450 * ensure serialized syscall access by grabbing the swap_syscall_lock
451 */
452 lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
453
454 userpath = malloc(SWAP_PATH_MAX, M_TEMP, M_WAITOK);
455 /*
456 * we handle the non-priv NSWAP and STATS request first.
457 *
458 * SWAP_NSWAP: return number of config'd swap devices
459 * [can also be obtained with uvmexp sysctl]
460 */
461 if (SCARG(uap, cmd) == SWAP_NSWAP) {
462 UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
463 0, 0, 0);
464 *retval = uvmexp.nswapdev;
465 error = 0;
466 goto out;
467 }
468
469 /*
470 * SWAP_STATS: get stats on current # of configured swap devs
471 *
472 * note that the swap_priority list can't change as long
473 * as we are holding the swap_syscall_lock. we don't want
474 * to grab the uvm.swap_data_lock because we may fault&sleep during
475 * copyout() and we don't want to be holding that lock then!
476 */
477 if (SCARG(uap, cmd) == SWAP_STATS
478 #if defined(COMPAT_13)
479 || SCARG(uap, cmd) == SWAP_OSTATS
480 #endif
481 ) {
482 if ((size_t)misc > (size_t)uvmexp.nswapdev)
483 misc = uvmexp.nswapdev;
484 #if defined(COMPAT_13)
485 if (SCARG(uap, cmd) == SWAP_OSTATS)
486 len = sizeof(struct oswapent) * misc;
487 else
488 #endif
489 len = sizeof(struct swapent) * misc;
490 sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);
491
492 uvm_swap_stats_locked(SCARG(uap, cmd), sep, misc, retval);
493 error = copyout(sep, SCARG(uap, arg), len);
494
495 free(sep, M_TEMP);
496 UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
497 goto out;
498 }
499 if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
500 dev_t *devp = (dev_t *)SCARG(uap, arg);
501
502 error = copyout(&dumpdev, devp, sizeof(dumpdev));
503 goto out;
504 }
505
506 /*
507 * all other requests require superuser privs. verify.
508 */
509 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_SWAPCTL,
510 0, NULL, NULL, NULL)))
511 goto out;
512
513 if (SCARG(uap, cmd) == SWAP_DUMPOFF) {
514 /* drop the current dump device */
515 dumpdev = NODEV;
516 cpu_dumpconf();
517 goto out;
518 }
519
520 /*
521 * at this point we expect a path name in arg. we will
522 * use namei() to gain a vnode reference (vref), and lock
523 * the vnode (VOP_LOCK).
524 *
525 * XXX: a NULL arg means use the root vnode pointer (e.g. for
526 * miniroot)
527 */
528 if (SCARG(uap, arg) == NULL) {
529 vp = rootvp; /* miniroot */
530 if (vget(vp, LK_EXCLUSIVE)) {
531 error = EBUSY;
532 goto out;
533 }
534 if (SCARG(uap, cmd) == SWAP_ON &&
535 copystr("miniroot", userpath, SWAP_PATH_MAX, &len))
536 panic("swapctl: miniroot copy failed");
537 } else {
538 int space;
539 char *where;
540
541 if (SCARG(uap, cmd) == SWAP_ON) {
542 if ((error = copyinstr(SCARG(uap, arg), userpath,
543 SWAP_PATH_MAX, &len)))
544 goto out;
545 space = UIO_SYSSPACE;
546 where = userpath;
547 } else {
548 space = UIO_USERSPACE;
549 where = (char *)SCARG(uap, arg);
550 }
551 NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, l);
552 if ((error = namei(&nd)))
553 goto out;
554 vp = nd.ni_vp;
555 }
556 /* note: "vp" is referenced and locked */
557
558 error = 0; /* assume no error */
559 switch(SCARG(uap, cmd)) {
560
561 case SWAP_DUMPDEV:
562 if (vp->v_type != VBLK) {
563 error = ENOTBLK;
564 break;
565 }
566 if (bdevsw_lookup(vp->v_rdev))
567 dumpdev = vp->v_rdev;
568 else
569 dumpdev = NODEV;
570 cpu_dumpconf();
571 break;
572
573 case SWAP_CTL:
574 /*
575 * get new priority, remove old entry (if any) and then
576 * reinsert it in the correct place. finally, prune out
577 * any empty priority structures.
578 */
579 priority = SCARG(uap, misc);
580 spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
581 simple_lock(&uvm.swap_data_lock);
582 if ((sdp = swaplist_find(vp, 1)) == NULL) {
583 error = ENOENT;
584 } else {
585 swaplist_insert(sdp, spp, priority);
586 swaplist_trim();
587 }
588 simple_unlock(&uvm.swap_data_lock);
589 if (error)
590 free(spp, M_VMSWAP);
591 break;
592
593 case SWAP_ON:
594
595 /*
596 * check for duplicates. if none found, then insert a
597 * dummy entry on the list to prevent someone else from
598 * trying to enable this device while we are working on
599 * it.
600 */
601
602 priority = SCARG(uap, misc);
603 sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
604 spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
605 memset(sdp, 0, sizeof(*sdp));
606 sdp->swd_flags = SWF_FAKE;
607 sdp->swd_vp = vp;
608 sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
609 bufq_alloc(&sdp->swd_tab, "disksort", BUFQ_SORT_RAWBLOCK);
610 simple_lock(&uvm.swap_data_lock);
611 if (swaplist_find(vp, 0) != NULL) {
612 error = EBUSY;
613 simple_unlock(&uvm.swap_data_lock);
614 bufq_free(sdp->swd_tab);
615 free(sdp, M_VMSWAP);
616 free(spp, M_VMSWAP);
617 break;
618 }
619 swaplist_insert(sdp, spp, priority);
620 simple_unlock(&uvm.swap_data_lock);
621
622 sdp->swd_pathlen = len;
623 sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
624 if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
625 panic("swapctl: copystr");
626
627 /*
628 * we've now got a FAKE placeholder in the swap list.
629 * now attempt to enable swap on it. if we fail, undo
630 * what we've done and kill the fake entry we just inserted.
631 * if swap_on is a success, it will clear the SWF_FAKE flag
632 */
633
634 if ((error = swap_on(l, sdp)) != 0) {
635 simple_lock(&uvm.swap_data_lock);
636 (void) swaplist_find(vp, 1); /* kill fake entry */
637 swaplist_trim();
638 simple_unlock(&uvm.swap_data_lock);
639 bufq_free(sdp->swd_tab);
640 free(sdp->swd_path, M_VMSWAP);
641 free(sdp, M_VMSWAP);
642 break;
643 }
644 break;
645
646 case SWAP_OFF:
647 simple_lock(&uvm.swap_data_lock);
648 if ((sdp = swaplist_find(vp, 0)) == NULL) {
649 simple_unlock(&uvm.swap_data_lock);
650 error = ENXIO;
651 break;
652 }
653
654 /*
655 * If a device isn't in use or enabled, we
656 * can't stop swapping from it (again).
657 */
658 if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
659 simple_unlock(&uvm.swap_data_lock);
660 error = EBUSY;
661 break;
662 }
663
664 /*
665 * do the real work.
666 */
667 error = swap_off(l, sdp);
668 break;
669
670 default:
671 error = EINVAL;
672 }
673
674 /*
675 * done! release the ref gained by namei() and unlock.
676 */
677 vput(vp);
678
679 out:
680 free(userpath, M_TEMP);
681 lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
682
683 UVMHIST_LOG(pdhist, "<- done! error=%d", error, 0, 0, 0);
684 return (error);
685 }
686
687 /*
688 * swap_stats: implements swapctl(SWAP_STATS). The function is kept
689 * away from sys_swapctl() in order to allow COMPAT_* swapctl()
690 * emulation to use it directly without going through sys_swapctl().
691 * The problem with using sys_swapctl() there is that it involves
692 * copying the swapent array to the stackgap, and this array's size
693 * is not known at build time. Hence it would not be possible to
694 * ensure it would fit in the stackgap in any case.
695 */
696 void
697 uvm_swap_stats(int cmd, struct swapent *sep, int sec, register_t *retval)
698 {
699
700 lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
701 uvm_swap_stats_locked(cmd, sep, sec, retval);
702 lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
703 }
704
705 static void
706 uvm_swap_stats_locked(int cmd, struct swapent *sep, int sec, register_t *retval)
707 {
708 struct swappri *spp;
709 struct swapdev *sdp;
710 int count = 0;
711
712 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
713 for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
714 sdp != (void *)&spp->spi_swapdev && sec-- > 0;
715 sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
716 /*
717 * backwards compatibility for system call.
718 * note that we use 'struct oswapent' as an
719 * overlay into both 'struct swapdev' and
720 * the userland 'struct swapent', as we
721 * want to retain backwards compatibility
722 * with NetBSD 1.3.
723 */
724 sdp->swd_ose.ose_inuse =
725 btodb((uint64_t)sdp->swd_npginuse <<
726 PAGE_SHIFT);
727 (void)memcpy(sep, &sdp->swd_ose,
728 sizeof(struct oswapent));
729
730 /* now copy out the path if necessary */
731 #if !defined(COMPAT_13)
732 (void) cmd;
733 #endif
734 #if defined(COMPAT_13)
735 if (cmd == SWAP_STATS)
736 #endif
737 (void)memcpy(&sep->se_path, sdp->swd_path,
738 sdp->swd_pathlen);
739
740 count++;
741 #if defined(COMPAT_13)
742 if (cmd == SWAP_OSTATS)
743 sep = (struct swapent *)
744 ((struct oswapent *)sep + 1);
745 else
746 #endif
747 sep++;
748 }
749 }
750
751 *retval = count;
752 return;
753 }
754
755 /*
756 * swap_on: attempt to enable a swapdev for swapping. note that the
757 * swapdev is already on the global list, but disabled (marked
758 * SWF_FAKE).
759 *
760 * => we avoid the start of the disk (to protect disk labels)
761 * => we also avoid the miniroot, if we are swapping to root.
762 * => caller should leave uvm.swap_data_lock unlocked, we may lock it
763 * if needed.
764 */
765 static int
766 swap_on(struct lwp *l, struct swapdev *sdp)
767 {
768 struct vnode *vp;
769 int error, npages, nblocks, size;
770 long addr;
771 u_long result;
772 struct vattr va;
773 #ifdef NFS
774 extern int (**nfsv2_vnodeop_p)(void *);
775 #endif /* NFS */
776 const struct bdevsw *bdev;
777 dev_t dev;
778 UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
779
780 /*
781 * we want to enable swapping on sdp. the swd_vp contains
782 * the vnode we want (locked and ref'd), and the swd_dev
783 * contains the dev_t of the file, if it is a block device.
784 */
785
786 vp = sdp->swd_vp;
787 dev = sdp->swd_dev;
788
789 /*
790 * open the swap file (mostly useful for block device files to
791 * let device driver know what is up).
792 *
793 * we skip the open/close for root on swap because the root
794 * has already been opened when root was mounted (mountroot).
795 */
796 if (vp != rootvp) {
797 if ((error = VOP_OPEN(vp, FREAD|FWRITE, l->l_cred, l)))
798 return (error);
799 }
800
801 /* XXX this only works for block devices */
802 UVMHIST_LOG(pdhist, " dev=%d, major(dev)=%d", dev, major(dev), 0,0);
803
804 /*
805 * we now need to determine the size of the swap area. for
806 * block specials we can call the d_psize function.
807 * for normal files, we must stat [get attrs].
808 *
809 * we put the result in nblks.
810 * for normal files, we also want the filesystem block size
811 * (which we get with statfs).
812 */
813 switch (vp->v_type) {
814 case VBLK:
815 bdev = bdevsw_lookup(dev);
816 if (bdev == NULL || bdev->d_psize == NULL ||
817 (nblocks = (*bdev->d_psize)(dev)) == -1) {
818 error = ENXIO;
819 goto bad;
820 }
821 break;
822
823 case VREG:
824 if ((error = VOP_GETATTR(vp, &va, l->l_cred, l)))
825 goto bad;
826 nblocks = (int)btodb(va.va_size);
827 if ((error =
828 VFS_STATVFS(vp->v_mount, &vp->v_mount->mnt_stat, l)) != 0)
829 goto bad;
830
831 sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
832 /*
833 * limit the max # of outstanding I/O requests we issue
834 * at any one time. take it easy on NFS servers.
835 */
836 #ifdef NFS
837 if (vp->v_op == nfsv2_vnodeop_p)
838 sdp->swd_maxactive = 2; /* XXX */
839 else
840 #endif /* NFS */
841 sdp->swd_maxactive = 8; /* XXX */
842 break;
843
844 default:
845 error = ENXIO;
846 goto bad;
847 }
848
849 /*
850 * save nblocks in a safe place and convert to pages.
851 */
852
853 sdp->swd_ose.ose_nblks = nblocks;
854 npages = dbtob((uint64_t)nblocks) >> PAGE_SHIFT;
855
856 /*
857 * for block special files, we want to make sure that we leave
858 * the disklabel and bootblocks alone, so we arrange to skip
859 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
860 * note that because of this the "size" can be less than the
861 * actual number of blocks on the device.
862 */
863 if (vp->v_type == VBLK) {
864 /* we use pages 1 to (size - 1) [inclusive] */
865 size = npages - 1;
866 addr = 1;
867 } else {
868 /* we use pages 0 to (size - 1) [inclusive] */
869 size = npages;
870 addr = 0;
871 }
872
873 /*
874 * make sure we have enough blocks for a reasonably sized swap
875 * area. we want at least one page.
876 */
877
878 if (size < 1) {
879 UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0);
880 error = EINVAL;
881 goto bad;
882 }
883
884 UVMHIST_LOG(pdhist, " dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
885
886 /*
887 * now we need to allocate a blist to manage this swap device
888 */
889
890 sdp->swd_blist = blist_create(npages);
891 /* mark all except the `saved' region free. */
892 blist_free(sdp->swd_blist, addr, size);
893
894 /*
895 * if the vnode we are swapping to is the root vnode
896 * (i.e. we are swapping to the miniroot) then we want
897 * to make sure we don't overwrite it. do a statfs to
898 * find its size and skip over it.
899 */
900 if (vp == rootvp) {
901 struct mount *mp;
902 struct statvfs *sp;
903 int rootblocks, rootpages;
904
905 mp = rootvnode->v_mount;
906 sp = &mp->mnt_stat;
907 rootblocks = sp->f_blocks * btodb(sp->f_frsize);
908 /*
909 * XXX: sp->f_blocks isn't the total number of
910 * blocks in the filesystem, it's the number of
911 * data blocks. so, our rootblocks almost
912 * definitely underestimates the total size
913 * of the filesystem - how badly depends on the
914 * details of the filesystem type. there isn't
915 * an obvious way to deal with this cleanly
916 * and perfectly, so for now we just pad our
917 * rootblocks estimate with an extra 5 percent.
918 */
919 rootblocks += (rootblocks >> 5) +
920 (rootblocks >> 6) +
921 (rootblocks >> 7);
922 rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
923 if (rootpages > size)
924 panic("swap_on: miniroot larger than swap?");
925
926 if (rootpages != blist_fill(sdp->swd_blist, addr, rootpages)) {
927 panic("swap_on: unable to preserve miniroot");
928 }
929
930 size -= rootpages;
931 printf("Preserved %d pages of miniroot ", rootpages);
932 printf("leaving %d pages of swap\n", size);
933 }
934
935 /*
936 * add a ref to vp to reflect usage as a swap device.
937 */
938 vref(vp);
939
940 /*
941 * now add the new swapdev to the drum and enable.
942 */
943 result = vmem_alloc(swapmap, npages, VM_BESTFIT | VM_SLEEP);
944 if (result == 0)
945 panic("swapdrum_add");
946
947 sdp->swd_drumoffset = (int)result;
948 sdp->swd_drumsize = npages;
949 sdp->swd_npages = size;
950 simple_lock(&uvm.swap_data_lock);
951 sdp->swd_flags &= ~SWF_FAKE; /* going live */
952 sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
953 uvmexp.swpages += size;
954 uvmexp.swpgavail += size;
955 simple_unlock(&uvm.swap_data_lock);
956 return (0);
957
958 /*
959 * failure: clean up and return error.
960 */
961
962 bad:
963 if (sdp->swd_blist) {
964 blist_destroy(sdp->swd_blist);
965 }
966 if (vp != rootvp) {
967 (void)VOP_CLOSE(vp, FREAD|FWRITE, l->l_cred, l);
968 }
969 return (error);
970 }
971
972 /*
973 * swap_off: stop swapping on swapdev
974 *
975 * => swap data should be locked, we will unlock.
976 */
977 static int
978 swap_off(struct lwp *l, struct swapdev *sdp)
979 {
980 int npages = sdp->swd_npages;
981 int error = 0;
982
983 UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
984 UVMHIST_LOG(pdhist, " dev=%x, npages=%d", sdp->swd_dev,npages,0,0);
985
986 /* disable the swap area being removed */
987 sdp->swd_flags &= ~SWF_ENABLE;
988 uvmexp.swpgavail -= npages;
989 simple_unlock(&uvm.swap_data_lock);
990
991 /*
992 * the idea is to find all the pages that are paged out to this
993 * device, and page them all in. in uvm, swap-backed pageable
994 * memory can take two forms: aobjs and anons. call the
995 * swapoff hook for each subsystem to bring in pages.
996 */
997
998 if (uao_swap_off(sdp->swd_drumoffset,
999 sdp->swd_drumoffset + sdp->swd_drumsize) ||
1000 amap_swap_off(sdp->swd_drumoffset,
1001 sdp->swd_drumoffset + sdp->swd_drumsize)) {
1002 error = ENOMEM;
1003 } else if (sdp->swd_npginuse > sdp->swd_npgbad) {
1004 error = EBUSY;
1005 }
1006
1007 if (error) {
1008 simple_lock(&uvm.swap_data_lock);
1009 sdp->swd_flags |= SWF_ENABLE;
1010 uvmexp.swpgavail += npages;
1011 simple_unlock(&uvm.swap_data_lock);
1012
1013 return error;
1014 }
1015
1016 /*
1017 * done with the vnode.
1018 * drop our ref on the vnode before calling VOP_CLOSE()
1019 * so that spec_close() can tell if this is the last close.
1020 */
1021 vrele(sdp->swd_vp);
1022 if (sdp->swd_vp != rootvp) {
1023 (void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, l->l_cred, l);
1024 }
1025
1026 simple_lock(&uvm.swap_data_lock);
1027 uvmexp.swpages -= npages;
1028 uvmexp.swpginuse -= sdp->swd_npgbad;
1029
1030 if (swaplist_find(sdp->swd_vp, 1) == NULL)
1031 panic("swap_off: swapdev not in list");
1032 swaplist_trim();
1033 simple_unlock(&uvm.swap_data_lock);
1034
1035 /*
1036 * free all resources!
1037 */
1038 vmem_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize);
1039 blist_destroy(sdp->swd_blist);
1040 bufq_free(sdp->swd_tab);
1041 free(sdp, M_VMSWAP);
1042 return (0);
1043 }
1044
1045 /*
1046 * /dev/drum interface and i/o functions
1047 */
1048
1049 /*
1050 * swstrategy: perform I/O on the drum
1051 *
1052 * => we must map the i/o request from the drum to the correct swapdev.
1053 */
1054 static void
1055 swstrategy(struct buf *bp)
1056 {
1057 struct swapdev *sdp;
1058 struct vnode *vp;
1059 int s, pageno, bn;
1060 UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
1061
1062 /*
1063 * convert block number to swapdev. note that swapdev can't
1064 * be yanked out from under us because we are holding resources
1065 * in it (i.e. the blocks we are doing I/O on).
1066 */
1067 pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
1068 simple_lock(&uvm.swap_data_lock);
1069 sdp = swapdrum_getsdp(pageno);
1070 simple_unlock(&uvm.swap_data_lock);
1071 if (sdp == NULL) {
1072 bp->b_error = EINVAL;
1073 bp->b_flags |= B_ERROR;
1074 biodone(bp);
1075 UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0);
1076 return;
1077 }
1078
1079 /*
1080 * convert drum page number to block number on this swapdev.
1081 */
1082
1083 pageno -= sdp->swd_drumoffset; /* page # on swapdev */
1084 bn = btodb((uint64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
1085
1086 UVMHIST_LOG(pdhist, " %s: mapoff=%x bn=%x bcount=%ld",
1087 ((bp->b_flags & B_READ) == 0) ? "write" : "read",
1088 sdp->swd_drumoffset, bn, bp->b_bcount);
1089
1090 /*
1091 * for block devices we finish up here.
1092 * for regular files we have to do more work which we delegate
1093 * to sw_reg_strategy().
1094 */
1095
1096 switch (sdp->swd_vp->v_type) {
1097 default:
1098 panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
1099
1100 case VBLK:
1101
1102 /*
1103 * must convert "bp" from an I/O on /dev/drum to an I/O
1104 * on the swapdev (sdp).
1105 */
1106 s = splbio();
1107 bp->b_blkno = bn; /* swapdev block number */
1108 vp = sdp->swd_vp; /* swapdev vnode pointer */
1109 bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
1110
1111 /*
1112 * if we are doing a write, we have to redirect the i/o on
1113 * drum's v_numoutput counter to the swapdevs.
1114 */
1115 if ((bp->b_flags & B_READ) == 0) {
1116 vwakeup(bp); /* kills one 'v_numoutput' on drum */
1117 V_INCR_NUMOUTPUT(vp); /* put it on swapdev */
1118 }
1119
1120 /*
1121 * finally plug in swapdev vnode and start I/O
1122 */
1123 bp->b_vp = vp;
1124 splx(s);
1125 VOP_STRATEGY(vp, bp);
1126 return;
1127
1128 case VREG:
1129 /*
1130 * delegate to sw_reg_strategy function.
1131 */
1132 sw_reg_strategy(sdp, bp, bn);
1133 return;
1134 }
1135 /* NOTREACHED */
1136 }
1137
1138 /*
1139 * swread: the read function for the drum (just a call to physio)
1140 */
1141 /*ARGSUSED*/
1142 static int
1143 swread(dev_t dev, struct uio *uio, int ioflag)
1144 {
1145 UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
1146
1147 UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1148 return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
1149 }
1150
1151 /*
1152 * swwrite: the write function for the drum (just a call to physio)
1153 */
1154 /*ARGSUSED*/
1155 static int
1156 swwrite(dev_t dev, struct uio *uio, int ioflag)
1157 {
1158 UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
1159
1160 UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1161 return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
1162 }
1163
1164 const struct bdevsw swap_bdevsw = {
1165 noopen, noclose, swstrategy, noioctl, nodump, nosize, D_OTHER,
1166 };
1167
1168 const struct cdevsw swap_cdevsw = {
1169 nullopen, nullclose, swread, swwrite, noioctl,
1170 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
1171 };
1172
1173 /*
1174 * sw_reg_strategy: handle swap i/o to regular files
1175 */
1176 static void
1177 sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
1178 {
1179 struct vnode *vp;
1180 struct vndxfer *vnx;
1181 daddr_t nbn;
1182 caddr_t addr;
1183 off_t byteoff;
1184 int s, off, nra, error, sz, resid;
1185 UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
1186
1187 /*
1188 * allocate a vndxfer head for this transfer and point it to
1189 * our buffer.
1190 */
1191 getvndxfer(vnx);
1192 vnx->vx_flags = VX_BUSY;
1193 vnx->vx_error = 0;
1194 vnx->vx_pending = 0;
1195 vnx->vx_bp = bp;
1196 vnx->vx_sdp = sdp;
1197
1198 /*
1199 * setup for main loop where we read filesystem blocks into
1200 * our buffer.
1201 */
1202 error = 0;
1203 bp->b_resid = bp->b_bcount; /* nothing transferred yet! */
1204 addr = bp->b_data; /* current position in buffer */
1205 byteoff = dbtob((uint64_t)bn);
1206
1207 for (resid = bp->b_resid; resid; resid -= sz) {
1208 struct vndbuf *nbp;
1209
1210 /*
1211 * translate byteoffset into block number. return values:
1212 * vp = vnode of underlying device
1213 * nbn = new block number (on underlying vnode dev)
1214 * nra = num blocks we can read-ahead (excludes requested
1215 * block)
1216 */
1217 nra = 0;
1218 error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
1219 &vp, &nbn, &nra);
1220
1221 if (error == 0 && nbn == (daddr_t)-1) {
1222 /*
1223 * this used to just set error, but that doesn't
1224 * do the right thing. Instead, it causes random
1225 * memory errors. The panic() should remain until
1226 * this condition doesn't destabilize the system.
1227 */
1228 #if 1
1229 panic("sw_reg_strategy: swap to sparse file");
1230 #else
1231 error = EIO; /* failure */
1232 #endif
1233 }
1234
1235 /*
1236 * punt if there was an error or a hole in the file.
1237 * we must wait for any i/o ops we have already started
1238 * to finish before returning.
1239 *
1240 * XXX we could deal with holes here but it would be
1241 * a hassle (in the write case).
1242 */
1243 if (error) {
1244 s = splbio();
1245 vnx->vx_error = error; /* pass error up */
1246 goto out;
1247 }
1248
1249 /*
1250 * compute the size ("sz") of this transfer (in bytes).
1251 */
1252 off = byteoff % sdp->swd_bsize;
1253 sz = (1 + nra) * sdp->swd_bsize - off;
1254 if (sz > resid)
1255 sz = resid;
1256
1257 UVMHIST_LOG(pdhist, "sw_reg_strategy: "
1258 "vp %p/%p offset 0x%x/0x%x",
1259 sdp->swd_vp, vp, byteoff, nbn);
1260
1261 /*
1262 * now get a buf structure. note that the vb_buf is
1263 * at the front of the nbp structure so that you can
1264 * cast pointers between the two structures easily.
1265 */
1266 getvndbuf(nbp);
1267 BUF_INIT(&nbp->vb_buf);
1268 nbp->vb_buf.b_flags = bp->b_flags | B_CALL;
1269 nbp->vb_buf.b_bcount = sz;
1270 nbp->vb_buf.b_bufsize = sz;
1271 nbp->vb_buf.b_error = 0;
1272 nbp->vb_buf.b_data = addr;
1273 nbp->vb_buf.b_lblkno = 0;
1274 nbp->vb_buf.b_blkno = nbn + btodb(off);
1275 nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
1276 nbp->vb_buf.b_iodone = sw_reg_iodone;
1277 nbp->vb_buf.b_vp = vp;
1278 if (vp->v_type == VBLK) {
1279 nbp->vb_buf.b_dev = vp->v_rdev;
1280 }
1281
1282 nbp->vb_xfer = vnx; /* patch it back in to vnx */
1283
1284 /*
1285 * Just sort by block number
1286 */
1287 s = splbio();
1288 if (vnx->vx_error != 0) {
1289 putvndbuf(nbp);
1290 goto out;
1291 }
1292 vnx->vx_pending++;
1293
1294 /* sort it in and start I/O if we are not over our limit */
1295 BUFQ_PUT(sdp->swd_tab, &nbp->vb_buf);
1296 sw_reg_start(sdp);
1297 splx(s);
1298
1299 /*
1300 * advance to the next I/O
1301 */
1302 byteoff += sz;
1303 addr += sz;
1304 }
1305
1306 s = splbio();
1307
1308 out: /* Arrive here at splbio */
1309 vnx->vx_flags &= ~VX_BUSY;
1310 if (vnx->vx_pending == 0) {
1311 if (vnx->vx_error != 0) {
1312 bp->b_error = vnx->vx_error;
1313 bp->b_flags |= B_ERROR;
1314 }
1315 putvndxfer(vnx);
1316 biodone(bp);
1317 }
1318 splx(s);
1319 }
1320
1321 /*
1322 * sw_reg_start: start an I/O request on the requested swapdev
1323 *
1324 * => reqs are sorted by b_rawblkno (above)
1325 */
1326 static void
1327 sw_reg_start(struct swapdev *sdp)
1328 {
1329 struct buf *bp;
1330 UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
1331
1332 /* recursion control */
1333 if ((sdp->swd_flags & SWF_BUSY) != 0)
1334 return;
1335
1336 sdp->swd_flags |= SWF_BUSY;
1337
1338 while (sdp->swd_active < sdp->swd_maxactive) {
1339 bp = BUFQ_GET(sdp->swd_tab);
1340 if (bp == NULL)
1341 break;
1342 sdp->swd_active++;
1343
1344 UVMHIST_LOG(pdhist,
1345 "sw_reg_start: bp %p vp %p blkno %p cnt %lx",
1346 bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
1347 if ((bp->b_flags & B_READ) == 0)
1348 V_INCR_NUMOUTPUT(bp->b_vp);
1349
1350 VOP_STRATEGY(bp->b_vp, bp);
1351 }
1352 sdp->swd_flags &= ~SWF_BUSY;
1353 }
1354
1355 /*
1356 * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
1357 *
1358 * => note that we can recover the vndbuf struct by casting the buf ptr
1359 */
1360 static void
1361 sw_reg_iodone(struct buf *bp)
1362 {
1363 struct vndbuf *vbp = (struct vndbuf *) bp;
1364 struct vndxfer *vnx = vbp->vb_xfer;
1365 struct buf *pbp = vnx->vx_bp; /* parent buffer */
1366 struct swapdev *sdp = vnx->vx_sdp;
1367 int s, resid, error;
1368 UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
1369
1370 UVMHIST_LOG(pdhist, " vbp=%p vp=%p blkno=%x addr=%p",
1371 vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
1372 UVMHIST_LOG(pdhist, " cnt=%lx resid=%lx",
1373 vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
1374
1375 /*
1376 * protect vbp at splbio and update.
1377 */
1378
1379 s = splbio();
1380 resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
1381 pbp->b_resid -= resid;
1382 vnx->vx_pending--;
1383
1384 if (vbp->vb_buf.b_flags & B_ERROR) {
1385 /* pass error upward */
1386 error = vbp->vb_buf.b_error ? vbp->vb_buf.b_error : EIO;
1387 UVMHIST_LOG(pdhist, " got error=%d !", error, 0, 0, 0);
1388 vnx->vx_error = error;
1389 }
1390
1391 /*
1392 * kill vbp structure
1393 */
1394 putvndbuf(vbp);
1395
1396 /*
1397 * wrap up this transaction if it has run to completion or, in
1398 * case of an error, when all auxiliary buffers have returned.
1399 */
1400 if (vnx->vx_error != 0) {
1401 /* pass error upward */
1402 pbp->b_flags |= B_ERROR;
1403 pbp->b_error = vnx->vx_error;
1404 if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
1405 putvndxfer(vnx);
1406 biodone(pbp);
1407 }
1408 } else if (pbp->b_resid == 0) {
1409 KASSERT(vnx->vx_pending == 0);
1410 if ((vnx->vx_flags & VX_BUSY) == 0) {
1411 UVMHIST_LOG(pdhist, " iodone: pbp=%p error=%d !",
1412 pbp, vnx->vx_error, 0, 0);
1413 putvndxfer(vnx);
1414 biodone(pbp);
1415 }
1416 }
1417
1418 /*
1419 * done! start next swapdev I/O if one is pending
1420 */
1421 sdp->swd_active--;
1422 sw_reg_start(sdp);
1423 splx(s);
1424 }
1425
1426
1427 /*
1428 * uvm_swap_alloc: allocate space on swap
1429 *
1430 * => allocation is done "round robin" down the priority list, as we
1431 * allocate in a priority we "rotate" the circle queue.
1432 * => space can be freed with uvm_swap_free
1433 * => we return the page slot number in /dev/drum (0 == invalid slot)
1434 * => we lock uvm.swap_data_lock
1435 * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
1436 */
1437 int
1438 uvm_swap_alloc(int *nslots /* IN/OUT */, boolean_t lessok)
1439 {
1440 struct swapdev *sdp;
1441 struct swappri *spp;
1442 UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
1443
1444 /*
1445 * no swap devices configured yet? definite failure.
1446 */
1447 if (uvmexp.nswapdev < 1)
1448 return 0;
1449
1450 /*
1451 * lock data lock, convert slots into blocks, and enter loop
1452 */
1453 simple_lock(&uvm.swap_data_lock);
1454
1455 ReTry: /* XXXMRG */
1456 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
1457 CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
1458 uint64_t result;
1459
1460 /* if it's not enabled, then we can't swap from it */
1461 if ((sdp->swd_flags & SWF_ENABLE) == 0)
1462 continue;
1463 if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
1464 continue;
1465 result = blist_alloc(sdp->swd_blist, *nslots);
1466 if (result == BLIST_NONE) {
1467 continue;
1468 }
1469 KASSERT(result < sdp->swd_drumsize);
1470
1471 /*
1472 * successful allocation! now rotate the circleq.
1473 */
1474 CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1475 CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
1476 sdp->swd_npginuse += *nslots;
1477 uvmexp.swpginuse += *nslots;
1478 simple_unlock(&uvm.swap_data_lock);
1479 /* done! return drum slot number */
1480 UVMHIST_LOG(pdhist,
1481 "success! returning %d slots starting at %d",
1482 *nslots, result + sdp->swd_drumoffset, 0, 0);
1483 return (result + sdp->swd_drumoffset);
1484 }
1485 }
1486
1487 /* XXXMRG: BEGIN HACK */
1488 if (*nslots > 1 && lessok) {
1489 *nslots = 1;
1490 /* XXXMRG: ugh! blist should support this for us */
1491 goto ReTry;
1492 }
1493 /* XXXMRG: END HACK */
1494
1495 simple_unlock(&uvm.swap_data_lock);
1496 return 0;
1497 }
1498
1499 boolean_t
1500 uvm_swapisfull(void)
1501 {
1502 boolean_t rv;
1503
1504 simple_lock(&uvm.swap_data_lock);
1505 KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
1506 rv = (uvmexp.swpgonly >= uvmexp.swpgavail);
1507 simple_unlock(&uvm.swap_data_lock);
1508
1509 return (rv);
1510 }
1511
1512 /*
1513 * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
1514 *
1515 * => we lock uvm.swap_data_lock
1516 */
1517 void
1518 uvm_swap_markbad(int startslot, int nslots)
1519 {
1520 struct swapdev *sdp;
1521 UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
1522
1523 simple_lock(&uvm.swap_data_lock);
1524 sdp = swapdrum_getsdp(startslot);
1525 KASSERT(sdp != NULL);
1526
1527 /*
1528 * we just keep track of how many pages have been marked bad
1529 * in this device, to make everything add up in swap_off().
1530 * we assume here that the range of slots will all be within
1531 * one swap device.
1532 */
1533
1534 KASSERT(uvmexp.swpgonly >= nslots);
1535 uvmexp.swpgonly -= nslots;
1536 sdp->swd_npgbad += nslots;
1537 UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
1538 simple_unlock(&uvm.swap_data_lock);
1539 }
1540
1541 /*
1542 * uvm_swap_free: free swap slots
1543 *
1544 * => this can be all or part of an allocation made by uvm_swap_alloc
1545 * => we lock uvm.swap_data_lock
1546 */
1547 void
1548 uvm_swap_free(int startslot, int nslots)
1549 {
1550 struct swapdev *sdp;
1551 UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
1552
1553 UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
1554 startslot, 0, 0);
1555
1556 /*
1557 * ignore attempts to free the "bad" slot.
1558 */
1559
1560 if (startslot == SWSLOT_BAD) {
1561 return;
1562 }
1563
1564 /*
1565 * convert drum slot offset back to sdp, free the blocks
1566 * in the extent, and return. must hold pri lock to do
1567 * lookup and access the extent.
1568 */
1569
1570 simple_lock(&uvm.swap_data_lock);
1571 sdp = swapdrum_getsdp(startslot);
1572 KASSERT(uvmexp.nswapdev >= 1);
1573 KASSERT(sdp != NULL);
1574 KASSERT(sdp->swd_npginuse >= nslots);
1575 blist_free(sdp->swd_blist, startslot - sdp->swd_drumoffset, nslots);
1576 sdp->swd_npginuse -= nslots;
1577 uvmexp.swpginuse -= nslots;
1578 simple_unlock(&uvm.swap_data_lock);
1579 }
1580
1581 /*
1582 * uvm_swap_put: put any number of pages into a contig place on swap
1583 *
1584 * => can be sync or async
1585 */
1586
1587 int
1588 uvm_swap_put(int swslot, struct vm_page **ppsp, int npages, int flags)
1589 {
1590 int error;
1591
1592 error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
1593 ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1594 return error;
1595 }
1596
1597 /*
1598 * uvm_swap_get: get a single page from swap
1599 *
1600 * => usually a sync op (from fault)
1601 */
1602
1603 int
1604 uvm_swap_get(struct vm_page *page, int swslot, int flags)
1605 {
1606 int error;
1607
1608 uvmexp.nswget++;
1609 KASSERT(flags & PGO_SYNCIO);
1610 if (swslot == SWSLOT_BAD) {
1611 return EIO;
1612 }
1613
1614 error = uvm_swap_io(&page, swslot, 1, B_READ |
1615 ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1616 if (error == 0) {
1617
1618 /*
1619 * this page is no longer only in swap.
1620 */
1621
1622 simple_lock(&uvm.swap_data_lock);
1623 KASSERT(uvmexp.swpgonly > 0);
1624 uvmexp.swpgonly--;
1625 simple_unlock(&uvm.swap_data_lock);
1626 }
1627 return error;
1628 }
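/*
 * [illustrative sketch added by the editor, not part of uvm_swap.c]
 * How the exported interfaces above combine, roughly the way the
 * anon/aobj pagers use them: allocate drum slots with uvm_swap_alloc(),
 * write pages with uvm_swap_put(), read them back with uvm_swap_get(),
 * and release the slots with uvm_swap_free().  The function name and
 * locals are invented for this sketch; real callers also maintain the
 * object/anon swap-slot bookkeeping and locking omitted here.
 */
static int
example_pageout_then_pagein(struct vm_page **pgs, int npages)
{
	int slot, nslots, error;

	/* ask for a contiguous run of drum slots; accept fewer if needed */
	nslots = npages;
	slot = uvm_swap_alloc(&nslots, TRUE);
	if (slot == 0)
		return ENOMEM;		/* slot 0 means "no space" */

	/* write the pages synchronously to the allocated slots */
	error = uvm_swap_put(slot, pgs, nslots, PGO_SYNCIO);
	if (error) {
		uvm_swap_free(slot, nslots);	/* give the slots back */
		return error;
	}

	/* ...later, at fault time: bring the first page back in... */
	error = uvm_swap_get(pgs[0], slot, PGO_SYNCIO);
	if (error == 0)
		uvm_swap_free(slot, 1);		/* slot no longer needed */
	return error;
}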
1629
1630 /*
1631 * uvm_swap_io: do an i/o operation to swap
1632 */
1633
1634 static int
1635 uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
1636 {
1637 daddr_t startblk;
1638 struct buf *bp;
1639 vaddr_t kva;
1640 int error, s, mapinflags;
1641 boolean_t write, async;
1642 UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
1643
1644 UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
1645 startslot, npages, flags, 0);
1646
1647 write = (flags & B_READ) == 0;
1648 async = (flags & B_ASYNC) != 0;
1649
1650 /*
1651 * convert starting drum slot to block number
1652 */
1653
1654 startblk = btodb((uint64_t)startslot << PAGE_SHIFT);
1655
1656 /*
1657 * first, map the pages into the kernel.
1658 */
1659
1660 mapinflags = !write ?
1661 UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
1662 UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
1663 kva = uvm_pagermapin(pps, npages, mapinflags);
1664
1665 /*
1666 * now allocate a buf for the i/o.
1667 */
1668
1669 bp = getiobuf();
1670
1671 /*
1672 * fill in the bp/sbp. we currently route our i/o through
1673 * /dev/drum's vnode [swapdev_vp].
1674 */
1675
1676 bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
1677 bp->b_proc = &proc0; /* XXX */
1678 bp->b_vnbufs.le_next = NOLIST;
1679 bp->b_data = (caddr_t)kva;
1680 bp->b_blkno = startblk;
1681 bp->b_vp = swapdev_vp;
1682 bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
1683
1684 /*
1685 * bump v_numoutput (counter of number of active outputs).
1686 */
1687
1688 if (write) {
1689 s = splbio();
1690 V_INCR_NUMOUTPUT(swapdev_vp);
1691 splx(s);
1692 }
1693
1694 /*
1695 * for async ops we must set up the iodone handler.
1696 */
1697
1698 if (async) {
1699 bp->b_flags |= B_CALL;
1700 bp->b_iodone = uvm_aio_biodone;
1701 UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
1702 if (curproc == uvm.pagedaemon_proc)
1703 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
1704 else
1705 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
1706 } else {
1707 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
1708 }
1709 UVMHIST_LOG(pdhist,
1710 "about to start io: data = %p blkno = 0x%x, bcount = %ld",
1711 bp->b_data, bp->b_blkno, bp->b_bcount, 0);
1712
1713 /*
1714 * now we start the I/O, and if async, return.
1715 */
1716
1717 VOP_STRATEGY(swapdev_vp, bp);
1718 if (async)
1719 return 0;
1720
1721 /*
1722 * must be sync i/o. wait for it to finish
1723 */
1724
1725 error = biowait(bp);
1726
1727 /*
1728 * kill the pager mapping
1729 */
1730
1731 uvm_pagermapout(kva, npages);
1732
1733 /*
1734 * now dispose of the buf and we're done.
1735 */
1736
1737 s = splbio();
1738 if (write)
1739 vwakeup(bp);
1740 putiobuf(bp);
1741 splx(s);
1742 UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0);
1743 return (error);
1744 }