sys/kern/vfs_sync.c
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define SC_FLAG_EXIT	(0x1)		/* request syncer exit */
#define SC_FLAG_DONE	(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;
	struct synclist		*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
	int			syncer_rushjob;
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}
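
/*
 * Editorial note: the handler above is the usual copy/clamp/publish sysctl
 * pattern -- sysctl_handle_int() writes the proposed value into a local,
 * so an out-of-range request is clamped to [1, SYNCER_MAXDELAY] before the
 * global is ever updated.  A minimal userland sketch of driving the knob
 * via sysctlbyname(3) follows; it is an illustration only, not part of
 * this file.
 */
#if 0	/* illustration: compile as a normal userland program */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int cur, nval = 15;
	size_t len = sizeof(cur);

	/* Read the current delay, then request a 15 second delay. */
	if (sysctlbyname("kern.syncdelay", &cur, &len, NULL, 0) == 0)
		printf("kern.syncdelay = %d\n", cur);
	if (sysctlbyname("kern.syncdelay", NULL, NULL, &nval,
	    sizeof(nval)) != 0)
		perror("sysctlbyname");
	/* Out-of-range values are clamped by the handler, not rejected. */
	return (0);
}
#endif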

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data
 * is delayed.  Similarly, directory updates are more critical, so they
 * are only delayed about a third the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * process).  The syncer_delayno variable indicates the next queue that
 * is to be processed.  Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
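
/*
 * Editorial sketch: the structure described above is a classic timer
 * wheel.  hashinit(SYNCER_MAXDELAY, ...) rounds the bucket count to a
 * power of two and hands back syncer_mask, so "slot + delay, masked"
 * wraps around the ring without a modulo.  Illustration only, not part
 * of this file:
 */
#if 0	/* illustration: the slot arithmetic for a 32-bucket wheel */
#include <stdio.h>

int
main(void)
{
	long mask = 31;		/* syncer_mask for 32 buckets */
	int delayno = 30;	/* pretend syncer_delayno is 30 */

	/* A 15 second delay lands 15 buckets ahead, wrapping the ring. */
	printf("slot = %ld\n", (delayno + 15) & mask);	/* prints 13 */
	return (0);
}
#endif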

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * MPSAFE
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);

	lwkt_reltoken(&ctx->sc_token);
}
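
/*
 * Editorial note: a delay <= 0 is an encoded *absolute* bucket number --
 * "-delay & syncer_mask" ignores syncer_delayno entirely, and vsyncscan()
 * below relies on that to park a vnode in a known bucket.  A positive
 * delay is relative to the bucket about to be processed and is capped at
 * SYNCER_MAXDELAY - 2 so a re-added vnode can never land back in the
 * bucket the syncer is currently draining.
 */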

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait till syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
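
/*
 * Editorial sketch: the stop path above is a two-flag handshake -- the
 * requester raises EXIT and wakes the thread, the thread raises DONE and
 * wakes the requester on its way out.  The same shape in portable
 * pthreads, with hypothetical names (illustration only, not part of this
 * file):
 */
#if 0
#include <pthread.h>

#define FLAG_EXIT	0x1	/* request worker exit */
#define FLAG_DONE	0x2	/* worker confirms exit */

struct wctx {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	int		flags;
};

static void *
worker(void *arg)
{
	struct wctx *c = arg;

	pthread_mutex_lock(&c->mtx);
	while ((c->flags & FLAG_EXIT) == 0)	/* stand-in for work loop */
		pthread_cond_wait(&c->cv, &c->mtx);
	c->flags |= FLAG_DONE;			/* confirm, then wake stopper */
	pthread_cond_broadcast(&c->cv);
	pthread_mutex_unlock(&c->mtx);
	return (NULL);
}

static void
worker_stop(struct wctx *c)
{
	pthread_mutex_lock(&c->mtx);
	c->flags |= FLAG_EXIT;			/* signal exit */
	pthread_cond_broadcast(&c->cv);
	while ((c->flags & FLAG_DONE) == 0)	/* wait for confirmation */
		pthread_cond_wait(&c->cv, &c->mtx);
	pthread_mutex_unlock(&c->mtx);
}
#endif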

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it, so move it
			 * to a later slot to give other vnodes a fair
			 * shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
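
/*
 * Editorial note on the pacing in the loop above: "delta" is the number
 * of rushjob ticks this syncer has not yet consumed.  The (u_int) cast
 * makes a negative delta look huge, so the thread resynchronizes by
 * clamping syncer_rushjob to rushjob - syncdelay/2 instead of stepping
 * once per missed tick.  Every consumed tick replaces the one second
 * sleep with a one tick sleep, so each rushjob increment drains roughly
 * one extra bucket ahead of schedule.
 */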

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test; the wakeup() only does
	 * something real if the syncer thread is actually asleep on
	 * its context.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp)
		wakeup(mp->mnt_syncer_ctx);
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 *	 sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFSs (e.g. NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
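
/*
 * Editorial sketch: the start/incr/next scattering above emits the
 * sequence 16, 8, 24, 4, 12, 20, 28, 2, ... -- successive halvings that
 * spread syncer vnodes across the wheel even when many filesystems are
 * mounted back-to-back.  Illustration only, not part of this file:
 */
#if 0
#include <stdio.h>

#define SYNCER_MAXDELAY	32

int
main(void)
{
	static long start, incr, next;
	int i;

	for (i = 0; i < 8; i++) {
		next += incr;
		if (next == 0 || next > SYNCER_MAXDELAY) {
			start /= 2;
			incr /= 2;
			if (start == 0) {
				start = SYNCER_MAXDELAY / 2;
				incr = SYNCER_MAXDELAY;
			}
			next = start;
		}
		printf("%ld ", next);	/* 16 8 24 4 12 20 28 2 */
	}
	printf("\n");
	return (0);
}
#endif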

static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}
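
/*
 * Editorial note: this is how an entire mount gets synced.  The mount's
 * syncer vnode rotates through the wheel like any other vnode; when the
 * syncer thread fsyncs it with MNT_LAZY, the routine above first re-queues
 * itself a full syncdelay out and then runs vfs_msync() plus a lazy
 * VFS_SYNC() over the whole filesystem.  The MNT_ASYNC save/restore keeps
 * the lazy pass from being promoted to fully asynchronous writes.
 */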

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	if (ctx) {
		lwkt_gettoken(&ctx->sc_token);
		KKASSERT(vp->v_mount->mnt_syncer != vp);
		if (vp->v_flag & VONWORKLST) {
			LIST_REMOVE(vp, v_synclist);
			vclrflags(vp, VONWORKLST);
		}
		lwkt_reltoken(&ctx->sc_token);
	} else {
		KKASSERT((vp->v_flag & VONWORKLST) == 0);
	}

	return (0);
}

/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFS's which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of
 * vmntvnodescan().
 *
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(
	struct mount *mp,
	int vmsc_flags,
	int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
	void *data
) {
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int b;
	int i;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer
	 * thread (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	b = ctx->syncer_delayno & ctx->syncer_mask;
	i = b;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;

	do {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vmsc_flags & VMSC_GETVP) {
				if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
					slowfunc(mp, vp, data);
					vput(vp);
				}
			} else if (vmsc_flags & VMSC_GETVX) {
				vx_get(vp);
				slowfunc(mp, vp, data);
				vx_put(vp);
			} else {
				vhold(vp);
				slowfunc(mp, vp, data);
				vdrop(vp);
			}
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	} while (i != b);

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);
	return (0);
}
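
/*
 * Editorial note: the re-add above passes -(i + syncdelay), i.e. the
 * encoded absolute bucket (i + syncdelay) & syncer_mask (see
 * vn_syncer_add()).  A vnode the callback could not fully clean is parked
 * behind the sweep, so the "do { } while (i != b)" loop makes forward
 * progress even when vnodes remain dirty.
 */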

/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}