1 /* $NetBSD: lfs_vnops.c,v 1.129 2004/02/26 22:41:36 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant@hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*
39 * Copyright (c) 1986, 1989, 1991, 1993, 1995
40 * The Regents of the University of California. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)lfs_vnops.c 8.13 (Berkeley) 6/10/95
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.129 2004/02/26 22:41:36 yamt Exp $");
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/namei.h>
75 #include <sys/resourcevar.h>
76 #include <sys/kernel.h>
77 #include <sys/file.h>
78 #include <sys/stat.h>
79 #include <sys/buf.h>
80 #include <sys/proc.h>
81 #include <sys/mount.h>
82 #include <sys/vnode.h>
83 #include <sys/malloc.h>
84 #include <sys/pool.h>
85 #include <sys/signalvar.h>
86
87 #include <miscfs/fifofs/fifo.h>
88 #include <miscfs/genfs/genfs.h>
89 #include <miscfs/specfs/specdev.h>
90
91 #include <ufs/ufs/inode.h>
92 #include <ufs/ufs/dir.h>
93 #include <ufs/ufs/ufsmount.h>
94 #include <ufs/ufs/ufs_extern.h>
95
96 #include <uvm/uvm.h>
97 #include <uvm/uvm_pmap.h>
98 #include <uvm/uvm_stat.h>
99 #include <uvm/uvm_pager.h>
100
101 #include <ufs/lfs/lfs.h>
102 #include <ufs/lfs/lfs_extern.h>
103
104 extern pid_t lfs_writer_daemon;
105
106 /* Global vfs data structures for lfs. */
107 int (**lfs_vnodeop_p)(void *);
108 const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
109 { &vop_default_desc, vn_default_error },
110 { &vop_lookup_desc, ufs_lookup }, /* lookup */
111 { &vop_create_desc, lfs_create }, /* create */
112 { &vop_whiteout_desc, ufs_whiteout }, /* whiteout */
113 { &vop_mknod_desc, lfs_mknod }, /* mknod */
114 { &vop_open_desc, ufs_open }, /* open */
115 { &vop_close_desc, lfs_close }, /* close */
116 { &vop_access_desc, ufs_access }, /* access */
117 { &vop_getattr_desc, lfs_getattr }, /* getattr */
118 { &vop_setattr_desc, lfs_setattr }, /* setattr */
119 { &vop_read_desc, lfs_read }, /* read */
120 { &vop_write_desc, lfs_write }, /* write */
121 { &vop_lease_desc, ufs_lease_check }, /* lease */
122 { &vop_ioctl_desc, ufs_ioctl }, /* ioctl */
123 { &vop_fcntl_desc, lfs_fcntl }, /* fcntl */
124 { &vop_poll_desc, ufs_poll }, /* poll */
125 { &vop_kqfilter_desc, genfs_kqfilter }, /* kqfilter */
126 { &vop_revoke_desc, ufs_revoke }, /* revoke */
127 { &vop_mmap_desc, lfs_mmap }, /* mmap */
128 { &vop_fsync_desc, lfs_fsync }, /* fsync */
129 { &vop_seek_desc, ufs_seek }, /* seek */
130 { &vop_remove_desc, lfs_remove }, /* remove */
131 { &vop_link_desc, lfs_link }, /* link */
132 { &vop_rename_desc, lfs_rename }, /* rename */
133 { &vop_mkdir_desc, lfs_mkdir }, /* mkdir */
134 { &vop_rmdir_desc, lfs_rmdir }, /* rmdir */
135 { &vop_symlink_desc, lfs_symlink }, /* symlink */
136 { &vop_readdir_desc, ufs_readdir }, /* readdir */
137 { &vop_readlink_desc, ufs_readlink }, /* readlink */
138 { &vop_abortop_desc, ufs_abortop }, /* abortop */
139 { &vop_inactive_desc, lfs_inactive }, /* inactive */
140 { &vop_reclaim_desc, lfs_reclaim }, /* reclaim */
141 { &vop_lock_desc, ufs_lock }, /* lock */
142 { &vop_unlock_desc, ufs_unlock }, /* unlock */
143 { &vop_bmap_desc, ufs_bmap }, /* bmap */
144 { &vop_strategy_desc, lfs_strategy }, /* strategy */
145 { &vop_print_desc, ufs_print }, /* print */
146 { &vop_islocked_desc, ufs_islocked }, /* islocked */
147 { &vop_pathconf_desc, ufs_pathconf }, /* pathconf */
148 { &vop_advlock_desc, ufs_advlock }, /* advlock */
149 { &vop_blkatoff_desc, lfs_blkatoff }, /* blkatoff */
150 { &vop_valloc_desc, lfs_valloc }, /* valloc */
151 { &vop_balloc_desc, lfs_balloc }, /* balloc */
152 { &vop_vfree_desc, lfs_vfree }, /* vfree */
153 { &vop_truncate_desc, lfs_truncate }, /* truncate */
154 { &vop_update_desc, lfs_update }, /* update */
155 { &vop_bwrite_desc, lfs_bwrite }, /* bwrite */
156 { &vop_getpages_desc, lfs_getpages }, /* getpages */
157 { &vop_putpages_desc, lfs_putpages }, /* putpages */
158 { NULL, NULL }
159 };
160 const struct vnodeopv_desc lfs_vnodeop_opv_desc =
161 { &lfs_vnodeop_p, lfs_vnodeop_entries };
162
163 int (**lfs_specop_p)(void *);
164 const struct vnodeopv_entry_desc lfs_specop_entries[] = {
165 { &vop_default_desc, vn_default_error },
166 { &vop_lookup_desc, spec_lookup }, /* lookup */
167 { &vop_create_desc, spec_create }, /* create */
168 { &vop_mknod_desc, spec_mknod }, /* mknod */
169 { &vop_open_desc, spec_open }, /* open */
170 { &vop_close_desc, lfsspec_close }, /* close */
171 { &vop_access_desc, ufs_access }, /* access */
172 { &vop_getattr_desc, lfs_getattr }, /* getattr */
173 { &vop_setattr_desc, lfs_setattr }, /* setattr */
174 { &vop_read_desc, ufsspec_read }, /* read */
175 { &vop_write_desc, ufsspec_write }, /* write */
176 { &vop_lease_desc, spec_lease_check }, /* lease */
177 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
178 { &vop_fcntl_desc, ufs_fcntl }, /* fcntl */
179 { &vop_poll_desc, spec_poll }, /* poll */
180 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
181 { &vop_revoke_desc, spec_revoke }, /* revoke */
182 { &vop_mmap_desc, spec_mmap }, /* mmap */
183 { &vop_fsync_desc, spec_fsync }, /* fsync */
184 { &vop_seek_desc, spec_seek }, /* seek */
185 { &vop_remove_desc, spec_remove }, /* remove */
186 { &vop_link_desc, spec_link }, /* link */
187 { &vop_rename_desc, spec_rename }, /* rename */
188 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
189 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
190 { &vop_symlink_desc, spec_symlink }, /* symlink */
191 { &vop_readdir_desc, spec_readdir }, /* readdir */
192 { &vop_readlink_desc, spec_readlink }, /* readlink */
193 { &vop_abortop_desc, spec_abortop }, /* abortop */
194 { &vop_inactive_desc, lfs_inactive }, /* inactive */
195 { &vop_reclaim_desc, lfs_reclaim }, /* reclaim */
196 { &vop_lock_desc, ufs_lock }, /* lock */
197 { &vop_unlock_desc, ufs_unlock }, /* unlock */
198 { &vop_bmap_desc, spec_bmap }, /* bmap */
199 { &vop_strategy_desc, spec_strategy }, /* strategy */
200 { &vop_print_desc, ufs_print }, /* print */
201 { &vop_islocked_desc, ufs_islocked }, /* islocked */
202 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
203 { &vop_advlock_desc, spec_advlock }, /* advlock */
204 { &vop_blkatoff_desc, spec_blkatoff }, /* blkatoff */
205 { &vop_valloc_desc, spec_valloc }, /* valloc */
206 { &vop_vfree_desc, lfs_vfree }, /* vfree */
207 { &vop_truncate_desc, spec_truncate }, /* truncate */
208 { &vop_update_desc, lfs_update }, /* update */
209 { &vop_bwrite_desc, vn_bwrite }, /* bwrite */
210 { &vop_getpages_desc, spec_getpages }, /* getpages */
211 { &vop_putpages_desc, spec_putpages }, /* putpages */
212 { NULL, NULL }
213 };
214 const struct vnodeopv_desc lfs_specop_opv_desc =
215 { &lfs_specop_p, lfs_specop_entries };
216
217 int (**lfs_fifoop_p)(void *);
218 const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
219 { &vop_default_desc, vn_default_error },
220 { &vop_lookup_desc, fifo_lookup }, /* lookup */
221 { &vop_create_desc, fifo_create }, /* create */
222 { &vop_mknod_desc, fifo_mknod }, /* mknod */
223 { &vop_open_desc, fifo_open }, /* open */
224 { &vop_close_desc, lfsfifo_close }, /* close */
225 { &vop_access_desc, ufs_access }, /* access */
226 { &vop_getattr_desc, lfs_getattr }, /* getattr */
227 { &vop_setattr_desc, lfs_setattr }, /* setattr */
228 { &vop_read_desc, ufsfifo_read }, /* read */
229 { &vop_write_desc, ufsfifo_write }, /* write */
230 { &vop_lease_desc, fifo_lease_check }, /* lease */
231 { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
232 { &vop_fcntl_desc, ufs_fcntl }, /* fcntl */
233 { &vop_poll_desc, fifo_poll }, /* poll */
234 { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */
235 { &vop_revoke_desc, fifo_revoke }, /* revoke */
236 { &vop_mmap_desc, fifo_mmap }, /* mmap */
237 { &vop_fsync_desc, fifo_fsync }, /* fsync */
238 { &vop_seek_desc, fifo_seek }, /* seek */
239 { &vop_remove_desc, fifo_remove }, /* remove */
240 { &vop_link_desc, fifo_link }, /* link */
241 { &vop_rename_desc, fifo_rename }, /* rename */
242 { &vop_mkdir_desc, fifo_mkdir }, /* mkdir */
243 { &vop_rmdir_desc, fifo_rmdir }, /* rmdir */
244 { &vop_symlink_desc, fifo_symlink }, /* symlink */
245 { &vop_readdir_desc, fifo_readdir }, /* readdir */
246 { &vop_readlink_desc, fifo_readlink }, /* readlink */
247 { &vop_abortop_desc, fifo_abortop }, /* abortop */
248 { &vop_inactive_desc, lfs_inactive }, /* inactive */
249 { &vop_reclaim_desc, lfs_reclaim }, /* reclaim */
250 { &vop_lock_desc, ufs_lock }, /* lock */
251 { &vop_unlock_desc, ufs_unlock }, /* unlock */
252 { &vop_bmap_desc, fifo_bmap }, /* bmap */
253 { &vop_strategy_desc, fifo_strategy }, /* strategy */
254 { &vop_print_desc, ufs_print }, /* print */
255 { &vop_islocked_desc, ufs_islocked }, /* islocked */
256 { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */
257 { &vop_advlock_desc, fifo_advlock }, /* advlock */
258 { &vop_blkatoff_desc, fifo_blkatoff }, /* blkatoff */
259 { &vop_valloc_desc, fifo_valloc }, /* valloc */
260 { &vop_vfree_desc, lfs_vfree }, /* vfree */
261 { &vop_truncate_desc, fifo_truncate }, /* truncate */
262 { &vop_update_desc, lfs_update }, /* update */
263 { &vop_bwrite_desc, lfs_bwrite }, /* bwrite */
264 { &vop_putpages_desc, fifo_putpages }, /* putpages */
265 { NULL, NULL }
266 };
267 const struct vnodeopv_desc lfs_fifoop_opv_desc =
268 { &lfs_fifoop_p, lfs_fifoop_entries };
269
270 /*
271 * A function version of LFS_ITIMES, for use by the UFS functions that call ITIMES.
272 */
273 void
274 lfs_itimes(struct inode *ip, struct timespec *acc, struct timespec *mod, struct timespec *cre)
275 {
276 LFS_ITIMES(ip, acc, mod, cre);
277 }
278
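/*
 * Pull in the shared UFS read/write code; with LFS_READWRITE defined
 * it compiles to lfs_read() and lfs_write().
 */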
279 #define LFS_READWRITE
280 #include <ufs/ufs/ufs_readwrite.c>
281 #undef LFS_READWRITE
282
283 /*
284 * Synch an open file.
285 */
286 /* ARGSUSED */
287 int
288 lfs_fsync(void *v)
289 {
290 struct vop_fsync_args /* {
291 struct vnode *a_vp;
292 struct ucred *a_cred;
293 int a_flags;
294 off_t a_offlo;
295 off_t a_offhi;
296 struct proc *a_p;
297 } */ *ap = v;
298 struct vnode *vp = ap->a_vp;
299 int error, wait;
300
301 /*
302 * A lazy ("trickle") sync just wakes the writer daemon, which checks
303 * whether a checkpoint is needed after possible pagedaemon activity.
304 */
305 if (ap->a_flags & FSYNC_LAZY) {
306 simple_lock(&lfs_subsys_lock);
307 wakeup(&lfs_writer_daemon);
308 simple_unlock(&lfs_subsys_lock);
309 return 0;
310 }
311
312 wait = (ap->a_flags & FSYNC_WAIT);
313 simple_lock(&vp->v_interlock);
314 error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
315 round_page(ap->a_offhi),
316 PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
317 if (error)
318 return error;
319 error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
320 if (wait && !VPISEMPTY(vp))
321 LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
322
323 return error;
324 }
325
326 /*
327 * Take IN_ADIROP off, then call ufs_inactive.
328 */
329 int
330 lfs_inactive(void *v)
331 {
332 struct vop_inactive_args /* {
333 struct vnode *a_vp;
334 struct proc *a_p;
335 } */ *ap = v;
336
337 KASSERT(VTOI(ap->a_vp)->i_nlink == VTOI(ap->a_vp)->i_ffs_effnlink);
338
339 lfs_unmark_vnode(ap->a_vp);
340
341 /*
342 * The Ifile is only ever inactivated on unmount.
343 * Streamline this process by not giving it more dirty blocks.
344 */
345 if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
346 LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
347 VOP_UNLOCK(ap->a_vp, 0);
348 return 0;
349 }
350
351 return ufs_inactive(v);
352 }
353
354 /*
355 * These macros bracket UFS directory ops, so that we can identify
356 * all the pages touched during a directory op; those pages need to
357 * be ordered and flushed atomically, so that they may be recovered.
358 */
359 /*
360 * XXX KS - Because we have to mark nodes VDIROP in order to prevent
361 * the cache from reclaiming them while a dirop is in progress, we must
362 * also manage the number of nodes so marked (otherwise we can run out).
363 * We do this by setting lfs_dirvcount to the number of marked vnodes; it
364 * is decremented during segment write, when VDIROP is taken off.
365 */
366 #define SET_DIROP(vp) SET_DIROP2((vp), NULL)
367 #define SET_DIROP2(vp, vp2) lfs_set_dirop((vp), (vp2))
368 static int lfs_set_dirop(struct vnode *, struct vnode *);
369
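/*
 * Rough worst-case reservation (in filesystem blocks) for a single
 * directory operation: directory data and indirect blocks, plus inode
 * and Ifile blocks, for up to two vnodes (see the comment in
 * lfs_set_dirop() below).
 */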
370 #define NRESERVE(fs) (btofsb(fs, (NIADDR + 3 + (2 * NIADDR + 3)) << fs->lfs_bshift))
371
372 static int
373 lfs_set_dirop(struct vnode *vp, struct vnode *vp2)
374 {
375 struct lfs *fs;
376 int error;
377
378 KASSERT(VOP_ISLOCKED(vp));
379 KASSERT(vp2 == NULL || VOP_ISLOCKED(vp2));
380
381 fs = VTOI(vp)->i_lfs;
382 /*
383 * We might need one directory block plus supporting indirect blocks,
384 * plus an inode block and ifile page for the new vnode.
385 */
386 if ((error = lfs_reserve(fs, vp, vp2, NRESERVE(fs))) != 0)
387 return (error);
388
389 if (fs->lfs_dirops == 0)
390 lfs_check(vp, LFS_UNUSED_LBN, 0);
391 restart:
392 simple_lock(&fs->lfs_interlock);
393 if (fs->lfs_writer) {
394 ltsleep(&fs->lfs_dirops, (PRIBIO + 1) | PNORELOCK,
395 "lfs_sdirop", 0, &fs->lfs_interlock);
396 goto restart;
397 }
398 simple_lock(&lfs_subsys_lock);
399 if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
400 wakeup(&lfs_writer_daemon);
401 simple_unlock(&lfs_subsys_lock);
402 simple_unlock(&fs->lfs_interlock);
403 preempt(1);
404 goto restart;
405 }
406
407 if (lfs_dirvcount > LFS_MAX_DIROP) {
408 simple_unlock(&fs->lfs_interlock);
409 #ifdef DEBUG_LFS
410 printf("lfs_set_dirop: sleeping with dirops=%d, "
411 "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount);
412 #endif
413 if ((error = ltsleep(&lfs_dirvcount,
414 PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
415 &lfs_subsys_lock)) != 0) {
416 goto unreserve;
417 }
418 goto restart;
419 }
420 simple_unlock(&lfs_subsys_lock);
421
422 ++fs->lfs_dirops;
423 fs->lfs_doifile = 1;
424 simple_unlock(&fs->lfs_interlock);
425
426 /* Hold a reference so SET_ENDOP will be happy */
427 vref(vp);
428 if (vp2)
429 vref(vp2);
430
431 return 0;
432
433 unreserve:
434 lfs_reserve(fs, vp, vp2, -NRESERVE(fs));
435 return error;
436 }
437
438 #define SET_ENDOP(fs, vp, str) SET_ENDOP2((fs), (vp), NULL, (str))
439 #define SET_ENDOP2(fs, vp, vp2, str) { \
440 --(fs)->lfs_dirops; \
441 if (!(fs)->lfs_dirops) { \
442 if ((fs)->lfs_nadirop) { \
443 panic("SET_ENDOP: %s: no dirops but nadirop=%d", \
444 (str), (fs)->lfs_nadirop); \
445 } \
446 wakeup(&(fs)->lfs_writer); \
447 lfs_check((vp),LFS_UNUSED_LBN,0); \
448 } \
449 lfs_reserve((fs), vp, vp2, -NRESERVE(fs)); /* XXX */ \
450 vrele(vp); \
451 if (vp2) \
452 vrele(vp2); \
453 }
454
455 #define MARK_VNODE(vp) lfs_mark_vnode(vp)
456 #define UNMARK_VNODE(vp) lfs_unmark_vnode(vp)
457
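/*
 * Mark a vnode as participating in an active directory op: reference
 * it, set VDIROP (counted in lfs_dirvcount) and IN_ADIROP (counted in
 * lfs_nadirop), and put it on the dirop chain.
 */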
458 void
459 lfs_mark_vnode(struct vnode *vp)
460 {
461 struct inode *ip = VTOI(vp);
462 struct lfs *fs = ip->i_lfs;
463
464 if (!(ip->i_flag & IN_ADIROP)) {
465 if (!(vp->v_flag & VDIROP)) {
466 (void)lfs_vref(vp);
467 ++lfs_dirvcount;
468 TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
469 vp->v_flag |= VDIROP;
470 }
471 ++fs->lfs_nadirop;
472 ip->i_flag |= IN_ADIROP;
473 } else
474 KASSERT(vp->v_flag & VDIROP);
475 }
476
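/*
 * Undo the IN_ADIROP accounting done by lfs_mark_vnode().  VDIROP
 * itself is cleared later, when the segment containing the dirop
 * is written.
 */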
477 void
478 lfs_unmark_vnode(struct vnode *vp)
479 {
480 struct inode *ip = VTOI(vp);
481
482 if (ip->i_flag & IN_ADIROP) {
483 KASSERT(vp->v_flag & VDIROP);
484 --ip->i_lfs->lfs_nadirop;
485 ip->i_flag &= ~IN_ADIROP;
486 }
487 }
488
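/*
 * The directory-op operations below all follow the same pattern:
 * reserve space and enter the dirop region (SET_DIROP/SET_DIROP2),
 * mark the vnodes involved, call the corresponding UFS operation,
 * then unmark the vnodes and leave the dirop region (SET_ENDOP).
 */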
489 int
490 lfs_symlink(void *v)
491 {
492 struct vop_symlink_args /* {
493 struct vnode *a_dvp;
494 struct vnode **a_vpp;
495 struct componentname *a_cnp;
496 struct vattr *a_vap;
497 char *a_target;
498 } */ *ap = v;
499 int error;
500
501 if ((error = SET_DIROP(ap->a_dvp)) != 0) {
502 vput(ap->a_dvp);
503 return error;
504 }
505 MARK_VNODE(ap->a_dvp);
506 error = ufs_symlink(ap);
507 UNMARK_VNODE(ap->a_dvp);
508 if (*(ap->a_vpp))
509 UNMARK_VNODE(*(ap->a_vpp));
510 SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"symlink");
511 return (error);
512 }
513
514 int
515 lfs_mknod(void *v)
516 {
517 struct vop_mknod_args /* {
518 struct vnode *a_dvp;
519 struct vnode **a_vpp;
520 struct componentname *a_cnp;
521 struct vattr *a_vap;
522 } */ *ap = v;
523 struct vattr *vap = ap->a_vap;
524 struct vnode **vpp = ap->a_vpp;
525 struct inode *ip;
526 int error;
527 struct mount *mp;
528 ino_t ino;
529
530 if ((error = SET_DIROP(ap->a_dvp)) != 0) {
531 vput(ap->a_dvp);
532 return error;
533 }
534 MARK_VNODE(ap->a_dvp);
535 error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
536 ap->a_dvp, vpp, ap->a_cnp);
537 UNMARK_VNODE(ap->a_dvp);
538 if (*(ap->a_vpp))
539 UNMARK_VNODE(*(ap->a_vpp));
540
541 /* Either way we're done with the dirop at this point */
542 SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"mknod");
543
544 if (error)
545 return (error);
546
547 ip = VTOI(*vpp);
548 mp = (*vpp)->v_mount;
549 ino = ip->i_number;
550 ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
551 if (vap->va_rdev != VNOVAL) {
552 /*
553 * Want to be able to use this to make badblock
554 * inodes, so don't truncate the dev number.
555 */
556 #if 0
557 ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
558 UFS_MPNEEDSWAP((*vpp)->v_mount));
559 #else
560 ip->i_ffs1_rdev = vap->va_rdev;
561 #endif
562 }
563 /*
564 * Call fsync to write the vnode so that we don't have to deal with
565 * flushing it when it's marked VDIROP|VXLOCK.
566 *
567 * XXX KS - If we can't flush we also can't call vgone(), so must
568 * return. But, that leaves this vnode in limbo, also not good.
569 * Can this ever happen (barring hardware failure)?
570 */
571 if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0,
572 curproc)) != 0) {
573 printf("Couldn't fsync in mknod (ino %d)---what do I do?\n",
574 VTOI(*vpp)->i_number);
575 return (error);
576 }
577 /*
578 * Remove vnode so that it will be reloaded by VFS_VGET and
579 * checked to see if it is an alias of an existing entry in
580 * the inode cache.
581 */
582 /* Used to be vput, but that causes us to call VOP_INACTIVE twice. */
583 VOP_UNLOCK(*vpp, 0);
584 lfs_vunref(*vpp);
585 (*vpp)->v_type = VNON;
586 vgone(*vpp);
587 error = VFS_VGET(mp, ino, vpp);
588 if (error != 0) {
589 *vpp = NULL;
590 return (error);
591 }
592 return (0);
593 }
594
595 int
596 lfs_create(void *v)
597 {
598 struct vop_create_args /* {
599 struct vnode *a_dvp;
600 struct vnode **a_vpp;
601 struct componentname *a_cnp;
602 struct vattr *a_vap;
603 } */ *ap = v;
604 int error;
605
606 if ((error = SET_DIROP(ap->a_dvp)) != 0) {
607 vput(ap->a_dvp);
608 return error;
609 }
610 MARK_VNODE(ap->a_dvp);
611 error = ufs_create(ap);
612 UNMARK_VNODE(ap->a_dvp);
613 if (*(ap->a_vpp))
614 UNMARK_VNODE(*(ap->a_vpp));
615 SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"create");
616 return (error);
617 }
618
619 int
620 lfs_mkdir(void *v)
621 {
622 struct vop_mkdir_args /* {
623 struct vnode *a_dvp;
624 struct vnode **a_vpp;
625 struct componentname *a_cnp;
626 struct vattr *a_vap;
627 } */ *ap = v;
628 int error;
629
630 if ((error = SET_DIROP(ap->a_dvp)) != 0) {
631 vput(ap->a_dvp);
632 return error;
633 }
634 MARK_VNODE(ap->a_dvp);
635 error = ufs_mkdir(ap);
636 UNMARK_VNODE(ap->a_dvp);
637 if (*(ap->a_vpp))
638 UNMARK_VNODE(*(ap->a_vpp));
639 SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"mkdir");
640 return (error);
641 }
642
643 int
644 lfs_remove(void *v)
645 {
646 struct vop_remove_args /* {
647 struct vnode *a_dvp;
648 struct vnode *a_vp;
649 struct componentname *a_cnp;
650 } */ *ap = v;
651 struct vnode *dvp, *vp;
652 int error;
653
654 dvp = ap->a_dvp;
655 vp = ap->a_vp;
656 if ((error = SET_DIROP2(dvp, vp)) != 0) {
657 if (dvp == vp)
658 vrele(vp);
659 else
660 vput(vp);
661 vput(dvp);
662 return error;
663 }
664 MARK_VNODE(dvp);
665 MARK_VNODE(vp);
666 error = ufs_remove(ap);
667 UNMARK_VNODE(dvp);
668 UNMARK_VNODE(vp);
669
670 SET_ENDOP2(VTOI(dvp)->i_lfs, dvp, vp, "remove");
671 return (error);
672 }
673
674 int
675 lfs_rmdir(void *v)
676 {
677 struct vop_rmdir_args /* {
678 struct vnodeop_desc *a_desc;
679 struct vnode *a_dvp;
680 struct vnode *a_vp;
681 struct componentname *a_cnp;
682 } */ *ap = v;
683 struct vnode *vp;
684 int error;
685
686 vp = ap->a_vp;
687 if ((error = SET_DIROP2(ap->a_dvp, ap->a_vp)) != 0) {
688 vrele(ap->a_dvp);
689 if (ap->a_vp != ap->a_dvp)
690 VOP_UNLOCK(ap->a_dvp, 0);
691 vput(vp);
692 return error;
693 }
694 MARK_VNODE(ap->a_dvp);
695 MARK_VNODE(vp);
696 error = ufs_rmdir(ap);
697 UNMARK_VNODE(ap->a_dvp);
698 UNMARK_VNODE(vp);
699
700 SET_ENDOP2(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vp, "rmdir");
701 return (error);
702 }
703
704 int
705 lfs_link(void *v)
706 {
707 struct vop_link_args /* {
708 struct vnode *a_dvp;
709 struct vnode *a_vp;
710 struct componentname *a_cnp;
711 } */ *ap = v;
712 int error;
713
714 if ((error = SET_DIROP(ap->a_dvp)) != 0) {
715 vput(ap->a_dvp);
716 return error;
717 }
718 MARK_VNODE(ap->a_dvp);
719 error = ufs_link(ap);
720 UNMARK_VNODE(ap->a_dvp);
721 SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"link");
722 return (error);
723 }
724
725 int
726 lfs_rename(void *v)
727 {
728 struct vop_rename_args /* {
729 struct vnode *a_fdvp;
730 struct vnode *a_fvp;
731 struct componentname *a_fcnp;
732 struct vnode *a_tdvp;
733 struct vnode *a_tvp;
734 struct componentname *a_tcnp;
735 } */ *ap = v;
736 struct vnode *tvp, *fvp, *tdvp, *fdvp;
737 struct componentname *tcnp, *fcnp;
738 int error;
739 struct lfs *fs;
740
741 fs = VTOI(ap->a_fdvp)->i_lfs;
742 tvp = ap->a_tvp;
743 tdvp = ap->a_tdvp;
744 tcnp = ap->a_tcnp;
745 fvp = ap->a_fvp;
746 fdvp = ap->a_fdvp;
747 fcnp = ap->a_fcnp;
748
749 /*
750 * Check for cross-device rename.
751 * If it is, we don't want to set dirops, just error out.
752 * (In particular, note that MARK_VNODE(tdvp) would do the wrong
753 * thing on a cross-device rename.)
754 *
755 * Copied from ufs_rename.
756 */
757 if ((fvp->v_mount != tdvp->v_mount) ||
758 (tvp && (fvp->v_mount != tvp->v_mount))) {
759 error = EXDEV;
760 goto errout;
761 }
762
763 /*
764 * Check to make sure we're not renaming a vnode onto itself
765 * (deleting a hard link by renaming one name onto another);
766 * if we are we can't recursively call VOP_REMOVE since that
767 * would leave us with an unaccounted-for number of live dirops.
768 *
769 * Inline the relevant section of ufs_rename here, *before*
770 * calling SET_DIROP2.
771 */
772 if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) ||
773 (VTOI(tdvp)->i_flags & APPEND))) {
774 error = EPERM;
775 goto errout;
776 }
777 if (fvp == tvp) {
778 if (fvp->v_type == VDIR) {
779 error = EINVAL;
780 goto errout;
781 }
782
783 /* Release destination completely. */
784 VOP_ABORTOP(tdvp, tcnp);
785 vput(tdvp);
786 vput(tvp);
787
788 /* Delete source. */
789 vrele(fvp);
790 fcnp->cn_flags &= ~(MODMASK | SAVESTART);
791 fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
792 fcnp->cn_nameiop = DELETE;
793 if ((error = relookup(fdvp, &fvp, fcnp))){
794 /* relookup blew away fdvp */
795 return (error);
796 }
797 return (VOP_REMOVE(fdvp, fvp, fcnp));
798 }
799
800 if ((error = SET_DIROP2(tdvp, tvp)) != 0)
801 goto errout;
802 MARK_VNODE(fdvp);
803 MARK_VNODE(tdvp);
804 MARK_VNODE(fvp);
805 if (tvp) {
806 MARK_VNODE(tvp);
807 }
808
809 error = ufs_rename(ap);
810 UNMARK_VNODE(fdvp);
811 UNMARK_VNODE(tdvp);
812 UNMARK_VNODE(fvp);
813 if (tvp) {
814 UNMARK_VNODE(tvp);
815 }
816 SET_ENDOP2(fs, tdvp, tvp, "rename");
817 return (error);
818
819 errout:
820 VOP_ABORTOP(tdvp, ap->a_tcnp); /* XXX, why not in NFS? */
821 if (tdvp == tvp)
822 vrele(tdvp);
823 else
824 vput(tdvp);
825 if (tvp)
826 vput(tvp);
827 VOP_ABORTOP(fdvp, ap->a_fcnp); /* XXX, why not in NFS? */
828 vrele(fdvp);
829 vrele(fvp);
830 return (error);
831 }
832
833 /* XXX hack to avoid calling ITIMES in getattr */
834 int
835 lfs_getattr(void *v)
836 {
837 struct vop_getattr_args /* {
838 struct vnode *a_vp;
839 struct vattr *a_vap;
840 struct ucred *a_cred;
841 struct proc *a_p;
842 } */ *ap = v;
843 struct vnode *vp = ap->a_vp;
844 struct inode *ip = VTOI(vp);
845 struct vattr *vap = ap->a_vap;
846 struct lfs *fs = ip->i_lfs;
847 /*
848 * Copy from inode table
849 */
850 vap->va_fsid = ip->i_dev;
851 vap->va_fileid = ip->i_number;
852 vap->va_mode = ip->i_mode & ~IFMT;
853 vap->va_nlink = ip->i_nlink;
854 vap->va_uid = ip->i_uid;
855 vap->va_gid = ip->i_gid;
856 vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
857 vap->va_size = vp->v_size;
858 vap->va_atime.tv_sec = ip->i_ffs1_atime;
859 vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
860 vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
861 vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
862 vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
863 vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
864 vap->va_flags = ip->i_flags;
865 vap->va_gen = ip->i_gen;
866 /* this doesn't belong here */
867 if (vp->v_type == VBLK)
868 vap->va_blocksize = BLKDEV_IOSIZE;
869 else if (vp->v_type == VCHR)
870 vap->va_blocksize = MAXBSIZE;
871 else
872 vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
873 vap->va_bytes = fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
874 vap->va_type = vp->v_type;
875 vap->va_filerev = ip->i_modrev;
876 return (0);
877 }
878
879 /*
880 * Check to make sure the inode blocks won't choke the buffer
881 * cache, then call ufs_setattr as usual.
882 */
883 int
884 lfs_setattr(void *v)
885 {
886 struct vop_setattr_args /* {
887 struct vnode *a_vp;
888 struct vattr *a_vap;
889 struct ucred *a_cred;
890 struct proc *a_p;
891 } */ *ap = v;
892 struct vnode *vp = ap->a_vp;
893
894 lfs_check(vp, LFS_UNUSED_LBN, 0);
895 return ufs_setattr(v);
896 }
897
898 /*
899 * Close called
900 *
901 * XXX -- we were using ufs_close, but since it updates the
902 * times on the inode, we might also need to bump the uinode
903 * count; hence we use LFS_ITIMES here instead.
904 */
905 /* ARGSUSED */
906 int
907 lfs_close(void *v)
908 {
909 struct vop_close_args /* {
910 struct vnode *a_vp;
911 int a_fflag;
912 struct ucred *a_cred;
913 struct proc *a_p;
914 } */ *ap = v;
915 struct vnode *vp = ap->a_vp;
916 struct inode *ip = VTOI(vp);
917 struct timespec ts;
918
919 if (vp == ip->i_lfs->lfs_ivnode &&
920 vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
921 return 0;
922
923 if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
924 TIMEVAL_TO_TIMESPEC(&time, &ts);
925 LFS_ITIMES(ip, &ts, &ts, &ts);
926 }
927 return (0);
928 }
929
930 /*
931 * Close wrapper for special devices.
932 *
933 * Update the times on the inode then do device close.
934 */
935 int
936 lfsspec_close(void *v)
937 {
938 struct vop_close_args /* {
939 struct vnode *a_vp;
940 int a_fflag;
941 struct ucred *a_cred;
942 struct proc *a_p;
943 } */ *ap = v;
944 struct vnode *vp;
945 struct inode *ip;
946 struct timespec ts;
947
948 vp = ap->a_vp;
949 ip = VTOI(vp);
950 if (vp->v_usecount > 1) {
951 TIMEVAL_TO_TIMESPEC(&time, &ts);
952 LFS_ITIMES(ip, &ts, &ts, &ts);
953 }
954 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
955 }
956
957 /*
958 * Close wrapper for fifo's.
959 *
960 * Update the times on the inode then do device close.
961 */
962 int
963 lfsfifo_close(void *v)
964 {
965 struct vop_close_args /* {
966 struct vnode *a_vp;
967 int a_fflag;
968 struct ucred *a_cred;
969 struct proc *a_p;
970 } */ *ap = v;
971 struct vnode *vp;
972 struct inode *ip;
973 struct timespec ts;
974
975 vp = ap->a_vp;
976 ip = VTOI(vp);
977 if (ap->a_vp->v_usecount > 1) {
978 TIMEVAL_TO_TIMESPEC(&time, &ts);
979 LFS_ITIMES(ip, &ts, &ts, &ts);
980 }
981 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
982 }
983
984 /*
985 * Reclaim an inode so that it can be used for other purposes.
986 */
987
988 int
989 lfs_reclaim(void *v)
990 {
991 struct vop_reclaim_args /* {
992 struct vnode *a_vp;
993 struct proc *a_p;
994 } */ *ap = v;
995 struct vnode *vp = ap->a_vp;
996 struct inode *ip = VTOI(vp);
997 int error;
998
999 KASSERT(ip->i_nlink == ip->i_ffs_effnlink);
1000
1001 LFS_CLR_UINO(ip, IN_ALLMOD);
1002 if ((error = ufs_reclaim(vp, ap->a_p)))
1003 return (error);
1004 pool_put(&lfs_dinode_pool, VTOI(vp)->i_din.ffs1_din);
1005 pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
1006 ip->inode_ext.lfs = NULL;
1007 pool_put(&lfs_inode_pool, vp->v_data);
1008 vp->v_data = NULL;
1009 return (0);
1010 }
1011
1012 /*
1013 * Read a block from a storage device.
1014 * In order to avoid reading blocks that are in the process of being
1015 * written by the cleaner---and hence are not mutexed by the normal
1016 * buffer cache / page cache mechanisms---check for collisions before
1017 * reading.
1018 *
1019 * We inline ufs_strategy to make sure that the VOP_BMAP occurs *before*
1020 * the active cleaner test.
1021 *
1022 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
1023 */
1024 int
1025 lfs_strategy(void *v)
1026 {
1027 struct vop_strategy_args /* {
1028 struct vnode *a_vp;
1029 struct buf *a_bp;
1030 } */ *ap = v;
1031 struct buf *bp;
1032 struct lfs *fs;
1033 struct vnode *vp;
1034 struct inode *ip;
1035 daddr_t tbn;
1036 int i, sn, error, slept;
1037
1038 bp = ap->a_bp;
1039 vp = ap->a_vp;
1040 ip = VTOI(vp);
1041 fs = ip->i_lfs;
1042
1043 /* lfs uses its strategy routine only for read */
1044 KASSERT(bp->b_flags & B_READ);
1045
1046 if (vp->v_type == VBLK || vp->v_type == VCHR)
1047 panic("lfs_strategy: spec");
1048 KASSERT(bp->b_bcount != 0);
1049 if (bp->b_blkno == bp->b_lblkno) {
1050 error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
1051 NULL);
1052 if (error) {
1053 bp->b_error = error;
1054 bp->b_flags |= B_ERROR;
1055 biodone(bp);
1056 return (error);
1057 }
1058 if ((long)bp->b_blkno == -1) /* no valid data */
1059 clrbuf(bp);
1060 }
1061 if ((long)bp->b_blkno < 0) { /* block is not on disk */
1062 biodone(bp);
1063 return (0);
1064 }
1065
1066 slept = 1;
1067 simple_lock(&fs->lfs_interlock);
1068 while (slept && fs->lfs_seglock) {
1069 simple_unlock(&fs->lfs_interlock);
1070 /*
1071 * Look through list of intervals.
1072 * There will only be intervals to look through
1073 * if the cleaner holds the seglock.
1074 * Since the cleaner is synchronous, we can trust
1075 * the list of intervals to be current.
1076 */
1077 tbn = dbtofsb(fs, bp->b_blkno);
1078 sn = dtosn(fs, tbn);
1079 slept = 0;
1080 for (i = 0; i < fs->lfs_cleanind; i++) {
1081 if (sn == dtosn(fs, fs->lfs_cleanint[i]) &&
1082 tbn >= fs->lfs_cleanint[i]) {
1083 #ifdef DEBUG_LFS
1084 printf("lfs_strategy: ino %d lbn %" PRId64
1085 " ind %d sn %d fsb %" PRIx32
1086 " given sn %d fsb %" PRIx64 "\n",
1087 ip->i_number, bp->b_lblkno, i,
1088 dtosn(fs, fs->lfs_cleanint[i]),
1089 fs->lfs_cleanint[i], sn, tbn);
1090 printf("lfs_strategy: sleeping on ino %d lbn %"
1091 PRId64 "\n", ip->i_number, bp->b_lblkno);
1092 #endif
1093 tsleep(&fs->lfs_seglock, PRIBIO+1,
1094 "lfs_strategy", 0);
1095 /* Things may be different now; start over. */
1096 slept = 1;
1097 break;
1098 }
1099 }
1100 simple_lock(&fs->lfs_interlock);
1101 }
1102 simple_unlock(&fs->lfs_interlock);
1103
1104 vp = ip->i_devvp;
1105 VOP_STRATEGY(vp, bp);
1106 return (0);
1107 }
1108
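/*
 * Write blocks and inodes for all vnodes on the dirop chain, as a
 * small checkpoint, so that the directory operations they represent
 * reach the disk together.  File data pages are not flushed here.
 */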
1109 static void
1110 lfs_flush_dirops(struct lfs *fs)
1111 {
1112 struct inode *ip, *nip;
1113 struct vnode *vp;
1114 extern int lfs_dostats;
1115 struct segment *sp;
1116 int needunlock;
1117
1118 if (fs->lfs_ronly)
1119 return;
1120
1121 if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL)
1122 return;
1123
1124 if (lfs_dostats)
1125 ++lfs_stats.flush_invoked;
1126
1127 /*
1128 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
1129 * Technically this is a checkpoint (the on-disk state is valid)
1130 * even though we are leaving out all the file data.
1131 */
1132 lfs_imtime(fs);
1133 lfs_seglock(fs, SEGM_CKP);
1134 sp = fs->lfs_sp;
1135
1136 /*
1137 * lfs_writevnodes, optimized to get dirops out of the way.
1138 * Only write dirops, and don't flush files' pages, only
1139 * blocks from the directories.
1140 *
1141 * We don't need to vref these files because they are
1142 * dirops and so hold an extra reference until the
1143 * segunlock clears them of that status.
1144 *
1145 * We don't need to check for IN_ADIROP because we know that
1146 * no dirops are active.
1147 *
1148 */
1149 for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
1150 nip = TAILQ_NEXT(ip, i_lfs_dchain);
1151 vp = ITOV(ip);
1152
1153 /*
1154 * All writes to directories come from dirops; all
1155 * writes to files' direct blocks go through the page
1156 * cache, which we're not touching. Reads to files
1157 * and/or directories will not be affected by writing
1158 * directory blocks, directory inodes, and file inodes, so we
1159 * don't really need to lock. If we don't lock, though,
1160 * make sure that we don't clear IN_MODIFIED
1161 * unnecessarily.
1162 */
1163 if (vp->v_flag & VXLOCK)
1164 continue;
1165 if (vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE |
1166 LK_NOWAIT) == 0) {
1167 needunlock = 1;
1168 } else {
1169 printf("lfs_flush_dirops: flushing locked ino %d\n",
1170 VTOI(vp)->i_number);
1171 needunlock = 0;
1172 }
1173 if (vp->v_type != VREG &&
1174 ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
1175 lfs_writefile(fs, sp, vp);
1176 if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
1177 !(ip->i_flag & IN_ALLMOD)) {
1178 LFS_SET_UINO(ip, IN_MODIFIED);
1179 }
1180 }
1181 (void) lfs_writeinode(fs, sp, ip);
1182 if (needunlock)
1183 VOP_UNLOCK(vp, 0);
1184 else
1185 LFS_SET_UINO(ip, IN_MODIFIED);
1186 }
1187 /* We've written all the dirops there are */
1188 ((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
1189 (void) lfs_writeseg(fs, sp);
1190 lfs_segunlock(fs);
1191 }
1192
1193 /*
1194 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
1195 */
1196 int
1197 lfs_fcntl(void *v)
1198 {
1199 struct vop_fcntl_args /* {
1200 struct vnode *a_vp;
1201 u_long a_command;
1202 caddr_t a_data;
1203 int a_fflag;
1204 struct ucred *a_cred;
1205 struct proc *a_p;
1206 } */ *ap = v;
1207 struct timeval *tvp;
1208 BLOCK_INFO *blkiov;
1209 CLEANERINFO *cip;
1210 int blkcnt, error, oclean;
1211 struct lfs_fcntl_markv blkvp;
1212 fsid_t *fsidp;
1213 struct lfs *fs;
1214 struct buf *bp;
1215 daddr_t off;
1216
1217 /* Only respect LFS fcntls on fs root or Ifile */
1218 if (VTOI(ap->a_vp)->i_number != ROOTINO &&
1219 VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
1220 return ufs_fcntl(v);
1221 }
1222
1223 /* Avoid locking a draining lock */
1224 if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
1225 return ESHUTDOWN;
1226 }
1227
1228 fs = VTOI(ap->a_vp)->i_lfs;
1229 fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsid;
1230
1231 switch (ap->a_command) {
1232 case LFCNSEGWAITALL:
1233 fsidp = NULL;
1234 /* FALLTHROUGH */
1235 case LFCNSEGWAIT:
1236 tvp = (struct timeval *)ap->a_data;
1237 simple_lock(&fs->lfs_interlock);
1238 ++fs->lfs_sleepers;
1239 simple_unlock(&fs->lfs_interlock);
1240 VOP_UNLOCK(ap->a_vp, 0);
1241
1242 error = lfs_segwait(fsidp, tvp);
1243
1244 VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
1245 simple_lock(&fs->lfs_interlock);
1246 if (--fs->lfs_sleepers == 0)
1247 wakeup(&fs->lfs_sleepers);
1248 simple_unlock(&fs->lfs_interlock);
1249 return error;
1250
1251 case LFCNBMAPV:
1252 case LFCNMARKV:
1253 if ((error = suser(ap->a_p->p_ucred, &ap->a_p->p_acflag)) != 0)
1254 return (error);
1255 blkvp = *(struct lfs_fcntl_markv *)ap->a_data;
1256
1257 blkcnt = blkvp.blkcnt;
1258 if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
1259 return (EINVAL);
1260 blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
1261 if ((error = copyin(blkvp.blkiov, blkiov,
1262 blkcnt * sizeof(BLOCK_INFO))) != 0) {
1263 free(blkiov, M_SEGMENT);
1264 return error;
1265 }
1266
1267 simple_lock(&fs->lfs_interlock);
1268 ++fs->lfs_sleepers;
1269 simple_unlock(&fs->lfs_interlock);
1270 VOP_UNLOCK(ap->a_vp, 0);
1271 if (ap->a_command == LFCNBMAPV)
1272 error = lfs_bmapv(ap->a_p, fsidp, blkiov, blkcnt);
1273 else /* LFCNMARKV */
1274 error = lfs_markv(ap->a_p, fsidp, blkiov, blkcnt);
1275 if (error == 0)
1276 error = copyout(blkiov, blkvp.blkiov,
1277 blkcnt * sizeof(BLOCK_INFO));
1278 VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
1279 simple_lock(&fs->lfs_interlock);
1280 if (--fs->lfs_sleepers == 0)
1281 wakeup(&fs->lfs_sleepers);
1282 simple_unlock(&fs->lfs_interlock);
1283 free(blkiov, M_SEGMENT);
1284 return error;
1285
1286 case LFCNRECLAIM:
1287 /*
1288 * Flush dirops and write Ifile, allowing empty segments
1289 * to be immediately reclaimed.
1290 */
1291 lfs_writer_enter(fs, "pndirop");
1292 off = fs->lfs_offset;
1293 lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
1294 lfs_flush_dirops(fs);
1295 LFS_CLEANERINFO(cip, fs, bp);
1296 oclean = cip->clean;
1297 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
1298 lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
1299 lfs_segunlock(fs);
1300 lfs_writer_leave(fs);
1301
1302 #ifdef DEBUG_LFS
1303 LFS_CLEANERINFO(cip, fs, bp);
1305 printf("lfs_fcntl: reclaim wrote %" PRId64 " blocks, cleaned "
1306 "%" PRId32 " segments (activesb %d)\n",
1307 fs->lfs_offset - off, cip->clean - oclean,
1308 fs->lfs_activesb);
1309 LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
1310 #endif
1311
1312 return 0;
1313
1314 default:
1315 return ufs_fcntl(v);
1316 }
1317 return 0;
1318 }
1319
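/*
 * getpages: disallow write access to Ifile pages, mark the inode
 * IN_MODIFIED when pages are requested for writing, and otherwise
 * defer to genfs_getpages().
 */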
1320 int
1321 lfs_getpages(void *v)
1322 {
1323 struct vop_getpages_args /* {
1324 struct vnode *a_vp;
1325 voff_t a_offset;
1326 struct vm_page **a_m;
1327 int *a_count;
1328 int a_centeridx;
1329 vm_prot_t a_access_type;
1330 int a_advice;
1331 int a_flags;
1332 } */ *ap = v;
1333
1334 if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM &&
1335 (ap->a_access_type & VM_PROT_WRITE) != 0) {
1336 return EPERM;
1337 }
1338 if ((ap->a_access_type & VM_PROT_WRITE) != 0) {
1339 LFS_SET_UINO(VTOI(ap->a_vp), IN_MODIFIED);
1340 }
1341
1342 /*
1343 * We rely on the fact that genfs_getpages() always reads in
1344 * entire filesystem blocks.
1345 */
1346 return genfs_getpages(v);
1347 }
1348
1349 /*
1350 * Make sure that for all pages in every block in the given range,
1351 * either all are dirty or all are clean. If any of the pages
1352 * we've seen so far are dirty, put the vnode on the paging chain,
1353 * and mark it IN_PAGING.
1354 *
1355 * If checkfirst != 0, don't check all the pages but return at the
1356 * first dirty page.
1357 */
1358 static int
1359 check_dirty(struct lfs *fs, struct vnode *vp,
1360 off_t startoffset, off_t endoffset, off_t blkeof,
1361 int flags, int checkfirst)
1362 {
1363 int by_list;
1364 struct vm_page *curpg = NULL; /* XXX: gcc */
1365 struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
1366 struct lwp *l = curlwp ? curlwp : &lwp0;
1367 off_t soff = 0; /* XXX: gcc */
1368 voff_t off;
1369 int i;
1370 int nonexistent;
1371 int any_dirty; /* number of dirty pages */
1372 int dirty; /* number of dirty pages in a block */
1373 int tdirty;
1374 int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;
1375
1376 top:
1377 by_list = (vp->v_uobj.uo_npages <=
1378 ((endoffset - startoffset) >> PAGE_SHIFT) *
1379 UVM_PAGE_HASH_PENALTY);
1380 any_dirty = 0;
1381
1382 if (by_list) {
1383 curpg = TAILQ_FIRST(&vp->v_uobj.memq);
1384 PHOLD(l);
1385 } else {
1386 soff = startoffset;
1387 }
1388 while (by_list || soff < MIN(blkeof, endoffset)) {
1389 if (by_list) {
1390 /*
1391 * find the first page in a block.
1392 */
1393 if (pages_per_block > 1) {
1394 while (curpg && (curpg->offset & fs->lfs_bmask))
1395 curpg = TAILQ_NEXT(curpg, listq);
1396 }
1397 if (curpg == NULL)
1398 break;
1399 soff = curpg->offset;
1400 }
1401
1402 /*
1403 * Mark all pages in extended range busy; find out if any
1404 * of them are dirty.
1405 */
1406 nonexistent = dirty = 0;
1407 for (i = 0; i == 0 || i < pages_per_block; i++) {
1408 if (by_list && pages_per_block <= 1) {
1409 pgs[i] = pg = curpg;
1410 } else {
1411 off = soff + (i << PAGE_SHIFT);
1412 pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
1413 if (pg == NULL) {
1414 ++nonexistent;
1415 continue;
1416 }
1417 }
1418 KASSERT(pg != NULL);
1419 while (pg->flags & PG_BUSY) {
1420 pg->flags |= PG_WANTED;
1421 UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
1422 "lfsput", 0);
1423 simple_lock(&vp->v_interlock);
1424 if (by_list) {
1425 if (i > 0)
1426 uvm_page_unbusy(pgs, i);
1427 goto top;
1428 }
1429 }
1430 pg->flags |= PG_BUSY;
1431 UVM_PAGE_OWN(pg, "lfs_putpages");
1432
1433 pmap_page_protect(pg, VM_PROT_NONE);
1434 tdirty = (pmap_clear_modify(pg) ||
1435 (pg->flags & PG_CLEAN) == 0);
1436 dirty += tdirty;
1437 }
1438 if (pages_per_block > 0 && nonexistent >= pages_per_block) {
1439 if (by_list) {
1440 curpg = TAILQ_NEXT(curpg, listq);
1441 } else {
1442 soff += fs->lfs_bsize;
1443 }
1444 continue;
1445 }
1446
1447 any_dirty += dirty;
1448 KASSERT(nonexistent == 0);
1449
1450 /*
1451 * If any are dirty, make them all dirty; unbusy them,
1452 * but if we were asked to clean, wire them so that
1453 * the pagedaemon doesn't bother us about them while
1454 * they're on their way to disk.
1455 */
1456 for (i = 0; i == 0 || i < pages_per_block; i++) {
1457 pg = pgs[i];
1458 KASSERT(!((pg->flags & PG_CLEAN) && (pg->flags & PG_DELWRI)));
1459 if (dirty) {
1460 pg->flags &= ~PG_CLEAN;
1461 if (flags & PGO_FREE) {
1462 /* XXXUBC need better way to update */
1463 simple_lock(&lfs_subsys_lock);
1464 lfs_subsys_pages += MIN(1, pages_per_block);
1465 simple_unlock(&lfs_subsys_lock);
1466 /*
1467 * Wire the page so that
1468 * pdaemon doesn't see it again.
1469 */
1470 uvm_lock_pageq();
1471 uvm_pagewire(pg);
1472 uvm_unlock_pageq();
1473
1474 /* Suspended write flag */
1475 pg->flags |= PG_DELWRI;
1476 }
1477 }
1478 if (pg->flags & PG_WANTED)
1479 wakeup(pg);
1480 pg->flags &= ~(PG_WANTED|PG_BUSY);
1481 UVM_PAGE_OWN(pg, NULL);
1482 }
1483
1484 if (checkfirst && any_dirty)
1485 return any_dirty;
1486
1487 if (by_list) {
1488 curpg = TAILQ_NEXT(curpg, listq);
1489 } else {
1490 soff += MAX(PAGE_SIZE, fs->lfs_bsize);
1491 }
1492 }
1493 if (by_list) {
1494 PRELE(l);
1495 }
1496
1497 /*
1498 * If any pages were dirty, mark this inode as "pageout requested",
1499 * and put it on the paging queue.
1500 * XXXUBC locking (check locking on dchainhd too)
1501 */
1502 #ifdef notyet
1503 if (any_dirty) {
1504 if (!(ip->i_flag & IN_PAGING)) {
1505 ip->i_flag |= IN_PAGING;
1506 TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
1507 }
1508 }
1509 #endif
1510 return any_dirty;
1511 }
1512
1513 /*
1514 * lfs_putpages functions like genfs_putpages except that
1515 *
1516 * (1) It needs to bounds-check the incoming requests to ensure that
1517 * they are block-aligned; if they are not, expand the range and
1518 * do the right thing in case, e.g., the requested range is clean
1519 * but the expanded range is dirty.
1520 * (2) It needs to explicitly send blocks to be written when it is done.
1521 * VOP_PUTPAGES is not ever called with the seglock held, so
1522 * we simply take the seglock and let lfs_segunlock wait for us.
1523 * XXX Actually we can be called with the seglock held, if we have
1524 * XXX to flush a vnode while lfs_markv is in operation. As of this
1525 * XXX writing we panic in this case.
1526 *
1527 * Assumptions:
1528 *
1529 * (1) The caller does not hold any pages in this vnode busy. If it does,
1530 * there is a danger that when we expand the page range and busy the
1531 * pages we will deadlock.
1532 * (2) We are called with vp->v_interlock held; we must return with it
1533 * released.
1534 * (3) We don't absolutely have to free pages right away, provided that
1535 * the request does not have PGO_SYNCIO. When the pagedaemon gives
1536 * us a request with PGO_FREE, we take the pages out of the paging
1537 * queue and wake up the writer, which will handle freeing them for us.
1538 *
1539 * We ensure that for any filesystem block, all pages for that
1540 * block are either resident or not, even if those pages are higher
1541 * than EOF; that means that we will be getting requests to free
1542 * "unused" pages above EOF all the time, and should ignore them.
1543 *
1544 * XXX note that we're (ab)using PGO_LOCKED as "seglock held".
1545 */
1546
1547 int
1548 lfs_putpages(void *v)
1549 {
1550 int error;
1551 struct vop_putpages_args /* {
1552 struct vnode *a_vp;
1553 voff_t a_offlo;
1554 voff_t a_offhi;
1555 int a_flags;
1556 } */ *ap = v;
1557 struct vnode *vp;
1558 struct inode *ip;
1559 struct lfs *fs;
1560 struct segment *sp;
1561 off_t origoffset, startoffset, endoffset, origendoffset, blkeof;
1562 off_t off, max_endoffset;
1563 int s;
1564 boolean_t seglocked, sync, pagedaemon;
1565 struct vm_page *pg;
1566 UVMHIST_FUNC("lfs_putpages"); UVMHIST_CALLED(ubchist);
1567
1568 vp = ap->a_vp;
1569 ip = VTOI(vp);
1570 fs = ip->i_lfs;
1571 sync = (ap->a_flags & PGO_SYNCIO) != 0;
1572 pagedaemon = (curproc == uvm.pagedaemon_proc);
1573
1574 /* Putpages does nothing for metadata. */
1575 if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
1576 simple_unlock(&vp->v_interlock);
1577 return 0;
1578 }
1579
1580 /*
1581 * If there are no pages, don't do anything.
1582 */
1583 if (vp->v_uobj.uo_npages == 0) {
1584 s = splbio();
1585 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
1586 (vp->v_flag & VONWORKLST)) {
1587 vp->v_flag &= ~VONWORKLST;
1588 LIST_REMOVE(vp, v_synclist);
1589 }
1590 splx(s);
1591 simple_unlock(&vp->v_interlock);
1592 return 0;
1593 }
1594
1595 blkeof = blkroundup(fs, ip->i_size);
1596
1597 /*
1598 * Ignore requests to free pages past EOF but in the same block
1599 * as EOF, unless the request is synchronous. (XXX why sync?)
1600 * XXXUBC Make these pages look "active" so the pagedaemon won't
1601 * XXXUBC bother us with them again.
1602 */
1603 if (!sync && ap->a_offlo >= ip->i_size && ap->a_offlo < blkeof) {
1604 origoffset = ap->a_offlo;
1605 for (off = origoffset; off < blkeof; off += fs->lfs_bsize) {
1606 pg = uvm_pagelookup(&vp->v_uobj, off);
1607 KASSERT(pg != NULL);
1608 while (pg->flags & PG_BUSY) {
1609 pg->flags |= PG_WANTED;
1610 UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
1611 "lfsput2", 0);
1612 simple_lock(&vp->v_interlock);
1613 }
1614 uvm_lock_pageq();
1615 uvm_pageactivate(pg);
1616 uvm_unlock_pageq();
1617 }
1618 ap->a_offlo = blkeof;
1619 if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
1620 simple_unlock(&vp->v_interlock);
1621 return 0;
1622 }
1623 }
1624
1625 /*
1626 * Extend page range to start and end at block boundaries.
1627 * (For the purposes of VOP_PUTPAGES, fragments don't exist.)
1628 */
1629 origoffset = ap->a_offlo;
1630 origendoffset = ap->a_offhi;
1631 startoffset = origoffset & ~(fs->lfs_bmask);
1632 max_endoffset = (trunc_page(LLONG_MAX) >> fs->lfs_bshift)
1633 << fs->lfs_bshift;
1634
1635 if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
1636 endoffset = max_endoffset;
1637 origendoffset = endoffset;
1638 } else {
1639 origendoffset = round_page(ap->a_offhi);
1640 endoffset = round_page(blkroundup(fs, origendoffset));
1641 }
1642
1643 KASSERT(startoffset > 0 || endoffset >= startoffset);
1644 if (startoffset == endoffset) {
1645 /* Nothing to do, why were we called? */
1646 simple_unlock(&vp->v_interlock);
1647 #ifdef DEBUG
1648 printf("lfs_putpages: startoffset = endoffset = %" PRId64 "\n",
1649 startoffset);
1650 #endif
1651 return 0;
1652 }
1653
1654 ap->a_offlo = startoffset;
1655 ap->a_offhi = endoffset;
1656
1657 if (!(ap->a_flags & PGO_CLEANIT))
1658 return genfs_putpages(v);
1659
1660 /*
1661 * If there is more than one page per block, we don't want
1662 * to get caught locking them backwards; so set PGO_BUSYFAIL
1663 * to avoid deadlocks.
1664 */
1665 ap->a_flags |= PGO_BUSYFAIL;
1666
1667 do {
1668 int r;
1669
1670 /* If no pages are dirty, we can just use genfs_putpages. */
1671 if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
1672 ap->a_flags, 1) != 0)
1673 break;
1674
1675 if ((r = genfs_putpages(v)) != EDEADLK)
1676 return r;
1677
1678 /* Start over. */
1679 preempt(1);
1680 simple_lock(&vp->v_interlock);
1681 } while(1);
1682
1683 /*
1684 * Dirty and asked to clean.
1685 *
1686 * Pagedaemon can't actually write LFS pages; wake up
1687 * the writer to take care of that. The writer will
1688 * notice the pager inode queue and act on that.
1689 */
1690 if (pagedaemon) {
1691 ++fs->lfs_pdflush;
1692 wakeup(&lfs_writer_daemon);
1693 simple_unlock(&vp->v_interlock);
1694 return EWOULDBLOCK;
1695 }
1696
1697 /*
1698 * If this is a file created in a recent dirop, we can't flush its
1699 * inode until the dirop is complete. Drain dirops, then flush the
1700 * filesystem (taking care of any other pending dirops while we're
1701 * at it).
1702 */
1703 if ((ap->a_flags & (PGO_CLEANIT|PGO_LOCKED)) == PGO_CLEANIT &&
1704 (vp->v_flag & VDIROP)) {
1705 int locked;
1706
1707 /* printf("putpages to clean VDIROP, flushing\n"); */
1708 lfs_writer_enter(fs, "ppdirop");
1709 locked = VOP_ISLOCKED(vp) && /* XXX */
1710 vp->v_lock.lk_lockholder == curproc->p_pid;
1711 if (locked)
1712 VOP_UNLOCK(vp, 0);
1713 simple_unlock(&vp->v_interlock);
1714
1715 lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
1716
1717 simple_lock(&vp->v_interlock);
1718 if (locked)
1719 VOP_LOCK(vp, LK_EXCLUSIVE);
1720 lfs_writer_leave(fs);
1721
1722 /* XXX the flush should have taken care of this one too! */
1723 }
1724
1725 /*
1726 * This is it. We are going to write some pages. From here on
1727 * down it's all just mechanics.
1728 *
1729 * Don't let genfs_putpages wait; lfs_segunlock will wait for us.
1730 */
1731 ap->a_flags &= ~PGO_SYNCIO;
1732
1733 /*
1734 * If we've already got the seglock, flush the node and return.
1735 * The FIP has already been set up for us by lfs_writefile,
1736 * and FIP cleanup and lfs_updatemeta will also be done there,
1737 * unless genfs_putpages returns EDEADLK; then we must flush
1738 * what we have, and correct FIP and segment header accounting.
1739 */
1740
1741 seglocked = (ap->a_flags & PGO_LOCKED) != 0;
1742 if (!seglocked) {
1743 simple_unlock(&vp->v_interlock);
1744 /*
1745 * Take the seglock, because we are going to be writing pages.
1746 */
1747 error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
1748 if (error != 0)
1749 return error;
1750 simple_lock(&vp->v_interlock);
1751 }
1752
1753 /*
1754 * VOP_PUTPAGES should not be called while holding the seglock.
1755 * XXXUBC fix lfs_markv, or do this properly.
1756 */
1757 /* KASSERT(fs->lfs_seglock == 1); */
1758
1759 /*
1760 * We assume we're being called with sp->fip pointing at blank space.
1761 * Account for a new FIP in the segment header, and set sp->vp.
1762 * (This should duplicate the setup at the top of lfs_writefile().)
1763 */
1764 sp = fs->lfs_sp;
1765 if (!seglocked) {
1766 if (sp->seg_bytes_left < fs->lfs_bsize ||
1767 sp->sum_bytes_left < sizeof(struct finfo))
1768 (void) lfs_writeseg(fs, fs->lfs_sp);
1769
1770 sp->sum_bytes_left -= FINFOSIZE;
1771 ++((SEGSUM *)(sp->segsum))->ss_nfinfo;
1772 }
1773 KASSERT(sp->vp == NULL);
1774 sp->vp = vp;
1775
1776 if (!seglocked) {
1777 if (vp->v_flag & VDIROP)
1778 ((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
1779 }
1780
1781 sp->fip->fi_nblocks = 0;
1782 sp->fip->fi_ino = ip->i_number;
1783 sp->fip->fi_version = ip->i_gen;
1784
1785 /*
1786 * Loop through genfs_putpages until all pages are gathered.
1787 * genfs_putpages() drops the interlock, so reacquire it if necessary.
1788 * Whenever we lose the interlock we have to rerun check_dirty, as
1789 * well.
1790 */
1791 again:
1792 check_dirty(fs, vp, startoffset, endoffset, blkeof, ap->a_flags, 0);
1793
1794 if ((error = genfs_putpages(v)) == EDEADLK) {
1795 #ifdef DEBUG_LFS
1796 printf("lfs_putpages: genfs_putpages returned EDEADLK [2]"
1797 " ino %d off %x (seg %d)\n",
1798 ip->i_number, fs->lfs_offset,
1799 dtosn(fs, fs->lfs_offset));
1800 #endif
1801 /* If nothing to write, short-circuit */
1802 if (sp->cbpp - sp->bpp > 1) {
1803 /* Write gathered pages */
1804 lfs_updatemeta(sp);
1805 (void) lfs_writeseg(fs, sp);
1806
1807 /*
1808 * Reinitialize brand new FIP and add us to it.
1809 * (This should duplicate the fixup in
1810 * lfs_gatherpages().)
1811 */
1812 KASSERT(sp->vp == vp);
1813 sp->fip->fi_version = ip->i_gen;
1814 sp->fip->fi_ino = ip->i_number;
1815 /* Add us to the new segment summary. */
1816 ++((SEGSUM *)(sp->segsum))->ss_nfinfo;
1817 sp->sum_bytes_left -= FINFOSIZE;
1818 }
1819
1820 /* Give the write a chance to complete */
1821 preempt(1);
1822
1823 /* We've lost the interlock. Start over. */
1824 simple_lock(&vp->v_interlock);
1825 goto again;
1826 }
1827
1828 KASSERT(sp->vp == vp);
1829 if (!seglocked) {
1830 sp->vp = NULL; /* XXX lfs_gather below will set this */
1831
1832 /* Write indirect blocks as well */
1833 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
1834 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
1835 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);
1836
1837 KASSERT(sp->vp == NULL);
1838 sp->vp = vp;
1839 }
1840
1841 /*
1842 * Blocks are now gathered into a segment waiting to be written.
1843 * All that's left to do is update metadata, and write them.
1844 */
1845 lfs_updatemeta(sp);
1846 KASSERT(sp->vp == vp);
1847 sp->vp = NULL;
1848
1849 if (seglocked) {
1850 /* we're called by lfs_writefile. */
1851 return error;
1852 }
1853
1854 /*
1855 * Clean up FIP, since we're done writing this file.
1856 * This should duplicate cleanup at the end of lfs_writefile().
1857 */
1858 if (sp->fip->fi_nblocks != 0) {
1859 sp->fip = (FINFO*)((caddr_t)sp->fip + FINFOSIZE +
1860 sizeof(int32_t) * sp->fip->fi_nblocks);
1861 sp->start_lbp = &sp->fip->fi_blocks[0];
1862 } else {
1863 sp->sum_bytes_left += FINFOSIZE;
1864 --((SEGSUM *)(sp->segsum))->ss_nfinfo;
1865 }
1866 lfs_writeseg(fs, fs->lfs_sp);
1867
1868 /*
1869 * XXX - with the malloc/copy writeseg, the pages are freed by now
1870 * even if we don't wait (e.g. if we hold a nested lock). This
1871 * will not be true if we stop using malloc/copy.
1872 */
1873 KASSERT(fs->lfs_sp->seg_flags & SEGM_PROT);
1874 lfs_segunlock(fs);
1875
1876 /*
1877 * Wait for v_numoutput to drop to zero. The seglock should
1878 * take care of this, but there is a slight possibility that
1879 * aiodoned might not have got around to our buffers yet.
1880 */
1881 if (sync) {
1882 int s;
1883
1884 s = splbio();
1885 simple_lock(&global_v_numoutput_slock);
1886 while (vp->v_numoutput > 0) {
1887 #ifdef DEBUG
1888 printf("ino %d sleeping on num %d\n",
1889 ip->i_number, vp->v_numoutput);
1890 #endif
1891 vp->v_flag |= VBWAIT;
1892 ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vn", 0,
1893 &global_v_numoutput_slock);
1894 }
1895 simple_unlock(&global_v_numoutput_slock);
1896 splx(s);
1897 }
1898 return error;
1899 }
1900
1901 /*
1902 * Return the last logical file offset that should be written for this file
1903 * if we're doing a write that ends at "size". If writing, we need to know
1904 * about sizes on disk, i.e. fragments if there are any; if reading, we need
1905 * to know about entire blocks.
1906 */
1907 void
1908 lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
1909 {
1910 struct inode *ip = VTOI(vp);
1911 struct lfs *fs = ip->i_lfs;
1912 daddr_t olbn, nlbn;
1913
1914 KASSERT(flags & (GOP_SIZE_READ | GOP_SIZE_WRITE));
1915 KASSERT((flags & (GOP_SIZE_READ | GOP_SIZE_WRITE))
1916 != (GOP_SIZE_READ | GOP_SIZE_WRITE));
1917
1918 olbn = lblkno(fs, ip->i_size);
1919 nlbn = lblkno(fs, size);
1920 if (!(flags & GOP_SIZE_MEM) && nlbn < NDADDR && olbn <= nlbn) {
1921 *eobp = fragroundup(fs, size);
1922 } else {
1923 *eobp = blkroundup(fs, size);
1924 }
1925 }
1926
1927 #ifdef DEBUG
1928 void lfs_dump_vop(void *);
1929
1930 void
1931 lfs_dump_vop(void *v)
1932 {
1933 struct vop_putpages_args /* {
1934 struct vnode *a_vp;
1935 voff_t a_offlo;
1936 voff_t a_offhi;
1937 int a_flags;
1938 } */ *ap = v;
1939
1940 #ifdef DDB
1941 vfs_vnode_print(ap->a_vp, 0, printf);
1942 #endif
1943 lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
1944 }
1945 #endif
1946
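/*
 * mmap of the Ifile is not supported; everything else is handled
 * by ufs_mmap().
 */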
1947 int
1948 lfs_mmap(void *v)
1949 {
1950 struct vop_mmap_args /* {
1951 const struct vnodeop_desc *a_desc;
1952 struct vnode *a_vp;
1953 int a_fflags;
1954 struct ucred *a_cred;
1955 struct proc *a_p;
1956 } */ *ap = v;
1957
1958 if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
1959 return EOPNOTSUPP;
1960 return ufs_mmap(v);
1961 }