1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_quota.h"
38 #include "opt_ufs.h"
39 #include "opt_ffs.h"
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/gsb_crc32.h>
44 #include <sys/systm.h>
45 #include <sys/namei.h>
46 #include <sys/priv.h>
47 #include <sys/proc.h>
48 #include <sys/taskqueue.h>
49 #include <sys/kernel.h>
50 #include <sys/ktr.h>
51 #include <sys/vnode.h>
52 #include <sys/mount.h>
53 #include <sys/bio.h>
54 #include <sys/buf.h>
55 #include <sys/conf.h>
56 #include <sys/fcntl.h>
57 #include <sys/ioccom.h>
58 #include <sys/malloc.h>
59 #include <sys/mutex.h>
60 #include <sys/rwlock.h>
61 #include <sys/sysctl.h>
62 #include <sys/vmmeter.h>
63
64 #include <security/mac/mac_framework.h>
65
66 #include <ufs/ufs/dir.h>
67 #include <ufs/ufs/extattr.h>
68 #include <ufs/ufs/gjournal.h>
69 #include <ufs/ufs/quota.h>
70 #include <ufs/ufs/ufsmount.h>
71 #include <ufs/ufs/inode.h>
72 #include <ufs/ufs/ufs_extern.h>
73
74 #include <ufs/ffs/fs.h>
75 #include <ufs/ffs/ffs_extern.h>
76
77 #include <vm/vm.h>
78 #include <vm/uma.h>
79 #include <vm/vm_page.h>
80
81 #include <geom/geom.h>
82 #include <geom/geom_vfs.h>
83
84 #include <ddb/ddb.h>
85
86 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
87 VFS_SMR_DECLARE;
88
89 static int ffs_mountfs(struct vnode *, struct mount *, struct thread *);
90 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *,
91 ufs2_daddr_t);
92 static void ffs_ifree(struct ufsmount *ump, struct inode *ip);
93 static int ffs_sync_lazy(struct mount *mp);
94 static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
95 static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);
96
97 static vfs_init_t ffs_init;
98 static vfs_uninit_t ffs_uninit;
99 static vfs_extattrctl_t ffs_extattrctl;
100 static vfs_cmount_t ffs_cmount;
101 static vfs_unmount_t ffs_unmount;
102 static vfs_mount_t ffs_mount;
103 static vfs_statfs_t ffs_statfs;
104 static vfs_fhtovp_t ffs_fhtovp;
105 static vfs_sync_t ffs_sync;
106
107 static struct vfsops ufs_vfsops = {
108 .vfs_extattrctl = ffs_extattrctl,
109 .vfs_fhtovp = ffs_fhtovp,
110 .vfs_init = ffs_init,
111 .vfs_mount = ffs_mount,
112 .vfs_cmount = ffs_cmount,
113 .vfs_quotactl = ufs_quotactl,
114 .vfs_root = vfs_cache_root,
115 .vfs_cachedroot = ufs_root,
116 .vfs_statfs = ffs_statfs,
117 .vfs_sync = ffs_sync,
118 .vfs_uninit = ffs_uninit,
119 .vfs_unmount = ffs_unmount,
120 .vfs_vget = ffs_vget,
121 .vfs_susp_clean = process_deferred_inactive,
122 };
123
124 VFS_SET(ufs_vfsops, ufs, 0);
125 MODULE_VERSION(ufs, 1);
126
127 static b_strategy_t ffs_geom_strategy;
128 static b_write_t ffs_bufwrite;
129
130 static struct buf_ops ffs_ops = {
131 .bop_name = "FFS",
132 .bop_write = ffs_bufwrite,
133 .bop_strategy = ffs_geom_strategy,
134 .bop_sync = bufsync,
135 #ifdef NO_FFS_SNAPSHOT
136 .bop_bdflush = bufbdflush,
137 #else
138 .bop_bdflush = ffs_bdflush,
139 #endif
140 };
141
142 /*
143 * Note that userquota and groupquota options are not currently used
144 * by UFS/FFS code and generally mount(8) does not pass those options
145 * from userland, but they can be passed by loader(8) via
146 * vfs.root.mountfrom.options.
147 */
148 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
149 "noclusterw", "noexec", "export", "force", "from", "groupquota",
150 "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
151 "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };
152
153 static int ffs_enxio_enable = 1;
154 SYSCTL_DECL(_vfs_ffs);
155 SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
156 &ffs_enxio_enable, 0,
157 "enable mapping of other disk I/O errors to ENXIO");
158
159 /*
160 * Return a buffer with the contents of block "offset" from the beginning of
161 * directory "vp". If "res" is non-NULL, fill it in with a pointer to the
162 * remaining space in the directory.
163 */
164 static int
165 ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
166 {
167 struct inode *ip;
168 struct fs *fs;
169 struct buf *bp;
170 ufs_lbn_t lbn;
171 int bsize, error;
172
173 ip = VTOI(vp);
174 fs = ITOFS(ip);
175 lbn = lblkno(fs, offset);
176 bsize = blksize(fs, ip, lbn);
177
178 *bpp = NULL;
179 error = bread(vp, lbn, bsize, NOCRED, &bp);
180 if (error) {
181 return (error);
182 }
183 if (res)
184 *res = (char *)bp->b_data + blkoff(fs, offset);
185 *bpp = bp;
186 return (0);
187 }
188
189 /*
190 * Load up the contents of an inode and copy the appropriate pieces
191 * to the incore copy.
192 */
193 static int
194 ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
195 {
196 struct ufs1_dinode *dip1;
197 struct ufs2_dinode *dip2;
198 int error;
199
200 if (I_IS_UFS1(ip)) {
201 dip1 = ip->i_din1;
202 *dip1 =
203 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
204 ip->i_mode = dip1->di_mode;
205 ip->i_nlink = dip1->di_nlink;
206 ip->i_effnlink = dip1->di_nlink;
207 ip->i_size = dip1->di_size;
208 ip->i_flags = dip1->di_flags;
209 ip->i_gen = dip1->di_gen;
210 ip->i_uid = dip1->di_uid;
211 ip->i_gid = dip1->di_gid;
212 return (0);
213 }
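	/*
	 * UFS2: verify the inode check-hash before copying in the on-disk
	 * inode, unless a forcible unmount is already cleaning up after an
	 * I/O failure.
	 */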
214 dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
215 if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
216 !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
217 printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
218 (intmax_t)ino);
219 return (error);
220 }
221 *ip->i_din2 = *dip2;
222 dip2 = ip->i_din2;
223 ip->i_mode = dip2->di_mode;
224 ip->i_nlink = dip2->di_nlink;
225 ip->i_effnlink = dip2->di_nlink;
226 ip->i_size = dip2->di_size;
227 ip->i_flags = dip2->di_flags;
228 ip->i_gen = dip2->di_gen;
229 ip->i_uid = dip2->di_uid;
230 ip->i_gid = dip2->di_gid;
231 return (0);
232 }
233
234 /*
235 * Verify that a filesystem block number is a valid data block.
236 * This routine is only called on untrusted filesystems.
237 */
238 static int
239 ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
240 {
241 struct fs *fs;
242 struct ufsmount *ump;
243 ufs2_daddr_t end_daddr;
244 int cg, havemtx;
245
246 KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
247 ("ffs_check_blkno called on a trusted file system"));
248 ump = VFSTOUFS(mp);
249 fs = ump->um_fs;
250 cg = dtog(fs, daddr);
251 end_daddr = daddr + numfrags(fs, blksize);
252 /*
253 * Verify that the block number is a valid data block. Also check
254 * that it does not point to an inode block or a superblock. Accept
255 * blocks that are unallocated (0) or part of snapshot metadata
256 * (BLK_NOCOPY or BLK_SNAP).
257 *
258 * Thus, the block must be in a valid range for the filesystem and
259 * either in the space before a backup superblock (except the first
260 * cylinder group where that space is used by the bootstrap code) or
261 * after the inode blocks and before the end of the cylinder group.
262 */
263 if ((uint64_t)daddr <= BLK_SNAP ||
264 ((uint64_t)end_daddr <= fs->fs_size &&
265 ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
266 (daddr >= cgdmin(fs, cg) &&
267 end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
268 return (0);
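	/*
	 * The block is out of range.  Rate-limit the diagnostic so a
	 * corrupted filesystem cannot flood the terminal, then fail
	 * with EINTEGRITY.
	 */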
269 if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
270 UFS_LOCK(ump);
271 if (ppsratecheck(&ump->um_last_integritymsg,
272 &ump->um_secs_integritymsg, 1)) {
273 UFS_UNLOCK(ump);
274 uprintf("\n%s: inode %jd, out-of-range indirect block "
275 "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
276 if (havemtx)
277 UFS_LOCK(ump);
278 } else if (!havemtx)
279 UFS_UNLOCK(ump);
280 return (EINTEGRITY);
281 }
282
283 /*
284 * On first ENXIO error, initiate an asynchronous forcible unmount.
285 * Used to unmount filesystems whose underlying media has gone away.
286 *
287 * Return true if a cleanup is in progress.
288 */
289 int
290 ffs_fsfail_cleanup(struct ufsmount *ump, int error)
291 {
292 int retval;
293
294 UFS_LOCK(ump);
295 retval = ffs_fsfail_cleanup_locked(ump, error);
296 UFS_UNLOCK(ump);
297 return (retval);
298 }
299
300 int
301 ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
302 {
303 mtx_assert(UFS_MTX(ump), MA_OWNED);
304 if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
305 ump->um_flags |= UM_FSFAIL_CLEANUP;
306 /*
307 * Queue an async forced unmount.
308 */
309 vfs_ref(ump->um_mountp);
310 dounmount(ump->um_mountp,
311 MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
312 printf("UFS: forcibly unmounting %s from %s\n",
313 ump->um_mountp->mnt_stat.f_mntfromname,
314 ump->um_mountp->mnt_stat.f_mntonname);
315 }
316 return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
317 }
318
319 /*
320 * Wrapper used during ENXIO cleanup to allocate empty buffers when
321 * the kernel is unable to read the real ones. They are needed so that
322 * the soft updates code can use them to unwind its dependencies.
323 */
324 int
325 ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
326 daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
327 struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
328 struct buf **bpp)
329 {
330 int error;
331
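	/*
	 * GB_CVTENXIO asks the buffer layer to convert I/O errors into
	 * ENXIO so that a failing read starts the forced-unmount cleanup
	 * handled below.
	 */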
332 flags |= GB_CVTENXIO;
333 error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
334 cred, flags, ckhashfunc, bpp);
335 if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
336 error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
337 KASSERT(error == 0, ("getblkx failed"));
338 vfs_bio_bzero_buf(*bpp, 0, size);
339 }
340 return (error);
341 }
342
343 static int
344 ffs_mount(struct mount *mp)
345 {
346 struct vnode *devvp, *odevvp;
347 struct thread *td;
348 struct ufsmount *ump = NULL;
349 struct fs *fs;
350 int error, flags;
351 int error1 __diagused;
352 uint64_t mntorflags, saved_mnt_flag;
353 accmode_t accmode;
354 struct nameidata ndp;
355 char *fspec;
356 bool mounted_softdep;
357
358 td = curthread;
359 if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
360 return (EINVAL);
361 if (uma_inode == NULL) {
362 uma_inode = uma_zcreate("FFS inode",
363 sizeof(struct inode), NULL, NULL, NULL, NULL,
364 UMA_ALIGN_PTR, 0);
365 uma_ufs1 = uma_zcreate("FFS1 dinode",
366 sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
367 UMA_ALIGN_PTR, 0);
368 uma_ufs2 = uma_zcreate("FFS2 dinode",
369 sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
370 UMA_ALIGN_PTR, 0);
371 VFS_SMR_ZONE_SET(uma_inode);
372 }
373
374 vfs_deleteopt(mp->mnt_optnew, "groupquota");
375 vfs_deleteopt(mp->mnt_optnew, "userquota");
376
377 fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
378 if (error)
379 return (error);
380
381 mntorflags = 0;
382 if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
383 mntorflags |= MNT_UNTRUSTED;
384
385 if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
386 mntorflags |= MNT_ACLS;
387
388 if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
389 mntorflags |= MNT_SNAPSHOT;
390 /*
391 * Once we have set the MNT_SNAPSHOT flag, do not
392 * persist "snapshot" in the options list.
393 */
394 vfs_deleteopt(mp->mnt_optnew, "snapshot");
395 vfs_deleteopt(mp->mnt_opt, "snapshot");
396 }
397
398 if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
399 if (mntorflags & MNT_ACLS) {
400 vfs_mount_error(mp,
401 "\"acls\" and \"nfsv4acls\" options "
402 "are mutually exclusive");
403 return (EINVAL);
404 }
405 mntorflags |= MNT_NFS4ACLS;
406 }
407
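	/*
	 * Disable lockless (fast path) lookups while the mount options are
	 * changing; MNTK_FPLOOKUP is recomputed at the end of this function
	 * once the final set of flags is known.
	 */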
408 MNT_ILOCK(mp);
409 mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
410 mp->mnt_flag |= mntorflags;
411 MNT_IUNLOCK(mp);
412
413 /*
414 * If this is a snapshot request, take the snapshot.
415 */
416 if (mp->mnt_flag & MNT_SNAPSHOT) {
417 if ((mp->mnt_flag & MNT_UPDATE) == 0)
418 return (EINVAL);
419 return (ffs_snapshot(mp, fspec));
420 }
421
422 /*
423 * Must not call namei() while owning busy ref.
424 */
425 if (mp->mnt_flag & MNT_UPDATE)
426 vfs_unbusy(mp);
427
428 /*
429 * Not an update, or updating the name: look up the name
430 * and verify that it refers to a sensible disk device.
431 */
432 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
433 error = namei(&ndp);
434 if ((mp->mnt_flag & MNT_UPDATE) != 0) {
435 /*
436 * Unmount does not start if MNT_UPDATE is set. Mount
437 * update busies mp before setting MNT_UPDATE. We
438 * must be able to retain our busy ref successfully,
439 * without sleep.
440 */
441 error1 = vfs_busy(mp, MBF_NOWAIT);
442 MPASS(error1 == 0);
443 }
444 if (error != 0)
445 return (error);
446 NDFREE_PNBUF(&ndp);
447 if (!vn_isdisk_error(ndp.ni_vp, &error)) {
448 vput(ndp.ni_vp);
449 return (error);
450 }
451
452 /*
453 * If mount by non-root, then verify that user has necessary
454 * permissions on the device.
455 */
456 accmode = VREAD;
457 if ((mp->mnt_flag & MNT_RDONLY) == 0)
458 accmode |= VWRITE;
459 error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
460 if (error)
461 error = priv_check(td, PRIV_VFS_MOUNT_PERM);
462 if (error) {
463 vput(ndp.ni_vp);
464 return (error);
465 }
466
467 /*
468 * New mount
469 *
470 * We need the name for the mount point (also used for
471 * "last mounted on") copied in. If an error occurs,
472 * the mount point is discarded by the upper level code.
473 * Note that vfs_mount_alloc() populates f_mntonname for us.
474 */
475 if ((mp->mnt_flag & MNT_UPDATE) == 0) {
476 if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
477 vrele(ndp.ni_vp);
478 return (error);
479 }
480 } else {
481 /*
482 * When updating, check whether changing from read-only to
483 * read/write; if there is no device name, that's all we do.
484 */
485 ump = VFSTOUFS(mp);
486 fs = ump->um_fs;
487 odevvp = ump->um_odevvp;
488 devvp = ump->um_devvp;
489
490 /*
491 * It must be the same vnode, or at least refer to the same
492 * device; otherwise the update request is invalid.
493 */
494 if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
495 error = EINVAL; /* needs translation */
496 vput(ndp.ni_vp);
497 if (error)
498 return (error);
499 if (fs->fs_ronly == 0 &&
500 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
501 /*
502 * Flush any dirty data and suspend filesystem.
503 */
504 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
505 return (error);
506 error = vfs_write_suspend_umnt(mp);
507 if (error != 0)
508 return (error);
509
510 fs->fs_ronly = 1;
511 if (MOUNTEDSOFTDEP(mp)) {
512 MNT_ILOCK(mp);
513 mp->mnt_flag &= ~MNT_SOFTDEP;
514 MNT_IUNLOCK(mp);
515 mounted_softdep = true;
516 } else
517 mounted_softdep = false;
518
519 /*
520 * Check for and optionally get rid of files open
521 * for writing.
522 */
523 flags = WRITECLOSE;
524 if (mp->mnt_flag & MNT_FORCE)
525 flags |= FORCECLOSE;
526 if (mounted_softdep) {
527 error = softdep_flushfiles(mp, flags, td);
528 } else {
529 error = ffs_flushfiles(mp, flags, td);
530 }
531 if (error) {
532 fs->fs_ronly = 0;
533 if (mounted_softdep) {
534 MNT_ILOCK(mp);
535 mp->mnt_flag |= MNT_SOFTDEP;
536 MNT_IUNLOCK(mp);
537 }
538 vfs_write_resume(mp, 0);
539 return (error);
540 }
541
542 if (fs->fs_pendingblocks != 0 ||
543 fs->fs_pendinginodes != 0) {
544 printf("WARNING: %s Update error: blocks %jd "
545 "files %d\n", fs->fs_fsmnt,
546 (intmax_t)fs->fs_pendingblocks,
547 fs->fs_pendinginodes);
548 fs->fs_pendingblocks = 0;
549 fs->fs_pendinginodes = 0;
550 }
551 if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
552 fs->fs_clean = 1;
553 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
554 fs->fs_ronly = 0;
555 fs->fs_clean = 0;
556 if (mounted_softdep) {
557 MNT_ILOCK(mp);
558 mp->mnt_flag |= MNT_SOFTDEP;
559 MNT_IUNLOCK(mp);
560 }
561 vfs_write_resume(mp, 0);
562 return (error);
563 }
564 if (mounted_softdep)
565 softdep_unmount(mp);
566 g_topology_lock();
567 /*
568 * Drop our write and exclusive access.
569 */
570 g_access(ump->um_cp, 0, -1, -1);
571 g_topology_unlock();
572 MNT_ILOCK(mp);
573 mp->mnt_flag |= MNT_RDONLY;
574 MNT_IUNLOCK(mp);
575 /*
576 * Allow waiting writers to resume; they will observe that
577 * the filesystem is now read-only.
578 */
579 vfs_write_resume(mp, 0);
580 }
581 if ((mp->mnt_flag & MNT_RELOAD) &&
582 (error = ffs_reload(mp, 0)) != 0)
583 return (error);
584 if (fs->fs_ronly &&
585 !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
586 /*
587 * If upgrade to read-write by non-root, then verify
588 * that user has necessary permissions on the device.
589 */
590 vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
591 error = VOP_ACCESS(odevvp, VREAD | VWRITE,
592 td->td_ucred, td);
593 if (error)
594 error = priv_check(td, PRIV_VFS_MOUNT_PERM);
595 VOP_UNLOCK(odevvp);
596 if (error) {
597 return (error);
598 }
599 fs->fs_flags &= ~FS_UNCLEAN;
600 if (fs->fs_clean == 0) {
601 fs->fs_flags |= FS_UNCLEAN;
602 if ((mp->mnt_flag & MNT_FORCE) ||
603 ((fs->fs_flags &
604 (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
605 (fs->fs_flags & FS_DOSOFTDEP))) {
606 printf("WARNING: %s was not properly "
607 "dismounted\n",
608 mp->mnt_stat.f_mntonname);
609 } else {
610 vfs_mount_error(mp,
611 "R/W mount of %s denied. %s.%s",
612 mp->mnt_stat.f_mntonname,
613 "Filesystem is not clean - run fsck",
614 (fs->fs_flags & FS_SUJ) == 0 ? "" :
615 " Forced mount will invalidate"
616 " journal contents");
617 return (EPERM);
618 }
619 }
620 g_topology_lock();
621 /*
622 * Request exclusive write access.
623 */
624 error = g_access(ump->um_cp, 0, 1, 1);
625 g_topology_unlock();
626 if (error)
627 return (error);
628 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
629 return (error);
630 error = vfs_write_suspend_umnt(mp);
631 if (error != 0)
632 return (error);
633 fs->fs_ronly = 0;
634 MNT_ILOCK(mp);
635 saved_mnt_flag = MNT_RDONLY;
636 if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
637 MNT_ASYNC) != 0)
638 saved_mnt_flag |= MNT_ASYNC;
639 mp->mnt_flag &= ~saved_mnt_flag;
640 MNT_IUNLOCK(mp);
641 fs->fs_mtime = time_second;
642 /* check to see if we need to start softdep */
643 if ((fs->fs_flags & FS_DOSOFTDEP) &&
644 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
645 fs->fs_ronly = 1;
646 MNT_ILOCK(mp);
647 mp->mnt_flag |= saved_mnt_flag;
648 MNT_IUNLOCK(mp);
649 vfs_write_resume(mp, 0);
650 return (error);
651 }
652 fs->fs_clean = 0;
653 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
654 fs->fs_ronly = 1;
655 if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
656 softdep_unmount(mp);
657 MNT_ILOCK(mp);
658 mp->mnt_flag |= saved_mnt_flag;
659 MNT_IUNLOCK(mp);
660 vfs_write_resume(mp, 0);
661 return (error);
662 }
663 if (fs->fs_snapinum[0] != 0)
664 ffs_snapshot_mount(mp);
665 vfs_write_resume(mp, 0);
666 }
667 /*
668 * Soft updates is incompatible with "async",
669 * so if we are doing softupdates stop the user
670 * from setting the async flag in an update.
671 * Softdep_mount() clears it in an initial mount
672 * or ro->rw remount.
673 */
674 if (MOUNTEDSOFTDEP(mp)) {
675 /* XXX: Reset too late ? */
676 MNT_ILOCK(mp);
677 mp->mnt_flag &= ~MNT_ASYNC;
678 MNT_IUNLOCK(mp);
679 }
680 /*
681 * Keep MNT_ACLS flag if it is stored in superblock.
682 */
683 if ((fs->fs_flags & FS_ACLS) != 0) {
684 /* XXX: Set too late ? */
685 MNT_ILOCK(mp);
686 mp->mnt_flag |= MNT_ACLS;
687 MNT_IUNLOCK(mp);
688 }
689
690 if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
691 /* XXX: Set too late ? */
692 MNT_ILOCK(mp);
693 mp->mnt_flag |= MNT_NFS4ACLS;
694 MNT_IUNLOCK(mp);
695 }
696
697 }
698
699 MNT_ILOCK(mp);
700 /*
701 * This is racy versus lookup, see ufs_fplookup_vexec for details.
702 */
703 if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
704 panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
705 if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
706 mp->mnt_kern_flag |= MNTK_FPLOOKUP;
707 MNT_IUNLOCK(mp);
708
709 vfs_mountedfrom(mp, fspec);
710 return (0);
711 }
712
713 /*
714 * Compatibility with old mount system call.
715 */
716
717 static int
718 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
719 {
720 struct ufs_args args;
721 int error;
722
723 if (data == NULL)
724 return (EINVAL);
725 error = copyin(data, &args, sizeof args);
726 if (error)
727 return (error);
728
729 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
730 ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
731 error = kernel_mount(ma, flags);
732
733 return (error);
734 }
735
736 /*
737 * Reload all incore data for a filesystem (used after running fsck on
738 * the root filesystem and finding things to fix). If the 'force' flag
739 * is 0, the filesystem must be mounted read-only.
740 *
741 * Things to do to update the mount:
742 * 1) invalidate all cached meta-data.
743 * 2) re-read superblock from disk.
744 * 3) re-read summary information from disk.
745 * 4) invalidate all inactive vnodes.
746 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
747 * writers, if requested.
748 * 6) invalidate all cached file data.
749 * 7) re-read inode data for all active vnodes.
750 */
751 int
752 ffs_reload(struct mount *mp, int flags)
753 {
754 struct vnode *vp, *mvp, *devvp;
755 struct inode *ip;
756 void *space;
757 struct buf *bp;
758 struct fs *fs, *newfs;
759 struct ufsmount *ump;
760 ufs2_daddr_t sblockloc;
761 int i, blks, error;
762 u_long size;
763 int32_t *lp;
764
765 ump = VFSTOUFS(mp);
766
767 MNT_ILOCK(mp);
768 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
769 MNT_IUNLOCK(mp);
770 return (EINVAL);
771 }
772 MNT_IUNLOCK(mp);
773
774 /*
775 * Step 1: invalidate all cached meta-data.
776 */
777 devvp = VFSTOUFS(mp)->um_devvp;
778 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
779 if (vinvalbuf(devvp, 0, 0, 0) != 0)
780 panic("ffs_reload: dirty1");
781 VOP_UNLOCK(devvp);
782
783 /*
784 * Step 2: re-read superblock from disk.
785 */
786 fs = VFSTOUFS(mp)->um_fs;
787 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
788 NOCRED, &bp)) != 0)
789 return (error);
790 newfs = (struct fs *)bp->b_data;
791 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
792 newfs->fs_magic != FS_UFS2_MAGIC) ||
793 newfs->fs_bsize > MAXBSIZE ||
794 newfs->fs_bsize < sizeof(struct fs)) {
795 brelse(bp);
796 return (EIO); /* XXX needs translation */
797 }
798 /*
799 * Preserve the summary information, read-only status, and
800 * superblock location by copying these fields into our new
801 * superblock before using it to update the existing superblock.
802 */
803 newfs->fs_si = fs->fs_si;
804 newfs->fs_ronly = fs->fs_ronly;
805 sblockloc = fs->fs_sblockloc;
806 bcopy(newfs, fs, (u_int)fs->fs_sbsize);
807 brelse(bp);
808 ump->um_bsize = fs->fs_bsize;
809 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
810 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
811 UFS_LOCK(ump);
812 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
813 printf("WARNING: %s: reload pending error: blocks %jd "
814 "files %d\n", mp->mnt_stat.f_mntonname,
815 (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
816 fs->fs_pendingblocks = 0;
817 fs->fs_pendinginodes = 0;
818 }
819 UFS_UNLOCK(ump);
820
821 /*
822 * Step 3: re-read summary information from disk.
823 */
824 size = fs->fs_cssize;
825 blks = howmany(size, fs->fs_fsize);
826 if (fs->fs_contigsumsize > 0)
827 size += fs->fs_ncg * sizeof(int32_t);
828 size += fs->fs_ncg * sizeof(u_int8_t);
829 free(fs->fs_csp, M_UFSMNT);
830 space = malloc(size, M_UFSMNT, M_WAITOK);
831 fs->fs_csp = space;
832 for (i = 0; i < blks; i += fs->fs_frag) {
833 size = fs->fs_bsize;
834 if (i + fs->fs_frag > blks)
835 size = (blks - i) * fs->fs_fsize;
836 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
837 NOCRED, &bp);
838 if (error)
839 return (error);
840 bcopy(bp->b_data, space, (u_int)size);
841 space = (char *)space + size;
842 brelse(bp);
843 }
844 /*
845 * We no longer know anything about clusters per cylinder group.
846 */
847 if (fs->fs_contigsumsize > 0) {
848 fs->fs_maxcluster = lp = space;
849 for (i = 0; i < fs->fs_ncg; i++)
850 *lp++ = fs->fs_contigsumsize;
851 space = lp;
852 }
853 size = fs->fs_ncg * sizeof(u_int8_t);
854 fs->fs_contigdirs = (u_int8_t *)space;
855 bzero(fs->fs_contigdirs, size);
856 if ((flags & FFSR_UNSUSPEND) != 0) {
857 MNT_ILOCK(mp);
858 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
859 wakeup(&mp->mnt_flag);
860 MNT_IUNLOCK(mp);
861 }
862
863 loop:
864 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
865 /*
866 * Skip syncer vnode.
867 */
868 if (vp->v_type == VNON) {
869 VI_UNLOCK(vp);
870 continue;
871 }
872 /*
873 * Step 6: invalidate all cached file data.
874 */
875 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
876 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
877 goto loop;
878 }
879 if (vinvalbuf(vp, 0, 0, 0))
880 panic("ffs_reload: dirty2");
881 /*
882 * Step 7: re-read inode data for all active vnodes.
883 */
884 ip = VTOI(vp);
885 error =
886 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
887 (int)fs->fs_bsize, NOCRED, &bp);
888 if (error) {
889 vput(vp);
890 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
891 return (error);
892 }
893 if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
894 brelse(bp);
895 vput(vp);
896 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
897 return (error);
898 }
899 ip->i_effnlink = ip->i_nlink;
900 brelse(bp);
901 vput(vp);
902 }
903 return (0);
904 }
905
906 /*
907 * Common code for mount and mountroot
908 */
909 static int
910 ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
911 {
912 struct ufsmount *ump;
913 struct fs *fs;
914 struct cdev *dev;
915 int error, i, len, ronly;
916 struct ucred *cred;
917 struct g_consumer *cp;
918 struct mount *nmp;
919 struct vnode *devvp;
920 int candelete, canspeedup;
921
922 fs = NULL;
923 ump = NULL;
924 cred = td ? td->td_ucred : NOCRED;
925 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
926
927 devvp = mntfs_allocvp(mp, odevvp);
928 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
929 dev = devvp->v_rdev;
930 KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
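	/*
	 * Atomically record this mount in the device's si_mountpt; if the
	 * device is already claimed by another mount, fail with EBUSY.
	 */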
931 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
932 (uintptr_t)mp) == 0) {
933 mntfs_freevp(devvp);
934 return (EBUSY);
935 }
936 g_topology_lock();
937 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
938 g_topology_unlock();
939 if (error != 0) {
940 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
941 mntfs_freevp(devvp);
942 return (error);
943 }
944 dev_ref(dev);
945 devvp->v_bufobj.bo_ops = &ffs_ops;
946 BO_LOCK(&odevvp->v_bufobj);
947 odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
948 BO_UNLOCK(&odevvp->v_bufobj);
949 VOP_UNLOCK(devvp);
950 if (dev->si_iosize_max != 0)
951 mp->mnt_iosize_max = dev->si_iosize_max;
952 if (mp->mnt_iosize_max > maxphys)
953 mp->mnt_iosize_max = maxphys;
954 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
955 error = EINVAL;
956 vfs_mount_error(mp,
957 "Invalid sectorsize %d for superblock size %d",
958 cp->provider->sectorsize, SBLOCKSIZE);
959 goto out;
960 }
961 /* fetch the superblock and summary information */
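	/*
	 * Root and forced mounts fall back to searching for a usable backup
	 * superblock; all other mounts accept only the standard superblock
	 * location.
	 */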
962 if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
963 error = ffs_sbsearch(devvp, &fs, 0, M_UFSMNT, ffs_use_bread);
964 else
965 error = ffs_sbget(devvp, &fs, UFS_STDSB, 0, M_UFSMNT,
966 ffs_use_bread);
967 if (error != 0)
968 goto out;
969 fs->fs_flags &= ~FS_UNCLEAN;
970 if (fs->fs_clean == 0) {
971 fs->fs_flags |= FS_UNCLEAN;
972 if (ronly || (mp->mnt_flag & MNT_FORCE) ||
973 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
974 (fs->fs_flags & FS_DOSOFTDEP))) {
975 printf("WARNING: %s was not properly dismounted\n",
976 mp->mnt_stat.f_mntonname);
977 } else {
978 vfs_mount_error(mp, "R/W mount on %s denied. "
979 "Filesystem is not clean - run fsck.%s",
980 mp->mnt_stat.f_mntonname,
981 (fs->fs_flags & FS_SUJ) == 0 ? "" :
982 " Forced mount will invalidate journal contents");
983 error = EPERM;
984 goto out;
985 }
986 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
987 (mp->mnt_flag & MNT_FORCE)) {
988 printf("WARNING: %s: lost blocks %jd files %d\n",
989 mp->mnt_stat.f_mntonname,
990 (intmax_t)fs->fs_pendingblocks,
991 fs->fs_pendinginodes);
992 fs->fs_pendingblocks = 0;
993 fs->fs_pendinginodes = 0;
994 }
995 }
996 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
997 printf("WARNING: %s: mount pending error: blocks %jd "
998 "files %d\n", mp->mnt_stat.f_mntonname,
999 (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes);
1000 fs->fs_pendingblocks = 0;
1001 fs->fs_pendinginodes = 0;
1002 }
1003 if ((fs->fs_flags & FS_GJOURNAL) != 0) {
1004 #ifdef UFS_GJOURNAL
1005 /*
1006 * Get journal provider name.
1007 */
1008 len = 1024;
1009 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK);
1010 if (g_io_getattr("GJOURNAL::provider", cp, &len,
1011 mp->mnt_gjprovider) == 0) {
1012 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
1013 M_UFSMNT, M_WAITOK);
1014 MNT_ILOCK(mp);
1015 mp->mnt_flag |= MNT_GJOURNAL;
1016 MNT_IUNLOCK(mp);
1017 } else {
1018 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1019 printf("WARNING: %s: GJOURNAL flag on fs "
1020 "but no gjournal provider below\n",
1021 mp->mnt_stat.f_mntonname);
1022 free(mp->mnt_gjprovider, M_UFSMNT);
1023 mp->mnt_gjprovider = NULL;
1024 }
1025 #else
1026 printf("WARNING: %s: GJOURNAL flag on fs but no "
1027 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
1028 #endif
1029 } else {
1030 mp->mnt_gjprovider = NULL;
1031 }
1032 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
1033 ump->um_cp = cp;
1034 ump->um_bo = &devvp->v_bufobj;
1035 ump->um_fs = fs;
1036 if (fs->fs_magic == FS_UFS1_MAGIC) {
1037 ump->um_fstype = UFS1;
1038 ump->um_balloc = ffs_balloc_ufs1;
1039 } else {
1040 ump->um_fstype = UFS2;
1041 ump->um_balloc = ffs_balloc_ufs2;
1042 }
1043 ump->um_blkatoff = ffs_blkatoff;
1044 ump->um_truncate = ffs_truncate;
1045 ump->um_update = ffs_update;
1046 ump->um_valloc = ffs_valloc;
1047 ump->um_vfree = ffs_vfree;
1048 ump->um_ifree = ffs_ifree;
1049 ump->um_rdonly = ffs_rdonly;
1050 ump->um_snapgone = ffs_snapgone;
1051 if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
1052 ump->um_check_blkno = ffs_check_blkno;
1053 else
1054 ump->um_check_blkno = NULL;
1055 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
1056 sx_init(&ump->um_checkpath_lock, "uchpth");
1057 ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
1058 fs->fs_ronly = ronly;
1059 fs->fs_active = NULL;
1060 mp->mnt_data = ump;
1061 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
1062 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
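	/*
	 * If the filesystem has never been assigned an id, or its id
	 * collides with that of an already mounted filesystem, generate
	 * a fresh fsid.
	 */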
1063 nmp = NULL;
1064 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
1065 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
1066 if (nmp)
1067 vfs_rel(nmp);
1068 vfs_getnewfsid(mp);
1069 }
1070 ump->um_bsize = fs->fs_bsize;
1071 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1072 MNT_ILOCK(mp);
1073 mp->mnt_flag |= MNT_LOCAL;
1074 MNT_IUNLOCK(mp);
1075 if ((fs->fs_flags & FS_MULTILABEL) != 0) {
1076 #ifdef MAC
1077 MNT_ILOCK(mp);
1078 mp->mnt_flag |= MNT_MULTILABEL;
1079 MNT_IUNLOCK(mp);
1080 #else
1081 printf("WARNING: %s: multilabel flag on fs but "
1082 "no MAC support\n", mp->mnt_stat.f_mntonname);
1083 #endif
1084 }
1085 if ((fs->fs_flags & FS_ACLS) != 0) {
1086 #ifdef UFS_ACL
1087 MNT_ILOCK(mp);
1088
1089 if (mp->mnt_flag & MNT_NFS4ACLS)
1090 printf("WARNING: %s: ACLs flag on fs conflicts with "
1091 "\"nfsv4acls\" mount option; option ignored\n",
1092 mp->mnt_stat.f_mntonname);
1093 mp->mnt_flag &= ~MNT_NFS4ACLS;
1094 mp->mnt_flag |= MNT_ACLS;
1095
1096 MNT_IUNLOCK(mp);
1097 #else
1098 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
1099 mp->mnt_stat.f_mntonname);
1100 #endif
1101 }
1102 if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
1103 #ifdef UFS_ACL
1104 MNT_ILOCK(mp);
1105
1106 if (mp->mnt_flag & MNT_ACLS)
1107 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
1108 "with \"acls\" mount option; option ignored\n",
1109 mp->mnt_stat.f_mntonname);
1110 mp->mnt_flag &= ~MNT_ACLS;
1111 mp->mnt_flag |= MNT_NFS4ACLS;
1112
1113 MNT_IUNLOCK(mp);
1114 #else
1115 printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
1116 "ACLs support\n", mp->mnt_stat.f_mntonname);
1117 #endif
1118 }
1119 if ((fs->fs_flags & FS_TRIM) != 0) {
1120 len = sizeof(int);
1121 if (g_io_getattr("GEOM::candelete", cp, &len,
1122 &candelete) == 0) {
1123 if (candelete)
1124 ump->um_flags |= UM_CANDELETE;
1125 else
1126 printf("WARNING: %s: TRIM flag on fs but disk "
1127 "does not support TRIM\n",
1128 mp->mnt_stat.f_mntonname);
1129 } else {
1130 printf("WARNING: %s: TRIM flag on fs but disk does "
1131 "not confirm that it supports TRIM\n",
1132 mp->mnt_stat.f_mntonname);
1133 }
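		/*
		 * TRIM requests are issued from a per-mount taskqueue and
		 * tracked in a hash table so they can complete asynchronously.
		 */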
1134 if (((ump->um_flags) & UM_CANDELETE) != 0) {
1135 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
1136 taskqueue_thread_enqueue, &ump->um_trim_tq);
1137 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
1138 "%s trim", mp->mnt_stat.f_mntonname);
1139 ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
1140 &ump->um_trimlisthashsize);
1141 }
1142 }
1143
1144 len = sizeof(int);
1145 if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
1146 if (canspeedup)
1147 ump->um_flags |= UM_CANSPEEDUP;
1148 }
1149
1150 ump->um_mountp = mp;
1151 ump->um_dev = dev;
1152 ump->um_devvp = devvp;
1153 ump->um_odevvp = odevvp;
1154 ump->um_nindir = fs->fs_nindir;
1155 ump->um_bptrtodb = fs->fs_fsbtodb;
1156 ump->um_seqinc = fs->fs_frag;
1157 for (i = 0; i < MAXQUOTAS; i++)
1158 ump->um_quotas[i] = NULLVP;
1159 #ifdef UFS_EXTATTR
1160 ufs_extattr_uepm_init(&ump->um_extattr);
1161 #endif
1162 /*
1163 * Set FS local "last mounted on" information (NULL pad)
1164 */
1165 bzero(fs->fs_fsmnt, MAXMNTLEN);
1166 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
1167 mp->mnt_stat.f_iosize = fs->fs_bsize;
1168
1169 if (mp->mnt_flag & MNT_ROOTFS) {
1170 /*
1171 * Root mount; update timestamp in mount structure.
1172 * This will be used by the common root mount code
1173 * to update the system clock.
1174 */
1175 mp->mnt_time = fs->fs_time;
1176 }
1177
1178 if (ronly == 0) {
1179 fs->fs_mtime = time_second;
1180 if ((fs->fs_flags & FS_DOSOFTDEP) &&
1181 (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
1182 ffs_flushfiles(mp, FORCECLOSE, td);
1183 goto out;
1184 }
1185 if (fs->fs_snapinum[0] != 0)
1186 ffs_snapshot_mount(mp);
1187 fs->fs_fmod = 1;
1188 fs->fs_clean = 0;
1189 (void) ffs_sbupdate(ump, MNT_WAIT, 0);
1190 }
1191 /*
1192 * Initialize filesystem state information in mount struct.
1193 */
1194 MNT_ILOCK(mp);
1195 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
1196 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
1197 MNT_IUNLOCK(mp);
1198 #ifdef UFS_EXTATTR
1199 #ifdef UFS_EXTATTR_AUTOSTART
1200 /*
1201 *
1202 * Auto-starting does the following:
1203 * - check for /.attribute in the fs, and extattr_start if so
1204 * - for each file in .attribute, enable that file with
1205 * an attribute of the same name.
1206 * Not clear how to report errors -- probably eat them.
1207 * This would all happen while the filesystem was busy/not
1208 * available, so would effectively be "atomic".
1209 */
1210 (void) ufs_extattr_autostart(mp, td);
1211 #endif /* !UFS_EXTATTR_AUTOSTART */
1212 #endif /* !UFS_EXTATTR */
1213 return (0);
1214 out:
1215 if (fs != NULL) {
1216 free(fs->fs_csp, M_UFSMNT);
1217 free(fs->fs_si, M_UFSMNT);
1218 free(fs, M_UFSMNT);
1219 }
1220 if (cp != NULL) {
1221 g_topology_lock();
1222 g_vfs_close(cp);
1223 g_topology_unlock();
1224 }
1225 if (ump != NULL) {
1226 mtx_destroy(UFS_MTX(ump));
1227 sx_destroy(&ump->um_checkpath_lock);
1228 if (mp->mnt_gjprovider != NULL) {
1229 free(mp->mnt_gjprovider, M_UFSMNT);
1230 mp->mnt_gjprovider = NULL;
1231 }
1232 MPASS(ump->um_softdep == NULL);
1233 free(ump, M_UFSMNT);
1234 mp->mnt_data = NULL;
1235 }
1236 BO_LOCK(&odevvp->v_bufobj);
1237 odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1238 BO_UNLOCK(&odevvp->v_bufobj);
1239 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
1240 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1241 mntfs_freevp(devvp);
1242 dev_rel(dev);
1243 return (error);
1244 }
1245
1246 /*
1247 * A read function for use by filesystem-layer routines.
1248 */
1249 static int
1250 ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
1251 {
1252 struct buf *bp;
1253 int error;
1254
1255 KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
1256 *bufp = malloc(size, M_UFSMNT, M_WAITOK);
1257 if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
1258 &bp)) != 0)
1259 return (error);
1260 bcopy(bp->b_data, *bufp, size);
1261 bp->b_flags |= B_INVAL | B_NOCACHE;
1262 brelse(bp);
1263 return (0);
1264 }
1265
1266 static int bigcgs = 0;
1267 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
1268
1269 /*
1270 * Sanity checks for loading old filesystem superblocks.
1271 * See ffs_oldfscompat_write below for unwound actions.
1272 *
1273 * XXX - Parts get retired eventually.
1274 * Unfortunately new bits get added.
1275 */
1276 static void
1277 ffs_oldfscompat_read(struct fs *fs,
1278 struct ufsmount *ump,
1279 ufs2_daddr_t sblockloc)
1280 {
1281 off_t maxfilesize;
1282
1283 /*
1284 * If not yet done, update fs_flags location and value of fs_sblockloc.
1285 */
1286 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
1287 fs->fs_flags = fs->fs_old_flags;
1288 fs->fs_old_flags |= FS_FLAGS_UPDATED;
1289 fs->fs_sblockloc = sblockloc;
1290 }
1291 /*
1292 * If not yet done, update UFS1 superblock with new wider fields.
1293 */
1294 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
1295 fs->fs_maxbsize = fs->fs_bsize;
1296 fs->fs_time = fs->fs_old_time;
1297 fs->fs_size = fs->fs_old_size;
1298 fs->fs_dsize = fs->fs_old_dsize;
1299 fs->fs_csaddr = fs->fs_old_csaddr;
1300 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1301 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1302 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1303 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1304 }
1305 if (fs->fs_magic == FS_UFS1_MAGIC &&
1306 fs->fs_old_inodefmt < FS_44INODEFMT) {
1307 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
1308 fs->fs_qbmask = ~fs->fs_bmask;
1309 fs->fs_qfmask = ~fs->fs_fmask;
1310 }
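	/*
	 * Clamp the UFS1 maximum file size to what the filesystem can
	 * actually address and remember the on-disk value so that
	 * ffs_oldfscompat_write() can restore it.
	 */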
1311 if (fs->fs_magic == FS_UFS1_MAGIC) {
1312 ump->um_savedmaxfilesize = fs->fs_maxfilesize;
1313 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
1314 if (fs->fs_maxfilesize > maxfilesize)
1315 fs->fs_maxfilesize = maxfilesize;
1316 }
1317 /* Compatibility for old filesystems */
1318 if (fs->fs_avgfilesize <= 0)
1319 fs->fs_avgfilesize = AVFILESIZ;
1320 if (fs->fs_avgfpdir <= 0)
1321 fs->fs_avgfpdir = AFPDIR;
1322 if (bigcgs) {
1323 fs->fs_save_cgsize = fs->fs_cgsize;
1324 fs->fs_cgsize = fs->fs_bsize;
1325 }
1326 }
1327
1328 /*
1329 * Unwinding superblock updates for old filesystems.
1330 * See ffs_oldfscompat_read above for details.
1331 *
1332 * XXX - Parts get retired eventually.
1333 * Unfortunately new bits get added.
1334 */
1335 void
1336 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1337 {
1338
1339 /*
1340 * Copy back UFS2 updated fields that UFS1 inspects.
1341 */
1342 if (fs->fs_magic == FS_UFS1_MAGIC) {
1343 fs->fs_old_time = fs->fs_time;
1344 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1345 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1346 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1347 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1348 fs->fs_maxfilesize = ump->um_savedmaxfilesize;
1349 }
1350 if (bigcgs) {
1351 fs->fs_cgsize = fs->fs_save_cgsize;
1352 fs->fs_save_cgsize = 0;
1353 }
1354 }
1355
1356 /*
1357 * unmount system call
1358 */
1359 static int
1360 ffs_unmount(struct mount *mp, int mntflags)
1361 {
1362 struct thread *td;
1363 struct ufsmount *ump = VFSTOUFS(mp);
1364 struct fs *fs;
1365 int error, flags, susp;
1366 #ifdef UFS_EXTATTR
1367 int e_restart;
1368 #endif
1369
1370 flags = 0;
1371 td = curthread;
1372 fs = ump->um_fs;
1373 if (mntflags & MNT_FORCE)
1374 flags |= FORCECLOSE;
1375 susp = fs->fs_ronly == 0;
1376 #ifdef UFS_EXTATTR
1377 if ((error = ufs_extattr_stop(mp, td))) {
1378 if (error != EOPNOTSUPP)
1379 printf("WARNING: unmount %s: ufs_extattr_stop "
1380 "returned errno %d\n", mp->mnt_stat.f_mntonname,
1381 error);
1382 e_restart = 0;
1383 } else {
1384 ufs_extattr_uepm_destroy(&ump->um_extattr);
1385 e_restart = 1;
1386 }
1387 #endif
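	/*
	 * For read-write mounts, suspend writes so that everything can be
	 * flushed and the filesystem marked clean before the device is
	 * closed.
	 */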
1388 if (susp) {
1389 error = vfs_write_suspend_umnt(mp);
1390 if (error != 0)
1391 goto fail1;
1392 }
1393 if (MOUNTEDSOFTDEP(mp))
1394 error = softdep_flushfiles(mp, flags, td);
1395 else
1396 error = ffs_flushfiles(mp, flags, td);
1397 if (error != 0 && !ffs_fsfail_cleanup(ump, error))
1398 goto fail;
1399
1400 UFS_LOCK(ump);
1401 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1402 printf("WARNING: unmount %s: pending error: blocks %jd "
1403 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
1404 fs->fs_pendinginodes);
1405 fs->fs_pendingblocks = 0;
1406 fs->fs_pendinginodes = 0;
1407 }
1408 UFS_UNLOCK(ump);
1409 if (MOUNTEDSOFTDEP(mp))
1410 softdep_unmount(mp);
1411 MPASS(ump->um_softdep == NULL);
1412 if (fs->fs_ronly == 0) {
1413 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
1414 error = ffs_sbupdate(ump, MNT_WAIT, 0);
1415 if (ffs_fsfail_cleanup(ump, error))
1416 error = 0;
1417 if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
1418 fs->fs_clean = 0;
1419 goto fail;
1420 }
1421 }
1422 if (susp)
1423 vfs_write_resume(mp, VR_START_WRITE);
1424 if (ump->um_trim_tq != NULL) {
1425 MPASS(ump->um_trim_inflight == 0);
1426 taskqueue_free(ump->um_trim_tq);
1427 free (ump->um_trimhash, M_TRIM);
1428 }
1429 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1430 g_topology_lock();
1431 g_vfs_close(ump->um_cp);
1432 g_topology_unlock();
1433 BO_LOCK(&ump->um_odevvp->v_bufobj);
1434 ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
1435 BO_UNLOCK(&ump->um_odevvp->v_bufobj);
1436 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
1437 mntfs_freevp(ump->um_devvp);
1438 vrele(ump->um_odevvp);
1439 dev_rel(ump->um_dev);
1440 mtx_destroy(UFS_MTX(ump));
1441 sx_destroy(&ump->um_checkpath_lock);
1442 if (mp->mnt_gjprovider != NULL) {
1443 free(mp->mnt_gjprovider, M_UFSMNT);
1444 mp->mnt_gjprovider = NULL;
1445 }
1446 free(fs->fs_csp, M_UFSMNT);
1447 free(fs->fs_si, M_UFSMNT);
1448 free(fs, M_UFSMNT);
1449 free(ump, M_UFSMNT);
1450 mp->mnt_data = NULL;
1451 if (td->td_su == mp) {
1452 td->td_su = NULL;
1453 vfs_rel(mp);
1454 }
1455 return (error);
1456
1457 fail:
1458 if (susp)
1459 vfs_write_resume(mp, VR_START_WRITE);
1460 fail1:
1461 #ifdef UFS_EXTATTR
1462 if (e_restart) {
1463 ufs_extattr_uepm_init(&ump->um_extattr);
1464 #ifdef UFS_EXTATTR_AUTOSTART
1465 (void) ufs_extattr_autostart(mp, td);
1466 #endif
1467 }
1468 #endif
1469
1470 return (error);
1471 }
1472
1473 /*
1474 * Flush out all the files in a filesystem.
1475 */
1476 int
1477 ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
1478 {
1479 struct ufsmount *ump;
1480 int qerror, error;
1481
1482 ump = VFSTOUFS(mp);
1483 qerror = 0;
1484 #ifdef QUOTA
1485 if (mp->mnt_flag & MNT_QUOTA) {
1486 int i;
1487 error = vflush(mp, 0, SKIPSYSTEM|flags, td);
1488 if (error)
1489 return (error);
1490 for (i = 0; i < MAXQUOTAS; i++) {
1491 error = quotaoff(td, mp, i);
1492 if (error != 0) {
1493 if ((flags & EARLYFLUSH) == 0)
1494 return (error);
1495 else
1496 qerror = error;
1497 }
1498 }
1499
1500 /*
1501 * Here we fall through to vflush again to ensure that
1502 * we have gotten rid of all the system vnodes, unless
1503 * quotas must not be closed.
1504 */
1505 }
1506 #endif
1507 /* devvp is not locked here */
1508 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
1509 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
1510 return (error);
1511 ffs_snapshot_unmount(mp);
1512 flags |= FORCECLOSE;
1513 /*
1514 * Here we fall through to vflush again to ensure
1515 * that we have gotten rid of all the system vnodes.
1516 */
1517 }
1518
1519 /*
1520 * Do not close system files if quotas were not closed, to be
1521 * able to sync the remaining dquots. The freeblks softupdate
1522 * workitems might hold a reference on a dquot, preventing
1523 * quotaoff() from completing. Next round of
1524 * softdep_flushworklist() iteration should process the
1525 * blockers, allowing the next run of quotaoff() to finally
1526 * flush held dquots.
1527 *
1528 * Otherwise, flush all the files.
1529 */
1530 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
1531 return (error);
1532
1533 /*
1534 * If this is a forcible unmount and there were any files that
1535 * were unlinked but still open, then vflush() will have
1536 * truncated and freed those files, which might have started
1537 * some trim work. Wait here for any trims to complete
1538 * and process the blkfrees which follow the trims.
1539 * This may create more dirty devvp buffers and softdep deps.
1540 */
1541 if (ump->um_trim_tq != NULL) {
1542 while (ump->um_trim_inflight != 0)
1543 pause("ufsutr", hz);
1544 taskqueue_drain_all(ump->um_trim_tq);
1545 }
1546
1547 /*
1548 * Flush filesystem metadata.
1549 */
1550 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1551 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
1552 VOP_UNLOCK(ump->um_devvp);
1553 return (error);
1554 }
1555
1556 /*
1557 * Get filesystem statistics.
1558 */
1559 static int
1560 ffs_statfs(struct mount *mp, struct statfs *sbp)
1561 {
1562 struct ufsmount *ump;
1563 struct fs *fs;
1564
1565 ump = VFSTOUFS(mp);
1566 fs = ump->um_fs;
1567 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
1568 panic("ffs_statfs");
1569 sbp->f_version = STATFS_VERSION;
1570 sbp->f_bsize = fs->fs_fsize;
1571 sbp->f_iosize = fs->fs_bsize;
1572 sbp->f_blocks = fs->fs_dsize;
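	/*
	 * Blocks and inodes whose release is still pending are counted as
	 * free, since they become available once the pending frees complete.
	 */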
1573 UFS_LOCK(ump);
1574 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1575 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1576 sbp->f_bavail = freespace(fs, fs->fs_minfree) +
1577 dbtofsb(fs, fs->fs_pendingblocks);
1578 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1579 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1580 UFS_UNLOCK(ump);
1581 sbp->f_namemax = UFS_MAXNAMLEN;
1582 return (0);
1583 }
1584
1585 static bool
1586 sync_doupdate(struct inode *ip)
1587 {
1588
1589 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
1590 IN_UPDATE)) != 0);
1591 }
1592
1593 static int
1594 ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
1595 {
1596 struct inode *ip;
1597
1598 /*
1599 * Flags are safe to access because ->v_data invalidation
1600 * is held off by listmtx.
1601 */
1602 if (vp->v_type == VNON)
1603 return (false);
1604 ip = VTOI(vp);
1605 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
1606 return (false);
1607 return (true);
1608 }
1609
1610 /*
1611 * For a lazy sync, we only care about access times, quotas and the
1612 * superblock. Other filesystem changes are already converted to
1613 * cylinder group blocks or inode blocks updates and are written to
1614 * disk by syncer.
1615 */
1616 static int
1617 ffs_sync_lazy(struct mount *mp)
1618 {
1619 struct vnode *mvp, *vp;
1620 struct inode *ip;
1621 int allerror, error;
1622
1623 allerror = 0;
1624 if ((mp->mnt_flag & MNT_NOATIME) != 0) {
1625 #ifdef QUOTA
1626 qsync(mp);
1627 #endif
1628 goto sbupdate;
1629 }
1630 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
1631 if (vp->v_type == VNON) {
1632 VI_UNLOCK(vp);
1633 continue;
1634 }
1635 ip = VTOI(vp);
1636
1637 /*
1638 * The IN_ACCESS flag is converted to IN_MODIFIED by
1639 * ufs_close() and ufs_getattr() by the calls to
1640 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
1641 * Test also all the other timestamp flags too, to pick up
1642 * any other cases that could be missed.
1643 */
1644 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
1645 VI_UNLOCK(vp);
1646 continue;
1647 }
1648 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
1649 continue;
1650 #ifdef QUOTA
1651 qsyncvp(vp);
1652 #endif
1653 if (sync_doupdate(ip))
1654 error = ffs_update(vp, 0);
1655 if (error != 0)
1656 allerror = error;
1657 vput(vp);
1658 }
1659 sbupdate:
1660 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
1661 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
1662 allerror = error;
1663 return (allerror);
1664 }
1665
1666 /*
1667 * Go through the disk queues to initiate sandbagged IO;
1668 * go through the inodes to write those that have been modified;
1669 * initiate the writing of the super block if it has been modified.
1670 *
1671 * Note: we are always called with the filesystem marked busy using
1672 * vfs_busy().
1673 */
1674 static int
1675 ffs_sync(struct mount *mp, int waitfor)
1676 {
1677 struct vnode *mvp, *vp, *devvp;
1678 struct thread *td;
1679 struct inode *ip;
1680 struct ufsmount *ump = VFSTOUFS(mp);
1681 struct fs *fs;
1682 int error, count, lockreq, allerror = 0;
1683 int suspend;
1684 int suspended;
1685 int secondary_writes;
1686 int secondary_accwrites;
1687 int softdep_deps;
1688 int softdep_accdeps;
1689 struct bufobj *bo;
1690
1691 suspend = 0;
1692 suspended = 0;
1693 td = curthread;
1694 fs = ump->um_fs;
1695 if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
1696 panic("%s: ffs_sync: modification on read-only filesystem",
1697 fs->fs_fsmnt);
1698 if (waitfor == MNT_LAZY) {
1699 if (!rebooting)
1700 return (ffs_sync_lazy(mp));
1701 waitfor = MNT_NOWAIT;
1702 }
1703
1704 /*
1705 * Write back each (modified) inode.
1706 */
1707 lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1708 if (waitfor == MNT_SUSPEND) {
1709 suspend = 1;
1710 waitfor = MNT_WAIT;
1711 }
1712 if (waitfor == MNT_WAIT)
1713 lockreq = LK_EXCLUSIVE;
1714 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
1715 loop:
1716 /* Grab snapshot of secondary write counts */
1717 MNT_ILOCK(mp);
1718 secondary_writes = mp->mnt_secondary_writes;
1719 secondary_accwrites = mp->mnt_secondary_accwrites;
1720 MNT_IUNLOCK(mp);
1721
1722 /* Grab snapshot of softdep dependency counts */
1723 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
1724
1725 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1726 /*
1727 * Depend on the vnode interlock to keep things stable enough
1728 * for a quick test. Since there might be hundreds of
1729 * thousands of vnodes, we cannot afford even a subroutine
1730 * call unless there's a good chance that we have work to do.
1731 */
1732 if (vp->v_type == VNON) {
1733 VI_UNLOCK(vp);
1734 continue;
1735 }
1736 ip = VTOI(vp);
1737 if ((ip->i_flag &
1738 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1739 vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1740 VI_UNLOCK(vp);
1741 continue;
1742 }
1743 if ((error = vget(vp, lockreq)) != 0) {
1744 if (error == ENOENT || error == ENOLCK) {
1745 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1746 goto loop;
1747 }
1748 continue;
1749 }
1750 #ifdef QUOTA
1751 qsyncvp(vp);
1752 #endif
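		/*
		 * ffs_syncvnode() returns ERELOOKUP when it had to drop and
		 * re-acquire locks; retry until the flush either completes
		 * or fails for a real reason.
		 */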
1753 for (;;) {
1754 error = ffs_syncvnode(vp, waitfor, 0);
1755 if (error == ERELOOKUP)
1756 continue;
1757 if (error != 0)
1758 allerror = error;
1759 break;
1760 }
1761 vput(vp);
1762 }
1763 /*
1764 * Force stale filesystem control information to be flushed.
1765 */
1766 if (waitfor == MNT_WAIT || rebooting) {
1767 if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1768 allerror = error;
1769 if (ffs_fsfail_cleanup(ump, allerror))
1770 allerror = 0;
1771 /* Flushed work items may create new vnodes to clean */
1772 if (allerror == 0 && count)
1773 goto loop;
1774 }
1775
1776 devvp = ump->um_devvp;
1777 bo = &devvp->v_bufobj;
1778 BO_LOCK(bo);
1779 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1780 BO_UNLOCK(bo);
1781 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1782 error = VOP_FSYNC(devvp, waitfor, td);
1783 VOP_UNLOCK(devvp);
1784 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1785 error = ffs_sbupdate(ump, waitfor, 0);
1786 if (error != 0)
1787 allerror = error;
1788 if (ffs_fsfail_cleanup(ump, allerror))
1789 allerror = 0;
1790 if (allerror == 0 && waitfor == MNT_WAIT)
1791 goto loop;
1792 } else if (suspend != 0) {
1793 if (softdep_check_suspend(mp,
1794 devvp,
1795 softdep_deps,
1796 softdep_accdeps,
1797 secondary_writes,
1798 secondary_accwrites) != 0) {
1799 MNT_IUNLOCK(mp);
1800 goto loop; /* More work needed */
1801 }
1802 mtx_assert(MNT_MTX(mp), MA_OWNED);
1803 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1804 MNT_IUNLOCK(mp);
1805 suspended = 1;
1806 } else
1807 BO_UNLOCK(bo);
1808 /*
1809 * Write back modified superblock.
1810 */
1811 if (fs->fs_fmod != 0 &&
1812 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1813 allerror = error;
1814 if (ffs_fsfail_cleanup(ump, allerror))
1815 allerror = 0;
1816 return (allerror);
1817 }
1818
1819 int
1820 ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1821 {
1822 return (ffs_vgetf(mp, ino, flags, vpp, 0));
1823 }
1824
1825 int
1826 ffs_vgetf(struct mount *mp,
1827 ino_t ino,
1828 int flags,
1829 struct vnode **vpp,
1830 int ffs_flags)
1831 {
1832 struct fs *fs;
1833 struct inode *ip;
1834 struct ufsmount *ump;
1835 struct buf *bp;
1836 struct vnode *vp;
1837 daddr_t dbn;
1838 int error;
1839
1840 MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1841 (flags & LK_EXCLUSIVE) != 0);
1842
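	/*
	 * First look for the vnode in the per-mount inode hash; if found,
	 * it is returned locked according to "flags".
	 */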
1843 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1844 if (error != 0)
1845 return (error);
1846 if (*vpp != NULL) {
1847 if ((ffs_flags & FFSV_REPLACE) == 0 ||
1848 ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1849 !VN_IS_DOOMED(*vpp)))
1850 return (0);
1851 vgone(*vpp);
1852 vput(*vpp);
1853 }
1854
1855 /*
1856 * We must promote to an exclusive lock for vnode creation. This
1857 * can happen if lookup is passed LOCKSHARED.
1858 */
1859 if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1860 flags &= ~LK_TYPE_MASK;
1861 flags |= LK_EXCLUSIVE;
1862 }
1863
1864 /*
1865 * We do not lock vnode creation as it is believed to be too
1866 * expensive for such a rare case as the simultaneous creation of a
1867 * vnode for the same ino by different processes. We just allow them to race
1868 * and check later to decide who wins. Let the race begin!
1869 */
1870
1871 ump = VFSTOUFS(mp);
1872 fs = ump->um_fs;
1873 ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1874
1875 /* Allocate a new vnode/inode. */
1876 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1877 &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1878 if (error) {
1879 *vpp = NULL;
1880 uma_zfree_smr(uma_inode, ip);
1881 return (error);
1882 }
1883 /*
1884 * FFS supports recursive locking.
1885 */
1886 lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1887 VN_LOCK_AREC(vp);
1888 vp->v_data = ip;
1889 vp->v_bufobj.bo_bsize = fs->fs_bsize;
1890 ip->i_vnode = vp;
1891 ip->i_ump = ump;
1892 ip->i_number = ino;
1893 ip->i_ea_refs = 0;
1894 ip->i_nextclustercg = -1;
1895 ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1896 ip->i_mode = 0; /* ensure error cases below throw away vnode */
1897 cluster_init_vn(&ip->i_clusterw);
1898 #ifdef DIAGNOSTIC
1899 ufs_init_trackers(ip);
1900 #endif
1901 #ifdef QUOTA
1902 {
1903 int i;
1904 for (i = 0; i < MAXQUOTAS; i++)
1905 ip->i_dquot[i] = NODQUOT;
1906 }
1907 #endif
1908
1909 if (ffs_flags & FFSV_FORCEINSMQ)
1910 vp->v_vflag |= VV_FORCEINSMQ;
1911 error = insmntque(vp, mp);
1912 if (error != 0) {
1913 uma_zfree_smr(uma_inode, ip);
1914 *vpp = NULL;
1915 return (error);
1916 }
1917 vp->v_vflag &= ~VV_FORCEINSMQ;
1918 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1919 if (error != 0)
1920 return (error);
1921 if (*vpp != NULL) {
		/*
		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
		 * operate on an empty inode, which must not be found by
		 * other threads until it has been fully filled in.  The
		 * vnode for the empty inode must not be re-inserted on
		 * the hash by another thread after we removed it at the
		 * beginning.
		 */
1929 MPASS((ffs_flags & FFSV_REPLACE) == 0);
1930 return (0);
1931 }
1932 if (I_IS_UFS1(ip))
1933 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1934 else
1935 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1936
1937 if ((ffs_flags & FFSV_NEWINODE) != 0) {
1938 /* New inode, just zero out its contents. */
1939 if (I_IS_UFS1(ip))
1940 memset(ip->i_din1, 0, sizeof(struct ufs1_dinode));
1941 else
1942 memset(ip->i_din2, 0, sizeof(struct ufs2_dinode));
1943 } else {
1944 /* Read the disk contents for the inode, copy into the inode. */
1945 dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1946 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
1947 (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1948 if (error != 0) {
1949 /*
1950 * The inode does not contain anything useful, so it
1951 * would be misleading to leave it on its hash chain.
1952 * With mode still zero, it will be unlinked and
1953 * returned to the free list by vput().
1954 */
1955 vgone(vp);
1956 vput(vp);
1957 *vpp = NULL;
1958 return (error);
1959 }
1960 if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1961 bqrelse(bp);
1962 vgone(vp);
1963 vput(vp);
1964 *vpp = NULL;
1965 return (error);
1966 }
1967 bqrelse(bp);
1968 }
1969 if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1970 (ffs_flags & FFSV_FORCEINODEDEP) != 0))
1971 softdep_load_inodeblock(ip);
1972 else
1973 ip->i_effnlink = ip->i_nlink;
1974
1975 /*
1976 * Initialize the vnode from the inode, check for aliases.
1977 * Note that the underlying vnode may have changed.
1978 */
1979 error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1980 &vp);
1981 if (error) {
1982 vgone(vp);
1983 vput(vp);
1984 *vpp = NULL;
1985 return (error);
1986 }
1987
1988 /*
1989 * Finish inode initialization.
1990 */
1991 if (vp->v_type != VFIFO) {
1992 /* FFS supports shared locking for all files except fifos. */
1993 VN_LOCK_ASHARE(vp);
1994 }
1995
1996 /*
1997 * Set up a generation number for this inode if it does not
1998 * already have one. This should only happen on old filesystems.
1999 */
2000 if (ip->i_gen == 0) {
2001 while (ip->i_gen == 0)
2002 ip->i_gen = arc4random();
2003 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
2004 UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
2005 DIP_SET(ip, i_gen, ip->i_gen);
2006 }
2007 }
2008 #ifdef MAC
2009 if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
2010 /*
2011 * If this vnode is already allocated, and we're running
2012 * multi-label, attempt to perform a label association
2013 * from the extended attributes on the inode.
2014 */
2015 error = mac_vnode_associate_extattr(mp, vp);
2016 if (error) {
2017 /* ufs_inactive will release ip->i_devvp ref. */
2018 vgone(vp);
2019 vput(vp);
2020 *vpp = NULL;
2021 return (error);
2022 }
2023 }
2024 #endif
2025
2026 vn_set_state(vp, VSTATE_CONSTRUCTED);
2027 *vpp = vp;
2028 return (0);
2029 }
2030
2031 /*
2032 * File handle to vnode
2033 *
2034 * Have to be really careful about stale file handles:
2035 * - check that the inode number is valid
2036 * - for UFS2 check that the inode number is initialized
2037 * - call ffs_vget() to get the locked inode
2038 * - check for an unallocated inode (i_mode == 0)
2039 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
2041 */
2042 static int
2043 ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
2044 {
2045 struct ufid *ufhp;
2046
2047 ufhp = (struct ufid *)fhp;
2048 return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
2049 vpp, 0));
2050 }
2051
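/*
 * Convert an inode number and generation count into a vnode, applying
 * the staleness checks described above.  This is the common
 * implementation behind the file handle conversion in ffs_fhtovp().
 */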
2052 int
2053 ffs_inotovp(struct mount *mp,
2054 ino_t ino,
2055 u_int64_t gen,
2056 int lflags,
2057 struct vnode **vpp,
2058 int ffs_flags)
2059 {
2060 struct ufsmount *ump;
2061 struct vnode *nvp;
2062 struct inode *ip;
2063 struct fs *fs;
2064 struct cg *cgp;
2065 struct buf *bp;
2066 u_int cg;
2067 int error;
2068
2069 ump = VFSTOUFS(mp);
2070 fs = ump->um_fs;
2071 *vpp = NULL;
2072
2073 if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
2074 return (ESTALE);
2075
2076 /*
2077 * Need to check if inode is initialized because UFS2 does lazy
2078 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
2079 */
2080 if (fs->fs_magic == FS_UFS2_MAGIC) {
2081 cg = ino_to_cg(fs, ino);
2082 error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
2083 if (error != 0)
2084 return (error);
2085 if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
2086 brelse(bp);
2087 return (ESTALE);
2088 }
2089 brelse(bp);
2090 }
2091
2092 error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
2093 if (error != 0)
2094 return (error);
2095
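	/*
	 * Reject stale handles: the inode must be allocated (non-zero
	 * mode), still linked, and carry the expected generation number.
	 */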
2096 ip = VTOI(nvp);
2097 if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
2098 if (ip->i_mode == 0)
2099 vgone(nvp);
2100 vput(nvp);
2101 return (ESTALE);
2102 }
2103
2104 vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
2105 *vpp = nvp;
2106 return (0);
2107 }
2108
2109 /*
2110 * Initialize the filesystem.
2111 */
2112 static int
2113 ffs_init(struct vfsconf *vfsp)
2114 {
2115
2116 ffs_susp_initialize();
2117 softdep_initialize();
2118 return (ufs_init(vfsp));
2119 }
2120
2121 /*
2122 * Undo the work of ffs_init().
2123 */
2124 static int
2125 ffs_uninit(struct vfsconf *vfsp)
2126 {
2127 int ret;
2128
2129 ret = ufs_uninit(vfsp);
2130 softdep_uninitialize();
2131 ffs_susp_uninitialize();
2132 taskqueue_drain_all(taskqueue_thread);
2133 return (ret);
2134 }
2135
2136 /*
2137 * Structure used to pass information from ffs_sbupdate to its
2138 * helper routine ffs_use_bwrite.
2139 */
2140 struct devfd {
2141 struct ufsmount *ump;
2142 struct buf *sbbp;
2143 int waitfor;
2144 int suspended;
2145 int error;
2146 };
2147
2148 /*
2149 * Write a superblock and associated information back to disk.
2150 */
2151 int
2152 ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
2153 {
2154 struct fs *fs;
2155 struct buf *sbbp;
2156 struct devfd devfd;
2157
2158 fs = ump->um_fs;
2159 if (fs->fs_ronly == 1 &&
2160 (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2161 (MNT_RDONLY | MNT_UPDATE))
2162 panic("ffs_sbupdate: write read-only filesystem");
2163 /*
2164 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2165 */
2166 sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2167 (int)fs->fs_sbsize, 0, 0, 0);
2168 /*
2169 * Initialize info needed for write function.
2170 */
2171 devfd.ump = ump;
2172 devfd.sbbp = sbbp;
2173 devfd.waitfor = waitfor;
2174 devfd.suspended = suspended;
2175 devfd.error = 0;
2176 return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2177 }
2178
2179 /*
2180 * Write function for use by filesystem-layer routines.
2181 */
2182 static int
2183 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2184 {
2185 struct devfd *devfdp;
2186 struct ufsmount *ump;
2187 struct buf *bp;
2188 struct fs *fs;
2189 int error;
2190
2191 devfdp = devfd;
2192 ump = devfdp->ump;
2193 fs = ump->um_fs;
2194 /*
2195 * Writing the superblock summary information.
2196 */
2197 if (loc != fs->fs_sblockloc) {
2198 bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2199 bcopy(buf, bp->b_data, (u_int)size);
2200 if (devfdp->suspended)
2201 bp->b_flags |= B_VALIDSUSPWRT;
2202 if (devfdp->waitfor != MNT_WAIT)
2203 bawrite(bp);
2204 else if ((error = bwrite(bp)) != 0)
2205 devfdp->error = error;
2206 return (0);
2207 }
2208 /*
2209 * Writing the superblock itself. We need to do special checks for it.
2210 */
2211 bp = devfdp->sbbp;
2212 if (ffs_fsfail_cleanup(ump, devfdp->error))
2213 devfdp->error = 0;
2214 if (devfdp->error != 0) {
2215 brelse(bp);
2216 return (devfdp->error);
2217 }
2218 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2219 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2220 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2221 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2222 fs->fs_sblockloc = SBLOCK_UFS1;
2223 }
2224 if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2225 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2226 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2227 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2228 fs->fs_sblockloc = SBLOCK_UFS2;
2229 }
2230 if (MOUNTEDSOFTDEP(ump->um_mountp))
2231 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
2232 UFS_LOCK(ump);
2233 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
2234 UFS_UNLOCK(ump);
2235 fs = (struct fs *)bp->b_data;
2236 fs->fs_fmod = 0;
2237 ffs_oldfscompat_write(fs, ump);
2238 fs->fs_si = NULL;
2239 /* Recalculate the superblock hash */
2240 fs->fs_ckhash = ffs_calc_sbhash(fs);
2241 if (devfdp->suspended)
2242 bp->b_flags |= B_VALIDSUSPWRT;
2243 if (devfdp->waitfor != MNT_WAIT)
2244 bawrite(bp);
2245 else if ((error = bwrite(bp)) != 0)
2246 devfdp->error = error;
2247 return (devfdp->error);
2248 }
2249
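/*
 * Extended attribute control: pass the request to the UFS extended
 * attribute code when the kernel is built with UFS_EXTATTR, otherwise
 * fall back to the generic VFS handler.
 */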
2250 static int
2251 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2252 int attrnamespace, const char *attrname)
2253 {
2254
2255 #ifdef UFS_EXTATTR
2256 return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2257 attrname));
2258 #else
2259 return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2260 attrname));
2261 #endif
2262 }
2263
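/*
 * Free an in-core inode: release its UFS1 or UFS2 dinode, if any, and
 * then the inode itself back to their UMA zones.
 */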
2264 static void
2265 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2266 {
2267
2268 if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2269 uma_zfree(uma_ufs1, ip->i_din1);
2270 else if (ip->i_din2 != NULL)
2271 uma_zfree(uma_ufs2, ip->i_din2);
2272 uma_zfree_smr(uma_inode, ip);
2273 }
2274
2275 static int dobkgrdwrite = 1;
2276 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2277 "Do background writes (honoring the BV_BKGRDWRITE flag)?");
2278
2279 /*
2280 * Complete a background write started from bwrite.
2281 */
2282 static void
2283 ffs_backgroundwritedone(struct buf *bp)
2284 {
2285 struct bufobj *bufobj;
2286 struct buf *origbp;
2287
2288 #ifdef SOFTUPDATES
2289 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2290 softdep_handle_error(bp);
2291 #endif
2292
2293 /*
2294 * Find the original buffer that we are writing.
2295 */
2296 bufobj = bp->b_bufobj;
2297 BO_LOCK(bufobj);
2298 if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2299 panic("backgroundwritedone: lost buffer");
2300
	/*
	 * If the background write failed, flag the original cylinder
	 * group buffer origbp so that it is treated as dirty again and
	 * the failed write is not lost.
	 */
2305 if ((bp->b_ioflags & BIO_ERROR) != 0)
2306 origbp->b_vflags |= BV_BKGRDERR;
2307 BO_UNLOCK(bufobj);
2308 /*
2309 * Process dependencies then return any unfinished ones.
2310 */
2311 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2312 buf_complete(bp);
2313 #ifdef SOFTUPDATES
2314 if (!LIST_EMPTY(&bp->b_dep))
2315 softdep_move_dependencies(bp, origbp);
2316 #endif
2317 /*
2318 * This buffer is marked B_NOCACHE so when it is released
2319 * by biodone it will be tossed. Clear B_IOSTARTED in case of error.
2320 */
2321 bp->b_flags |= B_NOCACHE;
2322 bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
2323 pbrelvp(bp);
2324
2325 /*
2326 * Prevent brelse() from trying to keep and re-dirtying bp on
2327 * errors. It causes b_bufobj dereference in
2328 * bdirty()/reassignbuf(), and b_bufobj was cleared in
2329 * pbrelvp() above.
2330 */
2331 if ((bp->b_ioflags & BIO_ERROR) != 0)
2332 bp->b_flags |= B_INVAL;
2333 bufdone(bp);
2334 BO_LOCK(bufobj);
2335 /*
2336 * Clear the BV_BKGRDINPROG flag in the original buffer
2337 * and awaken it if it is waiting for the write to complete.
2338 * If BV_BKGRDINPROG is not set in the original buffer it must
2339 * have been released and re-instantiated - which is not legal.
2340 */
2341 KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2342 ("backgroundwritedone: lost buffer2"));
2343 origbp->b_vflags &= ~BV_BKGRDINPROG;
2344 if (origbp->b_vflags & BV_BKGRDWAIT) {
2345 origbp->b_vflags &= ~BV_BKGRDWAIT;
2346 wakeup(&origbp->b_xflags);
2347 }
2348 BO_UNLOCK(bufobj);
2349 }
2350
2351 /*
2352 * Write, release buffer on completion. (Done by iodone
2353 * if async). Do not bother writing anything if the buffer
2354 * is invalid.
2355 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now, so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
2361 */
2362 static int
2363 ffs_bufwrite(struct buf *bp)
2364 {
2365 struct buf *newbp;
2366 struct cg *cgp;
2367
2368 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2369 if (bp->b_flags & B_INVAL) {
2370 brelse(bp);
2371 return (0);
2372 }
2373
2374 if (!BUF_ISLOCKED(bp))
2375 panic("bufwrite: buffer is not busy???");
2376 /*
2377 * If a background write is already in progress, delay
2378 * writing this block if it is asynchronous. Otherwise
2379 * wait for the background write to complete.
2380 */
2381 BO_LOCK(bp->b_bufobj);
2382 if (bp->b_vflags & BV_BKGRDINPROG) {
2383 if (bp->b_flags & B_ASYNC) {
2384 BO_UNLOCK(bp->b_bufobj);
2385 bdwrite(bp);
2386 return (0);
2387 }
2388 bp->b_vflags |= BV_BKGRDWAIT;
2389 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2390 "bwrbg", 0);
2391 if (bp->b_vflags & BV_BKGRDINPROG)
2392 panic("bufwrite: still writing");
2393 }
2394 bp->b_vflags &= ~BV_BKGRDERR;
2395 BO_UNLOCK(bp->b_bufobj);
2396
2397 /*
2398 * If this buffer is marked for background writing and we
2399 * do not have to wait for it, make a copy and write the
2400 * copy so as to leave this buffer ready for further use.
2401 *
2402 * This optimization eats a lot of memory. If we have a page
2403 * or buffer shortfall we can't do it.
2404 */
2405 if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2406 (bp->b_flags & B_ASYNC) &&
2407 !vm_page_count_severe() &&
2408 !buf_dirty_count_severe()) {
2409 KASSERT(bp->b_iodone == NULL,
2410 ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2411
2412 /* get a new block */
2413 newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2414 if (newbp == NULL)
2415 goto normal_write;
2416
2417 KASSERT(buf_mapped(bp), ("Unmapped cg"));
2418 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2419 BO_LOCK(bp->b_bufobj);
2420 bp->b_vflags |= BV_BKGRDINPROG;
2421 BO_UNLOCK(bp->b_bufobj);
2422 newbp->b_xflags |=
2423 (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2424 newbp->b_lblkno = bp->b_lblkno;
2425 newbp->b_blkno = bp->b_blkno;
2426 newbp->b_offset = bp->b_offset;
2427 newbp->b_iodone = ffs_backgroundwritedone;
2428 newbp->b_flags |= B_ASYNC;
2429 newbp->b_flags &= ~B_INVAL;
2430 pbgetvp(bp->b_vp, newbp);
2431
2432 #ifdef SOFTUPDATES
2433 /*
2434 * Move over the dependencies. If there are rollbacks,
2435 * leave the parent buffer dirtied as it will need to
2436 * be written again.
2437 */
2438 if (LIST_EMPTY(&bp->b_dep) ||
2439 softdep_move_dependencies(bp, newbp) == 0)
2440 bundirty(bp);
2441 #else
2442 bundirty(bp);
2443 #endif
2444
2445 /*
2446 * Initiate write on the copy, release the original. The
2447 * BKGRDINPROG flag prevents it from going away until
2448 * the background write completes. We have to recalculate
2449 * its check hash in case the buffer gets freed and then
2450 * reconstituted from the buffer cache during a later read.
2451 */
2452 if ((bp->b_xflags & BX_CYLGRP) != 0) {
2453 cgp = (struct cg *)bp->b_data;
2454 cgp->cg_ckhash = 0;
2455 cgp->cg_ckhash =
2456 calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2457 }
2458 bqrelse(bp);
2459 bp = newbp;
2460 } else
2461 /* Mark the buffer clean */
2462 bundirty(bp);
2463
2464 /* Let the normal bufwrite do the rest for us */
2465 normal_write:
2466 /*
2467 * If we are writing a cylinder group, update its time.
2468 */
2469 if ((bp->b_xflags & BX_CYLGRP) != 0) {
2470 cgp = (struct cg *)bp->b_data;
2471 cgp->cg_old_time = cgp->cg_time = time_second;
2472 }
2473 return (bufwrite(bp));
2474 }
2475
2476 static void
2477 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2478 {
2479 struct vnode *vp;
2480 struct buf *tbp;
2481 int error, nocopy;
2482
2483 /*
2484 * This is the bufobj strategy for the private VCHR vnodes
2485 * used by FFS to access the underlying storage device.
2486 * We override the default bufobj strategy and thus bypass
2487 * VOP_STRATEGY() for these vnodes.
2488 */
2489 vp = bo2vnode(bo);
2490 KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2491 bp->b_vp->v_rdev == NULL ||
2492 bp->b_vp->v_rdev->si_mountpt == NULL ||
2493 VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2494 vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2495 ("ffs_geom_strategy() with wrong vp"));
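	/*
	 * For writes, enforce the suspension rules and give the snapshot
	 * code a chance to copy-on-write any blocks that are about to be
	 * overwritten.
	 */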
2496 if (bp->b_iocmd == BIO_WRITE) {
2497 if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2498 bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2499 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2500 panic("ffs_geom_strategy: bad I/O");
2501 nocopy = bp->b_flags & B_NOCOPY;
2502 bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2503 if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2504 vp->v_rdev->si_snapdata != NULL) {
2505 if ((bp->b_flags & B_CLUSTER) != 0) {
2506 runningbufwakeup(bp);
2507 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2508 b_cluster.cluster_entry) {
2509 error = ffs_copyonwrite(vp, tbp);
2510 if (error != 0 &&
2511 error != EOPNOTSUPP) {
2512 bp->b_error = error;
2513 bp->b_ioflags |= BIO_ERROR;
2514 bp->b_flags &= ~B_BARRIER;
2515 bufdone(bp);
2516 return;
2517 }
2518 }
2519 bp->b_runningbufspace = bp->b_bufsize;
2520 atomic_add_long(&runningbufspace,
2521 bp->b_runningbufspace);
2522 } else {
2523 error = ffs_copyonwrite(vp, bp);
2524 if (error != 0 && error != EOPNOTSUPP) {
2525 bp->b_error = error;
2526 bp->b_ioflags |= BIO_ERROR;
2527 bp->b_flags &= ~B_BARRIER;
2528 bufdone(bp);
2529 return;
2530 }
2531 }
2532 }
2533 #ifdef SOFTUPDATES
2534 if ((bp->b_flags & B_CLUSTER) != 0) {
2535 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2536 b_cluster.cluster_entry) {
2537 if (!LIST_EMPTY(&tbp->b_dep))
2538 buf_start(tbp);
2539 }
2540 } else {
2541 if (!LIST_EMPTY(&bp->b_dep))
2542 buf_start(bp);
2543 }
2544
2545 #endif
2546 /*
2547 * Check for metadata that needs check-hashes and update them.
2548 */
2549 switch (bp->b_xflags & BX_FSPRIV) {
2550 case BX_CYLGRP:
2551 ((struct cg *)bp->b_data)->cg_ckhash = 0;
2552 ((struct cg *)bp->b_data)->cg_ckhash =
2553 calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2554 break;
2555
2556 case BX_SUPERBLOCK:
2557 case BX_INODE:
2558 case BX_INDIR:
2559 case BX_DIR:
2560 printf("Check-hash write is unimplemented!!!\n");
2561 break;
2562
2563 case 0:
2564 break;
2565
2566 default:
2567 printf("multiple buffer types 0x%b\n",
2568 (u_int)(bp->b_xflags & BX_FSPRIV),
2569 PRINT_UFS_BUF_XFLAGS);
2570 break;
2571 }
2572 }
2573 if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2574 bp->b_xflags |= BX_CVTENXIO;
2575 g_vfs_strategy(bo, bp);
2576 }
2577
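/*
 * Report whether the given mount point belongs to UFS/FFS.
 */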
2578 int
2579 ffs_own_mount(const struct mount *mp)
2580 {
2581
2582 if (mp->mnt_op == &ufs_vfsops)
2583 return (1);
2584 return (0);
2585 }
2586
2587 #ifdef DDB
2588 #ifdef SOFTUPDATES
2589
2590 /* defined in ffs_softdep.c */
2591 extern void db_print_ffs(struct ufsmount *ump);
2592
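/*
 * DDB "show ffs" command: print the state of the ufsmount given by the
 * address argument, or of every mounted UFS filesystem when no address
 * is supplied.
 */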
2593 DB_SHOW_COMMAND(ffs, db_show_ffs)
2594 {
2595 struct mount *mp;
2596 struct ufsmount *ump;
2597
2598 if (have_addr) {
2599 ump = VFSTOUFS((struct mount *)addr);
2600 db_print_ffs(ump);
2601 return;
2602 }
2603
2604 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2605 if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2606 db_print_ffs(VFSTOUFS(mp));
2607 }
2608 }
2609
2610 #endif /* SOFTUPDATES */
2611 #endif /* DDB */