FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_mount.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1999-2004 Poul-Henning Kamp
5 * Copyright (c) 1999 Michael Smith
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
8 * (c) UNIX System Laboratories, Inc.
9 * All or some portions of this file are derived from material licensed
10 * to the University of California by American Telephone and Telegraph
11 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12 * the permission of UNIX System Laboratories, Inc.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD: releng/12.0/sys/kern/vfs_mount.c 333263 2018-05-04 20:54:27Z jamie $");
41
42 #include <sys/param.h>
43 #include <sys/conf.h>
44 #include <sys/eventhandler.h>
45 #include <sys/fcntl.h>
46 #include <sys/jail.h>
47 #include <sys/kernel.h>
48 #include <sys/libkern.h>
49 #include <sys/malloc.h>
50 #include <sys/mount.h>
51 #include <sys/mutex.h>
52 #include <sys/namei.h>
53 #include <sys/priv.h>
54 #include <sys/proc.h>
55 #include <sys/filedesc.h>
56 #include <sys/reboot.h>
57 #include <sys/sbuf.h>
58 #include <sys/syscallsubr.h>
59 #include <sys/sysproto.h>
60 #include <sys/sx.h>
61 #include <sys/sysctl.h>
62 #include <sys/sysent.h>
63 #include <sys/systm.h>
64 #include <sys/vnode.h>
65 #include <vm/uma.h>
66
67 #include <geom/geom.h>
68
69 #include <machine/stdarg.h>
70
71 #include <security/audit/audit.h>
72 #include <security/mac/mac_framework.h>
73
74 #define VFS_MOUNTARG_SIZE_MAX (1024 * 64)
75
76 static int vfs_domount(struct thread *td, const char *fstype, char *fspath,
77 uint64_t fsflags, struct vfsoptlist **optlist);
78 static void free_mntarg(struct mntarg *ma);
79
80 static int usermount = 0;
81 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
82 "Unprivileged users may mount and unmount file systems");
83
84 static bool default_autoro = false;
85 SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0,
86 "Retry failed r/w mount as r/o if no explicit ro/rw option is specified");
87
88 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
89 MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
90 static uma_zone_t mount_zone;
91
92 /* List of mounted filesystems. */
93 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
94
95 /* For any iteration/modification of mountlist */
96 struct mtx mountlist_mtx;
97 MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF);
98
99 EVENTHANDLER_LIST_DEFINE(vfs_mounted);
100 EVENTHANDLER_LIST_DEFINE(vfs_unmounted);
101
102 /*
103 * Global opts, taken by all filesystems
104 */
105 static const char *global_opts[] = {
106 "errmsg",
107 "fstype",
108 "fspath",
109 "ro",
110 "rw",
111 "nosuid",
112 "noexec",
113 NULL
114 };
115
116 static int
117 mount_init(void *mem, int size, int flags)
118 {
119 struct mount *mp;
120
121 mp = (struct mount *)mem;
122 mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
123 mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
124 lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
125 return (0);
126 }
127
128 static void
129 mount_fini(void *mem, int size)
130 {
131 struct mount *mp;
132
133 mp = (struct mount *)mem;
134 lockdestroy(&mp->mnt_explock);
135 mtx_destroy(&mp->mnt_listmtx);
136 mtx_destroy(&mp->mnt_mtx);
137 }
138
139 static void
140 vfs_mount_init(void *dummy __unused)
141 {
142
143 mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
144 NULL, mount_init, mount_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
145 }
146 SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);
147
148 /*
149 * ---------------------------------------------------------------------
150 * Functions for building and sanitizing the mount options
151 */
152
153 /* Remove one mount option. */
154 static void
155 vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
156 {
157
158 TAILQ_REMOVE(opts, opt, link);
159 free(opt->name, M_MOUNT);
160 if (opt->value != NULL)
161 free(opt->value, M_MOUNT);
162 free(opt, M_MOUNT);
163 }
164
165 /* Release all resources related to the mount options. */
166 void
167 vfs_freeopts(struct vfsoptlist *opts)
168 {
169 struct vfsopt *opt;
170
171 while (!TAILQ_EMPTY(opts)) {
172 opt = TAILQ_FIRST(opts);
173 vfs_freeopt(opts, opt);
174 }
175 free(opts, M_MOUNT);
176 }
177
178 void
179 vfs_deleteopt(struct vfsoptlist *opts, const char *name)
180 {
181 struct vfsopt *opt, *temp;
182
183 if (opts == NULL)
184 return;
185 TAILQ_FOREACH_SAFE(opt, opts, link, temp) {
186 if (strcmp(opt->name, name) == 0)
187 vfs_freeopt(opts, opt);
188 }
189 }
190
191 static int
192 vfs_isopt_ro(const char *opt)
193 {
194
195 if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
196 strcmp(opt, "norw") == 0)
197 return (1);
198 return (0);
199 }
200
201 static int
202 vfs_isopt_rw(const char *opt)
203 {
204
205 if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
206 return (1);
207 return (0);
208 }
209
210 /*
211 * Check if options are equal (with or without the "no" prefix).
212 */
213 static int
214 vfs_equalopts(const char *opt1, const char *opt2)
215 {
216 char *p;
217
218 /* "opt" vs. "opt" or "noopt" vs. "noopt" */
219 if (strcmp(opt1, opt2) == 0)
220 return (1);
221 /* "noopt" vs. "opt" */
222 if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
223 return (1);
224 /* "opt" vs. "noopt" */
225 if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
226 return (1);
227 while ((p = strchr(opt1, '.')) != NULL &&
228 !strncmp(opt1, opt2, ++p - opt1)) {
229 opt2 += p - opt1;
230 opt1 = p;
231 /* "foo.noopt" vs. "foo.opt" */
232 if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
233 return (1);
234 /* "foo.opt" vs. "foo.noopt" */
235 if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
236 return (1);
237 }
238 /* "ro" / "rdonly" / "norw" / "rw" / "noro" */
239 if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
240 (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
241 return (1);
242 return (0);
243 }
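/*
 * Illustrative examples (not part of the original source): option name
 * pairs that vfs_equalopts() above treats as equal, following its
 * comparison rules:
 *
 *	vfs_equalopts("atime", "noatime")	-> 1	"opt" vs. "noopt"
 *	vfs_equalopts("foo.ro", "foo.noro")	-> 1	shared "foo." prefix
 *	vfs_equalopts("ro", "rw")		-> 1	both select the r/o vs. r/w state
 *	vfs_equalopts("rdonly", "noro")		-> 1	aliases of "ro" and "rw"
 *	vfs_equalopts("atime", "noexec")	-> 0	unrelated options
 */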
244
245 /*
246 * If a mount option is specified several times,
247 * (with or without the "no" prefix) only keep
248 * the last occurrence of it.
249 */
250 static void
251 vfs_sanitizeopts(struct vfsoptlist *opts)
252 {
253 struct vfsopt *opt, *opt2, *tmp;
254
255 TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
256 opt2 = TAILQ_PREV(opt, vfsoptlist, link);
257 while (opt2 != NULL) {
258 if (vfs_equalopts(opt->name, opt2->name)) {
259 tmp = TAILQ_PREV(opt2, vfsoptlist, link);
260 vfs_freeopt(opts, opt2);
261 opt2 = tmp;
262 } else {
263 opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
264 }
265 }
266 }
267 }
268
269 /*
270 * Build a linked list of mount options from a struct uio.
271 */
272 int
273 vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
274 {
275 struct vfsoptlist *opts;
276 struct vfsopt *opt;
277 size_t memused, namelen, optlen;
278 unsigned int i, iovcnt;
279 int error;
280
281 opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
282 TAILQ_INIT(opts);
283 memused = 0;
284 iovcnt = auio->uio_iovcnt;
285 for (i = 0; i < iovcnt; i += 2) {
286 namelen = auio->uio_iov[i].iov_len;
287 optlen = auio->uio_iov[i + 1].iov_len;
288 memused += sizeof(struct vfsopt) + optlen + namelen;
289 /*
290 * Avoid consuming too much memory, and guard against attempts
291 * to overflow memused.
292 */
293 if (memused > VFS_MOUNTARG_SIZE_MAX ||
294 optlen > VFS_MOUNTARG_SIZE_MAX ||
295 namelen > VFS_MOUNTARG_SIZE_MAX) {
296 error = EINVAL;
297 goto bad;
298 }
299
300 opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
301 opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
302 opt->value = NULL;
303 opt->len = 0;
304 opt->pos = i / 2;
305 opt->seen = 0;
306
307 /*
308 * Do this early, so jumps to "bad" will free the current
309 * option.
310 */
311 TAILQ_INSERT_TAIL(opts, opt, link);
312
313 if (auio->uio_segflg == UIO_SYSSPACE) {
314 bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
315 } else {
316 error = copyin(auio->uio_iov[i].iov_base, opt->name,
317 namelen);
318 if (error)
319 goto bad;
320 }
321 /* Ensure names are null-terminated strings. */
322 if (namelen == 0 || opt->name[namelen - 1] != '\0') {
323 error = EINVAL;
324 goto bad;
325 }
326 if (optlen != 0) {
327 opt->len = optlen;
328 opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
329 if (auio->uio_segflg == UIO_SYSSPACE) {
330 bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
331 optlen);
332 } else {
333 error = copyin(auio->uio_iov[i + 1].iov_base,
334 opt->value, optlen);
335 if (error)
336 goto bad;
337 }
338 }
339 }
340 vfs_sanitizeopts(opts);
341 *options = opts;
342 return (0);
343 bad:
344 vfs_freeopts(opts);
345 return (error);
346 }
347
348 /*
349 * Merge the old mount options with the new ones passed
350 * in the MNT_UPDATE case.
351 *
352 * XXX: This function will keep a "nofoo" option in the new
353 * options. E.g., if the option's canonical name is "foo",
354 * "nofoo" ends up in the mount point's active options.
355 */
356 static void
357 vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
358 {
359 struct vfsopt *opt, *new;
360
361 TAILQ_FOREACH(opt, oldopts, link) {
362 new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
363 new->name = strdup(opt->name, M_MOUNT);
364 if (opt->len != 0) {
365 new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
366 bcopy(opt->value, new->value, opt->len);
367 } else
368 new->value = NULL;
369 new->len = opt->len;
370 new->seen = opt->seen;
371 TAILQ_INSERT_HEAD(toopts, new, link);
372 }
373 vfs_sanitizeopts(toopts);
374 }
375
376 /*
377 * Mount a filesystem.
378 */
379 #ifndef _SYS_SYSPROTO_H_
380 struct nmount_args {
381 struct iovec *iovp;
382 unsigned int iovcnt;
383 int flags;
384 };
385 #endif
386 int
387 sys_nmount(struct thread *td, struct nmount_args *uap)
388 {
389 struct uio *auio;
390 int error;
391 u_int iovcnt;
392 uint64_t flags;
393
394 /*
395 * Mount flags are now 64 bits. On 32-bit architectures only
396 * 32-bits are passed in, but from here on everything handles
397 * 64-bit flags correctly.
398 */
399 flags = uap->flags;
400
401 AUDIT_ARG_FFLAGS(flags);
402 CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
403 uap->iovp, uap->iovcnt, flags);
404
405 /*
406 * Filter out MNT_ROOTFS. We do not want clients of nmount() in
407 * userspace to set this flag, but we must filter it out if we want
408 * MNT_UPDATE on the root file system to work.
409 * MNT_ROOTFS should only be set by the kernel when mounting its
410 * root file system.
411 */
412 flags &= ~MNT_ROOTFS;
413
414 iovcnt = uap->iovcnt;
415 /*
416 * Check that we have an even number of iovec's
417 * and that we have at least two options.
418 */
419 if ((iovcnt & 1) || (iovcnt < 4)) {
420 CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
421 uap->iovcnt);
422 return (EINVAL);
423 }
424
425 error = copyinuio(uap->iovp, iovcnt, &auio);
426 if (error) {
427 CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
428 __func__, error);
429 return (error);
430 }
431 error = vfs_donmount(td, flags, auio);
432
433 free(auio, M_IOV);
434 return (error);
435 }
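/*
 * Illustrative userland sketch (not part of the original source): how a
 * caller builds the even-length iovec array that sys_nmount() consumes.
 * Each option is a NUL-terminated name followed by its (possibly empty)
 * value; "fstype" and "fspath" are mandatory, as vfs_donmount() checks
 * below.  The device and mount point names are made up.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include <string.h>

static void
set_pair(struct iovec *iov, const char *name, const char *val)
{
	iov[0].iov_base = __DECONST(char *, name);
	iov[0].iov_len = strlen(name) + 1;
	iov[1].iov_base = __DECONST(char *, val);
	iov[1].iov_len = (val != NULL) ? strlen(val) + 1 : 0;
}

static void
example_nmount(void)
{
	struct iovec iov[8];

	set_pair(&iov[0], "fstype", "ufs");
	set_pair(&iov[2], "fspath", "/mnt");
	set_pair(&iov[4], "from", "/dev/ada0p2");
	set_pair(&iov[6], "ro", NULL);
	if (nmount(iov, 8, 0) == -1)
		err(1, "nmount");
}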
436
437 /*
438 * ---------------------------------------------------------------------
439 * Various utility functions
440 */
441
442 void
443 vfs_ref(struct mount *mp)
444 {
445
446 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
447 MNT_ILOCK(mp);
448 MNT_REF(mp);
449 MNT_IUNLOCK(mp);
450 }
451
452 void
453 vfs_rel(struct mount *mp)
454 {
455
456 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
457 MNT_ILOCK(mp);
458 MNT_REL(mp);
459 MNT_IUNLOCK(mp);
460 }
461
462 /*
463 * Allocate and initialize the mount point struct.
464 */
465 struct mount *
466 vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
467 struct ucred *cred)
468 {
469 struct mount *mp;
470
471 mp = uma_zalloc(mount_zone, M_WAITOK);
472 bzero(&mp->mnt_startzero,
473 __rangeof(struct mount, mnt_startzero, mnt_endzero));
474 TAILQ_INIT(&mp->mnt_nvnodelist);
475 mp->mnt_nvnodelistsize = 0;
476 TAILQ_INIT(&mp->mnt_activevnodelist);
477 mp->mnt_activevnodelistsize = 0;
478 TAILQ_INIT(&mp->mnt_tmpfreevnodelist);
479 mp->mnt_tmpfreevnodelistsize = 0;
480 mp->mnt_ref = 0;
481 (void) vfs_busy(mp, MBF_NOWAIT);
482 atomic_add_acq_int(&vfsp->vfc_refcount, 1);
483 mp->mnt_op = vfsp->vfc_vfsops;
484 mp->mnt_vfc = vfsp;
485 mp->mnt_stat.f_type = vfsp->vfc_typenum;
486 mp->mnt_gen++;
487 strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
488 mp->mnt_vnodecovered = vp;
489 mp->mnt_cred = crdup(cred);
490 mp->mnt_stat.f_owner = cred->cr_uid;
491 strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
492 mp->mnt_iosize_max = DFLTPHYS;
493 #ifdef MAC
494 mac_mount_init(mp);
495 mac_mount_create(cred, mp);
496 #endif
497 arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
498 TAILQ_INIT(&mp->mnt_uppers);
499 return (mp);
500 }
501
502 /*
503 * Destroy the mount struct previously allocated by vfs_mount_alloc().
504 */
505 void
506 vfs_mount_destroy(struct mount *mp)
507 {
508
509 MNT_ILOCK(mp);
510 mp->mnt_kern_flag |= MNTK_REFEXPIRE;
511 if (mp->mnt_kern_flag & MNTK_MWAIT) {
512 mp->mnt_kern_flag &= ~MNTK_MWAIT;
513 wakeup(mp);
514 }
515 while (mp->mnt_ref)
516 msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
517 KASSERT(mp->mnt_ref == 0,
518 ("%s: invalid refcount in the drain path @ %s:%d", __func__,
519 __FILE__, __LINE__));
520 if (mp->mnt_writeopcount != 0)
521 panic("vfs_mount_destroy: nonzero writeopcount");
522 if (mp->mnt_secondary_writes != 0)
523 panic("vfs_mount_destroy: nonzero secondary_writes");
524 atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
525 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
526 struct vnode *vp;
527
528 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
529 vn_printf(vp, "dangling vnode ");
530 panic("unmount: dangling vnode");
531 }
532 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
533 if (mp->mnt_nvnodelistsize != 0)
534 panic("vfs_mount_destroy: nonzero nvnodelistsize");
535 if (mp->mnt_activevnodelistsize != 0)
536 panic("vfs_mount_destroy: nonzero activevnodelistsize");
537 if (mp->mnt_tmpfreevnodelistsize != 0)
538 panic("vfs_mount_destroy: nonzero tmpfreevnodelistsize");
539 if (mp->mnt_lockref != 0)
540 panic("vfs_mount_destroy: nonzero lock refcount");
541 MNT_IUNLOCK(mp);
542 if (mp->mnt_vnodecovered != NULL)
543 vrele(mp->mnt_vnodecovered);
544 #ifdef MAC
545 mac_mount_destroy(mp);
546 #endif
547 if (mp->mnt_opt != NULL)
548 vfs_freeopts(mp->mnt_opt);
549 crfree(mp->mnt_cred);
550 uma_zfree(mount_zone, mp);
551 }
552
553 static bool
554 vfs_should_downgrade_to_ro_mount(uint64_t fsflags, int error)
555 {
556 /* This is an update of an existing mount. */
557 if ((fsflags & MNT_UPDATE) != 0)
558 return (false);
559 /* This is already an R/O mount. */
560 if ((fsflags & MNT_RDONLY) != 0)
561 return (false);
562
563 switch (error) {
564 case ENODEV: /* generic, geom, ... */
565 case EACCES: /* cam/scsi, ... */
566 case EROFS: /* md, mmcsd, ... */
567 /*
568 * These errors can be returned by the storage layer to signal
569 * that the media is read-only. No harm in the R/O mount
570 * attempt if the error was returned for some other reason.
571 */
572 return (true);
573 default:
574 return (false);
575 }
576 }
577
578 int
579 vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
580 {
581 struct vfsoptlist *optlist;
582 struct vfsopt *opt, *tmp_opt;
583 char *fstype, *fspath, *errmsg;
584 int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;
585 bool autoro;
586
587 errmsg = fspath = NULL;
588 errmsg_len = fspathlen = 0;
589 errmsg_pos = -1;
590 autoro = default_autoro;
591
592 error = vfs_buildopts(fsoptions, &optlist);
593 if (error)
594 return (error);
595
596 if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
597 errmsg_pos = vfs_getopt_pos(optlist, "errmsg");
598
599 /*
600 * We need these two options before the others,
601 * and they are mandatory for any filesystem.
602 * Ensure they are NUL terminated as well.
603 */
604 fstypelen = 0;
605 error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
606 if (error || fstype[fstypelen - 1] != '\0') {
607 error = EINVAL;
608 if (errmsg != NULL)
609 strncpy(errmsg, "Invalid fstype", errmsg_len);
610 goto bail;
611 }
612 fspathlen = 0;
613 error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
614 if (error || fspath[fspathlen - 1] != '\0') {
615 error = EINVAL;
616 if (errmsg != NULL)
617 strncpy(errmsg, "Invalid fspath", errmsg_len);
618 goto bail;
619 }
620
621 /*
622 * We need to see if we have the "update" option
623 * before we call vfs_domount(), since vfs_domount() has special
624 * logic based on MNT_UPDATE. This is very important
625 * when we want to update the root filesystem.
626 */
627 TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
628 if (strcmp(opt->name, "update") == 0) {
629 fsflags |= MNT_UPDATE;
630 vfs_freeopt(optlist, opt);
631 }
632 else if (strcmp(opt->name, "async") == 0)
633 fsflags |= MNT_ASYNC;
634 else if (strcmp(opt->name, "force") == 0) {
635 fsflags |= MNT_FORCE;
636 vfs_freeopt(optlist, opt);
637 }
638 else if (strcmp(opt->name, "reload") == 0) {
639 fsflags |= MNT_RELOAD;
640 vfs_freeopt(optlist, opt);
641 }
642 else if (strcmp(opt->name, "multilabel") == 0)
643 fsflags |= MNT_MULTILABEL;
644 else if (strcmp(opt->name, "noasync") == 0)
645 fsflags &= ~MNT_ASYNC;
646 else if (strcmp(opt->name, "noatime") == 0)
647 fsflags |= MNT_NOATIME;
648 else if (strcmp(opt->name, "atime") == 0) {
649 free(opt->name, M_MOUNT);
650 opt->name = strdup("nonoatime", M_MOUNT);
651 }
652 else if (strcmp(opt->name, "noclusterr") == 0)
653 fsflags |= MNT_NOCLUSTERR;
654 else if (strcmp(opt->name, "clusterr") == 0) {
655 free(opt->name, M_MOUNT);
656 opt->name = strdup("nonoclusterr", M_MOUNT);
657 }
658 else if (strcmp(opt->name, "noclusterw") == 0)
659 fsflags |= MNT_NOCLUSTERW;
660 else if (strcmp(opt->name, "clusterw") == 0) {
661 free(opt->name, M_MOUNT);
662 opt->name = strdup("nonoclusterw", M_MOUNT);
663 }
664 else if (strcmp(opt->name, "noexec") == 0)
665 fsflags |= MNT_NOEXEC;
666 else if (strcmp(opt->name, "exec") == 0) {
667 free(opt->name, M_MOUNT);
668 opt->name = strdup("nonoexec", M_MOUNT);
669 }
670 else if (strcmp(opt->name, "nosuid") == 0)
671 fsflags |= MNT_NOSUID;
672 else if (strcmp(opt->name, "suid") == 0) {
673 free(opt->name, M_MOUNT);
674 opt->name = strdup("nonosuid", M_MOUNT);
675 }
676 else if (strcmp(opt->name, "nosymfollow") == 0)
677 fsflags |= MNT_NOSYMFOLLOW;
678 else if (strcmp(opt->name, "symfollow") == 0) {
679 free(opt->name, M_MOUNT);
680 opt->name = strdup("nonosymfollow", M_MOUNT);
681 }
682 else if (strcmp(opt->name, "noro") == 0) {
683 fsflags &= ~MNT_RDONLY;
684 autoro = false;
685 }
686 else if (strcmp(opt->name, "rw") == 0) {
687 fsflags &= ~MNT_RDONLY;
688 autoro = false;
689 }
690 else if (strcmp(opt->name, "ro") == 0) {
691 fsflags |= MNT_RDONLY;
692 autoro = false;
693 }
694 else if (strcmp(opt->name, "rdonly") == 0) {
695 free(opt->name, M_MOUNT);
696 opt->name = strdup("ro", M_MOUNT);
697 fsflags |= MNT_RDONLY;
698 autoro = false;
699 }
700 else if (strcmp(opt->name, "autoro") == 0) {
701 vfs_freeopt(optlist, opt);
702 autoro = true;
703 }
704 else if (strcmp(opt->name, "suiddir") == 0)
705 fsflags |= MNT_SUIDDIR;
706 else if (strcmp(opt->name, "sync") == 0)
707 fsflags |= MNT_SYNCHRONOUS;
708 else if (strcmp(opt->name, "union") == 0)
709 fsflags |= MNT_UNION;
710 else if (strcmp(opt->name, "automounted") == 0) {
711 fsflags |= MNT_AUTOMOUNTED;
712 vfs_freeopt(optlist, opt);
713 }
714 }
715
716 /*
717 * Be ultra-paranoid about making sure the type and fspath
718 * variables will fit in our mp buffers, including the
719 * terminating NUL.
720 */
721 if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
722 error = ENAMETOOLONG;
723 goto bail;
724 }
725
726 error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
727
728 /*
729 * See if we can mount in the read-only mode if the error code suggests
730 * that it could be possible and the mount options allow for that.
731 * Never try it if "[no]{ro|rw}" has been explicitly requested and not
732 * overridden by "autoro".
733 */
734 if (autoro && vfs_should_downgrade_to_ro_mount(fsflags, error)) {
735 printf("%s: R/W mount failed, possibly R/O media,"
736 " trying R/O mount\n", __func__);
737 fsflags |= MNT_RDONLY;
738 error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
739 }
740 bail:
741 /* copyout the errmsg */
742 if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
743 && errmsg_len > 0 && errmsg != NULL) {
744 if (fsoptions->uio_segflg == UIO_SYSSPACE) {
745 bcopy(errmsg,
746 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
747 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
748 } else {
749 copyout(errmsg,
750 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
751 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
752 }
753 }
754
755 if (optlist != NULL)
756 vfs_freeopts(optlist);
757 return (error);
758 }
759
760 /*
761 * Old mount API.
762 */
763 #ifndef _SYS_SYSPROTO_H_
764 struct mount_args {
765 char *type;
766 char *path;
767 int flags;
768 caddr_t data;
769 };
770 #endif
771 /* ARGSUSED */
772 int
773 sys_mount(struct thread *td, struct mount_args *uap)
774 {
775 char *fstype;
776 struct vfsconf *vfsp = NULL;
777 struct mntarg *ma = NULL;
778 uint64_t flags;
779 int error;
780
781 /*
782 * Mount flags are now 64 bits. On 32-bit architectures only
783 * 32-bits are passed in, but from here on everything handles
784 * 64-bit flags correctly.
785 */
786 flags = uap->flags;
787
788 AUDIT_ARG_FFLAGS(flags);
789
790 /*
791 * Filter out MNT_ROOTFS. We do not want clients of mount() in
792 * userspace to set this flag, but we must filter it out if we want
793 * MNT_UPDATE on the root file system to work.
794 * MNT_ROOTFS should only be set by the kernel when mounting its
795 * root file system.
796 */
797 flags &= ~MNT_ROOTFS;
798
799 fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
800 error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
801 if (error) {
802 free(fstype, M_TEMP);
803 return (error);
804 }
805
806 AUDIT_ARG_TEXT(fstype);
807 vfsp = vfs_byname_kld(fstype, td, &error);
808 free(fstype, M_TEMP);
809 if (vfsp == NULL)
810 return (ENOENT);
811 if (vfsp->vfc_vfsops->vfs_cmount == NULL)
812 return (EOPNOTSUPP);
813
814 ma = mount_argsu(ma, "fstype", uap->type, MFSNAMELEN);
815 ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
816 ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
817 ma = mount_argb(ma, !(flags & MNT_NOSUID), "nosuid");
818 ma = mount_argb(ma, !(flags & MNT_NOEXEC), "noexec");
819
820 error = vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, flags);
821 return (error);
822 }
823
824 /*
825 * vfs_domount_first(): first file system mount (not update)
826 */
827 static int
828 vfs_domount_first(
829 struct thread *td, /* Calling thread. */
830 struct vfsconf *vfsp, /* File system type. */
831 char *fspath, /* Mount path. */
832 struct vnode *vp, /* Vnode to be covered. */
833 uint64_t fsflags, /* Flags common to all filesystems. */
834 struct vfsoptlist **optlist /* Options local to the filesystem. */
835 )
836 {
837 struct vattr va;
838 struct mount *mp;
839 struct vnode *newdp;
840 int error;
841
842 ASSERT_VOP_ELOCKED(vp, __func__);
843 KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));
844
845 /*
846 * If the jail of the calling thread lacks permission for this type of
847 * file system, deny immediately.
848 */
849 if (jailed(td->td_ucred) && !prison_allow(td->td_ucred,
850 vfsp->vfc_prison_flag)) {
851 vput(vp);
852 return (EPERM);
853 }
854
855 /*
856 * If the user is not root, ensure that they own the directory
857 * onto which we are attempting to mount.
858 */
859 error = VOP_GETATTR(vp, &va, td->td_ucred);
860 if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
861 error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN, 0);
862 if (error == 0)
863 error = vinvalbuf(vp, V_SAVE, 0, 0);
864 if (error == 0 && vp->v_type != VDIR)
865 error = ENOTDIR;
866 if (error == 0) {
867 VI_LOCK(vp);
868 if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
869 vp->v_iflag |= VI_MOUNT;
870 else
871 error = EBUSY;
872 VI_UNLOCK(vp);
873 }
874 if (error != 0) {
875 vput(vp);
876 return (error);
877 }
878 VOP_UNLOCK(vp, 0);
879
880 /* Allocate and initialize the filesystem. */
881 mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
882 /* XXXMAC: pass to vfs_mount_alloc? */
883 mp->mnt_optnew = *optlist;
884 /* Set the mount level flags. */
885 mp->mnt_flag = (fsflags & (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY));
886
887 /*
888 * Mount the filesystem.
889 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
890 * get. No freeing of cn_pnbuf.
891 */
892 error = VFS_MOUNT(mp);
893 if (error != 0) {
894 vfs_unbusy(mp);
895 mp->mnt_vnodecovered = NULL;
896 vfs_mount_destroy(mp);
897 VI_LOCK(vp);
898 vp->v_iflag &= ~VI_MOUNT;
899 VI_UNLOCK(vp);
900 vrele(vp);
901 return (error);
902 }
903
904 if (mp->mnt_opt != NULL)
905 vfs_freeopts(mp->mnt_opt);
906 mp->mnt_opt = mp->mnt_optnew;
907 *optlist = NULL;
908 (void)VFS_STATFS(mp, &mp->mnt_stat);
909
910 /*
911 * Prevent external consumers of mount options from reading mnt_optnew.
912 */
913 mp->mnt_optnew = NULL;
914
915 MNT_ILOCK(mp);
916 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
917 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
918 mp->mnt_kern_flag |= MNTK_ASYNC;
919 else
920 mp->mnt_kern_flag &= ~MNTK_ASYNC;
921 MNT_IUNLOCK(mp);
922
923 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
924 cache_purge(vp);
925 VI_LOCK(vp);
926 vp->v_iflag &= ~VI_MOUNT;
927 VI_UNLOCK(vp);
928 vp->v_mountedhere = mp;
929 /* Place the new filesystem at the end of the mount list. */
930 mtx_lock(&mountlist_mtx);
931 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
932 mtx_unlock(&mountlist_mtx);
933 vfs_event_signal(NULL, VQ_MOUNT, 0);
934 if (VFS_ROOT(mp, LK_EXCLUSIVE, &newdp))
935 panic("mount: lost mount");
936 VOP_UNLOCK(vp, 0);
937 EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td);
938 VOP_UNLOCK(newdp, 0);
939 mountcheckdirs(vp, newdp);
940 vrele(newdp);
941 if ((mp->mnt_flag & MNT_RDONLY) == 0)
942 vfs_allocate_syncvnode(mp);
943 vfs_unbusy(mp);
944 return (0);
945 }
946
947 /*
948 * vfs_domount_update(): update of mounted file system
949 */
950 static int
951 vfs_domount_update(
952 struct thread *td, /* Calling thread. */
953 struct vnode *vp, /* Mount point vnode. */
954 uint64_t fsflags, /* Flags common to all filesystems. */
955 struct vfsoptlist **optlist /* Options local to the filesystem. */
956 )
957 {
958 struct export_args export;
959 void *bufp;
960 struct mount *mp;
961 int error, export_error, len;
962 uint64_t flag;
963
964 ASSERT_VOP_ELOCKED(vp, __func__);
965 KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
966 mp = vp->v_mount;
967
968 if ((vp->v_vflag & VV_ROOT) == 0) {
969 if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
970 == 0)
971 error = EXDEV;
972 else
973 error = EINVAL;
974 vput(vp);
975 return (error);
976 }
977
978 /*
979 * We only allow the filesystem to be reloaded if it
980 * is currently mounted read-only.
981 */
982 flag = mp->mnt_flag;
983 if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
984 vput(vp);
985 return (EOPNOTSUPP); /* Needs translation */
986 }
987 /*
988 * Only privileged root, or (if MNT_USER is set) the user that
989 * did the original mount is permitted to update it.
990 */
991 error = vfs_suser(mp, td);
992 if (error != 0) {
993 vput(vp);
994 return (error);
995 }
996 if (vfs_busy(mp, MBF_NOWAIT)) {
997 vput(vp);
998 return (EBUSY);
999 }
1000 VI_LOCK(vp);
1001 if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
1002 VI_UNLOCK(vp);
1003 vfs_unbusy(mp);
1004 vput(vp);
1005 return (EBUSY);
1006 }
1007 vp->v_iflag |= VI_MOUNT;
1008 VI_UNLOCK(vp);
1009 VOP_UNLOCK(vp, 0);
1010
1011 MNT_ILOCK(mp);
1012 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1013 MNT_IUNLOCK(mp);
1014 error = EBUSY;
1015 goto end;
1016 }
1017 mp->mnt_flag &= ~MNT_UPDATEMASK;
1018 mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
1019 MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
1020 if ((mp->mnt_flag & MNT_ASYNC) == 0)
1021 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1022 MNT_IUNLOCK(mp);
1023 mp->mnt_optnew = *optlist;
1024 vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
1025
1026 /*
1027 * Mount the filesystem.
1028 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
1029 * get. No freeing of cn_pnbuf.
1030 */
1031 error = VFS_MOUNT(mp);
1032
1033 export_error = 0;
1034 /* Process the export option. */
1035 if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
1036 &len) == 0) {
1037 /* Assume that there is only 1 ABI for each length. */
1038 switch (len) {
1039 case (sizeof(struct oexport_args)):
1040 bzero(&export, sizeof(export));
1041 /* FALLTHROUGH */
1042 case (sizeof(export)):
1043 bcopy(bufp, &export, len);
1044 export_error = vfs_export(mp, &export);
1045 break;
1046 default:
1047 export_error = EINVAL;
1048 break;
1049 }
1050 }
1051
1052 MNT_ILOCK(mp);
1053 if (error == 0) {
1054 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
1055 MNT_SNAPSHOT);
1056 } else {
1057 /*
1058 * If we fail, restore old mount flags. MNT_QUOTA is special,
1059 * because it is not part of MNT_UPDATEMASK, but it could have
1060 * changed in the meantime if quotactl(2) was called.
1061 * All in all we want current value of MNT_QUOTA, not the old
1062 * one.
1063 */
1064 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
1065 }
1066 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1067 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1068 mp->mnt_kern_flag |= MNTK_ASYNC;
1069 else
1070 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1071 MNT_IUNLOCK(mp);
1072
1073 if (error != 0)
1074 goto end;
1075
1076 if (mp->mnt_opt != NULL)
1077 vfs_freeopts(mp->mnt_opt);
1078 mp->mnt_opt = mp->mnt_optnew;
1079 *optlist = NULL;
1080 (void)VFS_STATFS(mp, &mp->mnt_stat);
1081 /*
1082 * Prevent external consumers of mount options from reading
1083 * mnt_optnew.
1084 */
1085 mp->mnt_optnew = NULL;
1086
1087 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1088 vfs_allocate_syncvnode(mp);
1089 else
1090 vfs_deallocate_syncvnode(mp);
1091 end:
1092 vfs_unbusy(mp);
1093 VI_LOCK(vp);
1094 vp->v_iflag &= ~VI_MOUNT;
1095 VI_UNLOCK(vp);
1096 vrele(vp);
1097 return (error != 0 ? error : export_error);
1098 }
1099
1100 /*
1101 * vfs_domount(): actually attempt a filesystem mount.
1102 */
1103 static int
1104 vfs_domount(
1105 struct thread *td, /* Calling thread. */
1106 const char *fstype, /* Filesystem type. */
1107 char *fspath, /* Mount path. */
1108 uint64_t fsflags, /* Flags common to all filesystems. */
1109 struct vfsoptlist **optlist /* Options local to the filesystem. */
1110 )
1111 {
1112 struct vfsconf *vfsp;
1113 struct nameidata nd;
1114 struct vnode *vp;
1115 char *pathbuf;
1116 int error;
1117
1118 /*
1119 * Be ultra-paranoid about making sure the type and fspath
1120 * variables will fit in our mp buffers, including the
1121 * terminating NUL.
1122 */
1123 if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
1124 return (ENAMETOOLONG);
1125
1126 if (jailed(td->td_ucred) || usermount == 0) {
1127 if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
1128 return (error);
1129 }
1130
1131 /*
1132 * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
1133 */
1134 if (fsflags & MNT_EXPORTED) {
1135 error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
1136 if (error)
1137 return (error);
1138 }
1139 if (fsflags & MNT_SUIDDIR) {
1140 error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
1141 if (error)
1142 return (error);
1143 }
1144 /*
1145 * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
1146 */
1147 if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
1148 if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
1149 fsflags |= MNT_NOSUID | MNT_USER;
1150 }
1151
1152 /* Load KLDs before we lock the covered vnode to avoid reversals. */
1153 vfsp = NULL;
1154 if ((fsflags & MNT_UPDATE) == 0) {
1155 /* Don't try to load KLDs if we're mounting the root. */
1156 if (fsflags & MNT_ROOTFS)
1157 vfsp = vfs_byname(fstype);
1158 else
1159 vfsp = vfs_byname_kld(fstype, td, &error);
1160 if (vfsp == NULL)
1161 return (ENODEV);
1162 }
1163
1164 /*
1165 * Get vnode to be covered or mount point's vnode in case of MNT_UPDATE.
1166 */
1167 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1168 UIO_SYSSPACE, fspath, td);
1169 error = namei(&nd);
1170 if (error != 0)
1171 return (error);
1172 NDFREE(&nd, NDF_ONLY_PNBUF);
1173 vp = nd.ni_vp;
1174 if ((fsflags & MNT_UPDATE) == 0) {
1175 pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1176 strcpy(pathbuf, fspath);
1177 error = vn_path_to_global_path(td, vp, pathbuf, MNAMELEN);
1178 /* debug.disablefullpath == 1 results in ENODEV */
1179 if (error == 0 || error == ENODEV) {
1180 error = vfs_domount_first(td, vfsp, pathbuf, vp,
1181 fsflags, optlist);
1182 }
1183 free(pathbuf, M_TEMP);
1184 } else
1185 error = vfs_domount_update(td, vp, fsflags, optlist);
1186
1187 return (error);
1188 }
1189
1190 /*
1191 * Unmount a filesystem.
1192 *
1193 * Note: unmount takes a path to the vnode mounted on as argument, not
1194 * the special file (as before).
1195 */
1196 #ifndef _SYS_SYSPROTO_H_
1197 struct unmount_args {
1198 char *path;
1199 int flags;
1200 };
1201 #endif
1202 /* ARGSUSED */
1203 int
1204 sys_unmount(struct thread *td, struct unmount_args *uap)
1205 {
1206 struct nameidata nd;
1207 struct mount *mp;
1208 char *pathbuf;
1209 int error, id0, id1;
1210
1211 AUDIT_ARG_VALUE(uap->flags);
1212 if (jailed(td->td_ucred) || usermount == 0) {
1213 error = priv_check(td, PRIV_VFS_UNMOUNT);
1214 if (error)
1215 return (error);
1216 }
1217
1218 pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1219 error = copyinstr(uap->path, pathbuf, MNAMELEN, NULL);
1220 if (error) {
1221 free(pathbuf, M_TEMP);
1222 return (error);
1223 }
1224 if (uap->flags & MNT_BYFSID) {
1225 AUDIT_ARG_TEXT(pathbuf);
1226 /* Decode the filesystem ID. */
1227 if (sscanf(pathbuf, "FSID:%d:%d", &id0, &id1) != 2) {
1228 free(pathbuf, M_TEMP);
1229 return (EINVAL);
1230 }
1231
1232 mtx_lock(&mountlist_mtx);
1233 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1234 if (mp->mnt_stat.f_fsid.val[0] == id0 &&
1235 mp->mnt_stat.f_fsid.val[1] == id1) {
1236 vfs_ref(mp);
1237 break;
1238 }
1239 }
1240 mtx_unlock(&mountlist_mtx);
1241 } else {
1242 /*
1243 * Try to find global path for path argument.
1244 */
1245 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1246 UIO_SYSSPACE, pathbuf, td);
1247 if (namei(&nd) == 0) {
1248 NDFREE(&nd, NDF_ONLY_PNBUF);
1249 error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
1250 MNAMELEN);
1251 if (error == 0 || error == ENODEV)
1252 vput(nd.ni_vp);
1253 }
1254 mtx_lock(&mountlist_mtx);
1255 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1256 if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
1257 vfs_ref(mp);
1258 break;
1259 }
1260 }
1261 mtx_unlock(&mountlist_mtx);
1262 }
1263 free(pathbuf, M_TEMP);
1264 if (mp == NULL) {
1265 /*
1266 * Previously we returned ENOENT for a nonexistent path and
1267 * EINVAL for a non-mountpoint. We cannot tell these apart
1268 * now, so in the !MNT_BYFSID case return the more likely
1269 * EINVAL for compatibility.
1270 */
1271 return ((uap->flags & MNT_BYFSID) ? ENOENT : EINVAL);
1272 }
1273
1274 /*
1275 * Don't allow unmounting the root filesystem.
1276 */
1277 if (mp->mnt_flag & MNT_ROOTFS) {
1278 vfs_rel(mp);
1279 return (EINVAL);
1280 }
1281 error = dounmount(mp, uap->flags, td);
1282 return (error);
1283 }
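/*
 * Illustrative userland sketch (not part of the original source):
 * unmounting by filesystem ID, which the MNT_BYFSID branch above parses
 * with sscanf("FSID:%d:%d").  Both words of f_fsid are encoded into the
 * path argument.  The mount point path passed in is made up.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>
#include <err.h>

static void
example_unmount_byfsid(const char *mntpoint)
{
	struct statfs sfs;
	char fsid[32];

	if (statfs(mntpoint, &sfs) == -1)
		err(1, "statfs");
	snprintf(fsid, sizeof(fsid), "FSID:%d:%d",
	    sfs.f_fsid.val[0], sfs.f_fsid.val[1]);
	if (unmount(fsid, MNT_BYFSID) == -1)
		err(1, "unmount");
}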
1284
1285 /*
1286 * Return error if any of the vnodes, ignoring the root vnode
1287 * and the syncer vnode, have non-zero usecount.
1288 *
1289 * This function is purely advisory - it can return false positives
1290 * and negatives.
1291 */
1292 static int
1293 vfs_check_usecounts(struct mount *mp)
1294 {
1295 struct vnode *vp, *mvp;
1296
1297 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1298 if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
1299 vp->v_usecount != 0) {
1300 VI_UNLOCK(vp);
1301 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1302 return (EBUSY);
1303 }
1304 VI_UNLOCK(vp);
1305 }
1306
1307 return (0);
1308 }
1309
1310 static void
1311 dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
1312 {
1313
1314 mtx_assert(MNT_MTX(mp), MA_OWNED);
1315 mp->mnt_kern_flag &= ~mntkflags;
1316 if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
1317 mp->mnt_kern_flag &= ~MNTK_MWAIT;
1318 wakeup(mp);
1319 }
1320 MNT_IUNLOCK(mp);
1321 if (coveredvp != NULL) {
1322 VOP_UNLOCK(coveredvp, 0);
1323 vdrop(coveredvp);
1324 }
1325 vn_finished_write(mp);
1326 }
1327
1328 /*
1329 * Do the actual filesystem unmount.
1330 */
1331 int
1332 dounmount(struct mount *mp, int flags, struct thread *td)
1333 {
1334 struct vnode *coveredvp;
1335 int error;
1336 uint64_t async_flag;
1337 int mnt_gen_r;
1338
1339 if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
1340 mnt_gen_r = mp->mnt_gen;
1341 VI_LOCK(coveredvp);
1342 vholdl(coveredvp);
1343 vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
1344 /*
1345 * Check for mp being unmounted while waiting for the
1346 * covered vnode lock.
1347 */
1348 if (coveredvp->v_mountedhere != mp ||
1349 coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
1350 VOP_UNLOCK(coveredvp, 0);
1351 vdrop(coveredvp);
1352 vfs_rel(mp);
1353 return (EBUSY);
1354 }
1355 }
1356
1357 /*
1358 * Only privileged root, or (if MNT_USER is set) the user that did the
1359 * original mount is permitted to unmount this filesystem.
1360 */
1361 error = vfs_suser(mp, td);
1362 if (error != 0) {
1363 if (coveredvp != NULL) {
1364 VOP_UNLOCK(coveredvp, 0);
1365 vdrop(coveredvp);
1366 }
1367 vfs_rel(mp);
1368 return (error);
1369 }
1370
1371 vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
1372 MNT_ILOCK(mp);
1373 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
1374 (mp->mnt_flag & MNT_UPDATE) != 0 ||
1375 !TAILQ_EMPTY(&mp->mnt_uppers)) {
1376 dounmount_cleanup(mp, coveredvp, 0);
1377 return (EBUSY);
1378 }
1379 mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_NOINSMNTQ;
1380 if (flags & MNT_NONBUSY) {
1381 MNT_IUNLOCK(mp);
1382 error = vfs_check_usecounts(mp);
1383 MNT_ILOCK(mp);
1384 if (error != 0) {
1385 dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT |
1386 MNTK_NOINSMNTQ);
1387 return (error);
1388 }
1389 }
1390 /* Allow filesystems to detect that a forced unmount is in progress. */
1391 if (flags & MNT_FORCE) {
1392 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
1393 MNT_IUNLOCK(mp);
1394 /*
1395 * Must be done after setting MNTK_UNMOUNTF and before
1396 * waiting for mnt_lockref to become 0.
1397 */
1398 VFS_PURGE(mp);
1399 MNT_ILOCK(mp);
1400 }
1401 error = 0;
1402 if (mp->mnt_lockref) {
1403 mp->mnt_kern_flag |= MNTK_DRAINING;
1404 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
1405 "mount drain", 0);
1406 }
1407 MNT_IUNLOCK(mp);
1408 KASSERT(mp->mnt_lockref == 0,
1409 ("%s: invalid lock refcount in the drain path @ %s:%d",
1410 __func__, __FILE__, __LINE__));
1411 KASSERT(error == 0,
1412 ("%s: invalid return value for msleep in the drain path @ %s:%d",
1413 __func__, __FILE__, __LINE__));
1414
1415 if (mp->mnt_flag & MNT_EXPUBLIC)
1416 vfs_setpublicfs(NULL, NULL, NULL);
1417
1418 /*
1419 * From now on, we can claim that the use reference on the
1420 * coveredvp is ours, and the ref can be released only by a
1421 * successful unmount by us, or left for a later unmount
1422 * attempt. The previously acquired hold reference is no
1423 * longer needed to protect the vnode from reuse.
1424 */
1425 if (coveredvp != NULL)
1426 vdrop(coveredvp);
1427
1428 vfs_msync(mp, MNT_WAIT);
1429 MNT_ILOCK(mp);
1430 async_flag = mp->mnt_flag & MNT_ASYNC;
1431 mp->mnt_flag &= ~MNT_ASYNC;
1432 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1433 MNT_IUNLOCK(mp);
1434 cache_purgevfs(mp, false); /* remove cache entries for this file sys */
1435 vfs_deallocate_syncvnode(mp);
1436 if ((mp->mnt_flag & MNT_RDONLY) != 0 || (flags & MNT_FORCE) != 0 ||
1437 (error = VFS_SYNC(mp, MNT_WAIT)) == 0)
1438 error = VFS_UNMOUNT(mp, flags);
1439 vn_finished_write(mp);
1440 /*
1441 * If we failed to flush the dirty blocks for this mount point,
1442 * undo all the cdir/rdir and rootvnode changes we made above.
1443 * Unless we failed to do so because the device is reporting that
1444 * it doesn't exist anymore.
1445 */
1446 if (error && error != ENXIO) {
1447 MNT_ILOCK(mp);
1448 mp->mnt_kern_flag &= ~MNTK_NOINSMNTQ;
1449 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
1450 MNT_IUNLOCK(mp);
1451 vfs_allocate_syncvnode(mp);
1452 MNT_ILOCK(mp);
1453 }
1454 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
1455 mp->mnt_flag |= async_flag;
1456 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1457 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1458 mp->mnt_kern_flag |= MNTK_ASYNC;
1459 if (mp->mnt_kern_flag & MNTK_MWAIT) {
1460 mp->mnt_kern_flag &= ~MNTK_MWAIT;
1461 wakeup(mp);
1462 }
1463 MNT_IUNLOCK(mp);
1464 if (coveredvp)
1465 VOP_UNLOCK(coveredvp, 0);
1466 return (error);
1467 }
1468 mtx_lock(&mountlist_mtx);
1469 TAILQ_REMOVE(&mountlist, mp, mnt_list);
1470 mtx_unlock(&mountlist_mtx);
1471 EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td);
1472 if (coveredvp != NULL) {
1473 coveredvp->v_mountedhere = NULL;
1474 VOP_UNLOCK(coveredvp, 0);
1475 }
1476 vfs_event_signal(NULL, VQ_UNMOUNT, 0);
1477 if (rootvnode != NULL && mp == rootvnode->v_mount) {
1478 vrele(rootvnode);
1479 rootvnode = NULL;
1480 }
1481 if (mp == rootdevmp)
1482 rootdevmp = NULL;
1483 vfs_mount_destroy(mp);
1484 return (0);
1485 }
1486
1487 /*
1488 * Report errors during filesystem mounting.
1489 */
1490 void
1491 vfs_mount_error(struct mount *mp, const char *fmt, ...)
1492 {
1493 struct vfsoptlist *moptlist = mp->mnt_optnew;
1494 va_list ap;
1495 int error, len;
1496 char *errmsg;
1497
1498 error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
1499 if (error || errmsg == NULL || len <= 0)
1500 return;
1501
1502 va_start(ap, fmt);
1503 vsnprintf(errmsg, (size_t)len, fmt, ap);
1504 va_end(ap);
1505 }
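/*
 * Illustrative sketch (not part of the original source): how a
 * filesystem's mount routine can report a failure through the "errmsg"
 * option with vfs_mount_error() before returning an error.  The block
 * size check is a made-up condition.
 */
static int
example_check_bsize(struct mount *mp, int bsize)
{

	if (bsize <= 0 || (bsize & (bsize - 1)) != 0) {
		vfs_mount_error(mp, "invalid block size %d", bsize);
		return (EINVAL);
	}
	return (0);
}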
1506
1507 void
1508 vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
1509 {
1510 va_list ap;
1511 int error, len;
1512 char *errmsg;
1513
1514 error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
1515 if (error || errmsg == NULL || len <= 0)
1516 return;
1517
1518 va_start(ap, fmt);
1519 vsnprintf(errmsg, (size_t)len, fmt, ap);
1520 va_end(ap);
1521 }
1522
1523 /*
1524 * ---------------------------------------------------------------------
1525 * Functions for querying mount options/arguments from filesystems.
1526 */
1527
1528 /*
1529 * Check that no unknown options are given
1530 */
1531 int
1532 vfs_filteropt(struct vfsoptlist *opts, const char **legal)
1533 {
1534 struct vfsopt *opt;
1535 char errmsg[255];
1536 const char **t, *p, *q;
1537 int ret = 0;
1538
1539 TAILQ_FOREACH(opt, opts, link) {
1540 p = opt->name;
1541 q = NULL;
1542 if (p[0] == 'n' && p[1] == 'o')
1543 q = p + 2;
1544 for(t = global_opts; *t != NULL; t++) {
1545 if (strcmp(*t, p) == 0)
1546 break;
1547 if (q != NULL) {
1548 if (strcmp(*t, q) == 0)
1549 break;
1550 }
1551 }
1552 if (*t != NULL)
1553 continue;
1554 for(t = legal; *t != NULL; t++) {
1555 if (strcmp(*t, p) == 0)
1556 break;
1557 if (q != NULL) {
1558 if (strcmp(*t, q) == 0)
1559 break;
1560 }
1561 }
1562 if (*t != NULL)
1563 continue;
1564 snprintf(errmsg, sizeof(errmsg),
1565 "mount option <%s> is unknown", p);
1566 ret = EINVAL;
1567 }
1568 if (ret != 0) {
1569 TAILQ_FOREACH(opt, opts, link) {
1570 if (strcmp(opt->name, "errmsg") == 0) {
1571 strncpy((char *)opt->value, errmsg, opt->len);
1572 break;
1573 }
1574 }
1575 if (opt == NULL)
1576 printf("%s\n", errmsg);
1577 }
1578 return (ret);
1579 }
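/*
 * Illustrative sketch (not part of the original source): a filesystem
 * rejecting unknown mount options with vfs_filteropt().  The option
 * list is made up; entries in global_opts such as "ro" and "noexec"
 * are accepted by vfs_filteropt() without being listed here.
 */
static const char *example_legal_opts[] = {
	"from", "export", "acls", NULL
};

static int
example_filter_opts(struct mount *mp)
{

	return (vfs_filteropt(mp->mnt_optnew, example_legal_opts));
}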
1580
1581 /*
1582 * Get a mount option by its name.
1583 *
1584 * Return 0 if the option was found, ENOENT otherwise.
1585 * If len is non-NULL it will be filled with the length
1586 * of the option. If buf is non-NULL, it will be filled
1587 * with the address of the option.
1588 */
1589 int
1590 vfs_getopt(struct vfsoptlist *opts, const char *name, void **buf, int *len)
1591 {
1592 struct vfsopt *opt;
1593
1594 KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
1595
1596 TAILQ_FOREACH(opt, opts, link) {
1597 if (strcmp(name, opt->name) == 0) {
1598 opt->seen = 1;
1599 if (len != NULL)
1600 *len = opt->len;
1601 if (buf != NULL)
1602 *buf = opt->value;
1603 return (0);
1604 }
1605 }
1606 return (ENOENT);
1607 }
1608
1609 int
1610 vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
1611 {
1612 struct vfsopt *opt;
1613
1614 if (opts == NULL)
1615 return (-1);
1616
1617 TAILQ_FOREACH(opt, opts, link) {
1618 if (strcmp(name, opt->name) == 0) {
1619 opt->seen = 1;
1620 return (opt->pos);
1621 }
1622 }
1623 return (-1);
1624 }
1625
1626 int
1627 vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
1628 {
1629 char *opt_value, *vtp;
1630 quad_t iv;
1631 int error, opt_len;
1632
1633 error = vfs_getopt(opts, name, (void **)&opt_value, &opt_len);
1634 if (error != 0)
1635 return (error);
1636 if (opt_len == 0 || opt_value == NULL)
1637 return (EINVAL);
1638 if (opt_value[0] == '\0' || opt_value[opt_len - 1] != '\0')
1639 return (EINVAL);
1640 iv = strtoq(opt_value, &vtp, 0);
1641 if (vtp == opt_value || (vtp[0] != '\0' && vtp[1] != '\0'))
1642 return (EINVAL);
1643 if (iv < 0)
1644 return (EINVAL);
1645 switch (vtp[0]) {
1646 case 't':
1647 case 'T':
1648 iv *= 1024;
1649 case 'g':
1650 case 'G':
1651 iv *= 1024;
1652 case 'm':
1653 case 'M':
1654 iv *= 1024;
1655 case 'k':
1656 case 'K':
1657 iv *= 1024;
1658 case '\0':
1659 break;
1660 default:
1661 return (EINVAL);
1662 }
1663 *value = iv;
1664
1665 return (0);
1666 }
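/*
 * Illustrative sketch (not part of the original source): vfs_getopt_size()
 * above accepts an optional single k/K, m/M, g/G or t/T suffix, each case
 * falling through to multiply by another factor of 1024.  The option name
 * "size" and the sample values are made up:
 *
 *	"8192"	-> 8192
 *	"16m"	-> 16 * 1024 * 1024 = 16777216
 *	"2G"	-> 2 * 1024 * 1024 * 1024
 *	"1x"	-> EINVAL (unknown suffix)
 */
static int
example_get_size(struct vfsoptlist *opts, off_t *sizep)
{

	return (vfs_getopt_size(opts, "size", sizep));
}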
1667
1668 char *
1669 vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
1670 {
1671 struct vfsopt *opt;
1672
1673 *error = 0;
1674 TAILQ_FOREACH(opt, opts, link) {
1675 if (strcmp(name, opt->name) != 0)
1676 continue;
1677 opt->seen = 1;
1678 if (opt->len == 0 ||
1679 ((char *)opt->value)[opt->len - 1] != '\0') {
1680 *error = EINVAL;
1681 return (NULL);
1682 }
1683 return (opt->value);
1684 }
1685 *error = ENOENT;
1686 return (NULL);
1687 }
1688
1689 int
1690 vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
1691 uint64_t val)
1692 {
1693 struct vfsopt *opt;
1694
1695 TAILQ_FOREACH(opt, opts, link) {
1696 if (strcmp(name, opt->name) == 0) {
1697 opt->seen = 1;
1698 if (w != NULL)
1699 *w |= val;
1700 return (1);
1701 }
1702 }
1703 if (w != NULL)
1704 *w &= ~val;
1705 return (0);
1706 }
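/*
 * Illustrative sketch (not part of the original source): vfs_flagopt()
 * sets the given flag bit when the named option is present and clears it
 * otherwise.  Collecting the bits in a local variable sidesteps any
 * locking questions around mnt_flag; the option-to-flag pairings are the
 * conventional ones, shown only as an example.
 */
static uint64_t
example_collect_flags(struct mount *mp)
{
	uint64_t flags;

	flags = 0;
	vfs_flagopt(mp->mnt_optnew, "union", &flags, MNT_UNION);
	vfs_flagopt(mp->mnt_optnew, "suiddir", &flags, MNT_SUIDDIR);
	return (flags);
}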
1707
1708 int
1709 vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
1710 {
1711 va_list ap;
1712 struct vfsopt *opt;
1713 int ret;
1714
1715 KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
1716
1717 TAILQ_FOREACH(opt, opts, link) {
1718 if (strcmp(name, opt->name) != 0)
1719 continue;
1720 opt->seen = 1;
1721 if (opt->len == 0 || opt->value == NULL)
1722 return (0);
1723 if (((char *)opt->value)[opt->len - 1] != '\0')
1724 return (0);
1725 va_start(ap, fmt);
1726 ret = vsscanf(opt->value, fmt, ap);
1727 va_end(ap);
1728 return (ret);
1729 }
1730 return (0);
1731 }
1732
1733 int
1734 vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
1735 {
1736 struct vfsopt *opt;
1737
1738 TAILQ_FOREACH(opt, opts, link) {
1739 if (strcmp(name, opt->name) != 0)
1740 continue;
1741 opt->seen = 1;
1742 if (opt->value == NULL)
1743 opt->len = len;
1744 else {
1745 if (opt->len != len)
1746 return (EINVAL);
1747 bcopy(value, opt->value, len);
1748 }
1749 return (0);
1750 }
1751 return (ENOENT);
1752 }
1753
1754 int
1755 vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
1756 {
1757 struct vfsopt *opt;
1758
1759 TAILQ_FOREACH(opt, opts, link) {
1760 if (strcmp(name, opt->name) != 0)
1761 continue;
1762 opt->seen = 1;
1763 if (opt->value == NULL)
1764 opt->len = len;
1765 else {
1766 if (opt->len < len)
1767 return (EINVAL);
1768 opt->len = len;
1769 bcopy(value, opt->value, len);
1770 }
1771 return (0);
1772 }
1773 return (ENOENT);
1774 }
1775
1776 int
1777 vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
1778 {
1779 struct vfsopt *opt;
1780
1781 TAILQ_FOREACH(opt, opts, link) {
1782 if (strcmp(name, opt->name) != 0)
1783 continue;
1784 opt->seen = 1;
1785 if (opt->value == NULL)
1786 opt->len = strlen(value) + 1;
1787 else if (strlcpy(opt->value, value, opt->len) >= opt->len)
1788 return (EINVAL);
1789 return (0);
1790 }
1791 return (ENOENT);
1792 }
1793
1794 /*
1795 * Find and copy a mount option.
1796 *
1797 * The size of the buffer has to be specified
1798 * in len, if it is not the same length as the
1799 * mount option, EINVAL is returned.
1800 * Returns ENOENT if the option is not found.
1801 */
1802 int
1803 vfs_copyopt(struct vfsoptlist *opts, const char *name, void *dest, int len)
1804 {
1805 struct vfsopt *opt;
1806
1807 KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));
1808
1809 TAILQ_FOREACH(opt, opts, link) {
1810 if (strcmp(name, opt->name) == 0) {
1811 opt->seen = 1;
1812 if (len != opt->len)
1813 return (EINVAL);
1814 bcopy(opt->value, dest, opt->len);
1815 return (0);
1816 }
1817 }
1818 return (ENOENT);
1819 }
1820
1821 int
1822 __vfs_statfs(struct mount *mp, struct statfs *sbp)
1823 {
1824 int error;
1825
1826 error = mp->mnt_op->vfs_statfs(mp, &mp->mnt_stat);
1827 if (sbp != &mp->mnt_stat)
1828 *sbp = mp->mnt_stat;
1829 return (error);
1830 }
1831
1832 void
1833 vfs_mountedfrom(struct mount *mp, const char *from)
1834 {
1835
1836 bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
1837 strlcpy(mp->mnt_stat.f_mntfromname, from,
1838 sizeof mp->mnt_stat.f_mntfromname);
1839 }
1840
1841 /*
1842 * ---------------------------------------------------------------------
1843 * This is the api for building mount args and mounting filesystems from
1844 * inside the kernel.
1845 *
1846 * The API works by accumulation of individual args. First error is
1847 * latched.
1848 *
1849 * XXX: should be documented in new manpage kernel_mount(9)
1850 */
1851
1852 /* A memory allocation which must be freed when we are done */
1853 struct mntaarg {
1854 SLIST_ENTRY(mntaarg) next;
1855 };
1856
1857 /* The header for the mount arguments */
1858 struct mntarg {
1859 struct iovec *v;
1860 int len;
1861 int error;
1862 SLIST_HEAD(, mntaarg) list;
1863 };
1864
1865 /*
1866 * Add a boolean argument.
1867 *
1868 * flag is the boolean value.
1869 * name must start with "no".
1870 */
1871 struct mntarg *
1872 mount_argb(struct mntarg *ma, int flag, const char *name)
1873 {
1874
1875 KASSERT(name[0] == 'n' && name[1] == 'o',
1876 ("mount_argb(...,%s): name must start with 'no'", name));
1877
1878 return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
1879 }
1880
1881 /*
1882 * Add an argument printf style
1883 */
1884 struct mntarg *
1885 mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
1886 {
1887 va_list ap;
1888 struct mntaarg *maa;
1889 struct sbuf *sb;
1890 int len;
1891
1892 if (ma == NULL) {
1893 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1894 SLIST_INIT(&ma->list);
1895 }
1896 if (ma->error)
1897 return (ma);
1898
1899 ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
1900 M_MOUNT, M_WAITOK);
1901 ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
1902 ma->v[ma->len].iov_len = strlen(name) + 1;
1903 ma->len++;
1904
1905 sb = sbuf_new_auto();
1906 va_start(ap, fmt);
1907 sbuf_vprintf(sb, fmt, ap);
1908 va_end(ap);
1909 sbuf_finish(sb);
1910 len = sbuf_len(sb) + 1;
1911 maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
1912 SLIST_INSERT_HEAD(&ma->list, maa, next);
1913 bcopy(sbuf_data(sb), maa + 1, len);
1914 sbuf_delete(sb);
1915
1916 ma->v[ma->len].iov_base = maa + 1;
1917 ma->v[ma->len].iov_len = len;
1918 ma->len++;
1919
1920 return (ma);
1921 }
1922
1923 /*
1924 * Add an argument which is a userland string.
1925 */
1926 struct mntarg *
1927 mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
1928 {
1929 struct mntaarg *maa;
1930 char *tbuf;
1931
1932 if (val == NULL)
1933 return (ma);
1934 if (ma == NULL) {
1935 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1936 SLIST_INIT(&ma->list);
1937 }
1938 if (ma->error)
1939 return (ma);
1940 maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
1941 SLIST_INSERT_HEAD(&ma->list, maa, next);
1942 tbuf = (void *)(maa + 1);
1943 ma->error = copyinstr(val, tbuf, len, NULL);
1944 return (mount_arg(ma, name, tbuf, -1));
1945 }
1946
1947 /*
1948 * Plain argument.
1949 *
1950 * If length is -1, treat value as a C string.
1951 */
1952 struct mntarg *
1953 mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
1954 {
1955
1956 if (ma == NULL) {
1957 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1958 SLIST_INIT(&ma->list);
1959 }
1960 if (ma->error)
1961 return (ma);
1962
1963 ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
1964 M_MOUNT, M_WAITOK);
1965 ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
1966 ma->v[ma->len].iov_len = strlen(name) + 1;
1967 ma->len++;
1968
1969 ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
1970 if (len < 0)
1971 ma->v[ma->len].iov_len = strlen(val) + 1;
1972 else
1973 ma->v[ma->len].iov_len = len;
1974 ma->len++;
1975 return (ma);
1976 }
1977
1978 /*
1979 * Free a mntarg structure
1980 */
1981 static void
1982 free_mntarg(struct mntarg *ma)
1983 {
1984 struct mntaarg *maa;
1985
1986 while (!SLIST_EMPTY(&ma->list)) {
1987 maa = SLIST_FIRST(&ma->list);
1988 SLIST_REMOVE_HEAD(&ma->list, next);
1989 free(maa, M_MOUNT);
1990 }
1991 free(ma->v, M_MOUNT);
1992 free(ma, M_MOUNT);
1993 }
1994
1995 /*
1996 * Mount a filesystem
1997 */
1998 int
1999 kernel_mount(struct mntarg *ma, uint64_t flags)
2000 {
2001 struct uio auio;
2002 int error;
2003
2004 KASSERT(ma != NULL, ("kernel_mount NULL ma"));
2005 KASSERT(ma->v != NULL, ("kernel_mount NULL ma->v"));
2006 KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));
2007
2008 auio.uio_iov = ma->v;
2009 auio.uio_iovcnt = ma->len;
2010 auio.uio_segflg = UIO_SYSSPACE;
2011
2012 error = ma->error;
2013 if (!error)
2014 error = vfs_donmount(curthread, flags, &auio);
2015 free_mntarg(ma);
2016 return (error);
2017 }
2018
2019 /*
2020 * A printflike function to mount a filesystem.
2021 */
2022 int
2023 kernel_vmount(int flags, ...)
2024 {
2025 struct mntarg *ma = NULL;
2026 va_list ap;
2027 const char *cp;
2028 const void *vp;
2029 int error;
2030
2031 va_start(ap, flags);
2032 for (;;) {
2033 cp = va_arg(ap, const char *);
2034 if (cp == NULL)
2035 break;
2036 vp = va_arg(ap, const void *);
2037 ma = mount_arg(ma, cp, vp, (vp != NULL ? -1 : 0));
2038 }
2039 va_end(ap);
2040
2041 error = kernel_mount(ma, flags);
2042 return (error);
2043 }
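/*
 * Illustrative sketch (not part of the original source): building mount
 * arguments in-kernel with the accumulation API above and handing them
 * to kernel_mount().  The first error is latched inside the mntarg, so
 * the intermediate return values need not be checked.  The filesystem
 * type, paths and "timeout" option are made up.
 */
static int
example_kernel_mount(void)
{
	struct mntarg *ma;

	ma = NULL;
	ma = mount_arg(ma, "fstype", "ufs", -1);
	ma = mount_arg(ma, "fspath", "/mnt", -1);
	ma = mount_arg(ma, "from", "/dev/ada0p2", -1);
	ma = mount_argf(ma, "timeout", "%d", 30);
	return (kernel_mount(ma, MNT_RDONLY));
}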
2044
2045 void
2046 vfs_oexport_conv(const struct oexport_args *oexp, struct export_args *exp)
2047 {
2048
2049 bcopy(oexp, exp, sizeof(*oexp));
2050 exp->ex_numsecflavors = 0;
2051 }