/* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include "opt_tmpfs.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", "nomtime", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "nomtime", "size", NULL
};
/*
 * Handle updates of file times from writes to mmapped regions, if
 * allowed.  Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY,
 * since unmapping a tmpfs-backed vnode does not call vinactive() because
 * the vm object type is OBJT_SWAP.  If lazy, handle only the delayed
 * mtime updates caused by writes to mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	if (VFS_TO_TMPFS(mp)->tm_nomtime)
		return;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read to avoid taking
		 * the vnode lock if it is not needed.  A lost update
		 * will be handled on the next call.
		 * In the non-lazy case, we must flush all pending
		 * metadata changes now.
		 */
		if (!lazy || obj->generation != obj->cleangeneration) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

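/*
 * Result of a tmpfs_check_rw_maps() scan: set once a writable mapping
 * of a file from the mount has been found.
 */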
struct tmpfs_check_rw_maps_arg {
	bool found;
};

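/*
 * Callback for tmpfs_all_rw_maps(): record that a writable mapping was
 * found and return true to terminate the scan early.
 */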
static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in
	 * any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

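/*
 * Walk all processes in the system and, for each map entry that is a
 * writable mapping of a tmpfs-backed regular file from the given mount,
 * invoke the callback.  The scan stops early once the callback returns
 * true, and restarts if the process list changed underneath it.
 */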
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * No need to dig into the shadow chain; a mapping
			 * of an object that is not at the top of the
			 * chain is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

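/*
 * Return true if any process currently has a writable mapping of a
 * regular file from the given tmpfs mount.
 */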
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

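/*
 * Downgrade a read-write mount to read-only: suspend writes, refuse
 * the downgrade with EBUSY if writable mappings exist and MNT_FORCE
 * was not given, then repeatedly revoke write access from existing
 * mappings, sync mtimes and flush vnodes until no writable mappings
 * remain.
 */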
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

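/*
 * Mount entry point.  Handles both MNT_UPDATE requests, where only a
 * few options (including rw/ro transitions) may change, and new
 * mounts, for which the options are parsed and the per-mount
 * tmpfs_mount structure and root node are allocated.
 */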
static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nomtime, nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise, trying to update a file system that
			 * is listed in fstab with a size parameter, say
			 * when changing rw to ro or vice versa, would
			 * cause vfs_filteropt() to bail.
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
		    0) == 0;
		MNT_ILOCK(mp);
		if ((mp->mnt_flag & MNT_UNION) == 0) {
			mp->mnt_kern_flag |= MNTK_FPLOOKUP;
		} else {
			mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
		}
		MNT_IUNLOCK(mp);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;
	tmp->tm_nomtime = nomtime;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS | MNTK_NOMSYNC;
	if (!nonc && (mp->mnt_flag & MNT_UNION) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

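/*
 * Unmount the file system: suspend writers, flush all vnodes (retrying
 * for forced unmounts), release every remaining node and drop the
 * reference on the tmpfs_mount structure.
 */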
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

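/*
 * Release one reference on the given tmpfs_mount, freeing the
 * structure once the last reference is dropped.  Called with the
 * per-mount lock held; the lock is released before returning.
 */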
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{
	TMPFS_MP_ASSERT_LOCKED(tmp);
	MPASS(tmp->tm_refcount > 0);

	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

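/*
 * Return the root directory vnode of the given mount.
 */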
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

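/*
 * Translate an NFS-style file handle into a vnode, matching the node
 * by inode number and generation.
 */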
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	if (fhp->fid_len != sizeof(tfd))
		return (EINVAL);

	/*
	 * Copy from fid_data onto the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	memcpy(&tfd, fhp->fid_data, fhp->fid_len);

	tmp = VFS_TO_TMPFS(mp);

	if (tfd.tfd_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfd.tfd_id &&
		    node->tn_gen == tfd.tfd_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

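/*
 * Report file system statistics: block counts are derived from the
 * page limit (or from available memory when the mount is unlimited)
 * and inode counts from the node limit.
 */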
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

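/*
 * VFS sync entry point.  tmpfs keeps all data in memory, so there is
 * normally nothing to write back; only note suspension requests and,
 * for MNT_LAZY syncs, push the delayed mtime updates for mmapped files.
 */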
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}

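/*
 * Module-wide initialization: set up the tmpfs subroutines and build
 * the tmpfs file operations table from the generic vnode fileops,
 * overriding only the close method.
 */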
static int
tmpfs_init(struct vfsconf *conf)
{
	tmpfs_subr_init();
	memcpy(&tmpfs_fnops, &vnops, sizeof(struct fileops));
	tmpfs_fnops.fo_close = tmpfs_fo_close;
	return (0);
}

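/*
 * Module-wide cleanup, releasing the state set up by tmpfs_init().
 */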
static int
tmpfs_uninit(struct vfsconf *conf)
{
	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =		tmpfs_mount,
	.vfs_unmount =		tmpfs_unmount,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	tmpfs_root,
	.vfs_statfs =		tmpfs_statfs,
	.vfs_fhtovp =		tmpfs_fhtovp,
	.vfs_sync =		tmpfs_sync,
	.vfs_init =		tmpfs_init,
	.vfs_uninit =		tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);