1 /*-
2 * Copyright (c) 2000-2004
3 * Poul-Henning Kamp. All rights reserved.
4 * Copyright (c) 1989, 1992-1993, 1995
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software donated to Berkeley by
8 * Jan-Simon Pendry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)kernfs_vnops.c 8.15 (Berkeley) 5/21/95
32 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33 *
34 * $FreeBSD: releng/8.4/sys/fs/devfs/devfs_vnops.c 247083 2013-02-21 06:10:36Z kib $
35 */
36
37 /*
38 * TODO:
39 * remove empty directories
40 * mkdir: want it ?
41 */
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/conf.h>
46 #include <sys/dirent.h>
47 #include <sys/fcntl.h>
48 #include <sys/file.h>
49 #include <sys/filedesc.h>
50 #include <sys/filio.h>
51 #include <sys/jail.h>
52 #include <sys/kernel.h>
53 #include <sys/lock.h>
54 #include <sys/malloc.h>
55 #include <sys/mount.h>
56 #include <sys/namei.h>
57 #include <sys/priv.h>
58 #include <sys/proc.h>
59 #include <sys/stat.h>
60 #include <sys/sx.h>
61 #include <sys/time.h>
62 #include <sys/ttycom.h>
63 #include <sys/unistd.h>
64 #include <sys/vnode.h>
65
66 static struct vop_vector devfs_vnodeops;
67 static struct vop_vector devfs_specops;
68 static struct fileops devfs_ops_f;
69
70 #include <fs/devfs/devfs.h>
71 #include <fs/devfs/devfs_int.h>
72
73 #include <security/mac/mac_framework.h>
74
75 static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
76
77 struct mtx devfs_de_interlock;
78 MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
79 struct sx clone_drain_lock;
80 SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
81 struct mtx cdevpriv_mtx;
82 MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
83
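/*
 * Translate an open devfs file into the struct cdev and struct cdevsw
 * behind it, taking a temporary threaded reference on the device, and
 * record the file in curthread->td_fpop so that the cdevpriv functions
 * below can find it.  Returns ENXIO if the vnode no longer matches the
 * file or the driver is gone.  On success the caller must restore
 * td_fpop and call dev_relthread() once it is done with the device, as
 * the fileops methods further down in this file do.
 */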
84 static int
85 devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
86 int *ref)
87 {
88
89 *dswp = devvn_refthread(fp->f_vnode, devp, ref);
90 if (*devp != fp->f_data) {
91 if (*dswp != NULL)
92 dev_relthread(*devp, *ref);
93 return (ENXIO);
94 }
95 KASSERT((*devp)->si_refcount > 0,
96 ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
97 if (*dswp == NULL)
98 return (ENXIO);
99 curthread->td_fpop = fp;
100 return (0);
101 }
102
103 int
104 devfs_get_cdevpriv(void **datap)
105 {
106 struct file *fp;
107 struct cdev_privdata *p;
108 int error;
109
110 fp = curthread->td_fpop;
111 if (fp == NULL)
112 return (EBADF);
113 p = fp->f_cdevpriv;
114 if (p != NULL) {
115 error = 0;
116 *datap = p->cdpd_data;
117 } else
118 error = ENOENT;
119 return (error);
120 }
121
122 int
123 devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
124 {
125 struct file *fp;
126 struct cdev_priv *cdp;
127 struct cdev_privdata *p;
128 int error;
129
130 fp = curthread->td_fpop;
131 if (fp == NULL)
132 return (ENOENT);
133 cdp = cdev2priv((struct cdev *)fp->f_data);
134 p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
135 p->cdpd_data = priv;
136 p->cdpd_dtr = priv_dtr;
137 p->cdpd_fp = fp;
138 mtx_lock(&cdevpriv_mtx);
139 if (fp->f_cdevpriv == NULL) {
140 LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
141 fp->f_cdevpriv = p;
142 mtx_unlock(&cdevpriv_mtx);
143 error = 0;
144 } else {
145 mtx_unlock(&cdevpriv_mtx);
146 free(p, M_CDEVPDATA);
147 error = EBUSY;
148 }
149 return (error);
150 }
151
152 void
153 devfs_destroy_cdevpriv(struct cdev_privdata *p)
154 {
155
156 mtx_assert(&cdevpriv_mtx, MA_OWNED);
157 p->cdpd_fp->f_cdevpriv = NULL;
158 LIST_REMOVE(p, cdpd_list);
159 mtx_unlock(&cdevpriv_mtx);
160 (p->cdpd_dtr)(p->cdpd_data);
161 free(p, M_CDEVPDATA);
162 }
163
164 void
165 devfs_fpdrop(struct file *fp)
166 {
167 struct cdev_privdata *p;
168
169 mtx_lock(&cdevpriv_mtx);
170 if ((p = fp->f_cdevpriv) == NULL) {
171 mtx_unlock(&cdevpriv_mtx);
172 return;
173 }
174 devfs_destroy_cdevpriv(p);
175 }
176
177 void
178 devfs_clear_cdevpriv(void)
179 {
180 struct file *fp;
181
182 fp = curthread->td_fpop;
183 if (fp == NULL)
184 return;
185 devfs_fpdrop(fp);
186 }
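
/*
 * Illustrative sketch (a hypothetical driver, not part of devfs) of how the
 * cdevpriv KPI above is meant to be used from cdevsw methods; the names
 * "foo_softc", "foo_dtr", "foo_open" and "foo_read" are placeholders, and
 * M_TEMP stands in for the driver's own malloc type.  The destructor is run
 * by devfs when the last reference to the file goes away (see
 * devfs_close_f() and devfs_fpdrop()).
 *
 *	static void
 *	foo_dtr(void *data)
 *	{
 *
 *		free(data, M_TEMP);
 *	}
 *
 *	static int
 *	foo_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
 *	{
 *		struct foo_softc *sc;
 *		int error;
 *
 *		sc = malloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO);
 *		error = devfs_set_cdevpriv(sc, foo_dtr);
 *		if (error != 0)
 *			free(sc, M_TEMP);
 *		return (error);
 *	}
 *
 *	static int
 *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct foo_softc *sc;
 *		int error;
 *
 *		error = devfs_get_cdevpriv((void **)&sc);
 *		if (error != 0)
 *			return (error);
 *		// ... use sc ...
 *		return (0);
 *	}
 */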
187
188 static int
189 devfs_vptocnp(struct vop_vptocnp_args *ap)
190 {
191 struct vnode *vp = ap->a_vp;
192 struct vnode **dvp = ap->a_vpp;
193 struct devfs_mount *dmp;
194 char *buf = ap->a_buf;
195 int *buflen = ap->a_buflen;
196 struct devfs_dirent *dd, *de;
197 int i, error;
198
199 dmp = VFSTODEVFS(vp->v_mount);
200 i = *buflen;
201 dd = vp->v_data;
202 error = 0;
203
204 sx_xlock(&dmp->dm_lock);
205
206 if (vp->v_type == VCHR) {
207 i -= strlen(dd->de_cdp->cdp_c.si_name);
208 if (i < 0) {
209 error = ENOMEM;
210 goto finished;
211 }
212 bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
213 strlen(dd->de_cdp->cdp_c.si_name));
214 de = dd->de_dir;
215 } else if (vp->v_type == VDIR) {
216 if (dd == dmp->dm_rootdir) {
217 *dvp = vp;
218 vhold(*dvp);
219 goto finished;
220 }
221 i -= dd->de_dirent->d_namlen;
222 if (i < 0) {
223 error = ENOMEM;
224 goto finished;
225 }
226 bcopy(dd->de_dirent->d_name, buf + i,
227 dd->de_dirent->d_namlen);
228 de = dd;
229 } else {
230 error = ENOENT;
231 goto finished;
232 }
233 *buflen = i;
234 de = devfs_parent_dirent(de);
235 if (de == NULL) {
236 error = ENOENT;
237 goto finished;
238 }
239 mtx_lock(&devfs_de_interlock);
240 *dvp = de->de_vnode;
241 if (*dvp != NULL) {
242 VI_LOCK(*dvp);
243 mtx_unlock(&devfs_de_interlock);
244 vholdl(*dvp);
245 VI_UNLOCK(*dvp);
246 } else {
247 mtx_unlock(&devfs_de_interlock);
248 error = ENOENT;
249 }
250 finished:
251 sx_xunlock(&dmp->dm_lock);
252 return (error);
253 }
254
255 /*
256 * Construct the fully qualified path name relative to the mountpoint
257 */
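/*
 * For example (illustration only): a lookup of "0" in the devfs
 * directory "pts" leaves the caller's SPECNAMELEN + 1 sized buffer
 * holding "pts/0" right-justified and NUL-terminated, and the returned
 * pointer points at the 'p'.  NULL is returned if the name does not fit
 * or a parent directory entry has disappeared.
 */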
258 static char *
259 devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
260 {
261 int i;
262 struct devfs_dirent *de, *dd;
263 struct devfs_mount *dmp;
264
265 dmp = VFSTODEVFS(dvp->v_mount);
266 dd = dvp->v_data;
267 i = SPECNAMELEN;
268 buf[i] = '\0';
269 i -= cnp->cn_namelen;
270 if (i < 0)
271 return (NULL);
272 bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
273 de = dd;
274 while (de != dmp->dm_rootdir) {
275 i--;
276 if (i < 0)
277 return (NULL);
278 buf[i] = '/';
279 i -= de->de_dirent->d_namlen;
280 if (i < 0)
281 return (NULL);
282 bcopy(de->de_dirent->d_name, buf + i,
283 de->de_dirent->d_namlen);
284 de = devfs_parent_dirent(de);
285 if (de == NULL)
286 return (NULL);
287 }
288 return (buf + i);
289 }
290
291 static int
292 devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
293 struct devfs_dirent *de)
294 {
295 int not_found;
296
297 not_found = 0;
298 if (de->de_flags & DE_DOOMED)
299 not_found = 1;
300 if (DEVFS_DE_DROP(de)) {
301 KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
302 devfs_dirent_free(de);
303 }
304 if (DEVFS_DMP_DROP(dmp)) {
305 KASSERT(not_found == 1,
306 ("DEVFS mount struct freed before dirent"));
307 not_found = 2;
308 sx_xunlock(&dmp->dm_lock);
309 devfs_unmount_final(dmp);
310 }
311 if (not_found == 1 || (drop_dm_lock && not_found != 2))
312 sx_unlock(&dmp->dm_lock);
313 return (not_found);
314 }
315
316 static void
317 devfs_insmntque_dtr(struct vnode *vp, void *arg)
318 {
319 struct devfs_dirent *de;
320
321 de = (struct devfs_dirent *)arg;
322 mtx_lock(&devfs_de_interlock);
323 vp->v_data = NULL;
324 de->de_vnode = NULL;
325 mtx_unlock(&devfs_de_interlock);
326 vgone(vp);
327 vput(vp);
328 }
329
330 /*
331 * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
332 * it on return.
333 */
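/*
 * Illustrative caller pattern (compare devfs_lookupx() and devfs_mknod()
 * below); "dmp", "de", "mp" and "vp" stand for the usual devfs mount,
 * directory entry, mount point and result vnode:
 *
 *	sx_xlock(&dmp->dm_lock);
 *	...
 *	error = devfs_allocv(de, mp, LK_EXCLUSIVE, &vp);
 *	// dm_lock has been dropped here, whether or not an error occurred
 */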
334 int
335 devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
336 struct vnode **vpp)
337 {
338 int error;
339 struct vnode *vp;
340 struct cdev *dev;
341 struct devfs_mount *dmp;
342 struct cdevsw *dsw;
343
344 dmp = VFSTODEVFS(mp);
345 if (de->de_flags & DE_DOOMED) {
346 sx_xunlock(&dmp->dm_lock);
347 return (ENOENT);
348 }
349 loop:
350 DEVFS_DE_HOLD(de);
351 DEVFS_DMP_HOLD(dmp);
352 mtx_lock(&devfs_de_interlock);
353 vp = de->de_vnode;
354 if (vp != NULL) {
355 VI_LOCK(vp);
356 mtx_unlock(&devfs_de_interlock);
357 sx_xunlock(&dmp->dm_lock);
358 vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
359 sx_xlock(&dmp->dm_lock);
360 if (devfs_allocv_drop_refs(0, dmp, de)) {
361 vput(vp);
362 return (ENOENT);
363 }
364 else if ((vp->v_iflag & VI_DOOMED) != 0) {
365 mtx_lock(&devfs_de_interlock);
366 if (de->de_vnode == vp) {
367 de->de_vnode = NULL;
368 vp->v_data = NULL;
369 }
370 mtx_unlock(&devfs_de_interlock);
371 vput(vp);
372 goto loop;
373 }
374 sx_xunlock(&dmp->dm_lock);
375 *vpp = vp;
376 return (0);
377 }
378 mtx_unlock(&devfs_de_interlock);
379 if (de->de_dirent->d_type == DT_CHR) {
380 if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
381 devfs_allocv_drop_refs(1, dmp, de);
382 return (ENOENT);
383 }
384 dev = &de->de_cdp->cdp_c;
385 } else {
386 dev = NULL;
387 }
388 error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
389 if (error != 0) {
390 devfs_allocv_drop_refs(1, dmp, de);
391 printf("devfs_allocv: failed to allocate new vnode\n");
392 return (error);
393 }
394
395 if (de->de_dirent->d_type == DT_CHR) {
396 vp->v_type = VCHR;
397 VI_LOCK(vp);
398 dev_lock();
399 dev_refl(dev);
/* XXX: v_rdev should be protected by the vnode lock */
401 vp->v_rdev = dev;
402 KASSERT(vp->v_usecount == 1,
403 ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
404 dev->si_usecount += vp->v_usecount;
405 /* Special casing of ttys for deadfs. Probably redundant. */
406 dsw = dev->si_devsw;
407 if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
408 vp->v_vflag |= VV_ISTTY;
409 dev_unlock();
410 VI_UNLOCK(vp);
411 if ((dev->si_flags & SI_ETERNAL) != 0)
412 vp->v_vflag |= VV_ETERNALDEV;
413 vp->v_op = &devfs_specops;
414 } else if (de->de_dirent->d_type == DT_DIR) {
415 vp->v_type = VDIR;
416 } else if (de->de_dirent->d_type == DT_LNK) {
417 vp->v_type = VLNK;
418 } else {
419 vp->v_type = VBAD;
420 }
421 VN_LOCK_ASHARE(vp);
422 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
423 mtx_lock(&devfs_de_interlock);
424 vp->v_data = de;
425 de->de_vnode = vp;
426 mtx_unlock(&devfs_de_interlock);
427 error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
428 if (error != 0) {
429 (void) devfs_allocv_drop_refs(1, dmp, de);
430 return (error);
431 }
432 if (devfs_allocv_drop_refs(0, dmp, de)) {
433 vput(vp);
434 return (ENOENT);
435 }
436 #ifdef MAC
437 mac_devfs_vnode_associate(mp, de, vp);
438 #endif
439 sx_xunlock(&dmp->dm_lock);
440 *vpp = vp;
441 return (0);
442 }
443
444 static int
445 devfs_access(struct vop_access_args *ap)
446 {
447 struct vnode *vp = ap->a_vp;
448 struct devfs_dirent *de;
449 int error;
450
451 de = vp->v_data;
452 if (vp->v_type == VDIR)
453 de = de->de_dir;
454
455 error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
456 ap->a_accmode, ap->a_cred, NULL);
457 if (!error)
458 return (error);
459 if (error != EACCES)
460 return (error);
461 /* We do, however, allow access to the controlling terminal */
462 if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
463 return (error);
464 if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode)
465 return (0);
466 return (error);
467 }
468
469 /* ARGSUSED */
470 static int
471 devfs_close(struct vop_close_args *ap)
472 {
473 struct vnode *vp = ap->a_vp, *oldvp;
474 struct thread *td = ap->a_td;
475 struct cdev *dev = vp->v_rdev;
476 struct cdevsw *dsw;
477 int vp_locked, error, ref;
478
479 /*
480 * XXX: Don't call d_close() if we were called because of
481 * XXX: insmntque1() failure.
482 */
483 if (vp->v_data == NULL)
484 return (0);
485
486 /*
487 * Hack: a tty device that is a controlling terminal
488 * has a reference from the session structure.
489 * We cannot easily tell that a character device is
490 * a controlling terminal, unless it is the closing
491 * process' controlling terminal. In that case,
492 * if the reference count is 2 (this last descriptor
493 * plus the session), release the reference from the session.
494 */
495 oldvp = NULL;
496 sx_xlock(&proctree_lock);
497 if (td && vp == td->td_proc->p_session->s_ttyvp) {
498 SESS_LOCK(td->td_proc->p_session);
499 VI_LOCK(vp);
500 if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
501 td->td_proc->p_session->s_ttyvp = NULL;
502 oldvp = vp;
503 }
504 VI_UNLOCK(vp);
505 SESS_UNLOCK(td->td_proc->p_session);
506 }
507 sx_xunlock(&proctree_lock);
508 if (oldvp != NULL)
509 vrele(oldvp);
510 /*
511 * We do not want to really close the device if it
512 * is still in use unless we are trying to close it
513 * forcibly. Since every use (buffer, vnode, swap, cmap)
514 * holds a reference to the vnode, and because we mark
515 * any other vnodes that alias this device, when the
516 * sum of the reference counts on all the aliased
 * vnodes descends to one, we are on the last close.
518 */
519 dsw = dev_refthread(dev, &ref);
520 if (dsw == NULL)
521 return (ENXIO);
522 VI_LOCK(vp);
523 if (vp->v_iflag & VI_DOOMED) {
524 /* Forced close. */
525 } else if (dsw->d_flags & D_TRACKCLOSE) {
526 /* Keep device updated on status. */
527 } else if (count_dev(dev) > 1) {
528 VI_UNLOCK(vp);
529 dev_relthread(dev, ref);
530 return (0);
531 }
532 vholdl(vp);
533 VI_UNLOCK(vp);
534 vp_locked = VOP_ISLOCKED(vp);
535 VOP_UNLOCK(vp, 0);
536 KASSERT(dev->si_refcount > 0,
537 ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
538 error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
539 dev_relthread(dev, ref);
540 vn_lock(vp, vp_locked | LK_RETRY);
541 vdrop(vp);
542 return (error);
543 }
544
545 static int
546 devfs_close_f(struct file *fp, struct thread *td)
547 {
548 int error;
549 struct file *fpop;
550
551 /*
552 * NB: td may be NULL if this descriptor is closed due to
553 * garbage collection from a closed UNIX domain socket.
554 */
555 fpop = curthread->td_fpop;
556 curthread->td_fpop = fp;
557 error = vnops.fo_close(fp, td);
558 curthread->td_fpop = fpop;
559
/*
 * The f_cdevpriv cannot be assigned a non-NULL value while we
 * are destroying the file.
 */
564 if (fp->f_cdevpriv != NULL)
565 devfs_fpdrop(fp);
566 return (error);
567 }
568
569 static int
570 devfs_fsync(struct vop_fsync_args *ap)
571 {
572 int error;
573 struct bufobj *bo;
574 struct devfs_dirent *de;
575
576 if (!vn_isdisk(ap->a_vp, &error)) {
577 bo = &ap->a_vp->v_bufobj;
578 de = ap->a_vp->v_data;
579 if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
580 printf("Device %s went missing before all of the data "
581 "could be written to it; expect data loss.\n",
582 de->de_dirent->d_name);
583
584 error = vop_stdfsync(ap);
585 if (bo->bo_dirty.bv_cnt != 0 || error != 0)
586 panic("devfs_fsync: vop_stdfsync failed.");
587 }
588
589 return (0);
590 }
591
592 return (vop_stdfsync(ap));
593 }
594
595 static int
596 devfs_getattr(struct vop_getattr_args *ap)
597 {
598 struct vnode *vp = ap->a_vp;
599 struct vattr *vap = ap->a_vap;
600 int error = 0;
601 struct devfs_dirent *de;
602 struct cdev *dev;
603
604 de = vp->v_data;
605 KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
606 if (vp->v_type == VDIR) {
607 de = de->de_dir;
608 KASSERT(de != NULL,
609 ("Null dir dirent in devfs_getattr vp=%p", vp));
610 }
611 vap->va_uid = de->de_uid;
612 vap->va_gid = de->de_gid;
613 vap->va_mode = de->de_mode;
614 if (vp->v_type == VLNK)
615 vap->va_size = strlen(de->de_symlink);
616 else if (vp->v_type == VDIR)
617 vap->va_size = vap->va_bytes = DEV_BSIZE;
618 else
619 vap->va_size = 0;
620 if (vp->v_type != VDIR)
621 vap->va_bytes = 0;
622 vap->va_blocksize = DEV_BSIZE;
623 vap->va_type = vp->v_type;
624
625 #define fix(aa) \
626 do { \
627 if ((aa).tv_sec <= 3600) { \
628 (aa).tv_sec = boottime.tv_sec; \
629 (aa).tv_nsec = boottime.tv_usec * 1000; \
630 } \
631 } while (0)
632
633 if (vp->v_type != VCHR) {
634 fix(de->de_atime);
635 vap->va_atime = de->de_atime;
636 fix(de->de_mtime);
637 vap->va_mtime = de->de_mtime;
638 fix(de->de_ctime);
639 vap->va_ctime = de->de_ctime;
640 } else {
641 dev = vp->v_rdev;
642 fix(dev->si_atime);
643 vap->va_atime = dev->si_atime;
644 fix(dev->si_mtime);
645 vap->va_mtime = dev->si_mtime;
646 fix(dev->si_ctime);
647 vap->va_ctime = dev->si_ctime;
648
649 vap->va_rdev = cdev2priv(dev)->cdp_inode;
650 }
651 vap->va_gen = 0;
652 vap->va_flags = 0;
653 vap->va_filerev = 0;
654 vap->va_nlink = de->de_links;
655 vap->va_fileid = de->de_inode;
656
657 return (error);
658 }
659
660 /* ARGSUSED */
661 static int
662 devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
663 {
664 struct cdev *dev;
665 struct cdevsw *dsw;
666 struct vnode *vp;
667 struct vnode *vpold;
668 int error, i, ref;
669 const char *p;
670 struct fiodgname_arg *fgn;
671 struct file *fpop;
672
673 fpop = td->td_fpop;
674 error = devfs_fp_check(fp, &dev, &dsw, &ref);
675 if (error)
676 return (error);
677
678 if (com == FIODTYPE) {
679 *(int *)data = dsw->d_flags & D_TYPEMASK;
680 td->td_fpop = fpop;
681 dev_relthread(dev, ref);
682 return (0);
683 } else if (com == FIODGNAME) {
684 fgn = data;
685 p = devtoname(dev);
686 i = strlen(p) + 1;
687 if (i > fgn->len)
688 error = EINVAL;
689 else
690 error = copyout(p, fgn->buf, i);
691 td->td_fpop = fpop;
692 dev_relthread(dev, ref);
693 return (error);
694 }
695 error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
696 td->td_fpop = NULL;
697 dev_relthread(dev, ref);
698 if (error == ENOIOCTL)
699 error = ENOTTY;
700 if (error == 0 && com == TIOCSCTTY) {
701 vp = fp->f_vnode;
702
703 /* Do nothing if reassigning same control tty */
704 sx_slock(&proctree_lock);
705 if (td->td_proc->p_session->s_ttyvp == vp) {
706 sx_sunlock(&proctree_lock);
707 return (0);
708 }
709
710 vpold = td->td_proc->p_session->s_ttyvp;
711 VREF(vp);
712 SESS_LOCK(td->td_proc->p_session);
713 td->td_proc->p_session->s_ttyvp = vp;
714 SESS_UNLOCK(td->td_proc->p_session);
715
716 sx_sunlock(&proctree_lock);
717
718 /* Get rid of reference to old control tty */
719 if (vpold)
720 vrele(vpold);
721 }
722 return (error);
723 }
724
725 /* ARGSUSED */
726 static int
727 devfs_kqfilter_f(struct file *fp, struct knote *kn)
728 {
729 struct cdev *dev;
730 struct cdevsw *dsw;
731 int error, ref;
732 struct file *fpop;
733 struct thread *td;
734
735 td = curthread;
736 fpop = td->td_fpop;
737 error = devfs_fp_check(fp, &dev, &dsw, &ref);
738 if (error)
739 return (error);
740 error = dsw->d_kqfilter(dev, kn);
741 td->td_fpop = fpop;
742 dev_relthread(dev, ref);
743 return (error);
744 }
745
746 static inline int
747 devfs_prison_check(struct devfs_dirent *de, struct ucred *tcr)
748 {
749 struct cdev_priv *cdp;
750 struct ucred *dcr;
751
752 cdp = de->de_cdp;
753 if (cdp == NULL)
754 return (0);
755 dcr = cdp->cdp_c.si_cred;
756 if (dcr == NULL)
757 return (0);
758
759 return (prison_check(tcr, dcr));
760 }
761
762 static int
763 devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
764 {
765 struct componentname *cnp;
766 struct vnode *dvp, **vpp;
767 struct thread *td;
768 struct devfs_dirent *de, *dd;
769 struct devfs_dirent **dde;
770 struct devfs_mount *dmp;
771 struct cdev *cdev;
772 int error, flags, nameiop, dvplocked;
773 char specname[SPECNAMELEN + 1], *pname;
774
775 cnp = ap->a_cnp;
776 vpp = ap->a_vpp;
777 dvp = ap->a_dvp;
778 pname = cnp->cn_nameptr;
779 td = cnp->cn_thread;
780 flags = cnp->cn_flags;
781 nameiop = cnp->cn_nameiop;
782 dmp = VFSTODEVFS(dvp->v_mount);
783 dd = dvp->v_data;
784 *vpp = NULLVP;
785
786 if ((flags & ISLASTCN) && nameiop == RENAME)
787 return (EOPNOTSUPP);
788
789 if (dvp->v_type != VDIR)
790 return (ENOTDIR);
791
792 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
793 return (EIO);
794
795 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
796 if (error)
797 return (error);
798
799 if (cnp->cn_namelen == 1 && *pname == '.') {
800 if ((flags & ISLASTCN) && nameiop != LOOKUP)
801 return (EINVAL);
802 *vpp = dvp;
803 VREF(dvp);
804 return (0);
805 }
806
807 if (flags & ISDOTDOT) {
808 if ((flags & ISLASTCN) && nameiop != LOOKUP)
809 return (EINVAL);
810 de = devfs_parent_dirent(dd);
811 if (de == NULL)
812 return (ENOENT);
813 dvplocked = VOP_ISLOCKED(dvp);
814 VOP_UNLOCK(dvp, 0);
815 error = devfs_allocv(de, dvp->v_mount,
816 cnp->cn_lkflags & LK_TYPE_MASK, vpp);
817 *dm_unlock = 0;
818 vn_lock(dvp, dvplocked | LK_RETRY);
819 return (error);
820 }
821
822 DEVFS_DMP_HOLD(dmp);
823 devfs_populate(dmp);
824 if (DEVFS_DMP_DROP(dmp)) {
825 *dm_unlock = 0;
826 sx_xunlock(&dmp->dm_lock);
827 devfs_unmount_final(dmp);
828 return (ENOENT);
829 }
830 dd = dvp->v_data;
831 de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
832 while (de == NULL) { /* While(...) so we can use break */
833
834 if (nameiop == DELETE)
835 return (ENOENT);
836
837 /*
838 * OK, we didn't have an entry for the name we were asked for
839 * so we try to see if anybody can create it on demand.
840 */
841 pname = devfs_fqpn(specname, dvp, cnp);
842 if (pname == NULL)
843 break;
844
845 cdev = NULL;
846 DEVFS_DMP_HOLD(dmp);
847 sx_xunlock(&dmp->dm_lock);
848 sx_slock(&clone_drain_lock);
849 EVENTHANDLER_INVOKE(dev_clone,
850 td->td_ucred, pname, strlen(pname), &cdev);
851 sx_sunlock(&clone_drain_lock);
852 sx_xlock(&dmp->dm_lock);
853 if (DEVFS_DMP_DROP(dmp)) {
854 *dm_unlock = 0;
855 sx_xunlock(&dmp->dm_lock);
856 devfs_unmount_final(dmp);
857 if (cdev != NULL)
858 dev_rel(cdev);
859 return (ENOENT);
860 }
861 if (cdev == NULL)
862 break;
863
864 DEVFS_DMP_HOLD(dmp);
865 devfs_populate(dmp);
866 if (DEVFS_DMP_DROP(dmp)) {
867 *dm_unlock = 0;
868 sx_xunlock(&dmp->dm_lock);
869 devfs_unmount_final(dmp);
870 dev_rel(cdev);
871 return (ENOENT);
872 }
873
874 dev_lock();
875 dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
876 if (dde != NULL && *dde != NULL)
877 de = *dde;
878 dev_unlock();
879 dev_rel(cdev);
880 break;
881 }
882
883 if (de == NULL || de->de_flags & DE_WHITEOUT) {
884 if ((nameiop == CREATE || nameiop == RENAME) &&
885 (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
886 cnp->cn_flags |= SAVENAME;
887 return (EJUSTRETURN);
888 }
889 return (ENOENT);
890 }
891
892 if (devfs_prison_check(de, td->td_ucred))
893 return (ENOENT);
894
895 if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
896 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
897 if (error)
898 return (error);
899 if (*vpp == dvp) {
900 VREF(dvp);
901 *vpp = dvp;
902 return (0);
903 }
904 }
905 error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
906 vpp);
907 *dm_unlock = 0;
908 return (error);
909 }
910
911 static int
912 devfs_lookup(struct vop_lookup_args *ap)
913 {
914 int j;
915 struct devfs_mount *dmp;
916 int dm_unlock;
917
918 dmp = VFSTODEVFS(ap->a_dvp->v_mount);
919 dm_unlock = 1;
920 sx_xlock(&dmp->dm_lock);
921 j = devfs_lookupx(ap, &dm_unlock);
922 if (dm_unlock == 1)
923 sx_xunlock(&dmp->dm_lock);
924 return (j);
925 }
926
927 static int
928 devfs_mknod(struct vop_mknod_args *ap)
929 {
930 struct componentname *cnp;
931 struct vnode *dvp, **vpp;
932 struct devfs_dirent *dd, *de;
933 struct devfs_mount *dmp;
934 int error;
935
/*
 * The only type of node we should be creating here is a
 * character device; for anything else, return EOPNOTSUPP.
 */
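/*
 * Illustration (hypothetical device name "foo", not part of devfs): a
 * removed device node is only whiteout'd (see devfs_remove()), and a
 * mknod(2) with the same name and VCHR type simply clears the whiteout
 * again; the major/minor numbers passed in are ignored here:
 *
 *	rm /dev/foo		# entry gets DE_WHITEOUT
 *	mknod /dev/foo c 0 0	# whiteout cleared, node visible again
 */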
940 if (ap->a_vap->va_type != VCHR)
941 return (EOPNOTSUPP);
942 dvp = ap->a_dvp;
943 dmp = VFSTODEVFS(dvp->v_mount);
944
945 cnp = ap->a_cnp;
946 vpp = ap->a_vpp;
947 dd = dvp->v_data;
948
949 error = ENOENT;
950 sx_xlock(&dmp->dm_lock);
951 TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
952 if (cnp->cn_namelen != de->de_dirent->d_namlen)
953 continue;
954 if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
955 de->de_dirent->d_namlen) != 0)
956 continue;
957 if (de->de_flags & DE_WHITEOUT)
958 break;
959 goto notfound;
960 }
961 if (de == NULL)
962 goto notfound;
963 de->de_flags &= ~DE_WHITEOUT;
964 error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
965 return (error);
966 notfound:
967 sx_xunlock(&dmp->dm_lock);
968 return (error);
969 }
970
971 /* ARGSUSED */
972 static int
973 devfs_open(struct vop_open_args *ap)
974 {
975 struct thread *td = ap->a_td;
976 struct vnode *vp = ap->a_vp;
977 struct cdev *dev = vp->v_rdev;
978 struct file *fp = ap->a_fp;
979 int error, ref, vlocked;
980 struct cdevsw *dsw;
981 struct file *fpop;
982
983 if (vp->v_type == VBLK)
984 return (ENXIO);
985
986 if (dev == NULL)
987 return (ENXIO);
988
989 /* Make this field valid before any I/O in d_open. */
990 if (dev->si_iosize_max == 0)
991 dev->si_iosize_max = DFLTPHYS;
992
993 dsw = dev_refthread(dev, &ref);
994 if (dsw == NULL)
995 return (ENXIO);
996 if (fp == NULL && dsw->d_fdopen != NULL) {
997 dev_relthread(dev, ref);
998 return (ENXIO);
999 }
1000
1001 vlocked = VOP_ISLOCKED(vp);
1002 VOP_UNLOCK(vp, 0);
1003
1004 fpop = td->td_fpop;
1005 td->td_fpop = fp;
1006 if (fp != NULL) {
1007 fp->f_data = dev;
1008 fp->f_vnode = vp;
1009 }
1010 if (dsw->d_fdopen != NULL)
1011 error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
1012 else
1013 error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
1014 td->td_fpop = fpop;
1015
1016 vn_lock(vp, vlocked | LK_RETRY);
1017 dev_relthread(dev, ref);
1018 if (error != 0) {
1019 if (error == ERESTART)
1020 error = EINTR;
1021 return (error);
1022 }
1023
1024 #if 0 /* /dev/console */
1025 KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
1026 #else
1027 if (fp == NULL)
1028 return (error);
1029 #endif
1030 if (fp->f_ops == &badfileops)
1031 finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
1032 return (error);
1033 }
1034
1035 static int
1036 devfs_pathconf(struct vop_pathconf_args *ap)
1037 {
1038
1039 switch (ap->a_name) {
1040 case _PC_MAC_PRESENT:
1041 #ifdef MAC
1042 /*
1043 * If MAC is enabled, devfs automatically supports
 * trivial non-persistent label storage.
1045 */
1046 *ap->a_retval = 1;
1047 #else
1048 *ap->a_retval = 0;
1049 #endif
1050 return (0);
1051 default:
1052 return (vop_stdpathconf(ap));
1053 }
1054 /* NOTREACHED */
1055 }
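
/*
 * Userland sketch (illustration only; "/dev/null" is just an example path
 * on a devfs mount): the _PC_MAC_PRESENT case above is what answers a
 * pathconf(2) query such as
 *
 *	long mac_present = pathconf("/dev/null", _PC_MAC_PRESENT);
 *
 * reporting 1 on a MAC-enabled kernel and 0 otherwise.
 */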
1056
1057 /* ARGSUSED */
1058 static int
1059 devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
1060 {
1061 struct cdev *dev;
1062 struct cdevsw *dsw;
1063 int error, ref;
1064 struct file *fpop;
1065
1066 fpop = td->td_fpop;
1067 error = devfs_fp_check(fp, &dev, &dsw, &ref);
1068 if (error)
1069 return (poll_no_poll(events));
1070 error = dsw->d_poll(dev, events, td);
1071 td->td_fpop = fpop;
1072 dev_relthread(dev, ref);
1073 return(error);
1074 }
1075
1076 /*
1077 * Print out the contents of a special device vnode.
1078 */
1079 static int
1080 devfs_print(struct vop_print_args *ap)
1081 {
1082
1083 printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
1084 return (0);
1085 }
1086
1087 /* ARGSUSED */
1088 static int
1089 devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
1090 {
1091 struct cdev *dev;
1092 int ioflag, error, ref, resid;
1093 struct cdevsw *dsw;
1094 struct file *fpop;
1095
1096 fpop = td->td_fpop;
1097 error = devfs_fp_check(fp, &dev, &dsw, &ref);
1098 if (error)
1099 return (error);
1100 resid = uio->uio_resid;
1101 ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
1102 if (ioflag & O_DIRECT)
1103 ioflag |= IO_DIRECT;
1104
1105 if ((flags & FOF_OFFSET) == 0)
1106 uio->uio_offset = fp->f_offset;
1107
1108 error = dsw->d_read(dev, uio, ioflag);
1109 if (uio->uio_resid != resid || (error == 0 && resid != 0))
1110 vfs_timestamp(&dev->si_atime);
1111 td->td_fpop = fpop;
1112 dev_relthread(dev, ref);
1113
1114 if ((flags & FOF_OFFSET) == 0)
1115 fp->f_offset = uio->uio_offset;
1116 fp->f_nextoff = uio->uio_offset;
1117 return (error);
1118 }
1119
1120 static int
1121 devfs_readdir(struct vop_readdir_args *ap)
1122 {
1123 int error;
1124 struct uio *uio;
1125 struct dirent *dp;
1126 struct devfs_dirent *dd;
1127 struct devfs_dirent *de;
1128 struct devfs_mount *dmp;
1129 off_t off;
1130 int *tmp_ncookies = NULL;
1131
1132 if (ap->a_vp->v_type != VDIR)
1133 return (ENOTDIR);
1134
1135 uio = ap->a_uio;
1136 if (uio->uio_offset < 0)
1137 return (EINVAL);
1138
1139 /*
1140 * XXX: This is a temporary hack to get around this filesystem not
1141 * supporting cookies. We store the location of the ncookies pointer
1142 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
1143 * and set the number of cookies to 0. We then set the pointer to
1144 * NULL so that vfs_read_dirent doesn't try to call realloc() on
 * ap->a_cookies.  Later in this function, we restore ap->a_ncookies to
 * its original value before returning to the caller.
 */
1148 if (ap->a_ncookies != NULL) {
1149 tmp_ncookies = ap->a_ncookies;
1150 *ap->a_ncookies = 0;
1151 ap->a_ncookies = NULL;
1152 }
1153
1154 dmp = VFSTODEVFS(ap->a_vp->v_mount);
1155 sx_xlock(&dmp->dm_lock);
1156 DEVFS_DMP_HOLD(dmp);
1157 devfs_populate(dmp);
1158 if (DEVFS_DMP_DROP(dmp)) {
1159 sx_xunlock(&dmp->dm_lock);
1160 devfs_unmount_final(dmp);
1161 if (tmp_ncookies != NULL)
1162 ap->a_ncookies = tmp_ncookies;
1163 return (EIO);
1164 }
1165 error = 0;
1166 de = ap->a_vp->v_data;
1167 off = 0;
1168 TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
1169 KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
1170 if (dd->de_flags & DE_WHITEOUT)
1171 continue;
1172 if (devfs_prison_check(dd, ap->a_cred))
1173 continue;
1174 if (dd->de_dirent->d_type == DT_DIR)
1175 de = dd->de_dir;
1176 else
1177 de = dd;
1178 dp = dd->de_dirent;
1179 if (dp->d_reclen > uio->uio_resid)
1180 break;
1181 dp->d_fileno = de->de_inode;
1182 if (off >= uio->uio_offset) {
1183 error = vfs_read_dirent(ap, dp, off);
1184 if (error)
1185 break;
1186 }
1187 off += dp->d_reclen;
1188 }
1189 sx_xunlock(&dmp->dm_lock);
1190 uio->uio_offset = off;
1191
1192 /*
1193 * Restore ap->a_ncookies if it wasn't originally NULL in the first
1194 * place.
1195 */
1196 if (tmp_ncookies != NULL)
1197 ap->a_ncookies = tmp_ncookies;
1198
1199 return (error);
1200 }
1201
1202 static int
1203 devfs_readlink(struct vop_readlink_args *ap)
1204 {
1205 struct devfs_dirent *de;
1206
1207 de = ap->a_vp->v_data;
1208 return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1209 }
1210
1211 static int
1212 devfs_reclaim(struct vop_reclaim_args *ap)
1213 {
1214 struct vnode *vp = ap->a_vp;
1215 struct devfs_dirent *de;
1216 struct cdev *dev;
1217
1218 mtx_lock(&devfs_de_interlock);
1219 de = vp->v_data;
1220 if (de != NULL) {
1221 de->de_vnode = NULL;
1222 vp->v_data = NULL;
1223 }
1224 mtx_unlock(&devfs_de_interlock);
1225
1226 vnode_destroy_vobject(vp);
1227
1228 VI_LOCK(vp);
1229 dev_lock();
1230 dev = vp->v_rdev;
1231 vp->v_rdev = NULL;
1232
1233 if (dev == NULL) {
1234 dev_unlock();
1235 VI_UNLOCK(vp);
1236 return (0);
1237 }
1238
1239 dev->si_usecount -= vp->v_usecount;
1240 dev_unlock();
1241 VI_UNLOCK(vp);
1242 dev_rel(dev);
1243 return (0);
1244 }
1245
1246 static int
1247 devfs_remove(struct vop_remove_args *ap)
1248 {
1249 struct vnode *vp = ap->a_vp;
1250 struct devfs_dirent *dd;
1251 struct devfs_dirent *de;
1252 struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
1253
1254 sx_xlock(&dmp->dm_lock);
1255 dd = ap->a_dvp->v_data;
1256 de = vp->v_data;
1257 if (de->de_cdp == NULL) {
1258 TAILQ_REMOVE(&dd->de_dlist, de, de_list);
1259 devfs_delete(dmp, de, 1);
1260 } else {
1261 de->de_flags |= DE_WHITEOUT;
1262 }
1263 sx_xunlock(&dmp->dm_lock);
1264 return (0);
1265 }
1266
/*
 * Revoke is called on a tty when a terminal session ends.  The vnode
 * is orphaned by setting v_op to deadfs, so we must let go of it as
 * well; that way a fresh vnode is created the next time around.
 */
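/*
 * Userland sketch (illustration only; the tty path is hypothetical): this
 * entry point is reached through revoke(2), e.g.
 *
 *	if (revoke("/dev/pts/0") == -1)
 *		err(1, "revoke");
 *
 * after which every vnode aliasing the terminal's cdev has been disassociated
 * and the next open of the device gets a fresh vnode.
 */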
1273 static int
1274 devfs_revoke(struct vop_revoke_args *ap)
1275 {
1276 struct vnode *vp = ap->a_vp, *vp2;
1277 struct cdev *dev;
1278 struct cdev_priv *cdp;
1279 struct devfs_dirent *de;
1280 int i;
1281
1282 KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1283
1284 dev = vp->v_rdev;
1285 cdp = cdev2priv(dev);
1286
1287 dev_lock();
1288 cdp->cdp_inuse++;
1289 dev_unlock();
1290
1291 vhold(vp);
1292 vgone(vp);
1293 vdrop(vp);
1294
1295 VOP_UNLOCK(vp,0);
1296 loop:
1297 for (;;) {
1298 mtx_lock(&devfs_de_interlock);
1299 dev_lock();
1300 vp2 = NULL;
1301 for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1302 de = cdp->cdp_dirents[i];
1303 if (de == NULL)
1304 continue;
1305
1306 vp2 = de->de_vnode;
1307 if (vp2 != NULL) {
1308 dev_unlock();
1309 VI_LOCK(vp2);
1310 mtx_unlock(&devfs_de_interlock);
1311 if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1312 curthread))
1313 goto loop;
1314 vhold(vp2);
1315 vgone(vp2);
1316 vdrop(vp2);
1317 vput(vp2);
1318 break;
1319 }
1320 }
1321 if (vp2 != NULL) {
1322 continue;
1323 }
1324 dev_unlock();
1325 mtx_unlock(&devfs_de_interlock);
1326 break;
1327 }
1328 dev_lock();
1329 cdp->cdp_inuse--;
1330 if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1331 TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1332 dev_unlock();
1333 dev_rel(&cdp->cdp_c);
1334 } else
1335 dev_unlock();
1336
1337 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1338 return (0);
1339 }
1340
1341 static int
1342 devfs_rioctl(struct vop_ioctl_args *ap)
1343 {
1344 struct vnode *vp;
1345 struct devfs_mount *dmp;
1346 int error;
1347
1348 vp = ap->a_vp;
1349 vn_lock(vp, LK_SHARED | LK_RETRY);
1350 if (vp->v_iflag & VI_DOOMED) {
1351 VOP_UNLOCK(vp, 0);
1352 return (EBADF);
1353 }
1354 dmp = VFSTODEVFS(vp->v_mount);
1355 sx_xlock(&dmp->dm_lock);
1356 VOP_UNLOCK(vp, 0);
1357 DEVFS_DMP_HOLD(dmp);
1358 devfs_populate(dmp);
1359 if (DEVFS_DMP_DROP(dmp)) {
1360 sx_xunlock(&dmp->dm_lock);
1361 devfs_unmount_final(dmp);
1362 return (ENOENT);
1363 }
1364 error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
1365 sx_xunlock(&dmp->dm_lock);
1366 return (error);
1367 }
1368
1369 static int
1370 devfs_rread(struct vop_read_args *ap)
1371 {
1372
1373 if (ap->a_vp->v_type != VDIR)
1374 return (EINVAL);
1375 return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1376 }
1377
1378 static int
1379 devfs_setattr(struct vop_setattr_args *ap)
1380 {
1381 struct devfs_dirent *de;
1382 struct vattr *vap;
1383 struct vnode *vp;
1384 struct thread *td;
1385 int c, error;
1386 uid_t uid;
1387 gid_t gid;
1388
1389 vap = ap->a_vap;
1390 vp = ap->a_vp;
1391 td = curthread;
1392 if ((vap->va_type != VNON) ||
1393 (vap->va_nlink != VNOVAL) ||
1394 (vap->va_fsid != VNOVAL) ||
1395 (vap->va_fileid != VNOVAL) ||
1396 (vap->va_blocksize != VNOVAL) ||
1397 (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1398 (vap->va_rdev != VNOVAL) ||
1399 ((int)vap->va_bytes != VNOVAL) ||
1400 (vap->va_gen != VNOVAL)) {
1401 return (EINVAL);
1402 }
1403
1404 de = vp->v_data;
1405 if (vp->v_type == VDIR)
1406 de = de->de_dir;
1407
1408 error = c = 0;
1409 if (vap->va_uid == (uid_t)VNOVAL)
1410 uid = de->de_uid;
1411 else
1412 uid = vap->va_uid;
1413 if (vap->va_gid == (gid_t)VNOVAL)
1414 gid = de->de_gid;
1415 else
1416 gid = vap->va_gid;
1417 if (uid != de->de_uid || gid != de->de_gid) {
1418 if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
1419 (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
1420 error = priv_check(td, PRIV_VFS_CHOWN);
1421 if (error)
1422 return (error);
1423 }
1424 de->de_uid = uid;
1425 de->de_gid = gid;
1426 c = 1;
1427 }
1428
1429 if (vap->va_mode != (mode_t)VNOVAL) {
1430 if (ap->a_cred->cr_uid != de->de_uid) {
1431 error = priv_check(td, PRIV_VFS_ADMIN);
1432 if (error)
1433 return (error);
1434 }
1435 de->de_mode = vap->va_mode;
1436 c = 1;
1437 }
1438
1439 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1440 /* See the comment in ufs_vnops::ufs_setattr(). */
1441 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1442 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1443 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1444 return (error);
1445 if (vap->va_atime.tv_sec != VNOVAL) {
1446 if (vp->v_type == VCHR)
1447 vp->v_rdev->si_atime = vap->va_atime;
1448 else
1449 de->de_atime = vap->va_atime;
1450 }
1451 if (vap->va_mtime.tv_sec != VNOVAL) {
1452 if (vp->v_type == VCHR)
1453 vp->v_rdev->si_mtime = vap->va_mtime;
1454 else
1455 de->de_mtime = vap->va_mtime;
1456 }
1457 c = 1;
1458 }
1459
1460 if (c) {
1461 if (vp->v_type == VCHR)
1462 vfs_timestamp(&vp->v_rdev->si_ctime);
1463 else
1464 vfs_timestamp(&de->de_mtime);
1465 }
1466 return (0);
1467 }
1468
1469 #ifdef MAC
1470 static int
1471 devfs_setlabel(struct vop_setlabel_args *ap)
1472 {
1473 struct vnode *vp;
1474 struct devfs_dirent *de;
1475
1476 vp = ap->a_vp;
1477 de = vp->v_data;
1478
1479 mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
1480 mac_devfs_update(vp->v_mount, de, vp);
1481
1482 return (0);
1483 }
1484 #endif
1485
1486 static int
1487 devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1488 {
1489
1490 return (vnops.fo_stat(fp, sb, cred, td));
1491 }
1492
1493 static int
1494 devfs_symlink(struct vop_symlink_args *ap)
1495 {
1496 int i, error;
1497 struct devfs_dirent *dd;
1498 struct devfs_dirent *de;
1499 struct devfs_mount *dmp;
1500
1501 error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
1502 if (error)
1503 return(error);
1504 dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1505 dd = ap->a_dvp->v_data;
1506 de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
1507 de->de_uid = 0;
1508 de->de_gid = 0;
1509 de->de_mode = 0755;
1510 de->de_inode = alloc_unr(devfs_inos);
1511 de->de_dirent->d_type = DT_LNK;
1512 i = strlen(ap->a_target) + 1;
1513 de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
1514 bcopy(ap->a_target, de->de_symlink, i);
1515 sx_xlock(&dmp->dm_lock);
1516 #ifdef MAC
1517 mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
1518 #endif
1519 TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
1520 devfs_rules_apply(dmp, de);
1521 return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
1522 }
1523
1524 static int
1525 devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1526 {
1527
1528 return (vnops.fo_truncate(fp, length, cred, td));
1529 }
1530
1531 /* ARGSUSED */
1532 static int
1533 devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
1534 {
1535 struct cdev *dev;
1536 int error, ioflag, ref, resid;
1537 struct cdevsw *dsw;
1538 struct file *fpop;
1539
1540 fpop = td->td_fpop;
1541 error = devfs_fp_check(fp, &dev, &dsw, &ref);
1542 if (error)
1543 return (error);
1544 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
1545 ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
1546 if (ioflag & O_DIRECT)
1547 ioflag |= IO_DIRECT;
1548 if ((flags & FOF_OFFSET) == 0)
1549 uio->uio_offset = fp->f_offset;
1550
1551 resid = uio->uio_resid;
1552
1553 error = dsw->d_write(dev, uio, ioflag);
1554 if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
1555 vfs_timestamp(&dev->si_ctime);
1556 dev->si_mtime = dev->si_ctime;
1557 }
1558 td->td_fpop = fpop;
1559 dev_relthread(dev, ref);
1560
1561 if ((flags & FOF_OFFSET) == 0)
1562 fp->f_offset = uio->uio_offset;
1563 fp->f_nextoff = uio->uio_offset;
1564 return (error);
1565 }
1566
1567 dev_t
1568 dev2udev(struct cdev *x)
1569 {
1570 if (x == NULL)
1571 return (NODEV);
1572 return (cdev2priv(x)->cdp_inode);
1573 }
1574
1575 static struct fileops devfs_ops_f = {
1576 .fo_read = devfs_read_f,
1577 .fo_write = devfs_write_f,
1578 .fo_truncate = devfs_truncate_f,
1579 .fo_ioctl = devfs_ioctl_f,
1580 .fo_poll = devfs_poll_f,
1581 .fo_kqfilter = devfs_kqfilter_f,
1582 .fo_stat = devfs_stat_f,
1583 .fo_close = devfs_close_f,
1584 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
1585 };
1586
1587 static struct vop_vector devfs_vnodeops = {
1588 .vop_default = &default_vnodeops,
1589
1590 .vop_access = devfs_access,
1591 .vop_getattr = devfs_getattr,
1592 .vop_ioctl = devfs_rioctl,
1593 .vop_lookup = devfs_lookup,
1594 .vop_mknod = devfs_mknod,
1595 .vop_pathconf = devfs_pathconf,
1596 .vop_read = devfs_rread,
1597 .vop_readdir = devfs_readdir,
1598 .vop_readlink = devfs_readlink,
1599 .vop_reclaim = devfs_reclaim,
1600 .vop_remove = devfs_remove,
1601 .vop_revoke = devfs_revoke,
1602 .vop_setattr = devfs_setattr,
1603 #ifdef MAC
1604 .vop_setlabel = devfs_setlabel,
1605 #endif
1606 .vop_symlink = devfs_symlink,
1607 .vop_vptocnp = devfs_vptocnp,
1608 };
1609
1610 static struct vop_vector devfs_specops = {
1611 .vop_default = &default_vnodeops,
1612
1613 .vop_access = devfs_access,
1614 .vop_bmap = VOP_PANIC,
1615 .vop_close = devfs_close,
1616 .vop_create = VOP_PANIC,
1617 .vop_fsync = devfs_fsync,
1618 .vop_getattr = devfs_getattr,
1619 .vop_link = VOP_PANIC,
1620 .vop_mkdir = VOP_PANIC,
1621 .vop_mknod = VOP_PANIC,
1622 .vop_open = devfs_open,
1623 .vop_pathconf = devfs_pathconf,
1624 .vop_print = devfs_print,
1625 .vop_read = VOP_PANIC,
1626 .vop_readdir = VOP_PANIC,
1627 .vop_readlink = VOP_PANIC,
1628 .vop_reallocblks = VOP_PANIC,
1629 .vop_reclaim = devfs_reclaim,
1630 .vop_remove = devfs_remove,
1631 .vop_rename = VOP_PANIC,
1632 .vop_revoke = devfs_revoke,
1633 .vop_rmdir = VOP_PANIC,
1634 .vop_setattr = devfs_setattr,
1635 #ifdef MAC
1636 .vop_setlabel = devfs_setlabel,
1637 #endif
1638 .vop_strategy = VOP_PANIC,
1639 .vop_symlink = VOP_PANIC,
1640 .vop_vptocnp = devfs_vptocnp,
1641 .vop_write = VOP_PANIC,
1642 };
1643
/*
 * Our calling convention to the device drivers used to be that we passed
 * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_*
 * flags instead, since those are what open(), close() and ioctl() take and
 * we don't really want vnode.h in device drivers.
 * We solved the source-compatibility problem by redefining some vnode flags
 * to be the same as the fcntl ones and by sending down the bitwise OR of
 * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
 * pulls the rug out from under this.
 */
1654 CTASSERT(O_NONBLOCK == IO_NDELAY);
1655 CTASSERT(O_FSYNC == IO_SYNC);
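
/*
 * Illustration (hypothetical driver code, not part of this file): because of
 * the assertions above, a cdevsw d_read/d_write method sees the same bit no
 * matter which spelling of the non-blocking flag it tests, so these two
 * checks are equivalent:
 *
 *	if (ioflag & O_NONBLOCK)	// fcntl.h spelling
 *	if (ioflag & IO_NDELAY)		// vnode.h spelling, same value
 */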