1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
26 /*
27 * Copyright (c) 1989, 1993, 1995
28 * The Regents of the University of California. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
59 */
60
61 #include <sys/param.h>
62 #include <sys/proc.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/conf.h>
66 #include <sys/buf.h>
67 #include <sys/mount.h>
68 #include <sys/namei.h>
69 #include <sys/vnode.h>
70 #include <sys/stat.h>
71 #include <sys/errno.h>
72 #include <sys/ioctl.h>
73 #include <sys/file.h>
74 #include <sys/malloc.h>
75 #include <sys/disk.h>
76 #include <miscfs/specfs/specdev.h>
77 #include <vfs/vfs_support.h>
78
79 #include <sys/kdebug.h>
80
/*
 * Hash chains of active special-file (device) vnodes; presumably indexed
 * by a hash of the device number (see SPECHSZ in specdev.h) and used to
 * find aliases of the same device -- TODO confirm against specdev.h.
 */
struct vnode *speclisth[SPECHSZ];

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
#define VOPFUNC int (*)(void *)

/*
 * Vnode-operation vector for special files (devices).  The VFS layer
 * fills in spec_vnodeop_p from this table at initialization.
 * Operations that make no sense on a device node are mapped to the
 * generic err_*() stubs; operations that are harmless no-ops are mapped
 * to the nop_*() stubs; the rest dispatch to the spec_*() routines in
 * this file.
 */
int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)err_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)err_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)spec_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)spec_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)spec_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)spec_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)spec_read },			/* read */
	{ &vop_write_desc, (VOPFUNC)spec_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)nop_lease },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)spec_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)err_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)err_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)err_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)err_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)err_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)err_rmdir },		/* rmdir */
	{ &vop_symlink_desc, (VOPFUNC)err_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)err_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)err_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)err_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)nop_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)nop_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)nop_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)nop_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)spec_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)nop_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)err_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)err_valloc },		/* valloc */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)nop_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)nop_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)spec_bwrite },		/* bwrite */
	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vop_pagein_desc, (VOPFUNC)err_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)err_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)spec_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)spec_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)spec_cmap },			/* cmap */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
150
151 /*
152 * Trivial lookup routine that always fails.
153 */
154 int
155 spec_lookup(ap)
156 struct vop_lookup_args /* {
157 struct vnode *a_dvp;
158 struct vnode **a_vpp;
159 struct componentname *a_cnp;
160 } */ *ap;
161 {
162
163 *ap->a_vpp = NULL;
164 return (ENOTDIR);
165 }
166
167 void
168 set_blocksize(struct vnode *vp, dev_t dev)
169 {
170 int (*size)();
171 int rsize;
172
173 if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
174 rsize = (*size)(dev);
175 if (rsize <= 0) /* did size fail? */
176 vp->v_specsize = DEV_BSIZE;
177 else
178 vp->v_specsize = rsize;
179 }
180 else
181 vp->v_specsize = DEV_BSIZE;
182 }
183
184 void
185 set_fsblocksize(struct vnode *vp)
186 {
187
188 if (vp->v_type == VBLK) {
189 dev_t dev = (dev_t)vp->v_rdev;
190 int maj = major(dev);
191
192 if ((u_int)maj >= nblkdev)
193 return;
194
195 set_blocksize(vp, dev);
196 }
197
198 }
199
200
201 /*
202 * Open a special file.
203 */
204 /* ARGSUSED */
205 spec_open(ap)
206 struct vop_open_args /* {
207 struct vnode *a_vp;
208 int a_mode;
209 struct ucred *a_cred;
210 struct proc *a_p;
211 } */ *ap;
212 {
213 struct proc *p = ap->a_p;
214 struct vnode *bvp, *vp = ap->a_vp;
215 dev_t bdev, dev = (dev_t)vp->v_rdev;
216 int maj = major(dev);
217 int error;
218
219 /*
220 * Don't allow open if fs is mounted -nodev.
221 */
222 if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
223 return (ENXIO);
224
225 switch (vp->v_type) {
226
227 case VCHR:
228 if ((u_int)maj >= nchrdev)
229 return (ENXIO);
230 if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
231 /*
232 * When running in very secure mode, do not allow
233 * opens for writing of any disk character devices.
234 */
235 if (securelevel >= 2 && isdisk(dev, VCHR))
236 return (EPERM);
237 /*
238 * When running in secure mode, do not allow opens
239 * for writing of /dev/mem, /dev/kmem, or character
240 * devices whose corresponding block devices are
241 * currently mounted.
242 */
243 if (securelevel >= 1) {
244 if ((bdev = chrtoblk(dev)) != NODEV &&
245 vfinddev(bdev, VBLK, &bvp) &&
246 bvp->v_usecount > 0 &&
247 (error = vfs_mountedon(bvp)))
248 return (error);
249 if (iskmemdev(dev))
250 return (EPERM);
251 }
252 }
253 if (cdevsw[maj].d_type == D_TTY)
254 vp->v_flag |= VISTTY;
255 VOP_UNLOCK(vp, 0, p);
256 error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
257 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
258 return (error);
259
260 case VBLK:
261 if ((u_int)maj >= nblkdev)
262 return (ENXIO);
263 /*
264 * When running in very secure mode, do not allow
265 * opens for writing of any disk block devices.
266 */
267 if (securelevel >= 2 && ap->a_cred != FSCRED &&
268 (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
269 return (EPERM);
270 /*
271 * Do not allow opens of block devices that are
272 * currently mounted.
273 */
274 if (error = vfs_mountedon(vp))
275 return (error);
276 error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
277 if (!error) {
278 u_int64_t blkcnt;
279 u_int32_t blksize;
280
281 set_blocksize(vp, dev);
282
283 /*
284 * Cache the size in bytes of the block device for later
285 * use by spec_write().
286 */
287 vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */
288 if (!VOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, NOCRED, p)) {
289 /* Switch to 512 byte sectors (temporarily) */
290 u_int32_t size512 = 512;
291
292 if (!VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, NOCRED, p)) {
293 /* Get the number of 512 byte physical blocks. */
294 if (!VOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, NOCRED, p)) {
295 vp->v_specdevsize = blkcnt * (u_int64_t)size512;
296 }
297 }
298 /* If it doesn't set back, we can't recover */
299 if (VOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, NOCRED, p))
300 error = ENXIO;
301 }
302 }
303 return(error);
304 }
305 return (0);
306 }
307
308 /*
309 * Vnode op for read
310 */
311 /* ARGSUSED */
312 spec_read(ap)
313 struct vop_read_args /* {
314 struct vnode *a_vp;
315 struct uio *a_uio;
316 int a_ioflag;
317 struct ucred *a_cred;
318 } */ *ap;
319 {
320 register struct vnode *vp = ap->a_vp;
321 register struct uio *uio = ap->a_uio;
322 struct proc *p = uio->uio_procp;
323 struct buf *bp;
324 daddr_t bn, nextbn;
325 long bsize, bscale;
326 int devBlockSize=0;
327 int n, on, majordev, (*ioctl)();
328 int error = 0;
329 dev_t dev;
330
331 #if DIAGNOSTIC
332 if (uio->uio_rw != UIO_READ)
333 panic("spec_read mode");
334 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
335 panic("spec_read proc");
336 #endif
337 if (uio->uio_resid == 0)
338 return (0);
339
340 switch (vp->v_type) {
341
342 case VCHR:
343 VOP_UNLOCK(vp, 0, p);
344 error = (*cdevsw[major(vp->v_rdev)].d_read)
345 (vp->v_rdev, uio, ap->a_ioflag);
346 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
347 return (error);
348
349 case VBLK:
350 if (uio->uio_offset < 0)
351 return (EINVAL);
352
353 dev = vp->v_rdev;
354
355 devBlockSize = vp->v_specsize;
356
357 if (devBlockSize > PAGE_SIZE)
358 return (EINVAL);
359
360 bscale = PAGE_SIZE / devBlockSize;
361 bsize = bscale * devBlockSize;
362
363 do {
364 on = uio->uio_offset % bsize;
365
366 bn = (uio->uio_offset / devBlockSize) &~ (bscale - 1);
367
368 if (vp->v_lastr + bscale == bn) {
369 nextbn = bn + bscale;
370 error = breadn(vp, bn, (int)bsize, &nextbn,
371 (int *)&bsize, 1, NOCRED, &bp);
372 } else
373 error = bread(vp, bn, (int)bsize, NOCRED, &bp);
374
375 vp->v_lastr = bn;
376 n = bsize - bp->b_resid;
377 if ((on > n) || error) {
378 if (!error)
379 error = EINVAL;
380 brelse(bp);
381 return (error);
382 }
383 n = min((unsigned)(n - on), uio->uio_resid);
384
385 error = uiomove((char *)bp->b_data + on, n, uio);
386 if (n + on == bsize)
387 bp->b_flags |= B_AGE;
388 brelse(bp);
389 } while (error == 0 && uio->uio_resid > 0 && n != 0);
390 return (error);
391
392 default:
393 panic("spec_read type");
394 }
395 /* NOTREACHED */
396 }
397
398 /*
399 * Vnode op for write
400 */
401 /* ARGSUSED */
402 spec_write(ap)
403 struct vop_write_args /* {
404 struct vnode *a_vp;
405 struct uio *a_uio;
406 int a_ioflag;
407 struct ucred *a_cred;
408 } */ *ap;
409 {
410 register struct vnode *vp = ap->a_vp;
411 register struct uio *uio = ap->a_uio;
412 struct proc *p = uio->uio_procp;
413 struct buf *bp;
414 daddr_t bn;
415 int bsize, blkmask, bscale;
416 register int io_sync;
417 register int io_size;
418 int devBlockSize=0;
419 register int n, on;
420 int error = 0;
421 dev_t dev;
422
423 #if DIAGNOSTIC
424 if (uio->uio_rw != UIO_WRITE)
425 panic("spec_write mode");
426 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
427 panic("spec_write proc");
428 #endif
429
430 switch (vp->v_type) {
431
432 case VCHR:
433 VOP_UNLOCK(vp, 0, p);
434 error = (*cdevsw[major(vp->v_rdev)].d_write)
435 (vp->v_rdev, uio, ap->a_ioflag);
436 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
437 return (error);
438
439 case VBLK:
440 if (uio->uio_resid == 0)
441 return (0);
442 if (uio->uio_offset < 0)
443 return (EINVAL);
444
445 io_sync = (ap->a_ioflag & IO_SYNC);
446 io_size = uio->uio_resid;
447
448 dev = (vp->v_rdev);
449
450 devBlockSize = vp->v_specsize;
451 if (devBlockSize > PAGE_SIZE)
452 return(EINVAL);
453
454 bscale = PAGE_SIZE / devBlockSize;
455 blkmask = bscale - 1;
456 bsize = bscale * devBlockSize;
457
458
459 do {
460 bn = (uio->uio_offset / devBlockSize) &~ blkmask;
461 on = uio->uio_offset % bsize;
462
463 n = min((unsigned)(bsize - on), uio->uio_resid);
464
465 /*
466 * Use getblk() as an optimization IFF:
467 *
468 * 1) We are reading exactly a block on a block
469 * aligned boundary
470 * 2) We know the size of the device from spec_open
471 * 3) The read doesn't span the end of the device
472 *
473 * Otherwise, we fall back on bread().
474 */
475 if (n == bsize &&
476 vp->v_specdevsize != (u_int64_t)0 &&
477 (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
478 /* reduce the size of the read to what is there */
479 n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
480 }
481
482 if (n == bsize)
483 bp = getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
484 else
485 error = bread(vp, bn, bsize, NOCRED, &bp);
486
487 /* Translate downstream error for upstream, if needed */
488 if (!error) {
489 error = bp->b_error;
490 if (!error && (bp->b_flags & B_ERROR) != 0) {
491 error = EIO;
492 }
493 }
494 if (error) {
495 brelse(bp);
496 return (error);
497 }
498 n = min(n, bsize - bp->b_resid);
499
500 error = uiomove((char *)bp->b_data + on, n, uio);
501
502 bp->b_flags |= B_AGE;
503
504 if (io_sync)
505 bwrite(bp);
506 else {
507 if ((n + on) == bsize)
508 bawrite(bp);
509 else
510 bdwrite(bp);
511 }
512 } while (error == 0 && uio->uio_resid > 0 && n != 0);
513 return (error);
514
515 default:
516 panic("spec_write type");
517 }
518 /* NOTREACHED */
519 }
520
521 /*
522 * Device ioctl operation.
523 */
524 /* ARGSUSED */
525 spec_ioctl(ap)
526 struct vop_ioctl_args /* {
527 struct vnode *a_vp;
528 int a_command;
529 caddr_t a_data;
530 int a_fflag;
531 struct ucred *a_cred;
532 struct proc *a_p;
533 } */ *ap;
534 {
535 dev_t dev = ap->a_vp->v_rdev;
536
537 switch (ap->a_vp->v_type) {
538
539 case VCHR:
540 return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
541 ap->a_fflag, ap->a_p));
542
543 case VBLK:
544 if (ap->a_command == 0 && (int)ap->a_data == B_TAPE)
545 if (bdevsw[major(dev)].d_type == D_TAPE)
546 return (0);
547 else
548 return (1);
549 return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
550 ap->a_fflag, ap->a_p));
551
552 default:
553 panic("spec_ioctl");
554 /* NOTREACHED */
555 }
556 }
557
558 /* ARGSUSED */
559 spec_select(ap)
560 struct vop_select_args /* {
561 struct vnode *a_vp;
562 int a_which;
563 int a_fflags;
564 struct ucred *a_cred;
565 void * a_wql;
566 struct proc *a_p;
567 } */ *ap;
568 {
569 register dev_t dev;
570
571 switch (ap->a_vp->v_type) {
572
573 default:
574 return (1); /* XXX */
575
576 case VCHR:
577 dev = ap->a_vp->v_rdev;
578 return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, ap->a_p);
579 }
580 }
/*
 * Synch buffers associated with a block device.
 *
 * Walks the vnode's dirty-buffer list at splbio and issues an async
 * write for each flushable buffer, restarting the scan after every
 * write (bawrite may sleep).  With MNT_WAIT, additionally waits for all
 * outstanding writes to drain.  Always returns 0.
 */
/* ARGSUSED */
int
spec_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	/* Character devices keep no cached buffers; nothing to flush. */
	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();	/* keep bio interrupts out while walking the list */
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		// XXXdbg - don't flush locked blocks.  they may be journaled.
		if ((bp->b_flags & B_BUSY) || (bp->b_flags & B_LOCKED))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/* bawrite() may sleep; the list can change, so rescan. */
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		/* Drain every write issued above before returning. */
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "spec_fsync", 0);
		}
#if DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			/* New dirty buffers appeared while sleeping; retry. */
			vprint("spec_fsync: dirty", vp);
			splx(s);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}
635
636 /*
637 * Just call the device strategy routine
638 */
639 spec_strategy(ap)
640 struct vop_strategy_args /* {
641 struct buf *a_bp;
642 } */ *ap;
643 {
644 struct buf *bp;
645 extern int hard_throttle_on_root;
646
647 bp = ap->a_bp;
648
649 if (kdebug_enable) {
650 int code = 0;
651
652 if (bp->b_flags & B_READ)
653 code |= DKIO_READ;
654 if (bp->b_flags & B_ASYNC)
655 code |= DKIO_ASYNC;
656
657 if (bp->b_flags & B_META)
658 code |= DKIO_META;
659 else if (bp->b_flags & (B_PGIN | B_PAGEOUT))
660 code |= DKIO_PAGING;
661
662 KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
663 (unsigned int)bp, bp->b_dev, bp->b_blkno, bp->b_bcount, 0);
664 }
665 if ((bp->b_flags & B_PGIN) && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV))
666 hard_throttle_on_root = 1;
667
668 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
669 return (0);
670 }
671
672 /*
673 * This is a noop, simply returning what one has been given.
674 */
675 spec_bmap(ap)
676 struct vop_bmap_args /* {
677 struct vnode *a_vp;
678 daddr_t a_bn;
679 struct vnode **a_vpp;
680 daddr_t *a_bnp;
681 int *a_runp;
682 } */ *ap;
683 {
684
685 if (ap->a_vpp != NULL)
686 *ap->a_vpp = ap->a_vp;
687 if (ap->a_bnp != NULL)
688 *ap->a_bnp = ap->a_bn * (PAGE_SIZE / ap->a_vp->v_specsize);
689 if (ap->a_runp != NULL)
690 *ap->a_runp = (MAXPHYSIO / PAGE_SIZE) - 1;
691 return (0);
692 }
693
/*
 * Offset-based block mapping (cmap) is not supported on special files.
 *
 * Returns EOPNOTSUPP unconditionally; the argument is not examined.
 * (The previous header comment claiming this is a pass-through no-op
 * was a copy-paste from spec_bmap and was wrong.)
 */
int
spec_cmap(ap)
	struct vop_cmap_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		size_t a_size;
		daddr_t *a_bpn;
		size_t *a_run;
		void *a_poff;
	} */ *ap;
{
	return (EOPNOTSUPP);
}
709
710
/*
 * Device close routine.
 *
 * Decides whether this close is the last reference to the underlying
 * device (or a forced close via VXLOCK) and, if so, calls the driver's
 * d_close entry point.  Block devices have their cached buffers written
 * back and invalidated first; how that invalidation is locked depends
 * on DEVFS_IMPLEMENTS_LOCKING.
 */
/* ARGSUSED */
spec_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	/* Driver close entry point, chosen per device type below. */
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			ap->a_p->p_session->s_ttyvp = NULL;
			vrele(vp);
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
#ifdef DEVFS_IMPLEMENTS_LOCKING
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
#else /* DEVFS_IMPLEMENTS_LOCKING */
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);

		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
#endif /* DEVFS_IMPLEMENTS_LOCKING */
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}
811
812 /*
813 * Print out the contents of a special device vnode.
814 */
815 spec_print(ap)
816 struct vop_print_args /* {
817 struct vnode *a_vp;
818 } */ *ap;
819 {
820
821 printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
822 minor(ap->a_vp->v_rdev));
823 }
824
825 /*
826 * Return POSIX pathconf information applicable to special devices.
827 */
828 spec_pathconf(ap)
829 struct vop_pathconf_args /* {
830 struct vnode *a_vp;
831 int a_name;
832 int *a_retval;
833 } */ *ap;
834 {
835
836 switch (ap->a_name) {
837 case _PC_LINK_MAX:
838 *ap->a_retval = LINK_MAX;
839 return (0);
840 case _PC_MAX_CANON:
841 *ap->a_retval = MAX_CANON;
842 return (0);
843 case _PC_MAX_INPUT:
844 *ap->a_retval = MAX_INPUT;
845 return (0);
846 case _PC_PIPE_BUF:
847 *ap->a_retval = PIPE_BUF;
848 return (0);
849 case _PC_CHOWN_RESTRICTED:
850 *ap->a_retval = 1;
851 return (0);
852 case _PC_VDISABLE:
853 *ap->a_retval = _POSIX_VDISABLE;
854 return (0);
855 default:
856 return (EINVAL);
857 }
858 /* NOTREACHED */
859 }
860
/*
 * Report the device block size (in bytes) cached on the vnode by
 * set_blocksize().  Always succeeds.
 */
int
spec_devblocksize(ap)
	struct vop_devblocksize_args /* {
		struct vnode *a_vp;
		int *a_retval;
	} */ *ap;
{
	*ap->a_retval = (ap->a_vp->v_specsize);
	return (0);
}
871
/*
 * Special device failed operation.
 *
 * Common stub installed for operations on a descriptor whose device is
 * no longer usable.  Always returns EBADF.
 */
int
spec_ebadf()
{

	return (EBADF);
}
880
/*
 * Special device bad operation.
 *
 * Installed for vnode operations that must never be reached on a
 * special file.  Panics and does not return.
 */
int
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
890
891 /* Blktooff derives file offset from logical block number */
892 int
893 spec_blktooff(ap)
894 struct vop_blktooff_args /* {
895 struct vnode *a_vp;
896 daddr_t a_lblkno;
897 off_t *a_offset;
898 } */ *ap;
899 {
900 register struct vnode *vp = ap->a_vp;
901
902 switch (vp->v_type) {
903 case VCHR:
904 *ap->a_offset = (off_t)-1; /* failure */
905 return (EOPNOTSUPP);
906
907 case VBLK:
908 printf("spec_blktooff: not implemented for VBLK\n");
909 *ap->a_offset = (off_t)-1; /* failure */
910 return (EOPNOTSUPP);
911
912 default:
913 panic("spec_blktooff type");
914 }
915 /* NOTREACHED */
916 }
917
918 /* Offtoblk derives logical block number from file offset */
919 int
920 spec_offtoblk(ap)
921 struct vop_offtoblk_args /* {
922 struct vnode *a_vp;
923 off_t a_offset;
924 daddr_t *a_lblkno;
925 } */ *ap;
926 {
927 register struct vnode *vp = ap->a_vp;
928
929 switch (vp->v_type) {
930 case VCHR:
931 *ap->a_lblkno = (daddr_t)-1; /* failure */
932 return (EOPNOTSUPP);
933
934 case VBLK:
935 printf("spec_offtoblk: not implemented for VBLK\n");
936 *ap->a_lblkno = (daddr_t)-1; /* failure */
937 return (EOPNOTSUPP);
938
939 default:
940 panic("spec_offtoblk type");
941 }
942 /* NOTREACHED */
943 }