FreeBSD/Linux Kernel Cross Reference
sys/dev/md/md.c
/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN 0x10000     /* Tell worker thread to terminate. */
#define MD_EXITING  0x20000     /* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
        u_char start[MD_ROOT_SIZE*1024];
        u_char end[128];
} mfs_root = {
        .start = "MFS Filesystem goes here",
        .end = "MFS Filesystem had better STOP here",
};
#endif
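
/*
 * Illustrative note: because the start and end markers above are plain
 * strings, a post-processing tool can locate the reserved region inside
 * the linked kernel object (e.g. by scanning for "MFS Filesystem goes
 * here") and overwrite it with a real file system image of up to
 * MD_ROOT_SIZE kilobytes.
 */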

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = mdctlioctl,
        .d_name = MD_NAME,
};

struct g_class g_md_class = {
        .name = "MD",
        .version = G_VERSION,
        .init = g_md_init,
        .fini = g_md_fini,
        .start = g_md_start,
        .access = g_md_access,
        .dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR  (PAGE_SIZE / sizeof(uintptr_t))
#define NMASK   (NINDIR-1)
static int nshift;
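
/*
 * Worked example (illustrative): with PAGE_SIZE = 4096 and 8-byte
 * pointers, NINDIR is 512 entries per indir page and NMASK is 0x1ff,
 * so g_md_init() below computes nshift = 9, i.e. each layer of the
 * indir tree consumes nine bits of the sector number.
 */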

struct indir {
        uintptr_t *array;
        u_int total;
        u_int used;
        u_int shift;
};

struct md_s {
        int unit;
        LIST_ENTRY(md_s) list;
        struct bio_queue_head bio_queue;
        struct mtx queue_mtx;
        struct cdev *dev;
        enum md_types type;
        off_t mediasize;
        unsigned sectorsize;
        unsigned opencount;
        unsigned fwheads;
        unsigned fwsectors;
        unsigned flags;
        char name[20];
        struct proc *procp;
        struct g_geom *gp;
        struct g_provider *pp;
        int (*start)(struct md_s *sc, struct bio *bp);

        /* MD_MALLOC related fields */
        struct indir *indir;
        uma_zone_t uma;

        /* MD_PRELOAD related fields */
        u_char *pl_ptr;
        size_t pl_len;

        /* MD_VNODE related fields */
        struct vnode *vnode;
        char file[PATH_MAX];
        struct ucred *cred;

        /* MD_SWAP related fields */
        vm_object_t object;
};
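
/*
 * For MD_MALLOC devices each entry in the indir tree is a uintptr_t
 * with an overloaded encoding: 0 means "never written" (reads return
 * zeroes), a value in 1..255 means "sector uniformly filled with that
 * byte" (how MD_COMPRESS stores constant sectors), and anything larger
 * is a pointer to a real sector allocated from sc->uma.  See
 * mdstart_malloc() and destroy_indir(), which both test against 255.
 */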

static struct indir *
new_indir(u_int shift)
{
        struct indir *ip;

        ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
        if (ip == NULL)
                return (NULL);
        ip->array = malloc(sizeof(uintptr_t) * NINDIR,
            M_MDSECT, M_NOWAIT | M_ZERO);
        if (ip->array == NULL) {
                free(ip, M_MD);
                return (NULL);
        }
        ip->total = NINDIR;
        ip->shift = shift;
        return (ip);
}

static void
del_indir(struct indir *ip)
{

        free(ip->array, M_MDSECT);
        free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
        int i;

        for (i = 0; i < NINDIR; i++) {
                if (!ip->array[i])
                        continue;
                if (ip->shift)
                        destroy_indir(sc, (struct indir*)(ip->array[i]));
                else if (ip->array[i] > 255)
                        uma_zfree(sc->uma, (void *)(ip->array[i]));
        }
        del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
        off_t rcnt;
        struct indir *ip;
        int layer;

        rcnt = size;
        layer = 0;
        while (rcnt > NINDIR) {
                rcnt /= NINDIR;
                layer++;
        }

        /*
         * XXX: the top layer is probably not fully populated, so we allocate
         * too much space for ip->array in here.
         */
        ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
        ip->array = malloc(sizeof(uintptr_t) * NINDIR,
            M_MDSECT, M_WAITOK | M_ZERO);
        ip->total = NINDIR;
        ip->shift = layer * nshift;
        return (ip);
}
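
/*
 * Illustrative sizing example, assuming NINDIR = 512 (nshift = 9): a
 * device of 2,097,152 sectors (1 GB at 512 bytes/sector) loops twice,
 * since 2097152 / 512 = 4096 > 512 and 4096 / 512 = 8, giving
 * layer = 2.  The top indir therefore gets shift = 2 * 9 = 18, and
 * only 8 of its 512 slots will ever be used, which is the
 * over-allocation the XXX comment above refers to.
 */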

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
        struct indir *cip;
        int idx;
        uintptr_t up;

        if (md_debug > 1)
                printf("s_read(%jd)\n", (intmax_t)offset);
        up = 0;
        for (cip = ip; cip != NULL;) {
                if (cip->shift) {
                        idx = (offset >> cip->shift) & NMASK;
                        up = cip->array[idx];
                        cip = (struct indir *)up;
                        continue;
                }
                idx = offset & NMASK;
                return (cip->array[idx]);
        }
        return (0);
}
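
/*
 * Lookup example (illustrative, nshift = 9): for sector 1,000,000 in
 * the three-layer tree sketched above, the walk uses index
 * (1000000 >> 18) & 0x1ff = 3 in the top layer, (1000000 >> 9) &
 * 0x1ff = 417 in the middle layer, and 1000000 & 0x1ff = 64 in the
 * leaf.
 */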

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
        struct indir *cip, *lip[10];
        int idx, li;
        uintptr_t up;

        if (md_debug > 1)
                printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
        up = 0;
        li = 0;
        cip = ip;
        for (;;) {
                lip[li++] = cip;
                if (cip->shift) {
                        idx = (offset >> cip->shift) & NMASK;
                        up = cip->array[idx];
                        if (up != 0) {
                                cip = (struct indir *)up;
                                continue;
                        }
                        /* Allocate branch */
                        cip->array[idx] =
                            (uintptr_t)new_indir(cip->shift - nshift);
                        if (cip->array[idx] == 0)
                                return (ENOSPC);
                        cip->used++;
                        up = cip->array[idx];
                        cip = (struct indir *)up;
                        continue;
                }
                /* leafnode */
                idx = offset & NMASK;
                up = cip->array[idx];
                if (up != 0)
                        cip->used--;
                cip->array[idx] = ptr;
                if (ptr != 0)
                        cip->used++;
                break;
        }
        if (cip->used != 0 || li == 1)
                return (0);
        li--;
        while (cip->used == 0 && cip != ip) {
                li--;
                idx = (offset >> lip[li]->shift) & NMASK;
                up = lip[li]->array[idx];
                KASSERT(up == (uintptr_t)cip, ("md screwed up"));
                del_indir(cip);
                lip[li]->array[idx] = 0;
                lip[li]->used--;
                cip = lip[li];
        }
        return (0);
}
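
/*
 * Pruning example (illustrative): writing 0 over the only live entry
 * of a leaf indir drops its "used" count to 0, so the loop at the
 * bottom walks back up lip[], frees the now-empty leaf, clears the
 * parent slot, and repeats until it reaches an indir that is still in
 * use (or the root, which is never freed here).
 */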

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
        struct md_s *sc;

        sc = pp->geom->softc;
        if (sc == NULL)
                return (ENXIO);
        r += pp->acr;
        w += pp->acw;
        e += pp->ace;
        if ((sc->flags & MD_READONLY) != 0 && w > 0)
                return (EROFS);
        if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
                sc->opencount = 1;
        } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
                sc->opencount = 0;
        }
        return (0);
}

static void
g_md_start(struct bio *bp)
{
        struct md_s *sc;

        sc = bp->bio_to->geom->softc;
        mtx_lock(&sc->queue_mtx);
        bioq_disksort(&sc->bio_queue, bp);
        mtx_unlock(&sc->queue_mtx);
        wakeup(sc);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
        int i, error;
        u_char *dst;
        off_t secno, nsec, uc;
        uintptr_t sp, osp;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                break;
        default:
                return (EOPNOTSUPP);
        }

        nsec = bp->bio_length / sc->sectorsize;
        secno = bp->bio_offset / sc->sectorsize;
        dst = bp->bio_data;
        error = 0;
        while (nsec--) {
                osp = s_read(sc->indir, secno);
                if (bp->bio_cmd == BIO_DELETE) {
                        if (osp != 0)
                                error = s_write(sc->indir, secno, 0);
                } else if (bp->bio_cmd == BIO_READ) {
                        if (osp == 0)
                                bzero(dst, sc->sectorsize);
                        else if (osp <= 255)
                                for (i = 0; i < sc->sectorsize; i++)
                                        dst[i] = osp;
                        else
                                bcopy((void *)osp, dst, sc->sectorsize);
                        osp = 0;
                } else if (bp->bio_cmd == BIO_WRITE) {
                        if (sc->flags & MD_COMPRESS) {
                                uc = dst[0];
                                for (i = 1; i < sc->sectorsize; i++)
                                        if (dst[i] != uc)
                                                break;
                        } else {
                                i = 0;
                                uc = 0;
                        }
                        if (i == sc->sectorsize) {
                                if (osp != uc)
                                        error = s_write(sc->indir, secno, uc);
                        } else {
                                if (osp <= 255) {
                                        sp = (uintptr_t)uma_zalloc(sc->uma,
                                            M_NOWAIT);
                                        if (sp == 0) {
                                                error = ENOSPC;
                                                break;
                                        }
                                        bcopy(dst, (void *)sp, sc->sectorsize);
                                        error = s_write(sc->indir, secno, sp);
                                } else {
                                        bcopy(dst, (void *)osp, sc->sectorsize);
                                        osp = 0;
                                }
                        }
                } else {
                        error = EOPNOTSUPP;
                }
                if (osp > 255)
                        uma_zfree(sc->uma, (void*)osp);
                if (error != 0)
                        break;
                secno++;
                dst += sc->sectorsize;
        }
        bp->bio_resid = 0;
        return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

        switch (bp->bio_cmd) {
        case BIO_READ:
                bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
                    bp->bio_length);
                break;
        case BIO_WRITE:
                bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
                    bp->bio_length);
                break;
        }
        bp->bio_resid = 0;
        return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
        int error, vfslocked;
        struct uio auio;
        struct iovec aiov;
        struct mount *mp;
        struct vnode *vp;
        struct thread *td;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_FLUSH:
                break;
        default:
                return (EOPNOTSUPP);
        }

        td = curthread;
        vp = sc->vnode;

        /*
         * VNODE I/O
         *
         * If an error occurs, we set BIO_ERROR but we do not set
         * B_INVAL because (for a write anyway), the buffer is
         * still valid.
         */

        if (bp->bio_cmd == BIO_FLUSH) {
                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
                error = VOP_FSYNC(vp, MNT_WAIT, td);
                VOP_UNLOCK(vp, 0, td);
                vn_finished_write(mp);
                VFS_UNLOCK_GIANT(vfslocked);
                return (error);
        }

        bzero(&auio, sizeof(auio));

        aiov.iov_base = bp->bio_data;
        aiov.iov_len = bp->bio_length;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
        auio.uio_segflg = UIO_SYSSPACE;
        if (bp->bio_cmd == BIO_READ)
                auio.uio_rw = UIO_READ;
        else if (bp->bio_cmd == BIO_WRITE)
                auio.uio_rw = UIO_WRITE;
        else
                panic("wrong BIO_OP in mdstart_vnode");
        auio.uio_resid = bp->bio_length;
        auio.uio_td = td;
        /*
         * When reading set IO_DIRECT to try to avoid double-caching
         * the data.  When writing IO_DIRECT is not optimal.
         */
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        if (bp->bio_cmd == BIO_READ) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
                error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
                VOP_UNLOCK(vp, 0, td);
        } else {
                (void) vn_start_write(vp, &mp, V_WAIT);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
                error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
                    sc->cred);
                VOP_UNLOCK(vp, 0, td);
                vn_finished_write(mp);
        }
        VFS_UNLOCK_GIANT(vfslocked);
        bp->bio_resid = auio.uio_resid;
        return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
        struct sf_buf *sf;
        int rv, offs, len, lastend;
        vm_pindex_t i, lastp;
        vm_page_t m;
        u_char *p;

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                break;
        default:
                return (EOPNOTSUPP);
        }

        p = bp->bio_data;

        /*
         * offs is the offset at which to start operating on the
         * next (i.e., first) page.  lastp is the last page on
         * which we're going to operate.  lastend is the ending
         * position within that last page (i.e., PAGE_SIZE if
         * we're operating on complete aligned pages).
         */
        offs = bp->bio_offset % PAGE_SIZE;
        lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
        lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
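
        /*
         * Worked example (illustrative, PAGE_SIZE = 4096): bio_offset =
         * 6144 and bio_length = 8192 span pages 1..3, so offs = 2048,
         * lastp = 3 and lastend = 2048.  The loop below then copies
         * 2048 bytes from page 1, 4096 from page 2 and 2048 from page 3
         * (offs is reset to 0 after the first page).
         */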

        rv = VM_PAGER_OK;
        VM_OBJECT_LOCK(sc->object);
        vm_object_pip_add(sc->object, 1);
        for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
                len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

                m = vm_page_grab(sc->object, i,
                    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
                VM_OBJECT_UNLOCK(sc->object);
                sched_pin();
                sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
                VM_OBJECT_LOCK(sc->object);
                if (bp->bio_cmd == BIO_READ) {
                        if (m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_lock_queues();
                                vm_page_wakeup(m);
                                vm_page_unlock_queues();
                                break;
                        }
                        bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
                } else if (bp->bio_cmd == BIO_WRITE) {
                        if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_lock_queues();
                                vm_page_wakeup(m);
                                vm_page_unlock_queues();
                                break;
                        }
                        bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
                        m->valid = VM_PAGE_BITS_ALL;
#if 0
                } else if (bp->bio_cmd == BIO_DELETE) {
                        if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
                                rv = vm_pager_get_pages(sc->object, &m, 1, 0);
                        if (rv == VM_PAGER_ERROR) {
                                sf_buf_free(sf);
                                sched_unpin();
                                vm_page_lock_queues();
                                vm_page_wakeup(m);
                                vm_page_unlock_queues();
                                break;
                        }
                        bzero((void *)(sf_buf_kva(sf) + offs), len);
                        vm_page_dirty(m);
                        m->valid = VM_PAGE_BITS_ALL;
#endif
                }
                sf_buf_free(sf);
                sched_unpin();
                vm_page_lock_queues();
                vm_page_wakeup(m);
                vm_page_activate(m);
                if (bp->bio_cmd == BIO_WRITE)
                        vm_page_dirty(m);
                vm_page_unlock_queues();

                /* Actions on further pages start at offset 0 */
                p += PAGE_SIZE - offs;
                offs = 0;
#if 0
                if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
                        printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
                            m->wire_count, m->busy,
                            m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
        }
        vm_object_pip_subtract(sc->object, 1);
        vm_object_set_writeable_dirty(sc->object);
        VM_OBJECT_UNLOCK(sc->object);
        return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
        struct md_s *sc;
        struct bio *bp;
        int error;

        sc = arg;
        thread_lock(curthread);
        sched_prio(curthread, PRIBIO);
        thread_unlock(curthread);
        if (sc->type == MD_VNODE)
                curthread->td_pflags |= TDP_NORUNNINGBUF;

        for (;;) {
                mtx_lock(&sc->queue_mtx);
                if (sc->flags & MD_SHUTDOWN) {
                        sc->flags |= MD_EXITING;
                        mtx_unlock(&sc->queue_mtx);
                        kthread_exit(0);
                }
                bp = bioq_takefirst(&sc->bio_queue);
                if (!bp) {
                        msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
                        continue;
                }
                mtx_unlock(&sc->queue_mtx);
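                /*
                 * g_handleattr_int() completes and delivers any bio it
                 * handles, so the -1 pseudo-error below marks bios that
                 * must not be delivered a second time further down.
                 */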
                if (bp->bio_cmd == BIO_GETATTR) {
                        if (sc->fwsectors && sc->fwheads &&
                            (g_handleattr_int(bp, "GEOM::fwsectors",
                            sc->fwsectors) ||
                            g_handleattr_int(bp, "GEOM::fwheads",
                            sc->fwheads)))
                                error = -1;
                        else
                                error = EOPNOTSUPP;
                } else {
                        error = sc->start(sc, bp);
                }

                if (error != -1) {
                        bp->bio_completed = bp->bio_length;
                        g_io_deliver(bp, error);
                }
        }
}

static struct md_s *
mdfind(int unit)
{
        struct md_s *sc;

        LIST_FOREACH(sc, &md_softc_list, list) {
                if (sc->unit == unit)
                        break;
        }
        return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
        struct md_s *sc, *sc2;
        int error, max = -1;

        *errp = 0;
        LIST_FOREACH(sc2, &md_softc_list, list) {
                if (unit == sc2->unit) {
                        *errp = EBUSY;
                        return (NULL);
                }
                if (unit == -1 && sc2->unit > max)
                        max = sc2->unit;
        }
        if (unit == -1)
                unit = max + 1;
        sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
        sc->type = type;
        bioq_init(&sc->bio_queue);
        mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
        sc->unit = unit;
        sprintf(sc->name, "md%d", unit);
        LIST_INSERT_HEAD(&md_softc_list, sc, list);
        error = kthread_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
        if (error == 0)
                return (sc);
        LIST_REMOVE(sc, list);
        mtx_destroy(&sc->queue_mtx);
        free(sc, M_MD);
        *errp = error;
        return (NULL);
}

static void
mdinit(struct md_s *sc)
{
        struct g_geom *gp;
        struct g_provider *pp;

        g_topology_lock();
        gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
        gp->softc = sc;
        pp = g_new_providerf(gp, "md%d", sc->unit);
        pp->mediasize = sc->mediasize;
        pp->sectorsize = sc->sectorsize;
        sc->gp = gp;
        sc->pp = pp;
        g_error_provider(pp, 0);
        g_topology_unlock();
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

        if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
                return (EINVAL);
        sc->flags = mdio->md_options & MD_FORCE;
        /* Cast to pointer size, then to pointer to avoid warning */
        sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
        sc->pl_len = (size_t)sc->mediasize;
        return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
        uintptr_t sp;
        int error;
        off_t u;

        error = 0;
        if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
                return (EINVAL);
        if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
                return (EINVAL);
        /* Compression doesn't make sense if we have reserved space */
        if (mdio->md_options & MD_RESERVE)
                mdio->md_options &= ~MD_COMPRESS;
        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
        sc->indir = dimension(sc->mediasize / sc->sectorsize);
        sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
            0x1ff, 0);
        if (mdio->md_options & MD_RESERVE) {
                off_t nsectors;

                nsectors = sc->mediasize / sc->sectorsize;
                for (u = 0; u < nsectors; u++) {
                        sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
                        if (sp != 0)
                                error = s_write(sc->indir, u, sp);
                        else
                                error = ENOMEM;
                        if (error != 0)
                                break;
                }
        }
        return (error);
}
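
/*
 * Illustrative note: the 0x1ff alignment mask passed to uma_zcreate()
 * above keeps every backing sector 512-byte aligned, and MD_RESERVE
 * pre-populates the whole indir tree with one zeroed sector per
 * logical sector, so allocation failures surface at attach time rather
 * than at I/O time.
 */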

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
        char *tmpbuf;
        int error = 0;

        /*
         * Set credentials on our softc.
         */

        if (sc->cred)
                crfree(sc->cred);
        sc->cred = crhold(cred);

        /*
         * Horrible kludge to establish credentials for NFS  XXX.
         */

        if (sc->vnode) {
                struct uio auio;
                struct iovec aiov;

                tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
                bzero(&auio, sizeof(auio));

                aiov.iov_base = tmpbuf;
                aiov.iov_len = sc->sectorsize;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = 0;
                auio.uio_rw = UIO_READ;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_resid = aiov.iov_len;
                vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
                error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
                VOP_UNLOCK(sc->vnode, 0, curthread);
                free(tmpbuf, M_TEMP);
        }
        return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
        struct vattr vattr;
        struct nameidata nd;
        int error, flags, vfslocked;

        error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL);
        if (error != 0)
                return (error);
        flags = FREAD|FWRITE;
        /*
         * If the user specified that this is a read only device, unset the
         * FWRITE mask before trying to open the backing store.
         */
        if ((mdio->md_options & MD_READONLY) != 0)
                flags &= ~FWRITE;
        NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
        error = vn_open(&nd, &flags, 0, NULL);
        if (error != 0)
                return (error);
        vfslocked = NDHASGIANT(&nd);
        NDFREE(&nd, NDF_ONLY_PNBUF);
        if (nd.ni_vp->v_type != VREG) {
                error = EINVAL;
                goto bad;
        }
        error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td);
        if (error != 0)
                goto bad;
        if (VOP_ISLOCKED(nd.ni_vp, td) != LK_EXCLUSIVE) {
                vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY, td);
                if (nd.ni_vp->v_iflag & VI_DOOMED) {
                        /* Forced unmount. */
                        error = EBADF;
                        goto bad;
                }
        }
        nd.ni_vp->v_vflag |= VV_MD;
        VOP_UNLOCK(nd.ni_vp, 0, td);

        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
        if (!(flags & FWRITE))
                sc->flags |= MD_READONLY;
        sc->vnode = nd.ni_vp;

        error = mdsetcred(sc, td->td_ucred);
        if (error != 0) {
                sc->vnode = NULL;
                vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY, td);
                nd.ni_vp->v_vflag &= ~VV_MD;
                goto bad;
        }
        VFS_UNLOCK_GIANT(vfslocked);
        return (0);
bad:
        VOP_UNLOCK(nd.ni_vp, 0, td);
        (void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
        VFS_UNLOCK_GIANT(vfslocked);
        return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
        int vfslocked;

        if (sc->gp) {
                sc->gp->softc = NULL;
                g_topology_lock();
                g_wither_geom(sc->gp, ENXIO);
                g_topology_unlock();
                sc->gp = NULL;
                sc->pp = NULL;
        }
        mtx_lock(&sc->queue_mtx);
        sc->flags |= MD_SHUTDOWN;
        wakeup(sc);
        while (!(sc->flags & MD_EXITING))
                msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
        mtx_unlock(&sc->queue_mtx);
        mtx_destroy(&sc->queue_mtx);
        if (sc->vnode != NULL) {
                vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
                vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, td);
                sc->vnode->v_vflag &= ~VV_MD;
                VOP_UNLOCK(sc->vnode, 0, td);
                (void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
                    FREAD : (FREAD|FWRITE), sc->cred, td);
                VFS_UNLOCK_GIANT(vfslocked);
        }
        if (sc->cred != NULL)
                crfree(sc->cred);
        if (sc->object != NULL)
                vm_object_deallocate(sc->object);
        if (sc->indir)
                destroy_indir(sc, sc->indir);
        if (sc->uma)
                uma_zdestroy(sc->uma);

        LIST_REMOVE(sc, list);
        free(sc, M_MD);
        return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
        vm_ooffset_t npage;
        int error;

        /*
         * Range check.  Disallow zero-length media and any size that
         * is not a multiple of the page size.
         */
        if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
                return (EDOM);

        /*
         * Allocate an OBJT_SWAP object.
         *
         * Note the truncation.
         */

        npage = mdio->md_mediasize / PAGE_SIZE;
        if (mdio->md_fwsectors != 0)
                sc->fwsectors = mdio->md_fwsectors;
        if (mdio->md_fwheads != 0)
                sc->fwheads = mdio->md_fwheads;
        sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
            VM_PROT_DEFAULT, 0);
        if (sc->object == NULL)
                return (ENOMEM);
        sc->flags = mdio->md_options & MD_FORCE;
        if (mdio->md_options & MD_RESERVE) {
                if (swap_pager_reserve(sc->object, 0, npage) < 0) {
                        vm_object_deallocate(sc->object);
                        sc->object = NULL;
                        return (EDOM);
                }
        }
        error = mdsetcred(sc, td->td_ucred);
        if (error != 0) {
                vm_object_deallocate(sc->object);
                sc->object = NULL;
        }
        return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
        struct md_ioctl *mdio;
        struct md_s *sc;
        int error, i;

        if (md_debug)
                printf("mdctlioctl(%s %lx %p %x %p)\n",
                    devtoname(dev), cmd, addr, flags, td);

        mdio = (struct md_ioctl *)addr;
        if (mdio->md_version != MDIOVERSION)
                return (EINVAL);

        /*
         * The version number is checked up front because every ioctl
         * currently handled here reads an mdio; unknown ioctls still
         * return ENOIOCTL (not EINVAL) from the default case below.
         */
        error = 0;
        switch (cmd) {
        case MDIOCATTACH:
                switch (mdio->md_type) {
                case MD_MALLOC:
                case MD_PRELOAD:
                case MD_VNODE:
                case MD_SWAP:
                        break;
                default:
                        return (EINVAL);
                }
                if (mdio->md_options & MD_AUTOUNIT)
                        sc = mdnew(-1, &error, mdio->md_type);
                else
                        sc = mdnew(mdio->md_unit, &error, mdio->md_type);
                if (sc == NULL)
                        return (error);
                if (mdio->md_options & MD_AUTOUNIT)
                        mdio->md_unit = sc->unit;
                sc->mediasize = mdio->md_mediasize;
                if (mdio->md_sectorsize == 0)
                        sc->sectorsize = DEV_BSIZE;
                else
                        sc->sectorsize = mdio->md_sectorsize;
                error = EDOOFUS;
                switch (sc->type) {
                case MD_MALLOC:
                        sc->start = mdstart_malloc;
                        error = mdcreate_malloc(sc, mdio);
                        break;
                case MD_PRELOAD:
                        sc->start = mdstart_preload;
                        error = mdcreate_preload(sc, mdio);
                        break;
                case MD_VNODE:
                        sc->start = mdstart_vnode;
                        error = mdcreate_vnode(sc, mdio, td);
                        break;
                case MD_SWAP:
                        sc->start = mdstart_swap;
                        error = mdcreate_swap(sc, mdio, td);
                        break;
                }
                if (error != 0) {
                        mddestroy(sc, td);
                        return (error);
                }

                /* Prune off any residual fractional sector */
                i = sc->mediasize % sc->sectorsize;
                sc->mediasize -= i;

                mdinit(sc);
                return (0);
        case MDIOCDETACH:
                if (mdio->md_mediasize != 0 || mdio->md_options != 0)
                        return (EINVAL);

                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
                        return (EBUSY);
                return (mddestroy(sc, td));
        case MDIOCQUERY:
                sc = mdfind(mdio->md_unit);
                if (sc == NULL)
                        return (ENOENT);
                mdio->md_type = sc->type;
                mdio->md_options = sc->flags;
                mdio->md_mediasize = sc->mediasize;
                mdio->md_sectorsize = sc->sectorsize;
                if (sc->type == MD_VNODE)
                        error = copyout(sc->file, mdio->md_file,
                            strlen(sc->file) + 1);
                return (error);
        case MDIOCLIST:
                i = 1;
                LIST_FOREACH(sc, &md_softc_list, list) {
                        if (i == MDNPAD - 1)
                                mdio->md_pad[i] = -1;
                        else
                                mdio->md_pad[i++] = sc->unit;
                }
                mdio->md_pad[0] = i - 1;
                return (0);
        default:
                return (ENOIOCTL);
        }
}
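
/*
 * Illustrative userland sketch (not part of this file): attaching a
 * 64 MB swap-backed unit through the control device, roughly what
 * mdconfig(8) does.  Error handling is omitted and the names come from
 * <sys/mdioctl.h>:
 *
 *      struct md_ioctl mdio;
 *      int fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
 *
 *      bzero(&mdio, sizeof(mdio));
 *      mdio.md_version = MDIOVERSION;
 *      mdio.md_type = MD_SWAP;
 *      mdio.md_options = MD_AUTOUNIT;
 *      mdio.md_mediasize = 64 * 1024 * 1024;
 *      if (ioctl(fd, MDIOCATTACH, &mdio) == 0)
 *              printf("created md%d\n", mdio.md_unit);
 */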

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
        int error;

        sx_xlock(&md_sx);
        error = xmdctlioctl(dev, cmd, addr, flags, td);
        sx_xunlock(&md_sx);
        return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
        struct md_s *sc;
        int error;

        sc = mdnew(-1, &error, MD_PRELOAD);
        if (sc == NULL)
                return;
        sc->mediasize = length;
        sc->sectorsize = DEV_BSIZE;
        sc->pl_ptr = image;
        sc->pl_len = length;
        sc->start = mdstart_preload;
#ifdef MD_ROOT
        if (sc->unit == 0)
                rootdevnames[0] = "ufs:/dev/md0";
#endif
        mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
        caddr_t mod;
        caddr_t c;
        u_char *ptr, *name, *type;
        unsigned len;
        int i;

        /* figure out log2(NINDIR) */
        for (i = NINDIR, nshift = -1; i; nshift++)
                i >>= 1;

        mod = NULL;
        sx_init(&md_sx, "MD config lock");
        g_topology_unlock();
#ifdef MD_ROOT_SIZE
        sx_xlock(&md_sx);
        md_preloaded(mfs_root.start, sizeof(mfs_root.start));
        sx_xunlock(&md_sx);
#endif
        /* XXX: are preload_* static or do they need Giant? */
        while ((mod = preload_search_next_name(mod)) != NULL) {
                name = (char *)preload_search_info(mod, MODINFO_NAME);
                if (name == NULL)
                        continue;
                type = (char *)preload_search_info(mod, MODINFO_TYPE);
                if (type == NULL)
                        continue;
                if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
                        continue;
                c = preload_search_info(mod, MODINFO_ADDR);
                ptr = *(u_char **)c;
                c = preload_search_info(mod, MODINFO_SIZE);
                len = *(size_t *)c;
                printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
                    MD_NAME, mdunits, name, len, ptr);
                sx_xlock(&md_sx);
                md_preloaded(ptr, len);
                sx_xunlock(&md_sx);
        }
        status_dev = make_dev(&mdctl_cdevsw, MAXMINOR, UID_ROOT, GID_WHEEL,
            0600, MDCTL_NAME);
        g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
        struct md_s *mp;
        char *type;

        mp = gp->softc;
        if (mp == NULL)
                return;

        switch (mp->type) {
        case MD_MALLOC:
                type = "malloc";
                break;
        case MD_PRELOAD:
                type = "preload";
                break;
        case MD_VNODE:
                type = "vnode";
                break;
        case MD_SWAP:
                type = "swap";
                break;
        default:
                type = "unknown";
                break;
        }

        if (pp != NULL) {
                if (indent == NULL) {
                        sbuf_printf(sb, " u %d", mp->unit);
                        sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
                        sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
                        sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, " t %s", type);
                        if (mp->type == MD_VNODE && mp->vnode != NULL)
                                sbuf_printf(sb, " file %s", mp->file);
                } else {
                        sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
                            mp->unit);
                        sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
                            indent, (uintmax_t) mp->sectorsize);
                        sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
                            indent, (uintmax_t) mp->fwheads);
                        sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
                            indent, (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, "%s<length>%ju</length>\n",
                            indent, (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, "%s<type>%s</type>\n", indent,
                            type);
                        if (mp->type == MD_VNODE && mp->vnode != NULL)
                                sbuf_printf(sb, "%s<file>%s</file>\n",
                                    indent, mp->file);
                }
        }
}

static void
g_md_fini(struct g_class *mp __unused)
{

        sx_destroy(&md_sx);
        if (status_dev != NULL)
                destroy_dev(status_dev);
}