FreeBSD/Linux Kernel Cross Reference
sys/dev/dksubr.c
1 /* $NetBSD: dksubr.c,v 1.113 2021/04/15 00:32:50 rin Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe and Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.113 2021/04/15 00:32:50 rin Exp $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/stat.h>
38 #include <sys/proc.h>
39 #include <sys/ioctl.h>
40 #include <sys/device.h>
41 #include <sys/disk.h>
42 #include <sys/disklabel.h>
43 #include <sys/buf.h>
44 #include <sys/bufq.h>
45 #include <sys/vnode.h>
46 #include <sys/fcntl.h>
47 #include <sys/namei.h>
48 #include <sys/module.h>
49 #include <sys/syslog.h>
50
51 #include <dev/dkvar.h>
52 #include <miscfs/specfs/specdev.h> /* for v_rdev */
53
54 int dkdebug = 0;
55
56 #ifdef DEBUG
57 #define DKDB_FOLLOW 0x1
58 #define DKDB_INIT 0x2
59 #define DKDB_VNODE 0x4
60 #define DKDB_DUMP 0x8
61
62 #define IFDEBUG(x,y) if (dkdebug & (x)) y
63 #define DPRINTF(x,y) IFDEBUG(x, printf y)
64 #define DPRINTF_FOLLOW(y) DPRINTF(DKDB_FOLLOW, y)
65 #else
66 #define IFDEBUG(x,y)
67 #define DPRINTF(x,y)
68 #define DPRINTF_FOLLOW(y)
69 #endif
70
71 #define DKF_READYFORDUMP (DKF_INITED|DKF_TAKEDUMP)
72
73 static int dk_subr_modcmd(modcmd_t, void *);
74
75 #define DKLABELDEV(dev) \
76 (MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))
77
78 static void dk_makedisklabel(struct dk_softc *);
79 static int dk_translate(struct dk_softc *, struct buf *);
80 static void dk_done1(struct dk_softc *, struct buf *, bool);
81
82 void
83 dk_init(struct dk_softc *dksc, device_t dev, int dtype)
84 {
85
86 memset(dksc, 0x0, sizeof(*dksc));
87 dksc->sc_dtype = dtype;
88 dksc->sc_dev = dev;
89
90 strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
91 dksc->sc_dkdev.dk_name = dksc->sc_xname;
92 }
93
/*
 * Finish attachment of the generic disk: create the I/O queue lock,
 * mark the device dump-capable, and register it as an entropy source
 * unless the driver opted out with DKF_NO_RND.
 */
void
dk_attach(struct dk_softc *dksc)
{
	KASSERT(dksc->sc_dev != NULL);

	/* sc_iolock protects sc_bufq, sc_busy and sc_deferred. */
	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
	dksc->sc_flags |= DKF_READYFORDUMP;
#ifdef DIAGNOSTIC
	/* Diagnostic kernels warn about bad/oversized disklabels. */
	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
#endif

	if ((dksc->sc_flags & DKF_NO_RND) == 0) {
		/* Attach the device into the rnd source list. */
		rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
		    RND_TYPE_DISK, RND_FLAG_DEFAULT);
	}
}
111
/*
 * Undo dk_attach(): deregister the entropy source, clear the
 * dump-capable flags and destroy the I/O lock.
 */
void
dk_detach(struct dk_softc *dksc)
{
	if ((dksc->sc_flags & DKF_NO_RND) == 0) {
		/* Unhook the entropy source. */
		rnd_detach_source(&dksc->sc_rnd_source);
	}

	dksc->sc_flags &= ~DKF_READYFORDUMP;
	mutex_destroy(&dksc->sc_iolock);
}
123
/* ARGSUSED */
/*
 * Generic open routine.  Rejects opens of non-raw partitions while
 * wedges are configured, runs the driver's first-open hook, reads the
 * in-core disklabel on demand and records the partition in the
 * char/block open masks.  Serialized by dk_openlock.
 * Returns 0 on success or an errno.
 */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/* If no dkdriver attached, bail */
	if (dkd == NULL) {
		ret = ENXIO;
		goto done;
	}

	/*
	 * initialize driver for the first opener
	 */
	if (dk->dk_openmask == 0 && dkd->d_firstopen != NULL) {
		ret = (*dkd->d_firstopen)(dksc->sc_dev, dev, flags, fmt);
		if (ret)
			goto done;
	}

	/*
	 * If we're init'ed and there are no other open partitions then
	 * update the in-core disklabel.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			/* DKF_VLABEL marks the label as valid until last close. */
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}
201
/* ARGSUSED */
/*
 * Generic close routine.  Clears the partition from the open masks;
 * on last close runs the driver's d_lastclose hook and invalidates
 * the in-core label unless DIOCKLABEL asked us to keep it.
 * Serialized by dk_openlock.  Always succeeds.
 */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		/* Last close: let the driver clean up ... */
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		/* ... and drop the label unless it is pinned (DKF_KLABEL). */
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}
237
/*
 * Validate a buffer against the device/partition bounds and compute
 * bp->b_rawblkno, the absolute block number in units of the device's
 * logical sector size.
 *
 * Returns -1 when the buffer should be queued for I/O, or a value
 * >= 0 (bp->b_error, possibly 0 for a trivially complete transfer)
 * when the caller should finish the buffer with biodone().
 */
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int part;
	int wlabel;
	daddr_t blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	/* Writes over the label only pass when write-label is enabled. */
	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		/* Raw partition: check against the whole media. */
		uint64_t numblocks = btodb(numsecs * secsize);
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
			goto done;
	} else {
		/* Regular partition: check against the disklabel. */
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_rawblkno = blkno;

	return -1;

done:
	/* Nothing transferred; b_error may be 0 (benign) or an errno. */
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}
298
/*
 * Common front end for dk_strategy()/dk_strategy_defer(): fail the
 * buffer if the device is not initialized, otherwise translate it.
 * Returns 1 when the buffer was completed here (biodone() called),
 * 0 when the caller should queue it.
 */
static int
dk_strategy1(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
	    dksc->sc_xname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return 1;
	}

	/* dk_translate() returns -1 iff the buffer should be queued. */
	error = dk_translate(dksc, bp);
	if (error >= 0) {
		biodone(bp);
		return 1;
	}

	return 0;
}
323
/*
 * Generic strategy entry point: validate/translate the buffer and,
 * if it was not completed immediately, queue it and start the unit.
 */
void
dk_strategy(struct dk_softc *dksc, struct buf *bp)
{

	/* dk_strategy1() finishes the buffer itself on error. */
	if (dk_strategy1(dksc, bp) != 0)
		return;

	/*
	 * Queue buffer and start unit
	 */
	dk_start(dksc, bp);
}
338
/*
 * Like dk_strategy(), but only enqueue the buffer; the caller is
 * expected to start the device later (see dk_start()).
 * Returns 0 on success or the translation error.
 */
int
dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return error;

	/*
	 * Queue buffer only
	 */
	mutex_enter(&dksc->sc_iolock);
	disk_wait(&dksc->sc_dkdev);
	bufq_put(dksc->sc_bufq, bp);
	mutex_exit(&dksc->sc_iolock);

	return 0;
}
358
359 int
360 dk_strategy_pending(struct dk_softc *dksc)
361 {
362 struct buf *bp;
363
364 if (!(dksc->sc_flags & DKF_INITED)) {
365 DPRINTF_FOLLOW(("%s: not inited\n", __func__));
366 return 0;
367 }
368
369 mutex_enter(&dksc->sc_iolock);
370 bp = bufq_peek(dksc->sc_bufq);
371 mutex_exit(&dksc->sc_iolock);
372
373 return bp != NULL;
374 }
375
/*
 * Queue an optional buffer and run the I/O queue, handing buffers to
 * the driver's d_diskstart method.  Safe to call concurrently: the
 * sc_busy counter lets only one thread run the queue while others
 * merely bump it to force a re-scan.  Called with sc_iolock released.
 */
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL) {
		bp->b_ci = curcpu();
		disk_wait(&dksc->sc_dkdev);
		bufq_put(dksc->sc_bufq, bp);
	}

	/*
	 * If another thread is running the queue, increment
	 * busy counter to 2 so that the queue is retried,
	 * because the driver may now accept additional
	 * requests.
	 */
	if (dksc->sc_busy < 2)
		dksc->sc_busy++;
	if (dksc->sc_busy > 1)
		goto done;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This keeps order of I/O operations, unlike bufq_put.
	 */

	while (dksc->sc_busy > 0) {

		/* A previously failed buffer is retried first. */
		bp = dksc->sc_deferred;
		dksc->sc_deferred = NULL;

		if (bp == NULL)
			bp = bufq_get(dksc->sc_bufq);

		while (bp != NULL) {

			/* Drop the lock while the driver starts the I/O. */
			disk_busy(&dksc->sc_dkdev);
			mutex_exit(&dksc->sc_iolock);
			error = dkd->d_diskstart(dksc->sc_dev, bp);
			mutex_enter(&dksc->sc_iolock);
			if (error == EAGAIN || error == ENOMEM) {
				/*
				 * Not a disk error. Retry later.
				 */
				KASSERT(dksc->sc_deferred == NULL);
				dksc->sc_deferred = bp;
				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
				disk_wait(&dksc->sc_dkdev);
				break;
			}

			/* Hard error: fail the buffer immediately. */
			if (error != 0) {
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				dk_done1(dksc, bp, false);
			}

			bp = bufq_get(dksc->sc_bufq);
		}

		/* Re-scan once more if another thread bumped sc_busy. */
		dksc->sc_busy--;
	}
done:
	mutex_exit(&dksc->sc_iolock);
}
456
/*
 * Complete an I/O request: log disk errors, update accounting,
 * feed the entropy pool and call biodone().  'lock' says whether
 * sc_iolock must be taken here (false when the caller, dk_start(),
 * already holds it).
 */
static void
dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			dk->dk_label);
		printf("\n");
	}

	if (lock)
		mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));

	/* I/O completion times/locations are an entropy source. */
	if ((dksc->sc_flags & DKF_NO_RND) == 0)
		rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);
	if (lock)
		mutex_exit(&dksc->sc_iolock);

	biodone(bp);
}
481
/*
 * Driver-facing completion routine; takes sc_iolock internally.
 */
void
dk_done(struct dk_softc *dksc, struct buf *bp)
{
	dk_done1(dksc, bp, true);
}
487
/*
 * Abort all queued I/O: fail a saved deferred buffer with EIO and
 * drain the buffer queue.  Used when the device goes away.
 */
void
dk_drain(struct dk_softc *dksc)
{
	struct buf *bp;

	mutex_enter(&dksc->sc_iolock);
	/* The deferred buffer is not on the bufq; fail it explicitly. */
	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;
	if (bp != NULL) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);
}
504
505 int
506 dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
507 {
508 const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
509 unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
510 struct buf tmp, *bp = &tmp;
511 int maxsz;
512 int error = 0;
513
514 KASSERT(len >= 0);
515
516 DPRINTF_FOLLOW(("%s(%s, %p, 0x"PRIx64", %jd, %jd)\n", __func__,
517 dksc->sc_xname, dksc, (intmax_t)pos, (intmax_t)len));
518
519 if (!(dksc->sc_flags & DKF_INITED)) {
520 DPRINTF_FOLLOW(("%s: not inited\n", __func__));
521 return ENXIO;
522 }
523
524 if (secsize == 0 || (pos % secsize) != 0 || (len % secsize) != 0)
525 return EINVAL;
526
527 /* largest value that b_bcount can store */
528 maxsz = rounddown(INT_MAX, secsize);
529
530 while (len > 0) {
531 /* enough data to please the bounds checking code */
532 bp->b_dev = dev;
533 bp->b_blkno = (daddr_t)(pos / secsize);
534 bp->b_bcount = uimin(len, maxsz);
535 bp->b_flags = B_WRITE;
536
537 error = dk_translate(dksc, bp);
538 if (error >= 0)
539 break;
540
541 error = dkd->d_discard(dksc->sc_dev,
542 (off_t)bp->b_rawblkno * secsize,
543 (off_t)bp->b_bcount);
544 if (error)
545 break;
546
547 pos += bp->b_bcount;
548 len -= bp->b_bcount;
549 }
550
551 return error;
552 }
553
/*
 * Return the size of a swap partition in DEV_BSIZE blocks, for use
 * by the dump/swap code.  Opens the device temporarily if needed.
 * Returns -1 if the device is not initialized, cannot be opened, or
 * the partition is not of type FS_SWAP.
 */
int
dk_size(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	int is_open;
	int part;
	int size;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return -1;

	part = DISKPART(dev);
	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);

	/* Open transiently so the disklabel is valid. */
	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
		return -1;

	lp = dksc->sc_dkdev.dk_label;
	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	/* Undo the transient open. */
	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
		return -1;

	return size;
}
584
/*
 * Generic ioctl handler for dk-based drivers.  Performs permission
 * and initialization checks, lets disk_ioctl() handle the common
 * disk(9) ioctls, then services the label, wedge and bufq-strategy
 * commands itself.  Returns 0, an errno, or ENOTTY for unknown cmds.
 */
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
	    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	struct disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPARTINFO:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCRMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	/* Give the generic disk(9) layer first crack at the command. */
	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		/* Old-style labels are converted to the current layout. */
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		/* DKF_LABELLING lets writes to the label area pass. */
		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			/* WDINFO additionally writes the label to disk. */
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		/* Keep (or drop) the in-core label across last close. */
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		/* Allow (or forbid) writes over the on-disk label. */
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		/* Report the name of the bufq strategy in use. */
		mutex_enter(&dksc->sc_iolock);
		if (dksc->sc_bufq != NULL)
			strlcpy(dks->dks_name,
			    bufq_getstrategyname(dksc->sc_bufq),
			    sizeof(dks->dks_name));
		else
			error = EINVAL;
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;
		break;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		/* Swap queues under the lock, moving pending buffers over. */
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		if (old)
			bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		if (old)
			bufq_free(old);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}
759
760 /*
761 * dk_dump dumps all of physical memory into the partition specified.
762 * This requires substantially more framework than {s,w}ddump, and hence
763 * is probably much more fragile.
764 *
765 */
766
767 #define DKFF_READYFORDUMP(x) (((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
768 static volatile int dk_dumping = 0;
769
770 /* ARGSUSED */
771 int
772 dk_dump(struct dk_softc *dksc, dev_t dev,
773 daddr_t blkno, void *vav, size_t size, int flags)
774 {
775 const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
776 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
777 char *va = vav;
778 struct disklabel *lp;
779 struct partition *p;
780 int part, towrt, maxblkcnt, nblk;
781 int maxxfer, rv = 0;
782
783 /*
784 * ensure that we consider this device to be safe for dumping,
785 * and that the device is configured.
786 */
787 if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
788 DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
789 dksc->sc_flags));
790 return ENXIO;
791 }
792
793 /* ensure that we are not already dumping */
794 if (dk_dumping)
795 return EFAULT;
796 if ((flags & DK_DUMP_RECURSIVE) == 0)
797 dk_dumping = 1;
798
799 if (dkd->d_dumpblocks == NULL) {
800 DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
801 return ENXIO;
802 }
803
804 /* device specific max transfer size */
805 maxxfer = MAXPHYS;
806 if (dkd->d_iosize != NULL)
807 (*dkd->d_iosize)(dksc->sc_dev, &maxxfer);
808
809 /* Convert to disk sectors. Request must be a multiple of size. */
810 part = DISKPART(dev);
811 lp = dksc->sc_dkdev.dk_label;
812 if ((size % lp->d_secsize) != 0) {
813 DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
814 return EFAULT;
815 }
816 towrt = size / lp->d_secsize;
817 blkno = dbtob(blkno) / lp->d_secsize; /* blkno in secsize units */
818
819 p = &lp->d_partitions[part];
820 if (part == RAW_PART) {
821 if (p->p_fstype != FS_UNUSED) {
822 DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
823 p->p_fstype));
824 return ENXIO;
825 }
826 /* Check whether dump goes to a wedge */
827 if (dksc->sc_dkdev.dk_nwedges == 0) {
828 DPRINTF(DKDB_DUMP, ("%s: dump to raw\n", __func__));
829 return ENXIO;
830 }
831 /* Check transfer bounds against media size */
832 if (blkno < 0 || (blkno + towrt) > dg->dg_secperunit) {
833 DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
834 "nsects=%jd\n", __func__, (intmax_t)blkno, towrt, dg->dg_secperunit));
835 return EINVAL;
836 }
837 } else {
838 int nsects, sectoff;
839
840 if (p->p_fstype != FS_SWAP) {
841 DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
842 p->p_fstype));
843 return ENXIO;
844 }
845 nsects = p->p_size;
846 sectoff = p->p_offset;
847
848 /* Check transfer bounds against partition size. */
849 if ((blkno < 0) || ((blkno + towrt) > nsects)) {
850 DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
851 "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
852 return EINVAL;
853 }
854
855 /* Offset block number to start of partition. */
856 blkno += sectoff;
857 }
858
859 /* Start dumping and return when done. */
860 maxblkcnt = howmany(maxxfer, lp->d_secsize);
861 while (towrt > 0) {
862 nblk = uimin(maxblkcnt, towrt);
863
864 if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
865 != 0) {
866 DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
867 rv));
868 return rv;
869 }
870
871 towrt -= nblk;
872 blkno += nblk;
873 va += nblk * lp->d_secsize;
874 }
875
876 if ((flags & DK_DUMP_RECURSIVE) == 0)
877 dk_dumping = 0;
878
879 return 0;
880 }
881
/* ARGSUSED */
/*
 * Synthesize a default disklabel from the disk geometry: a single
 * raw partition covering the (clamped) unit, fictitious packname,
 * and a valid checksum.  The driver may post-process it via d_label.
 */
void
dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(lp, 0, sizeof(*lp));

	/* d_secperunit is only 32 bits wide; clamp large devices. */
	if (dg->dg_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = dg->dg_secperunit;
	lp->d_secsize = dg->dg_secsize;
	lp->d_nsectors = dg->dg_nsectors;
	lp->d_ntracks = dg->dg_ntracks;
	lp->d_ncylinders = dg->dg_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
	lp->d_type = dksc->sc_dtype;
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;

	/* Let the driver customize the label before checksumming. */
	if (dkd->d_label)
		dkd->d_label(dksc->sc_dev, lp);

	lp->d_checksum = dkcksum(lp);
}
921
/* ARGSUSED */
/*
 * Load the in-core disklabel: start from the default label, try to
 * read a real one from disk, fall back to dk_makedisklabel() on
 * failure, and optionally sanity-check the label against the
 * geometry (DKF_LABELSANITY).
 */
void
dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct partition *pp;
	int i, lpratio, dgratio;
	const char *errstring;

	memset(clp, 0x0, sizeof(*clp));
	dk_getdefaultlabel(dksc, lp);
	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
	if (errstring) {
		/* No readable on-disk label; conjure one instead. */
		dk_makedisklabel(dksc);
		if (dksc->sc_flags & DKF_WARNLABEL)
			printf("%s: %s\n", dksc->sc_xname, errstring);
		return;
	}

	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
		return;

	/* Convert sector counts to multiple of DEV_BSIZE for comparison */
	lpratio = dgratio = 1;
	if (lp->d_secsize > DEV_BSIZE)
		lpratio = lp->d_secsize / DEV_BSIZE;
	if (dg->dg_secsize > DEV_BSIZE)
		dgratio = dg->dg_secsize / DEV_BSIZE;

	/* Sanity check */
	if ((uint64_t)lp->d_secperunit * lpratio > dg->dg_secperunit * dgratio)
		printf("WARNING: %s: "
		    "total unit size in disklabel (%" PRIu64 ") "
		    "!= the size of %s (%" PRIu64 ")\n", dksc->sc_xname,
		    (uint64_t)lp->d_secperunit * lpratio, dksc->sc_xname,
		    dg->dg_secperunit * dgratio);
	else if (lp->d_secperunit < UINT32_MAX &&
	    (uint64_t)lp->d_secperunit * lpratio < dg->dg_secperunit * dgratio)
		printf("%s: %" PRIu64 " trailing sectors not covered"
		    " by disklabel\n", dksc->sc_xname,
		    (dg->dg_secperunit * dgratio)
		    - (lp->d_secperunit * lpratio));

	/* Warn about partitions that extend past the end of the device. */
	for (i=0; i < lp->d_npartitions; i++) {
		uint64_t pend;

		pp = &lp->d_partitions[i];
		pend = pp->p_offset + pp->p_size;
		if (pend * lpratio > dg->dg_secperunit * dgratio)
			printf("WARNING: %s: end of partition `%c' exceeds "
			    "the size of %s (%" PRIu64 ")\n", dksc->sc_xname,
			    'a' + i, dksc->sc_xname,
			    dg->dg_secperunit * dgratio);
	}
}
981
982 /*
983 * Heuristic to conjure a disklabel if reading a disklabel failed.
984 *
985 * This is to allow the raw partition to be used for a filesystem
986 * without caring about the write protected label sector.
987 *
 988  * If the driver provides its own callback, use that instead.
989 */
990 /* ARGSUSED */
/* ARGSUSED */
/*
 * Fallback used by dk_getdisklabel() when no on-disk label could be
 * read: tag the default label as such and mark the raw partition as
 * FS_BSDFFS (unless the driver supplies its own d_label callback).
 */
static void
dk_makedisklabel(struct dk_softc *dksc)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;

	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));

	if (dkd->d_label)
		dkd->d_label(dksc->sc_dev, lp);
	else
		/* Allow the raw partition to carry a filesystem. */
		lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;

	/* The label must checksum correctly to be accepted later. */
	lp->d_checksum = dkcksum(lp);
}
1006
1007 MODULE(MODULE_CLASS_MISC, dk_subr, NULL);
1008
1009 static int
1010 dk_subr_modcmd(modcmd_t cmd, void *arg)
1011 {
1012 switch (cmd) {
1013 case MODULE_CMD_INIT:
1014 case MODULE_CMD_FINI:
1015 return 0;
1016 case MODULE_CMD_STAT:
1017 case MODULE_CMD_AUTOUNLOAD:
1018 default:
1019 return ENOTTY;
1020 }
1021 }
Cache object: 16c56cc26e08c913573a9d8fd5840f82
|