FreeBSD/Linux Kernel Cross Reference
sys/dev/scsipi/sd.c
1 /* $NetBSD: sd.c,v 1.258.2.1 2007/07/31 21:47:15 liamjfoy Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Originally written by Julian Elischer (julian@dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.258.2.1 2007/07/31 21:47:15 liamjfoy Exp $");
58
59 #include "opt_scsi.h"
60 #include "rnd.h"
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/file.h>
66 #include <sys/stat.h>
67 #include <sys/ioctl.h>
68 #include <sys/scsiio.h>
69 #include <sys/buf.h>
70 #include <sys/bufq.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/errno.h>
74 #include <sys/device.h>
75 #include <sys/disklabel.h>
76 #include <sys/disk.h>
77 #include <sys/proc.h>
78 #include <sys/conf.h>
79 #include <sys/vnode.h>
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <dev/scsipi/scsi_spc.h>
85 #include <dev/scsipi/scsipi_all.h>
86 #include <dev/scsipi/scsi_all.h>
87 #include <dev/scsipi/scsipi_disk.h>
88 #include <dev/scsipi/scsi_disk.h>
89 #include <dev/scsipi/scsiconf.h>
90 #include <dev/scsipi/scsipi_base.h>
91 #include <dev/scsipi/sdvar.h>
92
93 #include <prop/proplib.h>
94
/* Extract the unit / partition from a dev_t, and rebuild one. */
#define	SDUNIT(dev)			DISKUNIT(dev)
#define	SDPART(dev)			DISKPART(dev)
#define	SDMINOR(unit, part)		DISKMINOR(unit, part)
#define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)

/* The raw-partition node of the same unit; used for disklabel I/O. */
#define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))

/* Logical block size assumed when the device does not report a sane one. */
#define	SD_DEFAULT_BLKSIZE	512

/* Internal helpers (definitions appear later in this file). */
static void	sdminphys(struct buf *);
static void	sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
static int	sdgetdisklabel(struct sd_softc *);
static void	sdstart(struct scsipi_periph *);
static void	sdrestart(void *);
static void	sddone(struct scsipi_xfer *, int);
static void	sd_shutdown(void *);
static int	sd_interpret_sense(struct scsipi_xfer *);

static int	sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
		    int, int *);
static int	sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
		    int);
static int	sd_validate_blksize(struct scsipi_periph *, int);
static u_int64_t sd_read_capacity(struct scsipi_periph *, int *, int flags);
static int	sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
		    int);
static int	sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
static int	sd_get_parms(struct sd_softc *, struct disk_parms *, int);
static int	sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
		    int);
static int	sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
		    int);

static int	sd_flush(struct sd_softc *, int);
static int	sd_getcache(struct sd_softc *, int *);
static int	sd_setcache(struct sd_softc *, int);

/* Autoconfiguration glue. */
static int	sdmatch(struct device *, struct cfdata *, void *);
static void	sdattach(struct device *, struct device *, void *);
static int	sdactivate(struct device *, enum devact);
static int	sddetach(struct device *, int);
static void	sd_set_properties(struct sd_softc *);

CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
    sdactivate);

extern struct cfdriver sd_cd;

/*
 * INQUIRY (device type, removable flag) patterns this driver claims.
 * Empty vendor/product/revision strings match any device of that type.
 */
static const struct scsipi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "", "", ""},
	{T_DIRECT, T_REMOV,
	 "", "", ""},
	{T_OPTICAL, T_FIXED,
	 "", "", ""},
	{T_OPTICAL, T_REMOV,
	 "", "", ""},
	{T_SIMPLE_DIRECT, T_FIXED,
	 "", "", ""},
	{T_SIMPLE_DIRECT, T_REMOV,
	 "", "", ""},
};

static dev_type_open(sdopen);
static dev_type_close(sdclose);
static dev_type_read(sdread);
static dev_type_write(sdwrite);
static dev_type_ioctl(sdioctl);
static dev_type_strategy(sdstrategy);
static dev_type_dump(sddump);
static dev_type_size(sdsize);

/* Block-device entry points. */
const struct bdevsw sd_bdevsw = {
	sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
};

/* Character-device entry points. */
const struct cdevsw sd_cdevsw = {
	sdopen, sdclose, sdread, sdwrite, sdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Hooks handed to the generic disk(9) layer at attach time. */
static struct dkdriver sddkdriver = { sdstrategy, sdminphys };

/* Callbacks the scsipi midlayer invokes on our behalf. */
static const struct scsipi_periphsw sd_switch = {
	sd_interpret_sense,	/* check our error handler first */
	sdstart,		/* have a queue, served by this */
	NULL,			/* have no async handler */
	sddone,			/* deal with stats at interrupt time */
};

/* Reply buffer large enough for either a 6- or 10-byte MODE SENSE. */
struct sd_mode_sense_data {
	/*
	 * XXX
	 * We are not going to parse this as-is -- it just has to be large
	 * enough.
	 */
	union {
		struct scsi_mode_parameter_header_6 small;
		struct scsi_mode_parameter_header_10 big;
	} header;
	struct scsi_general_block_descriptor blk_desc;
	union scsi_disk_pages pages;
};
198
199 /*
200 * The routine called by the low level scsi routine when it discovers
201 * A device suitable for this driver
202 */
203 static int
204 sdmatch(struct device *parent, struct cfdata *match,
205 void *aux)
206 {
207 struct scsipibus_attach_args *sa = aux;
208 int priority;
209
210 (void)scsipi_inqmatch(&sa->sa_inqbuf,
211 sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
212 sizeof(sd_patterns[0]), &priority);
213
214 return (priority);
215 }
216
217 /*
218 * Attach routine common to atapi & scsi.
219 */
/*
 * Attach routine common to atapi & scsi: record INQUIRY-derived
 * identity and quirks, wire the periph to this driver, register with
 * the disk(9) layer, probe the drive's geometry/capacity, and set up
 * the shutdown hook, entropy source, and wedge discovery.
 */
static void
sdattach(struct device *parent, struct device *self, void *aux)
{
	struct sd_softc *sd = device_private(self);
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;
	int error, result;
	struct disk_parms *dp = &sd->params;
	char pbuf[9];

	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));

	/* Remember the device type and product name from INQUIRY. */
	sd->type = (sa->sa_inqbuf.type & SID_TYPE);
	/*
	 * NOTE(review): strncpy does not NUL-terminate if the product
	 * string fills sd->name exactly -- confirm sd->name consumers
	 * tolerate that.
	 */
	strncpy(sd->name, sa->sa_inqbuf.product, sizeof(sd->name));
	/* Simplified direct-access devices speak a restricted command set. */
	if (sd->type == T_SIMPLE_DIRECT)
		periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;

	/* SCSI-1 (version 0) devices get 6-byte-CDB clamping; see sdminphys(). */
	if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
	    periph->periph_version == 0)
		sd->flags |= SDF_ANCIENT;

	bufq_alloc(&sd->buf_queue, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* Used by sdstart() to retry after a transient xfer shortage. */
	callout_init(&sd->sc_callout);

	/*
	 * Store information needed to contact our base driver
	 */
	sd->sc_periph = periph;

	periph->periph_dev = &sd->sc_dev;
	periph->periph_switch = &sd_switch;

	/*
	 * Increase our openings to the maximum-per-periph
	 * supported by the adapter.  This will either be
	 * clamped down or grown by the adapter if necessary.
	 */
	periph->periph_openings =
	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
	periph->periph_flags |= PERIPH_GROW_OPENINGS;

	/*
	 * Initialize and attach the disk structure.
	 */
	sd->sc_dk.dk_driver = &sddkdriver;
	sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
	disk_attach(&sd->sc_dk);

	/*
	 * Use the subdriver to request information regarding the drive.
	 */
	aprint_naive("\n");
	aprint_normal("\n");

	error = scsipi_test_unit_ready(periph,
	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
	/* Report what we found on the console. */
	aprint_normal("%s: ", sd->sc_dev.dv_xname);
	switch (result) {
	case SDGP_RESULT_OK:
		format_bytes(pbuf, sizeof(pbuf),
		    (u_int64_t)dp->disksize * dp->blksize);
		aprint_normal(
		"%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
		    (unsigned long long)dp->disksize);
		break;

	case SDGP_RESULT_OFFLINE:
		aprint_normal("drive offline");
		break;

	case SDGP_RESULT_UNFORMATTED:
		aprint_normal("unformatted media");
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result from get_parms");
		break;
#endif
	}
	aprint_normal("\n");

	/*
	 * Establish a shutdown hook so that we can ensure that
	 * our data has actually made it onto the platter at
	 * shutdown time.  Note that this relies on the fact
	 * that the shutdown hook code puts us at the head of
	 * the list (thus guaranteeing that our hook runs before
	 * our ancestors').
	 */
	if ((sd->sc_sdhook =
	    shutdownhook_establish(sd_shutdown, sd)) == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sd->sc_dev.dv_xname);

#if NRND > 0
	/*
	 * attach the device into the random source list
	 */
	rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	/* Discover wedges on this disk. */
	dkwedge_discover(&sd->sc_dk);

	sd_set_properties(sd);
}
336
337 static int
338 sdactivate(struct device *self, enum devact act)
339 {
340 int rv = 0;
341
342 switch (act) {
343 case DVACT_ACTIVATE:
344 rv = EOPNOTSUPP;
345 break;
346
347 case DVACT_DEACTIVATE:
348 /*
349 * Nothing to do; we key off the device's DVF_ACTIVE.
350 */
351 break;
352 }
353 return (rv);
354 }
355
/*
 * Detach the device: revoke user access, cancel pending work, drain
 * the buffer queue and in-flight commands, and unregister from the
 * disk list, shutdown hooks, and the entropy pool.
 */
static int
sddetach(struct device *self, int flags)
{
	struct sd_softc *sd = device_private(self);
	int s, bmaj, cmaj, i, mn;

	/* locate the major number */
	bmaj = bdevsw_lookup_major(&sd_bdevsw);
	cmaj = cdevsw_lookup_major(&sd_cdevsw);

	/* Nuke the vnodes for any open instances */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = SDMINOR(device_unit(self), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* kill any pending restart */
	callout_stop(&sd->sc_callout);

	/* Delete all of our wedges. */
	dkwedge_delall(&sd->sc_dk);

	/* Block bio interrupts while the queue is torn down. */
	s = splbio();

	/* Kill off any queued buffers. */
	bufq_drain(sd->buf_queue);

	bufq_free(sd->buf_queue);

	/* Kill off any pending commands. */
	scsipi_kill_pending(sd->sc_periph);

	splx(s);

	/* Detach from the disk list. */
	disk_detach(&sd->sc_dk);

	/* Get rid of the shutdown hook. */
	shutdownhook_disestablish(sd->sc_sdhook);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sd->rnd_source);
#endif

	return (0);
}
404
405 /*
406 * open the device. Make sure the partition info is a up-to-date as can be.
407 */
/*
 * open the device.  Make sure the partition info is as up-to-date as
 * can be.  On first open: spin up the pack if needed, lock removable
 * media in, load device parameters and the disklabel.  Errors unwind
 * through bad1/bad2/bad3 in reverse order of acquisition.
 */
static int
sdopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	struct scsipi_adapter *adapt;
	int unit, part;
	int error;

	/* Resolve and validate the softc for this unit. */
	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (ENXIO);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (ENXIO);

	if (!device_is_active(&sd->sc_dev))
		return (ENODEV);

	part = SDPART(dev);

	/* Serialize opens/closes on this disk. */
	if ((error = lockmgr(&sd->sc_dk.dk_openlock, LK_EXCLUSIVE, NULL)) != 0)
		return (error);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (sd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
		error = EBUSY;
		goto bad1;
	}

	periph = sd->sc_periph;
	adapt = periph->periph_channel->chan_adapter;

	SC_DEBUG(periph, SCSIPI_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if (sd->sc_dk.dk_openmask == 0 &&
	    (error = scsipi_adapter_addref(adapt)) != 0)
		goto bad1;

	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partition
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
		    (part != RAW_PART || fmt != S_IFCHR)) {
			error = EIO;
			goto bad2;
		}
	} else {
		int silent;

		/* Raw-char opens probe quietly (used for bare-device access). */
		if (part == RAW_PART && fmt == S_IFCHR)
			silent = XS_CTL_SILENT;
		else
			silent = 0;

		/* Check that it is still responding and ok. */
		error = scsipi_test_unit_ready(periph,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
		    silent);

		/*
		 * Start the pack spinning if necessary.  Always allow the
		 * raw parition to be opened, for raw IOCTLs.  Data transfers
		 * will check for SDEV_MEDIA_LOADED.
		 */
		if (error == EIO) {
			int error2;

			error2 = scsipi_start(periph, SSS_START, silent);
			switch (error2) {
			case 0:
				/* Spin-up cured the EIO. */
				error = 0;
				break;
			case EIO:
			case EINVAL:
				/* Keep the original EIO from TEST UNIT READY. */
				break;
			default:
				error = error2;
				break;
			}
		}
		if (error) {
			/* Silent (raw-char) opens proceed despite the error. */
			if (silent)
				goto out;
			goto bad2;
		}

		periph->periph_flags |= PERIPH_OPEN;

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			/* Lock the pack in. */
			error = scsipi_prevent(periph, SPAMR_PREVENT_DT,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE |
			    XS_CTL_SILENT);
			if (error)
				goto bad3;
		}

		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			int param_error;
			periph->periph_flags |= PERIPH_MEDIA_LOADED;

			/*
			 * Load the physical device parameters.
			 *
			 * Note that if media is present but unformatted,
			 * we allow the open (so that it can be formatted!).
			 * The drive should refuse real I/O, if the media is
			 * unformatted.
			 */
			if ((param_error = sd_get_parms(sd, &sd->params, 0))
			     == SDGP_RESULT_OFFLINE) {
				error = ENXIO;
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
				goto bad3;
			}
			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));

			/* Load the partition info if not already loaded. */
			if (param_error == 0) {
				if ((sdgetdisklabel(sd) != 0) && (part != RAW_PART)) {
					error = EIO;
					goto bad3;
				}
				SC_DEBUG(periph, SCSIPI_DB3,
				     ("Disklabel loaded "));
			}
		}
	}

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= sd->sc_dk.dk_label->d_npartitions ||
	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad3;
	}

 out:	/* Record this open in the char/block open masks. */
	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
	(void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
	return (0);

 bad3:
	/* Undo pack lock / PERIPH_OPEN if nothing else holds the device. */
	if (sd->sc_dk.dk_openmask == 0) {
		if (periph->periph_flags & PERIPH_REMOVABLE)
			scsipi_prevent(periph, SPAMR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE |
			    XS_CTL_SILENT);
		periph->periph_flags &= ~PERIPH_OPEN;
	}

 bad2:
	/* Drop the adapter reference taken for the first open. */
	if (sd->sc_dk.dk_openmask == 0)
		scsipi_adapter_delref(adapt);

 bad1:
	(void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
	return (error);
}
592
593 /*
594 * close the device.. only called if we are the LAST occurence of an open
595 * device. Convenient now but usually a pain.
596 */
/*
 * close the device.. only called if we are the LAST occurence of an open
 * device.  Convenient now but usually a pain.
 *
 * On last close: flush the write cache if dirty, drain outstanding
 * commands, unlock removable media, and release the adapter reference.
 */
static int
sdclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int part = SDPART(dev);
	int error;

	/* Serialize against concurrent opens/closes. */
	if ((error = lockmgr(&sd->sc_dk.dk_openlock, LK_EXCLUSIVE, NULL)) != 0)
		return (error);

	/* Clear this partition's bit in the appropriate open mask. */
	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	if (sd->sc_dk.dk_openmask == 0) {
		/*
		 * If the disk cache needs flushing, and the disk supports
		 * it, do it now.
		 */
		if ((sd->flags & SDF_DIRTY) != 0) {
			if (sd_flush(sd, 0)) {
				printf("%s: cache synchronization failed\n",
				    sd->sc_dev.dv_xname);
				sd->flags &= ~SDF_FLUSHING;
			} else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		}

		/* Wait for any in-flight commands before unlocking media. */
		scsipi_wait_drain(periph);

		if (periph->periph_flags & PERIPH_REMOVABLE)
			scsipi_prevent(periph, SPAMR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_SILENT);
		periph->periph_flags &= ~PERIPH_OPEN;

		/* Drain again: the PREVENT/ALLOW above may still be queued. */
		scsipi_wait_drain(periph);

		scsipi_adapter_delref(adapt);
	}

	(void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
	return (0);
}
651
652 /*
653 * Actually translate the requested transfer into one the physical driver
654 * can understand. The transfer is described by a buf and will include
655 * only one physical transfer.
656 */
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 *
 * Validates the request, converts bp->b_blkno (DEV_BSIZE units) into an
 * absolute device-block address in bp->b_rawblkno, queues the buf, and
 * kicks sdstart().  Errors complete the buf immediately via biodone().
 */
static void
sdstrategy(struct buf *bp)
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct disklabel *lp;
	daddr_t blkno;
	int s;
	boolean_t sector_aligned;

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
	    ("%d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
	    !device_is_active(&sd->sc_dev)) {
		if (periph->periph_flags & PERIPH_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	lp = sd->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks, offset must not be
	 * negative.
	 */
	if (lp->d_secsize == DEV_BSIZE) {
		/* Power-of-two fast path: mask instead of modulo. */
		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
	} else {
		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
	}
	if (!sector_aligned || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/*
	 * If it's a null transfer, return immediatly
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer. if error, process.
	 * If end of partition, just return.
	 */
	if (SDPART(bp->b_dev) == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    sd->params.disksize512) <= 0)
			goto done;
	} else {
		/* Writes over the label are allowed only when WLABEL is set. */
		if (bounds_check_with_label(&sd->sc_dk, bp,
		    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
			goto done;
	}

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (SDPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk.
	 *
	 * XXX Only do disksort() if the current operating mode does not
	 * XXX include tagged queueing.
	 */
	BUFQ_PUT(sd->buf_queue, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	sdstart(sd->sc_periph);

	splx(s);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
761
762 /*
763 * sdstart looks to see if there is a buf waiting for the device
764 * and that the device is not already busy. If both are true,
765 * It dequeues the buf and creates a scsi command to perform the
766 * transfer in the buf. The transfer request will call scsipi_done
767 * on completion, which will in turn call this routine again
768 * so that the next queued transfer is performed.
769 * The bufs are queued by the strategy routine (sdstrategy)
770 *
771 * This routine is also called after other non-queued requests
772 * have been made of the scsi driver, to ensure that the queue
773 * continues to be drained.
774 *
775 * must be called at the correct (highish) spl level
776 * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done
777 */
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy.  If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf.  The transfer request will call scsipi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * must be called at the correct (highish) spl level
 * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done
 */
static void
sdstart(struct scsipi_periph *periph)
{
	struct sd_softc *sd = (void *)periph->periph_dev;
	struct disklabel *lp = sd->sc_dk.dk_label;
	struct buf *bp = 0;
	struct scsipi_rw_16 cmd16;
	struct scsipi_rw_10 cmd_big;
	struct scsi_rw_6 cmd_small;
	struct scsipi_generic *cmdp;
	struct scsipi_xfer *xs;
	int nblks, cmdlen, error, flags;

	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
	/*
	 * Check if the device has room for another command
	 */
	while (periph->periph_active < periph->periph_openings) {
		/*
		 * there is excess capacity, but a special waits
		 * It'll need the adapter as soon as we clear out of the
		 * way and let it run (user level wait).
		 */
		if (periph->periph_flags & PERIPH_WAITING) {
			periph->periph_flags &= ~PERIPH_WAITING;
			wakeup((caddr_t)periph);
			return;
		}

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if (__predict_false(
		    (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
			if ((bp = BUFQ_GET(sd->buf_queue)) != NULL) {
				/* Fail this buf and try the next one. */
				bp->b_error = EIO;
				bp->b_flags |= B_ERROR;
				bp->b_resid = bp->b_bcount;
				biodone(bp);
				continue;
			} else {
				return;
			}
		}

		/*
		 * See if there is a buf with work for us to do..
		 * (peek only: dequeue happens after xfer allocation succeeds)
		 */
		if ((bp = BUFQ_PEEK(sd->buf_queue)) == NULL)
			return;

		/*
		 * We have a buf, now we should make a command.
		 */

		if (lp->d_secsize == DEV_BSIZE)
			nblks = bp->b_bcount >> DEV_BSHIFT;
		else
			nblks = howmany(bp->b_bcount, lp->d_secsize);

		/*
		 * Fill out the scsi command.  Use the smallest CDB possible
		 * (6-byte, 10-byte, or 16-byte).  6-byte CDBs address 21 bits
		 * of LBA and an 8-bit count; 10-byte, 32-bit LBA / 16-bit
		 * count; 16-byte everything else.
		 */
		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
		    ((nblks & 0xff) == nblks) &&
		    !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
			/* 6-byte CDB */
			memset(&cmd_small, 0, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    SCSI_READ_6_COMMAND : SCSI_WRITE_6_COMMAND;
			_lto3b(bp->b_rawblkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsipi_generic *)&cmd_small;
		} else if ((bp->b_rawblkno & 0xffffffff) == bp->b_rawblkno) {
			/* 10-byte CDB */
			memset(&cmd_big, 0, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_10 : WRITE_10;
			_lto4b(bp->b_rawblkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsipi_generic *)&cmd_big;
		} else {
			/* 16-byte CDB */
			memset(&cmd16, 0, sizeof(cmd16));
			cmd16.opcode = (bp->b_flags & B_READ) ?
			    READ_16 : WRITE_16;
			_lto8b(bp->b_rawblkno, cmd16.addr);
			_lto4b(nblks, cmd16.length);
			cmdlen = sizeof(cmd16);
			cmdp = (struct scsipi_generic *)&cmd16;
		}

		/* Instrumentation. */
		disk_busy(&sd->sc_dk);

		/*
		 * Mark the disk dirty so that the cache will be
		 * flushed on close.
		 */
		if ((bp->b_flags & B_READ) == 0)
			sd->flags |= SDF_DIRTY;

		/*
		 * Figure out what flags to use.
		 */
		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
		if (bp->b_flags & B_READ)
			flags |= XS_CTL_DATA_IN;
		else
			flags |= XS_CTL_DATA_OUT;

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		xs = scsipi_make_xs(periph, cmdp, cmdlen,
		    (u_char *)bp->b_data, bp->b_bcount,
		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
		if (__predict_false(xs == NULL)) {
			/*
			 * out of memory.  Keep this buffer in the queue, and
			 * retry later.
			 */
			callout_reset(&sd->sc_callout, hz / 2, sdrestart,
			    periph);
			return;
		}
		/*
		 * need to dequeue the buffer before queuing the command,
		 * because sdstart may be called recursively from the
		 * HBA driver
		 */
#ifdef DIAGNOSTIC
		if (BUFQ_GET(sd->buf_queue) != bp)
			panic("sdstart(): dequeued wrong buf");
#else
		BUFQ_GET(sd->buf_queue);
#endif
		error = scsipi_execute_xs(xs);
		/* with a scsipi_xfer preallocated, scsipi_command can't fail */
		KASSERT(error == 0);
	}
}
926
/*
 * Callout handler scheduled by sdstart() when xfer allocation failed:
 * retry draining the queue, at splbio as sdstart() requires.
 */
static void
sdrestart(void *arg)
{
	struct scsipi_periph *periph = arg;
	int s;

	s = splbio();
	sdstart(periph);
	splx(s);
}
934
/*
 * Command-completion callback, invoked by the scsipi midlayer (at
 * interrupt time) for each finished transfer: update flags and disk
 * statistics, record the error state in the buf, and biodone() it.
 */
static void
sddone(struct scsipi_xfer *xs, int error)
{
	struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
	struct buf *bp = xs->bp;

	if (sd->flags & SDF_FLUSHING) {
		/* Flush completed, no longer dirty. */
		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}

	/* bp is NULL for internal (non-buf) commands such as cache flush. */
	if (bp) {
		bp->b_error = error;
		bp->b_resid = xs->resid;
		if (error) {
			/* on a read/write error bp->b_resid is zero, so fix */
			bp->b_resid =bp->b_bcount;
			bp->b_flags |= B_ERROR;
		}

		disk_unbusy(&sd->sc_dk, bp->b_bcount - bp->b_resid,
		    (bp->b_flags & B_READ));
#if NRND > 0
		/* Feed the block number into the entropy pool. */
		rnd_add_uint32(&sd->rnd_source, bp->b_rawblkno);
#endif

		biodone(bp);
	}
}
964
965 static void
966 sdminphys(struct buf *bp)
967 {
968 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
969 long xmax;
970
971 /*
972 * If the device is ancient, we want to make sure that
973 * the transfer fits into a 6-byte cdb.
974 *
975 * XXX Note that the SCSI-I spec says that 256-block transfers
976 * are allowed in a 6-byte read/write, and are specified
977 * by settng the "length" to 0. However, we're conservative
978 * here, allowing only 255-block transfers in case an
979 * ancient device gets confused by length == 0. A length of 0
980 * in a 10-byte read/write actually means 0 blocks.
981 */
982 if ((sd->flags & SDF_ANCIENT) &&
983 ((sd->sc_periph->periph_flags &
984 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
985 xmax = sd->sc_dk.dk_label->d_secsize * 0xff;
986
987 if (bp->b_bcount > xmax)
988 bp->b_bcount = xmax;
989 }
990
991 scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
992 }
993
/* read(2) entry point: raw character I/O through sdstrategy() via physio. */
static int
sdread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
}
1000
/* write(2) entry point: raw character I/O through sdstrategy() via physio. */
static int
sdwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
}
1007
1008 /*
1009 * Perform special action on behalf of the user
1010 * Knows about the internals of this device
1011 */
1012 static int
1013 sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct lwp *l)
1014 {
1015 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1016 struct scsipi_periph *periph = sd->sc_periph;
1017 int part = SDPART(dev);
1018 int error = 0;
1019 #ifdef __HAVE_OLD_DISKLABEL
1020 struct disklabel *newlabel = NULL;
1021 #endif
1022
1023 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1024
1025 /*
1026 * If the device is not valid, some IOCTLs can still be
1027 * handled on the raw partition. Check this here.
1028 */
1029 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1030 switch (cmd) {
1031 case DIOCKLABEL:
1032 case DIOCWLABEL:
1033 case DIOCLOCK:
1034 case DIOCEJECT:
1035 case ODIOCEJECT:
1036 case DIOCGCACHE:
1037 case DIOCSCACHE:
1038 case SCIOCIDENTIFY:
1039 case OSCIOCIDENTIFY:
1040 case SCIOCCOMMAND:
1041 case SCIOCDEBUG:
1042 if (part == RAW_PART)
1043 break;
1044 /* FALLTHROUGH */
1045 default:
1046 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1047 return (ENODEV);
1048 else
1049 return (EIO);
1050 }
1051 }
1052
1053 switch (cmd) {
1054 case DIOCGDINFO:
1055 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1056 return (0);
1057
1058 #ifdef __HAVE_OLD_DISKLABEL
1059 case ODIOCGDINFO:
1060 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1061 if (newlabel == NULL)
1062 return EIO;
1063 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1064 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1065 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1066 else
1067 error = ENOTTY;
1068 free(newlabel, M_TEMP);
1069 return error;
1070 #endif
1071
1072 case DIOCGPART:
1073 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1074 ((struct partinfo *)addr)->part =
1075 &sd->sc_dk.dk_label->d_partitions[part];
1076 return (0);
1077
1078 case DIOCWDINFO:
1079 case DIOCSDINFO:
1080 #ifdef __HAVE_OLD_DISKLABEL
1081 case ODIOCWDINFO:
1082 case ODIOCSDINFO:
1083 #endif
1084 {
1085 struct disklabel *lp;
1086
1087 if ((flag & FWRITE) == 0)
1088 return (EBADF);
1089
1090 #ifdef __HAVE_OLD_DISKLABEL
1091 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1092 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1093 if (newlabel == NULL)
1094 return EIO;
1095 memset(newlabel, 0, sizeof newlabel);
1096 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1097 lp = newlabel;
1098 } else
1099 #endif
1100 lp = (struct disklabel *)addr;
1101
1102 if ((error = lockmgr(&sd->sc_dk.dk_openlock,
1103 LK_EXCLUSIVE, NULL)) != 0)
1104 goto bad;
1105 sd->flags |= SDF_LABELLING;
1106
1107 error = setdisklabel(sd->sc_dk.dk_label,
1108 lp, /*sd->sc_dk.dk_openmask : */0,
1109 sd->sc_dk.dk_cpulabel);
1110 if (error == 0) {
1111 if (cmd == DIOCWDINFO
1112 #ifdef __HAVE_OLD_DISKLABEL
1113 || cmd == ODIOCWDINFO
1114 #endif
1115 )
1116 error = writedisklabel(SDLABELDEV(dev),
1117 sdstrategy, sd->sc_dk.dk_label,
1118 sd->sc_dk.dk_cpulabel);
1119 }
1120
1121 sd->flags &= ~SDF_LABELLING;
1122 (void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
1123 bad:
1124 #ifdef __HAVE_OLD_DISKLABEL
1125 if (newlabel != NULL)
1126 free(newlabel, M_TEMP);
1127 #endif
1128 return (error);
1129 }
1130
1131 case DIOCKLABEL:
1132 if (*(int *)addr)
1133 periph->periph_flags |= PERIPH_KEEP_LABEL;
1134 else
1135 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1136 return (0);
1137
1138 case DIOCWLABEL:
1139 if ((flag & FWRITE) == 0)
1140 return (EBADF);
1141 if (*(int *)addr)
1142 sd->flags |= SDF_WLABEL;
1143 else
1144 sd->flags &= ~SDF_WLABEL;
1145 return (0);
1146
1147 case DIOCLOCK:
1148 if (periph->periph_flags & PERIPH_REMOVABLE)
1149 return (scsipi_prevent(periph,
1150 (*(int *)addr) ?
1151 SPAMR_PREVENT_DT : SPAMR_ALLOW, 0));
1152 else
1153 return (ENOTTY);
1154
1155 case DIOCEJECT:
1156 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1157 return (ENOTTY);
1158 if (*(int *)addr == 0) {
1159 /*
1160 * Don't force eject: check that we are the only
1161 * partition open. If so, unlock it.
1162 */
1163 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1164 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1165 sd->sc_dk.dk_openmask) {
1166 error = scsipi_prevent(periph, SPAMR_ALLOW,
1167 XS_CTL_IGNORE_NOT_READY);
1168 if (error)
1169 return (error);
1170 } else {
1171 return (EBUSY);
1172 }
1173 }
1174 /* FALLTHROUGH */
1175 case ODIOCEJECT:
1176 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1177 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1178
1179 case DIOCGDEFLABEL:
1180 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1181 return (0);
1182
1183 #ifdef __HAVE_OLD_DISKLABEL
1184 case ODIOCGDEFLABEL:
1185 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1186 if (newlabel == NULL)
1187 return EIO;
1188 sdgetdefaultlabel(sd, newlabel);
1189 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1190 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1191 else
1192 error = ENOTTY;
1193 free(newlabel, M_TEMP);
1194 return error;
1195 #endif
1196
1197 case DIOCGCACHE:
1198 return (sd_getcache(sd, (int *) addr));
1199
1200 case DIOCSCACHE:
1201 if ((flag & FWRITE) == 0)
1202 return (EBADF);
1203 return (sd_setcache(sd, *(int *) addr));
1204
1205 case DIOCCACHESYNC:
1206 /*
1207 * XXX Do we really need to care about having a writable
1208 * file descriptor here?
1209 */
1210 if ((flag & FWRITE) == 0)
1211 return (EBADF);
1212 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1213 error = sd_flush(sd, 0);
1214 if (error)
1215 sd->flags &= ~SDF_FLUSHING;
1216 else
1217 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1218 } else
1219 error = 0;
1220 return (error);
1221
1222 case DIOCAWEDGE:
1223 {
1224 struct dkwedge_info *dkw = (void *) addr;
1225
1226 if ((flag & FWRITE) == 0)
1227 return (EBADF);
1228
1229 /* If the ioctl happens here, the parent is us. */
1230 strcpy(dkw->dkw_parent, sd->sc_dev.dv_xname);
1231 return (dkwedge_add(dkw));
1232 }
1233
1234 case DIOCDWEDGE:
1235 {
1236 struct dkwedge_info *dkw = (void *) addr;
1237
1238 if ((flag & FWRITE) == 0)
1239 return (EBADF);
1240
1241 /* If the ioctl happens here, the parent is us. */
1242 strcpy(dkw->dkw_parent, sd->sc_dev.dv_xname);
1243 return (dkwedge_del(dkw));
1244 }
1245
1246 case DIOCLWEDGES:
1247 {
1248 struct dkwedge_list *dkwl = (void *) addr;
1249
1250 return (dkwedge_list(&sd->sc_dk, dkwl, l));
1251 }
1252
1253 default:
1254 if (part != RAW_PART)
1255 return (ENOTTY);
1256 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, l));
1257 }
1258
1259 #ifdef DIAGNOSTIC
1260 panic("sdioctl: impossible");
1261 #endif
1262 }
1263
/*
 * sdgetdefaultlabel:
 *
 *	Build a default in-core disklabel for the drive from the probed
 *	disk_parms, with a single RAW_PART partition covering the unit.
 *	The label is fully rewritten; any previous contents are lost.
 */
static void
sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	/* Geometry as probed (or fabricated) by sd_get_parms(). */
	lp->d_secsize = sd->params.blksize;
	lp->d_ntracks = sd->params.heads;
	lp->d_nsectors = sd->params.sectors;
	lp->d_ncylinders = sd->params.cyls;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	/* Tag the label with the transport the periph sits on. */
	switch (scsipi_periph_bustype(sd->sc_periph)) {
	case SCSIPI_BUSTYPE_SCSI:
		lp->d_type = DTYPE_SCSI;
		break;
	case SCSIPI_BUSTYPE_ATAPI:
		lp->d_type = DTYPE_ATAPI;
		break;
	}
	/*
	 * XXX
	 * We could probe the mode pages to figure out what kind of disc it is.
	 * Is this worthwhile?
	 */
	strncpy(lp->d_typename, sd->name, 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = sd->params.disksize;
	lp->d_rpm = sd->params.rot_rate;
	lp->d_interleave = 1;
	lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
	    D_REMOVABLE : 0;

	/* One raw partition spanning the whole unit. */
	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	/* Checksum must be computed last, over the finished label. */
	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}
1306
1307
1308 /*
1309 * Load the label information on the named device
1310 */
/*
 * Load the label information on the named device.
 *
 * Starts from a default label (so readdisklabel has sane geometry to
 * work with) and then reads the on-disk label through the raw partition.
 * Returns 0 on success, EIO if no valid label could be read.
 */
static int
sdgetdisklabel(struct sd_softc *sd)
{
	struct disklabel *lp = sd->sc_dk.dk_label;
	const char *errstring;

	memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	sdgetdefaultlabel(sd, lp);

	if (lp->d_secpercyl == 0) {
		lp->d_secpercyl = 100;
		/* as long as it's not 0 - readdisklabel divides by it (?) */
	}

	/*
	 * Call the generic disklabel extraction routine
	 */
	errstring = readdisklabel(MAKESDDEV(0, device_unit(&sd->sc_dev),
	    RAW_PART), sdstrategy, lp, sd->sc_dk.dk_cpulabel);
	if (errstring) {
		/* readdisklabel returns a message, not an errno; report it. */
		printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
		return EIO;
	}
	return 0;
}
1337
1338 static void
1339 sd_shutdown(void *arg)
1340 {
1341 struct sd_softc *sd = arg;
1342
1343 /*
1344 * If the disk cache needs to be flushed, and the disk supports
1345 * it, flush it. We're cold at this point, so we poll for
1346 * completion.
1347 */
1348 if ((sd->flags & SDF_DIRTY) != 0) {
1349 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1350 printf("%s: cache synchronization failed\n",
1351 sd->sc_dev.dv_xname);
1352 sd->flags &= ~SDF_FLUSHING;
1353 } else
1354 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1355 }
1356 }
1357
1358 /*
1359 * Check Errors
1360 */
/*
 * Check Errors
 *
 * Driver-private sense interpretation.  Returns EJUSTRETURN to hand the
 * xfer back to the generic scsipi error machinery, ERESTART to retry
 * after recovery, or a real errno to fail the transfer.
 */
static int
sd_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	struct sd_softc *sd = (void *)periph->periph_dev;
	int s, error, retval = EJUSTRETURN;

	/*
	 * If the periph is already recovering, just do the normal
	 * error processing.
	 */
	if (periph->periph_flags & PERIPH_RECOVERING)
		return (retval);

	/*
	 * Ignore errors from accessing illegal fields (e.g. trying to
	 * lock the door of a digicam, which doesn't have a door that
	 * can be locked) for the SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL command.
	 */
	if (xs->cmd->opcode == SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL &&
	    SSD_SENSE_KEY(sense->flags) == SKEY_ILLEGAL_REQUEST &&
	    sense->asc == 0x24 &&
	    sense->ascq == 0x00) { /* Illegal field in CDB */
		if (!(xs->xs_control & XS_CTL_SILENT)) {
			scsipi_printaddr(periph);
			printf("no door lock\n");
		}
		xs->xs_control |= XS_CTL_IGNORE_ILLEGAL_REQUEST;
		return (retval);
	}

	/*
	 * If the device is not open yet, let the generic code handle it.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (retval);

	/*
	 * If it isn't a extended or extended/deferred error, let
	 * the generic code handle it.
	 */
	if (SSD_RCODE(sense->response_code) != SSD_RCODE_CURRENT &&
	    SSD_RCODE(sense->response_code) != SSD_RCODE_DEFERRED)
		return (retval);

	/* NOT READY / asc 0x04: drive is spinning up or stopped. */
	if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
	    sense->asc == 0x4) {
		if (sense->ascq == 0x01) {
			/*
			 * Unit In The Process Of Becoming Ready.
			 * Freeze the queue and thaw it in 5 seconds;
			 * only freeze once per pending callout.
			 */
			printf("%s: waiting for pack to spin up...\n",
			    sd->sc_dev.dv_xname);
			if (!callout_pending(&periph->periph_callout))
				scsipi_periph_freeze(periph, 1);
			callout_reset(&periph->periph_callout,
			    5 * hz, scsipi_periph_timed_thaw, periph);
			retval = ERESTART;
		} else if (sense->ascq == 0x02) {
			/* Issue a START UNIT; mark recovery in progress
			 * around it so we don't reenter ourselves. */
			printf("%s: pack is stopped, restarting...\n",
			    sd->sc_dev.dv_xname);
			s = splbio();
			periph->periph_flags |= PERIPH_RECOVERING;
			splx(s);
			error = scsipi_start(periph, SSS_START,
			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
			if (error) {
				printf("%s: unable to restart pack\n",
				    sd->sc_dev.dv_xname);
				retval = error;
			} else
				retval = ERESTART;
			s = splbio();
			periph->periph_flags &= ~PERIPH_RECOVERING;
			splx(s);
		}
	}
	if (SSD_SENSE_KEY(sense->flags) == SKEY_MEDIUM_ERROR &&
	    sense->asc == 0x31 &&
	    sense->ascq == 0x00) { /* maybe for any asq ? */
		/* Medium Format Corrupted */
		retval = EFTYPE;
	}
	return (retval);
}
1450
1451
/*
 * sdsize:
 *
 *	Return the size (in DEV_BSIZE blocks) of the given swap partition,
 *	or -1 if it cannot be determined or is not FS_SWAP.  If the
 *	partition is not already open, it is transiently opened/closed here.
 */
static int
sdsize(dev_t dev)
{
	struct sd_softc *sd;
	int part, unit, omask;
	int size;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (-1);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (-1);

	if (!device_is_active(&sd->sc_dev))
		return (-1);

	part = SDPART(dev);
	/* Remember whether this partition was open before we touched it. */
	omask = sd->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		size = -1;
	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		/* Convert partition size to DEV_BSIZE units. */
		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	/* Close again only if we were the ones who opened it. */
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	return (size);
}
1485
/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
static struct scsipi_xfer sx;	/* statically allocated xfer for sddump() */
static int sddoingadump;	/* recursion guard: nonzero while dumping */
1489
1490 /*
1491 * dump all of physical memory into the partition specified, starting
1492 * at offset 'dumplo' into the partition.
1493 */
1494 static int
1495 sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
1496 {
1497 struct sd_softc *sd; /* disk unit to do the I/O */
1498 struct disklabel *lp; /* disk's disklabel */
1499 int unit, part;
1500 int sectorsize; /* size of a disk sector */
1501 int nsects; /* number of sectors in partition */
1502 int sectoff; /* sector offset of partition */
1503 int totwrt; /* total number of sectors left to write */
1504 int nwrt; /* current number of sectors to write */
1505 struct scsipi_rw_10 cmd; /* write command */
1506 struct scsipi_xfer *xs; /* ... convenience */
1507 struct scsipi_periph *periph;
1508 struct scsipi_channel *chan;
1509
1510 /* Check if recursive dump; if so, punt. */
1511 if (sddoingadump)
1512 return (EFAULT);
1513
1514 /* Mark as active early. */
1515 sddoingadump = 1;
1516
1517 unit = SDUNIT(dev); /* Decompose unit & partition. */
1518 part = SDPART(dev);
1519
1520 /* Check for acceptable drive number. */
1521 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1522 return (ENXIO);
1523
1524 if (!device_is_active(&sd->sc_dev))
1525 return (ENODEV);
1526
1527 periph = sd->sc_periph;
1528 chan = periph->periph_channel;
1529
1530 /* Make sure it was initialized. */
1531 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1532 return (ENXIO);
1533
1534 /* Convert to disk sectors. Request must be a multiple of size. */
1535 lp = sd->sc_dk.dk_label;
1536 sectorsize = lp->d_secsize;
1537 if ((size % sectorsize) != 0)
1538 return (EFAULT);
1539 totwrt = size / sectorsize;
1540 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1541
1542 nsects = lp->d_partitions[part].p_size;
1543 sectoff = lp->d_partitions[part].p_offset;
1544
1545 /* Check transfer bounds against partition size. */
1546 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1547 return (EINVAL);
1548
1549 /* Offset block number to start of partition. */
1550 blkno += sectoff;
1551
1552 xs = &sx;
1553
1554 while (totwrt > 0) {
1555 nwrt = totwrt; /* XXX */
1556 #ifndef SD_DUMP_NOT_TRUSTED
1557 /*
1558 * Fill out the scsi command
1559 */
1560 memset(&cmd, 0, sizeof(cmd));
1561 cmd.opcode = WRITE_10;
1562 _lto4b(blkno, cmd.addr);
1563 _lto2b(nwrt, cmd.length);
1564 /*
1565 * Fill out the scsipi_xfer structure
1566 * Note: we cannot sleep as we may be an interrupt
1567 * don't use scsipi_command() as it may want to wait
1568 * for an xs.
1569 */
1570 memset(xs, 0, sizeof(sx));
1571 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1572 XS_CTL_DATA_OUT;
1573 xs->xs_status = 0;
1574 xs->xs_periph = periph;
1575 xs->xs_retries = SDRETRIES;
1576 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1577 xs->cmd = (struct scsipi_generic *)&cmd;
1578 xs->cmdlen = sizeof(cmd);
1579 xs->resid = nwrt * sectorsize;
1580 xs->error = XS_NOERROR;
1581 xs->bp = 0;
1582 xs->data = va;
1583 xs->datalen = nwrt * sectorsize;
1584
1585 /*
1586 * Pass all this info to the scsi driver.
1587 */
1588 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1589 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1590 xs->error != XS_NOERROR)
1591 return (EIO);
1592 #else /* SD_DUMP_NOT_TRUSTED */
1593 /* Let's just talk about this first... */
1594 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1595 delay(500 * 1000); /* half a second */
1596 #endif /* SD_DUMP_NOT_TRUSTED */
1597
1598 /* update block count */
1599 totwrt -= nwrt;
1600 blkno += nwrt;
1601 va += sectorsize * nwrt;
1602 }
1603 sddoingadump = 0;
1604 return (0);
1605 }
1606
1607 static int
1608 sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1609 int page, int flags, int *big)
1610 {
1611
1612 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1613 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1614 *big = 1;
1615 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1616 size + sizeof(struct scsi_mode_parameter_header_10),
1617 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1618 } else {
1619 *big = 0;
1620 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1621 size + sizeof(struct scsi_mode_parameter_header_6),
1622 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1623 }
1624 }
1625
1626 static int
1627 sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1628 int flags, int big)
1629 {
1630
1631 if (big) {
1632 struct scsi_mode_parameter_header_10 *header = sense;
1633
1634 _lto2b(0, header->data_length);
1635 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1636 size + sizeof(struct scsi_mode_parameter_header_10),
1637 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1638 } else {
1639 struct scsi_mode_parameter_header_6 *header = sense;
1640
1641 header->data_length = 0;
1642 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1643 size + sizeof(struct scsi_mode_parameter_header_6),
1644 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1645 }
1646 }
1647
1648 /*
1649 * sd_validate_blksize:
1650 *
1651 * Validate the block size. Print error if periph is specified,
1652 */
1653 static int
1654 sd_validate_blksize(struct scsipi_periph *periph, int len)
1655 {
1656
1657 switch (len) {
1658 case 256:
1659 case 512:
1660 case 1024:
1661 case 2048:
1662 case 4096:
1663 return 1;
1664 }
1665
1666 if (periph) {
1667 scsipi_printaddr(periph);
1668 printf("%s sector size: 0x%x. Defaulting to %d bytes.\n",
1669 (len ^ (1 << (ffs(len) - 1))) ?
1670 "preposterous" : "unsupported",
1671 len, SD_DEFAULT_BLKSIZE);
1672 }
1673
1674 return 0;
1675 }
1676
1677 /*
1678 * sd_read_capacity:
1679 *
1680 * Find out from the device what its capacity is.
1681 */
/*
 * sd_read_capacity:
 *
 *	Find out from the device what its capacity is.
 *
 *	Returns the number of blocks (last LBA + 1) and stores the block
 *	length in *blksize; returns 0 on failure.  *blksize is untouched
 *	if both commands fail.
 */
static u_int64_t
sd_read_capacity(struct scsipi_periph *periph, int *blksize, int flags)
{
	union {
		struct scsipi_read_capacity_10 cmd;
		struct scsipi_read_capacity_16 cmd16;
	} cmd;
	union {
		struct scsipi_read_capacity_10_data data;
		struct scsipi_read_capacity_16_data data16;
	} data;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd.opcode = READ_CAPACITY_10;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	memset(&data.data, 0, sizeof(data.data));
	if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
	    (void *)&data.data, sizeof(data.data), SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	/* 0xffffffff means the capacity doesn't fit in 32 bits. */
	if (_4btol(data.data.addr) != 0xffffffff) {
		*blksize = _4btol(data.data.length);
		return (_4btol(data.data.addr) + 1);
	}

	/*
	 * Device is larger than can be reflected by READ CAPACITY (10).
	 * Try READ CAPACITY (16).
	 */

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd16.opcode = READ_CAPACITY_16;
	cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(data.data16), cmd.cmd16.len);

	memset(&data.data16, 0, sizeof(data.data16));
	if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
	    (void *)&data.data16, sizeof(data.data16), SCSIPIRETRIES, 20000,
	    NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	*blksize = _4btol(data.data16.length);
	return (_8btol(data.data16.addr) + 1);
}
1732
/*
 * sd_get_simplifiedparms:
 *
 *	Fill in disk_parms for an RBC (T_SIMPLE_DIRECT) device using
 *	READ CAPACITY plus mode page 6, cross-checking the two and
 *	fabricating a 64-head/32-sector pseudo-geometry.
 */
static int
sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
{
	struct {
		struct scsi_mode_parameter_header_6 header;
		/* no block descriptor */
		u_int8_t pg_code; /* page code (should be 6) */
		u_int8_t pg_length; /* page length (should be 11) */
		u_int8_t wcd; /* bit0: cache disable */
		u_int8_t lbs[2]; /* logical block size */
		u_int8_t size[5]; /* number of log. blocks */
		u_int8_t pp; /* power/performance */
		u_int8_t flags;
		u_int8_t resvd;
	} scsipi_sense;
	u_int64_t blocks;
	int error, blksize;

	/*
	 * sd_read_capacity (ie "read capacity") and mode sense page 6
	 * give the same information. Do both for now, and check
	 * for consistency.
	 * XXX probably differs for removable media
	 */
	dp->blksize = SD_DEFAULT_BLKSIZE;
	if ((blocks = sd_read_capacity(sd->sc_periph, &blksize, flags)) == 0)
		return (SDGP_RESULT_OFFLINE);		/* XXX? */

	error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
	    &scsipi_sense.header, sizeof(scsipi_sense),
	    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);

	if (error != 0)
		return (SDGP_RESULT_OFFLINE);		/* XXX? */

	/* Prefer READ CAPACITY's block size, fall back to the mode page,
	 * and finally to the default if neither is sane. */
	dp->blksize = blksize;
	if (!sd_validate_blksize(NULL, dp->blksize))
		dp->blksize = _2btol(scsipi_sense.lbs);
	if (!sd_validate_blksize(sd->sc_periph, dp->blksize))
		dp->blksize = SD_DEFAULT_BLKSIZE;

	/*
	 * Create a pseudo-geometry.
	 */
	dp->heads = 64;
	dp->sectors = 32;
	dp->cyls = blocks / (dp->heads * dp->sectors);
	dp->disksize = _5btol(scsipi_sense.size);
	if (dp->disksize <= UINT32_MAX && dp->disksize != blocks) {
		/* Disagreement between the two sources: trust READ CAPACITY. */
		printf("RBC size: mode sense=%llu, get cap=%llu\n",
		    (unsigned long long)dp->disksize,
		    (unsigned long long)blocks);
		dp->disksize = blocks;
	}
	dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;

	return (SDGP_RESULT_OK);
}
1791
1792 /*
1793 * Get the scsi driver to send a full inquiry to the * device and use the
1794 * results to fill out the disk parameter structure.
1795 */
/*
 * Get the scsi driver to send a full inquiry to the * device and use the
 * results to fill out the disk parameter structure.
 *
 * Fallback chain: READ CAPACITY; if that fails, READ FORMAT CAPACITIES
 * (for unformatted/removable media); if the reported block size is
 * invalid, a mode-sense block descriptor; finally SD_DEFAULT_BLKSIZE.
 */
static int
sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
{
	u_int64_t blocks;
	int error, blksize;
#if 0
	int i;
	u_int8_t *p;
#endif

	dp->disksize = blocks = sd_read_capacity(sd->sc_periph, &blksize,
	    flags);
	if (blocks == 0) {
		/* READ CAPACITY failed; try READ FORMAT CAPACITIES. */
		struct scsipi_read_format_capacities cmd;
		struct {
			struct scsipi_capacity_list_header header;
			struct scsipi_capacity_descriptor desc;
		} __attribute__((packed)) data;

		memset(&cmd, 0, sizeof(cmd));
		memset(&data, 0, sizeof(data));
		cmd.opcode = READ_FORMAT_CAPACITIES;
		_lto2b(sizeof(data), cmd.length);

		error = scsipi_command(sd->sc_periph,
		    (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
		    SDRETRIES, 20000, NULL,
		    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK);
		if (error == EFTYPE) {
			/* Medium Format Corrupted, handle as not formatted */
			return (SDGP_RESULT_UNFORMATTED);
		}
		if (error || data.header.length == 0)
			return (SDGP_RESULT_OFFLINE);

#if 0
printf("rfc: length=%d\n", data.header.length);
printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
#endif
		switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
		case SCSIPI_CAP_DESC_CODE_RESERVED:
		case SCSIPI_CAP_DESC_CODE_FORMATTED:
			break;

		case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
			return (SDGP_RESULT_UNFORMATTED);

		case SCSIPI_CAP_DESC_CODE_NONE:
			return (SDGP_RESULT_OFFLINE);
		}

		dp->disksize = blocks = _4btol(data.desc.nblks);
		if (blocks == 0)
			return (SDGP_RESULT_OFFLINE);		/* XXX? */

		blksize = _3btol(data.desc.blklen);

	} else if (!sd_validate_blksize(NULL, blksize)) {
		/* Capacity is known but the block size looks bogus;
		 * try the block descriptor from a page-0 mode sense. */
		struct sd_mode_sense_data scsipi_sense;
		int big, bsize;
		struct scsi_general_block_descriptor *bdesc;

		memset(&scsipi_sense, 0, sizeof(scsipi_sense));
		error = sd_mode_sense(sd, 0, &scsipi_sense,
		    sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
		if (!error) {
			if (big) {
				bdesc = (void *)(&scsipi_sense.header.big + 1);
				bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
			} else {
				bdesc = (void *)(&scsipi_sense.header.small + 1);
				bsize = scsipi_sense.header.small.blk_desc_len;
			}

#if 0
printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
printf("page 0 bsize=%d\n", bsize);
printf("page 0 ok\n");
#endif

			/* A general block descriptor is 8 bytes. */
			if (bsize >= 8) {
				blksize = _3btol(bdesc->blklen);
			}
		}
	}

	if (!sd_validate_blksize(sd->sc_periph, blksize))
		blksize = SD_DEFAULT_BLKSIZE;

	dp->blksize = blksize;
	dp->disksize512 = (blocks * dp->blksize) / DEV_BSIZE;
	/* NOTE(review): returns literal 0 rather than SDGP_RESULT_OK —
	 * presumably the same value; confirm against the SDGP_* defines. */
	return (0);
}
1889
/*
 * sd_get_parms_page4:
 *
 *	Derive a geometry from mode page 4 (rigid disk geometry).
 *	Tries with DBD (disable block descriptors) first, then retries
 *	with DBD off.  Returns 0 on success, ERESTART if the page is
 *	unusable, or an errno.
 */
static int
sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
{
	struct sd_mode_sense_data scsipi_sense;
	int error;
	int big, byte2;
	size_t poffset;
	union scsi_disk_pages *pages;

	byte2 = SMS_DBD;
 again:
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, byte2, &scsipi_sense,
	    (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
	    sizeof(scsipi_sense.pages.rigid_geometry), 4,
	    flags | XS_CTL_SILENT, &big);
	if (error) {
		if (byte2 == SMS_DBD) {
			/* No result; try once more with DBD off */
			byte2 = 0;
			goto again;
		}
		return (error);
	}

	/* Page data follows the header and any block descriptors. */
	if (big) {
		poffset = sizeof scsipi_sense.header.big;
		poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
	} else {
		poffset = sizeof scsipi_sense.header.small;
		poffset += scsipi_sense.header.small.blk_desc_len;
	}

	/* Guard against a descriptor length that would overrun our buffer. */
	if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
		return ERESTART;

	pages = (void *)((u_long)&scsipi_sense + poffset);
#if 0
	{
		size_t i;
		u_int8_t *p;

		printf("page 4 sense:");
		for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
		    i--, p++)
			printf(" %02x", *p);
		printf("\n");
		printf("page 4 pg_code=%d sense=%p/%p\n",
		    pages->rigid_geometry.pg_code, &scsipi_sense, pages);
	}
#endif

	if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
		return (ERESTART);

	SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
	    ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
	    _3btol(pages->rigid_geometry.ncyl),
	    pages->rigid_geometry.nheads,
	    _2btol(pages->rigid_geometry.st_cyl_wp),
	    _2btol(pages->rigid_geometry.st_cyl_rwc),
	    _2btol(pages->rigid_geometry.land_zone)));

	/*
	 * KLUDGE!! (for zone recorded disks)
	 * give a number of sectors so that sec * trks * cyls
	 * is <= disk_size
	 * can lead to wasted space! THINK ABOUT THIS !
	 */
	dp->heads = pages->rigid_geometry.nheads;
	dp->cyls = _3btol(pages->rigid_geometry.ncyl);
	if (dp->heads == 0 || dp->cyls == 0)
		return (ERESTART);
	dp->sectors = dp->disksize / (dp->heads * dp->cyls);	/* XXX */

	dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
	if (dp->rot_rate == 0)
		dp->rot_rate = 3600;

#if 0
printf("page 4 ok\n");
#endif
	return (0);
}
1974
1975 static int
1976 sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
1977 {
1978 struct sd_mode_sense_data scsipi_sense;
1979 int error;
1980 int big, byte2;
1981 size_t poffset;
1982 union scsi_disk_pages *pages;
1983
1984 byte2 = SMS_DBD;
1985 again:
1986 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1987 error = sd_mode_sense(sd, 0, &scsipi_sense,
1988 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1989 sizeof(scsipi_sense.pages.flex_geometry), 5,
1990 flags | XS_CTL_SILENT, &big);
1991 if (error) {
1992 if (byte2 == SMS_DBD) {
1993 /* No result; try once more with DBD off */
1994 byte2 = 0;
1995 goto again;
1996 }
1997 return (error);
1998 }
1999
2000 if (big) {
2001 poffset = sizeof scsipi_sense.header.big;
2002 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
2003 } else {
2004 poffset = sizeof scsipi_sense.header.small;
2005 poffset += scsipi_sense.header.small.blk_desc_len;
2006 }
2007
2008 if (poffset > sizeof(scsipi_sense) - sizeof(pages->flex_geometry))
2009 return ERESTART;
2010
2011 pages = (void *)((u_long)&scsipi_sense + poffset);
2012 #if 0
2013 {
2014 size_t i;
2015 u_int8_t *p;
2016
2017 printf("page 5 sense:");
2018 for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
2019 i--, p++)
2020 printf(" %02x", *p);
2021 printf("\n");
2022 printf("page 5 pg_code=%d sense=%p/%p\n",
2023 pages->flex_geometry.pg_code, &scsipi_sense, pages);
2024 }
2025 #endif
2026
2027 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
2028 return (ERESTART);
2029
2030 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
2031 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
2032 _3btol(pages->flex_geometry.ncyl),
2033 pages->flex_geometry.nheads,
2034 pages->flex_geometry.ph_sec_tr,
2035 _2btol(pages->flex_geometry.bytes_s)));
2036
2037 dp->heads = pages->flex_geometry.nheads;
2038 dp->cyls = _2btol(pages->flex_geometry.ncyl);
2039 dp->sectors = pages->flex_geometry.ph_sec_tr;
2040 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
2041 return (ERESTART);
2042
2043 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
2044 if (dp->rot_rate == 0)
2045 dp->rot_rate = 3600;
2046
2047 #if 0
2048 printf("page 5 ok\n");
2049 #endif
2050 return (0);
2051 }
2052
/*
 * sd_get_parms:
 *
 *	Top-level parameter probe: dispatch to the RBC simplified path,
 *	then capacity + geometry mode pages, finally falling back to a
 *	fabricated geometry.  Returns an SDGP_RESULT_* code.
 */
static int
sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
{
	int error;

	/*
	 * If offline, the SDEV_MEDIA_LOADED flag will be
	 * cleared by the caller if necessary.
	 */
	if (sd->type == T_SIMPLE_DIRECT) {
		/* RBC devices: mode page 6 path. */
		error = sd_get_simplifiedparms(sd, dp, flags);
		if (!error)
			disk_blocksize(&sd->sc_dk, dp->blksize);
		return (error);
	}

	error = sd_get_capacity(sd, dp, flags);
	if (error)
		return (error);

	disk_blocksize(&sd->sc_dk, dp->blksize);

	/* Optical devices have no useful geometry pages. */
	if (sd->type == T_OPTICAL)
		goto page0;

	/* Removable media: prefer flexible geometry (page 5) first. */
	if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
		if (!sd_get_parms_page5(sd, dp, flags) ||
		    !sd_get_parms_page4(sd, dp, flags))
			return (SDGP_RESULT_OK);
	} else {
		if (!sd_get_parms_page4(sd, dp, flags) ||
		    !sd_get_parms_page5(sd, dp, flags))
			return (SDGP_RESULT_OK);
	}

page0:
	printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
	/* Try calling driver's method for figuring out geometry. */
	if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
	    !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
		(sd->sc_periph, dp, dp->disksize)) {
		/*
		 * Use adaptec standard fictitious geometry
		 * this depends on which controller (e.g. 1542C is
		 * different. but we have to put SOMETHING here..)
		 */
		dp->heads = 64;
		dp->sectors = 32;
		dp->cyls = dp->disksize / (64 * 32);
	}
	dp->rot_rate = 3600;
	return (SDGP_RESULT_OK);
}
2106
/*
 * sd_flush:
 *
 *	Issue SYNCHRONIZE CACHE (10) to flush the drive's write cache.
 *	Returns 0 immediately for pre-SCSI-2 devices or devices with the
 *	NOSYNCCACHE quirk.  Sets SDF_FLUSHING; the caller clears it (and
 *	SDF_DIRTY on success) when the command completes.
 */
static int
sd_flush(struct sd_softc *sd, int flags)
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsi_synchronize_cache_10 cmd;

	/*
	 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
	 * We issue with address 0 length 0, which should be
	 * interpreted by the device as "all remaining blocks
	 * starting at address 0".  We ignore ILLEGAL REQUEST
	 * in the event that the command is not supported by
	 * the device, and poll for completion so that we know
	 * that the cache has actually been flushed.
	 *
	 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
	 * command, as indicated by our quirks flags.
	 *
	 * XXX What about older devices?
	 */
	if (periph->periph_version < 2 ||
	    (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
		return (0);

	sd->flags |= SDF_FLUSHING;
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_SYNCHRONIZE_CACHE_10;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
}
2138
/*
 * sd_getcache:
 *
 *	Report the drive's caching state (DKCACHE_* bits) from mode
 *	page 8.  A second, best-effort mode sense with PCTRL_CHANGEABLE
 *	determines which bits are modifiable; its failure is not fatal.
 */
static int
sd_getcache(struct sd_softc *sd, int *bitsp)
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct sd_mode_sense_data scsipi_sense;
	int error, bits = 0;
	int big;
	union scsi_disk_pages *pages;

	/* Caching page only exists on SCSI-2 and later devices. */
	if (periph->periph_version < 2)
		return (EOPNOTSUPP);

	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
	if (error)
		return (error);

	/* Page data immediately follows the header (DBD: no descriptors). */
	if (big)
		pages = (void *)(&scsipi_sense.header.big + 1);
	else
		pages = (void *)(&scsipi_sense.header.small + 1);

	/* RCD = Read Cache Disable, so a clear bit means read cache on. */
	if ((pages->caching_params.flags & CACHING_RCD) == 0)
		bits |= DKCACHE_READ;
	if (pages->caching_params.flags & CACHING_WCE)
		bits |= DKCACHE_WRITE;
	if (pages->caching_params.pg_code & PGCODE_PS)
		bits |= DKCACHE_SAVE;

	/* Ask which bits are changeable; ignore errors. */
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params),
	    SMS_PCTRL_CHANGEABLE|8, 0, &big);
	if (error == 0) {
		if (big)
			pages = (void *)(&scsipi_sense.header.big + 1);
		else
			pages = (void *)(&scsipi_sense.header.small + 1);

		if (pages->caching_params.flags & CACHING_RCD)
			bits |= DKCACHE_RCHANGE;
		if (pages->caching_params.flags & CACHING_WCE)
			bits |= DKCACHE_WCHANGE;
	}

	*bitsp = bits;

	return (0);
}
2189
/*
 * sd_setcache:
 *
 *	Apply DKCACHE_* bits to the drive's caching page (8) via
 *	MODE SENSE / modify / MODE SELECT.  Returns 0 without issuing
 *	a command if the requested state matches the current one.
 */
static int
sd_setcache(struct sd_softc *sd, int bits)
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct sd_mode_sense_data scsipi_sense;
	int error;
	uint8_t oflags, byte2 = 0;
	int big;
	union scsi_disk_pages *pages;

	if (periph->periph_version < 2)
		return (EOPNOTSUPP);

	/* Read the current page so unrelated fields are preserved. */
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
	if (error)
		return (error);

	if (big)
		pages = (void *)(&scsipi_sense.header.big + 1);
	else
		pages = (void *)(&scsipi_sense.header.small + 1);

	oflags = pages->caching_params.flags;

	/* RCD is inverted: set it to *disable* the read cache. */
	if (bits & DKCACHE_READ)
		pages->caching_params.flags &= ~CACHING_RCD;
	else
		pages->caching_params.flags |= CACHING_RCD;

	if (bits & DKCACHE_WRITE)
		pages->caching_params.flags |= CACHING_WCE;
	else
		pages->caching_params.flags &= ~CACHING_WCE;

	/* Nothing changed; skip the MODE SELECT. */
	if (oflags == pages->caching_params.flags)
		return (0);

	/* PS bit is reserved in MODE SELECT data; strip it. */
	pages->caching_params.pg_code &= PGCODE_MASK;

	if (bits & DKCACHE_SAVE)
		byte2 |= SMS_SP;

	return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
	    sizeof(struct scsi_mode_page_header) +
	    pages->caching_params.pg_length, 0, big));
}
2238
2239 static void
2240 sd_set_properties(struct sd_softc *sd)
2241 {
2242 prop_dictionary_t disk_info, odisk_info, geom;
2243
2244 disk_info = prop_dictionary_create();
2245
2246 geom = prop_dictionary_create();
2247
2248 prop_dictionary_set_uint64(geom, "sectors-per-unit",
2249 sd->params.disksize);
2250
2251 prop_dictionary_set_uint32(geom, "sector-size",
2252 sd->params.blksize);
2253
2254 prop_dictionary_set_uint16(geom, "sectors-per-track",
2255 sd->params.sectors);
2256
2257 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
2258 sd->params.heads);
2259
2260 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
2261 sd->params.cyls);
2262
2263 prop_dictionary_set(disk_info, "geometry", geom);
2264 prop_object_release(geom);
2265
2266 prop_dictionary_set(device_properties(&sd->sc_dev),
2267 "disk-info", disk_info);
2268
2269 /*
2270 * Don't release disk_info here; we keep a reference to it.
2271 * disk_detach() will release it when we go away.
2272 */
2273
2274 odisk_info = sd->sc_dk.dk_info;
2275 sd->sc_dk.dk_info = disk_info;
2276 if (odisk_info)
2277 prop_object_release(odisk_info);
2278 }
Cache object: 1c611cea14dfb0734005ca28cd2c4574
|