FreeBSD/Linux Kernel Cross Reference
sys/cam/scsi/scsi_da.c
1 /*-
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/11.2/sys/cam/scsi/scsi_da.c 332461 2018-04-13 00:29:42Z mav $");
31
32 #include <sys/param.h>
33
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/endian.h>
48 #include <sys/proc.h>
49 #include <sys/sbuf.h>
50 #include <geom/geom.h>
51 #include <geom/geom_disk.h>
52 #endif /* _KERNEL */
53
54 #ifndef _KERNEL
55 #include <stdio.h>
56 #include <string.h>
57 #endif /* _KERNEL */
58
59 #include <cam/cam.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_periph.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_iosched.h>
65
66 #include <cam/scsi/scsi_message.h>
67 #include <cam/scsi/scsi_da.h>
68
69 #ifdef _KERNEL
70 /*
71 * Note that there are probe ordering dependencies here. The order isn't
72 * controlled by this enumeration, but by explicit state transitions in
73 * dastart() and dadone(). Here are some of the dependencies:
74 *
75 * 1. RC should come first, before RC16, unless there is evidence that RC16
76 * is supported.
77 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
78 * 3. The ATA probes should go in this order:
79 * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
80 */
/*
 * Driver state machine.  The DA_STATE_PROBE_* states are visited during
 * attach; transitions are made explicitly in dastart()/dadone() in the
 * order required by the dependencies documented in the comment above.
 * DA_STATE_NORMAL is the steady state for regular I/O once probing is
 * complete.
 */
typedef enum {
	DA_STATE_PROBE_WP,		/* write-protect status */
	DA_STATE_PROBE_RC,		/* READ CAPACITY(10) */
	DA_STATE_PROBE_RC16,		/* READ CAPACITY(16) */
	DA_STATE_PROBE_LBP,		/* logical block provisioning */
	DA_STATE_PROBE_BLK_LIMITS,	/* block limits */
	DA_STATE_PROBE_BDC,		/* block device characteristics */
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,	/* ATA log directory */
	DA_STATE_PROBE_ATA_IDDIR,	/* ATA identify-data log directory */
	DA_STATE_PROBE_ATA_SUP,		/* ATA supported capabilities */
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL
} da_state;
96
/*
 * Per-softc flags (bit mask).  Note that bit 0x000010 is currently
 * unassigned in this table (historical gap) -- do not reuse it without
 * checking the revision history.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000
} da_flags;
117
/*
 * Device quirks, applied from da_quirk_table when a device's inquiry
 * data matches an entry's pattern.
 */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100
} da_quirks;

/*
 * Bit string for pretty-printing da_quirks with the kernel's %b format:
 * the leading "\020" is the numeric base (16, i.e. hex) and each entry
 * pairs a 1-based bit number (octal escape) with its name.  Keep this
 * in sync with da_quirks above.
 */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"
142
/*
 * Per-CCB state, stored in the CCB's private ccb_state field.  The low
 * bits (DA_CCB_TYPE_MASK) encode what kind of command the CCB carries;
 * DA_CCB_RETRY_UA is a flag ORed on top of the type.  Type codes 0x08
 * and 0x09 are currently unused.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_PROBE_WP		= 0x12,
	DA_CCB_TYPE_MASK	= 0x1F,	/* mask extracting the type code */
	DA_CCB_RETRY_UA		= 0x20	/* flag: retry after Unit Attention */
} da_ccb_state;
163
164 /*
165 * Order here is important for method choice
166 *
167 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
168 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
169 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
170 * import taking 5mins.
171 *
172 */
/*
 * Delete (TRIM/UNMAP) methods, listed in order of preference (see the
 * comment above).  These values also index da_delete_functions,
 * da_delete_method_names and da_delete_method_desc below, so all four
 * must be kept in sync.  DA_DELETE_MIN/MAX bound the real methods,
 * excluding the NONE/DISABLE sentinels.
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,	/* first usable method */
	DA_DELETE_MAX = DA_DELETE_ZERO		/* last usable method */
} da_delete_methods;
184
185 /*
186 * For SCSI, host managed drives show up as a separate device type. For
187 * ATA, host managed drives also have a different device signature.
188 * XXX KDM figure out the ATA host managed signature.
189 */
/* Zoned-block-device model of the drive (see the comment above). */
typedef enum {
	DA_ZONE_NONE		= 0x00,	/* not a zoned device */
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;
196
197 /*
198 * We distinguish between these interface cases in addition to the drive type:
199 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
200 * o ATA drive behind a SCSI translation layer that does not know about
201 * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this
202 * case, we would need to share the ATA code with the ada(4) driver.
203 * o SCSI drive.
204 */
/* How zone commands reach the drive (see the interface comment above). */
typedef enum {
	DA_ZONE_IF_SCSI,	/* native SCSI/ZBC */
	DA_ZONE_IF_ATA_PASS,	/* ATA ZAC via passthrough */
	DA_ZONE_IF_ATA_SAT,	/* ATA behind a ZBC-aware SAT layer */
} da_zone_interface;
210
/*
 * Zone capability and probe-progress flags.  The *_SUP bits record
 * which zone commands the device supports (DA_ZONE_FLAG_SUP_MASK
 * collects them); the *_SET bits appear to track which of the
 * optimal/max zone counts in the softc have been filled in
 * (DA_ZONE_FLAG_SET_MASK collects those).  NOTE(review): URSWRZ
 * presumably abbreviates ZBC's "unrestricted read in sequential write
 * required zone" -- confirm against the spec.
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,	/* REPORT ZONES */
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,	/* RESET WRITE POINTER */
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;
230
/*
 * Human-readable names for the DA_ZONE_FLAG_*_SUP capability bits,
 * for pretty-printing zone command support.
 */
static struct da_zone_desc {
	da_zone_flags value;
	const char *desc;
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};
241
/* Common signature for all delete-method implementations. */
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

/*
 * Delete-method dispatch table, indexed by da_delete_methods.  The
 * NONE and DISABLE sentinels have no implementation; WS16, WS10 and
 * ZERO all share da_delete_ws.
 */
static const void * da_delete_functions[] = {
	NULL,			/* DA_DELETE_NONE */
	NULL,			/* DA_DELETE_DISABLE */
	da_delete_trim,		/* DA_DELETE_ATA_TRIM */
	da_delete_unmap,	/* DA_DELETE_UNMAP */
	da_delete_ws,		/* DA_DELETE_WS16 */
	da_delete_ws,		/* DA_DELETE_WS10 */
	da_delete_ws		/* DA_DELETE_ZERO */
};
257
/* Short and long descriptions, both indexed by da_delete_methods. */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0	/* da_ccb_state of this CCB */
#define ccb_bp		ppriv_ptr1	/* bio associated with this CCB */
267
/* Geometry and capacity parameters reported for the disk. */
struct disk_params {
	u_int8_t  heads;		/* fictitious geometry: heads */
	u_int32_t cylinders;		/* fictitious geometry: cylinders */
	u_int8_t  secs_per_track;	/* fictitious geometry: sectors/track */
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;	/* stripe (physical-block) size */
	u_int     stripeoffset;	/* stripe alignment offset */
};
277
/* UNMAP parameter-list layout and limits. */
#define UNMAP_RANGE_MAX		0xffffffff	/* max LBAs per descriptor */
#define UNMAP_HEAD_SIZE		8		/* parameter list header, bytes */
#define UNMAP_RANGE_SIZE	16		/* one block descriptor, bytes */
#define UNMAP_MAX_RANGES	2048		/* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				UNMAP_HEAD_SIZE)

/* WRITE SAME block-count limits (16-bit and 32-bit length fields). */
#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
/* Number of ATA DSM TRIM ranges that fit in the shared unmap_buf. */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

/*
 * NOTE(review): flag bit (bit 16) -- appears to be ORed into the
 * iosched work count to tag TUR work; confirm against dastart()/
 * cam_iosched usage, which is outside this chunk.
 */
#define DA_WORK_TUR		(1 << 16)
291
/* Per-drive instance state for the da(4) peripheral driver. */
struct da_softc {
	struct   cam_iosched_softc *cam_iosched;
	struct	 bio_queue_head delete_run_queue; /* bios of in-flight delete */
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;			/* probe/normal state machine */
	da_flags flags;
	da_quirks quirks;		/* from da_quirk_table match */
	int	 minimum_cmd_size;	/* smallest CDB size to use (6/10/...) */
	int	 error_inject;		/* sysctl-driven error injection */
	int	 trim_max_ranges;
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode			zone_mode;
	da_zone_interface		zone_interface;
	da_zone_flags			zone_flags;
	struct ata_gp_log_dir		ata_logdir;
	int				valid_logdir_len; /* bytes of ata_logdir valid */
	struct ata_identify_log_pages	ata_iddir;
	int				valid_iddir_len; /* bytes of ata_iddir valid */
	uint64_t			optimal_seq_zones;
	uint64_t			optimal_nonseq_zones;
	uint64_t			max_seq_zones;
	u_int			maxio;		/* max I/O size, bytes */
	uint32_t		unmap_max_ranges;
	uint32_t		unmap_max_lba;	/* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;	/* UNMAP granularity */
	uint32_t		unmap_gran_align; /* UNMAP alignment */
	uint64_t		ws_max_blks;	/* WRITE SAME block limit */
	da_delete_methods	delete_method_pref; /* user/driver preference */
	da_delete_methods	delete_method;	/* method currently in use */
	da_delete_func_t	*delete_func;	/* da_delete_functions entry */
	int			unmappedio;	/* unmapped (sglist-less) I/O ok */
	int			rotating;	/* 0 for SSD-like devices */
	struct	 disk_params params;
	struct	 disk *disk;		/* GEOM disk */
	union	 ccb saved_ccb;
	struct task		sysctl_task;	/* deferred sysctl tree setup */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;	/* periodic ordered-tag timer */
	uint64_t wwpn;			/* world-wide port name, if known */
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];	/* UNMAP/TRIM payload buffer */
	struct scsi_read_capacity_data_long rcaplong;
	struct callout		mediapoll_c;	/* media-change poll timer */
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int	errors;			/* I/O error count */
	u_int	timeouts;		/* command timeout count */
	u_int	invalidations;		/* device invalidation count */
#endif
};
344
/*
 * Set (enable != 0) or clear (enable == 0) the bit for a delete method
 * in softc->delete_available, the mask of delete methods the device may
 * support.  delete_method is a da_delete_methods value.
 *
 * Wrapped in do { } while (0) so the expansion plus the caller's
 * trailing semicolon forms exactly one statement; the previous bare
 * if/else form was a syntax error inside an unbraced if/else at the
 * call site (CERT PRE10-C).  Arguments are parenthesized against
 * operator-precedence surprises.
 */
#define dadeleteflag(softc, delete_method, enable)			\
do {									\
	if (enable) {							\
		(softc)->delete_available |= (1 << (delete_method));	\
	} else {							\
		(softc)->delete_available &= ~(1 << (delete_method));	\
	}								\
} while (0)
351
/* One quirk-table entry: inquiry match pattern plus the quirks to apply. */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* vendor/product/rev pattern */
	da_quirks quirks;			/* quirks for matching devices */
};

/* Vendor strings shared by multiple da_quirk_table entries below. */
static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";
359
360 static struct da_quirk_entry da_quirk_table[] =
361 {
362 /* SPI, FC devices */
363 {
364 /*
365 * Fujitsu M2513A MO drives.
366 * Tested devices: M2513A2 firmware versions 1200 & 1300.
367 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
368 * Reported by: W.Scholten <whs@xs4all.nl>
369 */
370 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
371 /*quirks*/ DA_Q_NO_SYNC_CACHE
372 },
373 {
374 /* See above. */
375 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
376 /*quirks*/ DA_Q_NO_SYNC_CACHE
377 },
378 {
379 /*
380 * This particular Fujitsu drive doesn't like the
381 * synchronize cache command.
382 * Reported by: Tom Jackson <toj@gorilla.net>
383 */
384 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
385 /*quirks*/ DA_Q_NO_SYNC_CACHE
386 },
387 {
388 /*
389 * This drive doesn't like the synchronize cache command
390 * either. Reported by: Matthew Jacob <mjacob@feral.com>
391 * in NetBSD PR kern/6027, August 24, 1998.
392 */
393 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
394 /*quirks*/ DA_Q_NO_SYNC_CACHE
395 },
396 {
397 /*
398 * This drive doesn't like the synchronize cache command
399 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
400 * (PR 8882).
401 */
402 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
403 /*quirks*/ DA_Q_NO_SYNC_CACHE
404 },
405 {
406 /*
407 * Doesn't like the synchronize cache command.
408 * Reported by: Blaz Zupan <blaz@gold.amis.net>
409 */
410 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
411 /*quirks*/ DA_Q_NO_SYNC_CACHE
412 },
413 {
414 /*
415 * Doesn't like the synchronize cache command.
416 * Reported by: Blaz Zupan <blaz@gold.amis.net>
417 */
418 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
419 /*quirks*/ DA_Q_NO_SYNC_CACHE
420 },
421 {
422 /*
423 * Doesn't like the synchronize cache command.
424 */
425 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
426 /*quirks*/ DA_Q_NO_SYNC_CACHE
427 },
428 {
429 /*
430 * Doesn't like the synchronize cache command.
431 * Reported by: walter@pelissero.de
432 */
433 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
434 /*quirks*/ DA_Q_NO_SYNC_CACHE
435 },
436 {
437 /*
438 * Doesn't work correctly with 6 byte reads/writes.
439 * Returns illegal request, and points to byte 9 of the
440 * 6-byte CDB.
441 * Reported by: Adam McDougall <bsdx@spawnet.com>
442 */
443 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
444 /*quirks*/ DA_Q_NO_6_BYTE
445 },
446 {
447 /* See above. */
448 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
449 /*quirks*/ DA_Q_NO_6_BYTE
450 },
451 {
452 /*
453 * Doesn't like the synchronize cache command.
454 * Reported by: walter@pelissero.de
455 */
456 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
457 /*quirks*/ DA_Q_NO_SYNC_CACHE
458 },
459 {
460 /*
461 * The CISS RAID controllers do not support SYNC_CACHE
462 */
463 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
464 /*quirks*/ DA_Q_NO_SYNC_CACHE
465 },
466 {
467 /*
468 * The STEC SSDs sometimes hang on UNMAP.
469 */
470 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
471 /*quirks*/ DA_Q_NO_UNMAP
472 },
473 {
474 /*
475 * VMware returns BUSY status when storage has transient
476 * connectivity problems, so better wait.
477 * Also VMware returns odd errors on misaligned UNMAPs.
478 */
479 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
480 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
481 },
482 /* USB mass storage devices supported by umass(4) */
483 {
484 /*
485 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
486 * PR: kern/51675
487 */
488 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
489 /*quirks*/ DA_Q_NO_SYNC_CACHE
490 },
491 {
492 /*
493 * Power Quotient Int. (PQI) USB flash key
494 * PR: kern/53067
495 */
496 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
497 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
498 },
499 {
500 /*
501 * Creative Nomad MUVO mp3 player (USB)
502 * PR: kern/53094
503 */
504 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
505 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
506 },
507 {
508 /*
509 * Jungsoft NEXDISK USB flash key
510 * PR: kern/54737
511 */
512 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
513 /*quirks*/ DA_Q_NO_SYNC_CACHE
514 },
515 {
516 /*
517 * FreeDik USB Mini Data Drive
518 * PR: kern/54786
519 */
520 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
521 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
522 },
523 {
524 /*
525 * Sigmatel USB Flash MP3 Player
526 * PR: kern/57046
527 */
528 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
529 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
530 },
531 {
532 /*
533 * Neuros USB Digital Audio Computer
534 * PR: kern/63645
535 */
536 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
537 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
538 },
539 {
540 /*
541 * SEAGRAND NP-900 MP3 Player
542 * PR: kern/64563
543 */
544 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
545 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
546 },
547 {
548 /*
549 * iRiver iFP MP3 player (with UMS Firmware)
550 * PR: kern/54881, i386/63941, kern/66124
551 */
552 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
553 /*quirks*/ DA_Q_NO_SYNC_CACHE
554 },
555 {
556 /*
557 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
558 * PR: kern/70158
559 */
560 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
561 /*quirks*/ DA_Q_NO_SYNC_CACHE
562 },
563 {
564 /*
565 * ZICPlay USB MP3 Player with FM
566 * PR: kern/75057
567 */
568 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
569 /*quirks*/ DA_Q_NO_SYNC_CACHE
570 },
571 {
572 /*
573 * TEAC USB floppy mechanisms
574 */
575 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
576 /*quirks*/ DA_Q_NO_SYNC_CACHE
577 },
578 {
579 /*
580 * Kingston DataTraveler II+ USB Pen-Drive.
581 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
582 */
583 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
584 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
585 },
586 {
587 /*
588 * USB DISK Pro PMAP
589 * Reported by: jhs
590 * PR: usb/96381
591 */
592 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
593 /*quirks*/ DA_Q_NO_SYNC_CACHE
594 },
595 {
596 /*
597 * Motorola E398 Mobile Phone (TransFlash memory card).
598 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
599 * PR: usb/89889
600 */
601 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
602 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
603 },
604 {
605 /*
606 * Qware BeatZkey! Pro
607 * PR: usb/79164
608 */
609 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
610 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
611 },
612 {
613 /*
614 * Time DPA20B 1GB MP3 Player
615 * PR: usb/81846
616 */
617 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
618 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
619 },
620 {
621 /*
622 * Samsung USB key 128Mb
623 * PR: usb/90081
624 */
625 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
626 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
627 },
628 {
629 /*
630 * Kingston DataTraveler 2.0 USB Flash memory.
631 * PR: usb/89196
632 */
633 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
634 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
635 },
636 {
637 /*
638 * Creative MUVO Slim mp3 player (USB)
639 * PR: usb/86131
640 */
641 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
642 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
643 },
644 {
645 /*
646 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
647 * PR: usb/80487
648 */
649 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
650 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
651 },
652 {
653 /*
654 * SanDisk Micro Cruzer 128MB
655 * PR: usb/75970
656 */
657 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
658 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
659 },
660 {
661 /*
662 * TOSHIBA TransMemory USB sticks
663 * PR: kern/94660
664 */
665 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
666 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
667 },
668 {
669 /*
670 * PNY USB 3.0 Flash Drives
671 */
672 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
673 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
674 },
675 {
676 /*
677 * PNY USB Flash keys
678 * PR: usb/75578, usb/72344, usb/65436
679 */
680 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
681 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
682 },
683 {
684 /*
685 * Genesys GL3224
686 */
687 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
688 "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
689 },
690 {
691 /*
692 * Genesys 6-in-1 Card Reader
693 * PR: usb/94647
694 */
695 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
696 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
697 },
698 {
699 /*
700 * Rekam Digital CAMERA
701 * PR: usb/98713
702 */
703 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
704 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
705 },
706 {
707 /*
708 * iRiver H10 MP3 player
709 * PR: usb/102547
710 */
711 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
712 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
713 },
714 {
715 /*
716 * iRiver U10 MP3 player
717 * PR: usb/92306
718 */
719 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
720 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
721 },
722 {
723 /*
724 * X-Micro Flash Disk
725 * PR: usb/96901
726 */
727 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
728 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
729 },
730 {
731 /*
732 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
733 * PR: usb/96546
734 */
735 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
736 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
737 },
738 {
739 /*
740 * Denver MP3 player
741 * PR: usb/107101
742 */
743 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
744 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
745 },
746 {
747 /*
748 * Philips USB Key Audio KEY013
749 * PR: usb/68412
750 */
751 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
752 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
753 },
754 {
755 /*
756 * JNC MP3 Player
757 * PR: usb/94439
758 */
759 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
760 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
761 },
762 {
763 /*
764 * SAMSUNG MP0402H
765 * PR: usb/108427
766 */
767 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
768 /*quirks*/ DA_Q_NO_SYNC_CACHE
769 },
770 {
771 /*
772 * I/O Magic USB flash - Giga Bank
773 * PR: usb/108810
774 */
775 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
776 /*quirks*/ DA_Q_NO_SYNC_CACHE
777 },
778 {
779 /*
780 * JoyFly 128mb USB Flash Drive
781 * PR: 96133
782 */
783 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
784 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
785 },
786 {
787 /*
788 * ChipsBnk usb stick
789 * PR: 103702
790 */
791 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
792 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
793 },
794 {
795 /*
796 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
797 * PR: 129858
798 */
799 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
800 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
801 },
802 {
803 /*
804 * Samsung YP-U3 mp3-player
805 * PR: 125398
806 */
807 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
808 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
809 },
810 {
811 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
812 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
813 },
814 {
815 /*
816 * Sony Cyber-Shot DSC cameras
817 * PR: usb/137035
818 */
819 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
820 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
821 },
822 {
823 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
824 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
825 },
826 {
827 /* At least several Transcent USB sticks lie on RC16. */
828 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
829 "*"}, /*quirks*/ DA_Q_NO_RC16
830 },
831 {
832 /*
833 * I-O Data USB Flash Disk
834 * PR: usb/211716
835 */
836 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
837 "*"}, /*quirks*/ DA_Q_NO_RC16
838 },
839 /* ATA/SATA devices over SAS/USB/... */
840 {
841 /* Hitachi Advanced Format (4k) drives */
842 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
843 /*quirks*/DA_Q_4K
844 },
845 {
846 /* Micron Advanced Format (4k) drives */
847 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
848 /*quirks*/DA_Q_4K
849 },
850 {
851 /* Samsung Advanced Format (4k) drives */
852 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
853 /*quirks*/DA_Q_4K
854 },
855 {
856 /* Samsung Advanced Format (4k) drives */
857 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
858 /*quirks*/DA_Q_4K
859 },
860 {
861 /* Samsung Advanced Format (4k) drives */
862 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
863 /*quirks*/DA_Q_4K
864 },
865 {
866 /* Samsung Advanced Format (4k) drives */
867 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
868 /*quirks*/DA_Q_4K
869 },
870 {
871 /* Seagate Barracuda Green Advanced Format (4k) drives */
872 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
873 /*quirks*/DA_Q_4K
874 },
875 {
876 /* Seagate Barracuda Green Advanced Format (4k) drives */
877 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
878 /*quirks*/DA_Q_4K
879 },
880 {
881 /* Seagate Barracuda Green Advanced Format (4k) drives */
882 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
883 /*quirks*/DA_Q_4K
884 },
885 {
886 /* Seagate Barracuda Green Advanced Format (4k) drives */
887 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
888 /*quirks*/DA_Q_4K
889 },
890 {
891 /* Seagate Barracuda Green Advanced Format (4k) drives */
892 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
893 /*quirks*/DA_Q_4K
894 },
895 {
896 /* Seagate Barracuda Green Advanced Format (4k) drives */
897 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
898 /*quirks*/DA_Q_4K
899 },
900 {
901 /* Seagate Momentus Advanced Format (4k) drives */
902 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
903 /*quirks*/DA_Q_4K
904 },
905 {
906 /* Seagate Momentus Advanced Format (4k) drives */
907 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
908 /*quirks*/DA_Q_4K
909 },
910 {
911 /* Seagate Momentus Advanced Format (4k) drives */
912 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
913 /*quirks*/DA_Q_4K
914 },
915 {
916 /* Seagate Momentus Advanced Format (4k) drives */
917 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
918 /*quirks*/DA_Q_4K
919 },
920 {
921 /* Seagate Momentus Advanced Format (4k) drives */
922 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
923 /*quirks*/DA_Q_4K
924 },
925 {
926 /* Seagate Momentus Advanced Format (4k) drives */
927 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
928 /*quirks*/DA_Q_4K
929 },
930 {
931 /* Seagate Momentus Advanced Format (4k) drives */
932 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
933 /*quirks*/DA_Q_4K
934 },
935 {
936 /* Seagate Momentus Advanced Format (4k) drives */
937 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
938 /*quirks*/DA_Q_4K
939 },
940 {
941 /* Seagate Momentus Advanced Format (4k) drives */
942 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
943 /*quirks*/DA_Q_4K
944 },
945 {
946 /* Seagate Momentus Advanced Format (4k) drives */
947 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
948 /*quirks*/DA_Q_4K
949 },
950 {
951 /* Seagate Momentus Advanced Format (4k) drives */
952 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
953 /*quirks*/DA_Q_4K
954 },
955 {
956 /* Seagate Momentus Advanced Format (4k) drives */
957 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
958 /*quirks*/DA_Q_4K
959 },
960 {
961 /* Seagate Momentus Advanced Format (4k) drives */
962 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
963 /*quirks*/DA_Q_4K
964 },
965 {
966 /* Seagate Momentus Advanced Format (4k) drives */
967 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
968 /*quirks*/DA_Q_4K
969 },
970 {
971 /* Seagate Momentus Thin Advanced Format (4k) drives */
972 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
973 /*quirks*/DA_Q_4K
974 },
975 {
976 /* Seagate Momentus Thin Advanced Format (4k) drives */
977 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
978 /*quirks*/DA_Q_4K
979 },
980 {
981 /* WDC Caviar Green Advanced Format (4k) drives */
982 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
983 /*quirks*/DA_Q_4K
984 },
985 {
986 /* WDC Caviar Green Advanced Format (4k) drives */
987 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
988 /*quirks*/DA_Q_4K
989 },
990 {
991 /* WDC Caviar Green Advanced Format (4k) drives */
992 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
993 /*quirks*/DA_Q_4K
994 },
995 {
996 /* WDC Caviar Green Advanced Format (4k) drives */
997 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
998 /*quirks*/DA_Q_4K
999 },
1000 {
1001 /* WDC Caviar Green Advanced Format (4k) drives */
1002 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1003 /*quirks*/DA_Q_4K
1004 },
1005 {
1006 /* WDC Caviar Green Advanced Format (4k) drives */
1007 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1008 /*quirks*/DA_Q_4K
1009 },
1010 {
1011 /* WDC Caviar Green Advanced Format (4k) drives */
1012 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1013 /*quirks*/DA_Q_4K
1014 },
1015 {
1016 /* WDC Caviar Green Advanced Format (4k) drives */
1017 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1018 /*quirks*/DA_Q_4K
1019 },
1020 {
1021 /* WDC Scorpio Black Advanced Format (4k) drives */
1022 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1023 /*quirks*/DA_Q_4K
1024 },
1025 {
1026 /* WDC Scorpio Black Advanced Format (4k) drives */
1027 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1028 /*quirks*/DA_Q_4K
1029 },
1030 {
1031 /* WDC Scorpio Black Advanced Format (4k) drives */
1032 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1033 /*quirks*/DA_Q_4K
1034 },
1035 {
1036 /* WDC Scorpio Black Advanced Format (4k) drives */
1037 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1038 /*quirks*/DA_Q_4K
1039 },
1040 {
1041 /* WDC Scorpio Blue Advanced Format (4k) drives */
1042 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1043 /*quirks*/DA_Q_4K
1044 },
1045 {
1046 /* WDC Scorpio Blue Advanced Format (4k) drives */
1047 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1048 /*quirks*/DA_Q_4K
1049 },
1050 {
1051 /* WDC Scorpio Blue Advanced Format (4k) drives */
1052 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1053 /*quirks*/DA_Q_4K
1054 },
1055 {
1056 /* WDC Scorpio Blue Advanced Format (4k) drives */
1057 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1058 /*quirks*/DA_Q_4K
1059 },
1060 {
1061 /*
1062 * Olympus FE-210 camera
1063 */
1064 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1065 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1066 },
1067 {
1068 /*
1069 * LG UP3S MP3 player
1070 */
1071 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1072 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1073 },
1074 {
1075 /*
1076 * Laser MP3-2GA13 MP3 player
1077 */
1078 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1079 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1080 },
1081 {
1082 /*
1083 * LaCie external 250GB Hard drive des by Porsche
1084 * Submitted by: Ben Stuyts <ben@altesco.nl>
1085 * PR: 121474
1086 */
1087 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1088 /*quirks*/ DA_Q_NO_SYNC_CACHE
1089 },
1090 /* SATA SSDs */
1091 {
1092 /*
1093 * Corsair Force 2 SSDs
1094 * 4k optimised & trim only works in 4k requests + 4k aligned
1095 */
1096 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1097 /*quirks*/DA_Q_4K
1098 },
1099 {
1100 /*
1101 * Corsair Force 3 SSDs
1102 * 4k optimised & trim only works in 4k requests + 4k aligned
1103 */
1104 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1105 /*quirks*/DA_Q_4K
1106 },
1107 {
1108 /*
1109 * Corsair Neutron GTX SSDs
1110 * 4k optimised & trim only works in 4k requests + 4k aligned
1111 */
1112 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1113 /*quirks*/DA_Q_4K
1114 },
1115 {
1116 /*
1117 * Corsair Force GT & GS SSDs
1118 * 4k optimised & trim only works in 4k requests + 4k aligned
1119 */
1120 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1121 /*quirks*/DA_Q_4K
1122 },
1123 {
1124 /*
1125 * Crucial M4 SSDs
1126 * 4k optimised & trim only works in 4k requests + 4k aligned
1127 */
1128 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1129 /*quirks*/DA_Q_4K
1130 },
1131 {
1132 /*
1133 * Crucial RealSSD C300 SSDs
1134 * 4k optimised
1135 */
1136 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1137 "*" }, /*quirks*/DA_Q_4K
1138 },
1139 {
1140 /*
1141 * Intel 320 Series SSDs
1142 * 4k optimised & trim only works in 4k requests + 4k aligned
1143 */
1144 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1145 /*quirks*/DA_Q_4K
1146 },
1147 {
1148 /*
1149 * Intel 330 Series SSDs
1150 * 4k optimised & trim only works in 4k requests + 4k aligned
1151 */
1152 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1153 /*quirks*/DA_Q_4K
1154 },
1155 {
1156 /*
1157 * Intel 510 Series SSDs
1158 * 4k optimised & trim only works in 4k requests + 4k aligned
1159 */
1160 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1161 /*quirks*/DA_Q_4K
1162 },
1163 {
1164 /*
1165 * Intel 520 Series SSDs
1166 * 4k optimised & trim only works in 4k requests + 4k aligned
1167 */
1168 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1169 /*quirks*/DA_Q_4K
1170 },
1171 {
1172 /*
1173 * Intel S3610 Series SSDs
1174 * 4k optimised & trim only works in 4k requests + 4k aligned
1175 */
1176 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1177 /*quirks*/DA_Q_4K
1178 },
1179 {
1180 /*
1181 * Intel X25-M Series SSDs
1182 * 4k optimised & trim only works in 4k requests + 4k aligned
1183 */
1184 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1185 /*quirks*/DA_Q_4K
1186 },
1187 {
1188 /*
1189 * Kingston E100 Series SSDs
1190 * 4k optimised & trim only works in 4k requests + 4k aligned
1191 */
1192 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1193 /*quirks*/DA_Q_4K
1194 },
1195 {
1196 /*
1197 * Kingston HyperX 3k SSDs
1198 * 4k optimised & trim only works in 4k requests + 4k aligned
1199 */
1200 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1201 /*quirks*/DA_Q_4K
1202 },
1203 {
1204 /*
1205 * Marvell SSDs (entry taken from OpenSolaris)
1206 * 4k optimised & trim only works in 4k requests + 4k aligned
1207 */
1208 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1209 /*quirks*/DA_Q_4K
1210 },
1211 {
1212 /*
1213 * OCZ Agility 2 SSDs
1214 * 4k optimised & trim only works in 4k requests + 4k aligned
1215 */
1216 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1217 /*quirks*/DA_Q_4K
1218 },
1219 {
1220 /*
1221 * OCZ Agility 3 SSDs
1222 * 4k optimised & trim only works in 4k requests + 4k aligned
1223 */
1224 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1225 /*quirks*/DA_Q_4K
1226 },
1227 {
1228 /*
1229 * OCZ Deneva R Series SSDs
1230 * 4k optimised & trim only works in 4k requests + 4k aligned
1231 */
1232 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1233 /*quirks*/DA_Q_4K
1234 },
1235 {
1236 /*
1237 * OCZ Vertex 2 SSDs (inc pro series)
1238 * 4k optimised & trim only works in 4k requests + 4k aligned
1239 */
1240 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1241 /*quirks*/DA_Q_4K
1242 },
1243 {
1244 /*
1245 * OCZ Vertex 3 SSDs
1246 * 4k optimised & trim only works in 4k requests + 4k aligned
1247 */
1248 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1249 /*quirks*/DA_Q_4K
1250 },
1251 {
1252 /*
1253 * OCZ Vertex 4 SSDs
1254 * 4k optimised & trim only works in 4k requests + 4k aligned
1255 */
1256 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1257 /*quirks*/DA_Q_4K
1258 },
1259 {
1260 /*
1261 * Samsung 750 Series SSDs
1262 * 4k optimised & trim only works in 4k requests + 4k aligned
1263 */
1264 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1265 /*quirks*/DA_Q_4K
1266 },
1267 {
1268 /*
1269 * Samsung 830 Series SSDs
1270 * 4k optimised & trim only works in 4k requests + 4k aligned
1271 */
1272 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1273 /*quirks*/DA_Q_4K
1274 },
1275 {
1276 /*
1277 * Samsung 840 SSDs
1278 * 4k optimised & trim only works in 4k requests + 4k aligned
1279 */
1280 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1281 /*quirks*/DA_Q_4K
1282 },
1283 {
1284 /*
1285 * Samsung 845 SSDs
1286 * 4k optimised & trim only works in 4k requests + 4k aligned
1287 */
1288 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1289 /*quirks*/DA_Q_4K
1290 },
1291 {
1292 /*
1293 * Samsung 850 SSDs
1294 * 4k optimised & trim only works in 4k requests + 4k aligned
1295 */
1296 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1297 /*quirks*/DA_Q_4K
1298 },
1299 {
1300 /*
1301 * Samsung 843T Series SSDs (MZ7WD*)
1302 * Samsung PM851 Series SSDs (MZ7TE*)
1303 * Samsung PM853T Series SSDs (MZ7GE*)
1304 * Samsung SM863 Series SSDs (MZ7KM*)
1305 * 4k optimised
1306 */
1307 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1308 /*quirks*/DA_Q_4K
1309 },
1310 {
1311 /*
1312 * Same as for SAMSUNG MZ7* but enable the quirks for SSD
1313 * starting with MZ7* too
1314 */
1315 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1316 /*quirks*/DA_Q_4K
1317 },
1318 {
1319 /*
1320 * SuperTalent TeraDrive CT SSDs
1321 * 4k optimised & trim only works in 4k requests + 4k aligned
1322 */
1323 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1324 /*quirks*/DA_Q_4K
1325 },
1326 {
1327 /*
1328 * XceedIOPS SATA SSDs
1329 * 4k optimised
1330 */
1331 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1332 /*quirks*/DA_Q_4K
1333 },
1334 {
1335 /*
1336 * Hama Innostor USB-Stick
1337 */
1338 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1339 /*quirks*/DA_Q_NO_RC16
1340 },
1341 {
1342 /*
1343 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1344 * Drive Managed SATA hard drive. This drive doesn't report
1345 * in firmware that it is a drive managed SMR drive.
1346 */
1347 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1348 /*quirks*/DA_Q_SMR_DM
1349 },
1350 {
1351 /*
1352 * MX-ES USB Drive by Mach Xtreme
1353 */
1354 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1355 /*quirks*/DA_Q_NO_RC16
1356 },
1357 };
1358
/*
 * Forward declarations for the da(4) driver's GEOM entry points,
 * sysctl handlers, and internal helpers.
 */
static disk_strategy_t dastrategy;
static dumper_t dadump;
static periph_init_t dainit;
static void daasync(void *callback_arg, u_int32_t code,
    struct cam_path *path, void *arg);
static void dasysctlinit(void *context, int pending);
static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
static void dadeletemethodset(struct da_softc *softc,
    da_delete_methods delete_method);
static off_t dadeletemaxsize(struct da_softc *softc,
    da_delete_methods delete_method);
static void dadeletemethodchoose(struct da_softc *softc,
    da_delete_methods default_method);
static void daprobedone(struct cam_periph *periph, union ccb *ccb);

/* CAM peripheral methods, wired into dadriver/cam_periph_alloc() below. */
static periph_ctor_t daregister;
static periph_dtor_t dacleanup;
static periph_start_t dastart;
static periph_oninv_t daoninvalidate;
static void dazonedone(struct cam_periph *periph, union ccb *ccb);
static void dadone(struct cam_periph *periph,
    union ccb *done_ccb);
static int daerror(union ccb *ccb, u_int32_t cam_flags,
    u_int32_t sense_flags);
static void daprevent(struct cam_periph *periph, int action);
static void dareprobe(struct cam_periph *periph);
static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
    uint64_t maxsector,
    struct scsi_read_capacity_data_long *rcaplong,
    size_t rcap_size);
static timeout_t dasendorderedtag;
static void dashutdown(void *arg, int howto);
static timeout_t damediapoll;

/* Compile-time defaults; each may be overridden in the kernel config. */
#ifndef DA_DEFAULT_POLL_PERIOD
#define DA_DEFAULT_POLL_PERIOD 3
#endif

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_SOFTTIMEOUT
#define DA_DEFAULT_SOFTTIMEOUT 0
#endif

#ifndef DA_DEFAULT_RETRY
#define DA_DEFAULT_RETRY 4
#endif

#ifndef DA_DEFAULT_SEND_ORDERED
#define DA_DEFAULT_SEND_ORDERED 1
#endif

/* Run-time knobs, exposed via the sysctls/tunables declared below. */
static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
           &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
           &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
           &da_send_ordered, 0, "Send Ordered Tags");

/* Stored as an sbintime_t but exported in milliseconds; see the handler. */
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
    "Soft I/O timeout (ms)");
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1465
/*
 * GEOM ::d_open entry point for da(4).
 *
 * Takes a reference on the peripheral, holds it while kicking off a
 * reprobe, and sleeps until the probe state machine updates the media
 * size before declaring the open successful.  Returns 0 or an errno.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update. */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	/* The periph may have been invalidated while we slept. */
	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Lock removable media in place, unless the device forbids it. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);

	/* On failure daclose() will never run, so drop the open's ref now. */
	if (error != 0)
		cam_periph_release(periph);

	return (error);
}
1517
/*
 * GEOM ::d_close entry point for da(4).
 *
 * Best-effort teardown of an open: flush the write cache if the device
 * was written to, re-allow media removal, wait for in-flight commands
 * to drain, and drop the reference taken in daopen().  Always returns 0.
 */
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	if (cam_periph_hold(periph, PRIBIO) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			/*
			 * Best effort: the result is deliberately ignored
			 * beyond daerror()'s recovery, since close cannot
			 * report failure.
			 */
			error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		cam_periph_unhold(periph);
	}

	/*
	 * If we've got removeable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/* Wait for any commands still referencing the softc to finish. */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}
1573
1574 static void
1575 daschedule(struct cam_periph *periph)
1576 {
1577 struct da_softc *softc = (struct da_softc *)periph->softc;
1578
1579 if (softc->state != DA_STATE_NORMAL)
1580 return;
1581
1582 cam_iosched_schedule(softc->cam_iosched, periph);
1583 }
1584
1585 /*
1586 * Actually translate the requested transfer into one the physical driver
1587 * can understand. The transfer is described by a buf and will include
1588 * only one physical transfer.
1589 */
1590 static void
1591 dastrategy(struct bio *bp)
1592 {
1593 struct cam_periph *periph;
1594 struct da_softc *softc;
1595
1596 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1597 softc = (struct da_softc *)periph->softc;
1598
1599 cam_periph_lock(periph);
1600
1601 /*
1602 * If the device has been made invalid, error out
1603 */
1604 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1605 cam_periph_unlock(periph);
1606 biofinish(bp, NULL, ENXIO);
1607 return;
1608 }
1609
1610 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1611
1612 /*
1613 * Zone commands must be ordered, because they can depend on the
1614 * effects of previously issued commands, and they may affect
1615 * commands after them.
1616 */
1617 if (bp->bio_cmd == BIO_ZONE)
1618 bp->bio_flags |= BIO_ORDERED;
1619
1620 /*
1621 * Place it in the queue of disk activities for this disk
1622 */
1623 cam_iosched_queue_work(softc->cam_iosched, bp);
1624
1625 /*
1626 * Schedule ourselves for performing the work.
1627 */
1628 daschedule(periph);
1629 cam_periph_unlock(periph);
1630
1631 return;
1632 }
1633
/*
 * Kernel crash-dump routine (::dumper_t).  The system is wedged when
 * this runs, so every CCB is issued synchronously via
 * xpt_polled_action() rather than relying on interrupts.  A call with
 * length > 0 writes one chunk of dump data; the final call with
 * length == 0 flushes the drive's write cache.  Returns 0 or an errno.
 */
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;

	/* Nothing to write to if the pack has been invalidated. */
	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		/* No retries: a dump should fail fast rather than wedge. */
		scsi_read_write(&csio,
				/*retries*/0,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		/* Release any devq freeze by hand; no completion path here. */
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		cam_periph_unlock(periph);
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	cam_periph_unlock(periph);
	return (error);
}
1712
1713 static int
1714 dagetattr(struct bio *bp)
1715 {
1716 int ret;
1717 struct cam_periph *periph;
1718
1719 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1720 cam_periph_lock(periph);
1721 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1722 periph->path);
1723 cam_periph_unlock(periph);
1724 if (ret == 0)
1725 bp->bio_completed = bp->bio_length;
1726 return ret;
1727 }
1728
1729 static void
1730 dainit(void)
1731 {
1732 cam_status status;
1733
1734 /*
1735 * Install a global async callback. This callback will
1736 * receive async callbacks like "new device found".
1737 */
1738 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
1739
1740 if (status != CAM_REQ_CMP) {
1741 printf("da: Failed to attach master async callback "
1742 "due to status 0x%x!\n", status);
1743 } else if (da_send_ordered) {
1744
1745 /* Register our shutdown event handler */
1746 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
1747 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1748 printf("dainit: shutdown event registration failed!\n");
1749 }
1750 }
1751
1752 /*
1753 * Callback from GEOM, called when it has finished cleaning up its
1754 * resources.
1755 */
1756 static void
1757 dadiskgonecb(struct disk *dp)
1758 {
1759 struct cam_periph *periph;
1760
1761 periph = (struct cam_periph *)dp->d_drv1;
1762 cam_periph_release(periph);
1763 }
1764
/*
 * CAM periph "oninvalidate" method: the underlying device is going
 * away.  Stop async delivery, fail all queued I/O, and start GEOM
 * teardown; dadiskgonecb() and dacleanup() finish the job later.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 * with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}
1795
/*
 * CAM periph destructor: free everything the registration path set up.
 * Entered with the periph lock held; the lock is dropped around the
 * sysctl/iosched teardown (which may sleep) and re-taken on return.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	/* Drain the callouts before the disk and softc are freed. */
	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
1827
/*
 * Global/per-periph async event handler.  Handles new-device arrival
 * (allocating a da periph), advanced-info changes, unit attentions
 * from other initiators, AENs, resets, and inquiry changes.  All codes
 * except AC_FOUND_DEVICE fall through to cam_periph_async() at the end.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only claim SCSI direct-access-class logical units. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		return;
	}
	case AC_ADVINFO_CHANGED:
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			/* Propagate the new physical path up to GEOM. */
			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Handle all UNIT ATTENTIONs except our own,
		 * as they will be handled by daerror().
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			/* 2A/09: CAPACITY DATA HAS CHANGED */
			if (asc == 0x2A && ascq == 0x09) {
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			} else if (asc == 0x28 && ascq == 0x00) {
				/* 28/00: NOT READY TO READY CHANGE */
				softc->flags &= ~DA_FLAG_PROBED;
				disk_media_changed(softc->disk, M_NOWAIT);
			} else if (asc == 0x3F && ascq == 0x03) {
				/* 3F/03: INQUIRY DATA HAS CHANGED */
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			}
		}
		/*
		 * NOTE(review): this case also reaches the common
		 * cam_periph_async() call after the switch, so the event
		 * appears to be dispatched twice for AC_UNIT_ATTENTION --
		 * confirm whether this break should be a return or the
		 * call here should be dropped.
		 */
		cam_periph_async(periph, code, path, arg);
		break;
	}
	case AC_SCSI_AEN:
		/* Schedule a TEST UNIT READY if one isn't already queued. */
		softc = (struct da_softc *)periph->softc;
		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct ccb_hdr *ccbh;

		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:
		softc = (struct da_softc *)periph->softc;
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	cam_periph_async(periph, code, path, arg);
}
1954
/*
 * Deferred (taskqueue) per-unit sysctl setup: builds the
 * kern.cam.da.N tree and its leaves.  The periph reference taken when
 * the task was enqueued is released on every exit path.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= DA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");

	/* Zoned (SMR) device information leaves. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "unmapped_io",
		       CTLFLAG_RD,
		       &softc->unmappedio,
		       0,
		       "Unmapped I/O leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "rotating",
		       CTLFLAG_RD,
		       &softc->rotating,
		       0,
		       "Rotating media");

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		cam_periph_release(periph);
		return;
	}
	/* On Fibre Channel transports, expose the WWPN if we got one. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	cam_periph_release(periph);
}
2118
2119 static int
2120 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2121 {
2122 int error;
2123 uint64_t value;
2124 struct da_softc *softc;
2125
2126 softc = (struct da_softc *)arg1;
2127
2128 value = softc->disk->d_delmaxsize;
2129 error = sysctl_handle_64(oidp, &value, 0, req);
2130 if ((error != 0) || (req->newptr == NULL))
2131 return (error);
2132
2133 /* only accept values smaller than the calculated value */
2134 if (value > dadeletemaxsize(softc, softc->delete_method)) {
2135 return (EINVAL);
2136 }
2137 softc->disk->d_delmaxsize = value;
2138
2139 return (0);
2140 }
2141
2142 static int
2143 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2144 {
2145 int error, value;
2146
2147 value = *(int *)arg1;
2148
2149 error = sysctl_handle_int(oidp, &value, 0, req);
2150
2151 if ((error != 0)
2152 || (req->newptr == NULL))
2153 return (error);
2154
2155 /*
2156 * Acceptable values here are 6, 10, 12 or 16.
2157 */
2158 if (value < 6)
2159 value = 6;
2160 else if ((value > 6)
2161 && (value <= 10))
2162 value = 10;
2163 else if ((value > 10)
2164 && (value <= 12))
2165 value = 12;
2166 else if (value > 12)
2167 value = 16;
2168
2169 *(int *)arg1 = value;
2170
2171 return (0);
2172 }
2173
2174 static int
2175 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2176 {
2177 sbintime_t value;
2178 int error;
2179
2180 value = da_default_softtimeout / SBT_1MS;
2181
2182 error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2183 if ((error != 0) || (req->newptr == NULL))
2184 return (error);
2185
2186 /* XXX Should clip this to a reasonable level */
2187 if (value > da_default_timeout * 1000)
2188 return (EINVAL);
2189
2190 da_default_softtimeout = value * SBT_1MS;
2191 return (0);
2192 }
2193
2194 static void
2195 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2196 {
2197
2198 softc->delete_method = delete_method;
2199 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2200 softc->delete_func = da_delete_functions[delete_method];
2201
2202 if (softc->delete_method > DA_DELETE_DISABLE)
2203 softc->disk->d_flags |= DISKFLAG_CANDELETE;
2204 else
2205 softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2206 }
2207
2208 static off_t
2209 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2210 {
2211 off_t sectors;
2212
2213 switch(delete_method) {
2214 case DA_DELETE_UNMAP:
2215 sectors = (off_t)softc->unmap_max_lba;
2216 break;
2217 case DA_DELETE_ATA_TRIM:
2218 sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2219 break;
2220 case DA_DELETE_WS16:
2221 sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2222 break;
2223 case DA_DELETE_ZERO:
2224 case DA_DELETE_WS10:
2225 sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2226 break;
2227 default:
2228 return 0;
2229 }
2230
2231 return (off_t)softc->params.secsize *
2232 omin(sectors, softc->params.sectors);
2233 }
2234
/*
 * Finish the probe state machine for a da(4) peripheral: choose the
 * BIO_DELETE backend from the limits gathered during probing, optionally
 * announce the supported delete methods, release the probe CCB and move
 * the softc to DA_STATE_NORMAL so regular I/O can be scheduled.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/* Probing filled in delete_available; pick the best method now. */
	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		/* List every available method; "(*)" marks the one in use. */
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Wake any thread sleeping on the media-size channel. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		softc->flags |= DA_FLAG_ANNOUNCED;
		/* First completion: drop the exclusive probe hold. */
		cam_periph_unhold(periph);
	} else
		cam_periph_release_locked(periph);
}
2286
2287 static void
2288 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2289 {
2290 int i, methods;
2291
2292 /* If available, prefer the method requested by user. */
2293 i = softc->delete_method_pref;
2294 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2295 if (methods & (1 << i)) {
2296 dadeletemethodset(softc, i);
2297 return;
2298 }
2299
2300 /* Use the pre-defined order to choose the best performing delete. */
2301 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2302 if (i == DA_DELETE_ZERO)
2303 continue;
2304 if (softc->delete_available & (1 << i)) {
2305 dadeletemethodset(softc, i);
2306 return;
2307 }
2308 }
2309
2310 /* Fallback to default. */
2311 dadeletemethodset(softc, default_method);
2312 }
2313
2314 static int
2315 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2316 {
2317 char buf[16];
2318 const char *p;
2319 struct da_softc *softc;
2320 int i, error, methods, value;
2321
2322 softc = (struct da_softc *)arg1;
2323
2324 value = softc->delete_method;
2325 if (value < 0 || value > DA_DELETE_MAX)
2326 p = "UNKNOWN";
2327 else
2328 p = da_delete_method_names[value];
2329 strncpy(buf, p, sizeof(buf));
2330 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2331 if (error != 0 || req->newptr == NULL)
2332 return (error);
2333 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2334 for (i = 0; i <= DA_DELETE_MAX; i++) {
2335 if (strcmp(buf, da_delete_method_names[i]) == 0)
2336 break;
2337 }
2338 if (i > DA_DELETE_MAX)
2339 return (EINVAL);
2340 softc->delete_method_pref = i;
2341 dadeletemethodchoose(softc, DA_DELETE_NONE);
2342 return (0);
2343 }
2344
2345 static int
2346 dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2347 {
2348 char tmpbuf[40];
2349 struct da_softc *softc;
2350 int error;
2351
2352 softc = (struct da_softc *)arg1;
2353
2354 switch (softc->zone_mode) {
2355 case DA_ZONE_DRIVE_MANAGED:
2356 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2357 break;
2358 case DA_ZONE_HOST_AWARE:
2359 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2360 break;
2361 case DA_ZONE_HOST_MANAGED:
2362 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2363 break;
2364 case DA_ZONE_NONE:
2365 default:
2366 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2367 break;
2368 }
2369
2370 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2371
2372 return (error);
2373 }
2374
2375 static int
2376 dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2377 {
2378 char tmpbuf[180];
2379 struct da_softc *softc;
2380 struct sbuf sb;
2381 int error, first;
2382 unsigned int i;
2383
2384 softc = (struct da_softc *)arg1;
2385
2386 error = 0;
2387 first = 1;
2388 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2389
2390 for (i = 0; i < sizeof(da_zone_desc_table) /
2391 sizeof(da_zone_desc_table[0]); i++) {
2392 if (softc->zone_flags & da_zone_desc_table[i].value) {
2393 if (first == 0)
2394 sbuf_printf(&sb, ", ");
2395 else
2396 first = 0;
2397 sbuf_cat(&sb, da_zone_desc_table[i].desc);
2398 }
2399 }
2400
2401 if (first == 1)
2402 sbuf_printf(&sb, "None");
2403
2404 sbuf_finish(&sb);
2405
2406 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2407
2408 return (error);
2409 }
2410
/*
 * CAM periph constructor for da(4).  Allocates and initializes the softc,
 * applies quirks, sizes I/O limits from the SIM, registers the GEOM disk,
 * installs async callbacks and kicks off the probe state machine.
 * Returns CAM_REQ_CMP on success or CAM_REQ_CMP_ERR on any setup failure.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		    "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/* Conservative defaults; probing refines these limits later. */
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Host-managed ZBC devices report a distinct peripheral type. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive refcount on the periph while dastart is called
	 * to finish the probe.  The reference will be dropped in dadone at
	 * the end of probe.
	 */
	(void)cam_periph_hold(periph, PRIBIO);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, softc);

	/* Periph lock is dropped here; re-taken below around disk_create(). */
	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size < 6)
		softc->minimum_cmd_size = 6;
	else if ((softc->minimum_cmd_size > 6)
	      && (softc->minimum_cmd_size <= 10))
		softc->minimum_cmd_size = 10;
	else if ((softc->minimum_cmd_size > 10)
	      && (softc->minimum_cmd_size <= 12))
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	/* Clamp the maximum transfer size to what the SIM can handle. */
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Build "<vendor> <product>" description from the INQUIRY data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule a periodic media polling events.
	 */
	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
	    (cgd->inq_flags & SID_AEN) == 0 &&
	    da_poll_period != 0)
		callout_reset(&softc->mediapoll_c, da_poll_period * hz,
		    damediapoll, periph);

	/* Queue the first probe step (DA_STATE_PROBE_WP) via dastart(). */
	xpt_schedule(periph, CAM_PRIORITY_DEV);

	return(CAM_REQ_CMP);
}
2641
2642 static int
2643 da_zone_bio_to_scsi(int disk_zone_cmd)
2644 {
2645 switch (disk_zone_cmd) {
2646 case DISK_ZONE_OPEN:
2647 return ZBC_OUT_SA_OPEN;
2648 case DISK_ZONE_CLOSE:
2649 return ZBC_OUT_SA_CLOSE;
2650 case DISK_ZONE_FINISH:
2651 return ZBC_OUT_SA_FINISH;
2652 case DISK_ZONE_RWP:
2653 return ZBC_OUT_SA_RWP;
2654 }
2655
2656 return -1;
2657 }
2658
/*
 * Translate a BIO_ZONE request into a SCSI ZBC command (or an ATA ZAC
 * passthrough command when the device sits behind a SAT layer without
 * ZBC translation) and fill in the supplied CCB.  *queue_ccb is set to 1
 * when the caller must queue the CCB to the device; DISK_ZONE_GET_PARAMS
 * is answered entirely from cached softc state and needs no CCB.
 * Returns 0 on success or an errno value.
 */
static int
da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
	    int *queue_ccb)
{
	struct da_softc *softc;
	int error;

	error = 0;

	/* Only BIO_ZONE requests belong here. */
	if (bp->bio_cmd != BIO_ZONE) {
		error = EINVAL;
		goto bailout;
	}

	softc = periph->softc;

	switch (bp->bio_zone.zone_cmd) {
	case DISK_ZONE_OPEN:
	case DISK_ZONE_CLOSE:
	case DISK_ZONE_FINISH:
	case DISK_ZONE_RWP: {
		int zone_flags;
		int zone_sa;
		uint64_t lba;

		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
		if (zone_sa == -1) {
			xpt_print(periph->path, "Cannot translate zone "
			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
			error = EINVAL;
			goto bailout;
		}

		zone_flags = 0;
		lba = bp->bio_zone.zone_params.rwp.id;

		if (bp->bio_zone.zone_params.rwp.flags &
		    DISK_ZONE_RWP_FLAG_ALL)
			zone_flags |= ZBC_OUT_ALL;

		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
			/* Native ZBC (or SAT-translated) path. */
			scsi_zbc_out(&ccb->csio,
				     /*retries*/ da_retry_count,
				     /*cbfcnp*/ dadone,
				     /*tag_action*/ MSG_SIMPLE_Q_TAG,
				     /*service_action*/ zone_sa,
				     /*zone_id*/ lba,
				     /*zone_flags*/ zone_flags,
				     /*data_ptr*/ NULL,
				     /*dxfer_len*/ 0,
				     /*sense_len*/ SSD_FULL_SIZE,
				     /*timeout*/ da_default_timeout * 1000);
		} else {
			/*
			 * Note that in this case, even though we can
			 * technically use NCQ, we don't bother for several
			 * reasons:
			 * 1. It hasn't been tested on a SAT layer that
			 *    supports it.  This is new as of SAT-4.
			 * 2. Even when there is a SAT layer that supports
			 *    it, that SAT layer will also probably support
			 *    ZBC -> ZAC translation, since they are both
			 *    in the SAT-4 spec.
			 * 3. Translation will likely be preferable to ATA
			 *    passthrough.  LSI / Avago at least single
			 *    steps ATA passthrough commands in the HBA,
			 *    regardless of protocol, so unless that
			 *    changes, there is a performance penalty for
			 *    doing ATA passthrough no matter whether
			 *    you're using NCQ/FPDMA, DMA or PIO.
			 * 4. It requires a 32-byte CDB, which at least at
			 *    this point in CAM requires a CDB pointer, which
			 *    would require us to allocate an additional bit
			 *    of storage separate from the CCB.
			 */
			error = scsi_ata_zac_mgmt_out(&ccb->csio,
			    /*retries*/ da_retry_count,
			    /*cbfcnp*/ dadone,
			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
			    /*use_ncq*/ 0,
			    /*zm_action*/ zone_sa,
			    /*zone_id*/ lba,
			    /*zone_flags*/ zone_flags,
			    /*data_ptr*/ NULL,
			    /*dxfer_len*/ 0,
			    /*cdb_storage*/ NULL,
			    /*cdb_storage_len*/ 0,
			    /*sense_len*/ SSD_FULL_SIZE,
			    /*timeout*/ da_default_timeout * 1000);
			if (error != 0) {
				error = EINVAL;
				xpt_print(periph->path,
				    "scsi_ata_zac_mgmt_out() returned an "
				    "error!");
				goto bailout;
			}
		}
		*queue_ccb = 1;

		break;
	}
	case DISK_ZONE_REPORT_ZONES: {
		uint8_t *rz_ptr;
		uint32_t num_entries, alloc_size;
		struct disk_zone_report *rep;

		rep = &bp->bio_zone.zone_params.report;

		num_entries = rep->entries_allocated;
		if (num_entries == 0) {
			xpt_print(periph->path, "No entries allocated for "
			    "Report Zones request\n");
			error = EINVAL;
			goto bailout;
		}
		/*
		 * Size the transfer for the requested number of zone
		 * descriptors, capped at the disk's maximum I/O size.
		 * The buffer is freed in the completion path.
		 */
		alloc_size = sizeof(struct scsi_report_zones_hdr) +
		    (sizeof(struct scsi_report_zones_desc) * num_entries);
		alloc_size = min(alloc_size, softc->disk->d_maxsize);
		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
		if (rz_ptr == NULL) {
			xpt_print(periph->path, "Unable to allocate memory "
			   "for Report Zones request\n");
			error = ENOMEM;
			goto bailout;
		}

		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
			scsi_zbc_in(&ccb->csio,
				    /*retries*/ da_retry_count,
				    /*cbcfnp*/ dadone,
				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
				    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
				    /*zone_start_lba*/ rep->starting_id,
				    /*zone_options*/ rep->rep_options,
				    /*data_ptr*/ rz_ptr,
				    /*dxfer_len*/ alloc_size,
				    /*sense_len*/ SSD_FULL_SIZE,
				    /*timeout*/ da_default_timeout * 1000);
		} else {
			/*
			 * Note that in this case, even though we can
			 * technically use NCQ, we don't bother for several
			 * reasons:
			 * 1. It hasn't been tested on a SAT layer that
			 *    supports it.  This is new as of SAT-4.
			 * 2. Even when there is a SAT layer that supports
			 *    it, that SAT layer will also probably support
			 *    ZBC -> ZAC translation, since they are both
			 *    in the SAT-4 spec.
			 * 3. Translation will likely be preferable to ATA
			 *    passthrough.  LSI / Avago at least single
			 *    steps ATA passthrough commands in the HBA,
			 *    regardless of protocol, so unless that
			 *    changes, there is a performance penalty for
			 *    doing ATA passthrough no matter whether
			 *    you're using NCQ/FPDMA, DMA or PIO.
			 * 4. It requires a 32-byte CDB, which at least at
			 *    this point in CAM requires a CDB pointer, which
			 *    would require us to allocate an additional bit
			 *    of storage separate from the CCB.
			 */
			error = scsi_ata_zac_mgmt_in(&ccb->csio,
			    /*retries*/ da_retry_count,
			    /*cbcfnp*/ dadone,
			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
			    /*use_ncq*/ 0,
			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
			    /*zone_id*/ rep->starting_id,
			    /*zone_flags*/ rep->rep_options,
			    /*data_ptr*/ rz_ptr,
			    /*dxfer_len*/ alloc_size,
			    /*cdb_storage*/ NULL,
			    /*cdb_storage_len*/ 0,
			    /*sense_len*/ SSD_FULL_SIZE,
			    /*timeout*/ da_default_timeout * 1000);
			if (error != 0) {
				error = EINVAL;
				xpt_print(periph->path,
				    "scsi_ata_zac_mgmt_in() returned an "
				    "error!");
				goto bailout;
			}
		}

		/*
		 * For BIO_ZONE, this isn't normally needed.  However, it
		 * is used by devstat_end_transaction_bio() to determine
		 * how much data was transferred.
		 */
		/*
		 * XXX KDM we have a problem.  But I'm not sure how to fix
		 * it.  devstat uses bio_bcount - bio_resid to calculate
		 * the amount of data transferred.   The GEOM disk code
		 * uses bio_length - bio_resid to calculate the amount of
		 * data in bio_completed.  We have different structure
		 * sizes above and below the ada(4) driver.  So, if we
		 * use the sizes above, the amount transferred won't be
		 * quite accurate for devstat.  If we use different sizes
		 * for bio_bcount and bio_length (above and below
		 * respectively), then the residual needs to match one or
		 * the other.  Everything is calculated after the bio
		 * leaves the driver, so changing the values around isn't
		 * really an option.  For now, just set the count to the
		 * passed in length.  This means that the calculations
		 * above (e.g. bio_completed) will be correct, but the
		 * amount of data reported to devstat will be slightly
		 * under or overstated.
		 */
		bp->bio_bcount = bp->bio_length;

		*queue_ccb = 1;

		break;
	}
	case DISK_ZONE_GET_PARAMS: {
		struct disk_zone_disk_params *params;

		/* Answered from cached probe state; no CCB is queued. */
		params = &bp->bio_zone.zone_params.disk_params;
		bzero(params, sizeof(*params));

		switch (softc->zone_mode) {
		case DA_ZONE_DRIVE_MANAGED:
			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
			break;
		case DA_ZONE_HOST_AWARE:
			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
			break;
		case DA_ZONE_HOST_MANAGED:
			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
			break;
		default:
		case DA_ZONE_NONE:
			params->zone_mode = DISK_ZONE_MODE_NONE;
			break;
		}

		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
			params->flags |= DISK_ZONE_DISK_URSWRZ;

		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
			params->optimal_seq_zones = softc->optimal_seq_zones;
			params->flags |= DISK_ZONE_OPT_SEQ_SET;
		}

		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
			params->optimal_nonseq_zones =
			    softc->optimal_nonseq_zones;
			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
		}

		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
			params->max_seq_zones = softc->max_seq_zones;
			params->flags |= DISK_ZONE_MAX_SEQ_SET;
		}
		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
			params->flags |= DISK_ZONE_RZ_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
			params->flags |= DISK_ZONE_OPEN_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
			params->flags |= DISK_ZONE_CLOSE_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
			params->flags |= DISK_ZONE_FINISH_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
			params->flags |= DISK_ZONE_RWP_SUP;
		break;
	}
	default:
		break;
	}
bailout:
	return (error);
}
2935
2936 static void
2937 dastart(struct cam_periph *periph, union ccb *start_ccb)
2938 {
2939 struct da_softc *softc;
2940
2941 softc = (struct da_softc *)periph->softc;
2942
2943 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
2944
2945 skipstate:
2946 switch (softc->state) {
2947 case DA_STATE_NORMAL:
2948 {
2949 struct bio *bp;
2950 uint8_t tag_code;
2951
2952 more:
2953 bp = cam_iosched_next_bio(softc->cam_iosched);
2954 if (bp == NULL) {
2955 if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2956 cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2957 scsi_test_unit_ready(&start_ccb->csio,
2958 /*retries*/ da_retry_count,
2959 dadone,
2960 MSG_SIMPLE_Q_TAG,
2961 SSD_FULL_SIZE,
2962 da_default_timeout * 1000);
2963 start_ccb->ccb_h.ccb_bp = NULL;
2964 start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
2965 xpt_action(start_ccb);
2966 } else
2967 xpt_release_ccb(start_ccb);
2968 break;
2969 }
2970
2971 if (bp->bio_cmd == BIO_DELETE) {
2972 if (softc->delete_func != NULL) {
2973 softc->delete_func(periph, start_ccb, bp);
2974 goto out;
2975 } else {
2976 /* Not sure this is possible, but failsafe by lying and saying "sure, done." */
2977 biofinish(bp, NULL, 0);
2978 goto more;
2979 }
2980 }
2981
2982 if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2983 cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2984 cam_periph_release_locked(periph);
2985 }
2986
2987 if ((bp->bio_flags & BIO_ORDERED) != 0 ||
2988 (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
2989 softc->flags &= ~DA_FLAG_NEED_OTAG;
2990 softc->flags |= DA_FLAG_WAS_OTAG;
2991 tag_code = MSG_ORDERED_Q_TAG;
2992 } else {
2993 tag_code = MSG_SIMPLE_Q_TAG;
2994 }
2995
2996 switch (bp->bio_cmd) {
2997 case BIO_WRITE:
2998 case BIO_READ:
2999 {
3000 void *data_ptr;
3001 int rw_op;
3002
3003 if (bp->bio_cmd == BIO_WRITE) {
3004 softc->flags |= DA_FLAG_DIRTY;
3005 rw_op = SCSI_RW_WRITE;
3006 } else {
3007 rw_op = SCSI_RW_READ;
3008 }
3009
3010 data_ptr = bp->bio_data;
3011 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
3012 rw_op |= SCSI_RW_BIO;
3013 data_ptr = bp;
3014 }
3015
3016 scsi_read_write(&start_ccb->csio,
3017 /*retries*/da_retry_count,
3018 /*cbfcnp*/dadone,
3019 /*tag_action*/tag_code,
3020 rw_op,
3021 /*byte2*/0,
3022 softc->minimum_cmd_size,
3023 /*lba*/bp->bio_pblkno,
3024 /*block_count*/bp->bio_bcount /
3025 softc->params.secsize,
3026 data_ptr,
3027 /*dxfer_len*/ bp->bio_bcount,
3028 /*sense_len*/SSD_FULL_SIZE,
3029 da_default_timeout * 1000);
3030 break;
3031 }
3032 case BIO_FLUSH:
3033 /*
3034 * If we don't support sync cache, or the disk
3035 * isn't dirty, FLUSH is a no-op. Use the
3036 * allocated * CCB for the next bio if one is
3037 * available.
3038 */
3039 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
3040 (softc->flags & DA_FLAG_DIRTY) == 0) {
3041 biodone(bp);
3042 goto skipstate;
3043 }
3044
3045 /*
3046 * BIO_FLUSH doesn't currently communicate
3047 * range data, so we synchronize the cache
3048 * over the whole disk. We also force
3049 * ordered tag semantics the flush applies
3050 * to all previously queued I/O.
3051 */
3052 scsi_synchronize_cache(&start_ccb->csio,
3053 /*retries*/1,
3054 /*cbfcnp*/dadone,
3055 MSG_ORDERED_Q_TAG,
3056 /*begin_lba*/0,
3057 /*lb_count*/0,
3058 SSD_FULL_SIZE,
3059 da_default_timeout*1000);
3060 /*
3061 * Clear the dirty flag before sending the command.
3062 * Either this sync cache will be successful, or it
3063 * will fail after a retry. If it fails, it is
3064 * unlikely to be successful if retried later, so
3065 * we'll save ourselves time by just marking the
3066 * device clean.
3067 */
3068 softc->flags &= ~DA_FLAG_DIRTY;
3069 break;
3070 case BIO_ZONE: {
3071 int error, queue_ccb;
3072
3073 queue_ccb = 0;
3074
3075 error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
3076 if ((error != 0)
3077 || (queue_ccb == 0)) {
3078 biofinish(bp, NULL, error);
3079 xpt_release_ccb(start_ccb);
3080 return;
3081 }
3082 break;
3083 }
3084 }
3085 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3086 start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3087 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3088
3089 out:
3090 LIST_INSERT_HEAD(&softc->pending_ccbs,
3091 &start_ccb->ccb_h, periph_links.le);
3092
3093 /* We expect a unit attention from this device */
3094 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3095 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3096 softc->flags &= ~DA_FLAG_RETRY_UA;
3097 }
3098
3099 start_ccb->ccb_h.ccb_bp = bp;
3100 softc->refcount++;
3101 cam_periph_unlock(periph);
3102 xpt_action(start_ccb);
3103 cam_periph_lock(periph);
3104 softc->refcount--;
3105
3106 /* May have more work to do, so ensure we stay scheduled */
3107 daschedule(periph);
3108 break;
3109 }
3110 case DA_STATE_PROBE_WP:
3111 {
3112 void *mode_buf;
3113 int mode_buf_len;
3114
3115 mode_buf_len = 192;
3116 mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
3117 if (mode_buf == NULL) {
3118 xpt_print(periph->path, "Unable to send mode sense - "
3119 "malloc failure\n");
3120 softc->state = DA_STATE_PROBE_RC;
3121 goto skipstate;
3122 }
3123 scsi_mode_sense_len(&start_ccb->csio,
3124 /*retries*/ da_retry_count,
3125 /*cbfcnp*/ dadone,
3126 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3127 /*dbd*/ FALSE,
3128 /*pc*/ SMS_PAGE_CTRL_CURRENT,
3129 /*page*/ SMS_ALL_PAGES_PAGE,
3130 /*param_buf*/ mode_buf,
3131 /*param_len*/ mode_buf_len,
3132 /*minimum_cmd_size*/ softc->minimum_cmd_size,
3133 /*sense_len*/ SSD_FULL_SIZE,
3134 /*timeout*/ da_default_timeout * 1000);
3135 start_ccb->ccb_h.ccb_bp = NULL;
3136 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
3137 xpt_action(start_ccb);
3138 break;
3139 }
3140 case DA_STATE_PROBE_RC:
3141 {
3142 struct scsi_read_capacity_data *rcap;
3143
3144 rcap = (struct scsi_read_capacity_data *)
3145 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3146 if (rcap == NULL) {
3147 printf("dastart: Couldn't malloc read_capacity data\n");
3148 /* da_free_periph??? */
3149 break;
3150 }
3151 scsi_read_capacity(&start_ccb->csio,
3152 /*retries*/da_retry_count,
3153 dadone,
3154 MSG_SIMPLE_Q_TAG,
3155 rcap,
3156 SSD_FULL_SIZE,
3157 /*timeout*/5000);
3158 start_ccb->ccb_h.ccb_bp = NULL;
3159 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3160 xpt_action(start_ccb);
3161 break;
3162 }
3163 case DA_STATE_PROBE_RC16:
3164 {
3165 struct scsi_read_capacity_data_long *rcaplong;
3166
3167 rcaplong = (struct scsi_read_capacity_data_long *)
3168 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3169 if (rcaplong == NULL) {
3170 printf("dastart: Couldn't malloc read_capacity data\n");
3171 /* da_free_periph??? */
3172 break;
3173 }
3174 scsi_read_capacity_16(&start_ccb->csio,
3175 /*retries*/ da_retry_count,
3176 /*cbfcnp*/ dadone,
3177 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3178 /*lba*/ 0,
3179 /*reladr*/ 0,
3180 /*pmi*/ 0,
3181 /*rcap_buf*/ (uint8_t *)rcaplong,
3182 /*rcap_buf_len*/ sizeof(*rcaplong),
3183 /*sense_len*/ SSD_FULL_SIZE,
3184 /*timeout*/ da_default_timeout * 1000);
3185 start_ccb->ccb_h.ccb_bp = NULL;
3186 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3187 xpt_action(start_ccb);
3188 break;
3189 }
3190 case DA_STATE_PROBE_LBP:
3191 {
3192 struct scsi_vpd_logical_block_prov *lbp;
3193
3194 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3195 /*
3196 * If we get here we don't support any SBC-3 delete
3197 * methods with UNMAP as the Logical Block Provisioning
3198 * VPD page support is required for devices which
3199 * support it according to T10/1799-D Revision 31
3200 * however older revisions of the spec don't mandate
3201 * this so we currently don't remove these methods
3202 * from the available set.
3203 */
3204 softc->state = DA_STATE_PROBE_BLK_LIMITS;
3205 goto skipstate;
3206 }
3207
3208 lbp = (struct scsi_vpd_logical_block_prov *)
3209 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3210
3211 if (lbp == NULL) {
3212 printf("dastart: Couldn't malloc lbp data\n");
3213 /* da_free_periph??? */
3214 break;
3215 }
3216
3217 scsi_inquiry(&start_ccb->csio,
3218 /*retries*/da_retry_count,
3219 /*cbfcnp*/dadone,
3220 /*tag_action*/MSG_SIMPLE_Q_TAG,
3221 /*inq_buf*/(u_int8_t *)lbp,
3222 /*inq_len*/sizeof(*lbp),
3223 /*evpd*/TRUE,
3224 /*page_code*/SVPD_LBP,
3225 /*sense_len*/SSD_MIN_SIZE,
3226 /*timeout*/da_default_timeout * 1000);
3227 start_ccb->ccb_h.ccb_bp = NULL;
3228 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3229 xpt_action(start_ccb);
3230 break;
3231 }
3232 case DA_STATE_PROBE_BLK_LIMITS:
3233 {
3234 struct scsi_vpd_block_limits *block_limits;
3235
3236 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3237 /* Not supported skip to next probe */
3238 softc->state = DA_STATE_PROBE_BDC;
3239 goto skipstate;
3240 }
3241
3242 block_limits = (struct scsi_vpd_block_limits *)
3243 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3244
3245 if (block_limits == NULL) {
3246 printf("dastart: Couldn't malloc block_limits data\n");
3247 /* da_free_periph??? */
3248 break;
3249 }
3250
3251 scsi_inquiry(&start_ccb->csio,
3252 /*retries*/da_retry_count,
3253 /*cbfcnp*/dadone,
3254 /*tag_action*/MSG_SIMPLE_Q_TAG,
3255 /*inq_buf*/(u_int8_t *)block_limits,
3256 /*inq_len*/sizeof(*block_limits),
3257 /*evpd*/TRUE,
3258 /*page_code*/SVPD_BLOCK_LIMITS,
3259 /*sense_len*/SSD_MIN_SIZE,
3260 /*timeout*/da_default_timeout * 1000);
3261 start_ccb->ccb_h.ccb_bp = NULL;
3262 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3263 xpt_action(start_ccb);
3264 break;
3265 }
3266 case DA_STATE_PROBE_BDC:
3267 {
3268 struct scsi_vpd_block_characteristics *bdc;
3269
3270 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3271 softc->state = DA_STATE_PROBE_ATA;
3272 goto skipstate;
3273 }
3274
3275 bdc = (struct scsi_vpd_block_characteristics *)
3276 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3277
3278 if (bdc == NULL) {
3279 printf("dastart: Couldn't malloc bdc data\n");
3280 /* da_free_periph??? */
3281 break;
3282 }
3283
3284 scsi_inquiry(&start_ccb->csio,
3285 /*retries*/da_retry_count,
3286 /*cbfcnp*/dadone,
3287 /*tag_action*/MSG_SIMPLE_Q_TAG,
3288 /*inq_buf*/(u_int8_t *)bdc,
3289 /*inq_len*/sizeof(*bdc),
3290 /*evpd*/TRUE,
3291 /*page_code*/SVPD_BDC,
3292 /*sense_len*/SSD_MIN_SIZE,
3293 /*timeout*/da_default_timeout * 1000);
3294 start_ccb->ccb_h.ccb_bp = NULL;
3295 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3296 xpt_action(start_ccb);
3297 break;
3298 }
3299 case DA_STATE_PROBE_ATA:
3300 {
3301 struct ata_params *ata_params;
3302
3303 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3304 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3305 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3306 /*
3307 * Note that if the ATA VPD page isn't
3308 * supported, we aren't talking to an ATA
3309 * device anyway. Support for that VPD
3310 * page is mandatory for SCSI to ATA (SAT)
3311 * translation layers.
3312 */
3313 softc->state = DA_STATE_PROBE_ZONE;
3314 goto skipstate;
3315 }
3316 daprobedone(periph, start_ccb);
3317 break;
3318 }
3319
3320 ata_params = (struct ata_params*)
3321 malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO);
3322
3323 if (ata_params == NULL) {
3324 xpt_print(periph->path, "Couldn't malloc ata_params "
3325 "data\n");
3326 /* da_free_periph??? */
3327 break;
3328 }
3329
3330 scsi_ata_identify(&start_ccb->csio,
3331 /*retries*/da_retry_count,
3332 /*cbfcnp*/dadone,
3333 /*tag_action*/MSG_SIMPLE_Q_TAG,
3334 /*data_ptr*/(u_int8_t *)ata_params,
3335 /*dxfer_len*/sizeof(*ata_params),
3336 /*sense_len*/SSD_FULL_SIZE,
3337 /*timeout*/da_default_timeout * 1000);
3338 start_ccb->ccb_h.ccb_bp = NULL;
3339 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3340 xpt_action(start_ccb);
3341 break;
3342 }
3343 case DA_STATE_PROBE_ATA_LOGDIR:
3344 {
3345 struct ata_gp_log_dir *log_dir;
3346 int retval;
3347
3348 retval = 0;
3349
3350 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3351 /*
3352 * If we don't have log support, not much point in
3353 * trying to probe zone support.
3354 */
3355 daprobedone(periph, start_ccb);
3356 break;
3357 }
3358
3359 /*
3360 * If we have an ATA device (the SCSI ATA Information VPD
3361 * page should be present and the ATA identify should have
3362 * succeeded) and it supports logs, ask for the log directory.
3363 */
3364
3365 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3366 if (log_dir == NULL) {
3367 xpt_print(periph->path, "Couldn't malloc log_dir "
3368 "data\n");
3369 daprobedone(periph, start_ccb);
3370 break;
3371 }
3372
3373 retval = scsi_ata_read_log(&start_ccb->csio,
3374 /*retries*/ da_retry_count,
3375 /*cbfcnp*/ dadone,
3376 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3377 /*log_address*/ ATA_LOG_DIRECTORY,
3378 /*page_number*/ 0,
3379 /*block_count*/ 1,
3380 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3381 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3382 /*data_ptr*/ (uint8_t *)log_dir,
3383 /*dxfer_len*/ sizeof(*log_dir),
3384 /*sense_len*/ SSD_FULL_SIZE,
3385 /*timeout*/ da_default_timeout * 1000);
3386
3387 if (retval != 0) {
3388 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3389 free(log_dir, M_SCSIDA);
3390 daprobedone(periph, start_ccb);
3391 break;
3392 }
3393 start_ccb->ccb_h.ccb_bp = NULL;
3394 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3395 xpt_action(start_ccb);
3396 break;
3397 }
3398 case DA_STATE_PROBE_ATA_IDDIR:
3399 {
3400 struct ata_identify_log_pages *id_dir;
3401 int retval;
3402
3403 retval = 0;
3404
3405 /*
3406 * Check here to see whether the Identify Device log is
3407 * supported in the directory of logs. If so, continue
3408 * with requesting the log of identify device pages.
3409 */
3410 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3411 daprobedone(periph, start_ccb);
3412 break;
3413 }
3414
3415 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3416 if (id_dir == NULL) {
3417 xpt_print(periph->path, "Couldn't malloc id_dir "
3418 "data\n");
3419 daprobedone(periph, start_ccb);
3420 break;
3421 }
3422
3423 retval = scsi_ata_read_log(&start_ccb->csio,
3424 /*retries*/ da_retry_count,
3425 /*cbfcnp*/ dadone,
3426 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3427 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3428 /*page_number*/ ATA_IDL_PAGE_LIST,
3429 /*block_count*/ 1,
3430 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3431 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3432 /*data_ptr*/ (uint8_t *)id_dir,
3433 /*dxfer_len*/ sizeof(*id_dir),
3434 /*sense_len*/ SSD_FULL_SIZE,
3435 /*timeout*/ da_default_timeout * 1000);
3436
3437 if (retval != 0) {
3438 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3439 free(id_dir, M_SCSIDA);
3440 daprobedone(periph, start_ccb);
3441 break;
3442 }
3443 start_ccb->ccb_h.ccb_bp = NULL;
3444 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3445 xpt_action(start_ccb);
3446 break;
3447 }
3448 case DA_STATE_PROBE_ATA_SUP:
3449 {
3450 struct ata_identify_log_sup_cap *sup_cap;
3451 int retval;
3452
3453 retval = 0;
3454
3455 /*
3456 * Check here to see whether the Supported Capabilities log
3457 * is in the list of Identify Device logs.
3458 */
3459 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3460 daprobedone(periph, start_ccb);
3461 break;
3462 }
3463
3464 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3465 if (sup_cap == NULL) {
3466 xpt_print(periph->path, "Couldn't malloc sup_cap "
3467 "data\n");
3468 daprobedone(periph, start_ccb);
3469 break;
3470 }
3471
3472 retval = scsi_ata_read_log(&start_ccb->csio,
3473 /*retries*/ da_retry_count,
3474 /*cbfcnp*/ dadone,
3475 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3476 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3477 /*page_number*/ ATA_IDL_SUP_CAP,
3478 /*block_count*/ 1,
3479 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3480 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3481 /*data_ptr*/ (uint8_t *)sup_cap,
3482 /*dxfer_len*/ sizeof(*sup_cap),
3483 /*sense_len*/ SSD_FULL_SIZE,
3484 /*timeout*/ da_default_timeout * 1000);
3485
3486 if (retval != 0) {
3487 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3488 free(sup_cap, M_SCSIDA);
3489 daprobedone(periph, start_ccb);
3490 break;
3491
3492 }
3493
3494 start_ccb->ccb_h.ccb_bp = NULL;
3495 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3496 xpt_action(start_ccb);
3497 break;
3498 }
3499 case DA_STATE_PROBE_ATA_ZONE:
3500 {
3501 struct ata_zoned_info_log *ata_zone;
3502 int retval;
3503
3504 retval = 0;
3505
3506 /*
3507 * Check here to see whether the zoned device information
3508 * page is supported. If so, continue on to request it.
3509 * If not, skip to DA_STATE_PROBE_LOG or done.
3510 */
3511 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3512 daprobedone(periph, start_ccb);
3513 break;
3514 }
3515 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3516 M_NOWAIT|M_ZERO);
3517 if (ata_zone == NULL) {
3518 xpt_print(periph->path, "Couldn't malloc ata_zone "
3519 "data\n");
3520 daprobedone(periph, start_ccb);
3521 break;
3522 }
3523
3524 retval = scsi_ata_read_log(&start_ccb->csio,
3525 /*retries*/ da_retry_count,
3526 /*cbfcnp*/ dadone,
3527 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3528 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3529 /*page_number*/ ATA_IDL_ZDI,
3530 /*block_count*/ 1,
3531 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3532 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3533 /*data_ptr*/ (uint8_t *)ata_zone,
3534 /*dxfer_len*/ sizeof(*ata_zone),
3535 /*sense_len*/ SSD_FULL_SIZE,
3536 /*timeout*/ da_default_timeout * 1000);
3537
3538 if (retval != 0) {
3539 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3540 free(ata_zone, M_SCSIDA);
3541 daprobedone(periph, start_ccb);
3542 break;
3543 }
3544 start_ccb->ccb_h.ccb_bp = NULL;
3545 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3546 xpt_action(start_ccb);
3547
3548 break;
3549 }
3550 case DA_STATE_PROBE_ZONE:
3551 {
3552 struct scsi_vpd_zoned_bdc *bdc;
3553
3554 /*
3555 * Note that this page will be supported for SCSI protocol
3556 * devices that support ZBC (SMR devices), as well as ATA
3557 * protocol devices that are behind a SAT (SCSI to ATA
3558 * Translation) layer that supports converting ZBC commands
3559 * to their ZAC equivalents.
3560 */
3561 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3562 daprobedone(periph, start_ccb);
3563 break;
3564 }
3565 bdc = (struct scsi_vpd_zoned_bdc *)
3566 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3567
3568 if (bdc == NULL) {
3569 xpt_release_ccb(start_ccb);
3570 xpt_print(periph->path, "Couldn't malloc zone VPD "
3571 "data\n");
3572 break;
3573 }
3574 scsi_inquiry(&start_ccb->csio,
3575 /*retries*/da_retry_count,
3576 /*cbfcnp*/dadone,
3577 /*tag_action*/MSG_SIMPLE_Q_TAG,
3578 /*inq_buf*/(u_int8_t *)bdc,
3579 /*inq_len*/sizeof(*bdc),
3580 /*evpd*/TRUE,
3581 /*page_code*/SVPD_ZONED_BDC,
3582 /*sense_len*/SSD_FULL_SIZE,
3583 /*timeout*/da_default_timeout * 1000);
3584 start_ccb->ccb_h.ccb_bp = NULL;
3585 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3586 xpt_action(start_ccb);
3587 break;
3588 }
3589 }
3590 }
3591
/*
 * In each of the methods below, while it's the caller's
 * responsibility to ensure the request will fit into a
 * single device request, we might have changed the delete
 * method due to the device incorrectly advertising either
 * its supported methods or limits.
 *
 * To prevent this causing further issues, we validate the
 * request against the method's limits, and warn, which would
 * otherwise be unnecessary.
 */
3603 static void
3604 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3605 {
3606 struct da_softc *softc = (struct da_softc *)periph->softc;;
3607 struct bio *bp1;
3608 uint8_t *buf = softc->unmap_buf;
3609 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3610 uint64_t lba, lastlba = (uint64_t)-1;
3611 uint64_t totalcount = 0;
3612 uint64_t count;
3613 uint32_t c, lastcount = 0, ranges = 0;
3614
3615 /*
3616 * Currently this doesn't take the UNMAP
3617 * Granularity and Granularity Alignment
3618 * fields into account.
3619 *
3620 * This could result in both unoptimal unmap
3621 * requests as as well as UNMAP calls unmapping
3622 * fewer LBA's than requested.
3623 */
3624
3625 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3626 bp1 = bp;
3627 do {
3628 /*
3629 * Note: ada and da are different in how they store the
3630 * pending bp's in a trim. ada stores all of them in the
3631 * trim_req.bps. da stores all but the first one in the
3632 * delete_run_queue. ada then completes all the bps in
3633 * its adadone() loop. da completes all the bps in the
3634 * delete_run_queue in dadone, and relies on the biodone
3635 * after to complete. This should be reconciled since there's
3636 * no real reason to do it differently. XXX
3637 */
3638 if (bp1 != bp)
3639 bioq_insert_tail(&softc->delete_run_queue, bp1);
3640 lba = bp1->bio_pblkno;
3641 count = bp1->bio_bcount / softc->params.secsize;
3642
3643 /* Try to extend the previous range. */
3644 if (lba == lastlba) {
3645 c = omin(count, UNMAP_RANGE_MAX - lastcount);
3646 lastlba += c;
3647 lastcount += c;
3648 scsi_ulto4b(lastcount, d[ranges - 1].length);
3649 count -= c;
3650 lba += c;
3651 totalcount += c;
3652 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
3653 softc->unmap_gran != 0) {
3654 /* Align length of the previous range. */
3655 if ((c = lastcount % softc->unmap_gran) != 0) {
3656 if (lastcount <= c) {
3657 totalcount -= lastcount;
3658 lastlba = (uint64_t)-1;
3659 lastcount = 0;
3660 ranges--;
3661 } else {
3662 totalcount -= c;
3663 lastlba -= c;
3664 lastcount -= c;
3665 scsi_ulto4b(lastcount, d[ranges - 1].length);
3666 }
3667 }
3668 /* Align beginning of the new range. */
3669 c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
3670 if (c != 0) {
3671 c = softc->unmap_gran - c;
3672 if (count <= c) {
3673 count = 0;
3674 } else {
3675 lba += c;
3676 count -= c;
3677 }
3678 }
3679 }
3680
3681 while (count > 0) {
3682 c = omin(count, UNMAP_RANGE_MAX);
3683 if (totalcount + c > softc->unmap_max_lba ||
3684 ranges >= softc->unmap_max_ranges) {
3685 xpt_print(periph->path,
3686 "%s issuing short delete %ld > %ld"
3687 "|| %d >= %d",
3688 da_delete_method_desc[softc->delete_method],
3689 totalcount + c, softc->unmap_max_lba,
3690 ranges, softc->unmap_max_ranges);
3691 break;
3692 }
3693 scsi_u64to8b(lba, d[ranges].lba);
3694 scsi_ulto4b(c, d[ranges].length);
3695 lba += c;
3696 totalcount += c;
3697 ranges++;
3698 count -= c;
3699 lastlba = lba;
3700 lastcount = c;
3701 }
3702 bp1 = cam_iosched_next_trim(softc->cam_iosched);
3703 if (bp1 == NULL)
3704 break;
3705 if (ranges >= softc->unmap_max_ranges ||
3706 totalcount + bp1->bio_bcount /
3707 softc->params.secsize > softc->unmap_max_lba) {
3708 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3709 break;
3710 }
3711 } while (1);
3712
3713 /* Align length of the last range. */
3714 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
3715 (c = lastcount % softc->unmap_gran) != 0) {
3716 if (lastcount <= c)
3717 ranges--;
3718 else
3719 scsi_ulto4b(lastcount - c, d[ranges - 1].length);
3720 }
3721
3722 scsi_ulto2b(ranges * 16 + 6, &buf[0]);
3723 scsi_ulto2b(ranges * 16, &buf[2]);
3724
3725 scsi_unmap(&ccb->csio,
3726 /*retries*/da_retry_count,
3727 /*cbfcnp*/dadone,
3728 /*tag_action*/MSG_SIMPLE_Q_TAG,
3729 /*byte2*/0,
3730 /*data_ptr*/ buf,
3731 /*dxfer_len*/ ranges * 16 + 8,
3732 /*sense_len*/SSD_FULL_SIZE,
3733 da_default_timeout * 1000);
3734 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3735 ccb->ccb_h.flags |= CAM_UNLOCKED;
3736 cam_iosched_submit_trim(softc->cam_iosched);
3737 }
3738
3739 static void
3740 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3741 {
3742 struct da_softc *softc = (struct da_softc *)periph->softc;
3743 struct bio *bp1;
3744 uint8_t *buf = softc->unmap_buf;
3745 uint64_t lastlba = (uint64_t)-1;
3746 uint64_t count;
3747 uint64_t lba;
3748 uint32_t lastcount = 0, c, requestcount;
3749 int ranges = 0, off, block_count;
3750
3751 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3752 bp1 = bp;
3753 do {
3754 if (bp1 != bp)//XXX imp XXX
3755 bioq_insert_tail(&softc->delete_run_queue, bp1);
3756 lba = bp1->bio_pblkno;
3757 count = bp1->bio_bcount / softc->params.secsize;
3758 requestcount = count;
3759
3760 /* Try to extend the previous range. */
3761 if (lba == lastlba) {
3762 c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
3763 lastcount += c;
3764 off = (ranges - 1) * 8;
3765 buf[off + 6] = lastcount & 0xff;
3766 buf[off + 7] = (lastcount >> 8) & 0xff;
3767 count -= c;
3768 lba += c;
3769 }
3770
3771 while (count > 0) {
3772 c = omin(count, ATA_DSM_RANGE_MAX);
3773 off = ranges * 8;
3774
3775 buf[off + 0] = lba & 0xff;
3776 buf[off + 1] = (lba >> 8) & 0xff;
3777 buf[off + 2] = (lba >> 16) & 0xff;
3778 buf[off + 3] = (lba >> 24) & 0xff;
3779 buf[off + 4] = (lba >> 32) & 0xff;
3780 buf[off + 5] = (lba >> 40) & 0xff;
3781 buf[off + 6] = c & 0xff;
3782 buf[off + 7] = (c >> 8) & 0xff;
3783 lba += c;
3784 ranges++;
3785 count -= c;
3786 lastcount = c;
3787 if (count != 0 && ranges == softc->trim_max_ranges) {
3788 xpt_print(periph->path,
3789 "%s issuing short delete %ld > %ld\n",
3790 da_delete_method_desc[softc->delete_method],
3791 requestcount,
3792 (softc->trim_max_ranges - ranges) *
3793 ATA_DSM_RANGE_MAX);
3794 break;
3795 }
3796 }
3797 lastlba = lba;
3798 bp1 = cam_iosched_next_trim(softc->cam_iosched);
3799 if (bp1 == NULL)
3800 break;
3801 if (bp1->bio_bcount / softc->params.secsize >
3802 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
3803 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3804 break;
3805 }
3806 } while (1);
3807
3808 block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
3809 scsi_ata_trim(&ccb->csio,
3810 /*retries*/da_retry_count,
3811 /*cbfcnp*/dadone,
3812 /*tag_action*/MSG_SIMPLE_Q_TAG,
3813 block_count,
3814 /*data_ptr*/buf,
3815 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
3816 /*sense_len*/SSD_FULL_SIZE,
3817 da_default_timeout * 1000);
3818 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3819 ccb->ccb_h.flags |= CAM_UNLOCKED;
3820 cam_iosched_submit_trim(softc->cam_iosched);
3821 }
3822
/*
 * We calculate ws_max_blks here based on d_delmaxsize instead
 * of using softc->ws_max_blks, as the latter is the absolute
 * maximum for the device, not the protocol maximum, which may
 * well be lower.
 */
3828 static void
3829 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3830 {
3831 struct da_softc *softc;
3832 struct bio *bp1;
3833 uint64_t ws_max_blks;
3834 uint64_t lba;
3835 uint64_t count; /* forward compat with WS32 */
3836
3837 softc = (struct da_softc *)periph->softc;
3838 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
3839 lba = bp->bio_pblkno;
3840 count = 0;
3841 bp1 = bp;
3842 do {
3843 if (bp1 != bp)//XXX imp XXX
3844 bioq_insert_tail(&softc->delete_run_queue, bp1);
3845 count += bp1->bio_bcount / softc->params.secsize;
3846 if (count > ws_max_blks) {
3847 xpt_print(periph->path,
3848 "%s issuing short delete %ld > %ld\n",
3849 da_delete_method_desc[softc->delete_method],
3850 count, ws_max_blks);
3851 count = omin(count, ws_max_blks);
3852 break;
3853 }
3854 bp1 = cam_iosched_next_trim(softc->cam_iosched);
3855 if (bp1 == NULL)
3856 break;
3857 if (lba + count != bp1->bio_pblkno ||
3858 count + bp1->bio_bcount /
3859 softc->params.secsize > ws_max_blks) {
3860 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3861 break;
3862 }
3863 } while (1);
3864
3865 scsi_write_same(&ccb->csio,
3866 /*retries*/da_retry_count,
3867 /*cbfcnp*/dadone,
3868 /*tag_action*/MSG_SIMPLE_Q_TAG,
3869 /*byte2*/softc->delete_method ==
3870 DA_DELETE_ZERO ? 0 : SWS_UNMAP,
3871 softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
3872 /*lba*/lba,
3873 /*block_count*/count,
3874 /*data_ptr*/ __DECONST(void *, zero_region),
3875 /*dxfer_len*/ softc->params.secsize,
3876 /*sense_len*/SSD_FULL_SIZE,
3877 da_default_timeout * 1000);
3878 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3879 ccb->ccb_h.flags |= CAM_UNLOCKED;
3880 cam_iosched_submit_trim(softc->cam_iosched);
3881 }
3882
3883 static int
3884 cmd6workaround(union ccb *ccb)
3885 {
3886 struct scsi_rw_6 cmd6;
3887 struct scsi_rw_10 *cmd10;
3888 struct da_softc *softc;
3889 u_int8_t *cdb;
3890 struct bio *bp;
3891 int frozen;
3892
3893 cdb = ccb->csio.cdb_io.cdb_bytes;
3894 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
3895
3896 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
3897 da_delete_methods old_method = softc->delete_method;
3898
3899 /*
3900 * Typically there are two reasons for failure here
3901 * 1. Delete method was detected as supported but isn't
3902 * 2. Delete failed due to invalid params e.g. too big
3903 *
3904 * While we will attempt to choose an alternative delete method
3905 * this may result in short deletes if the existing delete
3906 * requests from geom are big for the new method chosen.
3907 *
3908 * This method assumes that the error which triggered this
3909 * will not retry the io otherwise a panic will occur
3910 */
3911 dadeleteflag(softc, old_method, 0);
3912 dadeletemethodchoose(softc, DA_DELETE_DISABLE);
3913 if (softc->delete_method == DA_DELETE_DISABLE)
3914 xpt_print(ccb->ccb_h.path,
3915 "%s failed, disabling BIO_DELETE\n",
3916 da_delete_method_desc[old_method]);
3917 else
3918 xpt_print(ccb->ccb_h.path,
3919 "%s failed, switching to %s BIO_DELETE\n",
3920 da_delete_method_desc[old_method],
3921 da_delete_method_desc[softc->delete_method]);
3922
3923 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
3924 cam_iosched_queue_work(softc->cam_iosched, bp);
3925 cam_iosched_queue_work(softc->cam_iosched,
3926 (struct bio *)ccb->ccb_h.ccb_bp);
3927 ccb->ccb_h.ccb_bp = NULL;
3928 return (0);
3929 }
3930
3931 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
3932 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3933 (*cdb == PREVENT_ALLOW) &&
3934 (softc->quirks & DA_Q_NO_PREVENT) == 0) {
3935 if (bootverbose)
3936 xpt_print(ccb->ccb_h.path,
3937 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
3938 softc->quirks |= DA_Q_NO_PREVENT;
3939 return (0);
3940 }
3941
3942 /* Detect unsupported SYNCHRONIZE CACHE(10). */
3943 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3944 (*cdb == SYNCHRONIZE_CACHE) &&
3945 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
3946 if (bootverbose)
3947 xpt_print(ccb->ccb_h.path,
3948 "SYNCHRONIZE CACHE(10) not supported.\n");
3949 softc->quirks |= DA_Q_NO_SYNC_CACHE;
3950 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
3951 return (0);
3952 }
3953
3954 /* Translation only possible if CDB is an array and cmd is R/W6 */
3955 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
3956 (*cdb != READ_6 && *cdb != WRITE_6))
3957 return 0;
3958
3959 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
3960 "increasing minimum_cmd_size to 10.\n");
3961 softc->minimum_cmd_size = 10;
3962
3963 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
3964 cmd10 = (struct scsi_rw_10 *)cdb;
3965 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
3966 cmd10->byte2 = 0;
3967 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
3968 cmd10->reserved = 0;
3969 scsi_ulto2b(cmd6.length, cmd10->length);
3970 cmd10->control = cmd6.control;
3971 ccb->csio.cdb_len = sizeof(*cmd10);
3972
3973 /* Requeue request, unfreezing queue if necessary */
3974 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
3975 ccb->ccb_h.status = CAM_REQUEUE_REQ;
3976 xpt_action(ccb);
3977 if (frozen) {
3978 cam_release_devq(ccb->ccb_h.path,
3979 /*relsim_flags*/0,
3980 /*reduction*/0,
3981 /*timeout*/0,
3982 /*getcount_only*/0);
3983 }
3984 return (ERESTART);
3985 }
3986
/*
 * Completion handling for BIO_ZONE commands: translate the SCSI (or
 * ATA-passthrough) REPORT ZONES response into the bio's
 * disk_zone_rep_entry array.  Non-report zone commands need no
 * translation.  Frees the REPORT ZONES data buffer before returning.
 */
static void
dazonedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;
	struct bio *bp;

	softc = periph->softc;
	bp = (struct bio *)ccb->ccb_h.ccb_bp;

	switch (bp->bio_zone.zone_cmd) {
	case DISK_ZONE_OPEN:
	case DISK_ZONE_CLOSE:
	case DISK_ZONE_FINISH:
	case DISK_ZONE_RWP:
		/* No response data to translate for these commands. */
		break;
	case DISK_ZONE_REPORT_ZONES: {
		uint32_t avail_len;
		struct disk_zone_report *rep;
		struct scsi_report_zones_hdr *hdr;
		struct scsi_report_zones_desc *desc;
		struct disk_zone_rep_entry *entry;
		uint32_t num_alloced, hdr_len, num_avail;
		uint32_t num_to_fill, i;
		int ata;

		rep = &bp->bio_zone.zone_params.report;
		/* Bytes the device actually returned. */
		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
		/*
		 * Note that bio_resid isn't normally used for zone
		 * commands, but it is used by devstat_end_transaction_bio()
		 * to determine how much data was transferred. Because
		 * the size of the SCSI/ATA data structures is different
		 * than the size of the BIO interface structures, the
		 * amount of data actually transferred from the drive will
		 * be different than the amount of data transferred to
		 * the user.
		 */
		bp->bio_resid = ccb->csio.resid;
		num_alloced = rep->entries_allocated;
		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
		if (avail_len < sizeof(*hdr)) {
			/*
			 * Is there a better error than EIO here? We asked
			 * for at least the header, and we got less than
			 * that.
			 */
			bp->bio_error = EIO;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			break;
		}

		/*
		 * ATA passthrough responses are little endian; native
		 * SCSI responses are big endian.  Pick the decoder set.
		 */
		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
			ata = 1;
		else
			ata = 0;

		hdr_len = ata ? le32dec(hdr->length) :
		    scsi_4btoul(hdr->length);
		if (hdr_len > 0)
			rep->entries_available = hdr_len / sizeof(*desc);
		else
			rep->entries_available = 0;
		/*
		 * NOTE: using the same values for the BIO version of the
		 * same field as the SCSI/ATA values. This means we could
		 * get some additional values that aren't defined in bio.h
		 * if more values of the same field are defined later.
		 */
		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
		rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
		    scsi_8btou64(hdr->maximum_lba);
		/*
		 * If the drive reports no entries that match the query,
		 * we're done.
		 */
		if (hdr_len == 0) {
			rep->entries_filled = 0;
			break;
		}

		/* Cap by what the device returned vs. what it advertised. */
		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
		    hdr_len / sizeof(*desc));
		/*
		 * If the drive didn't return any data, then we're done.
		 */
		if (num_avail == 0) {
			rep->entries_filled = 0;
			break;
		}

		num_to_fill = min(num_avail, rep->entries_allocated);
		/*
		 * If the user didn't allocate any entries for us to fill,
		 * we're done.
		 */
		if (num_to_fill == 0) {
			rep->entries_filled = 0;
			break;
		}

		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
		     i < num_to_fill; i++, desc++, entry++) {
			/*
			 * NOTE: we're mapping the values here directly
			 * from the SCSI/ATA bit definitions to the bio.h
			 * definitons. There is also a warning in
			 * disk_zone.h, but the impact is that if
			 * additional values are added in the SCSI/ATA
			 * specs these will be visible to consumers of
			 * this interface.
			 */
			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
			entry->zone_condition =
			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
			    SRZ_ZONE_COND_SHIFT;
			entry->zone_flags |= desc->zone_flags &
			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
			entry->zone_length =
			    ata ? le64dec(desc->zone_length) :
			    scsi_8btou64(desc->zone_length);
			entry->zone_start_lba =
			    ata ? le64dec(desc->zone_start_lba) :
			    scsi_8btou64(desc->zone_start_lba);
			entry->write_pointer_lba =
			    ata ? le64dec(desc->write_pointer_lba) :
			    scsi_8btou64(desc->write_pointer_lba);
		}
		rep->entries_filled = num_to_fill;
		break;
	}
	case DISK_ZONE_GET_PARAMS:
	default:
		/*
		 * In theory we should not get a GET_PARAMS bio, since it
		 * should be handled without queueing the command to the
		 * drive.
		 */
		panic("%s: Invalid zone command %d", __func__,
		    bp->bio_zone.zone_cmd);
		break;
	}

	/* The REPORT ZONES buffer was allocated for this command; free it. */
	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
		free(ccb->csio.data_ptr, M_SCSIDA);
}
4133
4134 static void
4135 dadone(struct cam_periph *periph, union ccb *done_ccb)
4136 {
4137 struct da_softc *softc;
4138 struct ccb_scsiio *csio;
4139 u_int32_t priority;
4140 da_ccb_state state;
4141
4142 softc = (struct da_softc *)periph->softc;
4143 priority = done_ccb->ccb_h.pinfo.priority;
4144
4145 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4146
4147 csio = &done_ccb->csio;
4148 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4149 switch (state) {
4150 case DA_CCB_BUFFER_IO:
4151 case DA_CCB_DELETE:
4152 {
4153 struct bio *bp, *bp1;
4154
4155 cam_periph_lock(periph);
4156 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4157 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4158 int error;
4159 int sf;
4160
4161 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4162 sf = SF_RETRY_UA;
4163 else
4164 sf = 0;
4165
4166 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4167 if (error == ERESTART) {
4168 /*
4169 * A retry was scheduled, so
4170 * just return.
4171 */
4172 cam_periph_unlock(periph);
4173 return;
4174 }
4175 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4176 if (error != 0) {
4177 int queued_error;
4178
4179 /*
4180 * return all queued I/O with EIO, so that
4181 * the client can retry these I/Os in the
4182 * proper order should it attempt to recover.
4183 */
4184 queued_error = EIO;
4185
4186 if (error == ENXIO
4187 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4188 /*
4189 * Catastrophic error. Mark our pack as
4190 * invalid.
4191 */
4192 /*
4193 * XXX See if this is really a media
4194 * XXX change first?
4195 */
4196 xpt_print(periph->path,
4197 "Invalidating pack\n");
4198 softc->flags |= DA_FLAG_PACK_INVALID;
4199 #ifdef CAM_IO_STATS
4200 softc->invalidations++;
4201 #endif
4202 queued_error = ENXIO;
4203 }
4204 cam_iosched_flush(softc->cam_iosched, NULL,
4205 queued_error);
4206 if (bp != NULL) {
4207 bp->bio_error = error;
4208 bp->bio_resid = bp->bio_bcount;
4209 bp->bio_flags |= BIO_ERROR;
4210 }
4211 } else if (bp != NULL) {
4212 if (state == DA_CCB_DELETE)
4213 bp->bio_resid = 0;
4214 else
4215 bp->bio_resid = csio->resid;
4216 bp->bio_error = 0;
4217 if (bp->bio_resid != 0)
4218 bp->bio_flags |= BIO_ERROR;
4219 }
4220 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4221 cam_release_devq(done_ccb->ccb_h.path,
4222 /*relsim_flags*/0,
4223 /*reduction*/0,
4224 /*timeout*/0,
4225 /*getcount_only*/0);
4226 } else if (bp != NULL) {
4227 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4228 panic("REQ_CMP with QFRZN");
4229 if (bp->bio_cmd == BIO_ZONE)
4230 dazonedone(periph, done_ccb);
4231 else if (state == DA_CCB_DELETE)
4232 bp->bio_resid = 0;
4233 else
4234 bp->bio_resid = csio->resid;
4235 if ((csio->resid > 0)
4236 && (bp->bio_cmd != BIO_ZONE))
4237 bp->bio_flags |= BIO_ERROR;
4238 if (softc->error_inject != 0) {
4239 bp->bio_error = softc->error_inject;
4240 bp->bio_resid = bp->bio_bcount;
4241 bp->bio_flags |= BIO_ERROR;
4242 softc->error_inject = 0;
4243 }
4244 }
4245
4246 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4247 if (LIST_EMPTY(&softc->pending_ccbs))
4248 softc->flags |= DA_FLAG_WAS_OTAG;
4249
4250 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4251 xpt_release_ccb(done_ccb);
4252 if (state == DA_CCB_DELETE) {
4253 TAILQ_HEAD(, bio) queue;
4254
4255 TAILQ_INIT(&queue);
4256 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4257 softc->delete_run_queue.insert_point = NULL;
4258 /*
4259 * Normally, the xpt_release_ccb() above would make sure
4260 * that when we have more work to do, that work would
4261 * get kicked off. However, we specifically keep
4262 * delete_running set to 0 before the call above to
4263 * allow other I/O to progress when many BIO_DELETE
4264 * requests are pushed down. We set delete_running to 0
4265 * and call daschedule again so that we don't stall if
4266 * there are no other I/Os pending apart from BIO_DELETEs.
4267 */
4268 cam_iosched_trim_done(softc->cam_iosched);
4269 daschedule(periph);
4270 cam_periph_unlock(periph);
4271 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4272 TAILQ_REMOVE(&queue, bp1, bio_queue);
4273 bp1->bio_error = bp->bio_error;
4274 if (bp->bio_flags & BIO_ERROR) {
4275 bp1->bio_flags |= BIO_ERROR;
4276 bp1->bio_resid = bp1->bio_bcount;
4277 } else
4278 bp1->bio_resid = 0;
4279 biodone(bp1);
4280 }
4281 } else {
4282 daschedule(periph);
4283 cam_periph_unlock(periph);
4284 }
4285 if (bp != NULL)
4286 biodone(bp);
4287 return;
4288 }
4289 case DA_CCB_PROBE_WP:
4290 {
4291 struct scsi_mode_header_6 *mode_hdr6;
4292 struct scsi_mode_header_10 *mode_hdr10;
4293 uint8_t dev_spec;
4294
4295 if (softc->minimum_cmd_size > 6) {
4296 mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
4297 dev_spec = mode_hdr10->dev_spec;
4298 } else {
4299 mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
4300 dev_spec = mode_hdr6->dev_spec;
4301 }
4302 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
4303 if ((dev_spec & 0x80) != 0)
4304 softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
4305 else
4306 softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
4307 } else {
4308 int error;
4309
4310 error = daerror(done_ccb, CAM_RETRY_SELTO,
4311 SF_RETRY_UA|SF_NO_PRINT);
4312 if (error == ERESTART)
4313 return;
4314 else if (error != 0) {
4315 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4316 /* Don't wedge this device's queue */
4317 cam_release_devq(done_ccb->ccb_h.path,
4318 /*relsim_flags*/0,
4319 /*reduction*/0,
4320 /*timeout*/0,
4321 /*getcount_only*/0);
4322 }
4323 }
4324 }
4325
4326 free(csio->data_ptr, M_SCSIDA);
4327 xpt_release_ccb(done_ccb);
4328 if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
4329 softc->state = DA_STATE_PROBE_RC16;
4330 else
4331 softc->state = DA_STATE_PROBE_RC;
4332 xpt_schedule(periph, priority);
4333 return;
4334 }
4335 case DA_CCB_PROBE_RC:
4336 case DA_CCB_PROBE_RC16:
4337 {
4338 struct scsi_read_capacity_data *rdcap;
4339 struct scsi_read_capacity_data_long *rcaplong;
4340 char announce_buf[80];
4341 int lbp;
4342
4343 lbp = 0;
4344 rdcap = NULL;
4345 rcaplong = NULL;
4346 if (state == DA_CCB_PROBE_RC)
4347 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4348 else
4349 rcaplong = (struct scsi_read_capacity_data_long *)
4350 csio->data_ptr;
4351
4352 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4353 struct disk_params *dp;
4354 uint32_t block_size;
4355 uint64_t maxsector;
4356 u_int lalba; /* Lowest aligned LBA. */
4357
4358 if (state == DA_CCB_PROBE_RC) {
4359 block_size = scsi_4btoul(rdcap->length);
4360 maxsector = scsi_4btoul(rdcap->addr);
4361 lalba = 0;
4362
4363 /*
4364 * According to SBC-2, if the standard 10
4365 * byte READ CAPACITY command returns 2^32,
4366 * we should issue the 16 byte version of
4367 * the command, since the device in question
4368 * has more sectors than can be represented
4369 * with the short version of the command.
4370 */
4371 if (maxsector == 0xffffffff) {
4372 free(rdcap, M_SCSIDA);
4373 xpt_release_ccb(done_ccb);
4374 softc->state = DA_STATE_PROBE_RC16;
4375 xpt_schedule(periph, priority);
4376 return;
4377 }
4378 } else {
4379 block_size = scsi_4btoul(rcaplong->length);
4380 maxsector = scsi_8btou64(rcaplong->addr);
4381 lalba = scsi_2btoul(rcaplong->lalba_lbp);
4382 }
4383
4384 /*
4385 * Because GEOM code just will panic us if we
4386 * give them an 'illegal' value we'll avoid that
4387 * here.
4388 */
4389 if (block_size == 0) {
4390 block_size = 512;
4391 if (maxsector == 0)
4392 maxsector = -1;
4393 }
4394 if (block_size >= MAXPHYS) {
4395 xpt_print(periph->path,
4396 "unsupportable block size %ju\n",
4397 (uintmax_t) block_size);
4398 announce_buf[0] = '\0';
4399 cam_periph_invalidate(periph);
4400 } else {
4401 /*
4402 * We pass rcaplong into dasetgeom(),
4403 * because it will only use it if it is
4404 * non-NULL.
4405 */
4406 dasetgeom(periph, block_size, maxsector,
4407 rcaplong, sizeof(*rcaplong));
4408 lbp = (lalba & SRC16_LBPME_A);
4409 dp = &softc->params;
4410 snprintf(announce_buf, sizeof(announce_buf),
4411 "%juMB (%ju %u byte sectors)",
4412 ((uintmax_t)dp->secsize * dp->sectors) /
4413 (1024 * 1024),
4414 (uintmax_t)dp->sectors, dp->secsize);
4415 }
4416 } else {
4417 int error;
4418
4419 announce_buf[0] = '\0';
4420
4421 /*
4422 * Retry any UNIT ATTENTION type errors. They
4423 * are expected at boot.
4424 */
4425 error = daerror(done_ccb, CAM_RETRY_SELTO,
4426 SF_RETRY_UA|SF_NO_PRINT);
4427 if (error == ERESTART) {
4428 /*
4429					 * A retry was scheduled, so
4430 * just return.
4431 */
4432 return;
4433 } else if (error != 0) {
4434 int asc, ascq;
4435 int sense_key, error_code;
4436 int have_sense;
4437 cam_status status;
4438 struct ccb_getdev cgd;
4439
4440 /* Don't wedge this device's queue */
4441 status = done_ccb->ccb_h.status;
4442 if ((status & CAM_DEV_QFRZN) != 0)
4443 cam_release_devq(done_ccb->ccb_h.path,
4444 /*relsim_flags*/0,
4445 /*reduction*/0,
4446 /*timeout*/0,
4447 /*getcount_only*/0);
4448
4449
4450 xpt_setup_ccb(&cgd.ccb_h,
4451 done_ccb->ccb_h.path,
4452 CAM_PRIORITY_NORMAL);
4453 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4454 xpt_action((union ccb *)&cgd);
4455
4456 if (scsi_extract_sense_ccb(done_ccb,
4457 &error_code, &sense_key, &asc, &ascq))
4458 have_sense = TRUE;
4459 else
4460 have_sense = FALSE;
4461
4462 /*
4463 * If we tried READ CAPACITY(16) and failed,
4464 * fallback to READ CAPACITY(10).
4465 */
4466 if ((state == DA_CCB_PROBE_RC16) &&
4467 (softc->flags & DA_FLAG_CAN_RC16) &&
4468 (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4469 CAM_REQ_INVALID) ||
4470 ((have_sense) &&
4471 (error_code == SSD_CURRENT_ERROR ||
4472 error_code == SSD_DESC_CURRENT_ERROR) &&
4473 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4474 softc->flags &= ~DA_FLAG_CAN_RC16;
4475 free(rdcap, M_SCSIDA);
4476 xpt_release_ccb(done_ccb);
4477 softc->state = DA_STATE_PROBE_RC;
4478 xpt_schedule(periph, priority);
4479 return;
4480 }
4481
4482 /*
4483 * Attach to anything that claims to be a
4484 * direct access or optical disk device,
4485 * as long as it doesn't return a "Logical
4486 * unit not supported" (0x25) error.
4487 */
4488 if ((have_sense) && (asc != 0x25)
4489 && (error_code == SSD_CURRENT_ERROR
4490 || error_code == SSD_DESC_CURRENT_ERROR)) {
4491 const char *sense_key_desc;
4492 const char *asc_desc;
4493
4494 dasetgeom(periph, 512, -1, NULL, 0);
4495 scsi_sense_desc(sense_key, asc, ascq,
4496 &cgd.inq_data,
4497 &sense_key_desc,
4498 &asc_desc);
4499 snprintf(announce_buf,
4500 sizeof(announce_buf),
4501 "Attempt to query device "
4502 "size failed: %s, %s",
4503 sense_key_desc,
4504 asc_desc);
4505 } else {
4506 if (have_sense)
4507 scsi_sense_print(
4508 &done_ccb->csio);
4509 else {
4510 xpt_print(periph->path,
4511 "got CAM status %#x\n",
4512 done_ccb->ccb_h.status);
4513 }
4514
4515 xpt_print(periph->path, "fatal error, "
4516 "failed to attach to device\n");
4517
4518 /*
4519 * Free up resources.
4520 */
4521 cam_periph_invalidate(periph);
4522 }
4523 }
4524 }
4525 free(csio->data_ptr, M_SCSIDA);
4526 if (announce_buf[0] != '\0' &&
4527 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4528 /*
4529 * Create our sysctl variables, now that we know
4530 * we have successfully attached.
4531 */
4532 /* increase the refcount */
4533 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
4534 taskqueue_enqueue(taskqueue_thread,
4535 &softc->sysctl_task);
4536 xpt_announce_periph(periph, announce_buf);
4537 xpt_announce_quirks(periph, softc->quirks,
4538 DA_Q_BIT_STRING);
4539 } else {
4540 xpt_print(periph->path, "fatal error, "
4541 "could not acquire reference count\n");
4542 }
4543 }
4544
4545 /* We already probed the device. */
4546 if (softc->flags & DA_FLAG_PROBED) {
4547 daprobedone(periph, done_ccb);
4548 return;
4549 }
4550
4551 /* Ensure re-probe doesn't see old delete. */
4552 softc->delete_available = 0;
4553 dadeleteflag(softc, DA_DELETE_ZERO, 1);
4554 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4555 /*
4556 * Based on older SBC-3 spec revisions
4557 * any of the UNMAP methods "may" be
4558 * available via LBP given this flag so
4559 * we flag all of them as available and
4560 * then remove those which further
4561 * probes confirm aren't available
4562 * later.
4563 *
4564 * We could also check readcap(16) p_type
4565 * flag to exclude one or more invalid
4566 * write same (X) types here
4567 */
4568 dadeleteflag(softc, DA_DELETE_WS16, 1);
4569 dadeleteflag(softc, DA_DELETE_WS10, 1);
4570 dadeleteflag(softc, DA_DELETE_UNMAP, 1);
4571
4572 xpt_release_ccb(done_ccb);
4573 softc->state = DA_STATE_PROBE_LBP;
4574 xpt_schedule(periph, priority);
4575 return;
4576 }
4577
4578 xpt_release_ccb(done_ccb);
4579 softc->state = DA_STATE_PROBE_BDC;
4580 xpt_schedule(periph, priority);
4581 return;
4582 }
4583 case DA_CCB_PROBE_LBP:
4584 {
4585 struct scsi_vpd_logical_block_prov *lbp;
4586
4587 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
4588
4589 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4590 /*
4591 * T10/1799-D Revision 31 states at least one of these
4592 * must be supported but we don't currently enforce this.
4593 */
4594 dadeleteflag(softc, DA_DELETE_WS16,
4595 (lbp->flags & SVPD_LBP_WS16));
4596 dadeleteflag(softc, DA_DELETE_WS10,
4597 (lbp->flags & SVPD_LBP_WS10));
4598 dadeleteflag(softc, DA_DELETE_UNMAP,
4599 (lbp->flags & SVPD_LBP_UNMAP));
4600 } else {
4601 int error;
4602 error = daerror(done_ccb, CAM_RETRY_SELTO,
4603 SF_RETRY_UA|SF_NO_PRINT);
4604 if (error == ERESTART)
4605 return;
4606 else if (error != 0) {
4607 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4608 /* Don't wedge this device's queue */
4609 cam_release_devq(done_ccb->ccb_h.path,
4610 /*relsim_flags*/0,
4611 /*reduction*/0,
4612 /*timeout*/0,
4613 /*getcount_only*/0);
4614 }
4615
4616 /*
4617 * Failure indicates we don't support any SBC-3
4618 * delete methods with UNMAP
4619 */
4620 }
4621 }
4622
4623 free(lbp, M_SCSIDA);
4624 xpt_release_ccb(done_ccb);
4625 softc->state = DA_STATE_PROBE_BLK_LIMITS;
4626 xpt_schedule(periph, priority);
4627 return;
4628 }
4629 case DA_CCB_PROBE_BLK_LIMITS:
4630 {
4631 struct scsi_vpd_block_limits *block_limits;
4632
4633 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
4634
4635 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4636 uint32_t max_txfer_len = scsi_4btoul(
4637 block_limits->max_txfer_len);
4638 uint32_t max_unmap_lba_cnt = scsi_4btoul(
4639 block_limits->max_unmap_lba_cnt);
4640 uint32_t max_unmap_blk_cnt = scsi_4btoul(
4641 block_limits->max_unmap_blk_cnt);
4642 uint32_t unmap_gran = scsi_4btoul(
4643 block_limits->opt_unmap_grain);
4644 uint32_t unmap_gran_align = scsi_4btoul(
4645 block_limits->unmap_grain_align);
4646 uint64_t ws_max_blks = scsi_8btou64(
4647 block_limits->max_write_same_length);
4648
4649 if (max_txfer_len != 0) {
4650 softc->disk->d_maxsize = MIN(softc->maxio,
4651 (off_t)max_txfer_len * softc->params.secsize);
4652 }
4653
4654 /*
4655 * We should already support UNMAP but we check lba
4656 * and block count to be sure
4657 */
4658 if (max_unmap_lba_cnt != 0x00L &&
4659 max_unmap_blk_cnt != 0x00L) {
4660 softc->unmap_max_lba = max_unmap_lba_cnt;
4661 softc->unmap_max_ranges = min(max_unmap_blk_cnt,
4662 UNMAP_MAX_RANGES);
4663 if (unmap_gran > 1) {
4664 softc->unmap_gran = unmap_gran;
4665 if (unmap_gran_align & 0x80000000) {
4666 softc->unmap_gran_align =
4667 unmap_gran_align &
4668 0x7fffffff;
4669 }
4670 }
4671 } else {
4672 /*
4673 * Unexpected UNMAP limits which means the
4674 * device doesn't actually support UNMAP
4675 */
4676 dadeleteflag(softc, DA_DELETE_UNMAP, 0);
4677 }
4678
4679 if (ws_max_blks != 0x00L)
4680 softc->ws_max_blks = ws_max_blks;
4681 } else {
4682 int error;
4683 error = daerror(done_ccb, CAM_RETRY_SELTO,
4684 SF_RETRY_UA|SF_NO_PRINT);
4685 if (error == ERESTART)
4686 return;
4687 else if (error != 0) {
4688 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4689 /* Don't wedge this device's queue */
4690 cam_release_devq(done_ccb->ccb_h.path,
4691 /*relsim_flags*/0,
4692 /*reduction*/0,
4693 /*timeout*/0,
4694 /*getcount_only*/0);
4695 }
4696
4697 /*
4698 * Failure here doesn't mean UNMAP is not
4699 * supported as this is an optional page.
4700 */
4701 softc->unmap_max_lba = 1;
4702 softc->unmap_max_ranges = 1;
4703 }
4704 }
4705
4706 free(block_limits, M_SCSIDA);
4707 xpt_release_ccb(done_ccb);
4708 softc->state = DA_STATE_PROBE_BDC;
4709 xpt_schedule(periph, priority);
4710 return;
4711 }
4712 case DA_CCB_PROBE_BDC:
4713 {
4714 struct scsi_vpd_block_device_characteristics *bdc;
4715
4716 bdc = (struct scsi_vpd_block_device_characteristics *)
4717 csio->data_ptr;
4718
4719 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4720 uint32_t valid_len;
4721
4722 /*
4723 * Disable queue sorting for non-rotational media
4724 * by default.
4725 */
4726 u_int16_t old_rate = softc->disk->d_rotation_rate;
4727
4728 valid_len = csio->dxfer_len - csio->resid;
4729 if (SBDC_IS_PRESENT(bdc, valid_len,
4730 medium_rotation_rate)) {
4731 softc->disk->d_rotation_rate =
4732 scsi_2btoul(bdc->medium_rotation_rate);
4733 if (softc->disk->d_rotation_rate ==
4734 SVPD_BDC_RATE_NON_ROTATING) {
4735 cam_iosched_set_sort_queue(
4736 softc->cam_iosched, 0);
4737 softc->rotating = 0;
4738 }
4739 if (softc->disk->d_rotation_rate != old_rate) {
4740 disk_attr_changed(softc->disk,
4741 "GEOM::rotation_rate", M_NOWAIT);
4742 }
4743 }
4744 if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
4745 && (softc->zone_mode == DA_ZONE_NONE)) {
4746 int ata_proto;
4747
4748 if (scsi_vpd_supported_page(periph,
4749 SVPD_ATA_INFORMATION))
4750 ata_proto = 1;
4751 else
4752 ata_proto = 0;
4753
4754 /*
4755 * The Zoned field will only be set for
4756 * Drive Managed and Host Aware drives. If
4757 * they are Host Managed, the device type
4758 * in the standard INQUIRY data should be
4759 * set to T_ZBC_HM (0x14).
4760 */
4761 if ((bdc->flags & SVPD_ZBC_MASK) ==
4762 SVPD_HAW_ZBC) {
4763 softc->zone_mode = DA_ZONE_HOST_AWARE;
4764 softc->zone_interface = (ata_proto) ?
4765 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4766 } else if ((bdc->flags & SVPD_ZBC_MASK) ==
4767 SVPD_DM_ZBC) {
4768 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4769 softc->zone_interface = (ata_proto) ?
4770 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4771 } else if ((bdc->flags & SVPD_ZBC_MASK) !=
4772 SVPD_ZBC_NR) {
4773 xpt_print(periph->path, "Unknown zoned "
4774 "type %#x",
4775 bdc->flags & SVPD_ZBC_MASK);
4776 }
4777 }
4778 } else {
4779 int error;
4780 error = daerror(done_ccb, CAM_RETRY_SELTO,
4781 SF_RETRY_UA|SF_NO_PRINT);
4782 if (error == ERESTART)
4783 return;
4784 else if (error != 0) {
4785 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4786 /* Don't wedge this device's queue */
4787 cam_release_devq(done_ccb->ccb_h.path,
4788 /*relsim_flags*/0,
4789 /*reduction*/0,
4790 /*timeout*/0,
4791 /*getcount_only*/0);
4792 }
4793 }
4794 }
4795
4796 free(bdc, M_SCSIDA);
4797 xpt_release_ccb(done_ccb);
4798 softc->state = DA_STATE_PROBE_ATA;
4799 xpt_schedule(periph, priority);
4800 return;
4801 }
4802 case DA_CCB_PROBE_ATA:
4803 {
4804 int i;
4805 struct ata_params *ata_params;
4806 int continue_probe;
4807 int error;
4808 int16_t *ptr;
4809
4810 ata_params = (struct ata_params *)csio->data_ptr;
4811 ptr = (uint16_t *)ata_params;
4812 continue_probe = 0;
4813 error = 0;
4814
4815 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4816 uint16_t old_rate;
4817
4818 for (i = 0; i < sizeof(*ata_params) / 2; i++)
4819 ptr[i] = le16toh(ptr[i]);
4820 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
4821 (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4822 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
4823 if (ata_params->max_dsm_blocks != 0)
4824 softc->trim_max_ranges = min(
4825 softc->trim_max_ranges,
4826 ata_params->max_dsm_blocks *
4827 ATA_DSM_BLK_RANGES);
4828 }
4829 /*
4830 * Disable queue sorting for non-rotational media
4831 * by default.
4832 */
4833 old_rate = softc->disk->d_rotation_rate;
4834 softc->disk->d_rotation_rate =
4835 ata_params->media_rotation_rate;
4836 if (softc->disk->d_rotation_rate ==
4837 ATA_RATE_NON_ROTATING) {
4838 cam_iosched_set_sort_queue(softc->cam_iosched, 0);
4839 softc->rotating = 0;
4840 }
4841 if (softc->disk->d_rotation_rate != old_rate) {
4842 disk_attr_changed(softc->disk,
4843 "GEOM::rotation_rate", M_NOWAIT);
4844 }
4845
4846 if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
4847 softc->flags |= DA_FLAG_CAN_ATA_DMA;
4848
4849 if (ata_params->support.extension &
4850 ATA_SUPPORT_GENLOG)
4851 softc->flags |= DA_FLAG_CAN_ATA_LOG;
4852
4853 /*
4854 * At this point, if we have a SATA host aware drive,
4855 * we communicate via ATA passthrough unless the
4856 * SAT layer supports ZBC -> ZAC translation. In
4857 * that case,
4858 */
4859 /*
4860 * XXX KDM figure out how to detect a host managed
4861 * SATA drive.
4862 */
4863 if (softc->zone_mode == DA_ZONE_NONE) {
4864 /*
4865 * Note that we don't override the zone
4866 * mode or interface if it has already been
4867 * set. This is because it has either been
4868 * set as a quirk, or when we probed the
4869 * SCSI Block Device Characteristics page,
4870 * the zoned field was set. The latter
4871 * means that the SAT layer supports ZBC to
4872 * ZAC translation, and we would prefer to
4873 * use that if it is available.
4874 */
4875 if ((ata_params->support3 &
4876 ATA_SUPPORT_ZONE_MASK) ==
4877 ATA_SUPPORT_ZONE_HOST_AWARE) {
4878 softc->zone_mode = DA_ZONE_HOST_AWARE;
4879 softc->zone_interface =
4880 DA_ZONE_IF_ATA_PASS;
4881 } else if ((ata_params->support3 &
4882 ATA_SUPPORT_ZONE_MASK) ==
4883 ATA_SUPPORT_ZONE_DEV_MANAGED) {
4884 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4885 softc->zone_interface =
4886 DA_ZONE_IF_ATA_PASS;
4887 }
4888 }
4889
4890 } else {
4891 error = daerror(done_ccb, CAM_RETRY_SELTO,
4892 SF_RETRY_UA|SF_NO_PRINT);
4893 if (error == ERESTART)
4894 return;
4895 else if (error != 0) {
4896 if ((done_ccb->ccb_h.status &
4897 CAM_DEV_QFRZN) != 0) {
4898 /* Don't wedge this device's queue */
4899 cam_release_devq(done_ccb->ccb_h.path,
4900 /*relsim_flags*/0,
4901 /*reduction*/0,
4902 /*timeout*/0,
4903 /*getcount_only*/0);
4904 }
4905 }
4906 }
4907
4908 free(ata_params, M_SCSIDA);
4909 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
4910 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
4911 /*
4912 * If the ATA IDENTIFY failed, we could be talking
4913 * to a SCSI drive, although that seems unlikely,
4914 * since the drive did report that it supported the
4915 * ATA Information VPD page. If the ATA IDENTIFY
4916 * succeeded, and the SAT layer doesn't support
4917 * ZBC -> ZAC translation, continue on to get the
4918 * directory of ATA logs, and complete the rest of
4919 * the ZAC probe. If the SAT layer does support
4920 * ZBC -> ZAC translation, we want to use that,
4921 * and we'll probe the SCSI Zoned Block Device
4922 * Characteristics VPD page next.
4923 */
4924 if ((error == 0)
4925 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
4926 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
4927 softc->state = DA_STATE_PROBE_ATA_LOGDIR;
4928 else
4929 softc->state = DA_STATE_PROBE_ZONE;
4930 continue_probe = 1;
4931 }
4932 if (continue_probe != 0) {
4933 xpt_release_ccb(done_ccb);
4934 xpt_schedule(periph, priority);
4935 return;
4936 } else
4937 daprobedone(periph, done_ccb);
4938 return;
4939 }
4940 case DA_CCB_PROBE_ATA_LOGDIR:
4941 {
4942 int error;
4943
4944 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4945 error = 0;
4946 softc->valid_logdir_len = 0;
4947 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
4948 softc->valid_logdir_len =
4949 csio->dxfer_len - csio->resid;
4950 if (softc->valid_logdir_len > 0)
4951 bcopy(csio->data_ptr, &softc->ata_logdir,
4952 min(softc->valid_logdir_len,
4953 sizeof(softc->ata_logdir)));
4954 /*
4955 * Figure out whether the Identify Device log is
4956 * supported. The General Purpose log directory
4957 * has a header, and lists the number of pages
4958 * available for each GP log identified by the
4959 * offset into the list.
4960 */
4961 if ((softc->valid_logdir_len >=
4962 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
4963 && (le16dec(softc->ata_logdir.header) ==
4964 ATA_GP_LOG_DIR_VERSION)
4965 && (le16dec(&softc->ata_logdir.num_pages[
4966 (ATA_IDENTIFY_DATA_LOG *
4967 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
4968 softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
4969 } else {
4970 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
4971 }
4972 } else {
4973 error = daerror(done_ccb, CAM_RETRY_SELTO,
4974 SF_RETRY_UA|SF_NO_PRINT);
4975 if (error == ERESTART)
4976 return;
4977 else if (error != 0) {
4978 /*
4979 * If we can't get the ATA log directory,
4980 * then ATA logs are effectively not
4981 * supported even if the bit is set in the
4982 * identify data.
4983 */
4984 softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
4985 DA_FLAG_CAN_ATA_IDLOG);
4986 if ((done_ccb->ccb_h.status &
4987 CAM_DEV_QFRZN) != 0) {
4988 /* Don't wedge this device's queue */
4989 cam_release_devq(done_ccb->ccb_h.path,
4990 /*relsim_flags*/0,
4991 /*reduction*/0,
4992 /*timeout*/0,
4993 /*getcount_only*/0);
4994 }
4995 }
4996 }
4997
4998 free(csio->data_ptr, M_SCSIDA);
4999
5000 if ((error == 0)
5001 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
5002 softc->state = DA_STATE_PROBE_ATA_IDDIR;
5003 xpt_release_ccb(done_ccb);
5004 xpt_schedule(periph, priority);
5005 return;
5006 }
5007 daprobedone(periph, done_ccb);
5008 return;
5009 }
5010 case DA_CCB_PROBE_ATA_IDDIR:
5011 {
5012 int error;
5013
5014 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5015 off_t entries_offset, max_entries;
5016 error = 0;
5017
5018 softc->valid_iddir_len = 0;
5019 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
5020 softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
5021 DA_FLAG_CAN_ATA_ZONE);
5022 softc->valid_iddir_len =
5023 csio->dxfer_len - csio->resid;
5024 if (softc->valid_iddir_len > 0)
5025 bcopy(csio->data_ptr, &softc->ata_iddir,
5026 min(softc->valid_iddir_len,
5027 sizeof(softc->ata_iddir)));
5028
5029 entries_offset =
5030 __offsetof(struct ata_identify_log_pages,entries);
5031 max_entries = softc->valid_iddir_len - entries_offset;
5032 if ((softc->valid_iddir_len > (entries_offset + 1))
5033 && (le64dec(softc->ata_iddir.header) ==
5034 ATA_IDLOG_REVISION)
5035 && (softc->ata_iddir.entry_count > 0)) {
5036 int num_entries, i;
5037
5038 num_entries = softc->ata_iddir.entry_count;
5039 num_entries = min(num_entries,
5040 softc->valid_iddir_len - entries_offset);
5041 for (i = 0; i < num_entries &&
5042 i < max_entries; i++) {
5043 if (softc->ata_iddir.entries[i] ==
5044 ATA_IDL_SUP_CAP)
5045 softc->flags |=
5046 DA_FLAG_CAN_ATA_SUPCAP;
5047 else if (softc->ata_iddir.entries[i]==
5048 ATA_IDL_ZDI)
5049 softc->flags |=
5050 DA_FLAG_CAN_ATA_ZONE;
5051
5052 if ((softc->flags &
5053 DA_FLAG_CAN_ATA_SUPCAP)
5054 && (softc->flags &
5055 DA_FLAG_CAN_ATA_ZONE))
5056 break;
5057 }
5058 }
5059 } else {
5060 error = daerror(done_ccb, CAM_RETRY_SELTO,
5061 SF_RETRY_UA|SF_NO_PRINT);
5062 if (error == ERESTART)
5063 return;
5064 else if (error != 0) {
5065 /*
5066 * If we can't get the ATA Identify Data log
5067 * directory, then it effectively isn't
5068					 * supported even if the ATA Log directory
5069					 * reports a non-zero number of pages present for
5070 * this log.
5071 */
5072 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5073 if ((done_ccb->ccb_h.status &
5074 CAM_DEV_QFRZN) != 0) {
5075 /* Don't wedge this device's queue */
5076 cam_release_devq(done_ccb->ccb_h.path,
5077 /*relsim_flags*/0,
5078 /*reduction*/0,
5079 /*timeout*/0,
5080 /*getcount_only*/0);
5081 }
5082 }
5083 }
5084
5085 free(csio->data_ptr, M_SCSIDA);
5086
5087 if ((error == 0)
5088 && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
5089 softc->state = DA_STATE_PROBE_ATA_SUP;
5090 xpt_release_ccb(done_ccb);
5091 xpt_schedule(periph, priority);
5092 return;
5093 }
5094 daprobedone(periph, done_ccb);
5095 return;
5096 }
5097 case DA_CCB_PROBE_ATA_SUP:
5098 {
5099 int error;
5100
5101 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5102 uint32_t valid_len;
5103 size_t needed_size;
5104 struct ata_identify_log_sup_cap *sup_cap;
5105 error = 0;
5106
5107 sup_cap = (struct ata_identify_log_sup_cap *)
5108 csio->data_ptr;
5109 valid_len = csio->dxfer_len - csio->resid;
5110 needed_size =
5111 __offsetof(struct ata_identify_log_sup_cap,
5112 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5113 if (valid_len >= needed_size) {
5114 uint64_t zoned, zac_cap;
5115
5116 zoned = le64dec(sup_cap->zoned_cap);
5117 if (zoned & ATA_ZONED_VALID) {
5118 /*
5119 * This should have already been
5120 * set, because this is also in the
5121 * ATA identify data.
5122 */
5123 if ((zoned & ATA_ZONED_MASK) ==
5124 ATA_SUPPORT_ZONE_HOST_AWARE)
5125 softc->zone_mode =
5126 DA_ZONE_HOST_AWARE;
5127 else if ((zoned & ATA_ZONED_MASK) ==
5128 ATA_SUPPORT_ZONE_DEV_MANAGED)
5129 softc->zone_mode =
5130 DA_ZONE_DRIVE_MANAGED;
5131 }
5132
5133 zac_cap = le64dec(sup_cap->sup_zac_cap);
5134 if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5135 if (zac_cap & ATA_REPORT_ZONES_SUP)
5136 softc->zone_flags |=
5137 DA_ZONE_FLAG_RZ_SUP;
5138 if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5139 softc->zone_flags |=
5140 DA_ZONE_FLAG_OPEN_SUP;
5141 if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5142 softc->zone_flags |=
5143 DA_ZONE_FLAG_CLOSE_SUP;
5144 if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5145 softc->zone_flags |=
5146 DA_ZONE_FLAG_FINISH_SUP;
5147 if (zac_cap & ATA_ND_RWP_SUP)
5148 softc->zone_flags |=
5149 DA_ZONE_FLAG_RWP_SUP;
5150 } else {
5151 /*
5152 * This field was introduced in
5153 * ACS-4, r08 on April 28th, 2015.
5154 * If the drive firmware was written
5155 * to an earlier spec, it won't have
5156 * the field. So, assume all
5157 * commands are supported.
5158 */
5159 softc->zone_flags |=
5160 DA_ZONE_FLAG_SUP_MASK;
5161 }
5162
5163 }
5164 } else {
5165 error = daerror(done_ccb, CAM_RETRY_SELTO,
5166 SF_RETRY_UA|SF_NO_PRINT);
5167 if (error == ERESTART)
5168 return;
5169 else if (error != 0) {
5170 /*
5171 * If we can't get the ATA Identify Data
5172 * Supported Capabilities page, clear the
5173 * flag...
5174 */
5175 softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5176 /*
5177 * And clear zone capabilities.
5178 */
5179 softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5180 if ((done_ccb->ccb_h.status &
5181 CAM_DEV_QFRZN) != 0) {
5182 /* Don't wedge this device's queue */
5183 cam_release_devq(done_ccb->ccb_h.path,
5184 /*relsim_flags*/0,
5185 /*reduction*/0,
5186 /*timeout*/0,
5187 /*getcount_only*/0);
5188 }
5189 }
5190 }
5191
5192 free(csio->data_ptr, M_SCSIDA);
5193
5194 if ((error == 0)
5195 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5196 softc->state = DA_STATE_PROBE_ATA_ZONE;
5197 xpt_release_ccb(done_ccb);
5198 xpt_schedule(periph, priority);
5199 return;
5200 }
5201 daprobedone(periph, done_ccb);
5202 return;
5203 }
5204 case DA_CCB_PROBE_ATA_ZONE:
5205 {
5206 int error;
5207
5208 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5209 struct ata_zoned_info_log *zi_log;
5210 uint32_t valid_len;
5211 size_t needed_size;
5212
5213 zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5214
5215 valid_len = csio->dxfer_len - csio->resid;
5216 needed_size = __offsetof(struct ata_zoned_info_log,
5217 version_info) + 1 + sizeof(zi_log->version_info);
5218 if (valid_len >= needed_size) {
5219 uint64_t tmpvar;
5220
5221 tmpvar = le64dec(zi_log->zoned_cap);
5222 if (tmpvar & ATA_ZDI_CAP_VALID) {
5223 if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5224 softc->zone_flags |=
5225 DA_ZONE_FLAG_URSWRZ;
5226 else
5227 softc->zone_flags &=
5228 ~DA_ZONE_FLAG_URSWRZ;
5229 }
5230 tmpvar = le64dec(zi_log->optimal_seq_zones);
5231 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5232 softc->zone_flags |=
5233 DA_ZONE_FLAG_OPT_SEQ_SET;
5234 softc->optimal_seq_zones = (tmpvar &
5235 ATA_ZDI_OPT_SEQ_MASK);
5236 } else {
5237 softc->zone_flags &=
5238 ~DA_ZONE_FLAG_OPT_SEQ_SET;
5239 softc->optimal_seq_zones = 0;
5240 }
5241
5242 tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5243 if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5244 softc->zone_flags |=
5245 DA_ZONE_FLAG_OPT_NONSEQ_SET;
5246 softc->optimal_nonseq_zones =
5247 (tmpvar & ATA_ZDI_OPT_NS_MASK);
5248 } else {
5249 softc->zone_flags &=
5250 ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5251 softc->optimal_nonseq_zones = 0;
5252 }
5253
5254 tmpvar = le64dec(zi_log->max_seq_req_zones);
5255 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5256 softc->zone_flags |=
5257 DA_ZONE_FLAG_MAX_SEQ_SET;
5258 softc->max_seq_zones =
5259 (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5260 } else {
5261 softc->zone_flags &=
5262 ~DA_ZONE_FLAG_MAX_SEQ_SET;
5263 softc->max_seq_zones = 0;
5264 }
5265 }
5266 } else {
5267 error = daerror(done_ccb, CAM_RETRY_SELTO,
5268 SF_RETRY_UA|SF_NO_PRINT);
5269 if (error == ERESTART)
5270 return;
5271 else if (error != 0) {
5272 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5273 softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5274
5275 if ((done_ccb->ccb_h.status &
5276 CAM_DEV_QFRZN) != 0) {
5277 /* Don't wedge this device's queue */
5278 cam_release_devq(done_ccb->ccb_h.path,
5279 /*relsim_flags*/0,
5280 /*reduction*/0,
5281 /*timeout*/0,
5282 /*getcount_only*/0);
5283 }
5284 }
5285
5286 }
5287 free(csio->data_ptr, M_SCSIDA);
5288
5289 daprobedone(periph, done_ccb);
5290 return;
5291 }
5292 case DA_CCB_PROBE_ZONE:
5293 {
5294 int error;
5295
5296 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5297 uint32_t valid_len;
5298 size_t needed_len;
5299 struct scsi_vpd_zoned_bdc *zoned_bdc;
5300
5301 error = 0;
5302 zoned_bdc = (struct scsi_vpd_zoned_bdc *)
5303 csio->data_ptr;
5304 valid_len = csio->dxfer_len - csio->resid;
5305 needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5306 max_seq_req_zones) + 1 +
5307 sizeof(zoned_bdc->max_seq_req_zones);
5308 if ((valid_len >= needed_len)
5309 && (scsi_2btoul(zoned_bdc->page_length) >=
5310 SVPD_ZBDC_PL)) {
5311 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5312 softc->zone_flags |=
5313 DA_ZONE_FLAG_URSWRZ;
5314 else
5315 softc->zone_flags &=
5316 ~DA_ZONE_FLAG_URSWRZ;
5317 softc->optimal_seq_zones =
5318 scsi_4btoul(zoned_bdc->optimal_seq_zones);
5319 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5320 softc->optimal_nonseq_zones = scsi_4btoul(
5321 zoned_bdc->optimal_nonseq_zones);
5322 softc->zone_flags |=
5323 DA_ZONE_FLAG_OPT_NONSEQ_SET;
5324 softc->max_seq_zones =
5325 scsi_4btoul(zoned_bdc->max_seq_req_zones);
5326 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5327 }
5328 /*
5329 * All of the zone commands are mandatory for SCSI
5330 * devices.
5331 *
5332 * XXX KDM this is valid as of September 2015.
5333 * Re-check this assumption once the SAT spec is
5334 * updated to support SCSI ZBC to ATA ZAC mapping.
5335 * Since ATA allows zone commands to be reported
5336 * as supported or not, this may not necessarily
5337 * be true for an ATA device behind a SAT (SCSI to
5338 * ATA Translation) layer.
5339 */
5340 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5341 } else {
5342 error = daerror(done_ccb, CAM_RETRY_SELTO,
5343 SF_RETRY_UA|SF_NO_PRINT);
5344 if (error == ERESTART)
5345 return;
5346 else if (error != 0) {
5347 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5348 /* Don't wedge this device's queue */
5349 cam_release_devq(done_ccb->ccb_h.path,
5350 /*relsim_flags*/0,
5351 /*reduction*/0,
5352 /*timeout*/0,
5353 /*getcount_only*/0);
5354 }
5355 }
5356 }
5357 daprobedone(periph, done_ccb);
5358 return;
5359 }
5360 case DA_CCB_DUMP:
5361 /* No-op. We're polling */
5362 return;
5363 case DA_CCB_TUR:
5364 {
5365 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5366
5367 if (daerror(done_ccb, CAM_RETRY_SELTO,
5368 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) ==
5369 ERESTART)
5370 return;
5371 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5372 cam_release_devq(done_ccb->ccb_h.path,
5373 /*relsim_flags*/0,
5374 /*reduction*/0,
5375 /*timeout*/0,
5376 /*getcount_only*/0);
5377 }
5378 xpt_release_ccb(done_ccb);
5379 cam_periph_release_locked(periph);
5380 return;
5381 }
5382 default:
5383 break;
5384 }
5385 xpt_release_ccb(done_ccb);
5386 }
5387
5388 static void
5389 dareprobe(struct cam_periph *periph)
5390 {
5391 struct da_softc *softc;
5392 cam_status status;
5393
5394 softc = (struct da_softc *)periph->softc;
5395
5396 /* Probe in progress; don't interfere. */
5397 if (softc->state != DA_STATE_NORMAL)
5398 return;
5399
5400 status = cam_periph_acquire(periph);
5401 KASSERT(status == CAM_REQ_CMP,
5402 ("dareprobe: cam_periph_acquire failed"));
5403
5404 softc->state = DA_STATE_PROBE_WP;
5405 xpt_schedule(periph, CAM_PRIORITY_DEV);
5406 }
5407
/*
 * Common error handler for da(4) CCBs.  Performs driver-level recovery
 * side effects (READ/WRITE(6) -> (10) command upgrade, media-change and
 * media-gone notification, scheduling a re-probe) based on the returned
 * status and sense data, then defers the final retry/fail decision to
 * cam_periph_error().
 *
 * Returns ERESTART when the command has been requeued, otherwise the
 * disposition chosen by cam_periph_error().
 */
static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/* Treat as a media change: tell GEOM to re-taste. */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/*
			 * Medium not present: mark the pack invalid once and
			 * notify upper layers the media is gone.  Note that
			 * ascq is deliberately not checked here so that all
			 * "medium not present" variants are caught.
			 */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	/* The workaround path may have requeued the command itself. */
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	/* Account the failure in the per-device I/O statistics. */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags,
	    &softc->saved_ccb));
}
5487
5488 static void
5489 damediapoll(void *arg)
5490 {
5491 struct cam_periph *periph = arg;
5492 struct da_softc *softc = periph->softc;
5493
5494 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
5495 LIST_EMPTY(&softc->pending_ccbs)) {
5496 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
5497 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
5498 daschedule(periph);
5499 }
5500 }
5501 /* Queue us up again */
5502 if (da_poll_period != 0)
5503 callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
5504 }
5505
5506 static void
5507 daprevent(struct cam_periph *periph, int action)
5508 {
5509 struct da_softc *softc;
5510 union ccb *ccb;
5511 int error;
5512
5513 softc = (struct da_softc *)periph->softc;
5514
5515 if (((action == PR_ALLOW)
5516 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
5517 || ((action == PR_PREVENT)
5518 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
5519 return;
5520 }
5521
5522 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5523
5524 scsi_prevent(&ccb->csio,
5525 /*retries*/1,
5526 /*cbcfp*/dadone,
5527 MSG_SIMPLE_Q_TAG,
5528 action,
5529 SSD_FULL_SIZE,
5530 5000);
5531
5532 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
5533 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
5534
5535 if (error == 0) {
5536 if (action == PR_ALLOW)
5537 softc->flags &= ~DA_FLAG_PACK_LOCKED;
5538 else
5539 softc->flags |= DA_FLAG_PACK_LOCKED;
5540 }
5541
5542 xpt_release_ccb(ccb);
5543 }
5544
/*
 * Update disk parameters and geometry from READ CAPACITY results.
 *
 * block_len/maxsector come from the capacity data.  rcaplong, when
 * non-NULL, points to the long-form READ CAPACITY(16) response of
 * rcap_len bytes; it supplies the logical-blocks-per-physical-block
 * exponent and lowest aligned LBA used to compute stripe size/offset,
 * and is cached in the EDT and softc if it differs from the previous
 * response.  Finishes by pushing the new values into disk(9).
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		/* Extract physical-block exponent and lowest aligned LBA,
		 * masked to their defined bits. */
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
	} else {
		lbppbe = 0;
		lalba = 0;
	}

	if (lbppbe > 0) {
		/* Device reported a physical block size; derive stripe. */
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		/* Quirk table says this is a 4K-physical-sector device. */
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		/* Fall back to the reported UNMAP granularity/alignment. */
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
				  "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			/* Cache the accepted data for future comparisons. */
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	/* Propagate the new parameters to the disk(9) layer. */
	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}
5664
5665 static void
5666 dasendorderedtag(void *arg)
5667 {
5668 struct da_softc *softc = arg;
5669
5670 if (da_send_ordered) {
5671 if (!LIST_EMPTY(&softc->pending_ccbs)) {
5672 if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
5673 softc->flags |= DA_FLAG_NEED_OTAG;
5674 softc->flags &= ~DA_FLAG_WAS_OTAG;
5675 }
5676 }
5677 /* Queue us up again */
5678 callout_reset(&softc->sendordered_c,
5679 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
5680 dasendorderedtag, softc);
5681 }
5682
/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 *
 * Registered as a shutdown eventhandler.  If the scheduler has stopped
 * (shutdown after panic), locks cannot be taken, so instead of syncing
 * we trigger the dump path's cache flush via dadump(..., 0 length).
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &dadriver) {
		softc = (struct da_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/* If we paniced with the lock held, do not recurse. */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & DA_FLAG_OPEN)) {
				dadump(softc->disk, NULL, 0, 0, 0);
			}
			continue;
		}
		cam_periph_lock(periph);

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
			cam_periph_unlock(periph);
			continue;
		}

		/* SYNCHRONIZE CACHE for the whole disk, 1 hour timeout. */
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       60 * 60 * 1000);

		/* Run synchronously; no retries or recovery on shutdown. */
		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
		    softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}
5736
5737 #else /* !_KERNEL */
5738
5739 /*
5740 * XXX These are only left out of the kernel build to silence warnings. If,
5741 * for some reason these functions are used in the kernel, the ifdefs should
5742 * be moved so they are included both in the kernel and userland.
5743 */
5744 void
5745 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
5746 void (*cbfcnp)(struct cam_periph *, union ccb *),
5747 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
5748 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5749 u_int32_t timeout)
5750 {
5751 struct scsi_format_unit *scsi_cmd;
5752
5753 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
5754 scsi_cmd->opcode = FORMAT_UNIT;
5755 scsi_cmd->byte2 = byte2;
5756 scsi_ulto2b(ileave, scsi_cmd->interleave);
5757
5758 cam_fill_csio(csio,
5759 retries,
5760 cbfcnp,
5761 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5762 tag_action,
5763 data_ptr,
5764 dxfer_len,
5765 sense_len,
5766 sizeof(*scsi_cmd),
5767 timeout);
5768 }
5769
5770 void
5771 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
5772 void (*cbfcnp)(struct cam_periph *, union ccb *),
5773 uint8_t tag_action, uint8_t list_format,
5774 uint32_t addr_desc_index, uint8_t *data_ptr,
5775 uint32_t dxfer_len, int minimum_cmd_size,
5776 uint8_t sense_len, uint32_t timeout)
5777 {
5778 uint8_t cdb_len;
5779
5780 /*
5781 * These conditions allow using the 10 byte command. Otherwise we
5782 * need to use the 12 byte command.
5783 */
5784 if ((minimum_cmd_size <= 10)
5785 && (addr_desc_index == 0)
5786 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
5787 struct scsi_read_defect_data_10 *cdb10;
5788
5789 cdb10 = (struct scsi_read_defect_data_10 *)
5790 &csio->cdb_io.cdb_bytes;
5791
5792 cdb_len = sizeof(*cdb10);
5793 bzero(cdb10, cdb_len);
5794 cdb10->opcode = READ_DEFECT_DATA_10;
5795 cdb10->format = list_format;
5796 scsi_ulto2b(dxfer_len, cdb10->alloc_length);
5797 } else {
5798 struct scsi_read_defect_data_12 *cdb12;
5799
5800 cdb12 = (struct scsi_read_defect_data_12 *)
5801 &csio->cdb_io.cdb_bytes;
5802
5803 cdb_len = sizeof(*cdb12);
5804 bzero(cdb12, cdb_len);
5805 cdb12->opcode = READ_DEFECT_DATA_12;
5806 cdb12->format = list_format;
5807 scsi_ulto4b(dxfer_len, cdb12->alloc_length);
5808 scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
5809 }
5810
5811 cam_fill_csio(csio,
5812 retries,
5813 cbfcnp,
5814 /*flags*/ CAM_DIR_IN,
5815 tag_action,
5816 data_ptr,
5817 dxfer_len,
5818 sense_len,
5819 cdb_len,
5820 timeout);
5821 }
5822
5823 void
5824 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
5825 void (*cbfcnp)(struct cam_periph *, union ccb *),
5826 u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
5827 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5828 u_int32_t timeout)
5829 {
5830 struct scsi_sanitize *scsi_cmd;
5831
5832 scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
5833 scsi_cmd->opcode = SANITIZE;
5834 scsi_cmd->byte2 = byte2;
5835 scsi_cmd->control = control;
5836 scsi_ulto2b(dxfer_len, scsi_cmd->length);
5837
5838 cam_fill_csio(csio,
5839 retries,
5840 cbfcnp,
5841 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5842 tag_action,
5843 data_ptr,
5844 dxfer_len,
5845 sense_len,
5846 sizeof(*scsi_cmd),
5847 timeout);
5848 }
5849
5850 #endif /* _KERNEL */
5851
5852 void
5853 scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
5854 void (*cbfcnp)(struct cam_periph *, union ccb *),
5855 uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
5856 uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
5857 uint8_t sense_len, uint32_t timeout)
5858 {
5859 struct scsi_zbc_out *scsi_cmd;
5860
5861 scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
5862 scsi_cmd->opcode = ZBC_OUT;
5863 scsi_cmd->service_action = service_action;
5864 scsi_u64to8b(zone_id, scsi_cmd->zone_id);
5865 scsi_cmd->zone_flags = zone_flags;
5866
5867 cam_fill_csio(csio,
5868 retries,
5869 cbfcnp,
5870 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5871 tag_action,
5872 data_ptr,
5873 dxfer_len,
5874 sense_len,
5875 sizeof(*scsi_cmd),
5876 timeout);
5877 }
5878
5879 void
5880 scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
5881 void (*cbfcnp)(struct cam_periph *, union ccb *),
5882 uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
5883 uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
5884 uint8_t sense_len, uint32_t timeout)
5885 {
5886 struct scsi_zbc_in *scsi_cmd;
5887
5888 scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
5889 scsi_cmd->opcode = ZBC_IN;
5890 scsi_cmd->service_action = service_action;
5891 scsi_ulto4b(dxfer_len, scsi_cmd->length);
5892 scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
5893 scsi_cmd->zone_options = zone_options;
5894
5895 cam_fill_csio(csio,
5896 retries,
5897 cbfcnp,
5898 /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
5899 tag_action,
5900 data_ptr,
5901 dxfer_len,
5902 sense_len,
5903 sizeof(*scsi_cmd),
5904 timeout);
5905
5906 }
5907
5908 int
5909 scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
5910 void (*cbfcnp)(struct cam_periph *, union ccb *),
5911 uint8_t tag_action, int use_ncq,
5912 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
5913 uint8_t *data_ptr, uint32_t dxfer_len,
5914 uint8_t *cdb_storage, size_t cdb_storage_len,
5915 uint8_t sense_len, uint32_t timeout)
5916 {
5917 uint8_t command_out, protocol, ata_flags;
5918 uint16_t features_out;
5919 uint32_t sectors_out, auxiliary;
5920 int retval;
5921
5922 retval = 0;
5923
5924 if (use_ncq == 0) {
5925 command_out = ATA_ZAC_MANAGEMENT_OUT;
5926 features_out = (zm_action & 0xf) | (zone_flags << 8);
5927 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
5928 if (dxfer_len == 0) {
5929 protocol = AP_PROTO_NON_DATA;
5930 ata_flags |= AP_FLAG_TLEN_NO_DATA;
5931 sectors_out = 0;
5932 } else {
5933 protocol = AP_PROTO_DMA;
5934 ata_flags |= AP_FLAG_TLEN_SECT_CNT |
5935 AP_FLAG_TDIR_TO_DEV;
5936 sectors_out = ((dxfer_len >> 9) & 0xffff);
5937 }
5938 auxiliary = 0;
5939 } else {
5940 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
5941 if (dxfer_len == 0) {
5942 command_out = ATA_NCQ_NON_DATA;
5943 features_out = ATA_NCQ_ZAC_MGMT_OUT;
5944 /*
5945 * We're assuming the SCSI to ATA translation layer
5946 * will set the NCQ tag number in the tag field.
5947 * That isn't clear from the SAT-4 spec (as of rev 05).
5948 */
5949 sectors_out = 0;
5950 ata_flags |= AP_FLAG_TLEN_NO_DATA;
5951 } else {
5952 command_out = ATA_SEND_FPDMA_QUEUED;
5953 /*
5954 * Note that we're defaulting to normal priority,
5955 * and assuming that the SCSI to ATA translation
5956 * layer will insert the NCQ tag number in the tag
5957 * field. That isn't clear in the SAT-4 spec (as
5958 * of rev 05).
5959 */
5960 sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
5961
5962 ata_flags |= AP_FLAG_TLEN_FEAT |
5963 AP_FLAG_TDIR_TO_DEV;
5964
5965 /*
5966 * For SEND FPDMA QUEUED, the transfer length is
5967 * encoded in the FEATURE register, and 0 means
5968 * that 65536 512 byte blocks are to be tranferred.
5969 * In practice, it seems unlikely that we'll see
5970 * a transfer that large, and it may confuse the
5971 * the SAT layer, because generally that means that
5972 * 0 bytes should be transferred.
5973 */
5974 if (dxfer_len == (65536 * 512)) {
5975 features_out = 0;
5976 } else if (dxfer_len <= (65535 * 512)) {
5977 features_out = ((dxfer_len >> 9) & 0xffff);
5978 } else {
5979 /* The transfer is too big. */
5980 retval = 1;
5981 goto bailout;
5982 }
5983
5984 }
5985
5986 auxiliary = (zm_action & 0xf) | (zone_flags << 8);
5987 protocol = AP_PROTO_FPDMA;
5988 }
5989
5990 protocol |= AP_EXTEND;
5991
5992 retval = scsi_ata_pass(csio,
5993 retries,
5994 cbfcnp,
5995 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5996 tag_action,
5997 /*protocol*/ protocol,
5998 /*ata_flags*/ ata_flags,
5999 /*features*/ features_out,
6000 /*sector_count*/ sectors_out,
6001 /*lba*/ zone_id,
6002 /*command*/ command_out,
6003 /*device*/ 0,
6004 /*icc*/ 0,
6005 /*auxiliary*/ auxiliary,
6006 /*control*/ 0,
6007 /*data_ptr*/ data_ptr,
6008 /*dxfer_len*/ dxfer_len,
6009 /*cdb_storage*/ cdb_storage,
6010 /*cdb_storage_len*/ cdb_storage_len,
6011 /*minimum_cmd_size*/ 0,
6012 /*sense_len*/ SSD_FULL_SIZE,
6013 /*timeout*/ timeout);
6014
6015 bailout:
6016
6017 return (retval);
6018 }
6019
6020 int
6021 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6022 void (*cbfcnp)(struct cam_periph *, union ccb *),
6023 uint8_t tag_action, int use_ncq,
6024 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6025 uint8_t *data_ptr, uint32_t dxfer_len,
6026 uint8_t *cdb_storage, size_t cdb_storage_len,
6027 uint8_t sense_len, uint32_t timeout)
6028 {
6029 uint8_t command_out, protocol;
6030 uint16_t features_out, sectors_out;
6031 uint32_t auxiliary;
6032 int ata_flags;
6033 int retval;
6034
6035 retval = 0;
6036 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6037
6038 if (use_ncq == 0) {
6039 command_out = ATA_ZAC_MANAGEMENT_IN;
6040 /* XXX KDM put a macro here */
6041 features_out = (zm_action & 0xf) | (zone_flags << 8);
6042 sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6043 protocol = AP_PROTO_DMA;
6044 ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6045 auxiliary = 0;
6046 } else {
6047 ata_flags |= AP_FLAG_TLEN_FEAT;
6048
6049 command_out = ATA_RECV_FPDMA_QUEUED;
6050 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6051
6052 /*
6053 * For RECEIVE FPDMA QUEUED, the transfer length is
6054 * encoded in the FEATURE register, and 0 means
6055 * that 65536 512 byte blocks are to be tranferred.
6056 * In practice, it seems unlikely that we'll see
6057 * a transfer that large, and it may confuse the
6058 * the SAT layer, because generally that means that
6059 * 0 bytes should be transferred.
6060 */
6061 if (dxfer_len == (65536 * 512)) {
6062 features_out = 0;
6063 } else if (dxfer_len <= (65535 * 512)) {
6064 features_out = ((dxfer_len >> 9) & 0xffff);
6065 } else {
6066 /* The transfer is too big. */
6067 retval = 1;
6068 goto bailout;
6069 }
6070 auxiliary = (zm_action & 0xf) | (zone_flags << 8),
6071 protocol = AP_PROTO_FPDMA;
6072 }
6073
6074 protocol |= AP_EXTEND;
6075
6076 retval = scsi_ata_pass(csio,
6077 retries,
6078 cbfcnp,
6079 /*flags*/ CAM_DIR_IN,
6080 tag_action,
6081 /*protocol*/ protocol,
6082 /*ata_flags*/ ata_flags,
6083 /*features*/ features_out,
6084 /*sector_count*/ sectors_out,
6085 /*lba*/ zone_id,
6086 /*command*/ command_out,
6087 /*device*/ 0,
6088 /*icc*/ 0,
6089 /*auxiliary*/ auxiliary,
6090 /*control*/ 0,
6091 /*data_ptr*/ data_ptr,
6092 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6093 /*cdb_storage*/ cdb_storage,
6094 /*cdb_storage_len*/ cdb_storage_len,
6095 /*minimum_cmd_size*/ 0,
6096 /*sense_len*/ SSD_FULL_SIZE,
6097 /*timeout*/ timeout);
6098
6099 bailout:
6100 return (retval);
6101 }
Cache object: ca41dc10151820eb4e3d97d68df046ba
|