FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_ccb.h
1 /*-
2 * Data structures and definitions for CAM Control Blocks (CCBs).
3 *
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification, immediately at the beginning of the file.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD$
31 */
32
33 #ifndef _CAM_CAM_CCB_H
34 #define _CAM_CAM_CCB_H 1
35
36 #include <sys/queue.h>
37 #include <sys/cdefs.h>
38 #include <sys/time.h>
39 #include <sys/limits.h>
40 #ifndef _KERNEL
41 #include <sys/callout.h>
42 #endif
43 #include <cam/cam_debug.h>
44 #include <cam/scsi/scsi_all.h>
45 #include <cam/ata/ata_all.h>
46 #include <cam/nvme/nvme_all.h>
47 #include <cam/mmc/mmc_all.h>
48
49 /* General allocation length definitions for CCB structures */
50 #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */
51 #define VUHBALEN 14 /* Vendor Unique HBA length */
52 #define SIM_IDLEN 16 /* ASCII string len for SIM ID */
53 #define HBA_IDLEN 16 /* ASCII string len for HBA ID */
54 #define DEV_IDLEN 16 /* ASCII string len for device names */
55 #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */
56 #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */
57
58 /* Struct definitions for CAM control blocks */
59
60 /* Common CCB header */
61
62 /* CCB memory allocation flags */
63 typedef enum {
64 CAM_CCB_FROM_UMA = 0x00000001,/* CCB from a periph UMA zone */
65 } ccb_alloc_flags;
66
67 /* CAM CCB flags */
68 typedef enum {
69 CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */
70 CAM_unused1 = 0x00000002,
71 CAM_unused2 = 0x00000004,
72 CAM_NEGOTIATE = 0x00000008,/*
73 * Perform transport negotiation
74 * with this command.
75 */
76 CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */
77 CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */
78 CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */
79 CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */
80 CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */
81 CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */
82 CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */
83 CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */
84 CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */
85 CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */
86 CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */
87 CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */
88 CAM_DATA_MASK = 0x00240010,/* Data type mask */
89 CAM_unused3 = 0x00000100,
90 CAM_unused4 = 0x00000200,
91 CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */
92 CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */
93 CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */
94 CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */
95 CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/
96 CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/
97 CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/
98 CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */
99 CAM_unused5 = 0x00080000,
100 CAM_unused6 = 0x00100000,
101 CAM_CDB_PHYS = 0x00400000,/* CDB pointer is physical */
102 CAM_unused7 = 0x00800000,
103
104 /* Phase cognizant mode flags */
105 CAM_unused8 = 0x01000000,
106 CAM_unused9 = 0x02000000,
107 CAM_unused10 = 0x04000000,
108 CAM_unused11 = 0x08000000,
109 CAM_unused12 = 0x10000000,
110 CAM_unused13 = 0x20000000,
111 CAM_unused14 = 0x40000000,
112
113 /* Host target Mode flags */
114 CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */
115 CAM_unused15 = 0x10000000,
116 CAM_unused16 = 0x20000000,
117 CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */
118
119 CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */
120 } ccb_flags;
121
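/*
 * Illustrative sketch: decoding the CAM_DATA_* bits above.  The helper
 * below is hypothetical and not part of the CAM API.
 */
static __inline const char *
ccb_example_data_kind(u_int32_t ccb_flags_value)
{
	switch (ccb_flags_value & CAM_DATA_MASK) {
	case CAM_DATA_VADDR:	return ("virtual buffer");
	case CAM_DATA_PADDR:	return ("physical buffer");
	case CAM_DATA_SG:	return ("S/G list");
	case CAM_DATA_SG_PADDR:	return ("physical S/G list");
	case CAM_DATA_BIO:	return ("struct bio");
	default:		return ("unknown");
	}
}
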
122 typedef enum {
123 CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */
124 CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/
125 CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */
126 } ccb_xflags;
127
128 /* XPT Opcodes for xpt_action */
129 typedef enum {
130 /* Function code flags are bits greater than 0xff */
131 XPT_FC_QUEUED = 0x100,
132 /* Non-immediate function code */
133 XPT_FC_USER_CCB = 0x200,
134 XPT_FC_XPT_ONLY = 0x400,
135 /* Only for the transport layer device */
136 XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED,
137 /* Passes through the device queues */
138 /* Common function commands: 0x00->0x0F */
139 XPT_NOOP = 0x00,
140 /* Execute Nothing */
141 XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED,
142 /* Execute the requested I/O operation */
143 XPT_GDEV_TYPE = 0x02,
144 /* Get type information for specified device */
145 XPT_GDEVLIST = 0x03,
146 /* Get a list of peripheral devices */
147 XPT_PATH_INQ = 0x04,
148 /* Path routing inquiry */
149 XPT_REL_SIMQ = 0x05,
150 /* Release a frozen device queue */
151 XPT_SASYNC_CB = 0x06,
152 /* Set Asynchronous Callback Parameters */
153 XPT_SDEV_TYPE = 0x07,
154 /* Set device type information */
155 XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB
156 | XPT_FC_XPT_ONLY,
157 /* (Re)Scan the SCSI Bus */
158 XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY,
159 /* Get EDT entries matching the given pattern */
160 XPT_DEBUG = 0x0a,
161 /* Turn on debugging for a bus, target or lun */
162 XPT_PATH_STATS = 0x0b,
163 /* Path statistics (error counts, etc.) */
164 XPT_GDEV_STATS = 0x0c,
165 /* Device statistics (error counts, etc.) */
166 XPT_DEV_ADVINFO = 0x0e,
167 /* Get/Set Device advanced information */
168 XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
169 | XPT_FC_XPT_ONLY,
170 /* Asynchronous event */
171 /* SCSI Control Functions: 0x10->0x1F */
172 XPT_ABORT = 0x10,
173 /* Abort the specified CCB */
174 XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY,
175 /* Reset the specified SCSI bus */
176 XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED,
177 /* Bus Device Reset the specified SCSI device */
178 XPT_TERM_IO = 0x13,
179 /* Terminate the I/O process */
180 XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB
181 | XPT_FC_XPT_ONLY,
182 /* Scan Logical Unit */
183 XPT_GET_TRAN_SETTINGS = 0x15,
184 /*
185 * Get default/user transfer settings
186 * for the target
187 */
188 XPT_SET_TRAN_SETTINGS = 0x16,
189 /*
190 * Set transfer rate/width
191 * negotiation settings
192 */
193 XPT_CALC_GEOMETRY = 0x17,
194 /*
195 * Calculate the geometry parameters for
196 * a device given the sector size and
197 * volume size.
198 */
199 XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED,
200 /* Execute the requested ATA I/O operation */
201
202 XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */
203
204 XPT_SET_SIM_KNOB = 0x19,
205 /*
206 * Set SIM specific knob values.
207 */
208
209 XPT_GET_SIM_KNOB = 0x1a,
210 /*
211 * Get SIM specific knob values.
212 */
213
214 XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED,
215 /* Serial Management Protocol */
216
217 XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED,
218 /* Execute the requested NVMe I/O operation */
219
220 XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED,
221 /* Placeholder for MMC / SD / SDIO I/O stuff */
222
223 XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB
224 | XPT_FC_XPT_ONLY,
225 /* Scan Target */
226
227 XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED,
228 /* Execute the requested NVMe Admin operation */
229
230 /* HBA engine commands 0x20->0x2F */
231 XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY,
232 /* HBA engine feature inquiry */
233 XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED,
234 /* HBA execute engine request */
235
236 /* Target mode commands: 0x30->0x3F */
237 XPT_EN_LUN = 0x30,
238 /* Enable LUN as a target */
239 XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED,
240 /* Execute target I/O request */
241 XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
242 /* Accept Host Target Mode CDB */
243 XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED,
244 /* Continue Host Target I/O Connection */
245 XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
246 /* Notify Host Target driver of event (obsolete) */
247 XPT_NOTIFY_ACK = 0x35,
248 /* Acknowledgement of event (obsolete) */
249 XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
250 /* Notify Host Target driver of event */
251 XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
252 /* Acknowledgement of event */
253 XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
254 /* Query device capacity and notify GEOM */
255
256 XPT_MMC_SET_TRAN_SETTINGS = 0x40 | XPT_FC_DEV_QUEUED,
257 XPT_MMC_GET_TRAN_SETTINGS = 0x41 | XPT_FC_DEV_QUEUED,
258
259 /* Vendor Unique codes: 0x80->0x8F */
260 XPT_VUNIQUE = 0x80
261 } xpt_opcode;
262
263 #define XPT_FC_GROUP_MASK 0xF0
264 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK)
265 #define XPT_FC_GROUP_COMMON 0x00
266 #define XPT_FC_GROUP_SCSI_CONTROL 0x10
267 #define XPT_FC_GROUP_HBA_ENGINE 0x20
268 #define XPT_FC_GROUP_TMODE 0x30
269 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80
270
271 #define XPT_FC_IS_DEV_QUEUED(ccb) \
272 (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED)
273 #define XPT_FC_IS_QUEUED(ccb) \
274 (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0)
275
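/*
 * Illustrative sketch: testing the function-code flag bits directly,
 * mirroring the XPT_FC_IS_*() macros above.  Hypothetical helper, not part
 * of the CAM API.
 */
static __inline int
xpt_example_is_user_queued(xpt_opcode func_code)
{
	return ((func_code & (XPT_FC_QUEUED | XPT_FC_USER_CCB)) ==
	    (XPT_FC_QUEUED | XPT_FC_USER_CCB));
}
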
276 typedef enum {
277 PROTO_UNKNOWN,
278 PROTO_UNSPECIFIED,
279 PROTO_SCSI, /* Small Computer System Interface */
280 PROTO_ATA, /* AT Attachment */
281 PROTO_ATAPI, /* AT Attachment Packetized Interface */
282 PROTO_SATAPM, /* SATA Port Multiplier */
283 PROTO_SEMB, /* SATA Enclosure Management Bridge */
284 PROTO_NVME, /* NVME */
285 PROTO_MMCSD, /* MMC, SD, SDIO */
286 } cam_proto;
287
288 typedef enum {
289 XPORT_UNKNOWN,
290 XPORT_UNSPECIFIED,
291 XPORT_SPI, /* SCSI Parallel Interface */
292 XPORT_FC, /* Fibre Channel */
293 XPORT_SSA, /* Serial Storage Architecture */
294 XPORT_USB, /* Universal Serial Bus */
295 XPORT_PPB, /* Parallel Port Bus */
296 XPORT_ATA, /* AT Attachment */
297 XPORT_SAS, /* Serial Attached SCSI */
298 XPORT_SATA, /* Serial AT Attachment */
299 XPORT_ISCSI, /* iSCSI */
300 XPORT_SRP, /* SCSI RDMA Protocol */
301 XPORT_NVME, /* NVMe over PCIe */
302 XPORT_MMCSD, /* MMC, SD, SDIO card */
303 } cam_xport;
304
305 #define XPORT_IS_NVME(t) ((t) == XPORT_NVME)
306 #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA)
307 #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \
308 (t) != XPORT_UNSPECIFIED && \
309 !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t))
310 #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \
311 XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \
312 DEVSTAT_TYPE_IF_OTHER)
313
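/*
 * Illustrative sketch: a coarse transport-to-protocol mapping built only
 * from the macros above.  Hypothetical helper; real code typically uses the
 * protocol recorded for the device itself.
 */
static __inline cam_proto
xport_example_default_proto(cam_xport t)
{
	if (XPORT_IS_NVME(t))
		return (PROTO_NVME);
	if (XPORT_IS_ATA(t))
		return (PROTO_ATA);
	return (PROTO_SCSI);
}
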
314 #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1)
315 #define PROTO_VERSION_UNSPECIFIED UINT_MAX
316 #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1)
317 #define XPORT_VERSION_UNSPECIFIED UINT_MAX
318
319 typedef union {
320 LIST_ENTRY(ccb_hdr) le;
321 SLIST_ENTRY(ccb_hdr) sle;
322 TAILQ_ENTRY(ccb_hdr) tqe;
323 STAILQ_ENTRY(ccb_hdr) stqe;
324 } camq_entry;
325
326 typedef union {
327 void *ptr;
328 u_long field;
329 u_int8_t bytes[sizeof(uintptr_t)];
330 } ccb_priv_entry;
331
332 typedef union {
333 ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE];
334 u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)];
335 } ccb_ppriv_area;
336
337 typedef union {
338 ccb_priv_entry entries[CCB_SIM_PRIV_SIZE];
339 u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)];
340 } ccb_spriv_area;
341
342 typedef struct {
343 struct timeval *etime;
344 uintptr_t sim_data;
345 uintptr_t periph_data;
346 } ccb_qos_area;
347
348 struct ccb_hdr {
349 cam_pinfo pinfo; /* Info for priority scheduling */
350 camq_entry xpt_links; /* For chaining in the XPT layer */
351 camq_entry sim_links; /* For chaining in the SIM layer */
352 camq_entry periph_links; /* For chaining in the type driver */
353 #if BYTE_ORDER == LITTLE_ENDIAN
354 u_int16_t retry_count;
355 u_int16_t alloc_flags; /* ccb_alloc_flags */
356 #else
357 u_int16_t alloc_flags; /* ccb_alloc_flags */
358 u_int16_t retry_count;
359 #endif
360 void (*cbfcnp)(struct cam_periph *, union ccb *);
361 /* Callback on completion function */
362 xpt_opcode func_code; /* XPT function code */
363 u_int32_t status; /* Status returned by CAM subsystem */
364 struct cam_path *path; /* Compiled path for this ccb */
365 path_id_t path_id; /* Path ID for the request */
366 target_id_t target_id; /* Target device ID */
367 lun_id_t target_lun; /* Target LUN number */
368 u_int32_t flags; /* ccb_flags */
369 u_int32_t xflags; /* Extended flags */
370 ccb_ppriv_area periph_priv;
371 ccb_spriv_area sim_priv;
372 ccb_qos_area qos;
373 u_int32_t timeout; /* Hard timeout value in milliseconds */
374 struct timeval softtimeout; /* Soft timeout value in sec + usec */
375 };
376
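/*
 * Illustrative sketch: peripheral drivers conventionally stash per-request
 * state in the peripheral private area of the header.  Which entry is used
 * is a driver convention; the helper below is hypothetical.
 */
static __inline void
ccb_hdr_example_set_periph_ptr(struct ccb_hdr *ccb_h, void *state)
{
	ccb_h->periph_priv.entries[0].ptr = state;
}
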
377 /* Get Device Information CCB */
378 struct ccb_getdev {
379 struct ccb_hdr ccb_h;
380 cam_proto protocol;
381 struct scsi_inquiry_data inq_data;
382 struct ata_params ident_data;
383 u_int8_t serial_num[252];
384 u_int8_t inq_flags;
385 u_int8_t serial_num_len;
386 void *padding[2];
387 };
388
389 /* Device Statistics CCB */
390 struct ccb_getdevstats {
391 struct ccb_hdr ccb_h;
392 int dev_openings; /* Space left for more work on device*/
393 int dev_active; /* Transactions running on the device */
394 int allocated; /* CCBs allocated for the device */
395 int queued; /* CCBs queued to be sent to the device */
396 int held; /*
397 * CCBs held by peripheral drivers
398 * for this device
399 */
400 int maxtags; /*
401 * Boundary conditions for number of
402 * tagged operations
403 */
404 int mintags;
405 struct timeval last_reset; /* Time of last bus reset/loop init */
406 };
407
408 typedef enum {
409 CAM_GDEVLIST_LAST_DEVICE,
410 CAM_GDEVLIST_LIST_CHANGED,
411 CAM_GDEVLIST_MORE_DEVS,
412 CAM_GDEVLIST_ERROR
413 } ccb_getdevlist_status_e;
414
415 struct ccb_getdevlist {
416 struct ccb_hdr ccb_h;
417 char periph_name[DEV_IDLEN];
418 u_int32_t unit_number;
419 unsigned int generation;
420 u_int32_t index;
421 ccb_getdevlist_status_e status;
422 };
423
424 typedef enum {
425 PERIPH_MATCH_ANY = 0x000,
426 PERIPH_MATCH_PATH = 0x001,
427 PERIPH_MATCH_TARGET = 0x002,
428 PERIPH_MATCH_LUN = 0x004,
429 PERIPH_MATCH_NAME = 0x008,
430 PERIPH_MATCH_UNIT = 0x010,
431 } periph_pattern_flags;
432
433 struct periph_match_pattern {
434 char periph_name[DEV_IDLEN];
435 u_int32_t unit_number;
436 path_id_t path_id;
437 target_id_t target_id;
438 lun_id_t target_lun;
439 periph_pattern_flags flags;
440 };
441
442 typedef enum {
443 DEV_MATCH_ANY = 0x000,
444 DEV_MATCH_PATH = 0x001,
445 DEV_MATCH_TARGET = 0x002,
446 DEV_MATCH_LUN = 0x004,
447 DEV_MATCH_INQUIRY = 0x008,
448 DEV_MATCH_DEVID = 0x010,
449 } dev_pattern_flags;
450
451 struct device_id_match_pattern {
452 uint8_t id_len;
453 uint8_t id[256];
454 };
455
456 struct device_match_pattern {
457 path_id_t path_id;
458 target_id_t target_id;
459 lun_id_t target_lun;
460 dev_pattern_flags flags;
461 union {
462 struct scsi_static_inquiry_pattern inq_pat;
463 struct device_id_match_pattern devid_pat;
464 } data;
465 };
466
467 typedef enum {
468 BUS_MATCH_ANY = 0x000,
469 BUS_MATCH_PATH = 0x001,
470 BUS_MATCH_NAME = 0x002,
471 BUS_MATCH_UNIT = 0x004,
472 BUS_MATCH_BUS_ID = 0x008,
473 } bus_pattern_flags;
474
475 struct bus_match_pattern {
476 path_id_t path_id;
477 char dev_name[DEV_IDLEN];
478 u_int32_t unit_number;
479 u_int32_t bus_id;
480 bus_pattern_flags flags;
481 };
482
483 union match_pattern {
484 struct periph_match_pattern periph_pattern;
485 struct device_match_pattern device_pattern;
486 struct bus_match_pattern bus_pattern;
487 };
488
489 typedef enum {
490 DEV_MATCH_PERIPH,
491 DEV_MATCH_DEVICE,
492 DEV_MATCH_BUS
493 } dev_match_type;
494
495 struct dev_match_pattern {
496 dev_match_type type;
497 union match_pattern pattern;
498 };
499
500 struct periph_match_result {
501 char periph_name[DEV_IDLEN];
502 u_int32_t unit_number;
503 path_id_t path_id;
504 target_id_t target_id;
505 lun_id_t target_lun;
506 };
507
508 typedef enum {
509 DEV_RESULT_NOFLAG = 0x00,
510 DEV_RESULT_UNCONFIGURED = 0x01
511 } dev_result_flags;
512
513 struct device_match_result {
514 path_id_t path_id;
515 target_id_t target_id;
516 lun_id_t target_lun;
517 cam_proto protocol;
518 struct scsi_inquiry_data inq_data;
519 struct ata_params ident_data;
520 dev_result_flags flags;
521 };
522
523 struct bus_match_result {
524 path_id_t path_id;
525 char dev_name[DEV_IDLEN];
526 u_int32_t unit_number;
527 u_int32_t bus_id;
528 };
529
530 union match_result {
531 struct periph_match_result periph_result;
532 struct device_match_result device_result;
533 struct bus_match_result bus_result;
534 };
535
536 struct dev_match_result {
537 dev_match_type type;
538 union match_result result;
539 };
540
541 typedef enum {
542 CAM_DEV_MATCH_LAST,
543 CAM_DEV_MATCH_MORE,
544 CAM_DEV_MATCH_LIST_CHANGED,
545 CAM_DEV_MATCH_SIZE_ERROR,
546 CAM_DEV_MATCH_ERROR
547 } ccb_dev_match_status;
548
549 typedef enum {
550 CAM_DEV_POS_NONE = 0x000,
551 CAM_DEV_POS_BUS = 0x001,
552 CAM_DEV_POS_TARGET = 0x002,
553 CAM_DEV_POS_DEVICE = 0x004,
554 CAM_DEV_POS_PERIPH = 0x008,
555 CAM_DEV_POS_PDPTR = 0x010,
556 CAM_DEV_POS_TYPEMASK = 0xf00,
557 CAM_DEV_POS_EDT = 0x100,
558 CAM_DEV_POS_PDRV = 0x200
559 } dev_pos_type;
560
561 struct ccb_dm_cookie {
562 void *bus;
563 void *target;
564 void *device;
565 void *periph;
566 void *pdrv;
567 };
568
569 struct ccb_dev_position {
570 u_int generations[4];
571 #define CAM_BUS_GENERATION 0x00
572 #define CAM_TARGET_GENERATION 0x01
573 #define CAM_DEV_GENERATION 0x02
574 #define CAM_PERIPH_GENERATION 0x03
575 dev_pos_type position_type;
576 struct ccb_dm_cookie cookie;
577 };
578
579 struct ccb_dev_match {
580 struct ccb_hdr ccb_h;
581 ccb_dev_match_status status;
582 u_int32_t num_patterns;
583 u_int32_t pattern_buf_len;
584 struct dev_match_pattern *patterns;
585 u_int32_t num_matches;
586 u_int32_t match_buf_len;
587 struct dev_match_result *matches;
588 struct ccb_dev_position pos;
589 };
590
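/*
 * Illustrative sketch: a wildcard XPT_DEV_MATCH setup that requests every
 * EDT entry (zero patterns act as a wildcard).  Path setup, submission via
 * xpt_action() and iteration while the status is CAM_DEV_MATCH_MORE are
 * omitted; the helper is hypothetical.
 */
static __inline void
ccb_example_fill_dev_match(struct ccb_dev_match *cdm,
    struct dev_match_result *results, u_int32_t results_len)
{
	cdm->ccb_h.func_code = XPT_DEV_MATCH;
	cdm->num_patterns = 0;
	cdm->pattern_buf_len = 0;
	cdm->patterns = NULL;
	cdm->num_matches = 0;
	cdm->match_buf_len = results_len;
	cdm->matches = results;
	cdm->pos.position_type = CAM_DEV_POS_NONE;
}
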
591 /*
592 * Definitions for the path inquiry CCB fields.
593 */
594 #define CAM_VERSION 0x1a /* Hex value for current version */
595
596 typedef enum {
597 PI_MDP_ABLE = 0x80, /* Supports MDP message */
598 PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */
599 PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */
600 PI_SDTR_ABLE = 0x10, /* Supports SDTR message */
601 PI_LINKED_CDB = 0x08, /* Supports linked CDBs */
602 PI_SATAPM = 0x04, /* Supports SATA PM */
603 PI_TAG_ABLE = 0x02, /* Supports tag queue messages */
604 PI_SOFT_RST = 0x01 /* Supports soft reset alternative */
605 } pi_inqflag;
606
607 typedef enum {
608 PIT_PROCESSOR = 0x80, /* Target mode processor mode */
609 PIT_PHASE = 0x40, /* Target mode phase cog. mode */
610 PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */
611 PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */
612 PIT_GRP_6 = 0x08, /* Group 6 commands supported */
613 PIT_GRP_7 = 0x04 /* Group 7 commands supported */
614 } pi_tmflag;
615
616 typedef enum {
617 PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */
618 PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */
619 PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */
620 PIM_NOREMOVE = 0x40, /* Removable devices not included in scan */
621 PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */
622 PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */
623 PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */
624 PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */
625 PIM_UNMAPPED = 0x02,
626 PIM_NOSCAN = 0x01 /* SIM does its own scanning */
627 } pi_miscflag;
628
629 /* Path Inquiry CCB */
630 struct ccb_pathinq_settings_spi {
631 u_int8_t ppr_options;
632 };
633
634 struct ccb_pathinq_settings_fc {
635 u_int64_t wwnn; /* world wide node name */
636 u_int64_t wwpn; /* world wide port name */
637 u_int32_t port; /* 24 bit port id, if known */
638 u_int32_t bitrate; /* Mbps */
639 };
640
641 struct ccb_pathinq_settings_sas {
642 u_int32_t bitrate; /* Mbps */
643 };
644
645 #define NVME_DEV_NAME_LEN 52
646 struct ccb_pathinq_settings_nvme {
647 uint32_t nsid; /* Namespace ID for this path */
648 uint32_t domain;
649 uint8_t bus;
650 uint8_t slot;
651 uint8_t function;
652 uint8_t extra;
653 char dev_name[NVME_DEV_NAME_LEN]; /* nvme controller dev name for this device */
654 };
655 _Static_assert(sizeof(struct ccb_pathinq_settings_nvme) == 64,
656 "ccb_pathinq_settings_nvme too big");
657
658 #define PATHINQ_SETTINGS_SIZE 128
659
660 struct ccb_pathinq {
661 struct ccb_hdr ccb_h;
662 u_int8_t version_num; /* Version number for the SIM/HBA */
663 u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */
664 u_int16_t target_sprt; /* Flags for target mode support */
665 u_int32_t hba_misc; /* Misc HBA features */
666 u_int16_t hba_eng_cnt; /* HBA engine count */
667 /* Vendor Unique capabilities */
668 u_int8_t vuhba_flags[VUHBALEN];
669 u_int32_t max_target; /* Maximum supported Target */
670 u_int32_t max_lun; /* Maximum supported Lun */
671 u_int32_t async_flags; /* Installed Async handlers */
672 path_id_t hpath_id; /* Highest Path ID in the subsystem */
673 target_id_t initiator_id; /* ID of the HBA on the SCSI bus */
674 char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */
675 char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */
676 char dev_name[DEV_IDLEN];/* Device name for SIM */
677 u_int32_t unit_number; /* Unit number for SIM */
678 u_int32_t bus_id; /* Bus ID for SIM */
679 u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */
680 cam_proto protocol;
681 u_int protocol_version;
682 cam_xport transport;
683 u_int transport_version;
684 union {
685 struct ccb_pathinq_settings_spi spi;
686 struct ccb_pathinq_settings_fc fc;
687 struct ccb_pathinq_settings_sas sas;
688 struct ccb_pathinq_settings_nvme nvme;
689 char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
690 } xport_specific;
691 u_int maxio; /* Max supported I/O size, in bytes. */
692 u_int16_t hba_vendor; /* HBA vendor ID */
693 u_int16_t hba_device; /* HBA device ID */
694 u_int16_t hba_subvendor; /* HBA subvendor ID */
695 u_int16_t hba_subdevice; /* HBA subdevice ID */
696 };
697
698 /* Path Statistics CCB */
699 struct ccb_pathstats {
700 struct ccb_hdr ccb_h;
701 struct timeval last_reset; /* Time of last bus reset/loop init */
702 };
703
704 typedef enum {
705 SMP_FLAG_NONE = 0x00,
706 SMP_FLAG_REQ_SG = 0x01,
707 SMP_FLAG_RSP_SG = 0x02
708 } ccb_smp_pass_flags;
709
710 /*
711 * Serial Management Protocol CCB
712 * XXX Currently the semantics for this CCB are that it is executed either
713 * by the addressed device, or that device's parent (i.e. an expander for
714 * any device on an expander) if the addressed device doesn't support SMP.
715 * Later, once we have the ability to probe SMP-only devices and put them
716 * in CAM's topology, the CCB will only be executed by the addressed device
717 * if possible.
718 */
719 struct ccb_smpio {
720 struct ccb_hdr ccb_h;
721 uint8_t *smp_request;
722 int smp_request_len;
723 uint16_t smp_request_sglist_cnt;
724 uint8_t *smp_response;
725 int smp_response_len;
726 uint16_t smp_response_sglist_cnt;
727 ccb_smp_pass_flags flags;
728 };
729
730 typedef union {
731 u_int8_t *sense_ptr; /*
732 * Pointer to storage
733 * for sense information
734 */
735 /* Storage Area for sense information */
736 struct scsi_sense_data sense_buf;
737 } sense_t;
738
739 typedef union {
740 u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */
741 /* Area for the CDB send */
742 u_int8_t cdb_bytes[IOCDBLEN];
743 } cdb_t;
744
745 /*
746 * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO
747 * function codes.
748 */
749 struct ccb_scsiio {
750 struct ccb_hdr ccb_h;
751 union ccb *next_ccb; /* Ptr for next CCB for action */
752 u_int8_t *req_map; /* Ptr to mapping info */
753 u_int8_t *data_ptr; /* Ptr to the data buf/SG list */
754 u_int32_t dxfer_len; /* Data transfer length */
755 /* Autosense storage */
756 struct scsi_sense_data sense_data;
757 u_int8_t sense_len; /* Number of bytes to autosense */
758 u_int8_t cdb_len; /* Number of bytes for the CDB */
759 u_int16_t sglist_cnt; /* Number of SG list entries */
760 u_int8_t scsi_status; /* Returned SCSI status */
761 u_int8_t sense_resid; /* Autosense resid length: 2's comp */
762 u_int32_t resid; /* Transfer residual length: 2's comp */
763 cdb_t cdb_io; /* Union for CDB bytes/pointer */
764 u_int8_t *msg_ptr; /* Pointer to the message buffer */
765 u_int16_t msg_len; /* Number of bytes for the Message */
766 u_int8_t tag_action; /* What to do for tag queueing */
767 /*
768 * The tag action should be either the define below (to send a
769 * non-tagged transaction) or one of the defined scsi tag messages
770 * from scsi_message.h.
771 */
772 #define CAM_TAG_ACTION_NONE 0x00
773 uint8_t priority; /* Command priority for SIMPLE tag */
774 u_int tag_id; /* tag id from initiator (target mode) */
775 u_int init_id; /* initiator id of who selected */
776 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
777 struct bio *bio; /* Associated bio */
778 #endif
779 };
780
781 static __inline uint8_t *
782 scsiio_cdb_ptr(struct ccb_scsiio *ccb)
783 {
784 return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
785 ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
786 }
787
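/*
 * Illustrative usage sketch: reading the SCSI opcode of a request without
 * caring whether the CDB is stored inline or referenced by pointer.
 * Hypothetical helper.
 */
static __inline uint8_t
scsiio_example_opcode(struct ccb_scsiio *csio)
{
	return (scsiio_cdb_ptr(csio)[0]);
}
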
788 /*
789 * ATA I/O Request CCB used for the XPT_ATA_IO function code.
790 */
791 struct ccb_ataio {
792 struct ccb_hdr ccb_h;
793 union ccb *next_ccb; /* Ptr for next CCB for action */
794 struct ata_cmd cmd; /* ATA command register set */
795 struct ata_res res; /* ATA result register set */
796 u_int8_t *data_ptr; /* Ptr to the data buf/SG list */
797 u_int32_t dxfer_len; /* Data transfer length */
798 u_int32_t resid; /* Transfer residual length: 2's comp */
799 u_int8_t ata_flags; /* Flags for the rest of the buffer */
800 #define ATA_FLAG_AUX 0x1
801 #define ATA_FLAG_ICC 0x2
802 uint8_t icc; /* Isochronous Command Completion */
803 uint32_t aux;
804 uint32_t unused;
805 };
806
807 /*
808 * MMC I/O Request CCB used for the XPT_MMC_IO function code.
809 */
810 struct ccb_mmcio {
811 struct ccb_hdr ccb_h;
812 union ccb *next_ccb; /* Ptr for next CCB for action */
813 struct mmc_command cmd;
814 struct mmc_command stop;
815 };
816
817 struct ccb_accept_tio {
818 struct ccb_hdr ccb_h;
819 cdb_t cdb_io; /* Union for CDB bytes/pointer */
820 u_int8_t cdb_len; /* Number of bytes for the CDB */
821 u_int8_t tag_action; /* What to do for tag queueing */
822 u_int8_t sense_len; /* Number of bytes of Sense Data */
823 uint8_t priority; /* Command priority for SIMPLE tag */
824 u_int tag_id; /* tag id from initiator (target mode) */
825 u_int init_id; /* initiator id of who selected */
826 struct scsi_sense_data sense_data;
827 };
828
829 static __inline uint8_t *
830 atio_cdb_ptr(struct ccb_accept_tio *ccb)
831 {
832 return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
833 ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
834 }
835
836 /* Release SIM Queue */
837 struct ccb_relsim {
838 struct ccb_hdr ccb_h;
839 u_int32_t release_flags;
840 #define RELSIM_ADJUST_OPENINGS 0x01
841 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02
842 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04
843 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08
844 u_int32_t openings;
845 u_int32_t release_timeout; /* Abstract argument. */
846 u_int32_t qfrozen_cnt;
847 };
848
849 /*
850 * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes.
851 */
852 struct ccb_nvmeio {
853 struct ccb_hdr ccb_h;
854 union ccb *next_ccb; /* Ptr for next CCB for action */
855 struct nvme_command cmd; /* NVME command, per NVME standard */
856 struct nvme_completion cpl; /* NVME completion, per NVME standard */
857 uint8_t *data_ptr; /* Ptr to the data buf/SG list */
858 uint32_t dxfer_len; /* Data transfer length */
859 uint16_t sglist_cnt; /* Number of SG list entries */
860 uint16_t unused; /* padding for removed uint32_t */
861 };
862
863 /*
864 * Definitions for the asynchronous callback CCB fields.
865 */
866 typedef enum {
867 AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */
868 AC_ADVINFO_CHANGED = 0x2000,/* Advanced info might have changed */
869 AC_CONTRACT = 0x1000,/* A contractual callback */
870 AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */
871 AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */
872 AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */
873 AC_LOST_DEVICE = 0x100,/* A device went away */
874 AC_FOUND_DEVICE = 0x080,/* A new device was found */
875 AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */
876 AC_PATH_REGISTERED = 0x020,/* A new path has been registered */
877 AC_SENT_BDR = 0x010,/* A BDR message was sent to target */
878 AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */
879 AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */
880 AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */
881 } ac_code;
882
883 typedef void ac_callback_t (void *softc, u_int32_t code,
884 struct cam_path *path, void *args);
885
886 /*
887 * Generic Asynchronous callbacks.
888 *
889 * Generic arguments passed back which are then interpreted according to a
890 * per-system contract number.
891 */
892 #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t))
893 struct ac_contract {
894 u_int64_t contract_number;
895 u_int8_t contract_data[AC_CONTRACT_DATA_MAX];
896 };
897
898 #define AC_CONTRACT_DEV_CHG 1
899 struct ac_device_changed {
900 u_int64_t wwpn;
901 u_int32_t port;
902 target_id_t target;
903 u_int8_t arrived;
904 };
905
906 /* Set Asynchronous Callback CCB */
907 struct ccb_setasync {
908 struct ccb_hdr ccb_h;
909 u_int32_t event_enable; /* Async Event enables */
910 ac_callback_t *callback;
911 void *callback_arg;
912 };
913
914 /* Set Device Type CCB */
915 struct ccb_setdev {
916 struct ccb_hdr ccb_h;
917 u_int8_t dev_type; /* Value for dev type field in EDT */
918 };
919
920 /* SCSI Control Functions */
921
922 /* Abort XPT request CCB */
923 struct ccb_abort {
924 struct ccb_hdr ccb_h;
925 union ccb *abort_ccb; /* Pointer to CCB to abort */
926 };
927
928 /* Reset SCSI Bus CCB */
929 struct ccb_resetbus {
930 struct ccb_hdr ccb_h;
931 };
932
933 /* Reset SCSI Device CCB */
934 struct ccb_resetdev {
935 struct ccb_hdr ccb_h;
936 };
937
938 /* Terminate I/O Process Request CCB */
939 struct ccb_termio {
940 struct ccb_hdr ccb_h;
941 union ccb *termio_ccb; /* Pointer to CCB to terminate */
942 };
943
944 typedef enum {
945 CTS_TYPE_CURRENT_SETTINGS,
946 CTS_TYPE_USER_SETTINGS
947 } cts_type;
948
949 struct ccb_trans_settings_scsi
950 {
951 u_int valid; /* Which fields to honor */
952 #define CTS_SCSI_VALID_TQ 0x01
953 u_int flags;
954 #define CTS_SCSI_FLAGS_TAG_ENB 0x01
955 };
956
957 struct ccb_trans_settings_ata
958 {
959 u_int valid; /* Which fields to honor */
960 #define CTS_ATA_VALID_TQ 0x01
961 u_int flags;
962 #define CTS_ATA_FLAGS_TAG_ENB 0x01
963 };
964
965 struct ccb_trans_settings_spi
966 {
967 u_int valid; /* Which fields to honor */
968 #define CTS_SPI_VALID_SYNC_RATE 0x01
969 #define CTS_SPI_VALID_SYNC_OFFSET 0x02
970 #define CTS_SPI_VALID_BUS_WIDTH 0x04
971 #define CTS_SPI_VALID_DISC 0x08
972 #define CTS_SPI_VALID_PPR_OPTIONS 0x10
973 u_int flags;
974 #define CTS_SPI_FLAGS_DISC_ENB 0x01
975 u_int sync_period;
976 u_int sync_offset;
977 u_int bus_width;
978 u_int ppr_options;
979 };
980
981 struct ccb_trans_settings_fc {
982 u_int valid; /* Which fields to honor */
983 #define CTS_FC_VALID_WWNN 0x8000
984 #define CTS_FC_VALID_WWPN 0x4000
985 #define CTS_FC_VALID_PORT 0x2000
986 #define CTS_FC_VALID_SPEED 0x1000
987 u_int64_t wwnn; /* world wide node name */
988 u_int64_t wwpn; /* world wide port name */
989 u_int32_t port; /* 24 bit port id, if known */
990 u_int32_t bitrate; /* Mbps */
991 };
992
993 struct ccb_trans_settings_sas {
994 u_int valid; /* Which fields to honor */
995 #define CTS_SAS_VALID_SPEED 0x1000
996 u_int32_t bitrate; /* Mbps */
997 };
998
999 struct ccb_trans_settings_pata {
1000 u_int valid; /* Which fields to honor */
1001 #define CTS_ATA_VALID_MODE 0x01
1002 #define CTS_ATA_VALID_BYTECOUNT 0x02
1003 #define CTS_ATA_VALID_ATAPI 0x20
1004 #define CTS_ATA_VALID_CAPS 0x40
1005 int mode; /* Mode */
1006 u_int bytecount; /* Length of PIO transaction */
1007 u_int atapi; /* Length of ATAPI CDB */
1008 u_int caps; /* Device and host SATA caps. */
1009 #define CTS_ATA_CAPS_H 0x0000ffff
1010 #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */
1011 #define CTS_ATA_CAPS_D 0xffff0000
1012 };
1013
1014 struct ccb_trans_settings_sata {
1015 u_int valid; /* Which fields to honor */
1016 #define CTS_SATA_VALID_MODE 0x01
1017 #define CTS_SATA_VALID_BYTECOUNT 0x02
1018 #define CTS_SATA_VALID_REVISION 0x04
1019 #define CTS_SATA_VALID_PM 0x08
1020 #define CTS_SATA_VALID_TAGS 0x10
1021 #define CTS_SATA_VALID_ATAPI 0x20
1022 #define CTS_SATA_VALID_CAPS 0x40
1023 int mode; /* Legacy PATA mode */
1024 u_int bytecount; /* Length of PIO transaction */
1025 int revision; /* SATA revision */
1026 u_int pm_present; /* PM is present (XPT->SIM) */
1027 u_int tags; /* Number of allowed tags */
1028 u_int atapi; /* Length of ATAPI CDB */
1029 u_int caps; /* Device and host SATA caps. */
1030 #define CTS_SATA_CAPS_H 0x0000ffff
1031 #define CTS_SATA_CAPS_H_PMREQ 0x00000001
1032 #define CTS_SATA_CAPS_H_APST 0x00000002
1033 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */
1034 #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */
1035 #define CTS_SATA_CAPS_D 0xffff0000
1036 #define CTS_SATA_CAPS_D_PMREQ 0x00010000
1037 #define CTS_SATA_CAPS_D_APST 0x00020000
1038 };
1039
1040 struct ccb_trans_settings_nvme
1041 {
1042 u_int valid; /* Which fields to honor */
1043 #define CTS_NVME_VALID_SPEC 0x01
1044 #define CTS_NVME_VALID_CAPS 0x02
1045 #define CTS_NVME_VALID_LINK 0x04
1046 uint32_t spec; /* NVMe spec implemented -- same as vs register */
1047 uint32_t max_xfer; /* Max transfer size (0 -> unlimited) */
1048 uint32_t caps;
1049 uint8_t lanes; /* Number of PCIe lanes */
1050 uint8_t speed; /* PCIe generation for each lane */
1051 uint8_t max_lanes; /* Number of PCIe lanes */
1052 uint8_t max_speed; /* PCIe generation for each lane */
1053 };
1054
1055 #include <cam/mmc/mmc_bus.h>
1056 struct ccb_trans_settings_mmc {
1057 struct mmc_ios ios;
1058 #define MMC_CLK (1 << 1)
1059 #define MMC_VDD (1 << 2)
1060 #define MMC_CS (1 << 3)
1061 #define MMC_BW (1 << 4)
1062 #define MMC_PM (1 << 5)
1063 #define MMC_BT (1 << 6)
1064 #define MMC_BM (1 << 7)
1065 #define MMC_VCCQ (1 << 8)
1066 uint32_t ios_valid;
1067 /* The following is used only for GET_TRAN_SETTINGS */
1068 uint32_t host_ocr;
1069 int host_f_min;
1070 int host_f_max;
1071 /* Copied from sys/dev/mmc/bridge.h */
1072 #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */
1073 #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */
1074 #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */
1075 #define MMC_CAP_BOOT_NOACC (1 << 4) /* Cannot access boot partitions */
1076 #define MMC_CAP_WAIT_WHILE_BUSY (1 << 5) /* Host waits for busy responses */
1077 #define MMC_CAP_UHS_SDR12 (1 << 6) /* Can do UHS SDR12 */
1078 #define MMC_CAP_UHS_SDR25 (1 << 7) /* Can do UHS SDR25 */
1079 #define MMC_CAP_UHS_SDR50 (1 << 8) /* Can do UHS SDR50 */
1080 #define MMC_CAP_UHS_SDR104 (1 << 9) /* Can do UHS SDR104 */
1081 #define MMC_CAP_UHS_DDR50 (1 << 10) /* Can do UHS DDR50 */
1082 #define MMC_CAP_MMC_DDR52_120 (1 << 11) /* Can do eMMC DDR52 at 1.2 V */
1083 #define MMC_CAP_MMC_DDR52_180 (1 << 12) /* Can do eMMC DDR52 at 1.8 V */
1084 #define MMC_CAP_MMC_DDR52 (MMC_CAP_MMC_DDR52_120 | MMC_CAP_MMC_DDR52_180)
1085 #define MMC_CAP_MMC_HS200_120 (1 << 13) /* Can do eMMC HS200 at 1.2 V */
1086 #define MMC_CAP_MMC_HS200_180 (1 << 14) /* Can do eMMC HS200 at 1.8 V */
1087 #define MMC_CAP_MMC_HS200 (MMC_CAP_MMC_HS200_120| MMC_CAP_MMC_HS200_180)
1088 #define MMC_CAP_MMC_HS400_120 (1 << 15) /* Can do eMMC HS400 at 1.2 V */
1089 #define MMC_CAP_MMC_HS400_180 (1 << 16) /* Can do eMMC HS400 at 1.8 V */
1090 #define MMC_CAP_MMC_HS400 (MMC_CAP_MMC_HS400_120 | MMC_CAP_MMC_HS400_180)
1091 #define MMC_CAP_MMC_HSX00_120 (MMC_CAP_MMC_HS200_120 | MMC_CAP_MMC_HS400_120)
1092 #define MMC_CAP_MMC_ENH_STROBE (1 << 17) /* Can do eMMC Enhanced Strobe */
1093 #define MMC_CAP_SIGNALING_120 (1 << 18) /* Can do signaling at 1.2 V */
1094 #define MMC_CAP_SIGNALING_180 (1 << 19) /* Can do signaling at 1.8 V */
1095 #define MMC_CAP_SIGNALING_330 (1 << 20) /* Can do signaling at 3.3 V */
1096 #define MMC_CAP_DRIVER_TYPE_A (1 << 21) /* Can do Driver Type A */
1097 #define MMC_CAP_DRIVER_TYPE_C (1 << 22) /* Can do Driver Type C */
1098 #define MMC_CAP_DRIVER_TYPE_D (1 << 23) /* Can do Driver Type D */
1099
1100 uint32_t host_caps;
1101 uint32_t host_max_data;
1102 };
1103
1104 /* Get/Set transfer rate/width/disconnection/tag queueing settings */
1105 struct ccb_trans_settings {
1106 struct ccb_hdr ccb_h;
1107 cts_type type; /* Current or User settings */
1108 cam_proto protocol;
1109 u_int protocol_version;
1110 cam_xport transport;
1111 u_int transport_version;
1112 union {
1113 u_int valid; /* Which fields to honor */
1114 struct ccb_trans_settings_ata ata;
1115 struct ccb_trans_settings_scsi scsi;
1116 struct ccb_trans_settings_nvme nvme;
1117 struct ccb_trans_settings_mmc mmc;
1118 } proto_specific;
1119 union {
1120 u_int valid; /* Which fields to honor */
1121 struct ccb_trans_settings_spi spi;
1122 struct ccb_trans_settings_fc fc;
1123 struct ccb_trans_settings_sas sas;
1124 struct ccb_trans_settings_pata ata;
1125 struct ccb_trans_settings_sata sata;
1126 struct ccb_trans_settings_nvme nvme;
1127 } xport_specific;
1128 };
1129
1130 /*
1131 * Calculate the geometry parameters for a device
1132 * given the block size and volume size in blocks.
1133 */
1134 struct ccb_calc_geometry {
1135 struct ccb_hdr ccb_h;
1136 u_int32_t block_size;
1137 u_int64_t volume_size;
1138 u_int32_t cylinders;
1139 u_int8_t heads;
1140 u_int8_t secs_per_track;
1141 };
1142
1143 /*
1144 * Set or get SIM (and transport) specific knobs
1145 */
1146
1147 #define KNOB_VALID_ADDRESS 0x1
1148 #define KNOB_VALID_ROLE 0x2
1149
1150 #define KNOB_ROLE_NONE 0x0
1151 #define KNOB_ROLE_INITIATOR 0x1
1152 #define KNOB_ROLE_TARGET 0x2
1153 #define KNOB_ROLE_BOTH 0x3
1154
1155 struct ccb_sim_knob_settings_spi {
1156 u_int valid;
1157 u_int initiator_id;
1158 u_int role;
1159 };
1160
1161 struct ccb_sim_knob_settings_fc {
1162 u_int valid;
1163 u_int64_t wwnn; /* world wide node name */
1164 u_int64_t wwpn; /* world wide port name */
1165 u_int role;
1166 };
1167
1168 struct ccb_sim_knob_settings_sas {
1169 u_int valid;
1170 u_int64_t wwnn; /* world wide node name */
1171 u_int role;
1172 };
1173 #define KNOB_SETTINGS_SIZE 128
1174
1175 struct ccb_sim_knob {
1176 struct ccb_hdr ccb_h;
1177 union {
1178 u_int valid; /* Which fields to honor */
1179 struct ccb_sim_knob_settings_spi spi;
1180 struct ccb_sim_knob_settings_fc fc;
1181 struct ccb_sim_knob_settings_sas sas;
1182 char pad[KNOB_SETTINGS_SIZE];
1183 } xport_specific;
1184 };
1185
1186 /*
1187 * Rescan the given bus, or bus/target/lun
1188 */
1189 struct ccb_rescan {
1190 struct ccb_hdr ccb_h;
1191 cam_flags flags;
1192 };
1193
1194 /*
1195 * Turn on debugging for the given bus, bus/target, or bus/target/lun.
1196 */
1197 struct ccb_debug {
1198 struct ccb_hdr ccb_h;
1199 cam_debug_flags flags;
1200 };
1201
1202 /* Target mode structures. */
1203
1204 struct ccb_en_lun {
1205 struct ccb_hdr ccb_h;
1206 u_int16_t grp6_len; /* Group 6 VU CDB length */
1207 u_int16_t grp7_len; /* Group 7 VU CDB length */
1208 u_int8_t enable;
1209 };
1210
1211 /* old, barely used immediate notify, binary compatibility */
1212 struct ccb_immed_notify {
1213 struct ccb_hdr ccb_h;
1214 struct scsi_sense_data sense_data;
1215 u_int8_t sense_len; /* Number of bytes in sense buffer */
1216 u_int8_t initiator_id; /* Id of initiator that selected */
1217 u_int8_t message_args[7]; /* Message Arguments */
1218 };
1219
1220 struct ccb_notify_ack {
1221 struct ccb_hdr ccb_h;
1222 u_int16_t seq_id; /* Sequence identifier */
1223 u_int8_t event; /* Event flags */
1224 };
1225
1226 struct ccb_immediate_notify {
1227 struct ccb_hdr ccb_h;
1228 u_int tag_id; /* Tag for immediate notify */
1229 u_int seq_id; /* Tag for target of notify */
1230 u_int initiator_id; /* Initiator Identifier */
1231 u_int arg; /* Function specific */
1232 };
1233
1234 struct ccb_notify_acknowledge {
1235 struct ccb_hdr ccb_h;
1236 u_int tag_id; /* Tag for immediate notify */
1237 u_int seq_id; /* Tag for target of notify */
1238 u_int initiator_id; /* Initiator Identifier */
1239 u_int arg; /* Response information */
1240 /*
1241 * Lower byte of arg is one of RESPONSE CODE values defined below
1242 * (subset of response codes from SPL-4 and FCP-4 specifications),
1243 * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION.
1244 */
1245 #define CAM_RSP_TMF_COMPLETE 0x00
1246 #define CAM_RSP_TMF_REJECTED 0x04
1247 #define CAM_RSP_TMF_FAILED 0x05
1248 #define CAM_RSP_TMF_SUCCEEDED 0x08
1249 #define CAM_RSP_TMF_INCORRECT_LUN 0x09
1250 };
1251
1252 /* HBA engine structures. */
1253
1254 typedef enum {
1255 EIT_BUFFER, /* Engine type: buffer memory */
1256 EIT_LOSSLESS, /* Engine type: lossless compression */
1257 EIT_LOSSY, /* Engine type: lossy compression */
1258 EIT_ENCRYPT /* Engine type: encryption */
1259 } ei_type;
1260
1261 typedef enum {
1262 EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */
1263 EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */
1264 EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */
1265 EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */
1266 } ei_algo;
1267
1268 struct ccb_eng_inq {
1269 struct ccb_hdr ccb_h;
1270 u_int16_t eng_num; /* The engine number for this inquiry */
1271 ei_type eng_type; /* Returned engine type */
1272 ei_algo eng_algo; /* Returned engine algorithm type */
1273 u_int32_t eng_memeory; /* Returned engine memory size */
1274 };
1275
1276 struct ccb_eng_exec { /* This structure must match SCSIIO size */
1277 struct ccb_hdr ccb_h;
1278 u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */
1279 u_int8_t *req_map; /* Ptr for mapping info on the req. */
1280 u_int8_t *data_ptr; /* Pointer to the data buf/SG list */
1281 u_int32_t dxfer_len; /* Data transfer length */
1282 u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */
1283 u_int16_t sglist_cnt; /* Num of scatter gather list entries */
1284 u_int32_t dmax_len; /* Destination data maximum length */
1285 u_int32_t dest_len; /* Destination data length */
1286 int32_t src_resid; /* Source residual length: 2's comp */
1287 u_int32_t timeout; /* Timeout value */
1288 u_int16_t eng_num; /* Engine number for this request */
1289 u_int16_t vu_flags; /* Vendor Unique flags */
1290 };
1291
1292 /*
1293 * Definitions for the timeout field in the SCSI I/O CCB.
1294 */
1295 #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */
1296 #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */
1297
1298 #define CAM_SUCCESS 0 /* For signaling general success */
1299
1300 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */
1301
1302 /*
1303 * CCB for working with advanced device information. This operates in a fashion
1304 * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer
1305 * type requested, and provide a buffer size/buffer to write to. If the
1306 * buffer is too small, provsiz will be larger than bufsiz.
1307 */
1308 struct ccb_dev_advinfo {
1309 struct ccb_hdr ccb_h;
1310 uint32_t flags;
1311 #define CDAI_FLAG_NONE 0x0 /* No flags set */
1312 #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */
1313 uint32_t buftype; /* IN: Type of data being requested */
1314 /* NB: buftype is interpreted on a per-transport basis */
1315 #define CDAI_TYPE_SCSI_DEVID 1
1316 #define CDAI_TYPE_SERIAL_NUM 2
1317 #define CDAI_TYPE_PHYS_PATH 3
1318 #define CDAI_TYPE_RCAPLONG 4
1319 #define CDAI_TYPE_EXT_INQ 5
1320 #define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */
1321 #define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */
1322 #define CDAI_TYPE_MMC_PARAMS 8 /* MMC/SD ident */
1323 off_t bufsiz; /* IN: Size of external buffer */
1324 #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is a uint16_t */
1325 off_t provsiz; /* OUT: Size required/used */
1326 uint8_t *buf; /* IN/OUT: Buffer for requested data */
1327 };
1328
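/*
 * Illustrative sketch: requesting the cached serial number for a device.
 * Priority/path setup (xpt_setup_ccb()) and submission (xpt_action()) are
 * declared in other headers and omitted here; the helper is hypothetical.
 */
static __inline void
ccb_example_fill_advinfo_serial(struct ccb_dev_advinfo *cdai,
    uint8_t *buf, off_t buflen)
{
	cdai->ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai->flags = CDAI_FLAG_NONE;
	cdai->buftype = CDAI_TYPE_SERIAL_NUM;
	cdai->bufsiz = buflen;
	cdai->provsiz = 0;
	cdai->buf = buf;
}
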
1329 /*
1330 * CCB for sending async events
1331 */
1332 struct ccb_async {
1333 struct ccb_hdr ccb_h;
1334 uint32_t async_code;
1335 off_t async_arg_size;
1336 void *async_arg_ptr;
1337 };
1338
1339 /*
1340 * Union of all CCB types for kernel space allocation. This union should
1341 * never be used for manipulating CCBs - its only use is for the allocation
1342 * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc
1343 * and the argument to xpt_ccb_free.
1344 */
1345 union ccb {
1346 struct ccb_hdr ccb_h; /* For convenience */
1347 struct ccb_scsiio csio;
1348 struct ccb_getdev cgd;
1349 struct ccb_getdevlist cgdl;
1350 struct ccb_pathinq cpi;
1351 struct ccb_relsim crs;
1352 struct ccb_setasync csa;
1353 struct ccb_setdev csd;
1354 struct ccb_pathstats cpis;
1355 struct ccb_getdevstats cgds;
1356 struct ccb_dev_match cdm;
1357 struct ccb_trans_settings cts;
1358 struct ccb_calc_geometry ccg;
1359 struct ccb_sim_knob knob;
1360 struct ccb_abort cab;
1361 struct ccb_resetbus crb;
1362 struct ccb_resetdev crd;
1363 struct ccb_termio tio;
1364 struct ccb_accept_tio atio;
1365 struct ccb_scsiio ctio;
1366 struct ccb_en_lun cel;
1367 struct ccb_immed_notify cin;
1368 struct ccb_notify_ack cna;
1369 struct ccb_immediate_notify cin1;
1370 struct ccb_notify_acknowledge cna2;
1371 struct ccb_eng_inq cei;
1372 struct ccb_eng_exec cee;
1373 struct ccb_smpio smpio;
1374 struct ccb_rescan crcn;
1375 struct ccb_debug cdbg;
1376 struct ccb_ataio ataio;
1377 struct ccb_dev_advinfo cdai;
1378 struct ccb_async casync;
1379 struct ccb_nvmeio nvmeio;
1380 struct ccb_mmcio mmcio;
1381 };
1382
1383 #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \
1384 bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \
1385 sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h))
1386
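/*
 * Illustrative sketch: the common pattern when reusing a CCB is to clear
 * everything below the header, preserving the compiled path and priority,
 * and mark the request in progress again.  CAM_REQ_INPROG comes from
 * <cam/cam.h>, which users of this header already include; the helper is
 * hypothetical.
 */
static __inline void
ccb_example_recycle(union ccb *ccb)
{
	CCB_CLEAR_ALL_EXCEPT_HDR(ccb);
	ccb->ccb_h.status = CAM_REQ_INPROG;
}
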
1387 __BEGIN_DECLS
1388 static __inline void
1389 cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
1390 void (*cbfcnp)(struct cam_periph *, union ccb *),
1391 u_int32_t flags, u_int8_t tag_action,
1392 u_int8_t *data_ptr, u_int32_t dxfer_len,
1393 u_int8_t sense_len, u_int8_t cdb_len,
1394 u_int32_t timeout)
1395 {
1396 csio->ccb_h.func_code = XPT_SCSI_IO;
1397 csio->ccb_h.flags = flags;
1398 csio->ccb_h.xflags = 0;
1399 csio->ccb_h.retry_count = retries;
1400 csio->ccb_h.cbfcnp = cbfcnp;
1401 csio->ccb_h.timeout = timeout;
1402 csio->data_ptr = data_ptr;
1403 csio->dxfer_len = dxfer_len;
1404 csio->sense_len = sense_len;
1405 csio->cdb_len = cdb_len;
1406 csio->tag_action = tag_action;
1407 csio->priority = 0;
1408 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1409 csio->bio = NULL;
1410 #endif
1411 }
1412
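/*
 * Illustrative usage sketch: issuing a no-data SCSI command by filling the
 * CCB directly.  In-tree code normally uses the scsi_*() constructors from
 * scsi_all.h (which also provides SSD_FULL_SIZE); a zeroed 6-byte CDB is
 * TEST UNIT READY (opcode 0x00).  The helper and its parameter choices are
 * hypothetical.
 */
static __inline void
ccb_example_fill_tur(struct ccb_scsiio *csio,
    void (*done)(struct cam_periph *, union ccb *))
{
	int i;

	cam_fill_csio(csio,
		      /*retries*/ 4,
		      /*cbfcnp*/ done,
		      /*flags*/ CAM_DIR_NONE,
		      /*tag_action*/ CAM_TAG_ACTION_NONE,
		      /*data_ptr*/ NULL,
		      /*dxfer_len*/ 0,
		      /*sense_len*/ SSD_FULL_SIZE,
		      /*cdb_len*/ 6,
		      /*timeout*/ 60 * 1000);
	for (i = 0; i < 6; i++)
		csio->cdb_io.cdb_bytes[i] = 0;
}
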
1413 static __inline void
1414 cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
1415 void (*cbfcnp)(struct cam_periph *, union ccb *),
1416 u_int32_t flags, u_int tag_action, u_int tag_id,
1417 u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
1418 u_int32_t dxfer_len, u_int32_t timeout)
1419 {
1420 csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
1421 csio->ccb_h.flags = flags;
1422 csio->ccb_h.xflags = 0;
1423 csio->ccb_h.retry_count = retries;
1424 csio->ccb_h.cbfcnp = cbfcnp;
1425 csio->ccb_h.timeout = timeout;
1426 csio->data_ptr = data_ptr;
1427 csio->dxfer_len = dxfer_len;
1428 csio->scsi_status = scsi_status;
1429 csio->tag_action = tag_action;
1430 csio->priority = 0;
1431 csio->tag_id = tag_id;
1432 csio->init_id = init_id;
1433 }
1434
1435 static __inline void
1436 cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
1437 void (*cbfcnp)(struct cam_periph *, union ccb *),
1438 u_int32_t flags, u_int tag_action __unused,
1439 u_int8_t *data_ptr, u_int32_t dxfer_len,
1440 u_int32_t timeout)
1441 {
1442 ataio->ccb_h.func_code = XPT_ATA_IO;
1443 ataio->ccb_h.flags = flags;
1444 ataio->ccb_h.retry_count = retries;
1445 ataio->ccb_h.cbfcnp = cbfcnp;
1446 ataio->ccb_h.timeout = timeout;
1447 ataio->data_ptr = data_ptr;
1448 ataio->dxfer_len = dxfer_len;
1449 ataio->ata_flags = 0;
1450 }
1451
1452 static __inline void
1453 cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
1454 void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
1455 uint8_t *smp_request, int smp_request_len,
1456 uint8_t *smp_response, int smp_response_len,
1457 uint32_t timeout)
1458 {
1459 #ifdef _KERNEL
1460 KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH,
1461 ("direction != CAM_DIR_BOTH"));
1462 KASSERT((smp_request != NULL) && (smp_response != NULL),
1463 ("need valid request and response buffers"));
1464 KASSERT((smp_request_len != 0) && (smp_response_len != 0),
1465 ("need non-zero request and response lengths"));
1466 #endif /*_KERNEL*/
1467 smpio->ccb_h.func_code = XPT_SMP_IO;
1468 smpio->ccb_h.flags = flags;
1469 smpio->ccb_h.retry_count = retries;
1470 smpio->ccb_h.cbfcnp = cbfcnp;
1471 smpio->ccb_h.timeout = timeout;
1472 smpio->smp_request = smp_request;
1473 smpio->smp_request_len = smp_request_len;
1474 smpio->smp_response = smp_response;
1475 smpio->smp_response_len = smp_response_len;
1476 }
1477
1478 static __inline void
1479 cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries,
1480 void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
1481 uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags,
1482 struct mmc_data *mmc_d,
1483 uint32_t timeout)
1484 {
1485 mmcio->ccb_h.func_code = XPT_MMC_IO;
1486 mmcio->ccb_h.flags = flags;
1487 mmcio->ccb_h.retry_count = retries;
1488 mmcio->ccb_h.cbfcnp = cbfcnp;
1489 mmcio->ccb_h.timeout = timeout;
1490 mmcio->cmd.opcode = mmc_opcode;
1491 mmcio->cmd.arg = mmc_arg;
1492 mmcio->cmd.flags = mmc_flags;
1493 mmcio->stop.opcode = 0;
1494 mmcio->stop.arg = 0;
1495 mmcio->stop.flags = 0;
1496 if (mmc_d != NULL) {
1497 mmcio->cmd.data = mmc_d;
1498 } else
1499 mmcio->cmd.data = NULL;
1500 mmcio->cmd.resp[0] = 0;
1501 mmcio->cmd.resp[1] = 0;
1502 mmcio->cmd.resp[2] = 0;
1503 mmcio->cmd.resp[3] = 0;
1504 }
1505
1506 static __inline void
1507 cam_set_ccbstatus(union ccb *ccb, cam_status status)
1508 {
1509 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1510 ccb->ccb_h.status |= status;
1511 }
1512
1513 static __inline cam_status
1514 cam_ccb_status(union ccb *ccb)
1515 {
1516 return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK));
1517 }
1518
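/*
 * Illustrative usage sketch: completion callbacks normally check the masked
 * status before consuming command-specific results.  CAM_REQ_CMP comes from
 * <cam/cam.h>; the helper is hypothetical.
 */
static __inline int
ccb_example_completed_ok(union ccb *ccb)
{
	return (cam_ccb_status(ccb) == CAM_REQ_CMP);
}
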
1519 void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
1520
1521 static __inline void
1522 cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries,
1523 void (*cbfcnp)(struct cam_periph *, union ccb *),
1524 u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len,
1525 u_int32_t timeout)
1526 {
1527 nvmeio->ccb_h.func_code = XPT_NVME_IO;
1528 nvmeio->ccb_h.flags = flags;
1529 nvmeio->ccb_h.retry_count = retries;
1530 nvmeio->ccb_h.cbfcnp = cbfcnp;
1531 nvmeio->ccb_h.timeout = timeout;
1532 nvmeio->data_ptr = data_ptr;
1533 nvmeio->dxfer_len = dxfer_len;
1534 }
1535
1536 static __inline void
1537 cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries,
1538 void (*cbfcnp)(struct cam_periph *, union ccb *),
1539 u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len,
1540 u_int32_t timeout)
1541 {
1542 nvmeio->ccb_h.func_code = XPT_NVME_ADMIN;
1543 nvmeio->ccb_h.flags = flags;
1544 nvmeio->ccb_h.retry_count = retries;
1545 nvmeio->ccb_h.cbfcnp = cbfcnp;
1546 nvmeio->ccb_h.timeout = timeout;
1547 nvmeio->data_ptr = data_ptr;
1548 nvmeio->dxfer_len = dxfer_len;
1549 }
1550 __END_DECLS
1551
1552 #endif /* _CAM_CAM_CCB_H */