FreeBSD/Linux Kernel Cross Reference
sys/dev/mpr/mpr_sas.c
1 /*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30 *
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /* Communications core for Avago Technologies (LSI) MPT3 */
37
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60
61 #include <machine/stdarg.h>
62
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73
74 #include <dev/nvme/nvme.h>
75
76 #include <dev/mpr/mpi/mpi2_type.h>
77 #include <dev/mpr/mpi/mpi2.h>
78 #include <dev/mpr/mpi/mpi2_ioc.h>
79 #include <dev/mpr/mpi/mpi2_sas.h>
80 #include <dev/mpr/mpi/mpi2_pci.h>
81 #include <dev/mpr/mpi/mpi2_cnfg.h>
82 #include <dev/mpr/mpi/mpi2_init.h>
83 #include <dev/mpr/mpi/mpi2_tool.h>
84 #include <dev/mpr/mpr_ioctl.h>
85 #include <dev/mpr/mprvar.h>
86 #include <dev/mpr/mpr_table.h>
87 #include <dev/mpr/mpr_sas.h>
88
89 #define MPRSAS_DISCOVERY_TIMEOUT 20
90 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
91
/*
 * Static lookup table, indexed by SCSI CDB opcode, of the MPI EEDP flag
 * value to apply to that opcode when end-to-end data protection is in
 * effect.  A zero entry means the opcode gets no EEDP treatment.
 */
#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	/* data-in: check and strip PI */
#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP		/* data-out: insert PI */
#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP		/* verify: insert PI */
/* Each row covers 16 opcodes; row n holds opcodes 0xn0 through 0xnF. */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116
117 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
118
119 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
120 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mprsas_poll(struct cam_sim *sim);
123 static void mprsas_scsiio_timeout(void *data);
124 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
125 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
126 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
127 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
128 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130 struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132 struct cam_path *path, void *arg);
133 static int mprsas_send_portenable(struct mpr_softc *sc);
134 static void mprsas_portenable_complete(struct mpr_softc *sc,
135 struct mpr_command *cm);
136
137 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
138 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
139 uint64_t sasaddr);
140 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
141
142 struct mprsas_target *
143 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
144 uint16_t handle)
145 {
146 struct mprsas_target *target;
147 int i;
148
149 for (i = start; i < sassc->maxtargets; i++) {
150 target = &sassc->targets[i];
151 if (target->handle == handle)
152 return (target);
153 }
154
155 return (NULL);
156 }
157
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery. Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
164 */
165 void
166 mprsas_startup_increment(struct mprsas_softc *sassc)
167 {
168 MPR_FUNCTRACE(sassc->sc);
169
170 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
171 if (sassc->startup_refcount++ == 0) {
172 /* just starting, freeze the simq */
173 mpr_dprint(sassc->sc, MPR_INIT,
174 "%s freezing simq\n", __func__);
175 xpt_hold_boot();
176 xpt_freeze_simq(sassc->sim, 1);
177 }
178 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
179 sassc->startup_refcount);
180 }
181 }
182
183 void
184 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
185 {
186 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
187 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
188 xpt_release_simq(sassc->sim, 1);
189 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
190 }
191 }
192
193 void
194 mprsas_startup_decrement(struct mprsas_softc *sassc)
195 {
196 MPR_FUNCTRACE(sassc->sc);
197
198 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
199 if (--sassc->startup_refcount == 0) {
200 /* finished all discovery-related actions, release
201 * the simq and rescan for the latest topology.
202 */
203 mpr_dprint(sassc->sc, MPR_INIT,
204 "%s releasing simq\n", __func__);
205 sassc->flags &= ~MPRSAS_IN_STARTUP;
206 xpt_release_simq(sassc->sim, 1);
207 xpt_release_boot();
208 }
209 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
210 sassc->startup_refcount);
211 }
212 }
213
/*
 * The firmware requires us to stop sending commands to a device while we're
 * performing task management on it.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
221 struct mpr_command *
222 mprsas_alloc_tm(struct mpr_softc *sc)
223 {
224 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
225 struct mpr_command *tm;
226
227 MPR_FUNCTRACE(sc);
228 tm = mpr_alloc_high_priority_command(sc);
229 if (tm == NULL)
230 return (NULL);
231
232 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
233 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
234 return tm;
235 }
236
237 void
238 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
239 {
240 int target_id = 0xFFFFFFFF;
241
242 MPR_FUNCTRACE(sc);
243 if (tm == NULL)
244 return;
245
246 /*
247 * For TM's the devq is frozen for the device. Unfreeze it here and
248 * free the resources used for freezing the devq. Must clear the
249 * INRESET flag as well or scsi I/O will not work.
250 */
251 if (tm->cm_targ != NULL) {
252 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
253 target_id = tm->cm_targ->tid;
254 }
255 if (tm->cm_ccb) {
256 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
257 target_id);
258 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259 xpt_free_path(tm->cm_ccb->ccb_h.path);
260 xpt_free_ccb(tm->cm_ccb);
261 }
262
263 mpr_free_high_priority_command(sc, tm);
264 }
265
266 void
267 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
268 {
269 struct mprsas_softc *sassc = sc->sassc;
270 path_id_t pathid;
271 target_id_t targetid;
272 union ccb *ccb;
273
274 MPR_FUNCTRACE(sc);
275 pathid = cam_sim_path(sassc->sim);
276 if (targ == NULL)
277 targetid = CAM_TARGET_WILDCARD;
278 else
279 targetid = targ - sassc->targets;
280
281 /*
282 * Allocate a CCB and schedule a rescan.
283 */
284 ccb = xpt_alloc_ccb_nowait();
285 if (ccb == NULL) {
286 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
287 return;
288 }
289
290 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
291 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
293 xpt_free_ccb(ccb);
294 return;
295 }
296
297 if (targetid == CAM_TARGET_WILDCARD)
298 ccb->ccb_h.func_code = XPT_SCAN_BUS;
299 else
300 ccb->ccb_h.func_code = XPT_SCAN_TGT;
301
302 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
303 xpt_rescan(ccb);
304 }
305
/*
 * Emit one debug log line describing 'cm': its CAM path (or a synthesized
 * "noperiph" sim:bus:target:lun tuple when no CCB is attached), the decoded
 * CDB and transfer length for SCSI I/O, the SMID, and then the caller's
 * printf-style message.  Does nothing unless 'level' is enabled in the
 * adapter's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* sbuf backed by the on-stack buffer; output is truncated to fit. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, append the decoded CDB and length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
350
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Logs the reset outcome and, on success,
 * clears the target's bookkeeping so the slot can be reused.  Volumes need
 * no MPI2_SAS_OP_REMOVE_DEVICE follow-up, so the TM is freed here in every
 * path.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data by the sender. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
414
415 /*
416 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
417 * Otherwise Volume Delete is same as Bare Drive Removal.
418 */
419 void
420 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
421 {
422 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
423 struct mpr_softc *sc;
424 struct mpr_command *cm;
425 struct mprsas_target *targ = NULL;
426
427 MPR_FUNCTRACE(sassc->sc);
428 sc = sassc->sc;
429
430 targ = mprsas_find_target_by_handle(sassc, 0, handle);
431 if (targ == NULL) {
432 /* FIXME: what is the action? */
433 /* We don't know about this device? */
434 mpr_dprint(sc, MPR_ERROR,
435 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
436 return;
437 }
438
439 targ->flags |= MPRSAS_TARGET_INREMOVAL;
440
441 cm = mprsas_alloc_tm(sc);
442 if (cm == NULL) {
443 mpr_dprint(sc, MPR_ERROR,
444 "%s: command alloc failure\n", __func__);
445 return;
446 }
447
448 mprsas_rescan_target(sc, targ);
449
450 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
451 req->DevHandle = targ->handle;
452 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
453
454 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
455 /* SAS Hard Link Reset / SATA Link Reset */
456 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
457 } else {
458 /* PCIe Protocol Level Reset*/
459 req->MsgFlags =
460 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
461 }
462
463 cm->cm_targ = targ;
464 cm->cm_data = NULL;
465 cm->cm_complete = mprsas_remove_volume;
466 cm->cm_complete_data = (void *)(uintptr_t)handle;
467
468 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
469 __func__, targ->tid);
470 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
471
472 mpr_map_command(sc, cm);
473 }
474
475 /*
476 * The firmware performs debounce on the link to avoid transient link errors
477 * and false removals. When it does decide that link has been lost and a
478 * device needs to go away, it expects that the host will perform a target reset
479 * and then an op remove. The reset has the side-effect of aborting any
480 * outstanding requests for the device, which is required for the op-remove to
481 * succeed. It's not clear if the host should check for the device coming back
482 * alive after the reset.
483 */
484 void
485 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
486 {
487 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
488 struct mpr_softc *sc;
489 struct mpr_command *tm;
490 struct mprsas_target *targ = NULL;
491
492 MPR_FUNCTRACE(sassc->sc);
493
494 sc = sassc->sc;
495
496 targ = mprsas_find_target_by_handle(sassc, 0, handle);
497 if (targ == NULL) {
498 /* FIXME: what is the action? */
499 /* We don't know about this device? */
500 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
501 __func__, handle);
502 return;
503 }
504
505 targ->flags |= MPRSAS_TARGET_INREMOVAL;
506
507 tm = mprsas_alloc_tm(sc);
508 if (tm == NULL) {
509 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
510 __func__);
511 return;
512 }
513
514 mprsas_rescan_target(sc, targ);
515
516 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
517 req->DevHandle = htole16(targ->handle);
518 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
519
520 /* SAS Hard Link Reset / SATA Link Reset */
521 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
522
523 tm->cm_targ = targ;
524 tm->cm_data = NULL;
525 tm->cm_complete = mprsas_remove_device;
526 tm->cm_complete_data = (void *)(uintptr_t)handle;
527
528 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
529 __func__, targ->tid);
530 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
531
532 mpr_map_command(sc, tm);
533 }
534
/*
 * Completion handler for the target-reset TM sent by mprsas_prepare_remove().
 * Logs the reset outcome, then reuses the same command to issue the
 * MPI2_SAS_OP_REMOVE_DEVICE that actually retires the firmware's device
 * handle.  The op-remove is deferred (parked in targ->pending_remove_tm)
 * while the target still has commands outstanding.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data by the sender. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command as the SAS IO unit control op-remove. */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out, and the pending remove will be
	 * kicked off from there if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Defer; whoever drains the last command sends this TM. */
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
611
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE issued by
 * mprsas_remove_device().  On success, clears the target's bookkeeping and
 * frees its LUN list so the slot can be reused; on failure the identifying
 * fields are deliberately left intact (see comment below).  Frees the TM
 * in every path.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Release the per-LUN records attached to this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
691
692 static int
693 mprsas_register_events(struct mpr_softc *sc)
694 {
695 uint8_t events[16];
696
697 bzero(events, 16);
698 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
700 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
701 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
703 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
704 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
705 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
706 setbit(events, MPI2_EVENT_IR_VOLUME);
707 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
708 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
709 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
710 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
711 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 }
718 }
719
720 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 &sc->sassc->mprsas_eh);
722
723 return (0);
724 }
725
/*
 * Attach the SAS transport layer: allocate the per-adapter sassc and target
 * array, create and register the CAM SIM, start the firmware-event
 * taskqueue, and register for the CAM async events used for EEDP probing.
 * Returns 0 on success or an errno; on error, mpr_detach_sas() is called to
 * unwind whatever was set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* SIMQ depth: all request frames minus the high-priority ones and one. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Takes the initial startup reference, freezing the simq. */
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
		status = xpt_register_async(event, mprsas_async, sc,
		    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
844
/*
 * Tear down the SAS transport layer: deregister firmware events, drain the
 * event taskqueue, unwind any startup freeze, detach from CAM, and free the
 * per-target and per-sassc memory.  Safe to call on a partially-attached
 * adapter (this is mpr_attach_sas()'s error path).  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	/* Nothing to do if the SAS layer never attached. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Drop any outstanding startup references so the simq gets released. */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq if we were still in startup mode. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records hanging off each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
907
908 void
909 mprsas_discovery_end(struct mprsas_softc *sassc)
910 {
911 struct mpr_softc *sc = sassc->sc;
912
913 MPR_FUNCTRACE(sc);
914
915 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
916 callout_stop(&sassc->discovery_callout);
917
918 /*
919 * After discovery has completed, check the mapping table for any
920 * missing devices and update their missing counts. Only do this once
921 * whenever the driver is initialized so that missing counts aren't
922 * updated unnecessarily. Note that just because discovery has
923 * completed doesn't mean that events have been processed yet. The
924 * check_devices function is a callout timer that checks if ALL devices
925 * are missing. If so, it will wait a little longer for events to
926 * complete and keep resetting itself until some device in the mapping
927 * table is not missing, meaning that event processing has started.
928 */
929 if (sc->track_mapping_events) {
930 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
931 "completed. Check for missing devices in the mapping "
932 "table.\n");
933 callout_reset(&sc->device_check_callout,
934 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
935 sc);
936 }
937 }
938
/*
 * CAM action entry point for the SIM.  Dispatches the CCB by function code.
 * SCSI I/O, SMP I/O, and device reset return without calling xpt_done();
 * their handlers complete the CCB asynchronously.  Everything else is
 * completed inline at the bottom of the switch.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the controller/transport capabilities to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report negotiated transport settings for one target. */
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* Handle 0: no device currently at this target ID. */
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the stored link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate code: mark the speed field invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Returns without xpt_done(); the handler completes the CCB. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Returns without xpt_done(); completion is asynchronous. */
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Returns without xpt_done(); completion is asynchronous. */
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1066
1067 static void
1068 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1069 target_id_t target_id, lun_id_t lun_id)
1070 {
1071 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1072 struct cam_path *path;
1073
1074 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1075 ac_code, target_id, (uintmax_t)lun_id);
1076
1077 if (xpt_create_path(&path, NULL,
1078 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1079 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1080 "notification\n");
1081 return;
1082 }
1083
1084 xpt_async(ac_code, path, NULL);
1085 xpt_free_path(path);
1086 }
1087
/*
 * Force completion of every in-use command after a diag reset.  The
 * hardware will not return replies for these commands, so each one is
 * completed with a NULL reply: by running its completion callback, or by
 * waking a sleeping waiter.  Must be called with the mpr lock held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;	/* per-command: did we complete/wake it? */

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		/* Force the command out of any queue state. */
		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * A timed-out SATA ID command still owns its data buffer;
		 * free it here since no normal completion will run for it.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Let pollers observe the command as done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			/* A thread is sleeping on this command; wake it. */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing can be outstanding any more. */
	sc->io_cmds_active = 0;
}
1142
1143 void
1144 mprsas_handle_reinit(struct mpr_softc *sc)
1145 {
1146 int i;
1147
1148 /* Go back into startup mode and freeze the simq, so that CAM
1149 * doesn't send any commands until after we've rediscovered all
1150 * targets and found the proper device handles for them.
1151 *
1152 * After the reset, portenable will trigger discovery, and after all
1153 * discovery-related activities have finished, the simq will be
1154 * released.
1155 */
1156 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1157 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1158 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1159 mprsas_startup_increment(sc->sassc);
1160
1161 /* notify CAM of a bus reset */
1162 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1163 CAM_LUN_WILDCARD);
1164
1165 /* complete and cleanup after all outstanding commands */
1166 mprsas_complete_all_commands(sc);
1167
1168 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1169 __func__, sc->sassc->startup_refcount);
1170
1171 /* zero all the target handles, since they may change after the
1172 * reset, and we have to rediscover all the targets and use the new
1173 * handles.
1174 */
1175 for (i = 0; i < sc->sassc->maxtargets; i++) {
1176 if (sc->sassc->targets[i].outstanding != 0)
1177 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1178 i, sc->sassc->targets[i].outstanding);
1179 sc->sassc->targets[i].handle = 0x0;
1180 sc->sassc->targets[i].exp_dev_handle = 0x0;
1181 sc->sassc->targets[i].outstanding = 0;
1182 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1183 }
1184 }
1185 static void
1186 mprsas_tm_timeout(void *data)
1187 {
1188 struct mpr_command *tm = data;
1189 struct mpr_softc *sc = tm->cm_sc;
1190
1191 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1192
1193 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1194 "out\n", tm);
1195
1196 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1197 ("command not inqueue\n"));
1198
1199 tm->cm_state = MPR_CM_STATE_BUSY;
1200 mpr_reinit(sc);
1201 }
1202
/*
 * Completion handler for a logical-unit-reset TM.  If no commands remain
 * outstanding on the LUN, recovery for it is done and any further
 * timed-out commands on the target are aborted next; otherwise the LUN
 * reset is treated as failed and recovery escalates to a target reset
 * (or a controller reset for NVMe devices without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;	/* commands still queued on this LUN */
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a device reset was delivered to this LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse this TM frame for the next abort. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			    "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			/* Recovery done; release the TM frame. */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1303
/*
 * Completion handler for a target-reset TM.  A target reset is the last
 * per-device escalation step: if the target still has outstanding
 * commands afterwards, the only remaining option is a full controller
 * reset via mpr_reinit().
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		/* Tell CAM a device reset was delivered to this target. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1380
1381 #define MPR_RESET_TIMEOUT 30
1382
1383 int
1384 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1385 {
1386 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1387 struct mprsas_target *target;
1388 int err, timeout;
1389
1390 target = tm->cm_targ;
1391 if (target->handle == 0) {
1392 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1393 "%d\n", __func__, target->tid);
1394 return -1;
1395 }
1396
1397 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1398 req->DevHandle = htole16(target->handle);
1399 req->TaskType = type;
1400
1401 if (!target->is_nvme || sc->custom_nvme_tm_handling) {
1402 timeout = MPR_RESET_TIMEOUT;
1403 /*
1404 * Target reset method =
1405 * SAS Hard Link Reset / SATA Link Reset
1406 */
1407 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1408 } else {
1409 timeout = (target->controller_reset_timeout) ? (
1410 target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
1411 /* PCIe Protocol Level Reset*/
1412 req->MsgFlags =
1413 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
1414 }
1415
1416 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1417 /* XXX Need to handle invalid LUNs */
1418 MPR_SET_LUN(req->LUN, tm->cm_lun);
1419 tm->cm_targ->logical_unit_resets++;
1420 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1421 "Sending logical unit reset to target %u lun %d\n",
1422 target->tid, tm->cm_lun);
1423 tm->cm_complete = mprsas_logical_unit_reset_complete;
1424 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1425 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1426 tm->cm_targ->target_resets++;
1427 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1428 "Sending target reset to target %u\n", target->tid);
1429 tm->cm_complete = mprsas_target_reset_complete;
1430 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1431 }
1432 else {
1433 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1434 return -1;
1435 }
1436
1437 if (target->encl_level_valid) {
1438 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1439 "At enclosure level %d, slot %d, connector name (%4s)\n",
1440 target->encl_level, target->encl_slot,
1441 target->connector_name);
1442 }
1443
1444 tm->cm_data = NULL;
1445 tm->cm_complete_data = (void *)tm;
1446
1447 callout_reset(&tm->cm_callout, timeout * hz,
1448 mprsas_tm_timeout, tm);
1449
1450 err = mpr_map_command(sc, tm);
1451 if (err)
1452 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1453 "error %d sending reset type %u\n", err, type);
1454
1455 return err;
1456 }
1457
/*
 * Completion handler for an abort-task TM.  Walks the target's
 * timed-out command list: if it is empty, recovery is complete; if the
 * head has moved past the command we just aborted, reuse the TM to
 * abort the next one; otherwise the abort failed and recovery
 * escalates to a logical-unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1538
1539 #define MPR_ABORT_TIMEOUT 5
1540
1541 static int
1542 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1543 struct mpr_command *cm)
1544 {
1545 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1546 struct mprsas_target *targ;
1547 int err, timeout;
1548
1549 targ = cm->cm_targ;
1550 if (targ->handle == 0) {
1551 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1552 "%s null devhandle for target_id %d\n",
1553 __func__, cm->cm_ccb->ccb_h.target_id);
1554 return -1;
1555 }
1556
1557 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1558 "Aborting command %p\n", cm);
1559
1560 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1561 req->DevHandle = htole16(targ->handle);
1562 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1563
1564 /* XXX Need to handle invalid LUNs */
1565 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1566
1567 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1568
1569 tm->cm_data = NULL;
1570 tm->cm_complete = mprsas_abort_complete;
1571 tm->cm_complete_data = (void *)tm;
1572 tm->cm_targ = cm->cm_targ;
1573 tm->cm_lun = cm->cm_lun;
1574
1575 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1576 timeout = MPR_ABORT_TIMEOUT;
1577 else
1578 timeout = sc->nvme_abort_timeout;
1579
1580 callout_reset(&tm->cm_callout, timeout * hz,
1581 mprsas_tm_timeout, tm);
1582
1583 targ->aborts++;
1584
1585 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1586
1587 err = mpr_map_command(sc, tm);
1588 if (err)
1589 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1590 "error %d sending abort for cm %p SMID %u\n",
1591 err, cm, req->TaskMID);
1592 return err;
1593 }
1594
/*
 * Callout handler for a SCSI I/O command that exceeded its CAM timeout.
 * Marks the command as timed out, queues it on the target's timed-out
 * list, and starts abort-based recovery (or joins recovery already in
 * progress for the target).  Runs with the mpr lock held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		/* The interrupt pass just completed it; nothing to do. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submit time. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1682
1683 /**
1684 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1685 * to SCSI Unmap.
1686 * Return 0 - for success,
1687 * 1 - to immediately return back the command with success status to CAM
1688 * negative value - to fallback to firmware path i.e. issue scsi unmap
1689 * to FW without any translation.
1690 */
1691 static int
1692 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1693 union ccb *ccb, struct mprsas_target *targ)
1694 {
1695 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1696 struct ccb_scsiio *csio;
1697 struct unmap_parm_list *plist;
1698 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1699 struct nvme_command *c;
1700 int i, res;
1701 uint16_t ndesc, list_len, data_length;
1702 struct mpr_prp_page *prp_page_info;
1703 uint64_t nvme_dsm_ranges_dma_handle;
1704
1705 csio = &ccb->csio;
1706 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1707 if (!list_len) {
1708 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1709 return -EINVAL;
1710 }
1711
1712 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1713 if (!plist) {
1714 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1715 "save UNMAP data\n");
1716 return -ENOMEM;
1717 }
1718
1719 /* Copy SCSI unmap data to a local buffer */
1720 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1721
1722 /* return back the unmap command to CAM with success status,
1723 * if number of descripts is zero.
1724 */
1725 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1726 if (!ndesc) {
1727 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1728 "UNMAP cmd is Zero\n");
1729 res = 1;
1730 goto out;
1731 }
1732
1733 data_length = ndesc * sizeof(struct nvme_dsm_range);
1734 if (data_length > targ->MDTS) {
1735 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1736 "Device's MDTS: %d\n", data_length, targ->MDTS);
1737 res = -EINVAL;
1738 goto out;
1739 }
1740
1741 prp_page_info = mpr_alloc_prp_page(sc);
1742 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1743 "UNMAP command.\n", __func__));
1744
1745 /*
1746 * Insert the allocated PRP page into the command's PRP page list. This
1747 * will be freed when the command is freed.
1748 */
1749 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1750
1751 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1752 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1753
1754 bzero(nvme_dsm_ranges, data_length);
1755
1756 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1757 * for each descriptors contained in SCSI UNMAP data.
1758 */
1759 for (i = 0; i < ndesc; i++) {
1760 nvme_dsm_ranges[i].length =
1761 htole32(be32toh(plist->desc[i].nlb));
1762 nvme_dsm_ranges[i].starting_lba =
1763 htole64(be64toh(plist->desc[i].slba));
1764 nvme_dsm_ranges[i].attributes = 0;
1765 }
1766
1767 /* Build MPI2.6's NVMe Encapsulated Request Message */
1768 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1769 bzero(req, sizeof(*req));
1770 req->DevHandle = htole16(targ->handle);
1771 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1772 req->Flags = MPI26_NVME_FLAGS_WRITE;
1773 req->ErrorResponseBaseAddress.High =
1774 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1775 req->ErrorResponseBaseAddress.Low =
1776 htole32(cm->cm_sense_busaddr);
1777 req->ErrorResponseAllocationLength =
1778 htole16(sizeof(struct nvme_completion));
1779 req->EncapsulatedCommandLength =
1780 htole16(sizeof(struct nvme_command));
1781 req->DataLength = htole32(data_length);
1782
1783 /* Build NVMe DSM command */
1784 c = (struct nvme_command *) req->NVMe_Command;
1785 c->opc = NVME_OPC_DATASET_MANAGEMENT;
1786 c->nsid = htole32(csio->ccb_h.target_lun + 1);
1787 c->cdw10 = htole32(ndesc - 1);
1788 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1789
1790 cm->cm_length = data_length;
1791 cm->cm_data = NULL;
1792
1793 cm->cm_complete = mprsas_scsiio_complete;
1794 cm->cm_complete_data = ccb;
1795 cm->cm_targ = targ;
1796 cm->cm_lun = csio->ccb_h.target_lun;
1797 cm->cm_ccb = ccb;
1798
1799 cm->cm_desc.Default.RequestFlags =
1800 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1801
1802 csio->ccb_h.qos.sim_data = sbinuptime();
1803 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1804 mprsas_scsiio_timeout, cm, 0);
1805
1806 targ->issued++;
1807 targ->outstanding++;
1808 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1809 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1810
1811 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1812 __func__, cm, ccb, targ->outstanding);
1813
1814 mpr_build_nvme_prp(sc, cm, req,
1815 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1816 mpr_map_command(sc, cm);
1817
1818 out:
1819 free(plist, M_MPR);
1820 return 0;
1821 }
1822
1823 static void
1824 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1825 {
1826 MPI2_SCSI_IO_REQUEST *req;
1827 struct ccb_scsiio *csio;
1828 struct mpr_softc *sc;
1829 struct mprsas_target *targ;
1830 struct mprsas_lun *lun;
1831 struct mpr_command *cm;
1832 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1833 uint16_t eedp_flags;
1834 uint32_t mpi_control;
1835 int rc;
1836
1837 sc = sassc->sc;
1838 MPR_FUNCTRACE(sc);
1839 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1840
1841 csio = &ccb->csio;
1842 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1843 ("Target %d out of bounds in XPT_SCSI_IO\n",
1844 csio->ccb_h.target_id));
1845 targ = &sassc->targets[csio->ccb_h.target_id];
1846 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1847 if (targ->handle == 0x0) {
1848 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1849 __func__, csio->ccb_h.target_id);
1850 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1851 xpt_done(ccb);
1852 return;
1853 }
1854 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1855 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1856 "supported %u\n", __func__, csio->ccb_h.target_id);
1857 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1858 xpt_done(ccb);
1859 return;
1860 }
1861 /*
1862 * Sometimes, it is possible to get a command that is not "In
1863 * Progress" and was actually aborted by the upper layer. Check for
1864 * this here and complete the command without error.
1865 */
1866 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1867 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1868 "target %u\n", __func__, csio->ccb_h.target_id);
1869 xpt_done(ccb);
1870 return;
1871 }
1872 /*
1873 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1874 * that the volume has timed out. We want volumes to be enumerated
1875 * until they are deleted/removed, not just failed. In either event,
1876 * we're removing the target due to a firmware event telling us
1877 * the device is now gone (as opposed to some transient event). Since
1878 * we're opting to remove failed devices from the OS's view, we need
1879 * to propagate that status up the stack.
1880 */
1881 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1882 if (targ->devinfo == 0)
1883 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1884 else
1885 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1886 xpt_done(ccb);
1887 return;
1888 }
1889
1890 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1891 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1892 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1893 xpt_done(ccb);
1894 return;
1895 }
1896
1897 /*
1898 * If target has a reset in progress, freeze the devq and return. The
1899 * devq will be released when the TM reset is finished.
1900 */
1901 if (targ->flags & MPRSAS_TARGET_INRESET) {
1902 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1903 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1904 __func__, targ->tid);
1905 xpt_freeze_devq(ccb->ccb_h.path, 1);
1906 xpt_done(ccb);
1907 return;
1908 }
1909
1910 cm = mpr_alloc_command(sc);
1911 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1912 if (cm != NULL) {
1913 mpr_free_command(sc, cm);
1914 }
1915 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1916 xpt_freeze_simq(sassc->sim, 1);
1917 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1918 }
1919 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1920 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1921 xpt_done(ccb);
1922 return;
1923 }
1924
1925 /* For NVME device's issue UNMAP command directly to NVME drives by
1926 * constructing equivalent native NVMe DataSetManagement command.
1927 */
1928 scsi_opcode = scsiio_cdb_ptr(csio)[0];
1929 if (scsi_opcode == UNMAP &&
1930 targ->is_nvme &&
1931 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
1932 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
1933 if (rc == 1) { /* return command to CAM with success status */
1934 mpr_free_command(sc, cm);
1935 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1936 xpt_done(ccb);
1937 return;
1938 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
1939 return;
1940 }
1941
1942 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1943 bzero(req, sizeof(*req));
1944 req->DevHandle = htole16(targ->handle);
1945 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1946 req->MsgFlags = 0;
1947 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1948 req->SenseBufferLength = MPR_SENSE_LEN;
1949 req->SGLFlags = 0;
1950 req->ChainOffset = 0;
1951 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1952 req->SGLOffset1= 0;
1953 req->SGLOffset2= 0;
1954 req->SGLOffset3= 0;
1955 req->SkipCount = 0;
1956 req->DataLength = htole32(csio->dxfer_len);
1957 req->BidirectionalDataLength = 0;
1958 req->IoFlags = htole16(csio->cdb_len);
1959 req->EEDPFlags = 0;
1960
1961 /* Note: BiDirectional transfers are not supported */
1962 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1963 case CAM_DIR_IN:
1964 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1965 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1966 break;
1967 case CAM_DIR_OUT:
1968 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1969 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1970 break;
1971 case CAM_DIR_NONE:
1972 default:
1973 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1974 break;
1975 }
1976
1977 if (csio->cdb_len == 32)
1978 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1979 /*
1980 * It looks like the hardware doesn't require an explicit tag
1981 * number for each transaction. SAM Task Management not supported
1982 * at the moment.
1983 */
1984 switch (csio->tag_action) {
1985 case MSG_HEAD_OF_Q_TAG:
1986 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1987 break;
1988 case MSG_ORDERED_Q_TAG:
1989 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1990 break;
1991 case MSG_ACA_TASK:
1992 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1993 break;
1994 case CAM_TAG_ACTION_NONE:
1995 case MSG_SIMPLE_Q_TAG:
1996 default:
1997 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1998 break;
1999 }
2000 mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
2001 MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
2002 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2003 req->Control = htole32(mpi_control);
2004
2005 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2006 mpr_free_command(sc, cm);
2007 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2008 xpt_done(ccb);
2009 return;
2010 }
2011
2012 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2013 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2014 else {
2015 KASSERT(csio->cdb_len <= IOCDBLEN,
2016 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2017 "is not set", csio->cdb_len));
2018 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2019 }
2020 req->IoFlags = htole16(csio->cdb_len);
2021
2022 /*
2023 * Check if EEDP is supported and enabled. If it is then check if the
2024 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2025 * is formatted for EEDP support. If all of this is true, set CDB up
2026 * for EEDP transfer.
2027 */
2028 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2029 if (sc->eedp_enabled && eedp_flags) {
2030 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2031 if (lun->lun_id == csio->ccb_h.target_lun) {
2032 break;
2033 }
2034 }
2035
2036 if ((lun != NULL) && (lun->eedp_formatted)) {
2037 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2038 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2039 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2040 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2041 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2042 eedp_flags |=
2043 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2044 }
2045 req->EEDPFlags = htole16(eedp_flags);
2046
2047 /*
2048 * If CDB less than 32, fill in Primary Ref Tag with
2049 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2050 * already there. Also, set protection bit. FreeBSD
2051 * currently does not support CDBs bigger than 16, but
2052 * the code doesn't hurt, and will be here for the
2053 * future.
2054 */
2055 if (csio->cdb_len != 32) {
2056 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2057 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2058 PrimaryReferenceTag;
2059 for (i = 0; i < 4; i++) {
2060 *ref_tag_addr =
2061 req->CDB.CDB32[lba_byte + i];
2062 ref_tag_addr++;
2063 }
2064 req->CDB.EEDP32.PrimaryReferenceTag =
2065 htole32(req->
2066 CDB.EEDP32.PrimaryReferenceTag);
2067 req->CDB.EEDP32.PrimaryApplicationTagMask =
2068 0xFFFF;
2069 req->CDB.CDB32[1] =
2070 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2071 } else {
2072 eedp_flags |=
2073 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2074 req->EEDPFlags = htole16(eedp_flags);
2075 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2076 0x1F) | 0x20;
2077 }
2078 }
2079 }
2080
2081 cm->cm_length = csio->dxfer_len;
2082 if (cm->cm_length != 0) {
2083 cm->cm_data = ccb;
2084 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2085 } else {
2086 cm->cm_data = NULL;
2087 }
2088 cm->cm_sge = &req->SGL;
2089 cm->cm_sglsize = (32 - 24) * 4;
2090 cm->cm_complete = mprsas_scsiio_complete;
2091 cm->cm_complete_data = ccb;
2092 cm->cm_targ = targ;
2093 cm->cm_lun = csio->ccb_h.target_lun;
2094 cm->cm_ccb = ccb;
2095 /*
2096 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2097 * and set descriptor type.
2098 */
2099 if (targ->scsi_req_desc_type ==
2100 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2101 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2102 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2103 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2104 if (!sc->atomic_desc_capable) {
2105 cm->cm_desc.FastPathSCSIIO.DevHandle =
2106 htole16(targ->handle);
2107 }
2108 } else {
2109 cm->cm_desc.SCSIIO.RequestFlags =
2110 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2111 if (!sc->atomic_desc_capable)
2112 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
2113 }
2114
2115 csio->ccb_h.qos.sim_data = sbinuptime();
2116 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2117 mprsas_scsiio_timeout, cm, 0);
2118
2119 targ->issued++;
2120 targ->outstanding++;
2121 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2122 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2123
2124 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2125 __func__, cm, ccb, targ->outstanding);
2126
2127 mpr_map_command(sc, cm);
2128 return;
2129 }
2130
2131 /**
2132 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2133 */
2134 static void
2135 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2136 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2137 {
2138 u32 response_info;
2139 u8 *response_bytes;
2140 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2141 MPI2_IOCSTATUS_MASK;
2142 u8 scsi_state = mpi_reply->SCSIState;
2143 u8 scsi_status = mpi_reply->SCSIStatus;
2144 char *desc_ioc_state = NULL;
2145 char *desc_scsi_status = NULL;
2146 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2147
2148 if (log_info == 0x31170000)
2149 return;
2150
2151 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2152 ioc_status);
2153 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2154 scsi_status);
2155
2156 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2157 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2158 if (targ->encl_level_valid) {
2159 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2160 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2161 targ->connector_name);
2162 }
2163
2164 /*
2165 * We can add more detail about underflow data here
2166 * TO-DO
2167 */
2168 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2169 "scsi_state %b\n", desc_scsi_status, scsi_status,
2170 scsi_state, "\2" "\1AutosenseValid" "\2AutosenseFailed"
2171 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2172
2173 if (sc->mpr_debug & MPR_XINFO &&
2174 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2175 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2176 scsi_sense_print(csio);
2177 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2178 }
2179
2180 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2181 response_info = le32toh(mpi_reply->ResponseInfo);
2182 response_bytes = (u8 *)&response_info;
2183 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2184 response_bytes[0],
2185 mpr_describe_table(mpr_scsi_taskmgmt_string,
2186 response_bytes[0]));
2187 }
2188 }
2189
2190 /** mprsas_nvme_trans_status_code
2191 *
2192 * Convert Native NVMe command error status to
2193 * equivalent SCSI error status.
2194 *
2195 * Returns appropriate scsi_status
2196 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any SCT/SC pair not matched below:
	 * CHECK CONDITION / ILLEGAL REQUEST with no additional sense info.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants map to the same SCSI abort sense. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/*
	 * A full fixed-format sense structure is always produced; compute
	 * the residual against the space CAM provided for sense data.
	 */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Build the fixed sense data and mark autosense as valid for CAM. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2366
2367 /** mprsas_complete_nvme_unmap
2368 *
2369 * Complete native NVMe command issued using NVMe Encapsulated
2370 * Request Message.
2371 */
2372 static u8
2373 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2374 {
2375 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2376 struct nvme_completion *nvme_completion = NULL;
2377 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2378
2379 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2380 if (le16toh(mpi_reply->ErrorResponseCount)){
2381 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2382 scsi_status = mprsas_nvme_trans_status_code(
2383 nvme_completion->status, cm);
2384 }
2385 return scsi_status;
2386 }
2387
/*
 * Completion handler for SCSI I/O commands.  Called with the controller
 * mutex held.  Cancels the per-command timeout, syncs/unmaps DMA buffers,
 * translates the firmware reply (if any) into a CAM status, performs
 * driver bookkeeping (TLR control, SSU refcounting, deferred device
 * removal), and finally frees the command and completes the CCB.
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on, *scsi_cdb;
	int dir = 0, i;
	u16 alloc_len;
	struct mprsas_target *target;
	target_id_t target_id;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command is done; stop its timeout watchdog. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here? It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: this command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that happen while error recovery is in progress. */
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware. So we set the status here, and it will
		 * be retained below. We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames. We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use. If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back. That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
	 * flag, and use it in a few places in the rest of this function for
	 * convenience. Use the macro if available.
	 */
	scsi_cdb = scsiio_cdb_ptr(csio);

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent. All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply from firmware means success. */
				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
				csio->scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	/*
	 * A native NVMe UNMAP was issued as an NVMe Encapsulated Request;
	 * translate its completion into a SCSI status before continuing.
	 */
	target = &sassc->targets[target_id];
	if (scsi_cdb[0] == UNMAP &&
	    target->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
		csio->scsi_status = rep->SCSIStatus;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Translate the reply's IOC status into a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done. If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases. These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy firmware-supplied sense data into the CCB. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command. If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((scsi_cdb[0] == INQUIRY) &&
		    (scsi_cdb[1] & SI_EVPD) &&
		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Only scan the portion the device actually filled. */
			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((scsi_cdb[0] == INQUIRY) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume. In that case don't
		 * tell CAM that the volume is not there. We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted. If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely. So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries. We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpr_dprint(sc, MPR_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mpr_describe_table(mpr_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
		mpr_dprint(sc, MPR_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;

		/* An NVMe UNMAP's status was already translated above. */
		if (scsi_cdb[0] == UNMAP &&
		    target->is_nvme &&
		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

		break;
	}

	/* Emit XINFO-level diagnostics describing the completion. */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	/* Freeze the device queue on any error so ordering is preserved. */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device. Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2823
2824 static void
2825 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2826 {
2827 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2828 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2829 uint64_t sasaddr;
2830 union ccb *ccb;
2831
2832 ccb = cm->cm_complete_data;
2833
2834 /*
2835 * Currently there should be no way we can hit this case. It only
2836 * happens when we have a failure to allocate chain frames, and SMP
2837 * commands require two S/G elements only. That should be handled
2838 * in the standard request size.
2839 */
2840 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2841 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2842 "request!\n", __func__, cm->cm_flags);
2843 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2844 goto bailout;
2845 }
2846
2847 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2848 if (rpl == NULL) {
2849 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2850 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2851 goto bailout;
2852 }
2853
2854 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2855 sasaddr = le32toh(req->SASAddress.Low);
2856 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2857
2858 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2859 MPI2_IOCSTATUS_SUCCESS ||
2860 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2861 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2862 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2863 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2864 goto bailout;
2865 }
2866
2867 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2868 "completed successfully\n", __func__, (uintmax_t)sasaddr);
2869
2870 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2871 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2872 else
2873 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2874
2875 bailout:
2876 /*
2877 * We sync in both directions because we had DMAs in the S/G list
2878 * in both directions.
2879 */
2880 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2881 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2882 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2883 mpr_free_command(sc, cm);
2884 xpt_done(ccb);
2885 }
2886
/*
 * Build and dispatch an MPI2 SMP passthrough request for the given CCB to
 * the device at "sasaddr".  Completion is handled asynchronously by
 * mprsas_smpio_complete(); on any setup failure the CCB is completed
 * immediately with an appropriate error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/* Locate the request/response buffers based on the CCB data type. */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3056
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (the device itself if it embeds an SMP target, otherwise its parent
 * expander) and hand the request off to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise, we need to figure out what the address of
	 * the expander it hangs off of is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * New probe path: the parent's devinfo/sasaddr were cached
		 * on the target at discovery time, so no lookup is needed.
		 */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3182
/*
 * Handle an XPT_RESET_DEV CCB: issue a target reset task management
 * request to the device.  Completion is handled asynchronously by
 * mprsas_resetdev_complete().  The CCB is completed immediately with
 * CAM_RESRC_UNAVAIL if no TM command slot is available.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/*
	 * Choose the reset flavor: NVMe devices take a PCIe protocol-level
	 * reset unless the controller supports the custom NVMe TM handling.
	 */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Block further I/O to the target until the TM completes. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3231
/*
 * Completion handler for the target reset TM issued by
 * mprsas_action_resetdev().  Translates the TM response code into a CAM
 * status on the original CCB, announces the reset to CAM on success, and
 * frees the TM command.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode is a single-byte field in the MPI2 TM
	 * reply, so the le32toh() here looks suspect on big-endian hosts —
	 * confirm against mpi2_init.h before changing.
	 */
	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to this target. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}
3277
3278 static void
3279 mprsas_poll(struct cam_sim *sim)
3280 {
3281 struct mprsas_softc *sassc;
3282
3283 sassc = cam_sim_softc(sim);
3284
3285 if (sassc->sc->mpr_debug & MPR_TRACE) {
3286 /* frequent debug messages during a panic just slow
3287 * everything down too much.
3288 */
3289 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3290 __func__);
3291 sassc->sc->mpr_debug &= ~MPR_TRACE;
3292 }
3293
3294 mpr_intr_locked(sassc->sc);
3295 }
3296
/*
 * CAM async event callback.  The only event handled is AC_ADVINFO_CHANGED
 * with a long read capacity buffer, which is used to detect whether a LUN
 * is formatted for EEDP (protection information) and to cache its block
 * size for later I/O setup.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the tracking structure for this LUN, if any. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN; start tracking it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data via a
		 * XPT_DEV_ADVINFO CCB issued inline on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Protection type 1 and 3 formats are usable for EEDP;
		 * type 2 (and anything unknown) is not supported.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	case AC_FOUND_DEVICE:
	default:
		break;
	}
}
3396
3397 /*
3398 * Set the INRESET flag for this target so that no I/O will be sent to
3399 * the target until the reset has completed. If an I/O request does
3400 * happen, the devq will be frozen. The CCB holds the path which is
3401 * used to release the devq. The devq is released and the CCB is freed
3402 * when the TM completes.
3403 */
3404 void
3405 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3406 struct mprsas_target *target, lun_id_t lun_id)
3407 {
3408 union ccb *ccb;
3409 path_id_t path_id;
3410
3411 ccb = xpt_alloc_ccb_nowait();
3412 if (ccb) {
3413 path_id = cam_sim_path(sc->sassc->sim);
3414 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3415 target->tid, lun_id) != CAM_REQ_CMP) {
3416 xpt_free_ccb(ccb);
3417 } else {
3418 tm->cm_ccb = ccb;
3419 tm->cm_targ = target;
3420 target->flags |= MPRSAS_TARGET_INRESET;
3421 }
3422 }
3423 }
3424
3425 int
3426 mprsas_startup(struct mpr_softc *sc)
3427 {
3428 /*
3429 * Send the port enable message and set the wait_for_port_enable flag.
3430 * This flag helps to keep the simq frozen until all discovery events
3431 * are processed.
3432 */
3433 sc->wait_for_port_enable = 1;
3434 mprsas_send_portenable(sc);
3435 return (0);
3436 }
3437
3438 static int
3439 mprsas_send_portenable(struct mpr_softc *sc)
3440 {
3441 MPI2_PORT_ENABLE_REQUEST *request;
3442 struct mpr_command *cm;
3443
3444 MPR_FUNCTRACE(sc);
3445
3446 if ((cm = mpr_alloc_command(sc)) == NULL)
3447 return (EBUSY);
3448 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3449 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3450 request->MsgFlags = 0;
3451 request->VP_ID = 0;
3452 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3453 cm->cm_complete = mprsas_portenable_complete;
3454 cm->cm_data = NULL;
3455 cm->cm_sge = NULL;
3456
3457 mpr_map_command(sc, cm);
3458 mpr_dprint(sc, MPR_XINFO,
3459 "mpr_send_portenable finished cm %p req %p complete %p\n",
3460 cm, cm->cm_req, cm->cm_complete);
3461 return (0);
3462 }
3463
3464 static void
3465 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3466 {
3467 MPI2_PORT_ENABLE_REPLY *reply;
3468 struct mprsas_softc *sassc;
3469
3470 MPR_FUNCTRACE(sc);
3471 sassc = sc->sassc;
3472
3473 /*
3474 * Currently there should be no way we can hit this case. It only
3475 * happens when we have a failure to allocate chain frames, and
3476 * port enable commands don't have S/G lists.
3477 */
3478 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3479 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3480 "This should not happen!\n", __func__, cm->cm_flags);
3481 }
3482
3483 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3484 if (reply == NULL)
3485 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3486 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3487 MPI2_IOCSTATUS_SUCCESS)
3488 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3489
3490 mpr_free_command(sc, cm);
3491 /*
3492 * Done waiting for port enable to complete. Decrement the refcount.
3493 * If refcount is 0, discovery is complete and a rescan of the bus can
3494 * take place.
3495 */
3496 sc->wait_for_port_enable = 0;
3497 sc->port_enable_complete = 1;
3498 wakeup(&sc->port_enable_complete);
3499 mprsas_startup_decrement(sassc);
3500 }
3501
3502 int
3503 mprsas_check_id(struct mprsas_softc *sassc, int id)
3504 {
3505 struct mpr_softc *sc = sassc->sc;
3506 char *ids;
3507 char *name;
3508
3509 ids = &sc->exclude_ids[0];
3510 while((name = strsep(&ids, ",")) != NULL) {
3511 if (name[0] == '\0')
3512 continue;
3513 if (strtol(name, NULL, 0) == (long)id)
3514 return (1);
3515 }
3516
3517 return (0);
3518 }
3519
3520 void
3521 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3522 {
3523 struct mprsas_softc *sassc;
3524 struct mprsas_lun *lun, *lun_tmp;
3525 struct mprsas_target *targ;
3526 int i;
3527
3528 sassc = sc->sassc;
3529 /*
3530 * The number of targets is based on IOC Facts, so free all of
3531 * the allocated LUNs for each target and then the target buffer
3532 * itself.
3533 */
3534 for (i=0; i< maxtargets; i++) {
3535 targ = &sassc->targets[i];
3536 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3537 free(lun, M_MPR);
3538 }
3539 }
3540 free(sassc->targets, M_MPR);
3541
3542 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3543 M_MPR, M_WAITOK|M_ZERO);
3544 }
Cache object: 7c99fc99beef4bdc13c82da7daadcb3f
|