sys/dev/mpt/mpt_cam.c
1 /*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause
6 *
7 * Copyright (c) 2000, 2001 by Greg Ansley
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice immediately at the beginning of the file, without modification,
14 * this list of conditions, and the following disclaimer.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30 /*-
31 * Copyright (c) 2002, 2006 by Matthew Jacob
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions are
36 * met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
40 * substantially similar to the "NO WARRANTY" disclaimer below
41 * ("Disclaimer") and any redistribution must be conditioned upon including
42 * a substantially similar Disclaimer requirement for further binary
43 * redistribution.
44 * 3. Neither the names of the above listed copyright holders nor the names
45 * of any contributors may be used to endorse or promote products derived
46 * from this software without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
49 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
52 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
53 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
54 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
55 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
58 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 * Support from Chris Ellsworth in order to make SAS adapters work
61 * is gratefully acknowledged.
62 *
63 * Support from LSI-Logic has also gone a great deal toward making this a
64 * workable subsystem and is gratefully acknowledged.
65 */
66 /*-
67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68 * Copyright (c) 2005, WHEEL Sp. z o.o.
69 * Copyright (c) 2004, 2005 Justin T. Gibbs
70 * All rights reserved.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions are
74 * met:
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78 * substantially similar to the "NO WARRANTY" disclaimer below
79 * ("Disclaimer") and any redistribution must be conditioned upon including
80 * a substantially similar Disclaimer requirement for further binary
81 * redistribution.
82 * 3. Neither the names of the above listed copyright holders nor the names
83 * of any contributors may be used to endorse or promote products derived
84 * from this software without specific prior written permission.
85 *
86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 */
98 #include <sys/cdefs.h>
99 __FBSDID("$FreeBSD$");
100
101 #include <dev/mpt/mpt.h>
102 #include <dev/mpt/mpt_cam.h>
103 #include <dev/mpt/mpt_raid.h>
104
105 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
106 #include "dev/mpt/mpilib/mpi_init.h"
107 #include "dev/mpt/mpilib/mpi_targ.h"
108 #include "dev/mpt/mpilib/mpi_fc.h"
109 #include "dev/mpt/mpilib/mpi_sas.h"
110
111 #include <sys/callout.h>
112 #include <sys/kthread.h>
113 #include <sys/sysctl.h>
114
115 static void mpt_poll(struct cam_sim *);
116 static callout_func_t mpt_timeout;
117 static void mpt_action(struct cam_sim *, union ccb *);
118 static int
119 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
120 static void mpt_setwidth(struct mpt_softc *, int, int);
121 static void mpt_setsync(struct mpt_softc *, int, int, int);
122 static int mpt_update_spi_config(struct mpt_softc *, int);
123
124 static mpt_reply_handler_t mpt_scsi_reply_handler;
125 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
126 static mpt_reply_handler_t mpt_fc_els_reply_handler;
127 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
128 MSG_DEFAULT_REPLY *);
129 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
130 static int mpt_fc_reset_link(struct mpt_softc *, int);
131
132 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
133 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
134 static void mpt_recovery_thread(void *arg);
135 static void mpt_recover_commands(struct mpt_softc *mpt);
136
137 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
138 target_id_t, lun_id_t, u_int, int);
139
140 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
141 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
142 static int mpt_add_els_buffers(struct mpt_softc *mpt);
143 static int mpt_add_target_commands(struct mpt_softc *mpt);
144 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
146 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
147 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
148 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
149 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
150 uint8_t, uint8_t const *, u_int);
151 static void
152 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
153 tgt_resource_t *, int);
154 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
155 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
156 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
157 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
158
159 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
160 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
161 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
162 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
163
164 static mpt_probe_handler_t mpt_cam_probe;
165 static mpt_attach_handler_t mpt_cam_attach;
166 static mpt_enable_handler_t mpt_cam_enable;
167 static mpt_ready_handler_t mpt_cam_ready;
168 static mpt_event_handler_t mpt_cam_event;
169 static mpt_reset_handler_t mpt_cam_ioc_reset;
170 static mpt_detach_handler_t mpt_cam_detach;
171
172 static struct mpt_personality mpt_cam_personality =
173 {
174 .name = "mpt_cam",
175 .probe = mpt_cam_probe,
176 .attach = mpt_cam_attach,
177 .enable = mpt_cam_enable,
178 .ready = mpt_cam_ready,
179 .event = mpt_cam_event,
180 .reset = mpt_cam_ioc_reset,
181 .detach = mpt_cam_detach,
182 };
183
184 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
185 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
186
187 int mpt_enable_sata_wc = -1;
188 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
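/*
 * Example (an illustrative loader.conf sketch): hw.mpt.enable_sata_wc="1"
 * enables write caching on attached SATA disks, "0" disables it, and the
 * default of -1 leaves each drive's current setting untouched.
 */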
189
190 static int
191 mpt_cam_probe(struct mpt_softc *mpt)
192 {
193 int role;
194
195 /*
196 * Only attach to nodes that support the initiator or target role
197 * (or want to) or have RAID physical devices that need CAM pass-thru
198 * support.
199 */
200 if (mpt->do_cfg_role) {
201 role = mpt->cfg_role;
202 } else {
203 role = mpt->role;
204 }
205 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
206 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
207 return (0);
208 }
209 return (ENODEV);
210 }
211
212 static int
213 mpt_cam_attach(struct mpt_softc *mpt)
214 {
215 struct cam_devq *devq;
216 mpt_handler_t handler;
217 int maxq;
218 int error;
219
220 MPT_LOCK(mpt);
221 TAILQ_INIT(&mpt->request_timeout_list);
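/*
 * Size the SIMQ by the smaller of our request pool and the credits the
 * IOC actually advertises.
 */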
222 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
223 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
224
225 handler.reply_handler = mpt_scsi_reply_handler;
226 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
227 &scsi_io_handler_id);
228 if (error != 0) {
229 MPT_UNLOCK(mpt);
230 goto cleanup;
231 }
232
233 handler.reply_handler = mpt_scsi_tmf_reply_handler;
234 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
235 &scsi_tmf_handler_id);
236 if (error != 0) {
237 MPT_UNLOCK(mpt);
238 goto cleanup;
239 }
240
241 /*
242 * If we're fibre channel and could support target mode, we register
243 * an ELS reply handler and give it resources.
244 */
245 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
246 handler.reply_handler = mpt_fc_els_reply_handler;
247 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
248 &fc_els_handler_id);
249 if (error != 0) {
250 MPT_UNLOCK(mpt);
251 goto cleanup;
252 }
253 if (mpt_add_els_buffers(mpt) == FALSE) {
254 error = ENOMEM;
255 MPT_UNLOCK(mpt);
256 goto cleanup;
257 }
258 maxq -= mpt->els_cmds_allocated;
259 }
260
261 /*
262 * If we support target mode, we register a reply handler for it,
263 * but don't add command resources until we actually enable target
264 * mode.
265 */
266 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
267 handler.reply_handler = mpt_scsi_tgt_reply_handler;
268 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
269 &mpt->scsi_tgt_handler_id);
270 if (error != 0) {
271 MPT_UNLOCK(mpt);
272 goto cleanup;
273 }
274 }
275
276 if (mpt->is_sas) {
277 handler.reply_handler = mpt_sata_pass_reply_handler;
278 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
279 &sata_pass_handler_id);
280 if (error != 0) {
281 MPT_UNLOCK(mpt);
282 goto cleanup;
283 }
284 }
285
286 /*
287 * We keep one request reserved for timeout TMF requests.
288 */
289 mpt->tmf_req = mpt_get_request(mpt, FALSE);
290 if (mpt->tmf_req == NULL) {
291 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
292 error = ENOMEM;
293 MPT_UNLOCK(mpt);
294 goto cleanup;
295 }
296
297 /*
298 * Mark the request as free even though not on the free list.
299 * There is only one TMF request allowed to be outstanding at
300 * a time and the TMF routines perform their own allocation
301 * tracking using the standard state flags.
302 */
303 mpt->tmf_req->state = REQ_STATE_FREE;
304 maxq--;
305
306 /*
307 * The rest of this is CAM foo, for which we need to drop our lock
308 */
309 MPT_UNLOCK(mpt);
310
311 if (mpt_spawn_recovery_thread(mpt) != 0) {
312 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
313 error = ENOMEM;
314 goto cleanup;
315 }
316
317 /*
318 * Create the device queue for our SIM(s).
319 */
320 devq = cam_simq_alloc(maxq);
321 if (devq == NULL) {
322 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
323 error = ENOMEM;
324 goto cleanup;
325 }
326
327 /*
328 * Construct our SIM entry.
329 */
330 mpt->sim =
331 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
332 if (mpt->sim == NULL) {
333 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
334 cam_simq_free(devq);
335 error = ENOMEM;
336 goto cleanup;
337 }
338
339 /*
340 * Register exactly this bus.
341 */
342 MPT_LOCK(mpt);
343 if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
344 mpt_prt(mpt, "Bus registration Failed!\n");
345 error = ENOMEM;
346 MPT_UNLOCK(mpt);
347 goto cleanup;
348 }
349
350 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
351 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
352 mpt_prt(mpt, "Unable to allocate Path!\n");
353 error = ENOMEM;
354 MPT_UNLOCK(mpt);
355 goto cleanup;
356 }
357 MPT_UNLOCK(mpt);
358
359 /*
360 * Only register a second bus for RAID physical
361 * devices if the controller supports RAID.
362 */
363 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
364 return (0);
365 }
366
367 /*
368 * Create a "bus" to export all hidden disks to CAM.
369 */
370 mpt->phydisk_sim =
371 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
372 if (mpt->phydisk_sim == NULL) {
373 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
374 error = ENOMEM;
375 goto cleanup;
376 }
377
378 /*
379 * Register this bus.
380 */
381 MPT_LOCK(mpt);
382 if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
383 CAM_SUCCESS) {
384 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
385 error = ENOMEM;
386 MPT_UNLOCK(mpt);
387 goto cleanup;
388 }
389
390 if (xpt_create_path(&mpt->phydisk_path, NULL,
391 cam_sim_path(mpt->phydisk_sim),
392 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
393 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
394 error = ENOMEM;
395 MPT_UNLOCK(mpt);
396 goto cleanup;
397 }
398 MPT_UNLOCK(mpt);
399 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
400 return (0);
401
402 cleanup:
403 mpt_cam_detach(mpt);
404 return (error);
405 }
406
407 /*
408 * Read FC configuration information
409 */
410 static int
411 mpt_read_config_info_fc(struct mpt_softc *mpt)
412 {
413 struct sysctl_ctx_list *ctx;
414 struct sysctl_oid *tree;
415 char *topology = NULL;
416 int rv;
417
418 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
419 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
420 if (rv) {
421 return (-1);
422 }
423 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
424 mpt->mpt_fcport_page0.Header.PageVersion,
425 mpt->mpt_fcport_page0.Header.PageLength,
426 mpt->mpt_fcport_page0.Header.PageNumber,
427 mpt->mpt_fcport_page0.Header.PageType);
428
429 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
430 sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
431 if (rv) {
432 mpt_prt(mpt, "failed to read FC Port Page 0\n");
433 return (-1);
434 }
435 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
436
437 switch (mpt->mpt_fcport_page0.CurrentSpeed) {
438 case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
439 mpt->mpt_fcport_speed = 1;
440 break;
441 case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
442 mpt->mpt_fcport_speed = 2;
443 break;
444 case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
445 mpt->mpt_fcport_speed = 10;
446 break;
447 case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
448 mpt->mpt_fcport_speed = 4;
449 break;
450 default:
451 mpt->mpt_fcport_speed = 0;
452 break;
453 }
454
455 switch (mpt->mpt_fcport_page0.Flags &
456 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
457 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
458 mpt->mpt_fcport_speed = 0;
459 topology = "<NO LOOP>";
460 break;
461 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
462 topology = "N-Port";
463 break;
464 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
465 topology = "NL-Port";
466 break;
467 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
468 topology = "F-Port";
469 break;
470 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
471 topology = "FL-Port";
472 break;
473 default:
474 mpt->mpt_fcport_speed = 0;
475 topology = "?";
476 break;
477 }
478
479 mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
480 | mpt->mpt_fcport_page0.WWNN.Low;
481 mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
482 | mpt->mpt_fcport_page0.WWPN.Low;
483 mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
484
485 mpt_lprt(mpt, MPT_PRT_INFO,
486 "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
487 "Speed %u-Gbit\n", topology,
488 (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
489 mpt->mpt_fcport_speed);
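/*
 * Drop the MPT lock across sysctl registration, which may sleep.
 */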
490 MPT_UNLOCK(mpt);
491 ctx = device_get_sysctl_ctx(mpt->dev);
492 tree = device_get_sysctl_tree(mpt->dev);
493
494 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
495 "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
496 "World Wide Node Name");
497
498 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
499 "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
500 "World Wide Port Name");
501
502 MPT_LOCK(mpt);
503 return (0);
504 }
505
506 /*
507 * Set FC configuration information.
508 */
509 static int
510 mpt_set_initial_config_fc(struct mpt_softc *mpt)
511 {
512 CONFIG_PAGE_FC_PORT_1 fc;
513 U32 fl;
514 int r, doit = 0;
515 int role;
516
517 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
518 &fc.Header, FALSE, 5000);
519 if (r) {
520 mpt_prt(mpt, "failed to read FC page 1 header\n");
521 return (mpt_fc_reset_link(mpt, 1));
522 }
523
524 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
525 &fc.Header, sizeof (fc), FALSE, 5000);
526 if (r) {
527 mpt_prt(mpt, "failed to read FC page 1\n");
528 return (mpt_fc_reset_link(mpt, 1));
529 }
530 mpt2host_config_page_fc_port_1(&fc);
531
532 /*
533 * Check our flags to make sure we support the role we want.
534 */
535 doit = 0;
536 role = 0;
537 fl = fc.Flags;
538
539 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
540 role |= MPT_ROLE_INITIATOR;
541 }
542 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
543 role |= MPT_ROLE_TARGET;
544 }
545
546 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
547
548 if (mpt->do_cfg_role == 0) {
549 role = mpt->cfg_role;
550 } else {
551 mpt->do_cfg_role = 0;
552 }
553
554 if (role != mpt->cfg_role) {
555 if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
556 if ((role & MPT_ROLE_INITIATOR) == 0) {
557 mpt_prt(mpt, "adding initiator role\n");
558 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
559 doit++;
560 } else {
561 mpt_prt(mpt, "keeping initiator role\n");
562 }
563 } else if (role & MPT_ROLE_INITIATOR) {
564 mpt_prt(mpt, "removing initiator role\n");
565 doit++;
566 }
567 if (mpt->cfg_role & MPT_ROLE_TARGET) {
568 if ((role & MPT_ROLE_TARGET) == 0) {
569 mpt_prt(mpt, "adding target role\n");
570 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
571 doit++;
572 } else {
573 mpt_prt(mpt, "keeping target role\n");
574 }
575 } else if (role & MPT_ROLE_TARGET) {
576 mpt_prt(mpt, "removing target role\n");
577 doit++;
578 }
579 mpt->role = mpt->cfg_role;
580 }
581
582 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
583 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
584 mpt_prt(mpt, "adding OXID option\n");
585 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
586 doit++;
587 }
588 }
589
590 if (doit) {
591 fc.Flags = fl;
592 host2mpt_config_page_fc_port_1(&fc);
593 r = mpt_write_cfg_page(mpt,
594 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
595 sizeof(fc), FALSE, 5000);
596 if (r != 0) {
597 mpt_prt(mpt, "failed to update NVRAM with changes\n");
598 return (0);
599 }
600 mpt_prt(mpt, "NOTE: NVRAM changes will not take "
601 "effect until next reboot or IOC reset\n");
602 }
603 return (0);
604 }
605
606 static int
607 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
608 {
609 ConfigExtendedPageHeader_t hdr;
610 struct mptsas_phyinfo *phyinfo;
611 SasIOUnitPage0_t *buffer;
612 int error, len, i;
613
614 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
615 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
616 &hdr, 0, 10000);
617 if (error)
618 goto out;
619 if (hdr.ExtPageLength == 0) {
620 error = ENXIO;
621 goto out;
622 }
623
624 len = hdr.ExtPageLength * 4;
625 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
626 if (buffer == NULL) {
627 error = ENOMEM;
628 goto out;
629 }
630
631 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
632 0, &hdr, buffer, len, 0, 10000);
633 if (error) {
634 free(buffer, M_DEVBUF);
635 goto out;
636 }
637
638 portinfo->num_phys = buffer->NumPhys;
639 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
640 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
641 if (portinfo->phy_info == NULL) {
642 free(buffer, M_DEVBUF);
643 error = ENOMEM;
644 goto out;
645 }
646
647 for (i = 0; i < portinfo->num_phys; i++) {
648 phyinfo = &portinfo->phy_info[i];
649 phyinfo->phy_num = i;
650 phyinfo->port_id = buffer->PhyData[i].Port;
651 phyinfo->negotiated_link_rate =
652 buffer->PhyData[i].NegotiatedLinkRate;
653 phyinfo->handle =
654 le16toh(buffer->PhyData[i].ControllerDevHandle);
655 }
656
657 free(buffer, M_DEVBUF);
658 out:
659 return (error);
660 }
661
662 static int
663 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
664 uint32_t form, uint32_t form_specific)
665 {
666 ConfigExtendedPageHeader_t hdr;
667 SasPhyPage0_t *buffer;
668 int error;
669
670 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
671 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
672 0, 10000);
673 if (error)
674 goto out;
675 if (hdr.ExtPageLength == 0) {
676 error = ENXIO;
677 goto out;
678 }
679
680 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
681 if (buffer == NULL) {
682 error = ENOMEM;
683 goto out;
684 }
685
686 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
687 form + form_specific, &hdr, buffer,
688 sizeof(SasPhyPage0_t), 0, 10000);
689 if (error) {
690 free(buffer, M_DEVBUF);
691 goto out;
692 }
693
694 phy_info->hw_link_rate = buffer->HwLinkRate;
695 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
696 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
697 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
698
699 free(buffer, M_DEVBUF);
700 out:
701 return (error);
702 }
703
704 static int
705 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
706 uint32_t form, uint32_t form_specific)
707 {
708 ConfigExtendedPageHeader_t hdr;
709 SasDevicePage0_t *buffer;
710 uint64_t sas_address;
711 int error = 0;
712
713 bzero(device_info, sizeof(*device_info));
714 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
715 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
716 &hdr, 0, 10000);
717 if (error)
718 goto out;
719 if (hdr.ExtPageLength == 0) {
720 error = ENXIO;
721 goto out;
722 }
723
724 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
725 if (buffer == NULL) {
726 error = ENOMEM;
727 goto out;
728 }
729
730 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
731 form + form_specific, &hdr, buffer,
732 sizeof(SasDevicePage0_t), 0, 10000);
733 if (error) {
734 free(buffer, M_DEVBUF);
735 goto out;
736 }
737
738 device_info->dev_handle = le16toh(buffer->DevHandle);
739 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
740 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
741 device_info->slot = le16toh(buffer->Slot);
742 device_info->phy_num = buffer->PhyNum;
743 device_info->physical_port = buffer->PhysicalPort;
744 device_info->target_id = buffer->TargetID;
745 device_info->bus = buffer->Bus;
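/* Copy out the 64-bit SAS address and convert from little-endian. */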
746 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
747 device_info->sas_address = le64toh(sas_address);
748 device_info->device_info = le32toh(buffer->DeviceInfo);
749
750 free(buffer, M_DEVBUF);
751 out:
752 return (error);
753 }
754
755 /*
756 * Read SAS configuration information: the IO unit page plus per-phy and
* attached-device pages.
757 */
758 static int
759 mpt_read_config_info_sas(struct mpt_softc *mpt)
760 {
761 struct mptsas_portinfo *portinfo;
762 struct mptsas_phyinfo *phyinfo;
763 int error, i;
764
765 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
766 if (portinfo == NULL)
767 return (ENOMEM);
768
769 error = mptsas_sas_io_unit_pg0(mpt, portinfo);
770 if (error) {
771 free(portinfo, M_DEVBUF);
772 return (0);
773 }
774
775 for (i = 0; i < portinfo->num_phys; i++) {
776 phyinfo = &portinfo->phy_info[i];
777 error = mptsas_sas_phy_pg0(mpt, phyinfo,
778 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
779 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
780 if (error)
781 break;
782 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
783 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
784 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
785 phyinfo->handle);
786 if (error)
787 break;
788 phyinfo->identify.phy_num = phyinfo->phy_num = i;
789 if (phyinfo->attached.dev_handle)
790 error = mptsas_sas_device_pg0(mpt,
791 &phyinfo->attached,
792 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
793 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
794 phyinfo->attached.dev_handle);
795 if (error)
796 break;
797 }
798 mpt->sas_portinfo = portinfo;
799 return (0);
800 }
801
802 static void
803 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
804 int enabled)
805 {
806 SataPassthroughRequest_t *pass;
807 request_t *req;
808 int error, status;
809
810 req = mpt_get_request(mpt, 0);
811 if (req == NULL)
812 return;
813
814 pass = req->req_vbuf;
815 bzero(pass, sizeof(SataPassthroughRequest_t));
816 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
817 pass->TargetID = devinfo->target_id;
818 pass->Bus = devinfo->bus;
819 pass->PassthroughFlags = 0;
820 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
821 pass->DataLength = 0;
822 pass->MsgContext = htole32(req->index | sata_pass_handler_id);
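/*
 * Register host-to-device FIS: 0x27 is the FIS type, 0x80 sets the
 * C (command) bit, 0xef is ATA SET FEATURES, and the features field
 * selects 0x02 (enable write cache) or 0x82 (disable write cache).
 */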
823 pass->CommandFIS[0] = 0x27;
824 pass->CommandFIS[1] = 0x80;
825 pass->CommandFIS[2] = 0xef;
826 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
827 pass->CommandFIS[7] = 0x40;
828 pass->CommandFIS[15] = 0x08;
829
830 mpt_check_doorbell(mpt);
831 mpt_send_cmd(mpt, req);
832 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
833 10 * 1000);
834 if (error) {
835 mpt_free_request(mpt, req);
836 printf("error %d sending passthrough\n", error);
837 return;
838 }
839
840 status = le16toh(req->IOCStatus);
841 if (status != MPI_IOCSTATUS_SUCCESS) {
842 mpt_free_request(mpt, req);
843 printf("IOCSTATUS %d\n", status);
844 return;
845 }
846
847 mpt_free_request(mpt, req);
848 }
849
850 /*
851 * Set SAS configuration information: apply the SATA write-cache tunable
* to attached SATA devices.
852 */
853 static int
854 mpt_set_initial_config_sas(struct mpt_softc *mpt)
855 {
856 struct mptsas_phyinfo *phyinfo;
857 int i;
858
859 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
860 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
861 phyinfo = &mpt->sas_portinfo->phy_info[i];
862 if (phyinfo->attached.dev_handle == 0)
863 continue;
864 if ((phyinfo->attached.device_info &
865 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
866 continue;
867 if (bootverbose)
868 device_printf(mpt->dev,
869 "%sabling SATA WC on phy %d\n",
870 (mpt_enable_sata_wc) ? "En" : "Dis", i);
871 mptsas_set_sata_wc(mpt, &phyinfo->attached,
872 mpt_enable_sata_wc);
873 }
874 }
875
876 return (0);
877 }
878
879 static int
880 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
881 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
882 {
883
884 if (req != NULL) {
885 if (reply_frame != NULL) {
886 req->IOCStatus = le16toh(reply_frame->IOCStatus);
887 }
888 req->state &= ~REQ_STATE_QUEUED;
889 req->state |= REQ_STATE_DONE;
890 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
891 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
892 wakeup(req);
893 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
894 /*
895 * Whew- we can free this request (late completion)
896 */
897 mpt_free_request(mpt, req);
898 }
899 }
900
901 return (TRUE);
902 }
903
904 /*
905 * Read SCSI configuration information
906 */
907 static int
908 mpt_read_config_info_spi(struct mpt_softc *mpt)
909 {
910 int rv, i;
911
912 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
913 &mpt->mpt_port_page0.Header, FALSE, 5000);
914 if (rv) {
915 return (-1);
916 }
917 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
918 mpt->mpt_port_page0.Header.PageVersion,
919 mpt->mpt_port_page0.Header.PageLength,
920 mpt->mpt_port_page0.Header.PageNumber,
921 mpt->mpt_port_page0.Header.PageType);
922
923 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
924 &mpt->mpt_port_page1.Header, FALSE, 5000);
925 if (rv) {
926 return (-1);
927 }
928 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
929 mpt->mpt_port_page1.Header.PageVersion,
930 mpt->mpt_port_page1.Header.PageLength,
931 mpt->mpt_port_page1.Header.PageNumber,
932 mpt->mpt_port_page1.Header.PageType);
933
934 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
935 &mpt->mpt_port_page2.Header, FALSE, 5000);
936 if (rv) {
937 return (-1);
938 }
939 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
940 mpt->mpt_port_page2.Header.PageVersion,
941 mpt->mpt_port_page2.Header.PageLength,
942 mpt->mpt_port_page2.Header.PageNumber,
943 mpt->mpt_port_page2.Header.PageType);
944
945 for (i = 0; i < 16; i++) {
946 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
947 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
948 if (rv) {
949 return (-1);
950 }
951 mpt_lprt(mpt, MPT_PRT_DEBUG,
952 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
953 mpt->mpt_dev_page0[i].Header.PageVersion,
954 mpt->mpt_dev_page0[i].Header.PageLength,
955 mpt->mpt_dev_page0[i].Header.PageNumber,
956 mpt->mpt_dev_page0[i].Header.PageType);
957
958 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
959 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
960 if (rv) {
961 return (-1);
962 }
963 mpt_lprt(mpt, MPT_PRT_DEBUG,
964 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
965 mpt->mpt_dev_page1[i].Header.PageVersion,
966 mpt->mpt_dev_page1[i].Header.PageLength,
967 mpt->mpt_dev_page1[i].Header.PageNumber,
968 mpt->mpt_dev_page1[i].Header.PageType);
969 }
970
971 /*
972 * At this point, we don't *have* to fail. As long as we have
973 * valid config header information, we can (barely) lurch
974 * along.
975 */
976
977 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
978 sizeof(mpt->mpt_port_page0), FALSE, 5000);
979 if (rv) {
980 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
981 } else {
982 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
983 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
984 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
985 mpt->mpt_port_page0.Capabilities,
986 mpt->mpt_port_page0.PhysicalInterface);
987 }
988
989 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
990 sizeof(mpt->mpt_port_page1), FALSE, 5000);
991 if (rv) {
992 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
993 } else {
994 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
995 mpt_lprt(mpt, MPT_PRT_DEBUG,
996 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
997 mpt->mpt_port_page1.Configuration,
998 mpt->mpt_port_page1.OnBusTimerValue);
999 }
1000
1001 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1002 sizeof(mpt->mpt_port_page2), FALSE, 5000);
1003 if (rv) {
1004 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1005 } else {
1006 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1007 "Port Page 2: Flags %x Settings %x\n",
1008 mpt->mpt_port_page2.PortFlags,
1009 mpt->mpt_port_page2.PortSettings);
1010 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1011 for (i = 0; i < 16; i++) {
1012 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1013 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1014 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1015 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1016 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1017 }
1018 }
1019
1020 for (i = 0; i < 16; i++) {
1021 rv = mpt_read_cur_cfg_page(mpt, i,
1022 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1023 FALSE, 5000);
1024 if (rv) {
1025 mpt_prt(mpt,
1026 "cannot read SPI Target %d Device Page 0\n", i);
1027 continue;
1028 }
1029 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1030 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1031 "target %d page 0: Negotiated Params %x Information %x\n",
1032 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1033 mpt->mpt_dev_page0[i].Information);
1034
1035 rv = mpt_read_cur_cfg_page(mpt, i,
1036 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1037 FALSE, 5000);
1038 if (rv) {
1039 mpt_prt(mpt,
1040 "cannot read SPI Target %d Device Page 1\n", i);
1041 continue;
1042 }
1043 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1044 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1045 "target %d page 1: Requested Params %x Configuration %x\n",
1046 i, mpt->mpt_dev_page1[i].RequestedParameters,
1047 mpt->mpt_dev_page1[i].Configuration);
1048 }
1049 return (0);
1050 }
1051
1052 /*
1053 * Validate SPI configuration information.
1054 *
1055 * In particular, validate SPI Port Page 1.
1056 */
1057 static int
1058 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1059 {
1060 int error, i, pp1val;
1061
1062 mpt->mpt_disc_enable = 0xff;
1063 mpt->mpt_tag_enable = 0;
1064
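/*
 * Port Page 1 Configuration should hold our initiator ID and, shifted
 * into the response-ID field, the matching one-hot bit.
 */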
1065 pp1val = ((1 << mpt->mpt_ini_id) <<
1066 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1067 if (mpt->mpt_port_page1.Configuration != pp1val) {
1068 CONFIG_PAGE_SCSI_PORT_1 tmp;
1069
1070 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1071 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1072 tmp = mpt->mpt_port_page1;
1073 tmp.Configuration = pp1val;
1074 host2mpt_config_page_scsi_port_1(&tmp);
1075 error = mpt_write_cur_cfg_page(mpt, 0,
1076 &tmp.Header, sizeof(tmp), FALSE, 5000);
1077 if (error) {
1078 return (-1);
1079 }
1080 error = mpt_read_cur_cfg_page(mpt, 0,
1081 &tmp.Header, sizeof(tmp), FALSE, 5000);
1082 if (error) {
1083 return (-1);
1084 }
1085 mpt2host_config_page_scsi_port_1(&tmp);
1086 if (tmp.Configuration != pp1val) {
1087 mpt_prt(mpt,
1088 "failed to reset SPI Port Page 1 Config value\n");
1089 return (-1);
1090 }
1091 mpt->mpt_port_page1 = tmp;
1092 }
1093
1094 /*
1095 * The purpose of this exercise is to get
1096 * all targets back to async/narrow.
1097 *
1098 * We skip this step if the BIOS has already negotiated
1099 * speeds with the targets.
1100 */
1101 i = mpt->mpt_port_page2.PortSettings &
1102 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1103 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1104 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1105 "honoring BIOS transfer negotiations\n");
1106 } else {
1107 for (i = 0; i < 16; i++) {
1108 mpt->mpt_dev_page1[i].RequestedParameters = 0;
1109 mpt->mpt_dev_page1[i].Configuration = 0;
1110 (void) mpt_update_spi_config(mpt, i);
1111 }
1112 }
1113 return (0);
1114 }
1115
1116 static int
1117 mpt_cam_enable(struct mpt_softc *mpt)
1118 {
1119 int error;
1120
1121 MPT_LOCK(mpt);
1122
1123 error = EIO;
1124 if (mpt->is_fc) {
1125 if (mpt_read_config_info_fc(mpt)) {
1126 goto out;
1127 }
1128 if (mpt_set_initial_config_fc(mpt)) {
1129 goto out;
1130 }
1131 } else if (mpt->is_sas) {
1132 if (mpt_read_config_info_sas(mpt)) {
1133 goto out;
1134 }
1135 if (mpt_set_initial_config_sas(mpt)) {
1136 goto out;
1137 }
1138 } else if (mpt->is_spi) {
1139 if (mpt_read_config_info_spi(mpt)) {
1140 goto out;
1141 }
1142 if (mpt_set_initial_config_spi(mpt)) {
1143 goto out;
1144 }
1145 }
1146 error = 0;
1147
1148 out:
1149 MPT_UNLOCK(mpt);
1150 return (error);
1151 }
1152
1153 static void
1154 mpt_cam_ready(struct mpt_softc *mpt)
1155 {
1156
1157 /*
1158 * If we're in target mode, hang out resources now
1159 * so we don't cause the world to hang talking to us.
1160 */
1161 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1162 /*
1163 * Try to add some target command resources
1164 */
1165 MPT_LOCK(mpt);
1166 if (mpt_add_target_commands(mpt) == FALSE) {
1167 mpt_prt(mpt, "failed to add target commands\n");
1168 }
1169 MPT_UNLOCK(mpt);
1170 }
1171 mpt->ready = 1;
1172 }
1173
1174 static void
1175 mpt_cam_detach(struct mpt_softc *mpt)
1176 {
1177 mpt_handler_t handler;
1178
1179 MPT_LOCK(mpt);
1180 mpt->ready = 0;
1181 mpt_terminate_recovery_thread(mpt);
1182
1183 handler.reply_handler = mpt_scsi_reply_handler;
1184 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1185 scsi_io_handler_id);
1186 handler.reply_handler = mpt_scsi_tmf_reply_handler;
1187 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1188 scsi_tmf_handler_id);
1189 handler.reply_handler = mpt_fc_els_reply_handler;
1190 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1191 fc_els_handler_id);
1192 handler.reply_handler = mpt_scsi_tgt_reply_handler;
1193 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1194 mpt->scsi_tgt_handler_id);
1195 handler.reply_handler = mpt_sata_pass_reply_handler;
1196 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1197 sata_pass_handler_id);
1198
1199 if (mpt->tmf_req != NULL) {
1200 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1201 mpt_free_request(mpt, mpt->tmf_req);
1202 mpt->tmf_req = NULL;
1203 }
1204 if (mpt->sas_portinfo != NULL) {
1205 free(mpt->sas_portinfo, M_DEVBUF);
1206 mpt->sas_portinfo = NULL;
1207 }
1208
1209 if (mpt->sim != NULL) {
1210 xpt_free_path(mpt->path);
1211 xpt_bus_deregister(cam_sim_path(mpt->sim));
1212 cam_sim_free(mpt->sim, TRUE);
1213 mpt->sim = NULL;
1214 }
1215
1216 if (mpt->phydisk_sim != NULL) {
1217 xpt_free_path(mpt->phydisk_path);
1218 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1219 cam_sim_free(mpt->phydisk_sim, TRUE);
1220 mpt->phydisk_sim = NULL;
1221 }
1222 MPT_UNLOCK(mpt);
1223 }
1224
1225 /* This routine is used after a system crash to dump core onto the
1226 * swap device. */
1227 static void
1228 mpt_poll(struct cam_sim *sim)
1229 {
1230 struct mpt_softc *mpt;
1231
1232 mpt = (struct mpt_softc *)cam_sim_softc(sim);
1233 mpt_intr(mpt);
1234 }
1235
1236 /*
1237 * Watchdog timeout routine for SCSI requests.
1238 */
1239 static void
1240 mpt_timeout(void *arg)
1241 {
1242 union ccb *ccb;
1243 struct mpt_softc *mpt;
1244 request_t *req;
1245
1246 ccb = (union ccb *)arg;
1247 mpt = ccb->ccb_h.ccb_mpt_ptr;
1248
1249 MPT_LOCK_ASSERT(mpt);
1250 req = ccb->ccb_h.ccb_req_ptr;
1251 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1252 req->serno, ccb, req->ccb);
1253 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1254 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1255 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1256 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1257 req->state |= REQ_STATE_TIMEDOUT;
1258 mpt_wakeup_recovery_thread(mpt);
1259 }
1260 }
1261
1262 /*
1263 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
1264 * directly.
1265 *
1266 * Takes a list of physical segments and builds the SGL for the SCSI IO
1267 * command, then forwards the command to the IOC after one last check that CAM has not
1268 * aborted the transaction.
1269 */
1270 static void
1271 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1272 {
1273 request_t *req, *trq;
1274 char *mpt_off;
1275 union ccb *ccb;
1276 struct mpt_softc *mpt;
1277 bus_addr_t chain_list_addr;
1278 int first_lim, seg, this_seg_lim;
1279 uint32_t addr, cur_off, flags, nxt_off, tf;
1280 void *sglp = NULL;
1281 MSG_REQUEST_HEADER *hdrp;
1282 SGE_SIMPLE64 *se;
1283 SGE_CHAIN64 *ce;
1284 int istgt = 0;
1285
1286 req = (request_t *)arg;
1287 ccb = req->ccb;
1288
1289 mpt = ccb->ccb_h.ccb_mpt_ptr;
1290 req = ccb->ccb_h.ccb_req_ptr;
1291
1292 hdrp = req->req_vbuf;
1293 mpt_off = req->req_vbuf;
1294
1295 if (error == 0) {
1296 switch (hdrp->Function) {
1297 case MPI_FUNCTION_SCSI_IO_REQUEST:
1298 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1299 istgt = 0;
1300 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1301 break;
1302 case MPI_FUNCTION_TARGET_ASSIST:
1303 istgt = 1;
1304 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1305 break;
1306 default:
1307 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1308 hdrp->Function);
1309 error = EINVAL;
1310 break;
1311 }
1312 }
1313
1314 bad:
1315 if (error != 0) {
1316 if (error != EFBIG && error != ENOMEM) {
1317 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1318 }
1319 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1320 cam_status status;
1321 mpt_freeze_ccb(ccb);
1322 if (error == EFBIG) {
1323 status = CAM_REQ_TOO_BIG;
1324 } else if (error == ENOMEM) {
1325 if (mpt->outofbeer == 0) {
1326 mpt->outofbeer = 1;
1327 xpt_freeze_simq(mpt->sim, 1);
1328 mpt_lprt(mpt, MPT_PRT_DEBUG,
1329 "FREEZEQ\n");
1330 }
1331 status = CAM_REQUEUE_REQ;
1332 } else {
1333 status = CAM_REQ_CMP_ERR;
1334 }
1335 mpt_set_ccb_status(ccb, status);
1336 }
1337 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1338 request_t *cmd_req =
1339 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1340 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1341 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1342 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1343 }
1344 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1345 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1346 xpt_done(ccb);
1347 mpt_free_request(mpt, req);
1348 return;
1349 }
1350
1351 /*
1352 * No data to transfer?
1353 * Just make a single simple SGL with zero length.
1354 */
1355
1356 if (mpt->verbose >= MPT_PRT_DEBUG) {
1357 int tidx = ((char *)sglp) - mpt_off;
1358 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1359 }
1360
1361 if (nseg == 0) {
1362 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1363 MPI_pSGE_SET_FLAGS(se1,
1364 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1365 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1366 se1->FlagsLength = htole32(se1->FlagsLength);
1367 goto out;
1368 }
1369
1370 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1371 if (istgt == 0) {
1372 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1373 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1374 }
1375 } else {
1376 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1377 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1378 }
1379 }
1380
1381 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1382 bus_dmasync_op_t op;
1383 if (istgt == 0) {
1384 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1385 op = BUS_DMASYNC_PREREAD;
1386 } else {
1387 op = BUS_DMASYNC_PREWRITE;
1388 }
1389 } else {
1390 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1391 op = BUS_DMASYNC_PREWRITE;
1392 } else {
1393 op = BUS_DMASYNC_PREREAD;
1394 }
1395 }
1396 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1397 }
1398
1399 /*
1400 * Okay, fill in what we can at the end of the command frame.
1401 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1402 * the command frame.
1403 *
1404 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1405 * SIMPLE64 pointers and start doing CHAIN64 entries after
1406 * that.
1407 */
1408
1409 if (nseg < MPT_NSGL_FIRST(mpt)) {
1410 first_lim = nseg;
1411 } else {
1412 /*
1413 * Leave room for CHAIN element
1414 */
1415 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1416 }
1417
1418 se = (SGE_SIMPLE64 *) sglp;
1419 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1420 tf = flags;
1421 memset(se, 0, sizeof (*se));
1422 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1423 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1424 if (sizeof(bus_addr_t) > 4) {
1425 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1426 /* SAS1078 36GB limitation WAR */
1427 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1428 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1429 addr |= (1U << 31);
1430 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1431 }
1432 se->Address.High = htole32(addr);
1433 }
1434 if (seg == first_lim - 1) {
1435 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1436 }
1437 if (seg == nseg - 1) {
1438 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1439 MPI_SGE_FLAGS_END_OF_BUFFER;
1440 }
1441 MPI_pSGE_SET_FLAGS(se, tf);
1442 se->FlagsLength = htole32(se->FlagsLength);
1443 }
1444
1445 if (seg == nseg) {
1446 goto out;
1447 }
1448
1449 /*
1450 * Tell the IOC where to find the first chain element.
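* The chain offset is expressed in 32-bit words, hence the shift by two.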
1451 */
1452 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1453 nxt_off = MPT_RQSL(mpt);
1454 trq = req;
1455
1456 /*
1457 * Make up the rest of the data segments out of a chain element
1458 * (contained in the current request frame) which points to
1459 * SIMPLE64 elements in the next request frame, possibly ending
1460 * with *another* chain element (if there's more).
1461 */
1462 while (seg < nseg) {
1463 /*
1464 * Point to the chain descriptor. Note that the chain
1465 * descriptor is at the end of the *previous* list (whether
1466 * chain or simple).
1467 */
1468 ce = (SGE_CHAIN64 *) se;
1469
1470 /*
1471 * Before we change our current pointer, make sure we won't
1472 * overflow the request area with this frame. Note that we
1473 * test against 'greater than' here as it's okay in this case
1474 * to have next offset be just outside the request area.
1475 */
1476 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1477 nxt_off = MPT_REQUEST_AREA;
1478 goto next_chain;
1479 }
1480
1481 /*
1482 * Set our SGE element pointer to the beginning of the chain
1483 * list and update our next chain list offset.
1484 */
1485 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1486 cur_off = nxt_off;
1487 nxt_off += MPT_RQSL(mpt);
1488
1489 /*
1490 * Now initialize the chain descriptor.
1491 */
1492 memset(ce, 0, sizeof (*ce));
1493
1494 /*
1495 * Get the physical address of the chain list.
1496 */
1497 chain_list_addr = trq->req_pbuf;
1498 chain_list_addr += cur_off;
1499 if (sizeof (bus_addr_t) > 4) {
1500 ce->Address.High =
1501 htole32(((uint64_t)chain_list_addr) >> 32);
1502 }
1503 ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1504 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1505 MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1506
1507 /*
1508 * If we have more than a frame's worth of segments left,
1509 * set up the chain list to have the last element be another
1510 * chain descriptor.
1511 */
1512 if ((nseg - seg) > MPT_NSGL(mpt)) {
1513 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1514 /*
1515 * The length of the chain is the length in bytes of the
1516 * number of segments plus the next chain element.
1517 *
1518 * The next chain descriptor offset is the length,
1519 * in words, of the number of segments.
1520 */
1521 ce->Length = (this_seg_lim - seg) *
1522 sizeof (SGE_SIMPLE64);
1523 ce->NextChainOffset = ce->Length >> 2;
1524 ce->Length += sizeof (SGE_CHAIN64);
1525 } else {
1526 this_seg_lim = nseg;
1527 ce->Length = (this_seg_lim - seg) *
1528 sizeof (SGE_SIMPLE64);
1529 }
1530 ce->Length = htole16(ce->Length);
1531
1532 /*
1533 * Fill in the chain list SGE elements with our segment data.
1534 *
1535 * If we're the last element in this chain list, set the last
1536 * element flag. If we're the completely last element period,
1537 * set the end of list and end of buffer flags.
1538 */
1539 while (seg < this_seg_lim) {
1540 tf = flags;
1541 memset(se, 0, sizeof (*se));
1542 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1543 se->Address.Low = htole32(dm_segs->ds_addr &
1544 0xffffffff);
1545 if (sizeof (bus_addr_t) > 4) {
1546 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1547 /* SAS1078 36GB limitation WAR */
1548 if (mpt->is_1078 &&
1549 (((uint64_t)dm_segs->ds_addr +
1550 MPI_SGE_LENGTH(se->FlagsLength)) >>
1551 32) == 9) {
1552 addr |= (1U << 31);
1553 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1554 }
1555 se->Address.High = htole32(addr);
1556 }
1557 if (seg == this_seg_lim - 1) {
1558 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1559 }
1560 if (seg == nseg - 1) {
1561 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1562 MPI_SGE_FLAGS_END_OF_BUFFER;
1563 }
1564 MPI_pSGE_SET_FLAGS(se, tf);
1565 se->FlagsLength = htole32(se->FlagsLength);
1566 se++;
1567 seg++;
1568 dm_segs++;
1569 }
1570
1571 next_chain:
1572 /*
1573 * If we have more segments to do and we've used up all of
1574 * the space in a request area, go allocate another one
1575 * and chain to that.
1576 */
1577 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1578 request_t *nrq;
1579
1580 nrq = mpt_get_request(mpt, FALSE);
1581
1582 if (nrq == NULL) {
1583 error = ENOMEM;
1584 goto bad;
1585 }
1586
1587 /*
1588 * Append the new request area on the tail of our list.
1589 */
1590 if ((trq = req->chain) == NULL) {
1591 req->chain = nrq;
1592 } else {
1593 while (trq->chain != NULL) {
1594 trq = trq->chain;
1595 }
1596 trq->chain = nrq;
1597 }
1598 trq = nrq;
1599 mpt_off = trq->req_vbuf;
1600 if (mpt->verbose >= MPT_PRT_DEBUG) {
1601 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1602 }
1603 nxt_off = 0;
1604 }
1605 }
1606 out:
1607
1608 /*
1609 * Last time we need to check if this CCB needs to be aborted.
1610 */
1611 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1612 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1613 request_t *cmd_req =
1614 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1615 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1616 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1617 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1618 }
1619 mpt_prt(mpt,
1620 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1621 ccb->ccb_h.status & CAM_STATUS_MASK);
1622 if (nseg) {
1623 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1624 }
1625 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1626 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1627 xpt_done(ccb);
1628 mpt_free_request(mpt, req);
1629 return;
1630 }
1631
1632 ccb->ccb_h.status |= CAM_SIM_QUEUED;
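/*
 * CCB timeouts are in milliseconds; skip the watchdog entirely for
 * CAM_TIME_INFINITY.
 */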
1633 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1634 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
1635 mpt_timeout, ccb);
1636 }
1637 if (mpt->verbose > MPT_PRT_DEBUG) {
1638 int nc = 0;
1639 mpt_print_request(req->req_vbuf);
1640 for (trq = req->chain; trq; trq = trq->chain) {
1641 printf(" Additional Chain Area %d\n", nc++);
1642 mpt_dump_sgl(trq->req_vbuf, 0);
1643 }
1644 }
1645
1646 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1647 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1648 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1649 #ifdef WE_TRUST_AUTO_GOOD_STATUS
1650 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1651 ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1652 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1653 } else {
1654 tgt->state = TGT_STATE_MOVING_DATA;
1655 }
1656 #else
1657 tgt->state = TGT_STATE_MOVING_DATA;
1658 #endif
1659 }
1660 mpt_send_cmd(mpt, req);
1661 }
1662
1663 static void
1664 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1665 {
1666 request_t *req, *trq;
1667 char *mpt_off;
1668 union ccb *ccb;
1669 struct mpt_softc *mpt;
1670 int seg, first_lim;
1671 uint32_t flags, nxt_off;
1672 void *sglp = NULL;
1673 MSG_REQUEST_HEADER *hdrp;
1674 SGE_SIMPLE32 *se;
1675 SGE_CHAIN32 *ce;
1676 int istgt = 0;
1677
1678 req = (request_t *)arg;
1679 ccb = req->ccb;
1680
1681 mpt = ccb->ccb_h.ccb_mpt_ptr;
1682 req = ccb->ccb_h.ccb_req_ptr;
1683
1684 hdrp = req->req_vbuf;
1685 mpt_off = req->req_vbuf;
1686
1687 if (error == 0) {
1688 switch (hdrp->Function) {
1689 case MPI_FUNCTION_SCSI_IO_REQUEST:
1690 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1691 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1692 break;
1693 case MPI_FUNCTION_TARGET_ASSIST:
1694 istgt = 1;
1695 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1696 break;
1697 default:
1698 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1699 hdrp->Function);
1700 error = EINVAL;
1701 break;
1702 }
1703 }
1704
1705 bad:
1706 if (error != 0) {
1707 if (error != EFBIG && error != ENOMEM) {
1708 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1709 }
1710 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1711 cam_status status;
1712 mpt_freeze_ccb(ccb);
1713 if (error == EFBIG) {
1714 status = CAM_REQ_TOO_BIG;
1715 } else if (error == ENOMEM) {
1716 if (mpt->outofbeer == 0) {
1717 mpt->outofbeer = 1;
1718 xpt_freeze_simq(mpt->sim, 1);
1719 mpt_lprt(mpt, MPT_PRT_DEBUG,
1720 "FREEZEQ\n");
1721 }
1722 status = CAM_REQUEUE_REQ;
1723 } else {
1724 status = CAM_REQ_CMP_ERR;
1725 }
1726 mpt_set_ccb_status(ccb, status);
1727 }
1728 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1729 request_t *cmd_req =
1730 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1731 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1732 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1733 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1734 }
1735 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1736 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1737 xpt_done(ccb);
1738 mpt_free_request(mpt, req);
1739 return;
1740 }
1741
1742 /*
1743 * No data to transfer?
1744 * Just make a single simple SGL with zero length.
1745 */
1746
1747 if (mpt->verbose >= MPT_PRT_DEBUG) {
1748 int tidx = ((char *)sglp) - mpt_off;
1749 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1750 }
1751
1752 if (nseg == 0) {
1753 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1754 MPI_pSGE_SET_FLAGS(se1,
1755 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1756 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1757 se1->FlagsLength = htole32(se1->FlagsLength);
1758 goto out;
1759 }
1760
1761 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1762 if (istgt == 0) {
1763 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1764 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1765 }
1766 } else {
1767 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1768 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1769 }
1770 }
1771
1772 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1773 bus_dmasync_op_t op;
1774 if (istgt) {
1775 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1776 op = BUS_DMASYNC_PREREAD;
1777 } else {
1778 op = BUS_DMASYNC_PREWRITE;
1779 }
1780 } else {
1781 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1782 op = BUS_DMASYNC_PREWRITE;
1783 } else {
1784 op = BUS_DMASYNC_PREREAD;
1785 }
1786 }
1787 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1788 }
1789
1790 /*
1791 * Okay, fill in what we can at the end of the command frame.
1792 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1793 * the command frame.
1794 *
1795 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1796 * SIMPLE32 pointers and start doing CHAIN32 entries after
1797 * that.
1798 */
1799
1800 if (nseg < MPT_NSGL_FIRST(mpt)) {
1801 first_lim = nseg;
1802 } else {
1803 /*
1804 * Leave room for CHAIN element
1805 */
1806 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1807 }
1808
1809 se = (SGE_SIMPLE32 *) sglp;
1810 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1811 uint32_t tf;
1812
1813 memset(se, 0, sizeof (*se));
1814 se->Address = htole32(dm_segs->ds_addr);
1815
1816 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1817 tf = flags;
1818 if (seg == first_lim - 1) {
1819 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1820 }
1821 if (seg == nseg - 1) {
1822 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1823 MPI_SGE_FLAGS_END_OF_BUFFER;
1824 }
1825 MPI_pSGE_SET_FLAGS(se, tf);
1826 se->FlagsLength = htole32(se->FlagsLength);
1827 }
1828
1829 if (seg == nseg) {
1830 goto out;
1831 }
1832
1833 /*
1834 * Tell the IOC where to find the first chain element.
1835 */
1836 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1837 nxt_off = MPT_RQSL(mpt);
1838 trq = req;
1839
1840 /*
1841 * Make up the rest of the data segments out of a chain element
1842 * (contained in the current request frame) which points to
1843 * SIMPLE32 elements in the next request frame, possibly ending
1844 * with *another* chain element (if there's more).
1845 */
1846 while (seg < nseg) {
1847 int this_seg_lim;
1848 uint32_t tf, cur_off;
1849 bus_addr_t chain_list_addr;
1850
1851 /*
1852 * Point to the chain descriptor. Note that the chain
1853 * descriptor is at the end of the *previous* list (whether
1854 * chain or simple).
1855 */
1856 ce = (SGE_CHAIN32 *) se;
1857
1858 /*
1859 * Before we change our current pointer, make sure we won't
1860 * overflow the request area with this frame. Note that we
1861 * test against 'greater than' here as it's okay in this case
1862 * to have next offset be just outside the request area.
1863 */
1864 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1865 nxt_off = MPT_REQUEST_AREA;
1866 goto next_chain;
1867 }
1868
1869 /*
1870 * Set our SGE element pointer to the beginning of the chain
1871 * list and update our next chain list offset.
1872 */
1873 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1874 cur_off = nxt_off;
1875 nxt_off += MPT_RQSL(mpt);
1876
1877 /*
1878 * Now initialize the chain descriptor.
1879 */
1880 memset(ce, 0, sizeof (*ce));
1881
1882 /*
1883 * Get the physical address of the chain list.
1884 */
1885 chain_list_addr = trq->req_pbuf;
1886 chain_list_addr += cur_off;
1887
1888 ce->Address = htole32(chain_list_addr);
1889 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1890
1891 /*
1892 * If we have more than a frame's worth of segments left,
1893 * set up the chain list to have the last element be another
1894 * chain descriptor.
1895 */
1896 if ((nseg - seg) > MPT_NSGL(mpt)) {
1897 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1898 /*
1899 * The length of the chain is the length in bytes of the
1900 * number of segments plus the next chain element.
1901 *
1902 * The next chain descriptor offset is the length,
1903 * in words, of the number of segments.
1904 */
1905 ce->Length = (this_seg_lim - seg) *
1906 sizeof (SGE_SIMPLE32);
1907 ce->NextChainOffset = ce->Length >> 2;
1908 ce->Length += sizeof (SGE_CHAIN32);
1909 } else {
1910 this_seg_lim = nseg;
1911 ce->Length = (this_seg_lim - seg) *
1912 sizeof (SGE_SIMPLE32);
1913 }
1914 ce->Length = htole16(ce->Length);
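/*
 * A worked example of the arithmetic above, with a hypothetical
 * MPT_NSGL(mpt) of 32 and 40 segments remaining: 31 SIMPLE32
 * elements (31 * 8 = 248 bytes) fit ahead of the trailing chain
 * element, so NextChainOffset = 248 >> 2 = 62 words and Length
 * becomes 248 + 8 = 256 bytes once the chain element is included.
 */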
1915
1916 /*
1917 * Fill in the chain list SGE elements with our segment data.
1918 *
1919 * If we're the last element in this chain list, set the last
1920 * element flag. If we're the completely last element period,
1921 * set the end of list and end of buffer flags.
1922 */
1923 while (seg < this_seg_lim) {
1924 memset(se, 0, sizeof (*se));
1925 se->Address = htole32(dm_segs->ds_addr);
1926
1927 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1928 tf = flags;
1929 if (seg == this_seg_lim - 1) {
1930 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1931 }
1932 if (seg == nseg - 1) {
1933 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1934 MPI_SGE_FLAGS_END_OF_BUFFER;
1935 }
1936 MPI_pSGE_SET_FLAGS(se, tf);
1937 se->FlagsLength = htole32(se->FlagsLength);
1938 se++;
1939 seg++;
1940 dm_segs++;
1941 }
1942
1943 next_chain:
1944 /*
1945 * If we have more segments to do and we've used up all of
1946 * the space in a request area, go allocate another one
1947 * and chain to that.
1948 */
1949 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1950 request_t *nrq;
1951
1952 nrq = mpt_get_request(mpt, FALSE);
1953
1954 if (nrq == NULL) {
1955 error = ENOMEM;
1956 goto bad;
1957 }
1958
1959 /*
1960 * Append the new request area on the tail of our list.
1961 */
1962 if ((trq = req->chain) == NULL) {
1963 req->chain = nrq;
1964 } else {
1965 while (trq->chain != NULL) {
1966 trq = trq->chain;
1967 }
1968 trq->chain = nrq;
1969 }
1970 trq = nrq;
1971 mpt_off = trq->req_vbuf;
1972 if (mpt->verbose >= MPT_PRT_DEBUG) {
1973 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1974 }
1975 nxt_off = 0;
1976 }
1977 }
1978 out:
1979
1980 /*
1981 * Last time we need to check if this CCB needs to be aborted.
1982 */
1983 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1984 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1985 request_t *cmd_req =
1986 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1987 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1988 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1989 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1990 }
1991 mpt_prt(mpt,
1992 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1993 ccb->ccb_h.status & CAM_STATUS_MASK);
1994 if (nseg) {
1995 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1996 }
1997 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1998 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1999 xpt_done(ccb);
2000 mpt_free_request(mpt, req);
2001 return;
2002 }
2003
2004 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2005 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2006 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
2007 mpt_timeout, ccb);
2008 }
2009 if (mpt->verbose > MPT_PRT_DEBUG) {
2010 int nc = 0;
2011 mpt_print_request(req->req_vbuf);
2012 for (trq = req->chain; trq; trq = trq->chain) {
2013 printf(" Additional Chain Area %d\n", nc++);
2014 mpt_dump_sgl(trq->req_vbuf, 0);
2015 }
2016 }
2017
2018 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2019 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2020 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2021 #ifdef WE_TRUST_AUTO_GOOD_STATUS
2022 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2023 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2024 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2025 } else {
2026 tgt->state = TGT_STATE_MOVING_DATA;
2027 }
2028 #else
2029 tgt->state = TGT_STATE_MOVING_DATA;
2030 #endif
2031 }
2032 mpt_send_cmd(mpt, req);
2033 }
2034
2035 static void
2036 mpt_start(struct cam_sim *sim, union ccb *ccb)
2037 {
2038 request_t *req;
2039 struct mpt_softc *mpt;
2040 MSG_SCSI_IO_REQUEST *mpt_req;
2041 struct ccb_scsiio *csio = &ccb->csio;
2042 struct ccb_hdr *ccbh = &ccb->ccb_h;
2043 bus_dmamap_callback_t *cb;
2044 target_id_t tgt;
2045 int raid_passthru;
2046 int error;
2047
2048 /* Get the pointer for the physical adapter */
2049 mpt = ccb->ccb_h.ccb_mpt_ptr;
2050 raid_passthru = (sim == mpt->phydisk_sim);
2051
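/*
 * If no request frames are available, freeze the SIM queue and ask
 * CAM to requeue; the reply path thaws the queue (and clears
 * "outofbeer") when a command completes and frees a frame.
 */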
2052 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2053 if (mpt->outofbeer == 0) {
2054 mpt->outofbeer = 1;
2055 xpt_freeze_simq(mpt->sim, 1);
2056 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2057 }
2058 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2059 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2060 xpt_done(ccb);
2061 return;
2062 }
2063 #ifdef INVARIANTS
2064 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2065 #endif
2066
2067 if (sizeof (bus_addr_t) > 4) {
2068 cb = mpt_execute_req_a64;
2069 } else {
2070 cb = mpt_execute_req;
2071 }
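/*
 * On platforms with bus addresses wider than 32 bits, use the a64
 * variant of the dma load callback, which builds 64-bit SGEs rather
 * than the SIMPLE32/CHAIN32 forms constructed in mpt_execute_req.
 */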
2072
2073 /*
2074 * Link the ccb and the request structure so we can find
2075 * the other knowing either the request or the ccb
2076 */
2077 req->ccb = ccb;
2078 ccb->ccb_h.ccb_req_ptr = req;
2079
2080 /* Now we build the command for the IOC */
2081 mpt_req = req->req_vbuf;
2082 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2083
2084 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2085 if (raid_passthru) {
2086 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2087 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2088 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2089 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2090 xpt_done(ccb);
2091 return;
2092 }
2093 mpt_req->Bus = 0; /* we never set bus here */
2094 } else {
2095 tgt = ccb->ccb_h.target_id;
2096 mpt_req->Bus = 0; /* XXX */
2097
2098 }
2099 mpt_req->SenseBufferLength =
2100 (csio->sense_len < MPT_SENSE_SIZE) ?
2101 csio->sense_len : MPT_SENSE_SIZE;
2102
2103 /*
2104 * We use the message context to find the request structure when we
2105 * get the command completion interrupt from the IOC.
2106 */
2107 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2108
2109 /* Which physical device to do the I/O on */
2110 mpt_req->TargetID = tgt;
2111
2112 be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
2113
2114 /* Set the direction of the transfer */
2115 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2116 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2117 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2118 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2119 } else {
2120 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2121 }
2122
2123 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2124 switch(ccb->csio.tag_action) {
2125 case MSG_HEAD_OF_Q_TAG:
2126 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2127 break;
2128 case MSG_ACA_TASK:
2129 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2130 break;
2131 case MSG_ORDERED_Q_TAG:
2132 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2133 break;
2134 case MSG_SIMPLE_Q_TAG:
2135 default:
2136 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2137 break;
2138 }
2139 } else {
2140 if (mpt->is_fc || mpt->is_sas) {
2141 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2142 } else {
2143 /* XXX No such thing for a target doing packetized. */
2144 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2145 }
2146 }
2147
2148 if (mpt->is_spi) {
2149 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2150 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2151 }
2152 }
2153 mpt_req->Control = htole32(mpt_req->Control);
2154
2155 /* Copy the scsi command block into place */
2156 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2157 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2158 } else {
2159 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2160 }
2161
2162 mpt_req->CDBLength = csio->cdb_len;
2163 mpt_req->DataLength = htole32(csio->dxfer_len);
2164 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2165
2166 /*
2167 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2168 */
2169 if (mpt->verbose == MPT_PRT_DEBUG) {
2170 U32 df;
2171 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2172 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2173 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2174 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2175 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2176 mpt_prtc(mpt, "(%s %u byte%s ",
2177 (df == MPI_SCSIIO_CONTROL_READ)?
2178 "read" : "write", csio->dxfer_len,
2179 (csio->dxfer_len == 1)? ")" : "s)");
2180 }
2181 mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
2182 (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
2183 }
2184
2185 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2186 req, 0);
2187 if (error == EINPROGRESS) {
2188 /*
2189 * So as to maintain ordering, freeze the controller queue
2190 * until our mapping is returned.
2191 */
2192 xpt_freeze_simq(mpt->sim, 1);
2193 ccbh->status |= CAM_RELEASE_SIMQ;
2194 }
2195 }
2196
2197 static int
2198 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2199 int sleep_ok)
2200 {
2201 int error;
2202 uint16_t status;
2203 uint8_t response;
2204
2205 error = mpt_scsi_send_tmf(mpt,
2206 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2207 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2208 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2209 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2210 0, /* XXX How do I get the channel ID? */
2211 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2212 lun != CAM_LUN_WILDCARD ? lun : 0,
2213 0, sleep_ok);
2214
2215 if (error != 0) {
2216 /*
2217 * mpt_scsi_send_tmf hard resets on failure, so no
2218 * need to do so here.
2219 */
2220 mpt_prt(mpt,
2221 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2222 return (EIO);
2223 }
2224
2225 /* Wait for bus reset to be processed by the IOC. */
2226 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2227 REQ_STATE_DONE, sleep_ok, 5000);
2228
2229 status = le16toh(mpt->tmf_req->IOCStatus);
2230 response = mpt->tmf_req->ResponseCode;
2231 mpt->tmf_req->state = REQ_STATE_FREE;
2232
2233 if (error) {
2234 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2235 "Resetting controller.\n");
2236 mpt_reset(mpt, TRUE);
2237 return (ETIMEDOUT);
2238 }
2239
2240 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2241 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2242 "Resetting controller.\n", status);
2243 mpt_reset(mpt, TRUE);
2244 return (EIO);
2245 }
2246
2247 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2248 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2249 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2250 "Resetting controller.\n", response);
2251 mpt_reset(mpt, TRUE);
2252 return (EIO);
2253 }
2254 return (0);
2255 }
2256
2257 static int
2258 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2259 {
2260 int r = 0;
2261 request_t *req;
2262 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2263
2264 req = mpt_get_request(mpt, FALSE);
2265 if (req == NULL) {
2266 return (ENOMEM);
2267 }
2268 fc = req->req_vbuf;
2269 memset(fc, 0, sizeof(*fc));
2270 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2271 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2272 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2273 mpt_send_cmd(mpt, req);
2274 if (dowait) {
2275 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2276 REQ_STATE_DONE, FALSE, 60 * 1000);
2277 if (r == 0) {
2278 mpt_free_request(mpt, req);
2279 }
2280 }
2281 return (r);
2282 }
2283
2284 static int
2285 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2286 MSG_EVENT_NOTIFY_REPLY *msg)
2287 {
2288 uint32_t data0, data1;
2289
2290 data0 = le32toh(msg->Data[0]);
2291 data1 = le32toh(msg->Data[1]);
2292 switch(msg->Event & 0xFF) {
2293 case MPI_EVENT_UNIT_ATTENTION:
2294 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2295 (data0 >> 8) & 0xff, data0 & 0xff);
2296 break;
2297
2298 case MPI_EVENT_IOC_BUS_RESET:
2299 /* We generated a bus reset */
2300 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2301 (data0 >> 8) & 0xff);
2302 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2303 break;
2304
2305 case MPI_EVENT_EXT_BUS_RESET:
2306 /* Someone else generated a bus reset */
2307 mpt_prt(mpt, "External Bus Reset Detected\n");
2308 /*
2309 * These replies don't return EventData like the MPI
2310 * spec says they do
2311 */
2312 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2313 break;
2314
2315 case MPI_EVENT_RESCAN:
2316 {
2317 union ccb *ccb;
2318 uint32_t pathid;
2319 /*
2320 * In general this means a device has been added to the loop.
2321 */
2322 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2323 if (mpt->ready == 0) {
2324 break;
2325 }
2326 if (mpt->phydisk_sim) {
2327 pathid = cam_sim_path(mpt->phydisk_sim);
2328 } else {
2329 pathid = cam_sim_path(mpt->sim);
2330 }
2331 /*
2332 * Allocate a CCB, create a wildcard path for this bus,
2333 * and schedule a rescan.
2334 */
2335 ccb = xpt_alloc_ccb_nowait();
2336 if (ccb == NULL) {
2337 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2338 break;
2339 }
2340
2341 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2342 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2343 mpt_prt(mpt, "unable to create path for rescan\n");
2344 xpt_free_ccb(ccb);
2345 break;
2346 }
2347 xpt_rescan(ccb);
2348 break;
2349 }
2350
2351 case MPI_EVENT_LINK_STATUS_CHANGE:
2352 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2353 (data1 >> 8) & 0xff,
2354 ((data0 & 0xff) == 0)? "Failed" : "Active");
2355 break;
2356
2357 case MPI_EVENT_LOOP_STATE_CHANGE:
2358 switch ((data0 >> 16) & 0xff) {
2359 case 0x01:
2360 mpt_prt(mpt,
2361 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2362 "(Loop Initialization)\n",
2363 (data1 >> 8) & 0xff,
2364 (data0 >> 8) & 0xff,
2365 (data0 ) & 0xff);
2366 switch ((data0 >> 8) & 0xff) {
2367 case 0xF7:
2368 if ((data0 & 0xff) == 0xF7) {
2369 mpt_prt(mpt, "Device needs AL_PA\n");
2370 } else {
2371 mpt_prt(mpt, "Device %02x doesn't like "
2372 "FC performance\n",
2373 data0 & 0xFF);
2374 }
2375 break;
2376 case 0xF8:
2377 if ((data0 & 0xff) == 0xF7) {
2378 mpt_prt(mpt, "Device had loop failure "
2379 "at its receiver prior to acquiring"
2380 " AL_PA\n");
2381 } else {
2382 mpt_prt(mpt, "Device %02x detected loop"
2383 " failure at its receiver\n",
2384 data0 & 0xFF);
2385 }
2386 break;
2387 default:
2388 mpt_prt(mpt, "Device %02x requests that device "
2389 "%02x reset itself\n",
2390 data0 & 0xFF,
2391 (data0 >> 8) & 0xFF);
2392 break;
2393 }
2394 break;
2395 case 0x02:
2396 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2397 "LPE(%02x,%02x) (Loop Port Enable)\n",
2398 (data1 >> 8) & 0xff, /* Port */
2399 (data0 >> 8) & 0xff, /* Character 3 */
2400 (data0 ) & 0xff /* Character 4 */);
2401 break;
2402 case 0x03:
2403 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2404 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2405 (data1 >> 8) & 0xff, /* Port */
2406 (data0 >> 8) & 0xff, /* Character 3 */
2407 (data0 ) & 0xff /* Character 4 */);
2408 break;
2409 default:
2410 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2411 "FC event (%02x %02x %02x)\n",
2412 (data1 >> 8) & 0xff, /* Port */
2413 (data0 >> 16) & 0xff, /* Event */
2414 (data0 >> 8) & 0xff, /* Character 3 */
2415 (data0 ) & 0xff /* Character 4 */);
2416 }
2417 break;
2418
2419 case MPI_EVENT_LOGOUT:
2420 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2421 (data1 >> 8) & 0xff, data0);
2422 break;
2423 case MPI_EVENT_QUEUE_FULL:
2424 {
2425 struct cam_sim *sim;
2426 struct cam_path *tmppath;
2427 struct ccb_relsim crs;
2428 PTR_EVENT_DATA_QUEUE_FULL pqf;
2429 lun_id_t lun_id;
2430
2431 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2432 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2433 if (bootverbose) {
2434 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2435 "Depth %d\n",
2436 pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2437 }
2438 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2439 pqf->TargetID) != 0) {
2440 sim = mpt->phydisk_sim;
2441 } else {
2442 sim = mpt->sim;
2443 }
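/*
 * Walk every possible LUN on the saturated target and ask CAM to
 * reduce the number of openings to one below the depth at which
 * the IOC reported QUEUE FULL.
 */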
2444 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2445 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2446 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2447 mpt_prt(mpt, "unable to create a path to send "
2448 "XPT_REL_SIMQ");
2449 break;
2450 }
2451 memset(&crs, 0, sizeof(crs));
2452 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2453 crs.ccb_h.func_code = XPT_REL_SIMQ;
2454 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2455 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2456 crs.openings = pqf->CurrentDepth - 1;
2457 xpt_action((union ccb *)&crs);
2458 if (crs.ccb_h.status != CAM_REQ_CMP) {
2459 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2460 }
2461 xpt_free_path(tmppath);
2462 }
2463 break;
2464 }
2465 case MPI_EVENT_IR_RESYNC_UPDATE:
2466 mpt_prt(mpt, "IR resync update %d completed\n",
2467 (data0 >> 16) & 0xff);
2468 break;
2469 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2470 {
2471 union ccb *ccb;
2472 struct cam_sim *sim;
2473 struct cam_path *tmppath;
2474 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2475
2476 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2477 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2478 psdsc->TargetID) != 0)
2479 sim = mpt->phydisk_sim;
2480 else
2481 sim = mpt->sim;
2482 switch(psdsc->ReasonCode) {
2483 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2484 ccb = xpt_alloc_ccb_nowait();
2485 if (ccb == NULL) {
2486 mpt_prt(mpt,
2487 "unable to alloc CCB for rescan\n");
2488 break;
2489 }
2490 if (xpt_create_path(&ccb->ccb_h.path, NULL,
2491 cam_sim_path(sim), psdsc->TargetID,
2492 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2493 mpt_prt(mpt,
2494 "unable to create path for rescan\n");
2495 xpt_free_ccb(ccb);
2496 break;
2497 }
2498 xpt_rescan(ccb);
2499 break;
2500 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2501 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2502 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2503 CAM_REQ_CMP) {
2504 mpt_prt(mpt,
2505 "unable to create path for async event");
2506 break;
2507 }
2508 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2509 xpt_free_path(tmppath);
2510 break;
2511 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2512 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2513 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2514 break;
2515 default:
2516 mpt_lprt(mpt, MPT_PRT_WARN,
2517 "SAS device status change: Bus: 0x%02x TargetID: "
2518 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2519 psdsc->TargetID, psdsc->ReasonCode);
2520 break;
2521 }
2522 break;
2523 }
2524 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2525 {
2526 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2527
2528 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2529 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2530 mpt_lprt(mpt, MPT_PRT_WARN,
2531 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2532 pde->Port, pde->DiscoveryStatus);
2533 break;
2534 }
2535 case MPI_EVENT_EVENT_CHANGE:
2536 case MPI_EVENT_INTEGRATED_RAID:
2537 case MPI_EVENT_IR2:
2538 case MPI_EVENT_LOG_ENTRY_ADDED:
2539 case MPI_EVENT_SAS_DISCOVERY:
2540 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2541 case MPI_EVENT_SAS_SES:
2542 break;
2543 default:
2544 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2545 msg->Event & 0xFF);
2546 return (0);
2547 }
2548 return (1);
2549 }
2550
2551 /*
2552 * Reply path for all SCSI I/O requests, called from our
2553 * interrupt handler by extracting our handler index from
2554 * the MsgContext field of the reply from the IOC.
2555 *
2556 * This routine is optimized for the common case of a
2557 * completion without error. All exception handling is
2558 * offloaded to non-inlined helper routines to minimize
2559 * cache footprint.
2560 */
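/*
 * The MsgContext was composed in mpt_start() as
 * (req->index | scsi_io_handler_id), so the low bits recover the
 * originating request and the handler id routes the reply here.
 */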
2561 static int
2562 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2563 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2564 {
2565 MSG_SCSI_IO_REQUEST *scsi_req;
2566 union ccb *ccb;
2567
2568 if (req->state == REQ_STATE_FREE) {
2569 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2570 return (TRUE);
2571 }
2572
2573 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2574 ccb = req->ccb;
2575 if (ccb == NULL) {
2576 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2577 req, req->serno);
2578 return (TRUE);
2579 }
2580
2581 mpt_req_untimeout(req, mpt_timeout, ccb);
2582 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2583
2584 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2585 bus_dmasync_op_t op;
2586
2587 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2588 op = BUS_DMASYNC_POSTREAD;
2589 else
2590 op = BUS_DMASYNC_POSTWRITE;
2591 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2592 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2593 }
2594
2595 if (reply_frame == NULL) {
2596 /*
2597 * Context only reply, completion without error status.
2598 */
2599 ccb->csio.resid = 0;
2600 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2601 ccb->csio.scsi_status = SCSI_STATUS_OK;
2602 } else {
2603 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2604 }
2605
2606 if (mpt->outofbeer) {
2607 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2608 mpt->outofbeer = 0;
2609 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2610 }
2611 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2612 struct scsi_inquiry_data *iq =
2613 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2614 if (scsi_req->Function ==
2615 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2616 /*
2617 * Fake out the device type so that only the
2618 * pass-thru device will attach.
2619 */
2620 iq->device &= ~0x1F;
2621 iq->device |= T_NODEVICE;
2622 }
2623 }
2624 if (mpt->verbose == MPT_PRT_DEBUG) {
2625 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2626 req, req->serno);
2627 }
2628 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2629 xpt_done(ccb);
2630 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2631 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2632 } else {
2633 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2634 req, req->serno);
2635 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2636 }
2637 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2638 ("CCB req needed wakeup"));
2639 #ifdef INVARIANTS
2640 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2641 #endif
2642 mpt_free_request(mpt, req);
2643 return (TRUE);
2644 }
2645
2646 static int
2647 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2648 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2649 {
2650 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2651
2652 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2653 #ifdef INVARIANTS
2654 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2655 #endif
2656 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2657 /* Record IOC Status and Response Code of TMF for any waiters. */
2658 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2659 req->ResponseCode = tmf_reply->ResponseCode;
2660
2661 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2662 req, req->serno, le16toh(tmf_reply->IOCStatus));
2663 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2664 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2665 req->state |= REQ_STATE_DONE;
2666 wakeup(req);
2667 } else {
2668 mpt->tmf_req->state = REQ_STATE_FREE;
2669 }
2670 return (TRUE);
2671 }
2672
2673 /*
2674 * XXX: Move to definitions file
2675 */
2676 #define ELS 0x22
2677 #define FC4LS 0x32
2678 #define ABTS 0x81
2679 #define BA_ACC 0x84
2680
2681 #define LS_RJT 0x01
2682 #define LS_ACC 0x02
2683 #define PLOGI 0x03
2684 #define LOGO 0x05
2685 #define SRR 0x14
2686 #define PRLI 0x20
2687 #define PRLO 0x21
2688 #define ADISC 0x52
2689 #define RSCN 0x61
2690
2691 static void
2692 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2693 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2694 {
2695 uint32_t fl;
2696 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2697 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2698
2699 /*
2700 * We are going to reuse the ELS request to send this response back.
2701 */
2702 rsp = &tmp;
2703 memset(rsp, 0, sizeof(*rsp));
2704
2705 #ifdef USE_IMMEDIATE_LINK_DATA
2706 /*
2707 * Apparently the IMMEDIATE stuff doesn't work.
2708 */
2709 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2710 #endif
2711 rsp->RspLength = length;
2712 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2713 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2714
2715 /*
2716 * Copy over information from the original reply frame to
2717 * its correct place in the response.
2718 */
2719 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2720
2721 /*
2722 * And now copy back the temporary area to the original frame.
2723 */
2724 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2725 rsp = req->req_vbuf;
2726
2727 #ifdef USE_IMMEDIATE_LINK_DATA
2728 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2729 #else
2730 {
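/*
 * Build a single simple SGE describing the response payload, which
 * was staged in the second half of this request's frame.  The SGE
 * flags are packed into the top byte of FlagsLength via
 * MPI_SGE_FLAGS_SHIFT, with the byte count in the low bits.
 */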
2731 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2732 bus_addr_t paddr = req->req_pbuf;
2733 paddr += MPT_RQSL(mpt);
2734
2735 fl =
2736 MPI_SGE_FLAGS_HOST_TO_IOC |
2737 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2738 MPI_SGE_FLAGS_LAST_ELEMENT |
2739 MPI_SGE_FLAGS_END_OF_LIST |
2740 MPI_SGE_FLAGS_END_OF_BUFFER;
2741 fl <<= MPI_SGE_FLAGS_SHIFT;
2742 fl |= (length);
2743 se->FlagsLength = htole32(fl);
2744 se->Address = htole32((uint32_t) paddr);
2745 }
2746 #endif
2747
2748 /*
2749 * Send it on...
2750 */
2751 mpt_send_cmd(mpt, req);
2752 }
2753
2754 static int
2755 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2756 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2757 {
2758 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2759 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2760 U8 rctl;
2761 U8 type;
2762 U8 cmd;
2763 U16 status = le16toh(reply_frame->IOCStatus);
2764 U32 *elsbuf;
2765 int ioindex;
2766 int do_refresh = TRUE;
2767
2768 #ifdef INVARIANTS
2769 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2770 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2771 req, req->serno, rp->Function));
2772 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2773 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2774 } else {
2775 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2776 }
2777 #endif
2778 mpt_lprt(mpt, MPT_PRT_DEBUG,
2779 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2780 req, req->serno, reply_frame, reply_frame->Function);
2781
2782 if (status != MPI_IOCSTATUS_SUCCESS) {
2783 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2784 status, reply_frame->Function);
2785 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2786 /*
2787 * XXX: to get around shutdown issue
2788 */
2789 mpt->disabled = 1;
2790 return (TRUE);
2791 }
2792 return (TRUE);
2793 }
2794
2795 /*
2796 * If this is the completion of a link service response, we
2797 * recycle the request to post a fresh link service buffer.
2798 *
2799 * The request pointer is bogus in this case and we have to fetch
2800 * it based upon the TransactionContext.
2801 */
2802 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2803 /* Freddie Uncle Charlie Katie */
2804 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2805 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2806 if (mpt->els_cmd_ptrs[ioindex] == req) {
2807 break;
2808 }
2809
2810 KASSERT(ioindex < mpt->els_cmds_allocated,
2811 ("can't find my mommie!"));
2812
2813 /* remove from active list as we're going to re-post it */
2814 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2815 req->state &= ~REQ_STATE_QUEUED;
2816 req->state |= REQ_STATE_DONE;
2817 mpt_fc_post_els(mpt, req, ioindex);
2818 return (TRUE);
2819 }
2820
2821 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2822 /* remove from active list as we're done */
2823 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2824 req->state &= ~REQ_STATE_QUEUED;
2825 req->state |= REQ_STATE_DONE;
2826 if (req->state & REQ_STATE_TIMEDOUT) {
2827 mpt_lprt(mpt, MPT_PRT_DEBUG,
2828 "Sync Primitive Send Completed After Timeout\n");
2829 mpt_free_request(mpt, req);
2830 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2831 mpt_lprt(mpt, MPT_PRT_DEBUG,
2832 "Async Primitive Send Complete\n");
2833 mpt_free_request(mpt, req);
2834 } else {
2835 mpt_lprt(mpt, MPT_PRT_DEBUG,
2836 "Sync Primitive Send Complete- Waking Waiter\n");
2837 wakeup(req);
2838 }
2839 return (TRUE);
2840 }
2841
2842 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2843 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2844 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2845 rp->MsgLength, rp->MsgFlags);
2846 return (TRUE);
2847 }
2848
2849 if (rp->MsgLength <= 5) {
2850 /*
2851 * This is just an ack of an original ELS buffer post
2852 */
2853 mpt_lprt(mpt, MPT_PRT_DEBUG,
2854 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2855 return (TRUE);
2856 }
2857
2858 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2859 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2860
2861 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2862 cmd = be32toh(elsbuf[0]) >> 24;
2863
2864 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2865 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2866 return (TRUE);
2867 }
2868
2869 ioindex = le32toh(rp->TransactionContext);
2870 req = mpt->els_cmd_ptrs[ioindex];
2871
2872 if (rctl == ELS && type == 1) {
2873 switch (cmd) {
2874 case PRLI:
2875 /*
2876 * Send back a PRLI ACC
2877 */
2878 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2879 le32toh(rp->Wwn.PortNameHigh),
2880 le32toh(rp->Wwn.PortNameLow));
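/*
 * Build the PRLI ACC payload in place: the 0x02 in the top byte of
 * word 0 is LS_ACC, and the bits set in word 4 advertise target
 * (0x10) and/or initiator (0x20) function to match our role.
 */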
2881 elsbuf[0] = htobe32(0x02100014);
2882 elsbuf[1] |= htobe32(0x00000100);
2883 elsbuf[4] = htobe32(0x00000002);
2884 if (mpt->role & MPT_ROLE_TARGET)
2885 elsbuf[4] |= htobe32(0x00000010);
2886 if (mpt->role & MPT_ROLE_INITIATOR)
2887 elsbuf[4] |= htobe32(0x00000020);
2888 /* remove from active list as we're done */
2889 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2890 req->state &= ~REQ_STATE_QUEUED;
2891 req->state |= REQ_STATE_DONE;
2892 mpt_fc_els_send_response(mpt, req, rp, 20);
2893 do_refresh = FALSE;
2894 break;
2895 case PRLO:
2896 memset(elsbuf, 0, 5 * (sizeof (U32)));
2897 elsbuf[0] = htobe32(0x02100014);
2898 elsbuf[1] = htobe32(0x08000100);
2899 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2900 le32toh(rp->Wwn.PortNameHigh),
2901 le32toh(rp->Wwn.PortNameLow));
2902 /* remove from active list as we're done */
2903 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2904 req->state &= ~REQ_STATE_QUEUED;
2905 req->state |= REQ_STATE_DONE;
2906 mpt_fc_els_send_response(mpt, req, rp, 20);
2907 do_refresh = FALSE;
2908 break;
2909 default:
2910 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2911 break;
2912 }
2913 } else if (rctl == ABTS && type == 0) {
2914 uint16_t rx_id = le16toh(rp->Rxid);
2915 uint16_t ox_id = le16toh(rp->Oxid);
2916 mpt_tgt_state_t *tgt;
2917 request_t *tgt_req = NULL;
2918 union ccb *ccb;
2919 uint32_t ct_id;
2920
2921 mpt_prt(mpt,
2922 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2923 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2924 le32toh(rp->Wwn.PortNameLow));
2925 if (rx_id >= mpt->mpt_max_tgtcmds) {
2926 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2927 } else if (mpt->tgt_cmd_ptrs == NULL) {
2928 mpt_prt(mpt, "No TGT CMD PTRS\n");
2929 } else {
2930 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2931 }
2932 if (tgt_req == NULL) {
2933 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2934 goto skip;
2935 }
2936 tgt = MPT_TGT_STATE(mpt, tgt_req);
2937
2938 /* Check to make sure we have the correct command. */
2939 ct_id = GET_IO_INDEX(tgt->reply_desc);
2940 if (ct_id != rx_id) {
2941 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2942 "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
2943 goto skip;
2944 }
2945 if (tgt->itag != ox_id) {
2946 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2947 "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
2948 goto skip;
2949 }
2950
2951 if ((ccb = tgt->ccb) != NULL) {
2952 mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
2953 ccb, (uintmax_t)ccb->ccb_h.target_lun,
2954 ccb->ccb_h.flags, ccb->ccb_h.status);
2955 }
2956 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2957 "%x nxfers %x\n", tgt->state, tgt->resid,
2958 tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
2959 if (mpt_abort_target_cmd(mpt, tgt_req))
2960 mpt_prt(mpt, "unable to start TargetAbort\n");
2961
2962 skip:
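/*
 * Build a BA_ACC payload: word 1 carries the OX_ID/RX_ID pair being
 * accepted and word 2 the valid SEQ_CNT range, per the FC basic
 * accept format.
 */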
2963 memset(elsbuf, 0, 5 * (sizeof (U32)));
2964 elsbuf[0] = htobe32(0);
2965 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2966 elsbuf[2] = htobe32(0x0000ffff);
2967 /*
2968 * Dork with the reply frame so that the response to it
2969 * will be correct.
2970 */
2971 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2972 /* remove from active list as we're done */
2973 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2974 req->state &= ~REQ_STATE_QUEUED;
2975 req->state |= REQ_STATE_DONE;
2976 mpt_fc_els_send_response(mpt, req, rp, 12);
2977 do_refresh = FALSE;
2978 } else {
2979 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2980 }
2981 if (do_refresh == TRUE) {
2982 /* remove from active list as we're done */
2983 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2984 req->state &= ~REQ_STATE_QUEUED;
2985 req->state |= REQ_STATE_DONE;
2986 mpt_fc_post_els(mpt, req, ioindex);
2987 }
2988 return (TRUE);
2989 }
2990
2991 /*
2992 * Clean up all SCSI Initiator personality state in response
2993 * to a controller reset.
2994 */
2995 static void
2996 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2997 {
2998
2999 /*
3000 * The pending list is already run down by
3001 * the generic handler. Perform the same
3002 * operation on the timed out request list.
3003 */
3004 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3005 MPI_IOCSTATUS_INVALID_STATE);
3006
3007 /*
3008 * XXX: We need to repost ELS and Target Command Buffers?
3009 */
3010
3011 /*
3012 * Inform the XPT that a bus reset has occurred.
3013 */
3014 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3015 }
3016
3017 /*
3018 * Parse additional completion information in the reply
3019 * frame for SCSI I/O requests.
3020 */
3021 static int
3022 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3023 MSG_DEFAULT_REPLY *reply_frame)
3024 {
3025 union ccb *ccb;
3026 MSG_SCSI_IO_REPLY *scsi_io_reply;
3027 u_int ioc_status;
3028 u_int sstate;
3029
3030 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3031 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3032 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3033 ("MPT SCSI I/O Handler called with incorrect reply type"));
3034 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3035 ("MPT SCSI I/O Handler called with continuation reply"));
3036
3037 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3038 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3039 ioc_status &= MPI_IOCSTATUS_MASK;
3040 sstate = scsi_io_reply->SCSIState;
3041
3042 ccb = req->ccb;
3043 ccb->csio.resid =
3044 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3045
3046 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3047 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3048 uint32_t sense_returned;
3049
3050 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3051
3052 sense_returned = le32toh(scsi_io_reply->SenseCount);
3053 if (sense_returned < ccb->csio.sense_len)
3054 ccb->csio.sense_resid = ccb->csio.sense_len -
3055 sense_returned;
3056 else
3057 ccb->csio.sense_resid = 0;
3058
3059 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3060 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3061 min(ccb->csio.sense_len, sense_returned));
3062 }
3063
3064 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3065 /*
3066 * Tag messages rejected, but non-tagged retry
3067 * was successful.
3068 XXXX
3069 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3070 */
3071 }
3072
3073 switch(ioc_status) {
3074 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3075 /*
3076 * XXX
3077 * Linux driver indicates that a zero
3078 * transfer length with this error code
3079 * indicates a CRC error.
3080 *
3081 * No need to swap the bytes for checking
3082 * against zero.
3083 */
3084 if (scsi_io_reply->TransferCount == 0) {
3085 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3086 break;
3087 }
3088 /* FALLTHROUGH */
3089 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3090 case MPI_IOCSTATUS_SUCCESS:
3091 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3092 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3093 /*
3094 * Status was never returned for this transaction.
3095 */
3096 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3097 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3098 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3099 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3100 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3101 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3102 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3103 /* XXX Handle SPI-Packet and FCP-2 response info. */
3104 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3105 } else
3106 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3107 break;
3108 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3109 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3110 break;
3111 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3112 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3113 break;
3114 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3115 /*
3116 * Since selection timeouts and "device really not
3117 * there" are grouped into this error code, report
3118 * selection timeout. Selection timeouts are
3119 * typically retried before giving up on the device
3120 * whereas "device not there" errors are considered
3121 * unretryable.
3122 */
3123 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3124 break;
3125 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3126 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3127 break;
3128 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3129 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3130 break;
3131 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3132 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3133 break;
3134 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3135 ccb->ccb_h.status = CAM_UA_TERMIO;
3136 break;
3137 case MPI_IOCSTATUS_INVALID_STATE:
3138 /*
3139 * The IOC has been reset. Emulate a bus reset.
3140 */
3141 /* FALLTHROUGH */
3142 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3143 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3144 break;
3145 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3146 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3147 /*
3148 * Don't clobber any timeout status that has
3149 * already been set for this transaction. We
3150 * want the SCSI layer to be able to differentiate
3151 * between the command we aborted due to timeout
3152 * and any innocent bystanders.
3153 */
3154 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3155 break;
3156 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3157 break;
3158
3159 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3160 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3161 break;
3162 case MPI_IOCSTATUS_BUSY:
3163 mpt_set_ccb_status(ccb, CAM_BUSY);
3164 break;
3165 case MPI_IOCSTATUS_INVALID_FUNCTION:
3166 case MPI_IOCSTATUS_INVALID_SGL:
3167 case MPI_IOCSTATUS_INTERNAL_ERROR:
3168 case MPI_IOCSTATUS_INVALID_FIELD:
3169 default:
3170 /* XXX
3171 * Some of the above may need to kick
3172 * off a recovery action!!!!
3173 */
3174 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3175 break;
3176 }
3177
3178 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3179 mpt_freeze_ccb(ccb);
3180 }
3181
3182 return (TRUE);
3183 }
3184
3185 static void
3186 mpt_action(struct cam_sim *sim, union ccb *ccb)
3187 {
3188 struct mpt_softc *mpt;
3189 struct ccb_trans_settings *cts;
3190 target_id_t tgt;
3191 lun_id_t lun;
3192 int raid_passthru;
3193
3194 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3195
3196 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3197 raid_passthru = (sim == mpt->phydisk_sim);
3198 MPT_LOCK_ASSERT(mpt);
3199
3200 tgt = ccb->ccb_h.target_id;
3201 lun = ccb->ccb_h.target_lun;
3202 if (raid_passthru &&
3203 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3204 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3205 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3206 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3207 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3208 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3209 xpt_done(ccb);
3210 return;
3211 }
3212 }
3213 ccb->ccb_h.ccb_mpt_ptr = mpt;
3214
3215 switch (ccb->ccb_h.func_code) {
3216 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3217 /*
3218 * Do a couple of preliminary checks...
3219 */
3220 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3221 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3222 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3223 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3224 break;
3225 }
3226 }
3227 /* Max supported CDB length is 16 bytes */
3228 /* XXX Unless we implement the new 32-byte message type */
3229 if (ccb->csio.cdb_len >
3230 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3231 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3232 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3233 break;
3234 }
3235 #ifdef MPT_TEST_MULTIPATH
3236 if (mpt->failure_id == ccb->ccb_h.target_id) {
3237 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3238 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3239 break;
3240 }
3241 #endif
3242 ccb->csio.scsi_status = SCSI_STATUS_OK;
3243 mpt_start(sim, ccb);
3244 return;
3245
3246 case XPT_RESET_BUS:
3247 if (raid_passthru) {
3248 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3249 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3250 break;
3251 }
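/* FALLTHROUGH */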
3252 case XPT_RESET_DEV:
3253 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3254 if (bootverbose) {
3255 xpt_print(ccb->ccb_h.path, "reset bus\n");
3256 }
3257 } else {
3258 xpt_print(ccb->ccb_h.path, "reset device\n");
3259 }
3260 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3261
3262 /*
3263 * mpt_bus_reset is always successful in that it
3264 * will fall back to a hard reset should a bus
3265 * reset attempt fail.
3266 */
3267 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3268 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3269 break;
3270
3271 case XPT_ABORT:
3272 {
3273 union ccb *accb = ccb->cab.abort_ccb;
3274 switch (accb->ccb_h.func_code) {
3275 case XPT_ACCEPT_TARGET_IO:
3276 case XPT_IMMEDIATE_NOTIFY:
3277 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3278 break;
3279 case XPT_CONT_TARGET_IO:
3280 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3281 ccb->ccb_h.status = CAM_UA_ABORT;
3282 break;
3283 case XPT_SCSI_IO:
3284 ccb->ccb_h.status = CAM_UA_ABORT;
3285 break;
3286 default:
3287 ccb->ccb_h.status = CAM_REQ_INVALID;
3288 break;
3289 }
3290 break;
3291 }
3292
3293 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3294
3295 #define DP_DISC_ENABLE 0x1
3296 #define DP_DISC_DISABL 0x2
3297 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3298
3299 #define DP_TQING_ENABLE 0x4
3300 #define DP_TQING_DISABL 0x8
3301 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3302
3303 #define DP_WIDE 0x10
3304 #define DP_NARROW 0x20
3305 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3306
3307 #define DP_SYNC 0x40
3308
3309 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3310 {
3311 struct ccb_trans_settings_scsi *scsi;
3312 struct ccb_trans_settings_spi *spi;
3313 uint8_t dval;
3314 u_int period;
3315 u_int offset;
3316 int i, j;
3317
3318 cts = &ccb->cts;
3319
3320 if (mpt->is_fc || mpt->is_sas) {
3321 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3322 break;
3323 }
3324
3325 scsi = &cts->proto_specific.scsi;
3326 spi = &cts->xport_specific.spi;
3327
3328 /*
3329 * We can be called just to validate transport and protocol versions
3330 */
3331 if (scsi->valid == 0 && spi->valid == 0) {
3332 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3333 break;
3334 }
3335
3336 /*
3337 * Skip attempting settings on RAID volume disks.
3338 * Other devices on the bus get the normal treatment.
3339 */
3340 if (mpt->phydisk_sim && raid_passthru == 0 &&
3341 mpt_is_raid_volume(mpt, tgt) != 0) {
3342 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3343 "no transfer settings for RAID vols\n");
3344 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3345 break;
3346 }
3347
3348 i = mpt->mpt_port_page2.PortSettings &
3349 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3350 j = mpt->mpt_port_page2.PortFlags &
3351 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3352 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3353 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3354 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3355 "honoring BIOS transfer negotiations\n");
3356 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3357 break;
3358 }
3359
3360 dval = 0;
3361 period = 0;
3362 offset = 0;
3363
3364 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3365 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3366 DP_DISC_ENABLE : DP_DISC_DISABL;
3367 }
3368
3369 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3370 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3371 DP_TQING_ENABLE : DP_TQING_DISABL;
3372 }
3373
3374 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3375 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3376 DP_WIDE : DP_NARROW;
3377 }
3378
3379 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3380 dval |= DP_SYNC;
3381 offset = spi->sync_offset;
3382 } else {
3383 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3384 &mpt->mpt_dev_page1[tgt];
3385 offset = ptr->RequestedParameters;
3386 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3387 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3388 }
3389 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3390 dval |= DP_SYNC;
3391 period = spi->sync_period;
3392 } else {
3393 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3394 &mpt->mpt_dev_page1[tgt];
3395 period = ptr->RequestedParameters;
3396 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3397 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3398 }
3399
3400 if (dval & DP_DISC_ENABLE) {
3401 mpt->mpt_disc_enable |= (1 << tgt);
3402 } else if (dval & DP_DISC_DISABL) {
3403 mpt->mpt_disc_enable &= ~(1 << tgt);
3404 }
3405 if (dval & DP_TQING_ENABLE) {
3406 mpt->mpt_tag_enable |= (1 << tgt);
3407 } else if (dval & DP_TQING_DISABL) {
3408 mpt->mpt_tag_enable &= ~(1 << tgt);
3409 }
3410 if (dval & DP_WIDTH) {
3411 mpt_setwidth(mpt, tgt, 1);
3412 }
3413 if (dval & DP_SYNC) {
3414 mpt_setsync(mpt, tgt, period, offset);
3415 }
3416 if (dval == 0) {
3417 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3418 break;
3419 }
3420 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3421 "set [%d]: 0x%x period 0x%x offset %d\n",
3422 tgt, dval, period, offset);
3423 if (mpt_update_spi_config(mpt, tgt)) {
3424 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3425 } else {
3426 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3427 }
3428 break;
3429 }
3430 case XPT_GET_TRAN_SETTINGS:
3431 {
3432 struct ccb_trans_settings_scsi *scsi;
3433 cts = &ccb->cts;
3434 cts->protocol = PROTO_SCSI;
3435 if (mpt->is_fc) {
3436 struct ccb_trans_settings_fc *fc =
3437 &cts->xport_specific.fc;
3438 cts->protocol_version = SCSI_REV_SPC;
3439 cts->transport = XPORT_FC;
3440 cts->transport_version = 0;
3441 if (mpt->mpt_fcport_speed != 0) {
3442 fc->valid = CTS_FC_VALID_SPEED;
3443 fc->bitrate = 100000 * mpt->mpt_fcport_speed;
3444 }
3445 } else if (mpt->is_sas) {
3446 struct ccb_trans_settings_sas *sas =
3447 &cts->xport_specific.sas;
3448 cts->protocol_version = SCSI_REV_SPC2;
3449 cts->transport = XPORT_SAS;
3450 cts->transport_version = 0;
3451 sas->valid = CTS_SAS_VALID_SPEED;
3452 sas->bitrate = 300000;
3453 } else {
3454 cts->protocol_version = SCSI_REV_2;
3455 cts->transport = XPORT_SPI;
3456 cts->transport_version = 2;
3457 if (mpt_get_spi_settings(mpt, cts) != 0) {
3458 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3459 break;
3460 }
3461 }
3462 scsi = &cts->proto_specific.scsi;
3463 scsi->valid = CTS_SCSI_VALID_TQ;
3464 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3465 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3466 break;
3467 }
3468 case XPT_CALC_GEOMETRY:
3469 {
3470 struct ccb_calc_geometry *ccg;
3471
3472 ccg = &ccb->ccg;
3473 if (ccg->block_size == 0) {
3474 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3475 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3476 break;
3477 }
3478 cam_calc_geometry(ccg, /* extended */ 1);
3479 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3480 break;
3481 }
3482 case XPT_GET_SIM_KNOB:
3483 {
3484 struct ccb_sim_knob *kp = &ccb->knob;
3485
3486 if (mpt->is_fc) {
3487 kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3488 kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3489 switch (mpt->role) {
3490 case MPT_ROLE_NONE:
3491 kp->xport_specific.fc.role = KNOB_ROLE_NONE;
3492 break;
3493 case MPT_ROLE_INITIATOR:
3494 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
3495 break;
3496 case MPT_ROLE_TARGET:
3497 kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
3498 break;
3499 case MPT_ROLE_BOTH:
3500 kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
3501 break;
3502 }
3503 kp->xport_specific.fc.valid =
3504 KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
3505 ccb->ccb_h.status = CAM_REQ_CMP;
3506 } else {
3507 ccb->ccb_h.status = CAM_REQ_INVALID;
3508 }
3509 xpt_done(ccb);
3510 break;
3511 }
3512 case XPT_PATH_INQ: /* Path routing inquiry */
3513 {
3514 struct ccb_pathinq *cpi = &ccb->cpi;
3515
3516 cpi->version_num = 1;
3517 cpi->target_sprt = 0;
3518 cpi->hba_eng_cnt = 0;
3519 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3520 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3521 /*
3522 * FC cards report MAX_DEVICES of 512, but
3523 * the MSG_SCSI_IO_REQUEST target id field
3524 * is only 8 bits. Until we fix the driver
3525 * to support 'channels' for bus overflow,
3526 * just limit it.
3527 */
3528 if (cpi->max_target > 255) {
3529 cpi->max_target = 255;
3530 }
3531
3532 /*
3533 * VMware ESX reports > 16 devices and then dies when we probe.
3534 */
3535 if (mpt->is_spi && cpi->max_target > 15) {
3536 cpi->max_target = 15;
3537 }
3538 if (mpt->is_spi)
3539 cpi->max_lun = 7;
3540 else
3541 cpi->max_lun = MPT_MAX_LUNS;
3542 cpi->initiator_id = mpt->mpt_ini_id;
3543 cpi->bus_id = cam_sim_bus(sim);
3544
3545 /*
3546 * The base speed is the speed of the underlying connection.
3547 */
3548 cpi->protocol = PROTO_SCSI;
3549 if (mpt->is_fc) {
3550 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3551 PIM_EXTLUNS;
3552 cpi->base_transfer_speed = 100000;
3553 cpi->hba_inquiry = PI_TAG_ABLE;
3554 cpi->transport = XPORT_FC;
3555 cpi->transport_version = 0;
3556 cpi->protocol_version = SCSI_REV_SPC;
3557 cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3558 cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3559 cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
3560 cpi->xport_specific.fc.bitrate =
3561 100000 * mpt->mpt_fcport_speed;
3562 } else if (mpt->is_sas) {
3563 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3564 PIM_EXTLUNS;
3565 cpi->base_transfer_speed = 300000;
3566 cpi->hba_inquiry = PI_TAG_ABLE;
3567 cpi->transport = XPORT_SAS;
3568 cpi->transport_version = 0;
3569 cpi->protocol_version = SCSI_REV_SPC2;
3570 } else {
3571 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
3572 PIM_EXTLUNS;
3573 cpi->base_transfer_speed = 3300;
3574 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3575 cpi->transport = XPORT_SPI;
3576 cpi->transport_version = 2;
3577 cpi->protocol_version = SCSI_REV_2;
3578 }
3579
3580 /*
3581 * We give our fake RAID passthru bus a width that is MaxPhysDisks
3582 * wide and restrict it to one lun.
3583 */
3584 if (raid_passthru) {
3585 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3586 cpi->initiator_id = cpi->max_target + 1;
3587 cpi->max_lun = 0;
3588 }
3589
3590 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3591 cpi->hba_misc |= PIM_NOINITIATOR;
3592 }
3593 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3594 cpi->target_sprt =
3595 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3596 } else {
3597 cpi->target_sprt = 0;
3598 }
3599 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3600 strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3601 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3602 cpi->unit_number = cam_sim_unit(sim);
3603 cpi->ccb_h.status = CAM_REQ_CMP;
3604 break;
3605 }
3606 case XPT_EN_LUN: /* Enable LUN as a target */
3607 {
3608 int result;
3609
3610 if (ccb->cel.enable)
3611 result = mpt_enable_lun(mpt,
3612 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3613 else
3614 result = mpt_disable_lun(mpt,
3615 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3616 if (result == 0) {
3617 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3618 } else {
3619 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3620 }
3621 break;
3622 }
3623 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
3624 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3625 {
3626 tgt_resource_t *trtp;
3627 lun_id_t lun = ccb->ccb_h.target_lun;
3628 ccb->ccb_h.sim_priv.entries[0].field = 0;
3629 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3630
3631 if (lun == CAM_LUN_WILDCARD) {
3632 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3633 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3634 break;
3635 }
3636 trtp = &mpt->trt_wildcard;
3637 } else if (lun >= MPT_MAX_LUNS) {
3638 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3639 break;
3640 } else {
3641 trtp = &mpt->trt[lun];
3642 }
3643 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3644 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3645 "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
3646 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3647 sim_links.stqe);
3648 } else {
3649 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3650 "Put FREE INOT lun %jx\n", (uintmax_t)lun);
3651 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3652 sim_links.stqe);
3653 }
3654 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3655 return;
3656 }
3657 case XPT_NOTIFY_ACKNOWLEDGE: /* Task management request done. */
3658 {
3659 request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);
3660
3661 mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
3662 mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
3663 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3664 break;
3665 }
3666 case XPT_CONT_TARGET_IO:
3667 mpt_target_start_io(mpt, ccb);
3668 return;
3669
3670 default:
3671 ccb->ccb_h.status = CAM_REQ_INVALID;
3672 break;
3673 }
3674 xpt_done(ccb);
3675 }
3676
3677 static int
3678 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3679 {
3680 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3681 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3682 target_id_t tgt;
3683 uint32_t dval, pval, oval;
3684 int rv;
3685
3686 if (IS_CURRENT_SETTINGS(cts) == 0) {
3687 tgt = cts->ccb_h.target_id;
3688 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3689 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3690 return (-1);
3691 }
3692 } else {
3693 tgt = cts->ccb_h.target_id;
3694 }
3695
3696 /*
3697 * We aren't looking at Port Page 2 BIOS settings here-
3698 * sometimes these have been known to be bogus XXX.
3699 *
3700 * For user settings, we pick the max from port page 0
3701 *
3702 * For current settings we read the current settings out from
3703 * device page 0 for that target.
3704 */
3705 if (IS_CURRENT_SETTINGS(cts)) {
3706 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3707 dval = 0;
3708
3709 tmp = mpt->mpt_dev_page0[tgt];
3710 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3711 sizeof(tmp), FALSE, 5000);
3712 if (rv) {
3713 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3714 return (rv);
3715 }
3716 mpt2host_config_page_scsi_device_0(&tmp);
3717
3718 mpt_lprt(mpt, MPT_PRT_DEBUG,
3719 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3720 tmp.NegotiatedParameters, tmp.Information);
3721 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3722 DP_WIDE : DP_NARROW;
3723 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3724 DP_DISC_ENABLE : DP_DISC_DISABL;
3725 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3726 DP_TQING_ENABLE : DP_TQING_DISABL;
3727 oval = tmp.NegotiatedParameters;
3728 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3729 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3730 pval = tmp.NegotiatedParameters;
3731 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3732 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3733 mpt->mpt_dev_page0[tgt] = tmp;
3734 } else {
3735 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3736 oval = mpt->mpt_port_page0.Capabilities;
3737 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3738 pval = mpt->mpt_port_page0.Capabilities;
3739 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3740 }
3741
3742 spi->valid = 0;
3743 scsi->valid = 0;
3744 spi->flags = 0;
3745 scsi->flags = 0;
3746 spi->sync_offset = oval;
3747 spi->sync_period = pval;
3748 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3749 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3750 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3751 if (dval & DP_WIDE) {
3752 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3753 } else {
3754 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3755 }
3756 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3757 scsi->valid = CTS_SCSI_VALID_TQ;
3758 if (dval & DP_TQING_ENABLE) {
3759 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3760 }
3761 spi->valid |= CTS_SPI_VALID_DISC;
3762 if (dval & DP_DISC_ENABLE) {
3763 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3764 }
3765 }
3766
3767 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3768 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3769 IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3770 return (0);
3771 }
3772
3773 static void
3774 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3775 {
3776 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3777
3778 ptr = &mpt->mpt_dev_page1[tgt];
3779 if (onoff) {
3780 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3781 } else {
3782 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3783 }
3784 }
3785
3786 static void
3787 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3788 {
3789 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3790
3791 ptr = &mpt->mpt_dev_page1[tgt];
3792 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3793 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3794 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3795 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3796 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3797 if (period == 0) {
3798 return;
3799 }
3800 ptr->RequestedParameters |=
3801 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3802 ptr->RequestedParameters |=
3803 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
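	/*
	 * The cutoffs below follow the SPI sync period factor encoding:
	 * a factor below 0xa (12.5ns or faster, i.e. Ultra160 and up)
	 * requires DT clocking, and a factor below 0x9 (6.25ns, Ultra320)
	 * additionally requires QAS and information-unit (IU) transfers.
	 */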
3804 if (period < 0xa) {
3805 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3806 }
3807 if (period < 0x9) {
3808 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3809 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3810 }
3811 }
3812
3813 static int
3814 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3815 {
3816 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3817 int rv;
3818
3819 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3820 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3821 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3822 tmp = mpt->mpt_dev_page1[tgt];
3823 host2mpt_config_page_scsi_device_1(&tmp);
3824 rv = mpt_write_cur_cfg_page(mpt, tgt,
3825 &tmp.Header, sizeof(tmp), FALSE, 5000);
3826 if (rv) {
3827 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3828 return (-1);
3829 }
3830 return (0);
3831 }
3832
3833 /****************************** Timeout Recovery ******************************/
3834 static int
3835 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3836 {
3837 int error;
3838
3839 error = kproc_create(mpt_recovery_thread, mpt,
3840 &mpt->recovery_thread, /*flags*/0,
3841 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3842 return (error);
3843 }
3844
3845 static void
3846 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3847 {
3848
3849 if (mpt->recovery_thread == NULL) {
3850 return;
3851 }
3852 mpt->shutdwn_recovery = 1;
3853 wakeup(mpt);
3854 /*
3855 * Sleep on a slightly different location
3856 * for this interlock just for added safety.
3857 */
3858 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3859 }
3860
3861 static void
3862 mpt_recovery_thread(void *arg)
3863 {
3864 struct mpt_softc *mpt;
3865
3866 mpt = (struct mpt_softc *)arg;
3867 MPT_LOCK(mpt);
3868 for (;;) {
3869 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3870 if (mpt->shutdwn_recovery == 0) {
3871 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3872 }
3873 }
3874 if (mpt->shutdwn_recovery != 0) {
3875 break;
3876 }
3877 mpt_recover_commands(mpt);
3878 }
3879 mpt->recovery_thread = NULL;
3880 wakeup(&mpt->recovery_thread);
3881 MPT_UNLOCK(mpt);
3882 kproc_exit(0);
3883 }
3884
3885 static int
3886 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3887 u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
3888 int sleep_ok)
3889 {
3890 MSG_SCSI_TASK_MGMT *tmf_req;
3891 int error;
3892
3893 /*
3894 * Wait for any current TMF request to complete.
3895 * We're only allowed to issue one TMF at a time.
3896 */
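	/*
	 * mpt->tmf_req is a single, dedicated request; waiting here for
	 * it to return to REQ_STATE_FREE is what serializes TMF issue.
	 */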
3897 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3898 sleep_ok, MPT_TMF_MAX_TIMEOUT);
3899 if (error != 0) {
3900 mpt_reset(mpt, TRUE);
3901 return (ETIMEDOUT);
3902 }
3903
3904 mpt_assign_serno(mpt, mpt->tmf_req);
3905 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3906
3907 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3908 memset(tmf_req, 0, sizeof(*tmf_req));
3909 tmf_req->TargetID = target;
3910 tmf_req->Bus = channel;
3911 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3912 tmf_req->TaskType = type;
3913 tmf_req->MsgFlags = flags;
3914 tmf_req->MsgContext =
3915 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3916 be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
3917 tmf_req->TaskMsgContext = abort_ctx;
3918
3919 mpt_lprt(mpt, MPT_PRT_DEBUG,
3920 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3921 mpt->tmf_req->serno, tmf_req->MsgContext);
3922 if (mpt->verbose > MPT_PRT_DEBUG) {
3923 mpt_print_request(tmf_req);
3924 }
3925
3926 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3927 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3928 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3929 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3930 if (error != MPT_OK) {
3931 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3932 mpt->tmf_req->state = REQ_STATE_FREE;
3933 mpt_reset(mpt, TRUE);
3934 }
3935 return (error);
3936 }
3937
3938 /*
3939  * When a command times out, it is placed on the request_timeout_list
3940  * and we wake our recovery thread. The MPT-Fusion architecture supports
3941  * only a single TMF operation at a time, so we serially abort/bdr, etc.,
3942  * the timed-out transactions. The next TMF is issued either by the
3943  * completion handler of the current TMF waking our recovery thread, or
3944  * the TMF timeout handler causing a hard reset. A sketch follows below.
3945  */
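#if 0
/*
 * Minimal sketch of that hand-off, for illustration only; the driver's
 * real timeout handler (mpt_timeout) lives elsewhere in this file, the
 * helper name here is hypothetical, and locking and state checks are
 * elided.
 */
static void
sketch_request_timed_out(struct mpt_softc *mpt, request_t *req)
{

	/* Move the request from the pending list to the timeout list. */
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
	/* Wake mpt_recovery_thread, which sleeps on the softc itself. */
	wakeup(mpt);
}
#endif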
3946 static void
3947 mpt_recover_commands(struct mpt_softc *mpt)
3948 {
3949 request_t *req;
3950 union ccb *ccb;
3951 int error;
3952
3953 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3954 /*
3955 * No work to do- leave.
3956 */
3957 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3958 return;
3959 }
3960
3961 /*
3962 * Flush any commands whose completion coincides with their timeout.
3963 */
3964 mpt_intr(mpt);
3965
3966 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3967 /*
3968 * The timedout commands have already
3969 * completed. This typically means
3970 * that either the timeout value was on
3971 * the hairy edge of what the device
3972 * requires or - more likely - interrupts
3973 * are not happening.
3974 */
3975 mpt_prt(mpt, "Timedout requests already complete. "
3976 "Interrupts may not be functioning.\n");
3977 mpt_enable_ints(mpt);
3978 return;
3979 }
3980
3981 /*
3982 * We have no visibility into the current state of the
3983 * controller, so attempt to abort the commands in the
3984 	 * order they timed out.  For initiator commands, we
3985 * depend on the reply handler pulling requests off
3986 * the timeout list.
3987 */
3988 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3989 uint16_t status;
3990 uint8_t response;
3991 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3992
3993 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3994 req, req->serno, hdrp->Function);
3995 ccb = req->ccb;
3996 if (ccb == NULL) {
3997 mpt_prt(mpt, "null ccb in timed out request. "
3998 "Resetting Controller.\n");
3999 mpt_reset(mpt, TRUE);
4000 continue;
4001 }
4002 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4003
4004 /*
4005 		 * Check to see whether this is an initiator command;
4006 		 * if it is not, deal with it differently.
4007 */
4008 switch (hdrp->Function) {
4009 case MPI_FUNCTION_SCSI_IO_REQUEST:
4010 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4011 break;
4012 default:
4013 /*
4014 * XXX: FIX ME: need to abort target assists...
4015 */
4016 mpt_prt(mpt, "just putting it back on the pend q\n");
4017 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4018 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4019 links);
4020 continue;
4021 }
4022
4023 error = mpt_scsi_send_tmf(mpt,
4024 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4025 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4026 htole32(req->index | scsi_io_handler_id), TRUE);
4027
4028 if (error != 0) {
4029 /*
4030 * mpt_scsi_send_tmf hard resets on failure, so no
4031 * need to do so here. Our queue should be emptied
4032 * by the hard reset.
4033 */
4034 continue;
4035 }
4036
4037 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4038 REQ_STATE_DONE, TRUE, 500);
4039
4040 status = le16toh(mpt->tmf_req->IOCStatus);
4041 response = mpt->tmf_req->ResponseCode;
4042 mpt->tmf_req->state = REQ_STATE_FREE;
4043
4044 if (error != 0) {
4045 /*
4046 			 * If we've errored out, reset the controller.
4047 */
4048 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4049 "Resetting controller\n");
4050 mpt_reset(mpt, TRUE);
4051 continue;
4052 }
4053
4054 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4055 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4056 "Resetting controller.\n", status);
4057 mpt_reset(mpt, TRUE);
4058 continue;
4059 }
4060
4061 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4062 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4063 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4064 "Resetting controller.\n", response);
4065 mpt_reset(mpt, TRUE);
4066 continue;
4067 }
4068 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4069 }
4070 }
4071
4072 /************************ Target Mode Support ****************************/
4073 static void
4074 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4075 {
4076 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4077 PTR_SGE_TRANSACTION32 tep;
4078 PTR_SGE_SIMPLE32 se;
4079 bus_addr_t paddr;
4080 uint32_t fl;
4081
4082 paddr = req->req_pbuf;
4083 paddr += MPT_RQSL(mpt);
4084
4085 fc = req->req_vbuf;
4086 memset(fc, 0, MPT_REQUEST_AREA);
4087 fc->BufferCount = 1;
4088 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4089 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4090
4091 /*
4092 * Okay, set up ELS buffer pointers. ELS buffer pointers
4093 * consist of a TE SGL element (with details length of zero)
4094 * followed by a SIMPLE SGL element which holds the address
4095 * of the buffer.
4096 */
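	/*
	 * Rough picture of the chain built below (field names from the
	 * MPI 1.x SGE definitions; shown only for orientation):
	 *
	 *	SGE_TRANSACTION32: ContextSize = 4, DetailsLength = 0
	 *	    (already zeroed by the memset above), Flags = 0,
	 *	    TransactionContext[0] = ioindex
	 *	SGE_SIMPLE32: FlagsLength = SGE flags | buffer length,
	 *	    Address = paddr (the ELS buffer after the request)
	 */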
4097
4098 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4099
4100 tep->ContextSize = 4;
4101 tep->Flags = 0;
4102 tep->TransactionContext[0] = htole32(ioindex);
4103
4104 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4105 fl =
4106 MPI_SGE_FLAGS_HOST_TO_IOC |
4107 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4108 MPI_SGE_FLAGS_LAST_ELEMENT |
4109 MPI_SGE_FLAGS_END_OF_LIST |
4110 MPI_SGE_FLAGS_END_OF_BUFFER;
4111 fl <<= MPI_SGE_FLAGS_SHIFT;
4112 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4113 se->FlagsLength = htole32(fl);
4114 se->Address = htole32((uint32_t) paddr);
4115 mpt_lprt(mpt, MPT_PRT_DEBUG,
4116 "add ELS index %d ioindex %d for %p:%u\n",
4117 req->index, ioindex, req, req->serno);
4118 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4119 ("mpt_fc_post_els: request not locked"));
4120 mpt_send_cmd(mpt, req);
4121 }
4122
4123 static void
4124 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4125 {
4126 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4127 PTR_CMD_BUFFER_DESCRIPTOR cb;
4128 bus_addr_t paddr;
4129
4130 paddr = req->req_pbuf;
4131 paddr += MPT_RQSL(mpt);
4132 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4133 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4134
4135 fc = req->req_vbuf;
4136 fc->BufferCount = 1;
4137 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4138 fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
4139 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4140
4141 cb = &fc->Buffer[0];
4142 cb->IoIndex = htole16(ioindex);
4143 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4144
4145 mpt_check_doorbell(mpt);
4146 mpt_send_cmd(mpt, req);
4147 }
4148
4149 static int
4150 mpt_add_els_buffers(struct mpt_softc *mpt)
4151 {
4152 int i;
4153
4154 if (mpt->is_fc == 0) {
4155 return (TRUE);
4156 }
4157
4158 if (mpt->els_cmds_allocated) {
4159 return (TRUE);
4160 }
4161
4162 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4163 M_DEVBUF, M_NOWAIT | M_ZERO);
4164
4165 if (mpt->els_cmd_ptrs == NULL) {
4166 return (FALSE);
4167 }
4168
4169 /*
4170 * Feed the chip some ELS buffer resources
4171 */
4172 for (i = 0; i < MPT_MAX_ELS; i++) {
4173 request_t *req = mpt_get_request(mpt, FALSE);
4174 if (req == NULL) {
4175 break;
4176 }
4177 req->state |= REQ_STATE_LOCKED;
4178 mpt->els_cmd_ptrs[i] = req;
4179 mpt_fc_post_els(mpt, req, i);
4180 }
4181
4182 if (i == 0) {
4183 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4184 free(mpt->els_cmd_ptrs, M_DEVBUF);
4185 mpt->els_cmd_ptrs = NULL;
4186 return (FALSE);
4187 }
4188 if (i != MPT_MAX_ELS) {
4189 mpt_lprt(mpt, MPT_PRT_INFO,
4190 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4191 }
4192 mpt->els_cmds_allocated = i;
4193 return(TRUE);
4194 }
4195
4196 static int
4197 mpt_add_target_commands(struct mpt_softc *mpt)
4198 {
4199 int i, max;
4200
4201 if (mpt->tgt_cmd_ptrs) {
4202 return (TRUE);
4203 }
4204
4205 max = MPT_MAX_REQUESTS(mpt) >> 1;
4206 if (max > mpt->mpt_max_tgtcmds) {
4207 max = mpt->mpt_max_tgtcmds;
4208 }
4209 mpt->tgt_cmd_ptrs =
4210 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4211 if (mpt->tgt_cmd_ptrs == NULL) {
4212 mpt_prt(mpt,
4213 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4214 return (FALSE);
4215 }
4216
4217 for (i = 0; i < max; i++) {
4218 request_t *req;
4219
4220 req = mpt_get_request(mpt, FALSE);
4221 if (req == NULL) {
4222 break;
4223 }
4224 req->state |= REQ_STATE_LOCKED;
4225 mpt->tgt_cmd_ptrs[i] = req;
4226 mpt_post_target_command(mpt, req, i);
4227 }
4228
4229 if (i == 0) {
4230 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4231 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4232 mpt->tgt_cmd_ptrs = NULL;
4233 return (FALSE);
4234 }
4235
4236 mpt->tgt_cmds_allocated = i;
4237
4238 if (i < max) {
4239 mpt_lprt(mpt, MPT_PRT_INFO,
4240 "added %d of %d target bufs\n", i, max);
4241 }
4242 return (i);
4243 }
4244
4245 static int
4246 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4247 {
4248
4249 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4250 mpt->twildcard = 1;
4251 } else if (lun >= MPT_MAX_LUNS) {
4252 return (EINVAL);
4253 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4254 return (EINVAL);
4255 }
4256 if (mpt->tenabled == 0) {
4257 if (mpt->is_fc) {
4258 (void) mpt_fc_reset_link(mpt, 0);
4259 }
4260 mpt->tenabled = 1;
4261 }
4262 if (lun == CAM_LUN_WILDCARD) {
4263 mpt->trt_wildcard.enabled = 1;
4264 } else {
4265 mpt->trt[lun].enabled = 1;
4266 }
4267 return (0);
4268 }
4269
4270 static int
4271 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4272 {
4273 int i;
4274
4275 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4276 mpt->twildcard = 0;
4277 } else if (lun >= MPT_MAX_LUNS) {
4278 return (EINVAL);
4279 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4280 return (EINVAL);
4281 }
4282 if (lun == CAM_LUN_WILDCARD) {
4283 mpt->trt_wildcard.enabled = 0;
4284 } else {
4285 mpt->trt[lun].enabled = 0;
4286 }
4287 for (i = 0; i < MPT_MAX_LUNS; i++) {
4288 if (mpt->trt[i].enabled) {
4289 break;
4290 }
4291 }
4292 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4293 if (mpt->is_fc) {
4294 (void) mpt_fc_reset_link(mpt, 0);
4295 }
4296 mpt->tenabled = 0;
4297 }
4298 return (0);
4299 }
4300
4301 /*
4302 * Called with MPT lock held
4303 */
4304 static void
4305 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4306 {
4307 struct ccb_scsiio *csio = &ccb->csio;
4308 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4309 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4310
4311 switch (tgt->state) {
4312 case TGT_STATE_IN_CAM:
4313 break;
4314 case TGT_STATE_MOVING_DATA:
4315 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4316 xpt_freeze_simq(mpt->sim, 1);
4317 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4318 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4319 xpt_done(ccb);
4320 return;
4321 default:
4322 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4323 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4324 mpt_tgt_dump_req_state(mpt, cmd_req);
4325 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4326 xpt_done(ccb);
4327 return;
4328 }
4329
4330 if (csio->dxfer_len) {
4331 bus_dmamap_callback_t *cb;
4332 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4333 request_t *req;
4334 int error;
4335
4336 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4337 ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4338
4339 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4340 if (mpt->outofbeer == 0) {
4341 mpt->outofbeer = 1;
4342 xpt_freeze_simq(mpt->sim, 1);
4343 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4344 }
4345 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4346 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4347 xpt_done(ccb);
4348 return;
4349 }
4350 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4351 if (sizeof (bus_addr_t) > 4) {
4352 cb = mpt_execute_req_a64;
4353 } else {
4354 cb = mpt_execute_req;
4355 }
4356
4357 req->ccb = ccb;
4358 ccb->ccb_h.ccb_req_ptr = req;
4359
4360 /*
4361 * Record the currently active ccb and the
4362 * request for it in our target state area.
4363 */
4364 tgt->ccb = ccb;
4365 tgt->req = req;
4366
4367 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4368 ta = req->req_vbuf;
4369
4370 if (mpt->is_sas) {
4371 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4372 cmd_req->req_vbuf;
4373 ta->QueueTag = ssp->InitiatorTag;
4374 } else if (mpt->is_spi) {
4375 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4376 cmd_req->req_vbuf;
4377 ta->QueueTag = sp->Tag;
4378 }
4379 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4380 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4381 ta->ReplyWord = htole32(tgt->reply_desc);
4382 be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));
4383
4384 ta->RelativeOffset = tgt->bytes_xfered;
4385 ta->DataLength = ccb->csio.dxfer_len;
4386 if (ta->DataLength > tgt->resid) {
4387 ta->DataLength = tgt->resid;
4388 }
4389
4390 /*
4391 * XXX Should be done after data transfer completes?
4392 */
4393 csio->resid = csio->dxfer_len - ta->DataLength;
4394 tgt->resid -= csio->dxfer_len;
4395 tgt->bytes_xfered += csio->dxfer_len;
4396
4397 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4398 ta->TargetAssistFlags |=
4399 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4400 }
4401
4402 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4403 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4404 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4405 ta->TargetAssistFlags |=
4406 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4407 }
4408 #endif
4409 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4410
4411 mpt_lprt(mpt, MPT_PRT_DEBUG,
4412 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4413 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4414 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4415
4416 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4417 cb, req, 0);
4418 if (error == EINPROGRESS) {
4419 xpt_freeze_simq(mpt->sim, 1);
4420 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4421 }
4422 } else {
4423 /*
4424 * XXX: I don't know why this seems to happen, but
4425 * XXX: completing the CCB seems to make things happy.
4426 * XXX: This seems to happen if the initiator requests
4427 * XXX: enough data that we have to do multiple CTIOs.
4428 */
4429 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4430 mpt_lprt(mpt, MPT_PRT_DEBUG,
4431 "Meaningless STATUS CCB (%p): flags %x status %x "
4432 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4433 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4434 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4435 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4436 xpt_done(ccb);
4437 return;
4438 }
4439 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
4440 (void *)&csio->sense_data,
4441 (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
4442 csio->sense_len : 0);
4443 }
4444 }
4445
4446 static void
4447 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4448 lun_id_t lun, int send, uint8_t *data, size_t length)
4449 {
4450 mpt_tgt_state_t *tgt;
4451 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4452 SGE_SIMPLE32 *se;
4453 uint32_t flags;
4454 uint8_t *dptr;
4455 bus_addr_t pptr;
4456 request_t *req;
4457
4458 /*
4459 * We enter with resid set to the data load for the command.
4460 */
4461 tgt = MPT_TGT_STATE(mpt, cmd_req);
4462 if (length == 0 || tgt->resid == 0) {
4463 tgt->resid = 0;
4464 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
4465 return;
4466 }
4467
4468 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4469 mpt_prt(mpt, "out of resources- dropping local response\n");
4470 return;
4471 }
4472 tgt->is_local = 1;
4473
4474 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4475 ta = req->req_vbuf;
4476
4477 if (mpt->is_sas) {
4478 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4479 ta->QueueTag = ssp->InitiatorTag;
4480 } else if (mpt->is_spi) {
4481 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4482 ta->QueueTag = sp->Tag;
4483 }
4484 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4485 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4486 ta->ReplyWord = htole32(tgt->reply_desc);
4487 be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
4488 ta->RelativeOffset = 0;
4489 ta->DataLength = length;
4490
4491 dptr = req->req_vbuf;
4492 dptr += MPT_RQSL(mpt);
4493 pptr = req->req_pbuf;
4494 pptr += MPT_RQSL(mpt);
4495 memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4496
4497 se = (SGE_SIMPLE32 *) &ta->SGL[0];
4498 	memset(se, 0, sizeof (*se));
4499
4500 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4501 if (send) {
4502 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4503 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4504 }
4505 se->Address = pptr;
4506 MPI_pSGE_SET_LENGTH(se, length);
4507 flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4508 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4509 MPI_pSGE_SET_FLAGS(se, flags);
4510
4511 tgt->ccb = NULL;
4512 tgt->req = req;
4513 tgt->resid -= length;
4514 tgt->bytes_xfered = length;
4515 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4516 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4517 #else
4518 tgt->state = TGT_STATE_MOVING_DATA;
4519 #endif
4520 mpt_send_cmd(mpt, req);
4521 }
4522
4523 /*
4524 * Abort queued up CCBs
4525 */
4526 static cam_status
4527 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4528 {
4529 struct mpt_hdr_stailq *lp;
4530 struct ccb_hdr *srch;
4531 union ccb *accb = ccb->cab.abort_ccb;
4532 tgt_resource_t *trtp;
4533 mpt_tgt_state_t *tgt;
4534 request_t *req;
4535 uint32_t tag;
4536
4537 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4538 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
4539 trtp = &mpt->trt_wildcard;
4540 else
4541 trtp = &mpt->trt[ccb->ccb_h.target_lun];
4542 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4543 lp = &trtp->atios;
4544 tag = accb->atio.tag_id;
4545 } else {
4546 lp = &trtp->inots;
4547 tag = accb->cin1.tag_id;
4548 }
4549
4550 /* Search the CCB among queued. */
4551 STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4552 if (srch != &accb->ccb_h)
4553 continue;
4554 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4555 accb->ccb_h.status = CAM_REQ_ABORTED;
4556 xpt_done(accb);
4557 return (CAM_REQ_CMP);
4558 }
4559
4560 /* Search the CCB among running. */
4561 req = MPT_TAG_2_REQ(mpt, tag);
4562 tgt = MPT_TGT_STATE(mpt, req);
4563 if (tgt->tag_id == tag) {
4564 mpt_abort_target_cmd(mpt, req);
4565 return (CAM_REQ_CMP);
4566 }
4567
4568 return (CAM_UA_ABORT);
4569 }
4570
4571 /*
4572 * Ask the MPT to abort the current target command
4573 */
4574 static int
4575 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4576 {
4577 int error;
4578 request_t *req;
4579 PTR_MSG_TARGET_MODE_ABORT abtp;
4580
4581 req = mpt_get_request(mpt, FALSE);
4582 if (req == NULL) {
4583 return (-1);
4584 }
4585 abtp = req->req_vbuf;
4586 memset(abtp, 0, sizeof (*abtp));
4587
4588 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4589 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4590 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4591 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4592 error = 0;
4593 if (mpt->is_fc || mpt->is_sas) {
4594 mpt_send_cmd(mpt, req);
4595 } else {
4596 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4597 }
4598 return (error);
4599 }
4600
4601 /*
4602 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4603 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4604 * FC929 to set bogus FC_RSP fields (nonzero residuals
4605 * but w/o RESID fields set). This causes QLogic initiators
4606 * to think maybe that a frame was lost.
4607 *
4608 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4609 * we use allocated requests to do TARGET_ASSIST and we
4610 * need to know when to release them.
4611 */
4612
4613 static void
4614 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4615 uint8_t status, uint8_t const *sense_data, u_int sense_len)
4616 {
4617 uint8_t *cmd_vbuf;
4618 mpt_tgt_state_t *tgt;
4619 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4620 request_t *req;
4621 bus_addr_t paddr;
4622 int resplen = 0;
4623 uint32_t fl;
4624
4625 cmd_vbuf = cmd_req->req_vbuf;
4626 cmd_vbuf += MPT_RQSL(mpt);
4627 tgt = MPT_TGT_STATE(mpt, cmd_req);
4628
4629 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4630 if (mpt->outofbeer == 0) {
4631 mpt->outofbeer = 1;
4632 xpt_freeze_simq(mpt->sim, 1);
4633 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4634 }
4635 if (ccb) {
4636 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4637 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4638 xpt_done(ccb);
4639 } else {
4640 mpt_prt(mpt,
4641 "could not allocate status request- dropping\n");
4642 }
4643 return;
4644 }
4645 req->ccb = ccb;
4646 if (ccb) {
4647 ccb->ccb_h.ccb_mpt_ptr = mpt;
4648 ccb->ccb_h.ccb_req_ptr = req;
4649 }
4650
4651 /*
4652 * Record the currently active ccb, if any, and the
4653 * request for it in our target state area.
4654 */
4655 tgt->ccb = ccb;
4656 tgt->req = req;
4657 tgt->state = TGT_STATE_SENDING_STATUS;
4658
4659 tp = req->req_vbuf;
4660 paddr = req->req_pbuf;
4661 paddr += MPT_RQSL(mpt);
4662
4663 memset(tp, 0, sizeof (*tp));
4664 tp->StatusCode = status;
4665 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4666 if (mpt->is_fc) {
4667 PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4668 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4669 uint8_t *sts_vbuf;
4670 uint32_t *rsp;
4671
4672 sts_vbuf = req->req_vbuf;
4673 sts_vbuf += MPT_RQSL(mpt);
4674 rsp = (uint32_t *) sts_vbuf;
4675 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4676
4677 /*
4678 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4679 * It has to be big-endian in memory and is organized
4680 * in 32 bit words, which are much easier to deal with
4681 * as words which are swizzled as needed.
4682 *
4683 * All we're filling here is the FC_RSP payload.
4684 * We may just have the chip synthesize it if
4685 * we have no residual and an OK status.
4686 *
4687 */
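		/*
		 * For orientation, the 32-bit words filled in below map
		 * onto the FCP_RSP payload roughly as follows (per FCP):
		 *	rsp[2]	FCP flags (RESID_OVER, RESID_UNDER,
		 *		SNS_LEN_VALID) and the SCSI status byte
		 *	rsp[3]	FCP_RESID (residual count)
		 *	rsp[4]	FCP_SNS_LEN (sense data length)
		 *	rsp[6]	start of sense data (hence the 24 byte
		 *		minimum response size)
		 */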
4688 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4689
4690 rsp[2] = htobe32(status);
4691 #define MIN_FCP_RESPONSE_SIZE 24
4692 #ifndef WE_TRUST_AUTO_GOOD_STATUS
4693 resplen = MIN_FCP_RESPONSE_SIZE;
4694 #endif
4695 if (tgt->resid < 0) {
4696 			rsp[2] |= htobe32(0x400); /* FCP_RESID_OVER */
4697 rsp[3] = htobe32(-tgt->resid);
4698 resplen = MIN_FCP_RESPONSE_SIZE;
4699 } else if (tgt->resid > 0) {
4700 			rsp[2] |= htobe32(0x800); /* FCP_RESID_UNDER */
4701 rsp[3] = htobe32(tgt->resid);
4702 resplen = MIN_FCP_RESPONSE_SIZE;
4703 }
4704 if (sense_len > 0) {
4705 			rsp[2] |= htobe32(0x200); /* FCP_SNS_LEN_VALID */
4706 rsp[4] = htobe32(sense_len);
4707 memcpy(&rsp[6], sense_data, sense_len);
4708 resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
4709 }
4710 } else if (mpt->is_sas) {
4711 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4712 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4713 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4714 } else {
4715 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4716 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4717 tp->QueueTag = htole16(sp->Tag);
4718 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4719 }
4720
4721 tp->ReplyWord = htole32(tgt->reply_desc);
4722 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4723
4724 #ifdef WE_CAN_USE_AUTO_REPOST
4725 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4726 #endif
4727 if (status == SCSI_STATUS_OK && resplen == 0) {
4728 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4729 } else {
4730 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4731 fl = MPI_SGE_FLAGS_HOST_TO_IOC |
4732 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4733 MPI_SGE_FLAGS_LAST_ELEMENT |
4734 MPI_SGE_FLAGS_END_OF_LIST |
4735 MPI_SGE_FLAGS_END_OF_BUFFER;
4736 fl <<= MPI_SGE_FLAGS_SHIFT;
4737 fl |= resplen;
4738 tp->StatusDataSGE.FlagsLength = htole32(fl);
4739 }
4740
4741 mpt_lprt(mpt, MPT_PRT_DEBUG,
4742 "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
4743 ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
4744 req, req->serno, tgt->resid);
4745 if (mpt->verbose > MPT_PRT_DEBUG)
4746 mpt_print_request(req->req_vbuf);
4747 if (ccb) {
4748 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4749 mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
4750 }
4751 mpt_send_cmd(mpt, req);
4752 }
4753
4754 static void
4755 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4756 tgt_resource_t *trtp, int init_id)
4757 {
4758 struct ccb_immediate_notify *inot;
4759 mpt_tgt_state_t *tgt;
4760
4761 tgt = MPT_TGT_STATE(mpt, req);
4762 inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4763 if (inot == NULL) {
4764 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4765 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
4766 return;
4767 }
4768 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4769 mpt_lprt(mpt, MPT_PRT_DEBUG1,
4770 "Get FREE INOT %p lun %jx\n", inot,
4771 (uintmax_t)inot->ccb_h.target_lun);
4772
4773 inot->initiator_id = init_id; /* XXX */
4774 inot->tag_id = tgt->tag_id;
4775 inot->seq_id = 0;
4776 /*
4777 * This is a somewhat grotesque attempt to map from task management
4778 * to old style SCSI messages. God help us all.
4779 */
4780 switch (fc) {
4781 case MPT_QUERY_TASK_SET:
4782 inot->arg = MSG_QUERY_TASK_SET;
4783 break;
4784 case MPT_ABORT_TASK_SET:
4785 inot->arg = MSG_ABORT_TASK_SET;
4786 break;
4787 case MPT_CLEAR_TASK_SET:
4788 inot->arg = MSG_CLEAR_TASK_SET;
4789 break;
4790 case MPT_QUERY_ASYNC_EVENT:
4791 inot->arg = MSG_QUERY_ASYNC_EVENT;
4792 break;
4793 case MPT_LOGICAL_UNIT_RESET:
4794 inot->arg = MSG_LOGICAL_UNIT_RESET;
4795 break;
4796 case MPT_TARGET_RESET:
4797 inot->arg = MSG_TARGET_RESET;
4798 break;
4799 case MPT_CLEAR_ACA:
4800 inot->arg = MSG_CLEAR_ACA;
4801 break;
4802 default:
4803 inot->arg = MSG_NOOP;
4804 break;
4805 }
4806 tgt->ccb = (union ccb *) inot;
4807 inot->ccb_h.status = CAM_MESSAGE_RECV;
4808 xpt_done((union ccb *)inot);
4809 }
4810
4811 static void
4812 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4813 {
4814 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4815 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4816 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
4817 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I',
4818 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V',
4819 		'0', '0', '0', '1'
4820 };
4821 struct ccb_accept_tio *atiop;
4822 lun_id_t lun;
4823 int tag_action = 0;
4824 mpt_tgt_state_t *tgt;
4825 tgt_resource_t *trtp = NULL;
4826 U8 *lunptr;
4827 U8 *vbuf;
4828 U16 ioindex;
4829 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4830 uint8_t *cdbp;
4831
4832 /*
4833 * Stash info for the current command where we can get at it later.
4834 */
4835 vbuf = req->req_vbuf;
4836 vbuf += MPT_RQSL(mpt);
4837 if (mpt->verbose >= MPT_PRT_DEBUG) {
4838 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4839 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4840 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4841 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4842 }
4843
4844 /*
4845 * Get our state pointer set up.
4846 */
4847 tgt = MPT_TGT_STATE(mpt, req);
4848 if (tgt->state != TGT_STATE_LOADED) {
4849 mpt_tgt_dump_req_state(mpt, req);
4850 panic("bad target state in mpt_scsi_tgt_atio");
4851 }
4852 memset(tgt, 0, sizeof (mpt_tgt_state_t));
4853 tgt->state = TGT_STATE_IN_CAM;
4854 tgt->reply_desc = reply_desc;
4855 ioindex = GET_IO_INDEX(reply_desc);
4856
4857 /*
4858 * The tag we construct here allows us to find the
4859 * original request that the command came in with.
4860 *
4861 * This way we don't have to depend on anything but the
4862 * tag to find things when CCBs show back up from CAM.
4863 */
4864 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
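	/*
	 * The inverse mapping is MPT_TAG_2_REQ(); mpt_target_start_io()
	 * and mpt_abort_target_ccb(), for example, use it to recover
	 * this request from a CCB's tag_id.
	 */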
4865
4866 if (mpt->is_fc) {
4867 PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4868 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4869 if (fc->FcpCntl[2]) {
4870 /*
4871 * Task Management Request
4872 */
4873 switch (fc->FcpCntl[2]) {
4874 case 0x1:
4875 fct = MPT_QUERY_TASK_SET;
4876 break;
4877 case 0x2:
4878 fct = MPT_ABORT_TASK_SET;
4879 break;
4880 case 0x4:
4881 fct = MPT_CLEAR_TASK_SET;
4882 break;
4883 case 0x8:
4884 fct = MPT_QUERY_ASYNC_EVENT;
4885 break;
4886 case 0x10:
4887 fct = MPT_LOGICAL_UNIT_RESET;
4888 break;
4889 case 0x20:
4890 fct = MPT_TARGET_RESET;
4891 break;
4892 case 0x40:
4893 fct = MPT_CLEAR_ACA;
4894 break;
4895 default:
4896 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4897 fc->FcpCntl[2]);
4898 mpt_scsi_tgt_status(mpt, NULL, req,
4899 SCSI_STATUS_OK, NULL, 0);
4900 return;
4901 }
4902 } else {
4903 switch (fc->FcpCntl[1]) {
4904 case 0:
4905 tag_action = MSG_SIMPLE_Q_TAG;
4906 break;
4907 case 1:
4908 tag_action = MSG_HEAD_OF_Q_TAG;
4909 break;
4910 case 2:
4911 tag_action = MSG_ORDERED_Q_TAG;
4912 break;
4913 default:
4914 /*
4915 				 * Bah. Ignore Untagged Queueing and ACA
4916 */
4917 tag_action = MSG_SIMPLE_Q_TAG;
4918 break;
4919 }
4920 }
4921 tgt->resid = be32toh(fc->FcpDl);
4922 cdbp = fc->FcpCdb;
4923 lunptr = fc->FcpLun;
4924 tgt->itag = fc->OptionalOxid;
4925 } else if (mpt->is_sas) {
4926 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4927 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4928 cdbp = ssp->CDB;
4929 lunptr = ssp->LogicalUnitNumber;
4930 tgt->itag = ssp->InitiatorTag;
4931 } else {
4932 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4933 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4934 cdbp = sp->CDB;
4935 lunptr = sp->LogicalUnitNumber;
4936 tgt->itag = sp->Tag;
4937 }
4938
4939 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));
4940
4941 /*
4942 * Deal with non-enabled or bad luns here.
4943 */
4944 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4945 mpt->trt[lun].enabled == 0) {
4946 if (mpt->twildcard) {
4947 trtp = &mpt->trt_wildcard;
4948 } else if (fct == MPT_NIL_TMT_VALUE) {
4949 /*
4950 * In this case, we haven't got an upstream listener
4951 * for either a specific lun or wildcard luns. We
4952 * have to make some sensible response. For regular
4953 * inquiry, just return some NOT HERE inquiry data.
4954 * For VPD inquiry, report illegal field in cdb.
4955 * For REQUEST SENSE, just return NO SENSE data.
4956 * REPORT LUNS gets illegal command.
4957 * All other commands get 'no such device'.
4958 */
4959 uint8_t sense[MPT_SENSE_SIZE];
4960 size_t len;
4961
4962 memset(sense, 0, sizeof(sense));
4963 sense[0] = 0xf0;
4964 sense[2] = 0x5;
4965 sense[7] = 0x8;
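			/*
			 * That is fixed-format sense data per SPC: 0xf0 is
			 * "current error" with the valid bit set, 0x5 is the
			 * ILLEGAL REQUEST sense key, and 0x8 is the additional
			 * sense length.  The ASC/ASCQ bytes (sense[12] and
			 * sense[13]) are filled in per command below.
			 */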
4966
4967 switch (cdbp[0]) {
4968 case INQUIRY:
4969 {
4970 if (cdbp[1] != 0) {
4971 sense[12] = 0x26;
4972 sense[13] = 0x01;
4973 break;
4974 }
4975 len = min(tgt->resid, cdbp[4]);
4976 len = min(len, sizeof (null_iqd));
4977 mpt_lprt(mpt, MPT_PRT_DEBUG,
4978 "local inquiry %ld bytes\n", (long) len);
4979 mpt_scsi_tgt_local(mpt, req, lun, 1,
4980 null_iqd, len);
4981 return;
4982 }
4983 case REQUEST_SENSE:
4984 {
4985 sense[2] = 0x0;
4986 len = min(tgt->resid, cdbp[4]);
4987 len = min(len, sizeof (sense));
4988 mpt_lprt(mpt, MPT_PRT_DEBUG,
4989 "local reqsense %ld bytes\n", (long) len);
4990 mpt_scsi_tgt_local(mpt, req, lun, 1,
4991 sense, len);
4992 return;
4993 }
4994 case REPORT_LUNS:
4995 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
4996 sense[12] = 0x26;
4997 				break;
4998 default:
4999 mpt_lprt(mpt, MPT_PRT_DEBUG,
5000 "CMD 0x%x to unmanaged lun %jx\n",
5001 cdbp[0], (uintmax_t)lun);
5002 sense[12] = 0x25;
5003 break;
5004 }
5005 mpt_scsi_tgt_status(mpt, NULL, req,
5006 SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
5007 return;
5008 }
5009 /* otherwise, leave trtp NULL */
5010 } else {
5011 trtp = &mpt->trt[lun];
5012 }
5013
5014 /*
5015 * Deal with any task management
5016 */
5017 if (fct != MPT_NIL_TMT_VALUE) {
5018 if (trtp == NULL) {
5019 mpt_prt(mpt, "task mgmt function %x but no listener\n",
5020 fct);
5021 mpt_scsi_tgt_status(mpt, NULL, req,
5022 SCSI_STATUS_OK, NULL, 0);
5023 } else {
5024 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5025 GET_INITIATOR_INDEX(reply_desc));
5026 }
5027 return;
5028 }
5029
5030 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5031 if (atiop == NULL) {
5032 mpt_lprt(mpt, MPT_PRT_WARN,
5033 "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
5034 mpt->tenabled? "QUEUE FULL" : "BUSY");
5035 mpt_scsi_tgt_status(mpt, NULL, req,
5036 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5037 NULL, 0);
5038 return;
5039 }
5040 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5041 mpt_lprt(mpt, MPT_PRT_DEBUG1,
5042 "Get FREE ATIO %p lun %jx\n", atiop,
5043 (uintmax_t)atiop->ccb_h.target_lun);
5044 atiop->ccb_h.ccb_mpt_ptr = mpt;
5045 atiop->ccb_h.status = CAM_CDB_RECVD;
5046 atiop->ccb_h.target_lun = lun;
5047 atiop->sense_len = 0;
5048 atiop->tag_id = tgt->tag_id;
5049 atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5050 atiop->cdb_len = 16;
5051 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5052 if (tag_action) {
5053 atiop->tag_action = tag_action;
5054 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5055 }
5056 if (mpt->verbose >= MPT_PRT_DEBUG) {
5057 int i;
5058 mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
5059 (uintmax_t)atiop->ccb_h.target_lun);
5060 for (i = 0; i < atiop->cdb_len; i++) {
5061 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5062 (i == (atiop->cdb_len - 1))? '>' : ' ');
5063 }
5064 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5065 tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
5066 }
5067
5068 xpt_done((union ccb *)atiop);
5069 }
5070
5071 static void
5072 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5073 {
5074 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5075
5076 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5077 "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
5078 tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
5079 tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
5080 }
5081
5082 static void
5083 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5084 {
5085
5086 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5087 req->index, req->index, req->state);
5088 mpt_tgt_dump_tgt_state(mpt, req);
5089 }
5090
5091 static int
5092 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5093 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5094 {
5095 int dbg;
5096 union ccb *ccb;
5097 U16 status;
5098
5099 if (reply_frame == NULL) {
5100 /*
5101 * Figure out what the state of the command is.
5102 */
5103 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5104
5105 #ifdef INVARIANTS
5106 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5107 if (tgt->req) {
5108 mpt_req_not_spcl(mpt, tgt->req,
5109 "turbo scsi_tgt_reply associated req", __LINE__);
5110 }
5111 #endif
5112 switch(tgt->state) {
5113 case TGT_STATE_LOADED:
5114 /*
5115 * This is a new command starting.
5116 */
5117 mpt_scsi_tgt_atio(mpt, req, reply_desc);
5118 break;
5119 case TGT_STATE_MOVING_DATA:
5120 {
5121 ccb = tgt->ccb;
5122 if (tgt->req == NULL) {
5123 panic("mpt: turbo target reply with null "
5124 "associated request moving data");
5125 /* NOTREACHED */
5126 }
5127 if (ccb == NULL) {
5128 if (tgt->is_local == 0) {
5129 panic("mpt: turbo target reply with "
5130 "null associated ccb moving data");
5131 /* NOTREACHED */
5132 }
5133 mpt_lprt(mpt, MPT_PRT_DEBUG,
5134 "TARGET_ASSIST local done\n");
5135 TAILQ_REMOVE(&mpt->request_pending_list,
5136 tgt->req, links);
5137 mpt_free_request(mpt, tgt->req);
5138 tgt->req = NULL;
5139 mpt_scsi_tgt_status(mpt, NULL, req,
5140 0, NULL, 0);
5141 return (TRUE);
5142 }
5143 tgt->ccb = NULL;
5144 tgt->nxfers++;
5145 mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5146 mpt_lprt(mpt, MPT_PRT_DEBUG,
5147 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5148 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5149 /*
5150 * Free the Target Assist Request
5151 */
5152 KASSERT(tgt->req->ccb == ccb,
5153 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5154 tgt->req->serno, tgt->req->ccb));
5155 TAILQ_REMOVE(&mpt->request_pending_list,
5156 tgt->req, links);
5157 mpt_free_request(mpt, tgt->req);
5158 tgt->req = NULL;
5159
5160 /*
5161 * Do we need to send status now? That is, are
5162 * we done with all our data transfers?
5163 */
5164 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5165 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5166 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5167 KASSERT(ccb->ccb_h.status,
5168 ("zero ccb sts at %d", __LINE__));
5169 tgt->state = TGT_STATE_IN_CAM;
5170 if (mpt->outofbeer) {
5171 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5172 mpt->outofbeer = 0;
5173 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5174 }
5175 xpt_done(ccb);
5176 break;
5177 }
5178 /*
5179 * Otherwise, send status (and sense)
5180 */
5181 mpt_scsi_tgt_status(mpt, ccb, req,
5182 ccb->csio.scsi_status,
5183 (void *)&ccb->csio.sense_data,
5184 (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
5185 ccb->csio.sense_len : 0);
5186 break;
5187 }
5188 case TGT_STATE_SENDING_STATUS:
5189 case TGT_STATE_MOVING_DATA_AND_STATUS:
5190 {
5191 int ioindex;
5192 ccb = tgt->ccb;
5193
5194 if (tgt->req == NULL) {
5195 panic("mpt: turbo target reply with null "
5196 "associated request sending status");
5197 /* NOTREACHED */
5198 }
5199
5200 if (ccb) {
5201 tgt->ccb = NULL;
5202 if (tgt->state ==
5203 TGT_STATE_MOVING_DATA_AND_STATUS) {
5204 tgt->nxfers++;
5205 }
5206 mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5207 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5208 ccb->ccb_h.status |= CAM_SENT_SENSE;
5209 }
5210 mpt_lprt(mpt, MPT_PRT_DEBUG,
5211 "TARGET_STATUS tag %x sts %x flgs %x req "
5212 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5213 ccb->ccb_h.flags, tgt->req);
5214 /*
5215 * Free the Target Send Status Request
5216 */
5217 KASSERT(tgt->req->ccb == ccb,
5218 ("tgt->req %p:%u tgt->req->ccb %p",
5219 tgt->req, tgt->req->serno, tgt->req->ccb));
5220 /*
5221 * Notify CAM that we're done
5222 */
5223 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5224 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5225 KASSERT(ccb->ccb_h.status,
5226 ("ZERO ccb sts at %d", __LINE__));
5227 tgt->ccb = NULL;
5228 } else {
5229 mpt_lprt(mpt, MPT_PRT_DEBUG,
5230 "TARGET_STATUS non-CAM for req %p:%u\n",
5231 tgt->req, tgt->req->serno);
5232 }
5233 TAILQ_REMOVE(&mpt->request_pending_list,
5234 tgt->req, links);
5235 mpt_free_request(mpt, tgt->req);
5236 tgt->req = NULL;
5237
5238 /*
5239 * And re-post the Command Buffer.
5240 * This will reset the state.
5241 */
5242 ioindex = GET_IO_INDEX(reply_desc);
5243 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5244 tgt->is_local = 0;
5245 mpt_post_target_command(mpt, req, ioindex);
5246
5247 /*
5248 * And post a done for anyone who cares
5249 */
5250 if (ccb) {
5251 if (mpt->outofbeer) {
5252 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5253 mpt->outofbeer = 0;
5254 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5255 }
5256 xpt_done(ccb);
5257 }
5258 break;
5259 }
5260 case TGT_STATE_NIL: /* XXX This Never Happens XXX */
5261 tgt->state = TGT_STATE_LOADED;
5262 break;
5263 default:
5264 mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5265 "Reply Function\n", tgt->state);
5266 }
5267 return (TRUE);
5268 }
5269
5270 status = le16toh(reply_frame->IOCStatus);
5271 if (status != MPI_IOCSTATUS_SUCCESS) {
5272 dbg = MPT_PRT_ERROR;
5273 } else {
5274 dbg = MPT_PRT_DEBUG1;
5275 }
5276
5277 mpt_lprt(mpt, dbg,
5278 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5279 req, req->serno, reply_frame, reply_frame->Function, status);
5280
5281 switch (reply_frame->Function) {
5282 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5283 {
5284 mpt_tgt_state_t *tgt;
5285 #ifdef INVARIANTS
5286 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5287 #endif
5288 if (status != MPI_IOCSTATUS_SUCCESS) {
5289 /*
5290 * XXX What to do?
5291 */
5292 break;
5293 }
5294 tgt = MPT_TGT_STATE(mpt, req);
5295 KASSERT(tgt->state == TGT_STATE_LOADING,
5296 ("bad state 0x%x on reply to buffer post", tgt->state));
5297 mpt_assign_serno(mpt, req);
5298 tgt->state = TGT_STATE_LOADED;
5299 break;
5300 }
5301 case MPI_FUNCTION_TARGET_ASSIST:
5302 #ifdef INVARIANTS
5303 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5304 #endif
5305 mpt_prt(mpt, "target assist completion\n");
5306 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5307 mpt_free_request(mpt, req);
5308 break;
5309 case MPI_FUNCTION_TARGET_STATUS_SEND:
5310 #ifdef INVARIANTS
5311 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5312 #endif
5313 mpt_prt(mpt, "status send completion\n");
5314 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5315 mpt_free_request(mpt, req);
5316 break;
5317 case MPI_FUNCTION_TARGET_MODE_ABORT:
5318 {
5319 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5320 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5321 PTR_MSG_TARGET_MODE_ABORT abtp =
5322 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5323 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5324 #ifdef INVARIANTS
5325 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5326 #endif
5327 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5328 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5329 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5330 mpt_free_request(mpt, req);
5331 break;
5332 }
5333 default:
5334 mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5335 "0x%x\n", reply_frame->Function);
5336 break;
5337 }
5338 return (TRUE);
5339 }