1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 1997-2009 by Matthew Jacob
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
32 */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <dev/isp/isp_freebsd.h>
37 #include <sys/unistd.h>
38 #include <sys/kthread.h>
39 #include <sys/conf.h>
40 #include <sys/module.h>
41 #include <sys/ioccom.h>
42 #include <dev/isp/isp_ioctl.h>
43 #include <sys/devicestat.h>
44 #include <cam/cam_periph.h>
45 #include <cam/cam_xpt_periph.h>
46
47 MODULE_VERSION(isp, 1);
48 MODULE_DEPEND(isp, cam, 1, 1, 1);
49 int isp_announced = 0;
50 int isp_loop_down_limit = 60; /* default loop down limit */
51 int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */
52 int isp_gone_device_time = 30; /* grace time before reporting device lost */
53 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s";
54
55 static void isp_freeze_loopdown(ispsoftc_t *, int);
56 static void isp_loop_changed(ispsoftc_t *isp, int chan);
57 static void isp_rq_check_above(ispsoftc_t *);
58 static void isp_rq_check_below(ispsoftc_t *);
59 static d_ioctl_t ispioctl;
60 static void isp_poll(struct cam_sim *);
61 static callout_func_t isp_watchdog;
62 static callout_func_t isp_gdt;
63 static task_fn_t isp_gdt_task;
64 static void isp_kthread(void *);
65 static void isp_action(struct cam_sim *, union ccb *);
66 static int isp_timer_count;
67 static void isp_timer(void *);
68
69 static struct cdevsw isp_cdevsw = {
70 .d_version = D_VERSION,
71 .d_ioctl = ispioctl,
72 .d_name = "isp",
73 };
74
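/*
 * Sysctl handler for the per-channel "role" node.  Reads return the current
 * role; writes may only toggle the initiator role (target mode cannot be
 * switched from here), and any real change is passed to the core code via
 * ISPCTL_CHANGE_ROLE under the ISP lock.
 */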
75 static int
76 isp_role_sysctl(SYSCTL_HANDLER_ARGS)
77 {
78 ispsoftc_t *isp = (ispsoftc_t *)arg1;
79 int chan = arg2;
80 int error, old, value;
81
82 value = FCPARAM(isp, chan)->role;
83
84 error = sysctl_handle_int(oidp, &value, 0, req);
85 if ((error != 0) || (req->newptr == NULL))
86 return (error);
87
88 if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH)
89 return (EINVAL);
90
91 ISP_LOCK(isp);
92 old = FCPARAM(isp, chan)->role;
93
94 /* We don't allow target mode switch from here. */
95 value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR);
96
97 /* If nothing has changed -- we are done. */
98 if (value == old) {
99 ISP_UNLOCK(isp);
100 return (0);
101 }
102
103 /* Actually change the role. */
104 error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value);
105 ISP_UNLOCK(isp);
106 return (error);
107 }
108
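/*
 * Attach one Fibre Channel channel: allocate and register a CAM SIM and a
 * wildcard path, initialize the per-channel state (including the target
 * mode pools when ISP_TARGET_MODE is compiled in), start the channel's
 * kernel thread, and create the channel's sysctl nodes.
 */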
109 static int
110 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
111 {
112 fcparam *fcp = FCPARAM(isp, chan);
113 struct isp_fc *fc = ISP_FC_PC(isp, chan);
114 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
115 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
116 char name[16];
117 struct cam_sim *sim;
118 struct cam_path *path;
119 #ifdef ISP_TARGET_MODE
120 int i;
121 #endif
122
123 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
124 device_get_unit(isp->isp_dev), &isp->isp_lock,
125 isp->isp_maxcmds, isp->isp_maxcmds, devq);
126 if (sim == NULL)
127 return (ENOMEM);
128
129 if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
130 cam_sim_free(sim, FALSE);
131 return (EIO);
132 }
133 if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
134 xpt_bus_deregister(cam_sim_path(sim));
135 cam_sim_free(sim, FALSE);
136 return (ENXIO);
137 }
138
139 ISP_LOCK(isp);
140 fc->sim = sim;
141 fc->path = path;
142 fc->isp = isp;
143 fc->ready = 1;
144 fcp->isp_use_gft_id = 1;
145 fcp->isp_use_gff_id = 1;
146
147 callout_init_mtx(&fc->gdt, &isp->isp_lock, 0);
148 TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
149 #ifdef ISP_TARGET_MODE
150 TAILQ_INIT(&fc->waitq);
151 STAILQ_INIT(&fc->ntfree);
152 for (i = 0; i < ATPDPSIZE; i++)
153 STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next);
154 LIST_INIT(&fc->atfree);
155 for (i = ATPDPSIZE-1; i >= 0; i--)
156 LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next);
157 for (i = 0; i < ATPDPHASHSIZE; i++)
158 LIST_INIT(&fc->atused[i]);
159 #endif
160 isp_loop_changed(isp, chan);
161 ISP_UNLOCK(isp);
162 if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0,
163 "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
164 xpt_free_path(fc->path);
165 xpt_bus_deregister(cam_sim_path(fc->sim));
166 cam_sim_free(fc->sim, FALSE);
167 return (ENOMEM);
168 }
169 fc->num_threads += 1;
170 if (chan > 0) {
171 snprintf(name, sizeof(name), "chan%d", chan);
172 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
173 OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
174 "Virtual channel");
175 }
176 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
177 "wwnn", CTLFLAG_RD, &fcp->isp_wwnn,
178 "World Wide Node Name");
179 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
180 "wwpn", CTLFLAG_RD, &fcp->isp_wwpn,
181 "World Wide Port Name");
182 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
183 "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0,
184 "Loop Down Limit");
185 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
186 "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0,
187 "Gone Device Time");
188 #if defined(ISP_TARGET_MODE) && defined(DEBUG)
189 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
190 "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0,
191 "Cause a Lost Frame on a Read");
192 #endif
193 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
194 "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
195 isp, chan, isp_role_sysctl, "I", "Current role");
196 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
197 "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0,
198 "Connection speed in gigabits");
199 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
200 "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0,
201 "Link state");
202 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
203 "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0,
204 "Firmware state");
205 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
206 "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0,
207 "Loop state");
208 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
209 "topo", CTLFLAG_RD, &fcp->isp_topo, 0,
210 "Connection topology");
211 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
212 "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0,
213 "Use GFT_ID during fabric scan");
214 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
215 "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0,
216 "Use GFF_ID during fabric scan");
217 return (0);
218 }
219
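/*
 * Tear down one channel: free the CAM path, deregister the bus, free the
 * SIM, then wake the channel's worker thread and wait for it to exit
 * (the caller has already set is_exiting).
 */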
220 static void
221 isp_detach_chan(ispsoftc_t *isp, int chan)
222 {
223 struct isp_fc *fc = ISP_FC_PC(isp, chan);
224
225 xpt_free_path(fc->path);
226 xpt_bus_deregister(cam_sim_path(fc->sim));
227 cam_sim_free(fc->sim, FALSE);
228
229 /* Wait for the channel's spawned threads to exit. */
230 wakeup(fc);
231 while (fc->num_threads != 0)
232 mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0);
233 }
234
235 int
236 isp_attach(ispsoftc_t *isp)
237 {
238 const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
239 int du = device_get_unit(isp->isp_dev);
240 int chan;
241
242 /*
243 * Create the device queue for our SIM(s).
244 */
245 isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
246 if (isp->isp_osinfo.devq == NULL) {
247 return (EIO);
248 }
249
250 for (chan = 0; chan < isp->isp_nchan; chan++) {
251 if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
252 goto unwind;
253 }
254 }
255
256 callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0);
257 isp_timer_count = hz >> 2;
258 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
259
260 isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
261 if (isp->isp_osinfo.cdev) {
262 isp->isp_osinfo.cdev->si_drv1 = isp;
263 }
264 return (0);
265
266 unwind:
267 ISP_LOCK(isp);
268 isp->isp_osinfo.is_exiting = 1;
269 while (--chan >= 0)
270 isp_detach_chan(isp, chan);
271 ISP_UNLOCK(isp);
272 cam_simq_free(isp->isp_osinfo.devq);
273 isp->isp_osinfo.devq = NULL;
274 return (-1);
275 }
276
277 int
278 isp_detach(ispsoftc_t *isp)
279 {
280 int chan;
281
282 if (isp->isp_osinfo.cdev) {
283 destroy_dev(isp->isp_osinfo.cdev);
284 isp->isp_osinfo.cdev = NULL;
285 }
286 ISP_LOCK(isp);
287 /* Tell spawned threads that we're exiting. */
288 isp->isp_osinfo.is_exiting = 1;
289 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1)
290 isp_detach_chan(isp, chan);
291 ISP_UNLOCK(isp);
292 callout_drain(&isp->isp_osinfo.tmo);
293 cam_simq_free(isp->isp_osinfo.devq);
294 return (0);
295 }
296
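/*
 * Freeze the channel's SIM queue while the loop is down so that new commands
 * are held in CAM instead of being failed.  The matching unfreeze below
 * releases the queue (and the boot hold) once the LOOPDOWN reason is cleared
 * and no other freeze reasons remain.
 */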
297 static void
298 isp_freeze_loopdown(ispsoftc_t *isp, int chan)
299 {
300 struct isp_fc *fc = ISP_FC_PC(isp, chan);
301
302 if (fc->sim == NULL)
303 return;
304 if (fc->simqfrozen == 0) {
305 isp_prt(isp, ISP_LOGDEBUG0,
306 "Chan %d Freeze simq (loopdown)", chan);
307 fc->simqfrozen = SIMQFRZ_LOOPDOWN;
308 xpt_hold_boot();
309 xpt_freeze_simq(fc->sim, 1);
310 } else {
311 isp_prt(isp, ISP_LOGDEBUG0,
312 "Chan %d Mark simq frozen (loopdown)", chan);
313 fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
314 }
315 }
316
317 static void
318 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
319 {
320 struct isp_fc *fc = ISP_FC_PC(isp, chan);
321
322 if (fc->sim == NULL)
323 return;
324 int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
325 fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
326 if (wasfrozen && fc->simqfrozen == 0) {
327 isp_prt(isp, ISP_LOGDEBUG0,
328 "Chan %d Release simq", chan);
329 xpt_release_simq(fc->sim, 1);
330 xpt_release_boot();
331 }
332 }
333
334 /*
335 * Functions to protect against request queue overflow by freezing the SIM queue.
336 * XXX: we freeze only one arbitrary SIM, since all SIMs share the request queue.
337 */
338 static void
339 isp_rq_check_above(ispsoftc_t *isp)
340 {
341 struct isp_fc *fc = ISP_FC_PC(isp, 0);
342
343 if (isp->isp_rqovf || fc->sim == NULL)
344 return;
345 if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
346 xpt_freeze_simq(fc->sim, 1);
347 isp->isp_rqovf = 1;
348 }
349 }
350
351 static void
352 isp_rq_check_below(ispsoftc_t *isp)
353 {
354 struct isp_fc *fc = ISP_FC_PC(isp, 0);
355
356 if (!isp->isp_rqovf || fc->sim == NULL)
357 return;
358 if (isp_rqentry_avail(isp, QENTRY_MAX)) {
359 xpt_release_simq(fc->sim, 0);
360 isp->isp_rqovf = 0;
361 }
362 }
363
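/*
 * ioctl entry point for the /dev/isp* control device.  Implements the
 * driver-private commands from isp_ioctl.h: debug level, role get/set,
 * HBA reset, loop rescan, LIP, port database and HBA queries, and task
 * management functions.
 */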
364 static int
365 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
366 {
367 ispsoftc_t *isp;
368 int nr, chan, retval = ENOTTY;
369
370 isp = dev->si_drv1;
371
372 switch (c) {
373 case ISP_SDBLEV:
374 {
375 int olddblev = isp->isp_dblev;
376 isp->isp_dblev = *(int *)addr;
377 *(int *)addr = olddblev;
378 retval = 0;
379 break;
380 }
381 case ISP_GETROLE:
382 chan = *(int *)addr;
383 if (chan < 0 || chan >= isp->isp_nchan) {
384 retval = ENXIO;
385 break;
386 }
387 *(int *)addr = FCPARAM(isp, chan)->role;
388 retval = 0;
389 break;
390 case ISP_SETROLE:
391 nr = *(int *)addr;
392 chan = nr >> 8;
393 if (chan < 0 || chan >= isp->isp_nchan) {
394 retval = ENXIO;
395 break;
396 }
397 nr &= 0xff;
398 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
399 retval = EINVAL;
400 break;
401 }
402 ISP_LOCK(isp);
403 *(int *)addr = FCPARAM(isp, chan)->role;
404 retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr);
405 ISP_UNLOCK(isp);
406 break;
407
408 case ISP_RESETHBA:
409 ISP_LOCK(isp);
410 isp_reinit(isp, 0);
411 ISP_UNLOCK(isp);
412 retval = 0;
413 break;
414
415 case ISP_RESCAN:
416 chan = *(intptr_t *)addr;
417 if (chan < 0 || chan >= isp->isp_nchan) {
418 retval = ENXIO;
419 break;
420 }
421 ISP_LOCK(isp);
422 if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) {
423 retval = EIO;
424 } else {
425 retval = 0;
426 }
427 ISP_UNLOCK(isp);
428 break;
429
430 case ISP_FC_LIP:
431 chan = *(intptr_t *)addr;
432 if (chan < 0 || chan >= isp->isp_nchan) {
433 retval = ENXIO;
434 break;
435 }
436 ISP_LOCK(isp);
437 if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {
438 retval = EIO;
439 } else {
440 retval = 0;
441 }
442 ISP_UNLOCK(isp);
443 break;
444 case ISP_FC_GETDINFO:
445 {
446 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
447 fcportdb_t *lp;
448
449 if (ifc->loopid >= MAX_FC_TARG) {
450 retval = EINVAL;
451 break;
452 }
453 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
454 if (lp->state != FC_PORTDB_STATE_NIL) {
455 ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
456 ifc->loopid = lp->handle;
457 ifc->portid = lp->portid;
458 ifc->node_wwn = lp->node_wwn;
459 ifc->port_wwn = lp->port_wwn;
460 retval = 0;
461 } else {
462 retval = ENODEV;
463 }
464 break;
465 }
466 case ISP_FC_GETHINFO:
467 {
468 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
469 int chan = hba->fc_channel;
470
471 if (chan < 0 || chan >= isp->isp_nchan) {
472 retval = ENXIO;
473 break;
474 }
475 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
476 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
477 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
478 hba->fc_nchannels = isp->isp_nchan;
479 hba->fc_nports = MAX_FC_TARG;
480 hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
481 hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
482 hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
483 hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
484 hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
485 hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
486 hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;
487 retval = 0;
488 break;
489 }
490 case ISP_TSK_MGMT:
491 {
492 int needmarker;
493 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
494 uint16_t nphdl;
495 isp24xx_tmf_t tmf;
496 isp24xx_statusreq_t sp;
497 fcparam *fcp;
498 fcportdb_t *lp;
499 int i;
500
501 chan = fct->chan;
502 if (chan < 0 || chan >= isp->isp_nchan) {
503 retval = ENXIO;
504 break;
505 }
506
507 needmarker = retval = 0;
508 nphdl = fct->loopid;
509 ISP_LOCK(isp);
510 fcp = FCPARAM(isp, chan);
511
512 for (i = 0; i < MAX_FC_TARG; i++) {
513 lp = &fcp->portdb[i];
514 if (lp->handle == nphdl) {
515 break;
516 }
517 }
518 if (i == MAX_FC_TARG) {
519 retval = ENXIO;
520 ISP_UNLOCK(isp);
521 break;
522 }
523 ISP_MEMZERO(&tmf, sizeof(tmf));
524 tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
525 tmf.tmf_header.rqs_entry_count = 1;
526 tmf.tmf_nphdl = lp->handle;
527 tmf.tmf_delay = 2;
528 tmf.tmf_timeout = 4;
529 tmf.tmf_tidlo = lp->portid;
530 tmf.tmf_tidhi = lp->portid >> 16;
531 tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan);
532 tmf.tmf_lun[1] = fct->lun & 0xff;
533 if (fct->lun >= 256) {
534 tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8);
535 }
536 switch (fct->action) {
537 case IPT_CLEAR_ACA:
538 tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA;
539 break;
540 case IPT_TARGET_RESET:
541 tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET;
542 needmarker = 1;
543 break;
544 case IPT_LUN_RESET:
545 tmf.tmf_flags = ISP24XX_TMF_LUN_RESET;
546 needmarker = 1;
547 break;
548 case IPT_CLEAR_TASK_SET:
549 tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
550 needmarker = 1;
551 break;
552 case IPT_ABORT_TASK_SET:
553 tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;
554 needmarker = 1;
555 break;
556 default:
557 retval = EINVAL;
558 break;
559 }
560 if (retval) {
561 ISP_UNLOCK(isp);
562 break;
563 }
564
565 retval = isp_exec_entry_queue(isp, &tmf, &sp, 5);
566 if (retval != 0) {
567 isp_prt(isp, ISP_LOGERR, "%s: TMF of chan %d error %d",
568 __func__, chan, retval);
569 ISP_UNLOCK(isp);
570 break;
571 }
572
573 if (sp.req_completion_status != 0)
574 retval = EIO;
575 else if (needmarker)
576 fcp->sendmarker = 1;
577 ISP_UNLOCK(isp);
578 break;
579 }
580 default:
581 break;
582 }
583 return (retval);
584 }
585
586 /*
587 * Local Inlines
588 */
589
590 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
591 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
592
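/*
 * Allocate and free per-command (pcmd) structures for CCBs from the softc's
 * free list.  isp_get_pcmd() returns -1 if the list is empty, in which case
 * the caller must defer the command.
 */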
593 static ISP_INLINE int
594 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
595 {
596 ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
597 if (ISP_PCMD(ccb) == NULL) {
598 return (-1);
599 }
600 isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
601 return (0);
602 }
603
604 static ISP_INLINE void
605 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
606 {
607 if (ISP_PCMD(ccb)) {
608 #ifdef ISP_TARGET_MODE
609 PISP_PCMD(ccb)->datalen = 0;
610 #endif
611 PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free;
612 isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
613 ISP_PCMD(ccb) = NULL;
614 }
615 }
616
617 /*
618 * The target mode functions are placed here because some of them are inlines.
619 */
620 #ifdef ISP_TARGET_MODE
621 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
622 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t);
623 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
624 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
625 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
626 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
627 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *);
628 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *);
629 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *);
630 static void isp_enable_lun(ispsoftc_t *, union ccb *);
631 static void isp_disable_lun(ispsoftc_t *, union ccb *);
632 static callout_func_t isp_refire_notify_ack;
633 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
634 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
635 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
636 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
637 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *);
638 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
639 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
640 static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t);
641
642 static ISP_INLINE tstate_t *
643 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
644 {
645 struct isp_fc *fc = ISP_FC_PC(isp, bus);
646 tstate_t *tptr;
647
648 SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) {
649 if (tptr->ts_lun == lun)
650 return (tptr);
651 }
652 return (NULL);
653 }
654
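/*
 * Replay ATIOs that were queued on the lun's restart queue because we were
 * out of resources.  Returns nonzero if the restart queue is still non-empty
 * afterwards, i.e. we are still resource starved.
 */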
655 static int
656 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr)
657 {
658 inot_private_data_t *ntp;
659 struct ntpdlist rq;
660
661 if (STAILQ_EMPTY(&tptr->restart_queue))
662 return (0);
663 STAILQ_INIT(&rq);
664 STAILQ_CONCAT(&rq, &tptr->restart_queue);
665 while ((ntp = STAILQ_FIRST(&rq)) != NULL) {
666 STAILQ_REMOVE_HEAD(&rq, next);
667 isp_prt(isp, ISP_LOGTDEBUG0,
668 "%s: restarting resrc deprived %x", __func__,
669 ((at7_entry_t *)ntp->data)->at_rxid);
670 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data);
671 isp_put_ntpd(isp, bus, ntp);
672 if (!STAILQ_EMPTY(&tptr->restart_queue))
673 break;
674 }
675 if (!STAILQ_EMPTY(&rq)) {
676 STAILQ_CONCAT(&rq, &tptr->restart_queue);
677 STAILQ_CONCAT(&tptr->restart_queue, &rq);
678 }
679 return (!STAILQ_EMPTY(&tptr->restart_queue));
680 }
681
682 static void
683 isp_tmcmd_restart(ispsoftc_t *isp)
684 {
685 struct isp_fc *fc;
686 tstate_t *tptr;
687 union ccb *ccb;
688 int bus, i;
689
690 for (bus = 0; bus < isp->isp_nchan; bus++) {
691 fc = ISP_FC_PC(isp, bus);
692 for (i = 0; i < LUN_HASH_SIZE; i++) {
693 SLIST_FOREACH(tptr, &fc->lun_hash[i], next)
694 isp_atio_restart(isp, bus, tptr);
695 }
696
697 /*
698 * We only need to do this once per channel.
699 */
700 ccb = (union ccb *)TAILQ_FIRST(&fc->waitq);
701 if (ccb != NULL) {
702 TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe);
703 isp_target_start_ctio(isp, ccb, FROM_TIMER);
704 }
705 }
706 isp_rq_check_above(isp);
707 isp_rq_check_below(isp);
708 }
709
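/*
 * ATIO private data (atpd) management.  Each active exchange gets an
 * atio_private_data_t keyed by its tag; in-use entries live on a small
 * hash of lists for lookup and go back on the per-channel free list when
 * the exchange completes.
 */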
710 static atio_private_data_t *
711 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
712 {
713 struct isp_fc *fc = ISP_FC_PC(isp, chan);
714 atio_private_data_t *atp;
715
716 atp = LIST_FIRST(&fc->atfree);
717 if (atp) {
718 LIST_REMOVE(atp, next);
719 atp->tag = tag;
720 LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next);
721 }
722 return (atp);
723 }
724
725 static atio_private_data_t *
726 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
727 {
728 struct isp_fc *fc = ISP_FC_PC(isp, chan);
729 atio_private_data_t *atp;
730
731 LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
732 if (atp->tag == tag)
733 return (atp);
734 }
735 return (NULL);
736 }
737
738 static void
739 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
740 {
741 struct isp_fc *fc = ISP_FC_PC(isp, chan);
742
743 if (atp->ests)
744 isp_put_ecmd(isp, atp->ests);
745 LIST_REMOVE(atp, next);
746 memset(atp, 0, sizeof (*atp));
747 LIST_INSERT_HEAD(&fc->atfree, atp, next);
748 }
749
750 static void
751 isp_dump_atpd(ispsoftc_t *isp, int chan)
752 {
753 struct isp_fc *fc = ISP_FC_PC(isp, chan);
754 atio_private_data_t *atp;
755 const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };
756
757 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
758 if (atp->state == ATPD_STATE_FREE)
759 continue;
760 isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s",
761 chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]);
762 }
763 }
764
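/*
 * Immediate notify private data (ntpd) management.  These entries buffer
 * notify and deferred ATIO queue entries; they come from a per-channel
 * free list and are looked up by (tag_id, seq_id).
 */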
765 static inot_private_data_t *
766 isp_get_ntpd(ispsoftc_t *isp, int chan)
767 {
768 struct isp_fc *fc = ISP_FC_PC(isp, chan);
769 inot_private_data_t *ntp;
770
771 ntp = STAILQ_FIRST(&fc->ntfree);
772 if (ntp)
773 STAILQ_REMOVE_HEAD(&fc->ntfree, next);
774 return (ntp);
775 }
776
777 static inot_private_data_t *
778 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id)
779 {
780 struct isp_fc *fc = ISP_FC_PC(isp, chan);
781 inot_private_data_t *ntp;
782
783 for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) {
784 if (ntp->tag_id == tag_id && ntp->seq_id == seq_id)
785 return (ntp);
786 }
787 return (NULL);
788 }
789
790 static void
791 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp)
792 {
793 struct isp_fc *fc = ISP_FC_PC(isp, chan);
794
795 ntp->tag_id = ntp->seq_id = 0;
796 STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next);
797 }
798
799 static tstate_t *
800 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path)
801 {
802 struct isp_fc *fc = ISP_FC_PC(isp, bus);
803 lun_id_t lun;
804 tstate_t *tptr;
805
806 lun = xpt_path_lun_id(path);
807 tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
808 if (tptr == NULL)
809 return (NULL);
810 tptr->ts_lun = lun;
811 SLIST_INIT(&tptr->atios);
812 SLIST_INIT(&tptr->inots);
813 STAILQ_INIT(&tptr->restart_queue);
814 SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next);
815 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
816 return (tptr);
817 }
818
819 static void
820 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr)
821 {
822 struct isp_fc *fc = ISP_FC_PC(isp, bus);
823 union ccb *ccb;
824 inot_private_data_t *ntp;
825
826 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) {
827 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
828 ccb->ccb_h.status = CAM_REQ_ABORTED;
829 xpt_done(ccb);
830 }
831 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) {
832 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
833 ccb->ccb_h.status = CAM_REQ_ABORTED;
834 xpt_done(ccb);
835 }
836 while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) {
837 isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0);
838 STAILQ_REMOVE_HEAD(&tptr->restart_queue, next);
839 isp_put_ntpd(isp, bus, ntp);
840 }
841 SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next);
842 free(tptr, M_DEVBUF);
843 }
844
845 static void
846 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
847 {
848 tstate_t *tptr;
849 int bus = XS_CHANNEL(ccb);
850 target_id_t target = ccb->ccb_h.target_id;
851 lun_id_t lun = ccb->ccb_h.target_lun;
852
853 /*
854 * We only support target and lun both being wildcards
855 * or both being non-wildcards.
856 */
857 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
858 "enabling lun %jx\n", (uintmax_t)lun);
859 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
860 ccb->ccb_h.status = CAM_LUN_INVALID;
861 xpt_done(ccb);
862 return;
863 }
864
865 /* Create the state pointer. It should not already exist. */
866 tptr = get_lun_statep(isp, bus, lun);
867 if (tptr) {
868 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
869 xpt_done(ccb);
870 return;
871 }
872 tptr = create_lun_state(isp, bus, ccb->ccb_h.path);
873 if (tptr == NULL) {
874 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
875 xpt_done(ccb);
876 return;
877 }
878
879 ccb->ccb_h.status = CAM_REQ_CMP;
880 xpt_done(ccb);
881 }
882
883 static void
884 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
885 {
886 tstate_t *tptr;
887 int bus = XS_CHANNEL(ccb);
888 target_id_t target = ccb->ccb_h.target_id;
889 lun_id_t lun = ccb->ccb_h.target_lun;
890
891 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
892 "disabling lun %jx\n", (uintmax_t)lun);
893 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
894 ccb->ccb_h.status = CAM_LUN_INVALID;
895 xpt_done(ccb);
896 return;
897 }
898
899 /* Find the state pointer. */
900 if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
901 ccb->ccb_h.status = CAM_PATH_INVALID;
902 xpt_done(ccb);
903 return;
904 }
905
906 destroy_lun_state(isp, bus, tptr);
907 ccb->ccb_h.status = CAM_REQ_CMP;
908 xpt_done(ccb);
909 }
910
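/*
 * Build and issue CTIOs for target mode CCBs.  New CCBs from CAM go to the
 * tail of the per-channel wait queue; retries (from the timer, SRR handling
 * or CTIO completion) go to the head.  Each CCB becomes a CTIO7 in mode 0
 * (data), mode 1 (status with inline sense) or mode 2 (status with an
 * external FCP response IU), subject to the per-exchange CTIO limit.
 */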
911 static void
912 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
913 {
914 int fctape, sendstatus, resid;
915 fcparam *fcp;
916 atio_private_data_t *atp;
917 struct ccb_scsiio *cso;
918 struct isp_ccbq *waitq;
919 uint32_t dmaresult, handle, xfrlen, sense_length, tmp;
920 ct7_entry_t local, *cto = &local;
921
922 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len,
923 (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0));
924
925 waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq;
926 switch (how) {
927 case FROM_CAM:
928 /*
929 * Insert at the tail of the list of waiting CTIO CCBs, if any.
930 */
931 TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe);
932 break;
933 case FROM_TIMER:
934 case FROM_SRR:
935 case FROM_CTIO_DONE:
936 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
937 break;
938 }
939
940 while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) {
941 TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
942
943 cso = &ccb->csio;
944 xfrlen = cso->dxfer_len;
945 if (xfrlen == 0) {
946 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
947 ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
948 ccb->ccb_h.status = CAM_REQ_INVALID;
949 xpt_done(ccb);
950 continue;
951 }
952 }
953
954 atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id);
955 if (atp == NULL) {
956 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__);
957 isp_dump_atpd(isp, XS_CHANNEL(ccb));
958 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
959 xpt_done(ccb);
960 continue;
961 }
962
963 /*
964 * Is this command a dead duck?
965 */
966 if (atp->dead) {
967 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id);
968 ccb->ccb_h.status = CAM_REQ_ABORTED;
969 xpt_done(ccb);
970 continue;
971 }
972
973 /*
974 * Check to make sure we're still in target mode.
975 */
976 fcp = FCPARAM(isp, XS_CHANNEL(ccb));
977 if ((fcp->role & ISP_ROLE_TARGET) == 0) {
978 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id);
979 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
980 xpt_done(ccb);
981 continue;
982 }
983
984 /*
985 * We only handle up to ATPD_CCB_OUTSTANDING outstanding CCBs at a time (one of
986 * which may be split into two CTIOs, one for data and one for status).
987 */
988 if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) {
989 isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags);
990 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
991 break;
992 }
993
994 /*
995 * Does the initiator expect FC-Tape style responses?
996 */
997 if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) {
998 fctape = 1;
999 } else {
1000 fctape = 0;
1001 }
1002
1003 /*
1004 * If we already did the data xfer portion of a CTIO that sends data
1005 * and status, don't do it again and do the status portion now.
1006 */
1007 if (atp->sendst) {
1008 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
1009 cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
1010 xfrlen = 0; /* we already did the data transfer */
1011 atp->sendst = 0;
1012 }
1013 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1014 sendstatus = 1;
1015 } else {
1016 sendstatus = 0;
1017 }
1018
1019 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1020 KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?"));
1021 /*
1022 * Sense length is not the entire sense data structure size. Periph
1023 * drivers don't seem to be setting sense_len to reflect the actual
1024 * size. We'll peek inside to get the right amount.
1025 */
1026 sense_length = cso->sense_len;
1027
1028 /*
1029 * This 'cannot' happen
1030 */
1031 if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) {
1032 sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE;
1033 }
1034 } else {
1035 sense_length = 0;
1036 }
1037
1038 /*
1039 * Check for overflow
1040 */
1041 tmp = atp->bytes_xfered + atp->bytes_in_transit;
1042 if (xfrlen > 0 && tmp > atp->orig_datalen) {
1043 isp_prt(isp, ISP_LOGERR,
1044 "%s: [0x%x] data overflow by %u bytes", __func__,
1045 cso->tag_id, tmp + xfrlen - atp->orig_datalen);
1046 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1047 xpt_done(ccb);
1048 continue;
1049 }
1050 if (xfrlen > atp->orig_datalen - tmp) {
1051 xfrlen = atp->orig_datalen - tmp;
1052 if (xfrlen == 0 && !sendstatus) {
1053 cso->resid = cso->dxfer_len;
1054 ccb->ccb_h.status = CAM_REQ_CMP;
1055 xpt_done(ccb);
1056 continue;
1057 }
1058 }
1059
1060 memset(cto, 0, QENTRY_LEN);
1061 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1062 cto->ct_header.rqs_entry_count = 1;
1063 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
1064 ATPD_SET_SEQNO(cto, atp);
1065 cto->ct_nphdl = atp->nphdl;
1066 cto->ct_rxid = atp->tag;
1067 cto->ct_iid_lo = atp->sid;
1068 cto->ct_iid_hi = atp->sid >> 16;
1069 cto->ct_oxid = atp->oxid;
1070 cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
1071 cto->ct_timeout = XS_TIME(ccb);
1072 cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;
1073
1074 /*
1075 * Mode 1, status, no data. Only possible when we are sending status, have
1076 * no data to transfer, and any sense data can fit into a ct7_entry_t.
1077 *
1078 * Mode 2, status, no data. We have to use this in the case that
1079 * the sense data won't fit into a ct7_entry_t.
1080 *
1081 */
1082 if (sendstatus && xfrlen == 0) {
1083 cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA;
1084 resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit;
1085 if (sense_length <= MAXRESPLEN_24XX) {
1086 cto->ct_flags |= CT7_FLAG_MODE1;
1087 cto->ct_scsi_status = cso->scsi_status;
1088 if (resid < 0) {
1089 cto->ct_resid = -resid;
1090 cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
1091 } else if (resid > 0) {
1092 cto->ct_resid = resid;
1093 cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
1094 }
1095 if (fctape) {
1096 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1097 }
1098 if (sense_length) {
1099 cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
1100 cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length;
1101 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length);
1102 }
1103 } else {
1104 bus_addr_t addr;
1105 fcp_rsp_iu_t rp;
1106
1107 if (atp->ests == NULL) {
1108 atp->ests = isp_get_ecmd(isp);
1109 if (atp->ests == NULL) {
1110 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1111 break;
1112 }
1113 }
1114 memset(&rp, 0, sizeof(rp));
1115 if (fctape) {
1116 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1117 rp.fcp_rsp_bits |= FCP_CONF_REQ;
1118 }
1119 cto->ct_flags |= CT7_FLAG_MODE2;
1120 rp.fcp_rsp_scsi_status = cso->scsi_status;
1121 if (resid < 0) {
1122 rp.fcp_rsp_resid = -resid;
1123 rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW;
1124 } else if (resid > 0) {
1125 rp.fcp_rsp_resid = resid;
1126 rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW;
1127 }
1128 if (sense_length) {
1129 rp.fcp_rsp_snslen = sense_length;
1130 cto->ct_senselen = sense_length;
1131 rp.fcp_rsp_bits |= FCP_SNSLEN_VALID;
1132 isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1133 memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length);
1134 } else {
1135 isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1136 }
1137 if (isp->isp_dblev & ISP_LOGTDEBUG1) {
1138 isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests);
1139 }
1140 bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE);
1141 addr = isp->isp_osinfo.ecmd_dma;
1142 addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE);
1143 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
1144 (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
1145 cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
1146 cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr);
1147 cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr);
1148 cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
1149 }
1150 if (sense_length) {
1151 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__,
1152 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length,
1153 cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]);
1154 } else {
1155 isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__,
1156 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid);
1157 }
1158 atp->state = ATPD_STATE_LAST_CTIO;
1159 }
1160
1161 /*
1162 * Mode 0 data transfers, *possibly* with status.
1163 */
1164 if (xfrlen != 0) {
1165 cto->ct_flags |= CT7_FLAG_MODE0;
1166 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1167 cto->ct_flags |= CT7_DATA_IN;
1168 } else {
1169 cto->ct_flags |= CT7_DATA_OUT;
1170 }
1171
1172 cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit;
1173 cto->rsp.m0.ct_xfrlen = xfrlen;
1174
1175 #ifdef DEBUG
1176 if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) {
1177 isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2));
1178 ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0;
1179 cto->rsp.m0.ct_xfrlen -= xfrlen >> 2;
1180 }
1181 #endif
1182 if (sendstatus) {
1183 resid = atp->orig_datalen - atp->bytes_xfered - xfrlen;
1184 if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) {
1185 cto->ct_flags |= CT7_SENDSTATUS;
1186 atp->state = ATPD_STATE_LAST_CTIO;
1187 if (fctape) {
1188 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1189 }
1190 } else {
1191 atp->sendst = 1; /* send status later */
1192 cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM;
1193 atp->state = ATPD_STATE_CTIO;
1194 }
1195 } else {
1196 atp->state = ATPD_STATE_CTIO;
1197 }
1198 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__,
1199 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered);
1200 }
1201
1202 if (isp_get_pcmd(isp, ccb)) {
1203 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n");
1204 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1205 break;
1206 }
1207 handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET);
1208 if (handle == 0) {
1209 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
1210 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1211 isp_free_pcmd(isp, ccb);
1212 break;
1213 }
1214 atp->bytes_in_transit += xfrlen;
1215 PISP_PCMD(ccb)->datalen = xfrlen;
1216
1217 /*
1218 * Call the dma setup routines for this entry (and any subsequent
1219 * CTIOs) if there's data to move, and then tell the f/w it's got
1220 * new things to play with. As with isp_start's usage of DMA setup,
1221 * any swizzling is done in the machine dependent layer. Because
1222 * of this, we put the request onto the queue area first in native
1223 * format.
1224 */
1225 cto->ct_syshandle = handle;
1226 dmaresult = ISP_DMASETUP(isp, cso, cto);
1227 if (dmaresult != 0) {
1228 isp_destroy_handle(isp, handle);
1229 isp_free_pcmd(isp, ccb);
1230 if (dmaresult == CMD_EAGAIN) {
1231 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1232 break;
1233 }
1234 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1235 xpt_done(ccb);
1236 continue;
1237 }
1238 ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
1239 if (xfrlen) {
1240 ccb->ccb_h.spriv_field0 = atp->bytes_xfered;
1241 } else {
1242 ccb->ccb_h.spriv_field0 = ~0;
1243 }
1244 atp->ctcnt++;
1245 atp->seqno++;
1246 }
1247 }
1248
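/*
 * Callout handler that retries a notify ack which previously could not be
 * queued; it reschedules itself until isp_notify_ack() succeeds.
 */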
1249 static void
1250 isp_refire_notify_ack(void *arg)
1251 {
1252 isp_tna_t *tp = arg;
1253 ispsoftc_t *isp = tp->isp;
1254
1255 ISP_ASSERT_LOCKED(isp);
1256 if (isp_notify_ack(isp, tp->not)) {
1257 callout_schedule(&tp->timer, 5);
1258 } else {
1259 free(tp, M_DEVBUF);
1260 }
1261 }
1262
1263
1264 static void
1265 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
1266 {
1267
1268 isp_rq_check_below(isp);
1269 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1270 xpt_done(ccb);
1271 }
1272
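/*
 * Handle a new ATIO (type 7) from the firmware: derive the channel from the
 * D_ID, look up the initiator in the port database, find the lun's tstate,
 * allocate an atpd for the exchange and hand the command to CAM as an
 * accept-TIO CCB.  Resource shortages park the raw ATIO on the lun's restart
 * queue (or return BUSY) for later replay.
 */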
1273 static void
1274 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
1275 {
1276 int cdbxlen;
1277 lun_id_t lun;
1278 uint16_t chan, nphdl = NIL_HANDLE;
1279 uint32_t did, sid;
1280 fcportdb_t *lp;
1281 tstate_t *tptr;
1282 struct ccb_accept_tio *atiop;
1283 atio_private_data_t *atp = NULL;
1284 atio_private_data_t *oatp;
1285 inot_private_data_t *ntp;
1286
1287 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
1288 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1289 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
1290
1291 if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
1292 /* Channel has to be derived from D_ID */
1293 isp_find_chan_by_did(isp, did, &chan);
1294 if (chan == ISP_NOCHAN) {
1295 isp_prt(isp, ISP_LOGWARN,
1296 "%s: [RX_ID 0x%x] D_ID %x not found on any channel",
1297 __func__, aep->at_rxid, did);
1298 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN,
1299 ECMD_TERMINATE, 0);
1300 return;
1301 }
1302 } else {
1303 chan = 0;
1304 }
1305
1306 /*
1307 * Find the PDB entry for this initiator
1308 */
1309 if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) {
1310 /*
1311 * If the initiator is not in the port database, terminate the exchange.
1312 */
1313 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
1314 __func__, aep->at_rxid, did, chan, sid);
1315 isp_dump_portdb(isp, chan);
1316 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
1317 return;
1318 }
1319 nphdl = lp->handle;
1320
1321 /*
1322 * Get the tstate pointer
1323 */
1324 tptr = get_lun_statep(isp, chan, lun);
1325 if (tptr == NULL) {
1326 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
1327 if (tptr == NULL) {
1328 isp_prt(isp, ISP_LOGWARN,
1329 "%s: [0x%x] no state pointer for lun %jx or wildcard",
1330 __func__, aep->at_rxid, (uintmax_t)lun);
1331 if (lun == 0) {
1332 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1333 } else {
1334 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
1335 }
1336 return;
1337 }
1338 }
1339
1340 /*
1341 * Start any commands that were waiting on resources first.
1342 */
1343 if (isp_atio_restart(isp, chan, tptr))
1344 goto noresrc;
1345
1346 /*
1347 * If the f/w is out of resources, just send a BUSY status back.
1348 */
1349 if (aep->at_rxid == AT7_NORESRC_RXID) {
1350 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1351 return;
1352 }
1353
1354 /*
1355 * If we're out of ATIO CCBs, defer this command (or send BUSY back if we can't).
1356 */
1357 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1358 if (atiop == NULL) {
1359 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
1360 goto noresrc;
1361 }
1362
1363 oatp = isp_find_atpd(isp, chan, aep->at_rxid);
1364 if (oatp) {
1365 isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 :
1366 ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle "
1367 "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d",
1368 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
1369 /*
1370 * It's not a "no resource" condition, but we can treat it like one.
1371 */
1372 goto noresrc;
1373 }
1374 atp = isp_get_atpd(isp, chan, aep->at_rxid);
1375 if (atp == NULL) {
1376 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
1377 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1378 return;
1379 }
1380 atp->word3 = lp->prli_word3;
1381 atp->state = ATPD_STATE_ATIO;
1382 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1383 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n");
1384 atiop->init_id = FC_PORTDB_TGT(isp, chan, lp);
1385 atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1386 atiop->ccb_h.target_lun = lun;
1387 atiop->sense_len = 0;
1388 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
1389 if (cdbxlen) {
1390 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
1391 }
1392 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
1393 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
1394 atiop->cdb_len = cdbxlen;
1395 atiop->ccb_h.status = CAM_CDB_RECVD;
1396 atiop->tag_id = atp->tag;
1397 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
1398 case FCP_CMND_TASK_ATTR_SIMPLE:
1399 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1400 atiop->tag_action = MSG_SIMPLE_TASK;
1401 break;
1402 case FCP_CMND_TASK_ATTR_HEAD:
1403 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1404 atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK;
1405 break;
1406 case FCP_CMND_TASK_ATTR_ORDERED:
1407 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1408 atiop->tag_action = MSG_ORDERED_TASK;
1409 break;
1410 case FCP_CMND_TASK_ATTR_ACA:
1411 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1412 atiop->tag_action = MSG_ACA_TASK;
1413 break;
1414 case FCP_CMND_TASK_ATTR_UNTAGGED:
1415 default:
1416 atiop->tag_action = 0;
1417 break;
1418 }
1419 atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute &
1420 FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT;
1421 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
1422 atp->bytes_xfered = 0;
1423 atp->lun = lun;
1424 atp->nphdl = nphdl;
1425 atp->sid = sid;
1426 atp->did = did;
1427 atp->oxid = aep->at_hdr.ox_id;
1428 atp->rxid = aep->at_hdr.rx_id;
1429 atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
1430 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
1431 atp->state = ATPD_STATE_CAM;
1432 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u",
1433 aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
1434 xpt_done((union ccb *)atiop);
1435 return;
1436 noresrc:
1437 KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__));
1438 ntp = isp_get_ntpd(isp, chan);
1439 if (ntp == NULL) {
1440 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1441 return;
1442 }
1443 memcpy(ntp->data, aep, QENTRY_LEN);
1444 STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next);
1445 }
1446
1447
1448 /*
1449 * Handle starting an SRR (sequence retransmit request).
1450 * We get here once we have received the immediate notify
1451 * and all outstanding CTIOs for this transaction have
1452 * been returned.
1453 */
1454 static void
1455 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp)
1456 {
1457 in_fcentry_24xx_t *inot;
1458 uint32_t srr_off, ccb_off, ccb_len, ccb_end;
1459 union ccb *ccb;
1460
1461 inot = (in_fcentry_24xx_t *)atp->srr;
1462 srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
1463 ccb = atp->srr_ccb;
1464 atp->srr_ccb = NULL;
1465 atp->nsrr++;
1466 if (ccb == NULL) {
1467 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag);
1468 goto fail;
1469 }
1470
1471 ccb_off = ccb->ccb_h.spriv_field0;
1472 ccb_len = ccb->csio.dxfer_len;
1473 ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len;
1474
1475 switch (inot->in_srr_iu) {
1476 case R_CTL_INFO_SOLICITED_DATA:
1477 /*
1478 * We have to restart a FCP_DATA data out transaction
1479 */
1480 atp->sendst = 0;
1481 atp->bytes_xfered = srr_off;
1482 if (ccb_len == 0) {
1483 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off);
1484 goto mdp;
1485 }
1486 if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) {
1487 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1488 goto mdp;
1489 }
1490 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1491 break;
1492 case R_CTL_INFO_COMMAND_STATUS:
1493 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag);
1494 atp->sendst = 1;
1495 /*
1496 * We have to restart a FCP_RSP IU transaction
1497 */
1498 break;
1499 case R_CTL_INFO_DATA_DESCRIPTOR:
1500 /*
1501 * We have to restart an FCP DATA in transaction
1502 */
1503 isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping");
1504 goto fail;
1505
1506 default:
1507 isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu);
1508 goto fail;
1509 }
1510
1511 /*
1512 * We can't do anything until this is acked, so we might as well ack it now.
1513 * We don't use the usual asynchronous ack path because we need to make
1514 * sure the ack gets on the wire first.
1515 */
1516 if (isp_notify_ack(isp, inot)) {
1517 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1518 goto fail;
1519 }
1520 isp_target_start_ctio(isp, ccb, FROM_SRR);
1521 return;
1522 fail:
1523 inot->in_reserved = 1;
1524 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1525 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1526 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1527 isp_complete_ctio(isp, ccb);
1528 return;
1529 mdp:
1530 if (isp_notify_ack(isp, inot)) {
1531 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1532 goto fail;
1533 }
1534 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1535 ccb->ccb_h.status |= CAM_MESSAGE_RECV;
1536 /*
1537 * This is not a strict interpretation of MDP, but it's close
1538 */
1539 ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16];
1540 ccb->csio.msg_len = 7;
1541 ccb->csio.msg_ptr[0] = MSG_EXTENDED;
1542 ccb->csio.msg_ptr[1] = 5;
1543 ccb->csio.msg_ptr[2] = 0; /* modify data pointer */
1544 ccb->csio.msg_ptr[3] = srr_off >> 24;
1545 ccb->csio.msg_ptr[4] = srr_off >> 16;
1546 ccb->csio.msg_ptr[5] = srr_off >> 8;
1547 ccb->csio.msg_ptr[6] = srr_off;
1548 isp_complete_ctio(isp, ccb);
1549 }
1550
1551
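/*
 * Handle an SRR immediate notify from the firmware: record it in the
 * exchange's adjunct and, if the matching CTIO has already come back
 * (srr_ccb is set), start the retransmission right away.
 */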
1552 static void
1553 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify)
1554 {
1555 in_fcentry_24xx_t *inot = notify->nt_lreserved;
1556 atio_private_data_t *atp;
1557 uint32_t tag = notify->nt_tagval & 0xffffffff;
1558
1559 atp = isp_find_atpd(isp, notify->nt_channel, tag);
1560 if (atp == NULL) {
1561 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify",
1562 __func__, tag);
1563 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1564 return;
1565 }
1566 atp->srr_notify_rcvd = 1;
1567 memcpy(atp->srr, inot, sizeof (atp->srr));
1568 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x",
1569 inot->in_rxid, inot->in_flags, inot->in_srr_iu,
1570 ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo);
1571 if (atp->srr_ccb)
1572 isp_handle_srr_start(isp, atp);
1573 }
1574
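/*
 * Handle a completed CTIO: recover the CCB from its handle, update the
 * exchange's transfer accounting, and then either notify CAM, start a
 * deferred status CTIO, or hand an SRR-terminated CCB to the SRR code.
 */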
1575 static void
1576 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct)
1577 {
1578 union ccb *ccb;
1579 int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0;
1580 atio_private_data_t *atp = NULL;
1581 int bus;
1582 uint32_t handle, data_requested, resid;
1583
1584 handle = ct->ct_syshandle;
1585 ccb = isp_find_xs(isp, handle);
1586 if (ccb == NULL) {
1587 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct);
1588 return;
1589 }
1590 isp_destroy_handle(isp, handle);
1591 resid = data_requested = PISP_PCMD(ccb)->datalen;
1592 isp_free_pcmd(isp, ccb);
1593
1594 bus = XS_CHANNEL(ccb);
1595 atp = isp_find_atpd(isp, bus, ct->ct_rxid);
1596 if (atp == NULL) {
1597 /*
1598 * XXX: isp_clear_commands() generates fake CTIO with zero
1599 * ct_rxid value, filling only ct_syshandle. Workaround
1600 * that using tag_id from the CCB, pointed by ct_syshandle.
1601 */
1602 atp = isp_find_atpd(isp, bus, ccb->csio.tag_id);
1603 }
1604 if (atp == NULL) {
1605 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id);
1606 return;
1607 }
1608 KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero"));
1609 atp->bytes_in_transit -= data_requested;
1610 atp->ctcnt -= 1;
1611 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1612
1613 if (ct->ct_nphdl == CT7_SRR) {
1614 atp->srr_ccb = ccb;
1615 if (atp->srr_notify_rcvd)
1616 isp_handle_srr_start(isp, atp);
1617 return;
1618 }
1619 if (ct->ct_nphdl == CT_HBA_RESET) {
1620 sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1621 (atp->sendst == 0);
1622 failure = CAM_UNREC_HBA_ERROR;
1623 } else {
1624 sentstatus = ct->ct_flags & CT7_SENDSTATUS;
1625 ok = (ct->ct_nphdl == CT7_OK);
1626 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0;
1627 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA)
1628 resid = ct->ct_resid;
1629 }
1630 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct),
1631 notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
1632 if (ok) {
1633 if (data_requested > 0) {
1634 atp->bytes_xfered += data_requested - resid;
1635 ccb->csio.resid = ccb->csio.dxfer_len -
1636 (data_requested - resid);
1637 }
1638 if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE))
1639 ccb->ccb_h.status |= CAM_SENT_SENSE;
1640 ccb->ccb_h.status |= CAM_REQ_CMP;
1641 } else {
1642 notify_cam = 1;
1643 if (failure == CAM_UNREC_HBA_ERROR)
1644 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
1645 else
1646 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1647 }
1648 atp->state = ATPD_STATE_PDON;
1649
1650 /*
1651 * We always notify CAM when there has been any error (ok == 0), so we
1652 * never need to do an ATIO putback if we're not notifying CAM.
1653 */
1654 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)",
1655 (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0);
1656 if (notify_cam == 0) {
1657 if (atp->sendst) {
1658 isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE);
1659 }
1660 return;
1661 }
1662
1663 /*
1664 * We are done with this ATIO if we successfully sent status.
1665 * In all other cases expect either another CTIO or XPT_ABORT.
1666 */
1667 if (ok && sentstatus)
1668 isp_put_atpd(isp, bus, atp);
1669
1670 /*
1671 * We're telling CAM we're done with this CTIO transaction.
1672 *
1673 * 24XX cards never need an ATIO put back.
1674 */
1675 isp_complete_ctio(isp, ccb);
1676 }
1677
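/*
 * Acknowledge a target notify.  TMFs that arrived as ATIO7 entries are
 * answered with a mode 1 status CTIO carrying the FCP response code;
 * ABTS notifies first terminate the affected exchange (if still needed)
 * and are then acknowledged via isp_acknak_abts(); anything else gets a
 * plain notify ack.
 */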
1678 static int
1679 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
1680 {
1681 ct7_entry_t local, *cto = &local;
1682
1683 if (isp->isp_state != ISP_RUNSTATE) {
1684 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
1685 return (0);
1686 }
1687
1688 /*
1689 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
1690 */
1691 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
1692 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
1693 fcportdb_t *lp;
1694 uint32_t sid;
1695 uint16_t nphdl;
1696
1697 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1698 if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) {
1699 nphdl = lp->handle;
1700 } else {
1701 nphdl = NIL_HANDLE;
1702 }
1703 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1704 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1705 cto->ct_header.rqs_entry_count = 1;
1706 cto->ct_nphdl = nphdl;
1707 cto->ct_rxid = aep->at_rxid;
1708 cto->ct_vpidx = mp->nt_channel;
1709 cto->ct_iid_lo = sid;
1710 cto->ct_iid_hi = sid >> 16;
1711 cto->ct_oxid = aep->at_hdr.ox_id;
1712 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
1713 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
1714 if (rsp != 0) {
1715 cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
1716 cto->rsp.m1.ct_resplen = 4;
1717 ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
1718 cto->rsp.m1.ct_resp[0] = rsp & 0xff;
1719 cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
1720 cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
1721 cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
1722 }
1723 return (isp_send_entry(isp, cto));
1724 }
1725
1726 /*
1727 * This case is for responding to an ABTS frame
1728 */
1729 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1730
1731 /*
1732 * Overload nt_need_ack here to mark whether we've terminated the associated command.
1733 */
1734 if (mp->nt_need_ack) {
1735 abts_t *abts = (abts_t *)mp->nt_lreserved;
1736
1737 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1738 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
1739 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1740 cto->ct_header.rqs_entry_count = 1;
1741 cto->ct_nphdl = mp->nt_nphdl;
1742 cto->ct_rxid = abts->abts_rxid_task;
1743 cto->ct_iid_lo = mp->nt_sid;
1744 cto->ct_iid_hi = mp->nt_sid >> 16;
1745 cto->ct_oxid = abts->abts_ox_id;
1746 cto->ct_vpidx = mp->nt_channel;
1747 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
1748 if (isp_send_entry(isp, cto)) {
1749 return (ENOMEM);
1750 }
1751 mp->nt_need_ack = 0;
1752 }
1753 return (isp_acknak_abts(isp, mp->nt_lreserved, 0));
1754 }
1755
1756 /*
1757 * General purpose acknowledgement
1758 */
1759 if (mp->nt_need_ack) {
1760 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
1761 /*
1762 * Don't need to use the guaranteed send because the caller can retry
1763 */
1764 return (isp_notify_ack(isp, mp->nt_lreserved));
1765 }
1766 return (0);
1767 }
1768
1769 /*
1770 * Handle task management functions.
1771 *
1772 * We show up here with a notify structure filled out.
1773 *
1774 * The nt_lreserved tag points to the original queue entry
1775 */
1776 static void
1777 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
1778 {
1779 tstate_t *tptr;
1780 fcportdb_t *lp;
1781 struct ccb_immediate_notify *inot;
1782 inot_private_data_t *ntp = NULL;
1783 atio_private_data_t *atp;
1784 lun_id_t lun;
1785
1786 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode,
1787 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
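/*
 * A LUN of LUN_ANY means the TMF addresses either the whole target
 * (TAG_ANY) or whatever LUN the referenced command was issued to.
 */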
1788 if (notify->nt_lun == LUN_ANY) {
1789 if (notify->nt_tagval == TAG_ANY) {
1790 lun = CAM_LUN_WILDCARD;
1791 } else {
1792 atp = isp_find_atpd(isp, notify->nt_channel,
1793 notify->nt_tagval & 0xffffffff);
1794 lun = atp ? atp->lun : CAM_LUN_WILDCARD;
1795 }
1796 } else {
1797 lun = notify->nt_lun;
1798 }
1799 tptr = get_lun_statep(isp, notify->nt_channel, lun);
1800 if (tptr == NULL) {
1801 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
1802 if (tptr == NULL) {
1803 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1804 goto bad;
1805 }
1806 }
1807 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
1808 if (inot == NULL) {
1809 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1810 goto bad;
1811 }
1812
1813 inot->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1814 inot->ccb_h.target_lun = lun;
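/*
 * Map the sender of this notify to a CAM initiator id; if it cannot be
 * found in the port database by port id or by handle, report a wildcard.
 */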
1815 if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 &&
1816 isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) {
1817 inot->initiator_id = CAM_TARGET_WILDCARD;
1818 } else {
1819 inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp);
1820 }
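/* nt_tagval packs the sequence id in its low 32 bits and the tag id in its high 32 bits. */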
1821 inot->seq_id = notify->nt_tagval;
1822 inot->tag_id = notify->nt_tagval >> 32;
1823
1824 switch (notify->nt_ncode) {
1825 case NT_ABORT_TASK:
1826 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id);
1827 inot->arg = MSG_ABORT_TASK;
1828 break;
1829 case NT_ABORT_TASK_SET:
1830 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY);
1831 inot->arg = MSG_ABORT_TASK_SET;
1832 break;
1833 case NT_CLEAR_ACA:
1834 inot->arg = MSG_CLEAR_ACA;
1835 break;
1836 case NT_CLEAR_TASK_SET:
1837 inot->arg = MSG_CLEAR_TASK_SET;
1838 break;
1839 case NT_LUN_RESET:
1840 inot->arg = MSG_LOGICAL_UNIT_RESET;
1841 break;
1842 case NT_TARGET_RESET:
1843 inot->arg = MSG_TARGET_RESET;
1844 break;
1845 case NT_QUERY_TASK_SET:
1846 inot->arg = MSG_QUERY_TASK_SET;
1847 break;
1848 case NT_QUERY_ASYNC_EVENT:
1849 inot->arg = MSG_QUERY_ASYNC_EVENT;
1850 break;
1851 default:
1852 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun);
1853 goto bad;
1854 }
1855
1856 ntp = isp_get_ntpd(isp, notify->nt_channel);
1857 if (ntp == NULL) {
1858 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
1859 goto bad;
1860 }
1861 ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t));
1862 if (notify->nt_lreserved) {
1863 ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN);
1864 ntp->nt.nt_lreserved = &ntp->data;
1865 }
1866 ntp->seq_id = notify->nt_tagval;
1867 ntp->tag_id = notify->nt_tagval >> 32;
1868
1869 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
1870 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n");
1871 inot->ccb_h.status = CAM_MESSAGE_RECV;
1872 xpt_done((union ccb *)inot);
1873 return;
1874 bad:
1875 if (notify->nt_need_ack) {
1876 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1877 if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) {
1878 isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK");
1879 }
1880 } else {
1881 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved);
1882 }
1883 }
1884 }
1885
1886 static void
1887 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
1888 {
1889 struct isp_fc *fc = ISP_FC_PC(isp, chan);
1890 atio_private_data_t *atp;
1891 inot_private_data_t *ntp, *tmp;
1892 uint32_t this_tag_id;
1893
1894 /*
1895 * First, clean any commands pending restart
1896 */
1897 STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
1898 this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
1899 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
1900 isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
1901 ECMD_TERMINATE, 0);
1902 isp_put_ntpd(isp, chan, ntp);
1903 STAILQ_REMOVE(&tptr->restart_queue, ntp,
1904 inot_private_data, next);
1905 }
1906 }
1907
1908 /*
1909 * Now mark other ones dead as well.
1910 */
1911 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
1912 if (atp->lun != tptr->ts_lun)
1913 continue;
1914 if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id)
1915 atp->dead = 1;
1916 }
1917 }
1918 #endif
1919
1920 static void
1921 isp_poll(struct cam_sim *sim)
1922 {
1923 ispsoftc_t *isp = cam_sim_softc(sim);
1924
1925 ISP_RUN_ISR(isp);
1926 }
1927
1929 static void
1930 isp_watchdog(void *arg)
1931 {
1932 struct ccb_scsiio *xs = arg;
1933 ispsoftc_t *isp;
1934 uint32_t ohandle = ISP_HANDLE_FREE, handle;
1935
1936 isp = XS_ISP(xs);
1937
1938 handle = isp_find_handle(isp, xs);
1939
1940 /*
1941 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
1942 */
1943 if (handle != ISP_HANDLE_FREE) {
1944 ISP_RUN_ISR(isp);
1945 ohandle = handle;
1946 handle = isp_find_handle(isp, xs);
1947 }
1948 if (handle != ISP_HANDLE_FREE) {
1949 /*
1950 * Try to make sure the command is really dead before
1951 * we release the handle (and DMA resources) for reuse.
1952 *
1953 * If we are successful in aborting the command, then
1954 * we're done here because we'll get the command returned
1955 * to us separately.
1956 */
1957 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
1958 return;
1959 }
1960
1961 /*
1962 * Note that after calling the above, the command may in
1963 * fact have been completed.
1964 */
1965 xs = isp_find_xs(isp, handle);
1966
1967 /*
1968 * If the command no longer exists, then we won't
1969 * be able to find the xs again with this handle.
1970 */
1971 if (xs == NULL) {
1972 return;
1973 }
1974
1975 /*
1976 * After this point, the command is really dead.
1977 */
1978 ISP_DMAFREE(isp, xs);
1979 isp_destroy_handle(isp, handle);
1980 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
1981 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1982 isp_done(xs);
1983 } else {
1984 if (ohandle != ISP_HANDLE_FREE) {
1985 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
1986 } else {
1987 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
1988 }
1989 }
1990 }
1991
1992 static void
1993 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
1994 {
1995 union ccb *ccb;
1996 struct isp_fc *fc = ISP_FC_PC(isp, chan);
1997
1998 /*
1999 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
2000 */
2001 ccb = xpt_alloc_ccb_nowait();
2002 if (ccb == NULL) {
2003 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
2004 return;
2005 }
2006 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim),
2007 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2008 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
2009 xpt_free_ccb(ccb);
2010 return;
2011 }
2012 xpt_rescan(ccb);
2013 }
2014
2015 static void
2016 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2017 {
2018 struct cam_path *tp;
2019 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2020
2021 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2022 xpt_async(AC_LOST_DEVICE, tp, NULL);
2023 xpt_free_path(tp);
2024 }
2025 }
2026
2027 /*
2028 * Gone Device Timer Function: once we have decided that a device has
2029 * departed, we wait a specific period of time before telling the OS that
2030 * it is gone.
2031 *
2032 * This timer function fires once a second and then scans the port database
2033 * for devices that are marked dead but still have a virtual target assigned.
2034 * We decrement a counter for that port database entry, and when it hits zero,
2035 * we tell the OS the device has gone away.
2036 */
2037 static void
2038 isp_gdt(void *arg)
2039 {
2040 struct isp_fc *fc = arg;
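/* Defer the actual port database scan to a taskqueue thread instead of doing it in callout context. */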
2041 taskqueue_enqueue(taskqueue_thread, &fc->gtask);
2042 }
2043
2044 static void
2045 isp_gdt_task(void *arg, int pending)
2046 {
2047 struct isp_fc *fc = arg;
2048 ispsoftc_t *isp = fc->isp;
2049 int chan = fc - ISP_FC_PC(isp, 0);
2050 fcportdb_t *lp;
2051 struct ac_contract ac;
2052 struct ac_device_changed *adc;
2053 int dbidx, more_to_do = 0;
2054
2055 ISP_LOCK(isp);
2056 isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
2057 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2058 lp = &FCPARAM(isp, chan)->portdb[dbidx];
2059
2060 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2061 continue;
2062 }
2063 if (lp->gone_timer != 0) {
2064 lp->gone_timer -= 1;
2065 more_to_do++;
2066 continue;
2067 }
2068 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
2069 if (lp->is_target) {
2070 lp->is_target = 0;
2071 isp_make_gone(isp, lp, chan, dbidx);
2072 }
2073 if (lp->is_initiator) {
2074 lp->is_initiator = 0;
2075 ac.contract_number = AC_CONTRACT_DEV_CHG;
2076 adc = (struct ac_device_changed *) ac.contract_data;
2077 adc->wwpn = lp->port_wwn;
2078 adc->port = lp->portid;
2079 adc->target = dbidx;
2080 adc->arrived = 0;
2081 xpt_async(AC_CONTRACT, fc->path, &ac);
2082 }
2083 lp->state = FC_PORTDB_STATE_NIL;
2084 }
2085 if (fc->ready) {
2086 if (more_to_do) {
2087 callout_reset(&fc->gdt, hz, isp_gdt, fc);
2088 } else {
2089 callout_deactivate(&fc->gdt);
2090 isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
2091 }
2092 }
2093 ISP_UNLOCK(isp);
2094 }
2095
2096 /*
2097 * When the loop goes down we remember the time and freeze the CAM command
2098 * queue. For some period of time we keep trying to reprobe the loop; if we
2099 * fail, we tell the OS that the devices have gone away and drop the freeze.
2100 *
2101 * We don't clear the devices out of our port database because, when the loop
2102 * comes back up, we have to do some actual cleanup with the chip at that
2103 * point (an implicit PLOGO, e.g., to get the chip's port database state right).
2104 */
2105 static void
2106 isp_loop_changed(ispsoftc_t *isp, int chan)
2107 {
2108 fcparam *fcp = FCPARAM(isp, chan);
2109 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2110
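/* Note only the first transition; loop_down_time is cleared again once the kthread declares the loop up or dead. */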
2111 if (fc->loop_down_time)
2112 return;
2113 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan);
2114 if (fcp->role & ISP_ROLE_INITIATOR)
2115 isp_freeze_loopdown(isp, chan);
2116 fc->loop_down_time = time_uptime;
2117 wakeup(fc);
2118 }
2119
2120 static void
2121 isp_loop_up(ispsoftc_t *isp, int chan)
2122 {
2123 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2124
2125 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan);
2126 fc->loop_seen_once = 1;
2127 fc->loop_down_time = 0;
2128 isp_unfreeze_loopdown(isp, chan);
2129 }
2130
2131 static void
2132 isp_loop_dead(ispsoftc_t *isp, int chan)
2133 {
2134 fcparam *fcp = FCPARAM(isp, chan);
2135 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2136 fcportdb_t *lp;
2137 struct ac_contract ac;
2138 struct ac_device_changed *adc;
2139 int dbidx, i;
2140
2141 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan);
2142
2143 /*
2144 * Notify the OS of all targets that we now consider to have departed.
2145 */
2146 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2147 lp = &fcp->portdb[dbidx];
2148
2149 if (lp->state == FC_PORTDB_STATE_NIL)
2150 continue;
2151
2152 for (i = 0; i < ISP_HANDLE_NUM(isp); i++) {
2153 struct ccb_scsiio *xs;
2154
2155 if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) {
2156 continue;
2157 }
2158 if ((xs = isp->isp_xflist[i].cmd) == NULL) {
2159 continue;
2160 }
2161 if (dbidx != XS_TGT(xs)) {
2162 continue;
2163 }
2164 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout",
2165 isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2166 (uintmax_t)XS_LUN(xs));
2167
2168 /*
2169 * Just like in isp_watchdog, abort the outstanding
2170 * command or immediately free its resources if it is
2171 * not active
2172 */
2173 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2174 continue;
2175 }
2176
2177 ISP_DMAFREE(isp, xs);
2178 isp_destroy_handle(isp, isp->isp_xflist[i].handle);
2179 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
2180 isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2181 (uintmax_t)XS_LUN(xs));
2182 XS_SETERR(xs, HBA_BUSRESET);
2183 isp_done(xs);
2184 }
2185
2186 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout");
2187 if (lp->is_target) {
2188 lp->is_target = 0;
2189 isp_make_gone(isp, lp, chan, dbidx);
2190 }
2191 if (lp->is_initiator) {
2192 lp->is_initiator = 0;
2193 ac.contract_number = AC_CONTRACT_DEV_CHG;
2194 adc = (struct ac_device_changed *) ac.contract_data;
2195 adc->wwpn = lp->port_wwn;
2196 adc->port = lp->portid;
2197 adc->target = dbidx;
2198 adc->arrived = 0;
2199 xpt_async(AC_CONTRACT, fc->path, &ac);
2200 }
2201 }
2202
2203 isp_unfreeze_loopdown(isp, chan);
2204 fc->loop_down_time = 0;
2205 }
2206
2207 static void
2208 isp_kthread(void *arg)
2209 {
2210 struct isp_fc *fc = arg;
2211 ispsoftc_t *isp = fc->isp;
2212 int chan = fc - ISP_FC_PC(isp, 0);
2213 int slp = 0, d;
2214 int lb, lim;
2215
2216 ISP_LOCK(isp);
2217 while (isp->isp_osinfo.is_exiting == 0) {
2218 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2219 "Chan %d Checking FC state", chan);
2220 lb = isp_fc_runstate(isp, chan, 250000);
2221 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2222 "Chan %d FC got to %s state", chan,
2223 isp_fc_loop_statename(lb));
2224
2225 /*
2226 * Our action differs based upon whether we're supporting
2227 * initiator mode or not. If we are, we might freeze the simq
2228 * when the loop is down and set various delays before
2229 * checking again.
2230 *
2231 * If not, we simply wait for the loop to come up.
2232 */
2233 if (lb == LOOP_READY || lb < 0) {
2234 slp = 0;
2235 } else {
2236 /*
2237 * If we've never seen the loop up and we've waited longer
2238 * than the quickboot time, or we've seen the loop up but have
2239 * waited longer than loop_down_limit, give up and go
2240 * to sleep until the loop comes up.
2241 */
2242 if (fc->loop_seen_once == 0)
2243 lim = isp_quickboot_time;
2244 else
2245 lim = fc->loop_down_limit;
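/*
 * Otherwise back off the recheck interval (1s/5s/10s/20s/30s) the
 * longer the loop has been down, until the limit above expires.
 */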
2246 d = time_uptime - fc->loop_down_time;
2247 if (d >= lim)
2248 slp = 0;
2249 else if (d < 10)
2250 slp = 1;
2251 else if (d < 30)
2252 slp = 5;
2253 else if (d < 60)
2254 slp = 10;
2255 else if (d < 120)
2256 slp = 20;
2257 else
2258 slp = 30;
2259 }
2260
2261 if (slp == 0) {
2262 if (lb == LOOP_READY)
2263 isp_loop_up(isp, chan);
2264 else
2265 isp_loop_dead(isp, chan);
2266 }
2267
2268 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2269 "Chan %d sleep for %d seconds", chan, slp);
2270 msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz);
2271 }
2272 fc->num_threads -= 1;
2273 wakeup(&fc->num_threads);
2274 ISP_UNLOCK(isp);
2275 kthread_exit();
2276 }
2277
2278 #ifdef ISP_TARGET_MODE
2279 static void
2280 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
2281 {
2282 atio_private_data_t *atp;
2283 union ccb *accb = ccb->cab.abort_ccb;
2284 struct ccb_hdr *sccb;
2285 tstate_t *tptr;
2286
2287 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2288 if (tptr != NULL) {
2289 /* Search for the ATIO among the queued. */
2290 SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) {
2291 if (sccb != &accb->ccb_h)
2292 continue;
2293 SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle);
2294 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2295 "Abort FREE ATIO\n");
2296 accb->ccb_h.status = CAM_REQ_ABORTED;
2297 xpt_done(accb);
2298 ccb->ccb_h.status = CAM_REQ_CMP;
2299 return;
2300 }
2301 }
2302
2303 /* Search for the ATIO among running. */
2304 atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id);
2305 if (atp != NULL) {
2306 /* Send TERMINATE to firmware. */
2307 if (!atp->dead) {
2308 uint8_t storage[QENTRY_LEN];
2309 ct7_entry_t *cto = (ct7_entry_t *) storage;
2310
2311 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2312 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2313 cto->ct_header.rqs_entry_count = 1;
2314 cto->ct_nphdl = atp->nphdl;
2315 cto->ct_rxid = atp->tag;
2316 cto->ct_iid_lo = atp->sid;
2317 cto->ct_iid_hi = atp->sid >> 16;
2318 cto->ct_oxid = atp->oxid;
2319 cto->ct_vpidx = XS_CHANNEL(accb);
2320 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2321 isp_send_entry(isp, cto);
2322 }
2323 isp_put_atpd(isp, XS_CHANNEL(accb), atp);
2324 ccb->ccb_h.status = CAM_REQ_CMP;
2325 } else {
2326 ccb->ccb_h.status = CAM_UA_ABORT;
2327 }
2328 }
2329
2330 static void
2331 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb)
2332 {
2333 inot_private_data_t *ntp;
2334 union ccb *accb = ccb->cab.abort_ccb;
2335 struct ccb_hdr *sccb;
2336 tstate_t *tptr;
2337
2338 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2339 if (tptr != NULL) {
2340 /* Search for the INOT among the queued. */
2341 SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) {
2342 if (sccb != &accb->ccb_h)
2343 continue;
2344 SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle);
2345 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2346 "Abort FREE INOT\n");
2347 accb->ccb_h.status = CAM_REQ_ABORTED;
2348 xpt_done(accb);
2349 ccb->ccb_h.status = CAM_REQ_CMP;
2350 return;
2351 }
2352 }
2353
2354 /* Search for the INOT among running. */
2355 ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id);
2356 if (ntp != NULL) {
2357 if (ntp->nt.nt_need_ack) {
2358 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK,
2359 ntp->nt.nt_lreserved);
2360 }
2361 isp_put_ntpd(isp, XS_CHANNEL(accb), ntp);
2362 ccb->ccb_h.status = CAM_REQ_CMP;
2363 } else {
2364 ccb->ccb_h.status = CAM_UA_ABORT;
2365 return;
2366 }
2367 }
2368 #endif
2369
2370 static void
2371 isp_action(struct cam_sim *sim, union ccb *ccb)
2372 {
2373 int bus, tgt, error;
2374 ispsoftc_t *isp;
2375 fcparam *fcp;
2376 struct ccb_trans_settings *cts;
2377 sbintime_t ts;
2378
2379 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2380
2381 isp = (ispsoftc_t *)cam_sim_softc(sim);
2382 ISP_ASSERT_LOCKED(isp);
2383 bus = cam_sim_bus(sim);
2384 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2385 ISP_PCMD(ccb) = NULL;
2386
2387 switch (ccb->ccb_h.func_code) {
2388 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2389 /*
2390 * Do a couple of preliminary checks...
2391 */
2392 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2393 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2394 ccb->ccb_h.status = CAM_REQ_INVALID;
2395 isp_done((struct ccb_scsiio *) ccb);
2396 break;
2397 }
2398 }
2399 #ifdef DIAGNOSTIC
2400 if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
2401 xpt_print(ccb->ccb_h.path, "invalid target\n");
2402 ccb->ccb_h.status = CAM_PATH_INVALID;
2403 }
2404 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2405 xpt_done(ccb);
2406 break;
2407 }
2408 #endif
2409 ccb->csio.scsi_status = SCSI_STATUS_OK;
2410 if (isp_get_pcmd(isp, ccb)) {
2411 isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
2412 cam_freeze_devq(ccb->ccb_h.path);
2413 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2414 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2415 xpt_done(ccb);
2416 break;
2417 }
2418 error = isp_start((XS_T *) ccb);
2419 isp_rq_check_above(isp);
2420 switch (error) {
2421 case 0:
2422 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2423 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
2424 break;
2425 /* Give firmware extra 10s to handle timeout. */
2426 ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S;
2427 callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0,
2428 isp_watchdog, ccb, 0);
2429 break;
2430 case CMD_RQLATER:
2431 isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later",
2432 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb));
2433 cam_freeze_devq(ccb->ccb_h.path);
2434 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2435 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2436 isp_free_pcmd(isp, ccb);
2437 xpt_done(ccb);
2438 break;
2439 case CMD_EAGAIN:
2440 isp_free_pcmd(isp, ccb);
2441 cam_freeze_devq(ccb->ccb_h.path);
2442 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2443 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2444 xpt_done(ccb);
2445 break;
2446 case CMD_COMPLETE:
2447 isp_done((struct ccb_scsiio *) ccb);
2448 break;
2449 default:
2450 isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
2451 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2452 isp_free_pcmd(isp, ccb);
2453 xpt_done(ccb);
2454 }
2455 break;
2456
2457 #ifdef ISP_TARGET_MODE
2458 case XPT_EN_LUN: /* Enable/Disable LUN as a target */
2459 if (ccb->cel.enable) {
2460 isp_enable_lun(isp, ccb);
2461 } else {
2462 isp_disable_lun(isp, ccb);
2463 }
2464 break;
2465 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
2466 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2467 {
2468 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2469 if (tptr == NULL) {
2470 const char *str;
2471
2472 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
2473 str = "XPT_IMMEDIATE_NOTIFY";
2474 else
2475 str = "XPT_ACCEPT_TARGET_IO";
2476 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path,
2477 "%s: no state pointer found for %s\n",
2478 __func__, str);
2479 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2480 xpt_done(ccb);
2481 break;
2482 }
2483
2484 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2485 ccb->atio.tag_id = 0;
2486 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
2487 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2488 "Put FREE ATIO\n");
2489 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
2490 ccb->cin1.seq_id = ccb->cin1.tag_id = 0;
2491 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
2492 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2493 "Put FREE INOT\n");
2494 }
2495 ccb->ccb_h.status = CAM_REQ_INPROG;
2496 break;
2497 }
2498 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */
2499 {
2500 inot_private_data_t *ntp;
2501
2502 /*
2503 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
2504 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
2505 */
2506 /*
2507 * All the relevant path information is in the associated immediate notify
2508 */
2509 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2510 ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id);
2511 if (ntp == NULL) {
2512 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
2513 ccb->cna2.tag_id, ccb->cna2.seq_id);
2514 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2515 xpt_done(ccb);
2516 break;
2517 }
2518 if (isp_handle_platform_target_notify_ack(isp, &ntp->nt,
2519 (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
2520 cam_freeze_devq(ccb->ccb_h.path);
2521 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2522 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2523 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2524 break;
2525 }
2526 isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp);
2527 ccb->ccb_h.status = CAM_REQ_CMP;
2528 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2529 xpt_done(ccb);
2530 break;
2531 }
2532 case XPT_CONT_TARGET_IO:
2533 isp_target_start_ctio(isp, ccb, FROM_CAM);
2534 isp_rq_check_above(isp);
2535 break;
2536 #endif
2537 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2538 tgt = ccb->ccb_h.target_id;
2539 tgt |= (bus << 16);
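/* Fold the channel number into the upper 16 bits of the target id. */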
2540
2541 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
2542 if (error) {
2543 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2544 } else {
2545 /*
2546 * If we have a FC device, reset the Command
2547 * Reference Number, because the target will expect
2548 * that we re-start the CRN at 1 after a reset.
2549 */
2550 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2551
2552 ccb->ccb_h.status = CAM_REQ_CMP;
2553 }
2554 xpt_done(ccb);
2555 break;
2556 case XPT_ABORT: /* Abort the specified CCB */
2557 {
2558 union ccb *accb = ccb->cab.abort_ccb;
2559 switch (accb->ccb_h.func_code) {
2560 #ifdef ISP_TARGET_MODE
2561 case XPT_ACCEPT_TARGET_IO:
2562 isp_abort_atio(isp, ccb);
2563 break;
2564 case XPT_IMMEDIATE_NOTIFY:
2565 isp_abort_inot(isp, ccb);
2566 break;
2567 #endif
2568 case XPT_SCSI_IO:
2569 error = isp_control(isp, ISPCTL_ABORT_CMD, accb);
2570 if (error) {
2571 ccb->ccb_h.status = CAM_UA_ABORT;
2572 } else {
2573 ccb->ccb_h.status = CAM_REQ_CMP;
2574 }
2575 break;
2576 default:
2577 ccb->ccb_h.status = CAM_REQ_INVALID;
2578 break;
2579 }
2580 /*
2581 * This is not a queued CCB, so the caller expects it to be
2582 * complete when control is returned.
2583 */
2584 break;
2585 }
2586 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2587 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2588 cts = &ccb->cts;
2589 if (!IS_CURRENT_SETTINGS(cts)) {
2590 ccb->ccb_h.status = CAM_REQ_INVALID;
2591 xpt_done(ccb);
2592 break;
2593 }
2594 ccb->ccb_h.status = CAM_REQ_CMP;
2595 xpt_done(ccb);
2596 break;
2597 case XPT_GET_TRAN_SETTINGS:
2598 {
2599 struct ccb_trans_settings_scsi *scsi;
2600 struct ccb_trans_settings_fc *fc;
2601
2602 cts = &ccb->cts;
2603 scsi = &cts->proto_specific.scsi;
2604 fc = &cts->xport_specific.fc;
2605 tgt = cts->ccb_h.target_id;
2606 fcp = FCPARAM(isp, bus);
2607
2608 cts->protocol = PROTO_SCSI;
2609 cts->protocol_version = SCSI_REV_2;
2610 cts->transport = XPORT_FC;
2611 cts->transport_version = 0;
2612
2613 scsi->valid = CTS_SCSI_VALID_TQ;
2614 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2615 fc->valid = CTS_FC_VALID_SPEED;
2616 fc->bitrate = fcp->isp_gbspeed * 100000;
2617 if (tgt < MAX_FC_TARG) {
2618 fcportdb_t *lp = &fcp->portdb[tgt];
2619 fc->wwnn = lp->node_wwn;
2620 fc->wwpn = lp->port_wwn;
2621 fc->port = lp->portid;
2622 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2623 }
2624 ccb->ccb_h.status = CAM_REQ_CMP;
2625 xpt_done(ccb);
2626 break;
2627 }
2628 case XPT_CALC_GEOMETRY:
2629 cam_calc_geometry(&ccb->ccg, 1);
2630 xpt_done(ccb);
2631 break;
2632
2633 case XPT_RESET_BUS: /* Reset the specified bus */
2634 error = isp_control(isp, ISPCTL_RESET_BUS, bus);
2635 if (error) {
2636 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2637 xpt_done(ccb);
2638 break;
2639 }
2640 if (bootverbose) {
2641 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
2642 }
2643 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
2644 ccb->ccb_h.status = CAM_REQ_CMP;
2645 xpt_done(ccb);
2646 break;
2647
2648 case XPT_TERM_IO: /* Terminate the I/O process */
2649 ccb->ccb_h.status = CAM_REQ_INVALID;
2650 xpt_done(ccb);
2651 break;
2652
2653 case XPT_SET_SIM_KNOB: /* Set SIM knobs */
2654 {
2655 struct ccb_sim_knob *kp = &ccb->knob;
2656 fcparam *fcp = FCPARAM(isp, bus);
2657
2658 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2659 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
2660 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
2661 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
2662 }
2663 ccb->ccb_h.status = CAM_REQ_CMP;
2664 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
2665 int rchange = 0;
2666 int newrole = 0;
2667
2668 switch (kp->xport_specific.fc.role) {
2669 case KNOB_ROLE_NONE:
2670 if (fcp->role != ISP_ROLE_NONE) {
2671 rchange = 1;
2672 newrole = ISP_ROLE_NONE;
2673 }
2674 break;
2675 case KNOB_ROLE_TARGET:
2676 if (fcp->role != ISP_ROLE_TARGET) {
2677 rchange = 1;
2678 newrole = ISP_ROLE_TARGET;
2679 }
2680 break;
2681 case KNOB_ROLE_INITIATOR:
2682 if (fcp->role != ISP_ROLE_INITIATOR) {
2683 rchange = 1;
2684 newrole = ISP_ROLE_INITIATOR;
2685 }
2686 break;
2687 case KNOB_ROLE_BOTH:
2688 if (fcp->role != ISP_ROLE_BOTH) {
2689 rchange = 1;
2690 newrole = ISP_ROLE_BOTH;
2691 }
2692 break;
2693 }
2694 if (rchange) {
2695 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role from %d to %d\n", fcp->role, newrole);
2696 if (isp_control(isp, ISPCTL_CHANGE_ROLE,
2697 bus, newrole) != 0) {
2698 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2699 xpt_done(ccb);
2700 break;
2701 }
2702 }
2703 }
2704 xpt_done(ccb);
2705 break;
2706 }
2707 case XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */
2708 case XPT_GET_SIM_KNOB: /* Get SIM knobs */
2709 {
2710 struct ccb_sim_knob *kp = &ccb->knob;
2711 fcparam *fcp = FCPARAM(isp, bus);
2712
2713 kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
2714 kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
2715 switch (fcp->role) {
2716 case ISP_ROLE_NONE:
2717 kp->xport_specific.fc.role = KNOB_ROLE_NONE;
2718 break;
2719 case ISP_ROLE_TARGET:
2720 kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
2721 break;
2722 case ISP_ROLE_INITIATOR:
2723 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
2724 break;
2725 case ISP_ROLE_BOTH:
2726 kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
2727 break;
2728 }
2729 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
2730 ccb->ccb_h.status = CAM_REQ_CMP;
2731 xpt_done(ccb);
2732 break;
2733 }
2734 case XPT_PATH_INQ: /* Path routing inquiry */
2735 {
2736 struct ccb_pathinq *cpi = &ccb->cpi;
2737
2738 cpi->version_num = 1;
2739 #ifdef ISP_TARGET_MODE
2740 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2741 #else
2742 cpi->target_sprt = 0;
2743 #endif
2744 cpi->hba_eng_cnt = 0;
2745 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2746 cpi->max_lun = 255;
2747 cpi->bus_id = cam_sim_bus(sim);
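/* The largest I/O we can map is bounded by the 64-bit S/G segment limit. */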
2748 cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
2749
2750 fcp = FCPARAM(isp, bus);
2751
2752 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2753 cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2754
2755 /*
2756 * Because our loop ID can shift from time to time,
2757 * make our initiator ID out of range of our bus.
2758 */
2759 cpi->initiator_id = cpi->max_target + 1;
2760
2761 /*
2762 * Set the base transfer capabilities for Fibre Channel for this HBA.
2763 */
2764 if (IS_25XX(isp))
2765 cpi->base_transfer_speed = 8000000;
2766 else
2767 cpi->base_transfer_speed = 4000000;
2768 cpi->hba_inquiry = PI_TAG_ABLE;
2769 cpi->transport = XPORT_FC;
2770 cpi->transport_version = 0;
2771 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
2772 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
2773 cpi->xport_specific.fc.port = fcp->isp_portid;
2774 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
2775 cpi->protocol = PROTO_SCSI;
2776 cpi->protocol_version = SCSI_REV_2;
2777 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2778 strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2779 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2780 cpi->unit_number = cam_sim_unit(sim);
2781 cpi->ccb_h.status = CAM_REQ_CMP;
2782 xpt_done(ccb);
2783 break;
2784 }
2785 default:
2786 ccb->ccb_h.status = CAM_REQ_INVALID;
2787 xpt_done(ccb);
2788 break;
2789 }
2790 }
2791
2792 void
2793 isp_done(XS_T *sccb)
2794 {
2795 ispsoftc_t *isp = XS_ISP(sccb);
2796 uint32_t status;
2797
2798 if (XS_NOERR(sccb))
2799 XS_SETERR(sccb, CAM_REQ_CMP);
2800
2801 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
2802 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2803 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2804 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2805 } else {
2806 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2807 }
2808 }
2809
2810 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2811 status = sccb->ccb_h.status & CAM_STATUS_MASK;
2812 if (status != CAM_REQ_CMP &&
2813 (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2814 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2815 xpt_freeze_devq(sccb->ccb_h.path, 1);
2816 }
2817
2818 if (ISP_PCMD(sccb)) {
2819 if (callout_active(&PISP_PCMD(sccb)->wdog))
2820 callout_stop(&PISP_PCMD(sccb)->wdog);
2821 isp_free_pcmd(isp, (union ccb *) sccb);
2822 }
2823 isp_rq_check_below(isp);
2824 xpt_done((union ccb *) sccb);
2825 }
2826
2827 void
2828 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
2829 {
2830 int bus;
2831 static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s";
2832 char buf[64];
2833 char *msg = NULL;
2834 target_id_t tgt = 0;
2835 fcportdb_t *lp;
2836 struct isp_fc *fc;
2837 struct ac_contract ac;
2838 struct ac_device_changed *adc;
2839 va_list ap;
2840
2841 switch (cmd) {
2842 case ISPASYNC_LOOP_RESET:
2843 {
2844 uint16_t lipp;
2845 fcparam *fcp;
2846 va_start(ap, cmd);
2847 bus = va_arg(ap, int);
2848 va_end(ap);
2849
2850 lipp = ISP_READ(isp, OUTMAILBOX1);
2851 fcp = FCPARAM(isp, bus);
2852
2853 isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp);
2854 /*
2855 * Per FCP-4, a Reset LIP should result in a CRN reset. Other
2856 * LIPs and loop up/down events should never reset the CRN. For
2857 * an as-yet-unknown reason, 24xx series cards (and
2858 * potentially others) can interrupt with a LIP Reset status
2859 * when no LIP reset came down the wire. Additionally, the LIP
2860 * primitive accompanying this status would not be a valid LIP
2861 * Reset primitive, but some variation of an invalid AL_PA
2862 * LIP. As a result, we have to verify that the AL_PD in the LIP
2863 * addresses our port before blindly resetting.
2864 */
2865 if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF)))
2866 isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0);
2867 isp_loop_changed(isp, bus);
2868 break;
2869 }
2870 case ISPASYNC_LIP:
2871 if (msg == NULL)
2872 msg = "LIP Received";
2873 /* FALLTHROUGH */
2874 case ISPASYNC_LOOP_DOWN:
2875 if (msg == NULL)
2876 msg = "LOOP Down";
2877 /* FALLTHROUGH */
2878 case ISPASYNC_LOOP_UP:
2879 if (msg == NULL)
2880 msg = "LOOP Up";
2881 va_start(ap, cmd);
2882 bus = va_arg(ap, int);
2883 va_end(ap);
2884 isp_loop_changed(isp, bus);
2885 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
2886 break;
2887 case ISPASYNC_DEV_ARRIVED:
2888 va_start(ap, cmd);
2889 bus = va_arg(ap, int);
2890 lp = va_arg(ap, fcportdb_t *);
2891 va_end(ap);
2892 fc = ISP_FC_PC(isp, bus);
2893 tgt = FC_PORTDB_TGT(isp, bus, lp);
2894 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2895 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived");
2896 if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2897 (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) {
2898 lp->is_target = 1;
2899 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2900 isp_make_here(isp, lp, bus, tgt);
2901 }
2902 if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2903 (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) {
2904 lp->is_initiator = 1;
2905 ac.contract_number = AC_CONTRACT_DEV_CHG;
2906 adc = (struct ac_device_changed *) ac.contract_data;
2907 adc->wwpn = lp->port_wwn;
2908 adc->port = lp->portid;
2909 adc->target = tgt;
2910 adc->arrived = 1;
2911 xpt_async(AC_CONTRACT, fc->path, &ac);
2912 }
2913 break;
2914 case ISPASYNC_DEV_CHANGED:
2915 case ISPASYNC_DEV_STAYED:
2916 {
2917 int crn_reset_done;
2918
2919 crn_reset_done = 0;
2920 va_start(ap, cmd);
2921 bus = va_arg(ap, int);
2922 lp = va_arg(ap, fcportdb_t *);
2923 va_end(ap);
2924 fc = ISP_FC_PC(isp, bus);
2925 tgt = FC_PORTDB_TGT(isp, bus, lp);
2926 isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3);
2927 if (cmd == ISPASYNC_DEV_CHANGED)
2928 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed");
2929 else
2930 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed");
2931
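/*
 * Reconcile our target/initiator view of this port with its new PRLI
 * word 3, announcing arrivals and departures; for a changed device the
 * CRN is reset at most once.
 */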
2932 if (lp->is_target !=
2933 ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2934 (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) {
2935 lp->is_target = !lp->is_target;
2936 if (lp->is_target) {
2937 if (cmd == ISPASYNC_DEV_CHANGED) {
2938 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2939 crn_reset_done = 1;
2940 }
2941 isp_make_here(isp, lp, bus, tgt);
2942 } else {
2943 isp_make_gone(isp, lp, bus, tgt);
2944 if (cmd == ISPASYNC_DEV_CHANGED) {
2945 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2946 crn_reset_done = 1;
2947 }
2948 }
2949 }
2950 if (lp->is_initiator !=
2951 ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2952 (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) {
2953 lp->is_initiator = !lp->is_initiator;
2954 ac.contract_number = AC_CONTRACT_DEV_CHG;
2955 adc = (struct ac_device_changed *) ac.contract_data;
2956 adc->wwpn = lp->port_wwn;
2957 adc->port = lp->portid;
2958 adc->target = tgt;
2959 adc->arrived = lp->is_initiator;
2960 xpt_async(AC_CONTRACT, fc->path, &ac);
2961 }
2962
2963 if ((cmd == ISPASYNC_DEV_CHANGED) &&
2964 (crn_reset_done == 0))
2965 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2966
2967 break;
2968 }
2969 case ISPASYNC_DEV_GONE:
2970 va_start(ap, cmd);
2971 bus = va_arg(ap, int);
2972 lp = va_arg(ap, fcportdb_t *);
2973 va_end(ap);
2974 fc = ISP_FC_PC(isp, bus);
2975 tgt = FC_PORTDB_TGT(isp, bus, lp);
2976 /*
2977 * If this has a virtual target or initiator, set the isp_gdt
2978 * timer running on it to delay its departure.
2979 */
2980 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2981 if (lp->is_target || lp->is_initiator) {
2982 lp->state = FC_PORTDB_STATE_ZOMBIE;
2983 lp->gone_timer = fc->gone_device_time;
2984 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie");
2985 if (fc->ready && !callout_active(&fc->gdt)) {
2986 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
2987 callout_reset(&fc->gdt, hz, isp_gdt, fc);
2988 }
2989 break;
2990 }
2991 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone");
2992 break;
2993 case ISPASYNC_CHANGE_NOTIFY:
2994 {
2995 char *msg;
2996 int evt, nphdl, nlstate, portid, reason;
2997
2998 va_start(ap, cmd);
2999 bus = va_arg(ap, int);
3000 evt = va_arg(ap, int);
3001 if (evt == ISPASYNC_CHANGE_PDB) {
3002 nphdl = va_arg(ap, int);
3003 nlstate = va_arg(ap, int);
3004 reason = va_arg(ap, int);
3005 } else if (evt == ISPASYNC_CHANGE_SNS) {
3006 portid = va_arg(ap, int);
3007 } else {
3008 nphdl = NIL_HANDLE;
3009 nlstate = reason = 0;
3010 }
3011 va_end(ap);
3012
3013 if (evt == ISPASYNC_CHANGE_PDB) {
3014 int tgt_set = 0;
3015 msg = "Port Database Changed";
3016 isp_prt(isp, ISP_LOGINFO,
3017 "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)",
3018 bus, msg, nphdl, nlstate, reason);
3019 /*
3020 * Port database syncs are not sufficient for
3021 * determining that logins or logouts are done on the
3022 * loop, but this information is directly available from
3023 * the reason code of the incoming mbox. We must reset
3024 * the FCP CRN on these events according to FCP-4.
3025 */
3026 switch (reason) {
3027 case PDB24XX_AE_IMPL_LOGO_1:
3028 case PDB24XX_AE_IMPL_LOGO_2:
3029 case PDB24XX_AE_IMPL_LOGO_3:
3030 case PDB24XX_AE_PLOGI_RCVD:
3031 case PDB24XX_AE_PRLI_RCVD:
3032 case PDB24XX_AE_PRLO_RCVD:
3033 case PDB24XX_AE_LOGO_RCVD:
3034 case PDB24XX_AE_PLOGI_DONE:
3035 case PDB24XX_AE_PRLI_DONE:
3036 /*
3037 * If the event is not global, twiddle tgt and
3038 * tgt_set to nominate only the target
3039 * associated with the nphdl.
3040 */
3041 if (nphdl != PDB24XX_AE_GLOBAL) {
3042 /* Break if we don't yet have the pdb */
3043 if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp))
3044 break;
3045 tgt = FC_PORTDB_TGT(isp, bus, lp);
3046 tgt_set = 1;
3047 }
3048 isp_fcp_reset_crn(isp, bus, tgt, tgt_set);
3049 break;
3050 default:
3051 break; /* NOP */
3052 }
3053 } else if (evt == ISPASYNC_CHANGE_SNS) {
3054 msg = "Name Server Database Changed";
3055 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)",
3056 bus, msg, portid);
3057 } else {
3058 msg = "Other Change Notify";
3059 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
3060 }
3061 isp_loop_changed(isp, bus);
3062 break;
3063 }
3064 #ifdef ISP_TARGET_MODE
3065 case ISPASYNC_TARGET_NOTIFY:
3066 {
3067 isp_notify_t *notify;
3068 va_start(ap, cmd);
3069 notify = va_arg(ap, isp_notify_t *);
3070 va_end(ap);
3071 switch (notify->nt_ncode) {
3072 case NT_ABORT_TASK:
3073 case NT_ABORT_TASK_SET:
3074 case NT_CLEAR_ACA:
3075 case NT_CLEAR_TASK_SET:
3076 case NT_LUN_RESET:
3077 case NT_TARGET_RESET:
3078 case NT_QUERY_TASK_SET:
3079 case NT_QUERY_ASYNC_EVENT:
3080 /*
3081 * These are task management functions.
3082 */
3083 isp_handle_platform_target_tmf(isp, notify);
3084 break;
3085 case NT_LIP_RESET:
3086 case NT_LINK_UP:
3087 case NT_LINK_DOWN:
3088 case NT_HBA_RESET:
3089 /*
3090 * No action need be taken here.
3091 */
3092 break;
3093 case NT_SRR:
3094 isp_handle_platform_srr(isp, notify);
3095 break;
3096 default:
3097 isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
3098 isp_handle_platform_target_notify_ack(isp, notify, 0);
3099 break;
3100 }
3101 break;
3102 }
3103 case ISPASYNC_TARGET_NOTIFY_ACK:
3104 {
3105 void *inot;
3106 va_start(ap, cmd);
3107 inot = va_arg(ap, void *);
3108 va_end(ap);
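/*
 * If the notify ack cannot be queued right now, stash a copy of the
 * notify and retry it shortly from a callout.
 */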
3109 if (isp_notify_ack(isp, inot)) {
3110 isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT);
3111 if (tp) {
3112 tp->isp = isp;
3113 memcpy(tp->data, inot, sizeof (tp->data));
3114 tp->not = tp->data;
3115 callout_init_mtx(&tp->timer, &isp->isp_lock, 0);
3116 callout_reset(&tp->timer, 5,
3117 isp_refire_notify_ack, tp);
3118 } else {
3119 isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire");
3120 }
3121 }
3122 break;
3123 }
3124 case ISPASYNC_TARGET_ACTION:
3125 {
3126 isphdr_t *hp;
3127
3128 va_start(ap, cmd);
3129 hp = va_arg(ap, isphdr_t *);
3130 va_end(ap);
3131 switch (hp->rqs_entry_type) {
3132 case RQSTYPE_ATIO:
3133 isp_handle_platform_atio7(isp, (at7_entry_t *)hp);
3134 break;
3135 case RQSTYPE_CTIO7:
3136 isp_handle_platform_ctio(isp, (ct7_entry_t *)hp);
3137 break;
3138 default:
3139 isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x",
3140 __func__, hp->rqs_entry_type);
3141 break;
3142 }
3143 break;
3144 }
3145 #endif
3146 case ISPASYNC_FW_CRASH:
3147 {
3148 uint16_t mbox1;
3149 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3150 isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1);
3151 #if 0
3152 isp_reinit(isp, 1);
3153 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3154 #endif
3155 break;
3156 }
3157 default:
3158 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3159 break;
3160 }
3161 }
3162
3163 uint64_t
3164 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
3165 {
3166 uint64_t seed;
3167 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3168
3169 /* First try to use explicitly configured WWNs. */
3170 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
3171 if (seed)
3172 return (seed);
3173
3174 /* Otherwise try to use WWNs from NVRAM. */
3175 if (isactive) {
3176 seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram :
3177 FCPARAM(isp, chan)->isp_wwpn_nvram;
3178 if (seed)
3179 return (seed);
3180 }
3181
3182 /* If still no WWNs, try to steal them from the first channel. */
3183 if (chan > 0) {
3184 seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn :
3185 ISP_FC_PC(isp, 0)->def_wwpn;
3186 if (seed == 0) {
3187 seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram :
3188 FCPARAM(isp, 0)->isp_wwpn_nvram;
3189 }
3190 }
3191
3192 /* If still nothing -- improvise. */
3193 if (seed == 0) {
3194 seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev);
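/* Make the fabricated WWPN differ from the WWNN by flipping a bit in the top byte. */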
3195 if (!iswwnn)
3196 seed ^= 0x0100000000000000ULL;
3197 }
3198
3199 /* For additional channels we have to improvise even more. */
3200 if (!iswwnn && chan > 0) {
3201 /*
3202 * We'll stick our channel number plus one first into bits
3203 * 56..59 and thence into bits 52..55, which allows for 8 bits
3204 * of channel, enough for our maximum of 255 channels.
3205 */
3206 seed ^= 0x0100000000000000ULL;
3207 seed ^= ((uint64_t) (chan + 1) & 0xf) << 56;
3208 seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
3209 }
3210 return (seed);
3211 }
3212
3213 void
3214 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3215 {
3216 int loc;
3217 char lbuf[200];
3218 va_list ap;
3219
3220 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3221 return;
3222 }
3223 snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
3224 loc = strlen(lbuf);
3225 va_start(ap, fmt);
3226 vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
3227 va_end(ap);
3228 printf("%s\n", lbuf);
3229 }
3230
3231 void
3232 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
3233 {
3234 va_list ap;
3235 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3236 return;
3237 }
3238 xpt_print_path(xs->ccb_h.path);
3239 va_start(ap, fmt);
3240 vprintf(fmt, ap);
3241 va_end(ap);
3242 printf("\n");
3243 }
3244
3245 uint64_t
3246 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3247 {
3248 uint64_t elapsed;
3249 struct timespec x;
3250
3251 timespecsub(b, a, &x);
3252 elapsed = GET_NANOSEC(&x);
3253 if (elapsed == 0)
3254 elapsed++;
3255 return (elapsed);
3256 }
3257
3258 int
3259 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
3260 {
3261 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3262
3263 if (fc->fcbsy)
3264 return (-1);
3265 fc->fcbsy = 1;
3266 return (0);
3267 }
3268
3269 void
3270 isp_platform_intr(void *arg)
3271 {
3272 ispsoftc_t *isp = arg;
3273
3274 ISP_LOCK(isp);
3275 ISP_RUN_ISR(isp);
3276 ISP_UNLOCK(isp);
3277 }
3278
3279 void
3280 isp_platform_intr_resp(void *arg)
3281 {
3282 ispsoftc_t *isp = arg;
3283
3284 ISP_LOCK(isp);
3285 isp_intr_respq(isp);
3286 ISP_UNLOCK(isp);
3287
3288 /* We have handshake enabled, so explicitly complete interrupt */
3289 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3290 }
3291
3292 void
3293 isp_platform_intr_atio(void *arg)
3294 {
3295 ispsoftc_t *isp = arg;
3296
3297 ISP_LOCK(isp);
3298 #ifdef ISP_TARGET_MODE
3299 isp_intr_atioq(isp);
3300 #endif
3301 ISP_UNLOCK(isp);
3302
3303 /* We have handshake enabled, so explicitly complete interrupt */
3304 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3305 }
3306
3307 typedef struct {
3308 ispsoftc_t *isp;
3309 struct ccb_scsiio *csio;
3310 void *qe;
3311 int error;
3312 } mush_t;
3313
3314 static void
3315 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
3316 {
3317 mush_t *mp = (mush_t *) arg;
3318 ispsoftc_t *isp= mp->isp;
3319 struct ccb_scsiio *csio = mp->csio;
3320 bus_dmasync_op_t op;
3321
3322 if (error) {
3323 mp->error = error;
3324 return;
3325 }
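/*
 * For initiator I/O, CAM_DIR_IN means the data comes from the device,
 * so sync for PREREAD; for target mode CTIOs the sense of the direction
 * flag is inverted, hence the XOR with the function code.
 */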
3326 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3327 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3328 op = BUS_DMASYNC_PREREAD;
3329 else
3330 op = BUS_DMASYNC_PREWRITE;
3331 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3332
3333 mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
3334 if (mp->error)
3335 isp_dmafree(isp, csio);
3336 }
3337
3338 int
3339 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
3340 {
3341 mush_t mp;
3342 int error;
3343
3344 if (XS_XFRLEN(csio)) {
3345 mp.isp = isp;
3346 mp.csio = csio;
3347 mp.qe = qe;
3348 mp.error = 0;
3349 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
3350 (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
3351 if (error == 0)
3352 error = mp.error;
3353 } else {
3354 error = ISP_SEND_CMD(isp, qe, NULL, 0);
3355 }
3356 switch (error) {
3357 case 0:
3358 case CMD_COMPLETE:
3359 case CMD_EAGAIN:
3360 case CMD_RQLATER:
3361 break;
3362 case ENOMEM:
3363 error = CMD_EAGAIN;
3364 break;
3365 case EINVAL:
3366 case EFBIG:
3367 csio->ccb_h.status = CAM_REQ_INVALID;
3368 error = CMD_COMPLETE;
3369 break;
3370 default:
3371 csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
3372 error = CMD_COMPLETE;
3373 break;
3374 }
3375 return (error);
3376 }
3377
3378 void
3379 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
3380 {
3381 bus_dmasync_op_t op;
3382
3383 if (XS_XFRLEN(csio) == 0)
3384 return;
3385
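/* Same direction logic as isp_dma2(), but using the POST sync variants before unloading the map. */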
3386 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3387 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3388 op = BUS_DMASYNC_POSTREAD;
3389 else
3390 op = BUS_DMASYNC_POSTWRITE;
3391 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3392 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
3393 }
3394
3395 /*
3396 * Reset the command reference number for all LUNs on a specific target
3397 * (needed when a target arrives again) or for all targets on a port
3398 * (needed for events like a LIP).
3399 */
3400 void
3401 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set)
3402 {
3403 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3404 struct isp_nexus *nxp;
3405 int i;
3406
3407 if (tgt_set == 0)
3408 isp_prt(isp, ISP_LOGDEBUG0,
3409 "Chan %d resetting CRN on all targets", chan);
3410 else
3411 isp_prt(isp, ISP_LOGDEBUG0,
3412 "Chan %d resetting CRN on target %u", chan, tgt);
3413
3414 for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
3415 for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) {
3416 if (tgt_set == 0 || tgt == nxp->tgt)
3417 nxp->crnseed = 0;
3418 }
3419 }
3420 }
3421
3422 int
3423 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
3424 {
3425 lun_id_t lun;
3426 uint32_t chan, tgt;
3427 struct isp_fc *fc;
3428 struct isp_nexus *nxp;
3429 int idx;
3430
3431 chan = XS_CHANNEL(cmd);
3432 tgt = XS_TGT(cmd);
3433 lun = XS_LUN(cmd);
3434 fc = ISP_FC_PC(isp, chan);
3435 idx = NEXUS_HASH(tgt, lun);
3436 nxp = fc->nexus_hash[idx];
3437
3438 while (nxp) {
3439 if (nxp->tgt == tgt && nxp->lun == lun)
3440 break;
3441 nxp = nxp->next;
3442 }
3443 if (nxp == NULL) {
3444 nxp = fc->nexus_free_list;
3445 if (nxp == NULL) {
3446 nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
3447 if (nxp == NULL) {
3448 return (-1);
3449 }
3450 } else {
3451 fc->nexus_free_list = nxp->next;
3452 }
3453 nxp->tgt = tgt;
3454 nxp->lun = lun;
3455 nxp->next = fc->nexus_hash[idx];
3456 fc->nexus_hash[idx] = nxp;
3457 }
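/* The CRN is never zero: a fresh (or freshly reset) nexus starts counting at 1. */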
3458 if (nxp->crnseed == 0)
3459 nxp->crnseed = 1;
3460 *crnp = nxp->crnseed++;
3461 return (0);
3462 }
3463
3464 /*
3465 * We enter with the lock held
3466 */
3467 void
3468 isp_timer(void *arg)
3469 {
3470 ispsoftc_t *isp = arg;
3471 #ifdef ISP_TARGET_MODE
3472 isp_tmcmd_restart(isp);
3473 #endif
3474 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
3475 }
3476
3477 #ifdef ISP_TARGET_MODE
3478 isp_ecmd_t *
3479 isp_get_ecmd(ispsoftc_t *isp)
3480 {
3481 isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
3482 if (ecmd) {
3483 isp->isp_osinfo.ecmd_free = ecmd->next;
3484 }
3485 return (ecmd);
3486 }
3487
3488 void
3489 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
3490 {
3491 ecmd->next = isp->isp_osinfo.ecmd_free;
3492 isp->isp_osinfo.ecmd_free = ecmd;
3493 }
3494 #endif