1 /* $NetBSD: isp_netbsd.c,v 1.98 2021/08/07 16:19:12 thorpej Exp $ */
2 /*
3 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
4 */
5 /*
6 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
7 * All rights reserved.
8 *
9 * Additional Copyright (C) 2000-2007 by Matthew Jacob
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.98 2021/08/07 16:19:12 thorpej Exp $");
37
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/ic/isp_ioctl.h>
40 #include <sys/scsiio.h>
41
42 /*
43 * Set a timeout for the watchdogging of a command.
44 *
45 * The dimensional analysis is
46 *
47 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
48 *
49 * =
50 *
51 * (milliseconds / 1000) * hz = ticks
52 *
53 *
54 * For timeouts less than 1 second, we'll get zero. Because of this, and
55 * because we want to establish *our* timeout to be longer than what the
56 * firmware might do, we just add 3 seconds at the back end.
57 */
#define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))

/* Forward declarations for the NetBSD platform glue in this file. */
static void isp_config_interrupts(device_t);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);

static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_gdt(void *);
static void isp_ldt(void *);
static void isp_make_here(ispsoftc_t *, int);
static void isp_make_gone(ispsoftc_t *, int);
static void isp_fc_worker(void *);

/* Human-readable names for the 2-bit role bitmask (initiator/target). */
static const char *roles[4] = {
    "(none)", "Target", "Initiator", "Target/Initiator"
};
/* Log format used when a Fibre Channel device departs. */
static const char prom3[] =
    "PortID %#06x Departed from Target %u because of %s";
int isp_change_is_bad = 0;	/* "changed" devices are bad */
int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
static int isp_fabric_hysteresis = 5;	/* secs to let fabric settle after wake */
/*
 * Compile-time override: shadow the variable above so that all uses
 * below this point read as the constant 0, hard-disabling the
 * "changed devices are bad" behavior while keeping the symbol exported.
 */
#define isp_change_is_bad 0
85
86 /*
87 * Complete attachment of hardware, include subdevices.
88 */
89
/*
 * isp_attach:
 *	Complete platform attachment of an ISP instance: fill in the
 *	scsipi adapter/channel structures, set up the gone-device and
 *	loop-down callouts, spawn the FC worker thread (FC cards only),
 *	reset parallel-SCSI buses, and defer child attachment until
 *	interrupts are configured.
 */
void
isp_attach(struct ispsoftc *isp)
{
    device_t self = isp->isp_osinfo.dev;
    int i;

    isp->isp_state = ISP_RUNSTATE;

    isp->isp_osinfo.adapter.adapt_dev = self;
    isp->isp_osinfo.adapter.adapt_openings = isp->isp_maxcmds;
    /* Seconds of loop-down tolerated before we start failing commands. */
    isp->isp_osinfo.loop_down_limit = 300;

    /*
     * It's not stated whether max_periph is limited by SPI
     * tag uage, but let's assume that it is.
     */
    isp->isp_osinfo.adapter.adapt_max_periph = uimin(isp->isp_maxcmds, 255);
    isp->isp_osinfo.adapter.adapt_ioctl = ispioctl;
    isp->isp_osinfo.adapter.adapt_request = isprequest;
    /* 1020/1020A-class chips can only address 24 bits of transfer length. */
    if (isp->isp_type <= ISP_HA_SCSI_1020A) {
        isp->isp_osinfo.adapter.adapt_minphys = ispminphys_1020;
    } else {
        isp->isp_osinfo.adapter.adapt_minphys = ispminphys;
    }

    /* Gone Device Timer and Loop Down Timer (armed later as needed). */
    callout_init(&isp->isp_osinfo.gdt, 0);
    callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
    callout_init(&isp->isp_osinfo.ldt, 0);
    callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
    if (IS_FC(isp)) {
        /* FC cards get a kthread to watch/repair loop state. */
        if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
            &isp->isp_osinfo.thread, "%s:fc_thrd",
            device_xname(self))) {
            isp_prt(isp, ISP_LOGERR,
                "unable to create FC worker thread");
            return;
        }
    }

    for (i = 0; i != isp->isp_osinfo.adapter.adapt_nchannels; i++) {
        isp->isp_osinfo.chan[i].chan_adapter =
            &isp->isp_osinfo.adapter;
        isp->isp_osinfo.chan[i].chan_bustype = &scsi_bustype;
        isp->isp_osinfo.chan[i].chan_channel = i;
        /*
         * Until the midlayer is fixed to use REPORT LUNS,
         * limit to 8 luns.
         */
        isp->isp_osinfo.chan[i].chan_nluns = uimin(isp->isp_maxluns, 8);
        if (IS_FC(isp)) {
            isp->isp_osinfo.chan[i].chan_ntargets = MAX_FC_TARG;
            /* Without 2K-login firmware only 256 handles exist. */
            if (ISP_CAP_2KLOGIN(isp) == 0 && MAX_FC_TARG > 256) {
                isp->isp_osinfo.chan[i].chan_ntargets = 256;
            }
            /* FC has no "own" target id; use an out-of-range value. */
            isp->isp_osinfo.chan[i].chan_id = MAX_FC_TARG;
        } else {
            isp->isp_osinfo.chan[i].chan_ntargets = MAX_TARGETS;
            isp->isp_osinfo.chan[i].chan_id =
                SDPARAM(isp, i)->isp_initiator_id;
            ISP_LOCK(isp);
            (void) isp_control(isp, ISPCTL_RESET_BUS, i);
            ISP_UNLOCK(isp);
        }
    }

    /*
     * Defer enabling mailbox interrupts until later.
     */
    config_interrupts(self, isp_config_interrupts);
}
160
/*
 * isp_config_interrupts:
 *	Deferred-attach hook run once interrupts are live.  Enables
 *	sleeping mailbox commands, arms the initial loop-down timer for
 *	FC cards whose loop isn't up yet, and attaches child buses.
 */
static void
isp_config_interrupts(device_t self)
{
    int i;
    struct ispsoftc *isp = device_private(self);

    /* Interrupts work now, so mailbox commands may sleep instead of spin. */
    isp->isp_osinfo.mbox_sleep_ok = 1;

    if (IS_FC(isp) && (FCPARAM(isp, 0)->isp_fwstate != FW_READY ||
        FCPARAM(isp, 0)->isp_loopstate != LOOP_READY)) {
        isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
            "Starting Initial Loop Down Timer");
        /* Give the loop isp_quickboot_time seconds to come up. */
        callout_schedule(&isp->isp_osinfo.ldt, isp_quickboot_time * hz);
    }

    /*
     * And attach children (if any).
     */
    for (i = 0; i < isp->isp_osinfo.adapter.adapt_nchannels; i++) {
        config_found(self, &isp->isp_osinfo.chan[i], scsiprint,
            CFARGS_NONE);
    }
}
184
185 /*
186 * minphys our xfers
187 */
188 static void
189 ispminphys_1020(struct buf *bp)
190 {
191 if (bp->b_bcount >= (1 << 24)) {
192 bp->b_bcount = (1 << 24);
193 }
194 minphys(bp);
195 }
196
197 static void
198 ispminphys(struct buf *bp)
199 {
200 if (bp->b_bcount >= (1 << 30)) {
201 bp->b_bcount = (1 << 30);
202 }
203 minphys(bp);
204 }
205
206 static int
207 ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
208 struct proc *p)
209 {
210 struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);
211 int nr, bus, retval = ENOTTY;
212
213 switch (cmd) {
214 case ISP_SDBLEV:
215 {
216 int olddblev = isp->isp_dblev;
217 isp->isp_dblev = *(int *)addr;
218 *(int *)addr = olddblev;
219 retval = 0;
220 break;
221 }
222 case ISP_GETROLE:
223 bus = *(int *)addr;
224 if (bus < 0 || bus >= isp->isp_nchan) {
225 retval = -ENXIO;
226 break;
227 }
228 if (IS_FC(isp)) {
229 *(int *)addr = FCPARAM(isp, bus)->role;
230 } else {
231 *(int *)addr = SDPARAM(isp, bus)->role;
232 }
233 retval = 0;
234 break;
235 case ISP_SETROLE:
236
237 nr = *(int *)addr;
238 bus = nr >> 8;
239 if (bus < 0 || bus >= isp->isp_nchan) {
240 retval = -ENXIO;
241 break;
242 }
243 nr &= 0xff;
244 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
245 retval = EINVAL;
246 break;
247 }
248 if (IS_FC(isp)) {
249 *(int *)addr = FCPARAM(isp, bus)->role;
250 FCPARAM(isp, bus)->role = nr;
251 } else {
252 *(int *)addr = SDPARAM(isp, bus)->role;
253 SDPARAM(isp, bus)->role = nr;
254 }
255 retval = 0;
256 break;
257
258 case ISP_RESETHBA:
259 ISP_LOCK(isp);
260 isp_reinit(isp, 0);
261 ISP_UNLOCK(isp);
262 retval = 0;
263 break;
264
265 case ISP_RESCAN:
266 if (IS_FC(isp)) {
267 bus = *(int *)addr;
268 if (bus < 0 || bus >= isp->isp_nchan) {
269 retval = -ENXIO;
270 break;
271 }
272 ISP_LOCK(isp);
273 if (isp_fc_runstate(isp, bus, 5 * 1000000)) {
274 retval = EIO;
275 } else {
276 retval = 0;
277 }
278 ISP_UNLOCK(isp);
279 }
280 break;
281
282 case ISP_FC_LIP:
283 if (IS_FC(isp)) {
284 bus = *(int *)addr;
285 if (bus < 0 || bus >= isp->isp_nchan) {
286 retval = -ENXIO;
287 break;
288 }
289 ISP_LOCK(isp);
290 if (isp_control(isp, ISPCTL_SEND_LIP, bus)) {
291 retval = EIO;
292 } else {
293 retval = 0;
294 }
295 ISP_UNLOCK(isp);
296 }
297 break;
298 case ISP_FC_GETDINFO:
299 {
300 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
301 fcportdb_t *lp;
302
303 if (IS_SCSI(isp)) {
304 break;
305 }
306 if (ifc->loopid >= MAX_FC_TARG) {
307 retval = EINVAL;
308 break;
309 }
310 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
311 if (lp->state == FC_PORTDB_STATE_VALID) {
312 ifc->role = lp->roles;
313 ifc->loopid = lp->handle;
314 ifc->portid = lp->portid;
315 ifc->node_wwn = lp->node_wwn;
316 ifc->port_wwn = lp->port_wwn;
317 retval = 0;
318 } else {
319 retval = ENODEV;
320 }
321 break;
322 }
323 case ISP_GET_STATS:
324 {
325 isp_stats_t *sp = (isp_stats_t *) addr;
326
327 ISP_MEMZERO(sp, sizeof (*sp));
328 sp->isp_stat_version = ISP_STATS_VERSION;
329 sp->isp_type = isp->isp_type;
330 sp->isp_revision = isp->isp_revision;
331 ISP_LOCK(isp);
332 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
333 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
334 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
335 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
336 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
337 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
338 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
339 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
340 ISP_UNLOCK(isp);
341 retval = 0;
342 break;
343 }
344 case ISP_CLR_STATS:
345 ISP_LOCK(isp);
346 isp->isp_intcnt = 0;
347 isp->isp_intbogus = 0;
348 isp->isp_intmboxc = 0;
349 isp->isp_intoasync = 0;
350 isp->isp_rsltccmplt = 0;
351 isp->isp_fphccmplt = 0;
352 isp->isp_rscchiwater = 0;
353 isp->isp_fpcchiwater = 0;
354 ISP_UNLOCK(isp);
355 retval = 0;
356 break;
357 case ISP_FC_GETHINFO:
358 {
359 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
360 bus = hba->fc_channel;
361
362 if (bus < 0 || bus >= isp->isp_nchan) {
363 retval = ENXIO;
364 break;
365 }
366 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
367 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
368 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
369 hba->fc_nchannels = isp->isp_nchan;
370 hba->fc_nports = isp->isp_nchan;/* XXXX 24XX STUFF? XXX */
371 if (IS_FC(isp)) {
372 hba->fc_speed = FCPARAM(isp, bus)->isp_gbspeed;
373 hba->fc_topology = FCPARAM(isp, bus)->isp_topo + 1;
374 hba->fc_loopid = FCPARAM(isp, bus)->isp_loopid;
375 hba->nvram_node_wwn = FCPARAM(isp, bus)->isp_wwnn_nvram;
376 hba->nvram_port_wwn = FCPARAM(isp, bus)->isp_wwpn_nvram;
377 hba->active_node_wwn = FCPARAM(isp, bus)->isp_wwnn;
378 hba->active_port_wwn = FCPARAM(isp, bus)->isp_wwpn;
379 } else {
380 hba->fc_speed = 0;
381 hba->fc_topology = 0;
382 hba->nvram_node_wwn = 0ull;
383 hba->nvram_port_wwn = 0ull;
384 hba->active_node_wwn = 0ull;
385 hba->active_port_wwn = 0ull;
386 }
387 retval = 0;
388 break;
389 }
390 case ISP_TSK_MGMT:
391 {
392 int needmarker;
393 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
394 uint16_t loopid;
395 mbreg_t mbs;
396
397 if (IS_SCSI(isp)) {
398 break;
399 }
400
401 bus = fct->chan;
402 if (bus < 0 || bus >= isp->isp_nchan) {
403 retval = -ENXIO;
404 break;
405 }
406
407 memset(&mbs, 0, sizeof (mbs));
408 needmarker = retval = 0;
409 loopid = fct->loopid;
410 if (ISP_CAP_2KLOGIN(isp) == 0) {
411 loopid <<= 8;
412 }
413 switch (fct->action) {
414 case IPT_CLEAR_ACA:
415 mbs.param[0] = MBOX_CLEAR_ACA;
416 mbs.param[1] = loopid;
417 mbs.param[2] = fct->lun;
418 break;
419 case IPT_TARGET_RESET:
420 mbs.param[0] = MBOX_TARGET_RESET;
421 mbs.param[1] = loopid;
422 needmarker = 1;
423 break;
424 case IPT_LUN_RESET:
425 mbs.param[0] = MBOX_LUN_RESET;
426 mbs.param[1] = loopid;
427 mbs.param[2] = fct->lun;
428 needmarker = 1;
429 break;
430 case IPT_CLEAR_TASK_SET:
431 mbs.param[0] = MBOX_CLEAR_TASK_SET;
432 mbs.param[1] = loopid;
433 mbs.param[2] = fct->lun;
434 needmarker = 1;
435 break;
436 case IPT_ABORT_TASK_SET:
437 mbs.param[0] = MBOX_ABORT_TASK_SET;
438 mbs.param[1] = loopid;
439 mbs.param[2] = fct->lun;
440 needmarker = 1;
441 break;
442 default:
443 retval = EINVAL;
444 break;
445 }
446 if (retval == 0) {
447 if (needmarker) {
448 FCPARAM(isp, bus)->sendmarker = 1;
449 }
450 ISP_LOCK(isp);
451 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
452 ISP_UNLOCK(isp);
453 if (retval) {
454 retval = EIO;
455 }
456 }
457 break;
458 }
459 case ISP_FC_GETDLIST:
460 {
461 isp_dlist_t local, *ua;
462 uint16_t nph, nphe, count, channel, lim;
463 struct wwnpair pair, *uptr;
464
465 if (IS_SCSI(isp)) {
466 retval = EINVAL;
467 break;
468 }
469
470 ua = *(isp_dlist_t **)addr;
471 if (copyin(ua, &local, sizeof (isp_dlist_t))) {
472 retval = EFAULT;
473 break;
474 }
475 lim = local.count;
476 channel = local.channel;
477 if (channel >= isp->isp_nchan) {
478 retval = EINVAL;
479 break;
480 }
481
482 ua = *(isp_dlist_t **)addr;
483 uptr = &ua->wwns[0];
484
485 if (ISP_CAP_2KLOGIN(isp)) {
486 nphe = NPH_MAX_2K;
487 } else {
488 nphe = NPH_MAX;
489 }
490 for (count = 0, nph = 0; count < lim && nph != nphe; nph++) {
491 ISP_LOCK(isp);
492 retval = isp_control(isp, ISPCTL_GET_NAMES, channel,
493 nph, &pair.wwnn, &pair.wwpn);
494 ISP_UNLOCK(isp);
495 if (retval || (pair.wwpn == INI_NONE &&
496 pair.wwnn == INI_NONE)) {
497 retval = 0;
498 continue;
499 }
500 if (copyout(&pair, (void *)uptr++, sizeof (pair))) {
501 retval = EFAULT;
502 break;
503 }
504 count++;
505 }
506 if (retval == 0) {
507 if (copyout(&count, (void *)&ua->count,
508 sizeof (count))) {
509 retval = EFAULT;
510 }
511 }
512 break;
513 }
514 case SCBUSIORESET:
515 ISP_LOCK(isp);
516 if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
517 retval = EIO;
518 } else {
519 retval = 0;
520 }
521 ISP_UNLOCK(isp);
522 break;
523 default:
524 break;
525 }
526 return (retval);
527 }
528
/*
 * ispcmd:
 *	Run a single scsipi transfer (XS_T) through the chip.  Handles
 *	late chip initialization, the FC "loop not yet known" startup
 *	window, paused/blocked queue states, and all isp_start() result
 *	codes, including polled-command completion and the watchdog.
 *	Called with the adapter unlocked; takes/releases ISP_LOCK itself.
 */
static void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
    volatile uint8_t ombi;	/* saved mbox_sleep_ok across polled sections */
    int lim, chan;

    ISP_LOCK(isp);
    /* Late (re)initialization if the chip isn't up yet. */
    if (isp->isp_state < ISP_RUNSTATE) {
        ISP_DISABLE_INTS(isp);
        isp_init(isp);
        if (isp->isp_state != ISP_INITSTATE) {
            ISP_ENABLE_INTS(isp);
            ISP_UNLOCK(isp);
            isp_prt(isp, ISP_LOGERR, "isp not at init state");
            XS_SETERR(xs, HBA_BOTCH);
            scsipi_done(xs);
            return;
        }
        isp->isp_state = ISP_RUNSTATE;
        ISP_ENABLE_INTS(isp);
    }
    chan = XS_CHANNEL(xs);

    /*
     * Handle the case of a FC card where the FC thread hasn't
     * fired up yet and we don't yet have a known loop state.
     */
    if (IS_FC(isp) && (FCPARAM(isp, chan)->isp_fwstate != FW_READY ||
        FCPARAM(isp, chan)->isp_loopstate != LOOP_READY) &&
        isp->isp_osinfo.thread == NULL) {
        ombi = isp->isp_osinfo.mbox_sleep_ok != 0;
        int delay_time;

        /* Polled commands cannot let mailbox commands sleep. */
        if (xs->xs_control & XS_CTL_POLL) {
            isp->isp_osinfo.mbox_sleep_ok = 0;
        }

        /* Give the loop a generous budget only on the first check. */
        if (isp->isp_osinfo.loop_checked == 0) {
            delay_time = 10 * 1000000;
            isp->isp_osinfo.loop_checked = 1;
        } else {
            delay_time = 250000;
        }

        if (isp_fc_runstate(isp, XS_CHANNEL(xs), delay_time) != 0) {
            if (xs->xs_control & XS_CTL_POLL) {
                isp->isp_osinfo.mbox_sleep_ok = ombi;
            }
            /* Never saw the loop: fail fast as selection timeout. */
            if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
                XS_SETERR(xs, HBA_SELTIMEOUT);
                scsipi_done(xs);
                ISP_UNLOCK(isp);
                return;
            }
            /*
             * Otherwise, fall thru to be queued up for later.
             */
        } else {
            /* Loop is good; release any frozen queues. */
            int wasblocked =
                (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
            isp->isp_osinfo.blocked = isp->isp_osinfo.paused = 0;
            if (wasblocked) {
                isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                    "THAW QUEUES @ LINE %d", __LINE__);
                scsipi_channel_thaw(&isp->isp_osinfo.chan[chan],
                    1);
            }
        }
        if (xs->xs_control & XS_CTL_POLL) {
            isp->isp_osinfo.mbox_sleep_ok = ombi;
        }
    }

    /* Resource shortage in progress: bounce the command. */
    if (isp->isp_osinfo.paused) {
        isp_prt(isp, ISP_LOGWARN, "I/O while paused");
        xs->error = XS_RESOURCE_SHORTAGE;
        scsipi_done(xs);
        ISP_UNLOCK(isp);
        return;
    }
    /* Loop down: requeue while retries remain, then fail. */
    if (isp->isp_osinfo.blocked) {
        isp_prt(isp, ISP_LOGWARN,
            "I/O while blocked with retries %d", xs->xs_retries);
        if (xs->xs_retries) {
            xs->error = XS_REQUEUE;
            xs->xs_retries--;
        } else {
            XS_SETERR(xs, HBA_SELTIMEOUT);
        }
        scsipi_done(xs);
        ISP_UNLOCK(isp);
        return;
    }

    if (xs->xs_control & XS_CTL_POLL) {
        ombi = isp->isp_osinfo.mbox_sleep_ok;
        isp->isp_osinfo.mbox_sleep_ok = 0;
    }

    switch (isp_start(xs)) {
    case CMD_QUEUED:
        /*
         * First command to a FC target: publish its port WWN as a
         * device property so upper layers can identify the target.
         */
        if (IS_FC(isp) && isp->isp_osinfo.wwns[XS_TGT(xs)] == 0) {
            fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs));
            int dbidx = fcp->isp_dev_map[XS_TGT(xs)] - 1;
            device_t dev = xs->xs_periph->periph_dev;

            if (dbidx >= 0 && dev &&
                prop_dictionary_set_uint64(device_properties(dev),
                "port-wwn", fcp->portdb[dbidx].port_wwn) == TRUE) {
                isp->isp_osinfo.wwns[XS_TGT(xs)] =
                    fcp->portdb[dbidx].port_wwn;
            }
        }
        if (xs->xs_control & XS_CTL_POLL) {
            isp_polled_cmd_wait(isp, xs);
            isp->isp_osinfo.mbox_sleep_ok = ombi;
        } else if (xs->timeout) {
            /* Arm the per-command watchdog (see _XT above). */
            callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
        }
        break;
    case CMD_EAGAIN:
        /* Out of chip resources: pause all channels until one frees. */
        isp->isp_osinfo.paused = 1;
        xs->error = XS_RESOURCE_SHORTAGE;
        isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
            "FREEZE QUEUES @ LINE %d", __LINE__);
        for (chan = 0; chan < isp->isp_nchan; chan++) {
            scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
        }
        scsipi_done(xs);
        break;
    case CMD_RQLATER:
        /*
         * We can only get RQLATER from FC devices (1 channel only)
         *
         * If we've never seen loop up see if if we've been down
         * quickboot time, otherwise wait loop down limit time.
         * If so, then we start giving up on commands.
         */
        if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
            lim = isp_quickboot_time;
        } else {
            lim = isp->isp_osinfo.loop_down_limit;
        }
        if (isp->isp_osinfo.loop_down_time >= lim) {
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "RQLATER->SELTIMEOUT for %d (%d >= %d)", XS_TGT(xs),
                isp->isp_osinfo.loop_down_time, lim);
            XS_SETERR(xs, HBA_SELTIMEOUT);
            scsipi_done(xs);
            break;
        }
        if (isp->isp_osinfo.blocked == 0) {
            isp->isp_osinfo.blocked = 1;
            scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "FREEZE QUEUES @ LINE %d", __LINE__);
        } else {
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
        }
        xs->error = XS_REQUEUE;
        scsipi_done(xs);
        break;
    case CMD_COMPLETE:
        /* isp_start already set xs->error as appropriate. */
        scsipi_done(xs);
        break;
    }
    ISP_UNLOCK(isp);
}
698
/*
 * isprequest:
 *	scsipi adapt_request hook: dispatch midlayer requests to the
 *	appropriate handler.  RUN_XFER goes to ispcmd(); SET_XFER_MODE
 *	updates the per-target SPI negotiation goals (SCSI cards only).
 */
static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
    struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);

    switch (req) {
    case ADAPTER_REQ_RUN_XFER:
        ispcmd(isp, (XS_T *) arg);
        break;

    case ADAPTER_REQ_GROW_RESOURCES:
        /* Not supported. */
        break;

    case ADAPTER_REQ_SET_XFER_MODE:
        if (IS_SCSI(isp)) {
            struct scsipi_xfer_mode *xm = arg;
            int dflags = 0;
            sdparam *sdp = SDPARAM(isp, chan->chan_channel);

            /* Translate midlayer capability bits to device params. */
            if (xm->xm_mode & PERIPH_CAP_TQING)
                dflags |= DPARM_TQING;
            if (xm->xm_mode & PERIPH_CAP_WIDE16)
                dflags |= DPARM_WIDE;
            if (xm->xm_mode & PERIPH_CAP_SYNC)
                dflags |= DPARM_SYNC;
            ISP_LOCK(isp);
            sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
            dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
            /* Mark for renegotiation on the next command. */
            sdp->isp_devparam[xm->xm_target].dev_update = 1;
            sdp->update = 1;
            ISP_UNLOCK(isp);
            isp_prt(isp, ISP_LOGDEBUG1,
                "isprequest: device flags %#x for %d.%d.X",
                dflags, chan->chan_channel, xm->xm_target);
            break;
        }
        /* FALLTHROUGH — xfer modes are meaningless on FC cards */
    default:
        break;
    }
}
740
/*
 * isp_polled_cmd_wait:
 *	Busy-wait for a polled command to complete, servicing the chip's
 *	interrupt status by hand.  Waits xs's timeout in milliseconds, or
 *	forever when the timeout is zero.  If the command never finishes,
 *	abort it (reinitializing the chip if even the abort fails) and
 *	complete it with HBA_BOTCH.
 */
static void
isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
{
    int infinite = 0, mswait;

    /*
     * If we can't use interrupts, poll on completion.
     */
    if ((mswait = XS_TIME(xs)) == 0) {
        infinite = 1;
    }

    while (mswait || infinite) {
        uint32_t isr;
        uint16_t sema, mbox;
        /* Manually service any pending interrupt condition. */
        if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
            isp_intr(isp, isr, sema, mbox);
            if (XS_CMD_DONE_P(xs)) {
                break;
            }
        }
        ISP_DELAY(1000);	/* 1ms per loop iteration */
        mswait -= 1;
    }

    /*
     * If no other error occurred but we didn't finish
     * something bad happened, so abort the command.
     */
    if (XS_CMD_DONE_P(xs) == 0) {
        if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
            /* Abort failed too: hard-reset the chip. */
            isp_reinit(isp, 0);
        }
        if (XS_NOERR(xs)) {
            isp_prt(isp, ISP_LOGERR, "polled command timed out");
            XS_SETERR(xs, HBA_BOTCH);
        }
    }
    scsipi_done(xs);
}
781
/*
 * isp_done:
 *	Platform command-completion hook called from the core driver.
 *	Stops the watchdog, translates QUEUE FULL into XS_BUSY, thaws
 *	paused channels, and hands the transfer back to scsipi.
 *	Completion is skipped while the watchdog owns the command
 *	(XS_CMD_WDOG_P) — the watchdog will finish it itself.
 */
void
isp_done(XS_T *xs)
{
    if (XS_CMD_WDOG_P(xs) == 0) {
        struct ispsoftc *isp = XS_ISP(xs);
        callout_stop(&xs->xs_callout);
        if (XS_CMD_GRACE_P(xs)) {
            /* Completed after the watchdog's grace marker fired. */
            isp_prt(isp, ISP_LOGDEBUG1,
                "finished command on borrowed time");
        }
        XS_CMD_S_CLEAR(xs);
        /*
         * Fixup- if we get a QFULL, we need
         * to set XS_BUSY as the error.
         */
        if (xs->status == SCSI_QUEUE_FULL) {
            xs->error = XS_BUSY;
        }
        /* A completion means resources freed: un-pause all channels. */
        if (isp->isp_osinfo.paused) {
            int i;
            isp->isp_osinfo.paused = 0;
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "THAW QUEUES @ LINE %d", __LINE__);
            for (i = 0; i < isp->isp_nchan; i++) {
                scsipi_channel_timed_thaw(&isp->isp_osinfo.chan[i]);
            }
        }
        if (xs->error == XS_DRIVER_STUFFUP) {
            isp_prt(isp, ISP_LOGERR,
                "BOTCHED cmd for %d.%d.%d cmd %#x datalen %ld",
                XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
                XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
        }
        scsipi_done(xs);
    }
}
818
/*
 * isp_dog:
 *	Per-command watchdog callout.  On first expiry the command gets a
 *	one-second grace period and a SYNC_ALL marker is queued to flush
 *	the channel; on second expiry (grace already set) the command is
 *	aborted, its DMA and handle released, and it completes with
 *	XS_TIMEOUT.  Runs with the interrupt lock held and mailbox
 *	sleeping disabled (callout context cannot sleep).
 */
static void
isp_dog(void *arg)
{
    XS_T *xs = arg;
    struct ispsoftc *isp = XS_ISP(xs);
    uint32_t handle;
    int sok;


    ISP_ILOCK(isp);
    /* Callouts cannot sleep; force polled mailbox operation. */
    sok = isp->isp_osinfo.mbox_sleep_ok;
    isp->isp_osinfo.mbox_sleep_ok = 0;
    /*
     * We've decided this command is dead. Make sure we're not trying
     * to kill a command that's already dead by getting its handle and
     * and seeing whether it's still alive.
     */
    handle = isp_find_handle(isp, xs);
    if (handle) {
        uint32_t isr;
        uint16_t mbox, sema;

        if (XS_CMD_DONE_P(xs)) {
            isp_prt(isp, ISP_LOGDEBUG1,
                "watchdog found done cmd (handle %#x)", handle);
            goto out;
        }

        if (XS_CMD_WDOG_P(xs)) {
            isp_prt(isp, ISP_LOGDEBUG1,
                "recursive watchdog (handle %#x)", handle);
            goto out;
        }

        /* Claim the command so isp_done() defers to us. */
        XS_CMD_S_WDOG(xs);

        /* Service any pending interrupt; it may complete the command. */
        if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
            isp_intr(isp, isr, sema, mbox);

        }
        if (XS_CMD_DONE_P(xs)) {
            isp_prt(isp, ISP_LOGDEBUG1,
                "watchdog cleanup for handle %#x", handle);
            XS_CMD_C_WDOG(xs);
            isp_done(xs);
        } else if (XS_CMD_GRACE_P(xs)) {
            /* Second expiry: the grace period is up — kill it. */
            isp_prt(isp, ISP_LOGDEBUG1,
                "watchdog timeout for handle %#x", handle);
            /*
             * Make sure the command is *really* dead before we
             * release the handle (and DMA resources) for reuse.
             */
            (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

            /*
             * After this point, the command is really dead.
             */
            if (XS_XFRLEN(xs)) {
                ISP_DMAFREE(isp, xs, handle);
            }
            isp_destroy_handle(isp, handle);
            XS_SETERR(xs, XS_TIMEOUT);
            XS_CMD_S_CLEAR(xs);
            isp_done(xs);
        } else {
            /*
             * First expiry: grant one second of grace, re-arm the
             * watchdog, and queue a SYNC_ALL marker so any straggling
             * response for this channel gets flushed out.
             */
            void *qe;
            isp_marker_t local, *mp = &local;
            isp_prt(isp, ISP_LOGDEBUG2,
                "possible command timeout on handle %x", handle);
            XS_CMD_C_WDOG(xs);
            callout_reset(&xs->xs_callout, hz, isp_dog, xs);
            qe = isp_getrqentry(isp);
            if (qe == NULL)
                goto out;	/* request queue full; try again next tick */
            XS_CMD_S_GRACE(xs);
            ISP_MEMZERO((void *) mp, sizeof (*mp));
            mp->mrk_header.rqs_entry_count = 1;
            mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
            mp->mrk_modifier = SYNC_ALL;
            mp->mrk_target = XS_CHANNEL(xs) << 7;	/* bus in upper bits */
            isp_put_marker(isp, mp, qe);
            ISP_SYNC_REQUEST(isp);
        }
    } else {
        isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    }
 out:
    isp->isp_osinfo.mbox_sleep_ok = sok;
    ISP_IUNLOCK(isp);
}
909
910 /*
911 * Gone Device Timer Function- when we have decided that a device has gone
912 * away, we wait a specific period of time prior to telling the OS it has
913 * gone away.
914 *
915 * This timer function fires once a second and then scans the port database
916 * for devices that are marked dead but still have a virtual target assigned.
917 * We decrement a counter for that port database entry, and when it hits zero,
918 * we tell the OS the device has gone away.
919 */
/*
 * isp_gdt:
 *	Gone Device Timer callout body (see the block comment above).
 *	Scans channel 0's port database for ZOMBIE entries that still
 *	hold a target mapping, counting each one down; when an entry's
 *	counter reaches zero, the target mapping is torn down and the OS
 *	is told the device left.  Re-arms itself while work remains.
 */
static void
isp_gdt(void *arg)
{
    ispsoftc_t *isp = arg;
    fcportdb_t *lp;
    int dbidx, tgt, more_to_do = 0;

    isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
    ISP_LOCK(isp);
    for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
        lp = &FCPARAM(isp, 0)->portdb[dbidx];

        if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
            continue;
        }
        if (lp->dev_map_idx == 0) {
            continue;	/* no target assigned; nothing to announce */
        }
        if (lp->new_reserved == 0) {
            continue;	/* countdown never started for this entry */
        }
        lp->new_reserved -= 1;
        if (lp->new_reserved != 0) {
            /* Still counting down; keep the timer running. */
            more_to_do++;
            continue;
        }
        /* Countdown hit zero: drop the target mapping and announce. */
        tgt = lp->dev_map_idx - 1;
        FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
        lp->dev_map_idx = 0;
        lp->state = FC_PORTDB_STATE_NIL;
        isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
            "Gone Device Timeout");
        isp_make_gone(isp, tgt);
    }
    if (more_to_do) {
        callout_schedule(&isp->isp_osinfo.gdt, hz);
    } else {
        isp->isp_osinfo.gdt_running = 0;
        isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
            "stopping Gone Device Timer");
    }
    ISP_UNLOCK(isp);
}
963
964 /*
965 * Loop Down Timer Function- when loop goes down, a timer is started and
966 * and after it expires we come here and take all probational devices that
967 * the OS knows about and the tell the OS that they've gone away.
968 *
969 * We don't clear the devices out of our port database because, when loop
970 * come back up, we have to do some actual cleanup with the chip at that
971 * point (implicit PLOGO, e.g., to get the chip's port database state right).
972 */
/*
 * isp_ldt:
 *	Loop Down Timer callout body (see the block comment above).
 *	Announces the departure of every PROBATIONAL device that still
 *	has a target mapping, then pushes loop_down_time past the limit
 *	and wakes the FC worker thread so it re-evaluates loop state.
 *	Port database entries are deliberately left in place for the
 *	implicit-PLOGO cleanup when the loop returns.
 */
static void
isp_ldt(void *arg)
{
    ispsoftc_t *isp = arg;
    fcportdb_t *lp;
    int dbidx, tgt;

    isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
    ISP_LOCK(isp);

    /*
     * Notify to the OS all targets who we now consider have departed.
     */
    for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
        lp = &FCPARAM(isp, 0)->portdb[dbidx];

        if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
            continue;
        }
        if (lp->dev_map_idx == 0) {
            continue;	/* never announced to the OS; nothing to do */
        }

        /*
         * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
         */

        /*
         * Mark that we've announced that this device is gone....
         */
        lp->reserved = 1;

        /*
         * but *don't* change the state of the entry. Just clear
         * any target id stuff and announce to CAM that the
         * device is gone. This way any necessary PLOGO stuff
         * will happen when loop comes back up.
         */

        tgt = lp->dev_map_idx - 1;
        FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
        lp->dev_map_idx = 0;
        isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
            "Loop Down Timeout");
        isp_make_gone(isp, tgt);
    }

    /*
     * The loop down timer has expired. Wake up the kthread
     * to notice that fact (or make it false).
     */
    isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
    wakeup(&isp->isp_osinfo.thread);
    ISP_UNLOCK(isp);
}
1028
/*
 * isp_make_here:
 *	Announce the arrival of FC target 'tgt'.  Currently only logs;
 *	no midlayer rescan is triggered here.
 */
static void
isp_make_here(ispsoftc_t *isp, int tgt)
{
    isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
}
1034
/*
 * isp_make_gone:
 *	Announce the departure of FC target 'tgt'.  Currently only logs;
 *	no midlayer detach is triggered here.
 */
static void
isp_make_gone(ispsoftc_t *isp, int tgt)
{
    isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
}
1040
/*
 * isp_fc_worker:
 *	Kernel thread that baby-sits Fibre Channel loop state.  Repeats:
 *	evaluate loop state with isp_fc_runstate(); while the loop is
 *	down, accumulate loop_down_time and back off the polling interval
 *	(1s -> 5s -> 10s -> 20s -> 30s), blocking the queues once the
 *	down-time limit is exceeded; when the loop is good, thaw queues.
 *	Exits when isp_osinfo.thread is cleared (by detach).
 *	Runs at splbio; uses tsleep for its pacing.
 */
static void
isp_fc_worker(void *arg)
{
    ispsoftc_t *isp = arg;
    int slp = 0;	/* seconds slept last iteration (0 = just woke) */
    int chan = 0;	/* only channel 0 is managed here */

    int s = splbio();
    /*
     * The first loop is for our usage where we have yet to have
     * gotten good fibre channel state.
     */
    while (isp->isp_osinfo.thread != NULL) {
        int sok, lb, lim;

        isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
        /* Thread context: mailbox commands are allowed to sleep. */
        sok = isp->isp_osinfo.mbox_sleep_ok;
        isp->isp_osinfo.mbox_sleep_ok = 1;
        lb = isp_fc_runstate(isp, chan, 250000);
        isp->isp_osinfo.mbox_sleep_ok = sok;
        if (lb) {
            /*
             * Increment loop down time by the last sleep interval
             */
            isp->isp_osinfo.loop_down_time += slp;

            if (lb < 0) {
                isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                    "FC loop not up (down count %d)",
                    isp->isp_osinfo.loop_down_time);
            } else {
                isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                    "FC got to %d (down count %d)",
                    lb, isp->isp_osinfo.loop_down_time);
            }


            /*
             * If we've never seen loop up and we've waited longer
             * than quickboot time, or we've seen loop up but we've
             * waited longer than loop_down_limit, give up and go
             * to sleep until loop comes up.
             */
            if (FCPARAM(isp, 0)->loop_seen_once == 0) {
                lim = isp_quickboot_time;
            } else {
                lim = isp->isp_osinfo.loop_down_limit;
            }
            if (isp->isp_osinfo.loop_down_time >= lim) {
                /*
                 * If we're now past our limit, release
                 * the queues and let them come in and
                 * either get HBA_SELTIMOUT or cause
                 * another freeze.
                 */
                isp->isp_osinfo.blocked = 1;
                slp = 0;
            } else if (isp->isp_osinfo.loop_down_time < 10) {
                slp = 1;
            } else if (isp->isp_osinfo.loop_down_time < 30) {
                slp = 5;
            } else if (isp->isp_osinfo.loop_down_time < 60) {
                slp = 10;
            } else if (isp->isp_osinfo.loop_down_time < 120) {
                slp = 20;
            } else {
                slp = 30;
            }

        } else {
            /* Loop is healthy: reset counters and release queues. */
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "FC state OK");
            isp->isp_osinfo.loop_down_time = 0;
            slp = 0;
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "THAW QUEUES @ LINE %d", __LINE__);
            scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
        }

        /*
         * If we'd frozen the queues, unfreeze them now so that
         * we can start getting commands. If the FC state isn't
         * okay yet, they'll hit that in isp_start which will
         * freeze the queues again.
         */
        if (isp->isp_osinfo.blocked) {
            isp->isp_osinfo.blocked = 0;
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "THAW QUEUES @ LINE %d", __LINE__);
            scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
        }
        isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
        /* slp == 0 sleeps until explicitly woken (e.g. by isp_ldt). */
        tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);

        /*
         * If slp is zero, we're waking up for the first time after
         * things have been okay. In this case, we set a deferral state
         * for all commands and delay hysteresis seconds before starting
         * the FC state evaluation. This gives the loop/fabric a chance
         * to settle.
         */
        if (slp == 0 && isp_fabric_hysteresis) {
            isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
                "sleep hysteresis tick time %d",
                isp_fabric_hysteresis * hz);
            (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
                (isp_fabric_hysteresis * hz));
        }
    }
    splx(s);

    /* In case parent is waiting for us to exit. */
    wakeup(&isp->isp_osinfo.thread);
    kthread_exit(0);
}
1156
1157 /*
1158 * Free any associated resources prior to decommissioning and
1159 * set the card to a known state (so it doesn't wake up and kick
1160 * us when we aren't expecting it to).
1161 *
1162 * Locks are held before coming here.
1163 */
void
isp_uninit(struct ispsoftc *isp)
{
	/* Serialize against the interrupt handler while we shut down. */
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	ISP_DISABLE_INTS(isp);
	isp_unlock(isp);
}
1174
/*
 * Platform callback for asynchronous events reported by the common
 * (MI) ISP code: target-parameter negotiation, bus/loop events,
 * Fibre Channel device arrival/departure/change, change notifies,
 * and firmware crashes.  Each event's trailing arguments are decoded
 * with va_arg according to the event type.
 */
void
isp_async(struct ispsoftc *isp, ispasync_t cmd, ...)
{
	int bus, tgt;
	const char *msg = NULL;
	/* Format strings for announcing FC port database entries. */
	static const char prom[] =
	    "PortID %#06x handle %#x role %s %s\n"
	    " WWNN %#08x%08x WWPN %#08x%08x";
	static const char prom2[] =
	    "PortID %#06x handle %#x role %s %s tgt %u\n"
	    " WWNN %#08x%08x WWPN %#08x%08x";
	fcportdb_t *lp;
	va_list ap;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		/*
		 * Parallel SCSI only: report newly negotiated sync/wide/TQ
		 * settings for (bus, tgt) up to the scsipi midlayer.
		 */
		if (IS_SCSI(isp)) {
			sdparam *sdp;
			int flags;
			struct scsipi_xfer_mode xm;

			va_start(ap, cmd);
			bus = va_arg(ap, int);
			tgt = va_arg(ap, int);
			va_end(ap);
			sdp = SDPARAM(isp, bus);
			flags = sdp->isp_devparam[tgt].actv_flags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].actv_period;
			xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
			xm.xm_target = tgt;

			/* Sync only counts if both period and offset are set. */
			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(&isp->isp_osinfo.chan[bus],
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
		/* FALLTHROUGH */
	case ISPASYNC_BUS_RESET:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		scsipi_async_event(&isp->isp_osinfo.chan[bus],
		    ASYNC_EVENT_RESET, NULL);
		break;
	case ISPASYNC_LIP:
		/*
		 * The three loop events below share the freeze logic; msg
		 * distinguishes which one actually occurred.
		 */
		if (msg == NULL) {
			msg = "LIP Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_RESET:
		if (msg == NULL) {
			msg = "LOOP Reset Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_DOWN:
		if (msg == NULL) {
			msg = "Loop DOWN";
		}
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);

		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running and interrupts that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.mbox_sleep_ok &&
		    isp->isp_osinfo.blocked == 0 &&
		    isp->isp_osinfo.thread) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			/* Arm the loop-down timer unless it is already pending. */
			if (callout_pending(&isp->isp_osinfo.ldt) == 0) {
				callout_schedule(&isp->isp_osinfo.ldt,
				    isp->isp_osinfo.loop_down_limit * hz);
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "Starting Loop Down Timer");
			}
		}
		isp_prt(isp, ISP_LOGINFO, "%s", msg);
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_DEV_ARRIVED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		/* Cancel any pending "gone device" state for this entry. */
		lp->reserved = 0;
		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
			int dbidx = lp - FCPARAM(isp, bus)->portdb;
			int i;

			/*
			 * Find a free virtual target id, skipping the
			 * reserved FL_ID..SNS_ID range.
			 */
			for (i = 0; i < MAX_FC_TARG; i++) {
				if (i >= FL_ID && i <= SNS_ID) {
					continue;
				}
				if (FCPARAM(isp, bus)->isp_dev_map[i] == 0) {
					break;
				}
			}
			if (i < MAX_FC_TARG) {
				/* dev_map entries are stored biased by one. */
				FCPARAM(isp, bus)->isp_dev_map[i] = dbidx + 1;
				lp->dev_map_idx = i + 1;
			} else {
				isp_prt(isp, ISP_LOGWARN, "out of target ids");
				isp_dump_portdb(isp, bus);
			}
		}
		if (lp->dev_map_idx) {
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
			isp_make_here(isp, tgt);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_CHANGED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (isp_change_is_bad) {
			/* Policy: treat any change as a departure. */
			lp->state = FC_PORTDB_STATE_NIL;
			if (lp->dev_map_idx) {
				tgt = lp->dev_map_idx - 1;
				FCPARAM(isp, bus)->isp_dev_map[tgt] = 0;
				lp->dev_map_idx = 0;
				/*
				 * NOTE(review): prom3 is not declared in this
				 * function; presumably a file-scope format
				 * string defined elsewhere in this file.
				 */
				isp_prt(isp, ISP_LOGCONFIG, prom3,
				    lp->portid, tgt, "change is bad");
				isp_make_gone(isp, tgt);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles],
				    "changed and departed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		} else {
			/* Adopt the new identity in place. */
			lp->portid = lp->new_portid;
			lp->roles = lp->new_roles;
			if (lp->dev_map_idx) {
				int t = lp->dev_map_idx - 1;
				FCPARAM(isp, bus)->isp_dev_map[t] =
				    (lp - FCPARAM(isp, bus)->portdb) + 1;
				tgt = lp->dev_map_idx - 1;
				isp_prt(isp, ISP_LOGCONFIG, prom2,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed at", tgt,
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		}
		break;
	case ISPASYNC_DEV_STAYED:
		/* Device unchanged across rescan; just announce it. */
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (lp->dev_map_idx) {
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_GONE:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		/*
		 * If this has a virtual target and we haven't marked it
		 * that we're going to have isp_gdt tell the OS it's gone,
		 * set the isp_gdt timer running on it.
		 *
		 * If it isn't marked that isp_gdt is going to get rid of it,
		 * announce that it's gone.
		 */
		if (lp->dev_map_idx && lp->reserved == 0) {
			lp->reserved = 1;
			/* Countdown (in gdt ticks) until the OS is told. */
			lp->new_reserved = isp->isp_osinfo.gone_device_time;
			lp->state = FC_PORTDB_STATE_ZOMBIE;
			if (isp->isp_osinfo.gdt_running == 0) {
				isp->isp_osinfo.gdt_running = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "starting Gone Device Timer");
				callout_schedule(&isp->isp_osinfo.gdt, hz);
			}
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "gone zombie at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else if (lp->reserved == 0) {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "departed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
	{
		int opt;

		va_start(ap, cmd);
		bus = va_arg(ap, int);
		opt = va_arg(ap, int);
		va_end(ap);

		if (opt == ISPASYNC_CHANGE_PDB) {
			msg = "Port Database Changed";
		} else if (opt == ISPASYNC_CHANGE_SNS) {
			msg = "Name Server Database Changed";
		} else {
			msg = "Other Change Notify";
		}
		/*
		 * If the loop down timer is running, cancel it.
		 */
		if (callout_pending(&isp->isp_osinfo.ldt)) {
			callout_stop(&isp->isp_osinfo.ldt);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "Stopping Loop Down Timer");
		}
		isp_prt(isp, ISP_LOGINFO, "%s", msg);
		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		uint16_t mbox1;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			bus = ISP_READ(isp, OUTMAILBOX6);
		} else {
			bus = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address %#x",
		    bus, mbox1);
		if (IS_FC(isp)) {
			if (isp->isp_osinfo.blocked == 0) {
				isp->isp_osinfo.blocked = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}
		/*
		 * Force polled mailbox mode across the reinit, then
		 * restore the previous setting (mbox1 is reused as a
		 * temporary here).
		 */
		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp_reinit(isp, 0);
		isp->isp_osinfo.mbox_sleep_ok = mbox1;
		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
		break;
	}
	default:
		break;
	}
}
1517
1518 void
1519 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1520 {
1521 va_list ap;
1522 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1523 return;
1524 }
1525 printf("%s: ", device_xname(isp->isp_osinfo.dev));
1526 va_start(ap, fmt);
1527 vprintf(fmt, ap);
1528 va_end(ap);
1529 printf("\n");
1530 }
1531
1532 void
1533 isp_xs_prt(struct ispsoftc *isp, XS_T *xs, int level, const char *fmt, ...)
1534 {
1535 va_list ap;
1536 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1537 return;
1538 }
1539 scsipi_printaddr(xs->xs_periph);
1540 va_start(ap, fmt);
1541 vprintf(fmt, ap);
1542 va_end(ap);
1543 printf("\n");
1544 }
1545
1546 void
1547 isp_lock(struct ispsoftc *isp)
1548 {
1549 int s = splbio();
1550 if (isp->isp_osinfo.islocked++ == 0) {
1551 isp->isp_osinfo.splsaved = s;
1552 } else {
1553 splx(s);
1554 }
1555 }
1556
1557 void
1558 isp_unlock(struct ispsoftc *isp)
1559 {
1560 if (isp->isp_osinfo.islocked-- <= 1) {
1561 isp->isp_osinfo.islocked = 0;
1562 splx(isp->isp_osinfo.splsaved);
1563 }
1564 }
1565
/*
 * Return the elapsed time b - a, converted via GET_NANOSEC().
 * Never reports zero: a zero difference is bumped to 1 so callers
 * can treat the result as a strictly positive duration.
 */
uint64_t
isp_microtime_sub(struct timeval *b, struct timeval *a)
{
	struct timeval delta;
	uint64_t elapsed;

	timersub(b, a, &delta);
	elapsed = GET_NANOSEC(&delta);
	return (elapsed == 0) ? 1 : elapsed;
}
1577
1578 int
1579 isp_mbox_acquire(ispsoftc_t *isp)
1580 {
1581 if (isp->isp_osinfo.mboxbsy) {
1582 return (1);
1583 } else {
1584 isp->isp_osinfo.mboxcmd_done = 0;
1585 isp->isp_osinfo.mboxbsy = 1;
1586 return (0);
1587 }
1588 }
1589
/*
 * Wait for the current mailbox command to complete, either by sleeping
 * (if the caller's context allows it) or by polling the interrupt
 * status registers.  On timeout, logs a warning, forces param[0] to
 * MBOX_TIMEOUT and marks the command done so the caller can proceed.
 */
void
isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
{
	unsigned int usecs = mbp->timeout;
	unsigned int maxc, olim, ilim;
	struct timeval start;

	if (usecs == 0) {
		usecs = MBCMD_DEFAULT_TIMEOUT;
	}
	/* Scale the budget by the number of queued work iterations. */
	maxc = isp->isp_mbxwrk0 + 1;

	microtime(&start);
	if (isp->isp_osinfo.mbox_sleep_ok) {
		int to;
		struct timeval tv, utv;

		/* Accumulate maxc * usecs into tv, then convert to ticks. */
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		for (olim = 0; olim < maxc; olim++) {
			utv.tv_sec = usecs / 1000000;
			utv.tv_usec = usecs % 1000000;
			timeradd(&tv, &utv, &tv);
		}
		to = tvtohz(&tv);
		if (to == 0)
			to = 1;	/* never sleep for zero ticks */
		timeradd(&tv, &start, &tv);

		/*
		 * Clear mbox_sleep_ok around the sleep so nested paths
		 * don't also try to sleep; isp_mbox_notify_done() wakes us.
		 */
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp->isp_osinfo.mbox_sleeping = 1;
		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
		isp->isp_osinfo.mbox_sleeping = 0;
		isp->isp_osinfo.mbox_sleep_ok = 1;
	} else {
		/* Polled path: spin in 100us steps, servicing interrupts. */
		for (olim = 0; olim < maxc; olim++) {
			for (ilim = 0; ilim < usecs; ilim += 100) {
				uint32_t isr;
				uint16_t sema, mbox;
				if (isp->isp_osinfo.mboxcmd_done) {
					break;
				}
				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
					isp_intr(isp, isr, sema, mbox);
					if (isp->isp_osinfo.mboxcmd_done) {
						break;
					}
				}
				ISP_DELAY(100);
			}
			if (isp->isp_osinfo.mboxcmd_done) {
				break;
			}
		}
	}
	if (isp->isp_osinfo.mboxcmd_done == 0) {
		struct timeval finish, elapsed;

		microtime(&finish);
		timersub(&finish, &start, &elapsed);
		isp_prt(isp, ISP_LOGWARN,
		    "%s Mailbox Command (%#x) Timeout (%juus actual)",
		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
		    isp->isp_lastmbxcmd, (intmax_t)(elapsed.tv_sec * 1000000) +
		    elapsed.tv_usec);
		mbp->param[0] = MBOX_TIMEOUT;
		isp->isp_osinfo.mboxcmd_done = 1;
	}
}
1659
1660 void
1661 isp_mbox_notify_done(ispsoftc_t *isp)
1662 {
1663 if (isp->isp_osinfo.mbox_sleeping) {
1664 wakeup(&isp->isp_mbxworkp);
1665 }
1666 isp->isp_osinfo.mboxcmd_done = 1;
1667 }
1668
1669 void
1670 isp_mbox_release(ispsoftc_t *isp)
1671 {
1672 isp->isp_osinfo.mboxbsy = 0;
1673 }
Cache object: 3dfcd0fd147debcd1cd2e4e9086a9b00
|