1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34 /**
35 * @file
36 * Implement remote device state machine for target and initiator.
37 */
38
39 /*!
40 @defgroup device_sm Node State Machine: Remote Device States
41 */
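
/*
 * Rough sketch of the typical transitions implemented below (illustrative
 * only, not exhaustive; error and shutdown side paths are omitted):
 *
 *   initiator:   __ocs_d_init --(PLOGI sent)--> __ocs_d_wait_plogi_rsp
 *                  --> __ocs_d_wait_node_attach --> __ocs_d_port_logged_in
 *                  --(PRLI accepted)--> __ocs_d_device_ready
 *
 *   target:      __ocs_d_init --(PLOGI received)--> __ocs_d_wait_node_attach
 *                  --> __ocs_d_wait_plogi_acc_cmpl --> __ocs_d_port_logged_in
 *                  --(PRLI received)--> __ocs_d_device_ready
 *
 *   teardown:    __ocs_d_initiate_shutdown --> __ocs_d_wait_del_node /
 *                  __ocs_d_wait_del_ini_tgt --> node cleanup
 *
 *   rediscovery: __ocs_d_device_ready --(node missing)--> __ocs_d_device_gone
 *                  --(node refound, ADISC sent)--> __ocs_d_wait_adisc_rsp
 *                  --> __ocs_d_device_ready
 */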
42
43 #include "ocs.h"
44 #include "ocs_device.h"
45 #include "ocs_fabric.h"
46 #include "ocs_els.h"
47
48 static void *__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
49 static void *__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
50 static void *__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
51 static int32_t ocs_process_abts(ocs_io_t *io, fc_header_t *hdr);
52
53 /**
54 * @ingroup device_sm
55 * @brief Send response to PRLI.
56 *
57 * <h3 class="desc">Description</h3>
58 * For device nodes, this function sends a PRLI response.
59 *
60 * @param io Pointer to a SCSI IO object.
 * @param ox_id OX_ID of the received PRLI.
 *
 * @return None.
64 */
65
66 void
67 ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id)
68 {
69 ocs_t *ocs = io->ocs;
70 ocs_node_t *node = io->node;
71
72 /* If the back-end doesn't support the fc-type, we send an LS_RJT */
73 if (ocs->fc_type != node->fc_type) {
74 node_printf(node, "PRLI rejected by target-server, fc-type not supported\n");
75 ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
76 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
77 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		return;
	}
80
81 /* If the back-end doesn't want to talk to this initiator, we send an LS_RJT */
82 if (node->sport->enable_tgt && (ocs_scsi_validate_initiator(node) == 0)) {
83 node_printf(node, "PRLI rejected by target-server\n");
84 ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
85 FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL);
86 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
87 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
88 } else {
89 /*sm: process PRLI payload, send PRLI acc */
90 ocs_send_prli_acc(io, ox_id, ocs->fc_type, NULL, NULL);
91
92 /* Immediately go to ready state to avoid window where we're
93 * waiting for the PRLI LS_ACC to complete while holding FCP_CMNDs
94 */
95 ocs_node_transition(node, __ocs_d_device_ready, NULL);
96 }
97 }
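
/*
 * Illustrative call path for the deferred PRLI response (a sketch of how the
 * code below uses this function, not additional logic): the PRLI payload and
 * OX_ID are saved while waiting for the node attach, then replayed from
 * __ocs_d_wait_node_attach:
 *
 *	ocs_process_prli_payload(node, cbdata->payload->dma.virt);
 *	ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt,
 *				     OCS_NODE_SEND_LS_ACC_PRLI);
 *	...
 *	ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
 */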
98
99 /**
100 * @ingroup device_sm
101 * @brief Device node state machine: Initiate node shutdown
102 *
103 * @param ctx Remote node state machine context.
104 * @param evt Event to process.
105 * @param arg Per event optional argument.
106 *
107 * @return Returns NULL.
108 */
109
110 void *
111 __ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
112 {
113 std_node_state_decl();
114
115 node_sm_trace();
116
117 switch(evt) {
118 case OCS_EVT_ENTER: {
119 int32_t rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
120
121 ocs_scsi_io_alloc_disable(node);
122
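		/*
		 * Note: each backend delete upcall below either completes
		 * synchronously (returning OCS_SCSI_CALL_COMPLETE, in which
		 * case the corresponding DEL_*_COMPLETE event is posted
		 * right here) or asynchronously, in which case the completion
		 * event is expected to be posted later when the backend
		 * finishes its work.
		 */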
123 /* make necessary delete upcall(s) */
124 if (node->init && !node->targ) {
125 ocs_log_debug(node->ocs,
126 "[%s] delete (initiator) WWPN %s WWNN %s\n",
127 node->display_name, node->wwpn, node->wwnn);
128 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
129 if (node->sport->enable_tgt) {
130 rc = ocs_scsi_del_initiator(node,
131 OCS_SCSI_INITIATOR_DELETED);
132 }
133 if (rc == OCS_SCSI_CALL_COMPLETE) {
134 ocs_node_post_event(node,
135 OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
136 }
137 } else if (node->targ && !node->init) {
138 ocs_log_debug(node->ocs,
139 "[%s] delete (target) WWPN %s WWNN %s\n",
140 node->display_name, node->wwpn, node->wwnn);
141 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
142 if (node->sport->enable_ini) {
143 rc = ocs_scsi_del_target(node,
144 OCS_SCSI_TARGET_DELETED);
145 }
146 if (rc == OCS_SCSI_CALL_COMPLETE) {
147 ocs_node_post_event(node,
148 OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
149 }
150 } else if (node->init && node->targ) {
151 ocs_log_debug(node->ocs,
152 "[%s] delete (initiator+target) WWPN %s WWNN %s\n",
153 node->display_name, node->wwpn, node->wwnn);
154 ocs_node_transition(node, __ocs_d_wait_del_ini_tgt, NULL);
155 if (node->sport->enable_tgt) {
156 rc = ocs_scsi_del_initiator(node,
157 OCS_SCSI_INITIATOR_DELETED);
158 }
159 if (rc == OCS_SCSI_CALL_COMPLETE) {
160 ocs_node_post_event(node,
161 OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
162 }
163 rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
164 if (node->sport->enable_ini) {
165 rc = ocs_scsi_del_target(node,
166 OCS_SCSI_TARGET_DELETED);
167 }
168 if (rc == OCS_SCSI_CALL_COMPLETE) {
169 ocs_node_post_event(node,
170 OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
171 }
172 }
173
174 /* we've initiated the upcalls as needed, now kick off the node
175 * detach to precipitate the aborting of outstanding exchanges
176 * associated with said node
177 *
178 * Beware: if we've made upcall(s), we've already transitioned
179 * to a new state by the time we execute this.
180 * TODO: consider doing this before the upcalls...
181 */
182 if (node->attached) {
183 /* issue hw node free; don't care if succeeds right away
184 * or sometime later, will check node->attached later in
185 * shutdown process
186 */
187 rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
188 if (node->rnode.free_group) {
189 ocs_remote_node_group_free(node->node_group);
190 node->node_group = NULL;
191 node->rnode.free_group = FALSE;
192 }
193 if (rc != OCS_HW_RTN_SUCCESS &&
194 rc != OCS_HW_RTN_SUCCESS_SYNC) {
195 node_printf(node,
196 "Failed freeing HW node, rc=%d\n", rc);
197 }
198 }
199
200 /* if neither initiator nor target, proceed to cleanup */
		if (!node->init && !node->targ) {
202 /*
203 * node has either been detached or is in the process
204 * of being detached, call common node's initiate
205 * cleanup function.
206 */
207 ocs_node_initiate_cleanup(node);
208 }
209 break;
210 }
211 case OCS_EVT_ALL_CHILD_NODES_FREE:
212 /* Ignore, this can happen if an ELS is aborted,
213 * while in a delay/retry state */
214 break;
215 default:
216 __ocs_d_common(__func__, ctx, evt, arg);
217 return NULL;
218 }
219 return NULL;
220 }
221
222 /**
223 * @ingroup device_sm
224 * @brief Device node state machine: Common device event handler.
225 *
226 * <h3 class="desc">Description</h3>
227 * For device nodes, this event handler manages default and common events.
228 *
229 * @param funcname Function name text.
230 * @param ctx Remote node state machine context.
231 * @param evt Event to process.
232 * @param arg Per event optional argument.
233 *
234 * @return Returns NULL.
235 */
236
237 static void *
238 __ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
239 {
240 ocs_node_t *node = NULL;
241 ocs_t *ocs = NULL;
242 ocs_assert(ctx, NULL);
243 node = ctx->app;
244 ocs_assert(node, NULL);
245 ocs = node->ocs;
246 ocs_assert(ocs, NULL);
247
248 switch(evt) {
249 /* Handle shutdown events */
250 case OCS_EVT_SHUTDOWN:
251 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
252 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
253 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
254 break;
255 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
256 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
257 node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
258 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
259 break;
260 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
261 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
262 node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
263 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
264 break;
265
266 default:
267 /* call default event handler common to all nodes */
268 __ocs_node_common(funcname, ctx, evt, arg);
269 break;
270 }
271 return NULL;
272 }
273
274 /**
275 * @ingroup device_sm
276 * @brief Device node state machine: Wait for a domain-attach completion in loop topology.
277 *
278 * <h3 class="desc">Description</h3>
 * This state waits for a domain-attach completion while in loop topology.
280 *
281 * @param ctx Remote node state machine context.
282 * @param evt Event to process.
283 * @param arg Per event optional argument.
284 *
285 * @return Returns NULL.
286 */
287
288 void *
289 __ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
290 {
291 std_node_state_decl();
292
293 node_sm_trace();
294
295 switch(evt) {
296 case OCS_EVT_ENTER:
297 ocs_node_hold_frames(node);
298 break;
299
300 case OCS_EVT_EXIT:
301 ocs_node_accept_frames(node);
302 break;
303
304 case OCS_EVT_DOMAIN_ATTACH_OK: {
305 /* send PLOGI automatically if initiator */
306 ocs_node_init_device(node, TRUE);
307 break;
308 }
309 default:
310 __ocs_d_common(__func__, ctx, evt, arg);
311 return NULL;
312 }
313
314 return NULL;
315 }
316
317 /**
318 * @ingroup device_sm
 * @brief Device node state machine: Wait for the initiator/target delete to complete (I+T node).
320 *
321 * State is entered when a node is in I+T mode and sends a delete initiator/target
322 * call to the target-server/initiator-client and needs to wait for that work to complete.
323 *
324 * @param ctx Remote node state machine context.
325 * @param evt Event to process.
326 * @param arg per event optional argument
327 *
328 * @return returns NULL
329 */
330
331 void *
332 __ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
333 {
334 std_node_state_decl();
335
336 node_sm_trace();
337
338 switch(evt) {
339 case OCS_EVT_ENTER:
340 ocs_node_hold_frames(node);
341 /* Fall through */
342
343 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
344 case OCS_EVT_ALL_CHILD_NODES_FREE:
345 /* These are expected events. */
346 break;
347
348 case OCS_EVT_NODE_DEL_INI_COMPLETE:
349 case OCS_EVT_NODE_DEL_TGT_COMPLETE:
350 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
351 break;
352
353 case OCS_EVT_EXIT:
354 ocs_node_accept_frames(node);
355 break;
356
357 case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IOs complete */
359 ocs_assert(node->els_req_cnt, NULL);
360 node->els_req_cnt--;
361 break;
362
363 /* ignore shutdown events as we're already in shutdown path */
364 case OCS_EVT_SHUTDOWN:
365 /* have default shutdown event take precedence */
366 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
367 /* fall through */
368 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
369 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
370 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
371 break;
372 case OCS_EVT_DOMAIN_ATTACH_OK:
373 /* don't care about domain_attach_ok */
374 break;
375 default:
376 __ocs_d_common(__func__, ctx, evt, arg);
377 return NULL;
378 }
379
380 return NULL;
381 }
382
383 /**
384 * @ingroup device_sm
 * @brief Device node state machine: Wait for the initiator/target delete to complete.
386 *
387 * State is entered when a node sends a delete initiator/target call to the
388 * target-server/initiator-client and needs to wait for that work to complete.
389 *
390 * @param ctx Remote node state machine context.
391 * @param evt Event to process.
392 * @param arg Per event optional argument.
393 *
394 * @return Returns NULL.
395 */
396
397 void *
398 __ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
399 {
400 std_node_state_decl();
401
402 node_sm_trace();
403
404 switch(evt) {
405 case OCS_EVT_ENTER:
406 ocs_node_hold_frames(node);
407 /* Fall through */
408
409 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
410 case OCS_EVT_ALL_CHILD_NODES_FREE:
411 /* These are expected events. */
412 break;
413
414 case OCS_EVT_NODE_DEL_INI_COMPLETE:
415 case OCS_EVT_NODE_DEL_TGT_COMPLETE:
416 /*
417 * node has either been detached or is in the process of being detached,
418 * call common node's initiate cleanup function
419 */
420 ocs_node_initiate_cleanup(node);
421 break;
422
423 case OCS_EVT_EXIT:
424 ocs_node_accept_frames(node);
425 break;
426
427 case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IOs complete */
429 ocs_assert(node->els_req_cnt, NULL);
430 node->els_req_cnt--;
431 break;
432
433 /* ignore shutdown events as we're already in shutdown path */
434 case OCS_EVT_SHUTDOWN:
435 /* have default shutdown event take precedence */
436 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
437 /* fall through */
438 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
439 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
440 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
441 break;
442 case OCS_EVT_DOMAIN_ATTACH_OK:
443 /* don't care about domain_attach_ok */
444 break;
445 default:
446 __ocs_d_common(__func__, ctx, evt, arg);
447 return NULL;
448 }
449
450 return NULL;
451 }
452
453 /**
454 * @brief Save the OX_ID for sending LS_ACC sometime later.
455 *
456 * <h3 class="desc">Description</h3>
457 * When deferring the response to an ELS request, the OX_ID of the request
458 * is saved using this function.
459 *
460 * @param io Pointer to a SCSI IO object.
461 * @param hdr Pointer to the FC header.
 * @param ls Defines the type of LS_ACC to send later: LS_ACC for PLOGI
 * or LS_ACC for PRLI.
464 *
465 * @return None.
466 */
467
468 void
469 ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls)
470 {
471 ocs_node_t *node = io->node;
472 uint16_t ox_id = ocs_be16toh(hdr->ox_id);
473
474 ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_NONE);
475
476 node->ls_acc_oxid = ox_id;
477 node->send_ls_acc = ls;
478 node->ls_acc_io = io;
479 node->ls_acc_did = fc_be24toh(hdr->d_id);
480 }
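
/*
 * Usage sketch (mirrors the OCS_EVT_PLOGI_RCVD handling later in this file):
 * the LS_ACC is deferred until the node attach completes, then sent from
 * __ocs_d_wait_node_attach using the saved IO and OX_ID:
 *
 *	ocs_node_save_sparms(node, cbdata->payload->dma.virt);
 *	ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt,
 *				     OCS_NODE_SEND_LS_ACC_PLOGI);
 *	...
 *	ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
 */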
481
482 /**
483 * @brief Process the PRLI payload.
484 *
485 * <h3 class="desc">Description</h3>
486 * The PRLI payload is processed; the initiator/target capabilities of the
487 * remote node are extracted and saved in the node object.
488 *
489 * @param node Pointer to the node object.
490 * @param prli Pointer to the PRLI payload.
491 *
492 * @return None.
493 */
494
495 void
496 ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli)
497 {
498 node->init = (ocs_be16toh(prli->service_params) & FC_PRLI_INITIATOR_FUNCTION) != 0;
499 node->targ = (ocs_be16toh(prli->service_params) & FC_PRLI_TARGET_FUNCTION) != 0;
500 node->fcp2device = (ocs_be16toh(prli->service_params) & FC_PRLI_RETRY) != 0;
501 node->fc_type = prli->type;
502 }
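
/*
 * Typically invoked with the received PRLI payload before deciding how to
 * respond, e.g. (from the OCS_EVT_PRLI_RCVD handler in __ocs_d_port_logged_in):
 *
 *	ocs_process_prli_payload(node, cbdata->payload->dma.virt);
 *	ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
 */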
503
504 /**
505 * @brief Process the ABTS.
506 *
507 * <h3 class="desc">Description</h3>
508 * Common code to process a received ABTS. If an active IO can be found
509 * that matches the OX_ID of the ABTS request, a call is made to the
510 * backend. Otherwise, a BA_ACC is returned to the initiator.
511 *
512 * @param io Pointer to a SCSI IO object.
513 * @param hdr Pointer to the FC header.
514 *
515 * @return Returns 0 on success, or a negative error value on failure.
516 */
517
518 static int32_t
519 ocs_process_abts(ocs_io_t *io, fc_header_t *hdr)
520 {
521 ocs_node_t *node = io->node;
522 ocs_t *ocs = node->ocs;
523 uint16_t ox_id = ocs_be16toh(hdr->ox_id);
524 uint16_t rx_id = ocs_be16toh(hdr->rx_id);
525 ocs_io_t *abortio;
526
527 abortio = ocs_io_find_tgt_io(ocs, node, ox_id, rx_id);
528
529 /* If an IO was found, attempt to take a reference on it */
530 if (abortio != NULL && (ocs_ref_get_unless_zero(&abortio->ref) != 0)) {
531 /* Got a reference on the IO. Hold it until backend is notified below */
532 node_printf(node, "Abort request: ox_id [%04x] rx_id [%04x]\n",
533 ox_id, rx_id);
534
535 /*
536 * Save the ox_id for the ABTS as the init_task_tag in our manufactured
537 * TMF IO object
538 */
539 io->display_name = "abts";
540 io->init_task_tag = ox_id;
541 /* don't set tgt_task_tag, don't want to confuse with XRI */
542
543 /*
544 * Save the rx_id from the ABTS as it is needed for the BLS response,
545 * regardless of the IO context's rx_id
546 */
547 io->abort_rx_id = rx_id;
548
549 /* Call target server command abort */
550 io->tmf_cmd = OCS_SCSI_TMF_ABORT_TASK;
551 ocs_scsi_recv_tmf(io, abortio->tgt_io.lun, OCS_SCSI_TMF_ABORT_TASK, abortio, 0);
552
553 /*
554 * Backend will have taken an additional reference on the IO if needed;
555 * done with current reference.
556 */
557 ocs_ref_put(&abortio->ref); /* ocs_ref_get(): same function */
558 } else {
		/*
		 * Either the IO was not found or it was freed between finding it
		 * and attempting to get the reference.
		 */
563 node_printf(node, "Abort request: ox_id [%04x], IO not found (exists=%d)\n",
564 ox_id, (abortio != NULL));
565
566 /* Send a BA_ACC */
567 ocs_bls_send_acc_hdr(io, hdr);
568 }
569 return 0;
570 }
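
/*
 * Dispatched from __ocs_d_device_ready when an ABTS is received:
 *
 *	case OCS_EVT_ABTS_RCVD:
 *		ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
 *		break;
 */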
571
572 /**
573 * @ingroup device_sm
574 * @brief Device node state machine: Wait for the PLOGI accept to complete.
575 *
576 * @param ctx Remote node state machine context.
577 * @param evt Event to process.
578 * @param arg Per event optional argument.
579 *
580 * @return Returns NULL.
581 */
582
583 void *
584 __ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
585 {
586 std_node_state_decl();
587
588 node_sm_trace();
589
590 switch(evt) {
591 case OCS_EVT_ENTER:
592 ocs_node_hold_frames(node);
593 break;
594
595 case OCS_EVT_EXIT:
596 ocs_node_accept_frames(node);
597 break;
598
599 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
600 ocs_assert(node->els_cmpl_cnt, NULL);
601 node->els_cmpl_cnt--;
602 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
603 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
604 break;
605
606 case OCS_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
607 ocs_assert(node->els_cmpl_cnt, NULL);
608 node->els_cmpl_cnt--;
609 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
610 break;
611
612 default:
613 __ocs_d_common(__func__, ctx, evt, arg);
614 return NULL;
615 }
616
617 return NULL;
618 }
619
620 /**
621 * @ingroup device_sm
622 * @brief Device node state machine: Wait for the LOGO response.
623 *
624 * @param ctx Remote node state machine context.
625 * @param evt Event to process.
626 * @param arg Per event optional argument.
627 *
628 * @return Returns NULL.
629 */
630
631 void *
632 __ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
633 {
634 std_node_state_decl();
635
636 node_sm_trace();
637
638 switch(evt) {
639 case OCS_EVT_ENTER:
		/* TODO: may want to remove this
		 * if we want to know about a received PLOGI */
642 ocs_node_hold_frames(node);
643 break;
644
645 case OCS_EVT_EXIT:
646 ocs_node_accept_frames(node);
647 break;
648
649 case OCS_EVT_SRRS_ELS_REQ_OK:
650 case OCS_EVT_SRRS_ELS_REQ_RJT:
651 case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* LOGO response received, shut down the node */
653 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_LOGO, __ocs_d_common, __func__)) {
654 return NULL;
655 }
656 ocs_assert(node->els_req_cnt, NULL);
657 node->els_req_cnt--;
658 node_printf(node, "LOGO sent (evt=%s), shutdown node\n", ocs_sm_event_name(evt));
659 /* sm: post explicit logout */
660 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
661 break;
662
663 /* TODO: PLOGI: abort LOGO and process PLOGI? (SHUTDOWN_EXPLICIT/IMPLICIT_LOGO?) */
664
665 default:
666 __ocs_d_common(__func__, ctx, evt, arg);
667 return NULL;
668 }
669 return NULL;
670 }
671
672 /**
673 * @ingroup device_sm
674 * @brief Device node state machine: Wait for the PRLO response.
675 *
676 * @param ctx Remote node state machine context.
677 * @param evt Event to process.
678 * @param arg Per event optional argument.
679 *
680 * @return Returns NULL.
681 */
682
683 void *
684 __ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
685 {
686 std_node_state_decl();
687
688 node_sm_trace();
689
690 switch(evt) {
691 case OCS_EVT_ENTER:
692 ocs_node_hold_frames(node);
693 break;
694
695 case OCS_EVT_EXIT:
696 ocs_node_accept_frames(node);
697 break;
698
699 case OCS_EVT_SRRS_ELS_REQ_OK:
700 case OCS_EVT_SRRS_ELS_REQ_RJT:
701 case OCS_EVT_SRRS_ELS_REQ_FAIL:
702 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLO, __ocs_d_common, __func__)) {
703 return NULL;
704 }
705 ocs_assert(node->els_req_cnt, NULL);
706 node->els_req_cnt--;
707 node_printf(node, "PRLO sent (evt=%s)\n", ocs_sm_event_name(evt));
708 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
709 break;
710
711 default:
712 __ocs_node_common(__func__, ctx, evt, arg);
713 return NULL;
714 }
715 return NULL;
716 }
717
718 /**
719 * @brief Initialize device node.
720 *
 * Initialize a device node: record whether a PLOGI should be sent, then transition
 * to __ocs_d_init (or to __ocs_node_paused if new nodes are configured to pause).
 * __ocs_d_init sends the PLOGI on entry when requested.
723 *
724 * @param node Pointer to the node object.
725 * @param send_plogi Boolean indicating to send PLOGI command or not.
726 *
727 * @return none
728 */
729
730 void
731 ocs_node_init_device(ocs_node_t *node, int send_plogi)
732 {
733 node->send_plogi = send_plogi;
734 if ((node->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NEW_NODES) && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)) {
735 node->nodedb_state = __ocs_d_init;
736 ocs_node_transition(node, __ocs_node_paused, NULL);
737 } else {
738 ocs_node_transition(node, __ocs_d_init, NULL);
739 }
740 }
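
/*
 * Example caller (see __ocs_d_wait_loop): once the domain attach completes in
 * loop topology, the node is initialized and a PLOGI is requested; __ocs_d_init
 * then sends it only if this sport has initiator capability:
 *
 *	case OCS_EVT_DOMAIN_ATTACH_OK:
 *		ocs_node_init_device(node, TRUE);
 *		break;
 */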
741
742 /**
743 * @ingroup device_sm
744 * @brief Device node state machine: Initial node state for an initiator or a target.
745 *
746 * <h3 class="desc">Description</h3>
747 * This state is entered when a node is instantiated, either having been
748 * discovered from a name services query, or having received a PLOGI/FLOGI.
749 *
750 * @param ctx Remote node state machine context.
751 * @param evt Event to process.
752 * @param arg Per event optional argument.
 * - OCS_EVT_ENTER: (uint8_t *) 1 to send a PLOGI on entry;
 * 0 to not send a PLOGI on entry (initiator only).
 * Not applicable for a target.
756 *
757 * @return Returns NULL.
758 */
759
760 void *
761 __ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
762 {
763 int32_t rc;
764 ocs_node_cb_t *cbdata = arg;
765 std_node_state_decl();
766
767 node_sm_trace();
768
769 switch(evt) {
770 case OCS_EVT_ENTER:
771 /* check if we need to send PLOGI */
772 if (node->send_plogi) {
773 /* only send if we have initiator capability, and domain is attached */
774 if (node->sport->enable_ini && node->sport->domain->attached) {
775 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
776 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
777 ocs_node_transition(node, __ocs_d_wait_plogi_rsp, NULL);
778 } else {
779 node_printf(node, "not sending plogi sport.ini=%d, domain attached=%d\n",
780 node->sport->enable_ini, node->sport->domain->attached);
781 }
782 }
783 break;
784 case OCS_EVT_PLOGI_RCVD: {
785 /* T, or I+T */
786 fc_header_t *hdr = cbdata->header->dma.virt;
787 uint32_t d_id = fc_be24toh(hdr->d_id);
788
789 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
790 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
791
792 /* domain already attached */
793 if (node->sport->domain->attached) {
794 rc = ocs_node_attach(node);
795 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
796 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
797 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
798 }
799 break;
800 }
801
802 /* domain not attached; several possibilities: */
803 switch (node->sport->topology) {
804 case OCS_SPORT_TOPOLOGY_P2P:
805 /* we're not attached and sport is p2p, need to attach */
806 ocs_domain_attach(node->sport->domain, d_id);
807 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
808 break;
809 case OCS_SPORT_TOPOLOGY_FABRIC:
810 /* we're not attached and sport is fabric, domain attach should have
811 * already been requested as part of the fabric state machine, wait for it
812 */
813 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
814 break;
815 case OCS_SPORT_TOPOLOGY_UNKNOWN:
816 /* Two possibilities:
817 * 1. received a PLOGI before our FLOGI has completed (possible since
818 * completion comes in on another CQ), thus we don't know what we're
819 * connected to yet; transition to a state to wait for the fabric
820 * node to tell us;
821 * 2. PLOGI received before link went down and we haven't performed
822 * domain attach yet.
823 * Note: we cannot distinguish between 1. and 2. so have to assume PLOGI
824 * was received after link back up.
825 */
826 node_printf(node, "received PLOGI, with unknown topology did=0x%x\n", d_id);
827 ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
828 break;
829 default:
			node_printf(node, "received PLOGI, with unexpected topology %d\n",
831 node->sport->topology);
832 ocs_assert(FALSE, NULL);
833 break;
834 }
835 break;
836 }
837
838 case OCS_EVT_FDISC_RCVD: {
839 __ocs_d_common(__func__, ctx, evt, arg);
840 break;
841 }
842
843 case OCS_EVT_FLOGI_RCVD: {
844 fc_header_t *hdr = cbdata->header->dma.virt;
845
846 /* this better be coming from an NPort */
847 ocs_assert(ocs_rnode_is_nport(cbdata->payload->dma.virt), NULL);
848
849 /* sm: save sparams, send FLOGI acc */
850 ocs_domain_save_sparms(node->sport->domain, cbdata->payload->dma.virt);
851
852 /* send FC LS_ACC response, override s_id */
853 ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
854 ocs_send_flogi_p2p_acc(cbdata->io, ocs_be16toh(hdr->ox_id), fc_be24toh(hdr->d_id), NULL, NULL);
855 if (ocs_p2p_setup(node->sport)) {
856 node_printf(node, "p2p setup failed, shutting down node\n");
857 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
858 } else {
859 ocs_node_transition(node, __ocs_p2p_wait_flogi_acc_cmpl, NULL);
860 }
861
862 break;
863 }
864
865 case OCS_EVT_LOGO_RCVD: {
866 fc_header_t *hdr = cbdata->header->dma.virt;
867
868 if (!node->sport->domain->attached) {
869 /* most likely a frame left over from before a link down; drop and
870 * shut node down w/ "explicit logout" so pending frames are processed */
871 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
872 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
873 break;
874 }
875 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
876 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
877 break;
878 }
879
880 case OCS_EVT_PRLI_RCVD:
881 case OCS_EVT_PRLO_RCVD:
882 case OCS_EVT_PDISC_RCVD:
883 case OCS_EVT_ADISC_RCVD:
884 case OCS_EVT_RSCN_RCVD: {
885 fc_header_t *hdr = cbdata->header->dma.virt;
886 if (!node->sport->domain->attached) {
887 /* most likely a frame left over from before a link down; drop and
888 * shut node down w/ "explicit logout" so pending frames are processed */
889 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
890 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
891 break;
892 }
893 node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
894 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
895 FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
896 NULL, NULL);
897
898 break;
899 }
900
901 case OCS_EVT_FCP_CMD_RCVD: {
902 /* note: problem, we're now expecting an ELS REQ completion
903 * from both the LOGO and PLOGI */
904 if (!node->sport->domain->attached) {
905 /* most likely a frame left over from before a link down; drop and
906 * shut node down w/ "explicit logout" so pending frames are processed */
907 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
908 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
909 break;
910 }
911
912 /* Send LOGO */
913 node_printf(node, "FCP_CMND received, send LOGO\n");
914 if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, 0, NULL, NULL) == NULL) {
915 /* failed to send LOGO, go ahead and cleanup node anyways */
916 node_printf(node, "Failed to send LOGO\n");
917 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
918 } else {
919 /* sent LOGO, wait for response */
920 ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
921 }
922 break;
923 }
924 case OCS_EVT_DOMAIN_ATTACH_OK:
925 /* don't care about domain_attach_ok */
926 break;
927
928 default:
929 __ocs_d_common(__func__, ctx, evt, arg);
930 return NULL;
931 }
932
933 return NULL;
934 }
935
936 /**
937 * @ingroup device_sm
938 * @brief Device node state machine: Wait on a response for a sent PLOGI.
939 *
940 * <h3 class="desc">Description</h3>
941 * State is entered when an initiator-capable node has sent
942 * a PLOGI and is waiting for a response.
943 *
944 * @param ctx Remote node state machine context.
945 * @param evt Event to process.
946 * @param arg Per event optional argument.
947 *
948 * @return Returns NULL.
949 */
950
951 void *
952 __ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
953 {
954 int32_t rc;
955 ocs_node_cb_t *cbdata = arg;
956 std_node_state_decl();
957
958 node_sm_trace();
959
960 switch(evt) {
961 case OCS_EVT_PLOGI_RCVD: {
962 /* T, or I+T */
		/* received PLOGI with service parameters, go ahead and attach the node;
		 * when the PLOGI that was sent ultimately completes, it'll be a no-op
		 */
966
967 /* TODO: there is an outstanding PLOGI sent, can we set a flag
968 * to indicate that we don't want to retry it if it times out? */
969 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
970 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
971 /* sm: domain->attached / ocs_node_attach */
972 rc = ocs_node_attach(node);
973 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
974 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
975 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
976 }
977 break;
978 }
979
980 case OCS_EVT_PRLI_RCVD:
981 /* I, or I+T */
		/* sent PLOGI and, before its completion was seen, received a
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed).
		 * Save the OX_ID so the PRLI response can be sent after the attach,
		 * and continue to wait for the PLOGI response.
		 */
988 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
989 if (ocs->fc_type == node->fc_type) {
990 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
991 ocs_node_transition(node, __ocs_d_wait_plogi_rsp_recvd_prli, NULL);
992 } else {
			/* TODO: this needs to be looked at. What do we do here? */
994 }
995 break;
996
	/* TODO: this needs to be looked at; we could very well be logged in */
998 case OCS_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
999 case OCS_EVT_PRLO_RCVD:
1000 case OCS_EVT_PDISC_RCVD:
1001 case OCS_EVT_FDISC_RCVD:
1002 case OCS_EVT_ADISC_RCVD:
1003 case OCS_EVT_RSCN_RCVD:
1004 case OCS_EVT_SCR_RCVD: {
1005 fc_header_t *hdr = cbdata->header->dma.virt;
1006 node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
1007 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1008 FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
1009 NULL, NULL);
1010
1011 break;
1012 }
1013
1014 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1015 /* Completion from PLOGI sent */
1016 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1017 return NULL;
1018 }
1019 ocs_assert(node->els_req_cnt, NULL);
1020 node->els_req_cnt--;
1021 /* sm: save sparams, ocs_node_attach */
1022 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1023 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1024 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1025 rc = ocs_node_attach(node);
1026 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1027 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1028 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1029 }
1030 break;
1031
1032 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1033 /* PLOGI failed, shutdown the node */
1034 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1035 return NULL;
1036 }
1037 ocs_assert(node->els_req_cnt, NULL);
1038 node->els_req_cnt--;
1039 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1040 break;
1041
1042 case OCS_EVT_SRRS_ELS_REQ_RJT: /* Our PLOGI was rejected, this is ok in some cases */
1043 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1044 return NULL;
1045 }
1046 ocs_assert(node->els_req_cnt, NULL);
1047 node->els_req_cnt--;
1048 break;
1049
1050 case OCS_EVT_FCP_CMD_RCVD: {
1051 /* not logged in yet and outstanding PLOGI so don't send LOGO,
1052 * just drop
1053 */
1054 node_printf(node, "FCP_CMND received, drop\n");
1055 break;
1056 }
1057
1058 default:
1059 __ocs_d_common(__func__, ctx, evt, arg);
1060 return NULL;
1061 }
1062
1063 return NULL;
1064 }
1065
1066 /**
1067 * @ingroup device_sm
1068 * @brief Device node state machine: Waiting on a response for a
1069 * sent PLOGI.
1070 *
1071 * <h3 class="desc">Description</h3>
1072 * State is entered when an initiator-capable node has sent
1073 * a PLOGI and is waiting for a response. Before receiving the
1074 * response, a PRLI was received, implying that the PLOGI was
1075 * successful.
1076 *
1077 * @param ctx Remote node state machine context.
1078 * @param evt Event to process.
1079 * @param arg Per event optional argument.
1080 *
1081 * @return Returns NULL.
1082 */
1083
1084 void *
1085 __ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1086 {
1087 int32_t rc;
1088 ocs_node_cb_t *cbdata = arg;
1089 std_node_state_decl();
1090
1091 node_sm_trace();
1092
1093 switch(evt) {
1094 case OCS_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach; then we can send the LS_ACC for the PRLI. During
		 * this time we may receive FCP_CMNDs (which is possible since
		 * we've already sent a PRLI and our peer may have accepted it).
		 * At this time, we are not waiting on any other unsolicited
		 * frames to continue with the login process, so it will not
		 * hurt to hold frames here.
		 */
1105 ocs_node_hold_frames(node);
1106 break;
1107
1108 case OCS_EVT_EXIT:
1109 ocs_node_accept_frames(node);
1110 break;
1111
1112 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1113 /* Completion from PLOGI sent */
1114 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1115 return NULL;
1116 }
1117 ocs_assert(node->els_req_cnt, NULL);
1118 node->els_req_cnt--;
1119 /* sm: save sparams, ocs_node_attach */
1120 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1121 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1122 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1123 rc = ocs_node_attach(node);
1124 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1125 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1126 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1127 }
1128 break;
1129
1130 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1131 case OCS_EVT_SRRS_ELS_REQ_RJT:
1132 /* PLOGI failed, shutdown the node */
1133 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1134 return NULL;
1135 }
1136 ocs_assert(node->els_req_cnt, NULL);
1137 node->els_req_cnt--;
1138 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1139 break;
1140
1141 default:
1142 __ocs_d_common(__func__, ctx, evt, arg);
1143 return NULL;
1144 }
1145
1146 return NULL;
1147 }
1148
1149 /**
1150 * @ingroup device_sm
1151 * @brief Device node state machine: Wait for a domain attach.
1152 *
1153 * <h3 class="desc">Description</h3>
1154 * Waits for a domain-attach complete ok event.
1155 *
1156 * @param ctx Remote node state machine context.
1157 * @param evt Event to process.
1158 * @param arg Per event optional argument.
1159 *
1160 * @return Returns NULL.
1161 */
1162
1163 void *
1164 __ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1165 {
1166 int32_t rc;
1167 std_node_state_decl();
1168
1169 node_sm_trace();
1170
1171 switch(evt) {
1172 case OCS_EVT_ENTER:
1173 ocs_node_hold_frames(node);
1174 break;
1175
1176 case OCS_EVT_EXIT:
1177 ocs_node_accept_frames(node);
1178 break;
1179
1180 case OCS_EVT_DOMAIN_ATTACH_OK:
1181 ocs_assert(node->sport->domain->attached, NULL);
1182 /* sm: ocs_node_attach */
1183 rc = ocs_node_attach(node);
1184 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1185 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1186 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1187 }
1188 break;
1189
1190 default:
1191 __ocs_d_common(__func__, ctx, evt, arg);
1192 return NULL;
1193 }
1194 return NULL;
1195 }
1196
1197 /**
1198 * @ingroup device_sm
1199 * @brief Device node state machine: Wait for topology
1200 * notification
1201 *
1202 * <h3 class="desc">Description</h3>
1203 * Waits for topology notification from fabric node, then
1204 * attaches domain and node.
1205 *
1206 * @param ctx Remote node state machine context.
1207 * @param evt Event to process.
1208 * @param arg Per event optional argument.
1209 *
1210 * @return Returns NULL.
1211 */
1212
1213 void *
1214 __ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1215 {
1216 int32_t rc;
1217 std_node_state_decl();
1218
1219 node_sm_trace();
1220
1221 switch(evt) {
1222 case OCS_EVT_ENTER:
1223 ocs_node_hold_frames(node);
1224 break;
1225
1226 case OCS_EVT_EXIT:
1227 ocs_node_accept_frames(node);
1228 break;
1229
1230 case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
1231 ocs_sport_topology_e topology = (ocs_sport_topology_e)(uintptr_t)arg;
1232 ocs_assert(!node->sport->domain->attached, NULL);
1233 ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
1234 node_printf(node, "topology notification, topology=%d\n", topology);
1235
1236 /* At the time the PLOGI was received, the topology was unknown,
1237 * so we didn't know which node would perform the domain attach:
1238 * 1. The node from which the PLOGI was sent (p2p) or
1239 * 2. The node to which the FLOGI was sent (fabric).
1240 */
1241 if (topology == OCS_SPORT_TOPOLOGY_P2P) {
1242 /* if this is p2p, need to attach to the domain using the
1243 * d_id from the PLOGI received
1244 */
1245 ocs_domain_attach(node->sport->domain, node->ls_acc_did);
1246 }
1247 /* else, if this is fabric, the domain attach should be performed
1248 * by the fabric node (node sending FLOGI); just wait for attach
1249 * to complete
1250 */
1251
1252 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
1253 break;
1254 }
1255 case OCS_EVT_DOMAIN_ATTACH_OK:
1256 ocs_assert(node->sport->domain->attached, NULL);
1257 node_printf(node, "domain attach ok\n");
1258 /*sm: ocs_node_attach */
1259 rc = ocs_node_attach(node);
1260 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1261 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1262 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1263 }
1264 break;
1265
1266 default:
1267 __ocs_d_common(__func__, ctx, evt, arg);
1268 return NULL;
1269 }
1270 return NULL;
1271 }
1272
1273 /**
1274 * @ingroup device_sm
1275 * @brief Device node state machine: Wait for a node attach when found by a remote node.
1276 *
1277 * @param ctx Remote node state machine context.
1278 * @param evt Event to process.
1279 * @param arg Per event optional argument.
1280 *
1281 * @return Returns NULL.
1282 */
1283
1284 void *
1285 __ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1286 {
1287 std_node_state_decl();
1288
1289 node_sm_trace();
1290
1291 switch(evt) {
1292 case OCS_EVT_ENTER:
1293 ocs_node_hold_frames(node);
1294 break;
1295
1296 case OCS_EVT_EXIT:
1297 ocs_node_accept_frames(node);
1298 break;
1299
1300 case OCS_EVT_NODE_ATTACH_OK:
1301 node->attached = TRUE;
1302 switch (node->send_ls_acc) {
1303 case OCS_NODE_SEND_LS_ACC_PLOGI: {
1304 /* sm: send_plogi_acc is set / send PLOGI acc */
1305 /* Normal case for T, or I+T */
1306 ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
1307 ocs_node_transition(node, __ocs_d_wait_plogi_acc_cmpl, NULL);
1308 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1309 node->ls_acc_io = NULL;
1310 break;
1311 }
1312 case OCS_NODE_SEND_LS_ACC_PRLI: {
1313 ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
1314 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1315 node->ls_acc_io = NULL;
1316 break;
1317 }
1318 case OCS_NODE_SEND_LS_ACC_NONE:
1319 default:
1320 /* Normal case for I */
			/* sm: send_plogi_acc is not set / no LS_ACC to send */
1322 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1323 break;
1324 }
1325 break;
1326
1327 case OCS_EVT_NODE_ATTACH_FAIL:
1328 /* node attach failed, shutdown the node */
1329 node->attached = FALSE;
1330 node_printf(node, "node attach failed\n");
1331 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1332 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1333 break;
1334
1335 /* Handle shutdown events */
1336 case OCS_EVT_SHUTDOWN:
1337 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1338 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1339 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1340 break;
1341 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1342 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1343 node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
1344 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1345 break;
1346 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1347 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1348 node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
1349 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1350 break;
1351 default:
1352 __ocs_d_common(__func__, ctx, evt, arg);
1353 return NULL;
1354 }
1355
1356 return NULL;
1357 }
1358
1359 /**
1360 * @ingroup device_sm
1361 * @brief Device node state machine: Wait for a node/domain
1362 * attach then shutdown node.
1363 *
1364 * @param ctx Remote node state machine context.
1365 * @param evt Event to process.
1366 * @param arg Per event optional argument.
1367 *
1368 * @return Returns NULL.
1369 */
1370
1371 void *
1372 __ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1373 {
1374 std_node_state_decl();
1375
1376 node_sm_trace();
1377
1378 switch(evt) {
1379 case OCS_EVT_ENTER:
1380 ocs_node_hold_frames(node);
1381 break;
1382
1383 case OCS_EVT_EXIT:
1384 ocs_node_accept_frames(node);
1385 break;
1386
1387 /* wait for any of these attach events and then shutdown */
1388 case OCS_EVT_NODE_ATTACH_OK:
1389 node->attached = TRUE;
1390 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
1391 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1392 break;
1393
1394 case OCS_EVT_NODE_ATTACH_FAIL:
1395 /* node attach failed, shutdown the node */
1396 node->attached = FALSE;
1397 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
1398 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1399 break;
1400
1401 /* ignore shutdown events as we're already in shutdown path */
1402 case OCS_EVT_SHUTDOWN:
1403 /* have default shutdown event take precedence */
1404 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1405 /* fall through */
1406 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1407 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1408 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1409 break;
1410
1411 default:
1412 __ocs_d_common(__func__, ctx, evt, arg);
1413 return NULL;
1414 }
1415
1416 return NULL;
1417 }
1418
1419 /**
1420 * @ingroup device_sm
1421 * @brief Device node state machine: Port is logged in.
1422 *
1423 * <h3 class="desc">Description</h3>
1424 * This state is entered when a remote port has completed port login (PLOGI).
1425 *
1426 * @param ctx Remote node state machine context.
1427 * @param evt Event to process
1428 * @param arg Per event optional argument
1429 *
1430 * @return Returns NULL.
1431 */
1432 void *
1433 __ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1434 {
1435 ocs_node_cb_t *cbdata = arg;
1436 std_node_state_decl();
1437
1438 node_sm_trace();
1439
1440 /* TODO: I+T: what if PLOGI response not yet received ? */
1441
1442 switch(evt) {
1443 case OCS_EVT_ENTER:
1444 /* Normal case for I or I+T */
1445 if (node->sport->enable_ini && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)
1446 && !node->sent_prli) {
1447 /* sm: if enable_ini / send PRLI */
1448 ocs_send_prli(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1449 node->sent_prli = TRUE;
1450 /* can now expect ELS_REQ_OK/FAIL/RJT */
1451 }
1452 break;
1453
1454 case OCS_EVT_FCP_CMD_RCVD: {
1455 /* For target functionality send PRLO and drop the CMD frame. */
1456 if (node->sport->enable_tgt) {
1457 if (ocs_send_prlo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
1458 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL)) {
1459 ocs_node_transition(node, __ocs_d_wait_prlo_rsp, NULL);
1460 }
1461 }
1462 break;
1463 }
1464
1465 case OCS_EVT_PRLI_RCVD: {
1466 fc_header_t *hdr = cbdata->header->dma.virt;
1467
1468 /* Normal for T or I+T */
1469
1470 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1471 ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
1472 break;
1473 }
1474
1475 case OCS_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
1476 /* Normal case for I or I+T */
1477 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1478 return NULL;
1479 }
1480 ocs_assert(node->els_req_cnt, NULL);
1481 node->els_req_cnt--;
1482 /* sm: process PRLI payload */
1483 ocs_process_prli_payload(node, cbdata->els->els_rsp.virt);
1484 ocs_node_transition(node, __ocs_d_device_ready, NULL);
1485 break;
1486 }
1487
1488 case OCS_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
1489 /* I, I+T, assume some link failure, shutdown node */
1490 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1491 return NULL;
1492 }
1493 ocs_assert(node->els_req_cnt, NULL);
1494 node->els_req_cnt--;
1495 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1496 break;
1497 }
1498
1499 case OCS_EVT_SRRS_ELS_REQ_RJT: {/* PRLI rejected by remote */
1500 /* Normal for I, I+T (connected to an I) */
		/* The remote node doesn't want to be a target, so stay here and wait for a PRLI
		 * from the remote node if it really wants to connect to us as a target */
1503 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1504 return NULL;
1505 }
1506 ocs_assert(node->els_req_cnt, NULL);
1507 node->els_req_cnt--;
1508 break;
1509 }
1510
1511 case OCS_EVT_SRRS_ELS_CMPL_OK: {
1512 /* Normal T, I+T, target-server rejected the process login */
1513 /* This would be received only in the case where we sent LS_RJT for the PRLI, so
1514 * do nothing. (note: as T only we could shutdown the node)
1515 */
1516 ocs_assert(node->els_cmpl_cnt, NULL);
1517 node->els_cmpl_cnt--;
1518 break;
1519 }
1520
1521 case OCS_EVT_PLOGI_RCVD: {
1522 /* sm: save sparams, set send_plogi_acc, post implicit logout
1523 * Save plogi parameters */
1524 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1525 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1526
1527 /* Restart node attach with new service parameters, and send ACC */
1528 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1529 break;
1530 }
1531
1532 case OCS_EVT_LOGO_RCVD: {
1533 /* I, T, I+T */
1534 fc_header_t *hdr = cbdata->header->dma.virt;
1535 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1536 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1537 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1538 break;
1539 }
1540
1541 default:
1542 __ocs_d_common(__func__, ctx, evt, arg);
1543 return NULL;
1544 }
1545
1546 return NULL;
1547 }
1548
1549 /**
1550 * @ingroup device_sm
1551 * @brief Device node state machine: Wait for a LOGO accept.
1552 *
1553 * <h3 class="desc">Description</h3>
1554 * Waits for a LOGO accept completion.
1555 *
1556 * @param ctx Remote node state machine context.
1557 * @param evt Event to process
1558 * @param arg Per event optional argument
1559 *
1560 * @return Returns NULL.
1561 */
1562
1563 void *
1564 __ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1565 {
1566 std_node_state_decl();
1567
1568 node_sm_trace();
1569
1570 switch(evt) {
1571 case OCS_EVT_ENTER:
1572 ocs_node_hold_frames(node);
1573 break;
1574
1575 case OCS_EVT_EXIT:
1576 ocs_node_accept_frames(node);
1577 break;
1578
1579 case OCS_EVT_SRRS_ELS_CMPL_OK:
1580 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1581 /* sm: / post explicit logout */
1582 ocs_assert(node->els_cmpl_cnt, NULL);
1583 node->els_cmpl_cnt--;
1584 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
1585 break;
1586 default:
1587 __ocs_d_common(__func__, ctx, evt, arg);
1588 return NULL;
1589 }
1590
1591 return NULL;
1592 }
1593
1594 /**
1595 * @ingroup device_sm
1596 * @brief Device node state machine: Device is ready.
1597 *
1598 * @param ctx Remote node state machine context.
1599 * @param evt Event to process.
1600 * @param arg Per event optional argument.
1601 *
1602 * @return Returns NULL.
1603 */
1604
1605 void *
1606 __ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1607 {
1608 ocs_node_cb_t *cbdata = arg;
1609 std_node_state_decl();
1610
1611 if (evt != OCS_EVT_FCP_CMD_RCVD) {
1612 node_sm_trace();
1613 }
1614
1615 switch(evt) {
1616 case OCS_EVT_ENTER:
1617 node->fcp_enabled = TRUE;
1618 if (node->init) {
1619 device_printf(ocs->dev, "[%s] found (initiator) WWPN %s WWNN %s\n", node->display_name,
1620 node->wwpn, node->wwnn);
1621 if (node->sport->enable_tgt)
1622 ocs_scsi_new_initiator(node);
1623 }
1624 if (node->targ) {
1625 device_printf(ocs->dev, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name,
1626 node->wwpn, node->wwnn);
1627 if (node->sport->enable_ini)
1628 ocs_scsi_new_target(node);
1629 }
1630 break;
1631
1632 case OCS_EVT_EXIT:
1633 node->fcp_enabled = FALSE;
1634 break;
1635
1636 case OCS_EVT_PLOGI_RCVD: {
1637 /* sm: save sparams, set send_plogi_acc, post implicit logout
1638 * Save plogi parameters */
1639 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1640 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1641
1642 /* Restart node attach with new service parameters, and send ACC */
1643 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1644 break;
1645 }
1646
1647 case OCS_EVT_PDISC_RCVD: {
1648 fc_header_t *hdr = cbdata->header->dma.virt;
1649 ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1650 break;
1651 }
1652
1653 case OCS_EVT_PRLI_RCVD: {
1654 /* T, I+T: remote initiator is slow to get started */
1655 fc_header_t *hdr = cbdata->header->dma.virt;
1656
1657 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1658
1659 /* sm: send PRLI acc/reject */
1660 if (ocs->fc_type == node->fc_type)
1661 ocs_send_prli_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
1662 else
1663 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
1664 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
1665 break;
1666 }
1667
1668 case OCS_EVT_PRLO_RCVD: {
1669 fc_header_t *hdr = cbdata->header->dma.virt;
1670 fc_prlo_payload_t *prlo = cbdata->payload->dma.virt;
1671
1672 /* sm: send PRLO acc/reject */
1673 if (ocs->fc_type == prlo->type)
1674 ocs_send_prlo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
1675 else
1676 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
1677 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
1678 /*TODO: need implicit logout */
1679 break;
1680 }
1681
1682 case OCS_EVT_LOGO_RCVD: {
1683 fc_header_t *hdr = cbdata->header->dma.virt;
1684 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1685 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1686 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1687 break;
1688 }
1689
1690 case OCS_EVT_ADISC_RCVD: {
1691 fc_header_t *hdr = cbdata->header->dma.virt;
1692 ocs_send_adisc_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1693 break;
1694 }
1695
1696 case OCS_EVT_RRQ_RCVD: {
1697 fc_header_t *hdr = cbdata->header->dma.virt;
1698 /* Send LS_ACC */
1699 ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1700 break;
1701 }
1702
1703 case OCS_EVT_ABTS_RCVD:
1704 ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
1705 break;
1706
1707 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1708 break;
1709
1710 case OCS_EVT_NODE_REFOUND:
1711 break;
1712
1713 case OCS_EVT_NODE_MISSING:
1714 if (node->sport->enable_rscn) {
1715 ocs_node_transition(node, __ocs_d_device_gone, NULL);
1716 }
1717 break;
1718
1719 case OCS_EVT_SRRS_ELS_CMPL_OK:
1720 /* T, or I+T, PRLI accept completed ok */
1721 ocs_assert(node->els_cmpl_cnt, NULL);
1722 node->els_cmpl_cnt--;
1723 break;
1724
1725 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1726 /* T, or I+T, PRLI accept failed to complete */
1727 ocs_assert(node->els_cmpl_cnt, NULL);
1728 node->els_cmpl_cnt--;
1729 node_printf(node, "Failed to send PRLI LS_ACC\n");
1730 break;
1731
1732 default:
1733 __ocs_d_common(__func__, ctx, evt, arg);
1734 return NULL;
1735 }
1736
1737 return NULL;
1738 }
1739
1740 /**
1741 * @ingroup device_sm
1742 * @brief Device node state machine: Node is gone (absent from GID_PT).
1743 *
1744 * <h3 class="desc">Description</h3>
1745 * State entered when a node is detected as being gone (absent from GID_PT).
1746 *
1747 * @param ctx Remote node state machine context.
1748 * @param evt Event to process
1749 * @param arg Per event optional argument
1750 *
1751 * @return Returns NULL.
1752 */
1753
1754 void *
1755 __ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1756 {
1757 int32_t rc = OCS_SCSI_CALL_COMPLETE;
1758 int32_t rc_2 = OCS_SCSI_CALL_COMPLETE;
1759 ocs_node_cb_t *cbdata = arg;
1760 std_node_state_decl();
1761
1762 node_sm_trace();
1763
1764 switch(evt) {
1765 case OCS_EVT_ENTER: {
1766 const char *labels[] = {"none", "initiator", "target", "initiator+target"};
1767
1768 device_printf(ocs->dev, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name,
1769 labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn);
1770
1771 switch(ocs_node_get_enable(node)) {
1772 case OCS_NODE_ENABLE_T_TO_T:
1773 case OCS_NODE_ENABLE_I_TO_T:
1774 case OCS_NODE_ENABLE_IT_TO_T:
1775 rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1776 break;
1777
1778 case OCS_NODE_ENABLE_T_TO_I:
1779 case OCS_NODE_ENABLE_I_TO_I:
1780 case OCS_NODE_ENABLE_IT_TO_I:
1781 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1782 break;
1783
1784 case OCS_NODE_ENABLE_T_TO_IT:
1785 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1786 break;
1787
1788 case OCS_NODE_ENABLE_I_TO_IT:
1789 rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1790 break;
1791
1792 case OCS_NODE_ENABLE_IT_TO_IT:
1793 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1794 rc_2 = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1795 break;
1796
1797 default:
1798 rc = OCS_SCSI_CALL_COMPLETE;
1799 break;
1800 }
1801
1802 if ((rc == OCS_SCSI_CALL_COMPLETE) && (rc_2 == OCS_SCSI_CALL_COMPLETE)) {
1803 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1804 }
1805
1806 break;
1807 }
1808 case OCS_EVT_NODE_REFOUND:
1809 /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
1810
1811 /* reauthenticate with PLOGI/PRLI */
1812 /* ocs_node_transition(node, __ocs_d_discovered, NULL); */
1813
1814 /* reauthenticate with ADISC
1815 * sm: send ADISC */
1816 ocs_send_adisc(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1817 ocs_node_transition(node, __ocs_d_wait_adisc_rsp, NULL);
1818 break;
1819
1820 case OCS_EVT_PLOGI_RCVD: {
1821 /* sm: save sparams, set send_plogi_acc, post implicit logout
1822 * Save plogi parameters */
1823 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1824 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1825
1826 /* Restart node attach with new service parameters, and send ACC */
1827 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1828 break;
1829 }
1830
1831 case OCS_EVT_FCP_CMD_RCVD: {
		/* most likely a stale frame (received prior to link down); if we attempt
		 * to send a LOGO, it will probably time out and eat up 20s, so drop the FCP_CMND
		 */
1835 node_printf(node, "FCP_CMND received, drop\n");
1836 break;
1837 }
1838 case OCS_EVT_LOGO_RCVD: {
1839 /* I, T, I+T */
1840 fc_header_t *hdr = cbdata->header->dma.virt;
1841 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1842 /* sm: send LOGO acc */
1843 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1844 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1845 break;
1846 }
1847 default:
1848 __ocs_d_common(__func__, ctx, evt, arg);
1849 return NULL;
1850 }
1851
1852 return NULL;
1853 }
1854
1855 /**
1856 * @ingroup device_sm
1857 * @brief Device node state machine: Wait for the ADISC response.
1858 *
1859 * <h3 class="desc">Description</h3>
1860 * Waits for the ADISC response from the remote node.
1861 *
1862 * @param ctx Remote node state machine context.
1863 * @param evt Event to process.
1864 * @param arg Per event optional argument.
1865 *
1866 * @return Returns NULL.
1867 */
1868
1869 void *
1870 __ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1871 {
1872 ocs_node_cb_t *cbdata = arg;
1873 std_node_state_decl();
1874
1875 node_sm_trace();
1876
1877 switch(evt) {
1878 case OCS_EVT_SRRS_ELS_REQ_OK:
1879 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
1880 return NULL;
1881 }
1882 ocs_assert(node->els_req_cnt, NULL);
1883 node->els_req_cnt--;
1884 ocs_node_transition(node, __ocs_d_device_ready, NULL);
1885 break;
1886
1887 case OCS_EVT_SRRS_ELS_REQ_RJT:
1888 /* received an LS_RJT, in this case, send shutdown (explicit logo)
1889 * event which will unregister the node, and start over with PLOGI
1890 */
1891 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
1892 return NULL;
1893 }
1894 ocs_assert(node->els_req_cnt, NULL);
1895 node->els_req_cnt--;
1896 /*sm: post explicit logout */
1897 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
1898 break;
1899
1900 case OCS_EVT_LOGO_RCVD: {
1901 /* In this case, we have the equivalent of an LS_RJT for the ADISC,
1902 * so we need to abort the ADISC, and re-login with PLOGI
1903 */
1904 /*sm: request abort, send LOGO acc */
1905 fc_header_t *hdr = cbdata->header->dma.virt;
1906 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1907 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1908 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1909 break;
1910 }
1911 default:
1912 __ocs_d_common(__func__, ctx, evt, arg);
1913 return NULL;
1914 }
1915
1916 return NULL;
1917 }