FreeBSD/Linux Kernel Cross Reference
sys/dev/ocs_fc/ocs_fabric.c

    1 /*-
    2  * Copyright (c) 2017 Broadcom. All rights reserved.
    3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions are met:
    7  *
    8  * 1. Redistributions of source code must retain the above copyright notice,
    9  *    this list of conditions and the following disclaimer.
   10  *
   11  * 2. Redistributions in binary form must reproduce the above copyright notice,
   12  *    this list of conditions and the following disclaimer in the documentation
   13  *    and/or other materials provided with the distribution.
   14  *
   15  * 3. Neither the name of the copyright holder nor the names of its contributors
   16  *    may be used to endorse or promote products derived from this software
   17  *    without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
   23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  *
   31  * $FreeBSD$
   32  */
   33 
   34 /**
   35  * @file
   36  *
   37  * This file implements remote node state machines for:
   38  * - Fabric logins.
   39  * - Fabric controller events.
   40  * - Name/directory services interaction.
   41  * - Point-to-point logins.
   42  */
   43 
   44 /*!
   45 @defgroup fabric_sm Node State Machine: Fabric States
   46 @defgroup ns_sm Node State Machine: Name/Directory Services States
   47 @defgroup p2p_sm Node State Machine: Point-to-Point Node States
   48 */
   49 
   50 #include "ocs.h"
   51 #include "ocs_fabric.h"
   52 #include "ocs_els.h"
   53 #include "ocs_device.h"
   54 
   55 static void ocs_fabric_initiate_shutdown(ocs_node_t *node);
   56 static void * __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
   57 static int32_t ocs_start_ns_node(ocs_sport_t *sport);
   58 static int32_t ocs_start_fabctl_node(ocs_sport_t *sport);
   59 static int32_t ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len);
   60 static void ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata);
   61 static uint64_t ocs_get_wwpn(fc_plogi_payload_t *sp);
   62 static void gidpt_delay_timer_cb(void *arg);
   63 
   64 /**
   65  * @ingroup fabric_sm
   66  * @brief Fabric node state machine: Initial state.
   67  *
   68  * @par Description
   69  * Send an FLOGI to a well-known fabric.
   70  *
   71  * @param ctx Remote node sm context.
   72  * @param evt Event to process.
   73  * @param arg Per event optional argument.
   74  *
   75  * @return Returns NULL.
   76  */
   77 void *
   78 __ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
   79 {
   80         std_node_state_decl();
   81 
   82         node_sm_trace();
   83 
   84         switch(evt) {
   85         case OCS_EVT_REENTER:   /* not sure why we're getting these ... */
   86                 ocs_log_debug(node->ocs, ">>> reenter !!\n");
   87                 /* fall through */
   88         case OCS_EVT_ENTER:
   89                 /* sm: / send FLOGI */
   90                 ocs_send_flogi(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
   91                 ocs_node_transition(node, __ocs_fabric_flogi_wait_rsp, NULL);
   92                 break;
   93 
   94         default:
   95                 __ocs_fabric_common(__func__, ctx, evt, arg);
   96                 break;
   97         }
   98 
   99         return NULL;
  100 }
  101 
  102 /**
  103  * @ingroup fabric_sm
  104  * @brief Set sport topology.
  105  *
  106  * @par Description
  107  * Set sport topology.
  108  *
  109  * @param node Pointer to the node for which the topology is set.
  110  * @param topology Topology to set.
  111  *
   112  * @return None.
  113  */
  114 void
  115 ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology)
  116 {
  117         node->sport->topology = topology;
  118 }
  119 
  120 /**
  121  * @ingroup fabric_sm
  122  * @brief Notify sport topology.
  123  * @par Description
   124  * Notify all other nodes on the sport of the discovered topology.
   125  * @param node Pointer to the node whose sport topology is being reported.
   126  * @return None.
  127  */
  128 void
  129 ocs_fabric_notify_topology(ocs_node_t *node)
  130 {
  131         ocs_node_t *tmp_node;
  132         ocs_node_t *next;
  133         ocs_sport_topology_e topology = node->sport->topology;
  134 
  135         /* now loop through the nodes in the sport and send topology notification */
  136         ocs_sport_lock(node->sport);
  137         ocs_list_foreach_safe(&node->sport->node_list, tmp_node, next) {
  138                 if (tmp_node != node) {
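                               /* The topology value is passed as the event argument (cast to a
                                * pointer) and consumed by each node's handler for the
                                * OCS_EVT_SPORT_TOPOLOGY_NOTIFY event. */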
  139                         ocs_node_post_event(tmp_node, OCS_EVT_SPORT_TOPOLOGY_NOTIFY, (void *)topology);
  140                 }
  141         }
  142         ocs_sport_unlock(node->sport);
  143 }
  144 
  145 /**
  146  * @ingroup fabric_sm
  147  * @brief Fabric node state machine: Wait for an FLOGI response.
  148  *
  149  * @par Description
  150  * Wait for an FLOGI response event.
  151  *
  152  * @param ctx Remote node state machine context.
  153  * @param evt Event to process.
  154  * @param arg Per event optional argument.
  155  *
  156  * @return Returns NULL.
  157  */
  158 
  159 void *
  160 __ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  161 {
  162         ocs_node_cb_t *cbdata = arg;
  163         std_node_state_decl();
  164 
  165         node_sm_trace();
  166 
  167         switch(evt) {
  168         case OCS_EVT_SRRS_ELS_REQ_OK: {
  169                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
  170                         return NULL;
  171                 }
  172                 ocs_assert(node->els_req_cnt, NULL);
  173                 node->els_req_cnt--;
  174 
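                       /* Save the FLOGI response service parameters on the domain;
                        * ocs_rnode_is_winner() consults the domain's saved FLOGI service
                        * parameters when choosing the p2p winner. */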
  175                 ocs_domain_save_sparms(node->sport->domain, cbdata->els->els_rsp.virt);
  176 
  177                 ocs_display_sparams(node->display_name, "flogi rcvd resp", 0, NULL,
  178                         ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
  179 
   180                 /* Check to see if the remote port is an F_PORT or an N_PORT */
  181                 if (ocs_rnode_is_nport(cbdata->els->els_rsp.virt)) {
  182                         /* sm: if nport and p2p_winner / ocs_domain_attach */
  183                         ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
  184                         if (ocs_p2p_setup(node->sport)) {
  185                                 node_printf(node, "p2p setup failed, shutting down node\n");
  186                                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
  187                                 ocs_fabric_initiate_shutdown(node);
  188                         } else {
  189                                 if (node->sport->p2p_winner) {
  190                                         ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
  191                                         if (!node->sport->domain->attached) {
  192                                                 node_printf(node, "p2p winner, domain not attached\n");
  193                                                 ocs_domain_attach(node->sport->domain, node->sport->p2p_port_id);
  194                                         } else {
  195                                                 /* already attached, just send ATTACH_OK */
  196                                                 node_printf(node, "p2p winner, domain already attached\n");
  197                                                 ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
  198                                         }
  199                                 } else {
  200                                         /* peer is p2p winner; PLOGI will be received on the
  201                                          * remote SID=1 node; this node has served its purpose
  202                                          */
  203                                         node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
  204                                         ocs_fabric_initiate_shutdown(node);
  205                                 }
  206                         }
  207                 } else {
  208                         /* sm: if not nport / ocs_domain_attach */
  209                         /* ext_status has the fc_id, attach domain */
  210                         ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_FABRIC);
  211                         ocs_fabric_notify_topology(node);
  212                         ocs_assert(!node->sport->domain->attached, NULL);
  213                         ocs_domain_attach(node->sport->domain, cbdata->ext_status);
  214                         ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
  215                 }
  216 
  217                 break;
  218         }
  219 
  220         case OCS_EVT_ELS_REQ_ABORTED:
  221         case OCS_EVT_SRRS_ELS_REQ_RJT:
  222         case OCS_EVT_SRRS_ELS_REQ_FAIL: {
  223                 ocs_sport_t *sport = node->sport;
  224                 /*
   225                  * with these errors, we have no recovery, so shut down the sport; leave the link
  226                  * up and the domain ready
  227                  */
  228                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
  229                         return NULL;
  230                 }
  231                 node_printf(node, "FLOGI failed evt=%s, shutting down sport [%s]\n", ocs_sm_event_name(evt),
  232                         sport->display_name);
  233                 ocs_assert(node->els_req_cnt, NULL);
  234                 node->els_req_cnt--;
  235                 ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
  236                 break;
  237         }
  238 
  239         default:
  240                 __ocs_fabric_common(__func__, ctx, evt, arg);
  241                 break;
  242         }
  243 
  244         return NULL;
  245 }
  246 
  247 /**
  248  * @ingroup fabric_sm
  249  * @brief Fabric node state machine: Initial state for a virtual port.
  250  *
  251  * @par Description
  252  * State entered when a virtual port is created. Send FDISC.
  253  *
  254  * @param ctx Remote node state machine context.
  255  * @param evt Event to process.
  256  * @param arg Per event optional argument.
  257  *
  258  * @return Returns NULL.
  259  */
  260 void *
  261 __ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  262 {
  263         std_node_state_decl();
  264 
  265         node_sm_trace();
  266 
  267         switch(evt) {
  268         case OCS_EVT_ENTER:
  269                 /* sm: send FDISC */
  270                 ocs_send_fdisc(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  271                 ocs_node_transition(node, __ocs_fabric_fdisc_wait_rsp, NULL);
  272                 break;
  273 
  274         default:
  275                 __ocs_fabric_common(__func__, ctx, evt, arg);
  276                 break;
  277         }
  278 
  279         return NULL;
  280 }
  281 
  282 /**
  283  * @ingroup fabric_sm
  284  * @brief Fabric node state machine: Wait for an FDISC response
  285  *
  286  * @par Description
  287  * Used for a virtual port. Waits for an FDISC response. If OK, issue a HW port attach.
  288  *
  289  * @param ctx Remote node state machine context.
  290  * @param evt Event to process.
  291  * @param arg Per event optional argument.
  292  *
  293  * @return Returns NULL.
  294  */
  295 void *
  296 __ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  297 {
  298         ocs_node_cb_t *cbdata = arg;
  299         std_node_state_decl();
  300 
  301         node_sm_trace();
  302 
  303         switch(evt) {
  304         case OCS_EVT_SRRS_ELS_REQ_OK: {
  305                 /* fc_id is in ext_status */
  306                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
  307                         return NULL;
  308                 }
  309 
  310                 ocs_display_sparams(node->display_name, "fdisc rcvd resp", 0, NULL,
  311                         ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
  312 
  313                 ocs_assert(node->els_req_cnt, NULL);
  314                 node->els_req_cnt--;
  315                 /* sm: ocs_sport_attach */
  316                 ocs_sport_attach(node->sport, cbdata->ext_status);
  317                 ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
  318                 break;
  319         }
  320 
  321         case OCS_EVT_SRRS_ELS_REQ_RJT:
  322         case OCS_EVT_SRRS_ELS_REQ_FAIL: {
  323                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
  324                         return NULL;
  325                 }
  326                 ocs_assert(node->els_req_cnt, NULL);
  327                 node->els_req_cnt--;
  328                 ocs_log_err(ocs, "FDISC failed, shutting down sport\n");
  329                 /* sm: shutdown sport */
  330                 ocs_sm_post_event(&node->sport->sm, OCS_EVT_SHUTDOWN, NULL);
  331                 break;
  332         }
  333 
  334         default:
  335                 __ocs_fabric_common(__func__, ctx, evt, arg);
  336                 break;
  337         }
  338 
  339         return NULL;
  340 }
  341 
  342 /**
  343  * @ingroup fabric_sm
  344  * @brief Fabric node state machine: Wait for a domain/sport attach event.
  345  *
  346  * @par Description
  347  * Waits for a domain/sport attach event.
  348  *
  349  * @param ctx Remote node state machine context.
  350  * @param evt Event to process.
  351  * @param arg Per event optional argument.
  352  *
  353  * @return Returns NULL.
  354  */
  355 void *
  356 __ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  357 {
  358         std_node_state_decl();
  359 
  360         node_sm_trace();
  361 
  362         switch(evt) {
  363         case OCS_EVT_ENTER:
  364                 ocs_node_hold_frames(node);
  365                 break;
  366 
  367         case OCS_EVT_EXIT:
  368                 ocs_node_accept_frames(node);
  369                 break;
  370         case OCS_EVT_DOMAIN_ATTACH_OK:
  371         case OCS_EVT_SPORT_ATTACH_OK: {
  372                 int rc;
  373 
  374                 rc = ocs_start_ns_node(node->sport);
  375                 if (rc)
  376                         return NULL;
  377 
   378                 /* sm: if enable_rscn / start fabctl node
  379                  * Instantiate the fabric controller (sends SCR) */
  380                 if (node->sport->enable_rscn) {
  381                         rc = ocs_start_fabctl_node(node->sport);
  382                         if (rc)
  383                                 return NULL;
  384                 }
  385                 ocs_node_transition(node, __ocs_fabric_idle, NULL);
  386                 break;
  387         }
  388         default:
  389                 __ocs_fabric_common(__func__, ctx, evt, arg);
  390                 return NULL;
  391         }
  392 
  393         return NULL;
  394 }
  395 
  396 /**
  397  * @ingroup fabric_sm
  398  * @brief Fabric node state machine: Fabric node is idle.
  399  *
  400  * @par Description
  401  * Wait for fabric node events.
  402  *
  403  * @param ctx Remote node state machine context.
  404  * @param evt Event to process.
  405  * @param arg Per event optional argument.
  406  *
  407  * @return Returns NULL.
  408  */
  409 void *
  410 __ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  411 {
  412         std_node_state_decl();
  413 
  414         node_sm_trace();
  415 
  416         switch(evt) {
  417         case OCS_EVT_DOMAIN_ATTACH_OK:
  418                 break;
  419         default:
  420                 __ocs_fabric_common(__func__, ctx, evt, arg);
  421                 return NULL;
  422         }
  423 
  424         return NULL;
  425 }
  426 
  427 /**
  428  * @ingroup ns_sm
  429  * @brief Name services node state machine: Initialize.
  430  *
  431  * @par Description
  432  * A PLOGI is sent to the well-known name/directory services node.
  433  *
  434  * @param ctx Remote node state machine context.
  435  * @param evt Event to process.
  436  * @param arg Per event optional argument.
  437  *
  438  * @return Returns NULL.
  439  */
  440 void *
  441 __ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  442 {
  443         std_node_state_decl();
  444 
  445         node_sm_trace();
  446 
  447         switch(evt) {
  448         case OCS_EVT_ENTER:
  449                 /* sm: send PLOGI */
  450                 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  451                 ocs_node_transition(node, __ocs_ns_plogi_wait_rsp, NULL);
  452                 break;
  453         default:
  454                 __ocs_fabric_common(__func__, ctx, evt, arg);
  455                 break;
  456         }
  457 
  458         return NULL;
  459 }
  460 
  461 /**
  462  * @ingroup ns_sm
  463  * @brief Name services node state machine: Wait for a PLOGI response.
  464  *
  465  * @par Description
  466  * Waits for a response from PLOGI to name services node, then issues a
  467  * node attach request to the HW.
  468  *
  469  * @param ctx Remote node state machine context.
  470  * @param evt Event to process.
  471  * @param arg Per event optional argument.
  472  *
  473  * @return Returns NULL.
  474  */
  475 void *
  476 __ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  477 {
  478         int32_t rc;
  479         ocs_node_cb_t *cbdata = arg;
  480         std_node_state_decl();
  481 
  482         node_sm_trace();
  483 
  484         switch(evt) {
  485         case OCS_EVT_SRRS_ELS_REQ_OK: {
  486                 /* Save service parameters */
  487                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
  488                         return NULL;
  489                 }
  490                 ocs_assert(node->els_req_cnt, NULL);
  491                 node->els_req_cnt--;
  492                 /* sm: save sparams, ocs_node_attach */
  493                 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
  494                 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
  495                         ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
  496                 rc = ocs_node_attach(node);
  497                 ocs_node_transition(node, __ocs_ns_wait_node_attach, NULL);
  498                 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
  499                         ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
  500                 }
  501                 break;
  502         }
  503         default:
  504                 __ocs_fabric_common(__func__, ctx, evt, arg);
  505                 return NULL;
  506         }
  507 
  508         return NULL;
  509 }
  510 
  511 /**
  512  * @ingroup ns_sm
  513  * @brief Name services node state machine: Wait for a node attach completion.
  514  *
  515  * @par Description
  516  * Waits for a node attach completion, then issues an RFTID name services
  517  * request.
  518  *
  519  * @param ctx Remote node state machine context.
  520  * @param evt Event to process.
  521  * @param arg Per event optional argument.
  522  *
  523  * @return Returns NULL.
  524  */
  525 void *
  526 __ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  527 {
  528         std_node_state_decl();
  529 
  530         node_sm_trace();
  531 
  532         switch(evt) {
  533         case OCS_EVT_ENTER:
  534                 ocs_node_hold_frames(node);
  535                 break;
  536 
  537         case OCS_EVT_EXIT:
  538                 ocs_node_accept_frames(node);
  539                 break;
  540 
  541         case OCS_EVT_NODE_ATTACH_OK:
  542                 node->attached = TRUE;
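                       /* RFT_ID registers this port's supported FC-4 types with the name
                        * server; RFF_ID, sent from the next state, registers the FC-4
                        * features. */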
  543                 /* sm: send RFTID */
  544                 ocs_ns_send_rftid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
  545                                  OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  546                 ocs_node_transition(node, __ocs_ns_rftid_wait_rsp, NULL);
  547                 break;
  548 
  549         case OCS_EVT_NODE_ATTACH_FAIL:
  550                 /* node attach failed, shutdown the node */
  551                 node->attached = FALSE;
  552                 node_printf(node, "Node attach failed\n");
  553                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
  554                 ocs_fabric_initiate_shutdown(node);
  555                 break;
  556 
  557         case OCS_EVT_SHUTDOWN:
  558                 node_printf(node, "Shutdown event received\n");
  559                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
  560                 ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
  561                 break;
  562 
   563         /* If an RSCN is received, just ignore it;
   564          * we haven't sent GID_PT yet (the ACC is sent by the fabctl node) */
  565         case OCS_EVT_RSCN_RCVD:
  566                 break;
  567 
  568         default:
  569                 __ocs_fabric_common(__func__, ctx, evt, arg);
  570                 return NULL;
  571         }
  572 
  573         return NULL;
  574 }
  575 
  576 /**
  577  * @ingroup ns_sm
  578  * @brief Wait for a domain/sport/node attach completion, then
   579  * shut down.
  580  *
  581  * @par Description
  582  * Waits for a domain/sport/node attach completion, then shuts
   583  * the node down.
  584  *
  585  * @param ctx Remote node state machine context.
  586  * @param evt Event to process.
  587  * @param arg Per event optional argument.
  588  *
  589  * @return Returns NULL.
  590  */
  591 void *
  592 __ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  593 {
  594         std_node_state_decl();
  595 
  596         node_sm_trace();
  597 
  598         switch(evt) {
  599         case OCS_EVT_ENTER:
  600                 ocs_node_hold_frames(node);
  601                 break;
  602 
  603         case OCS_EVT_EXIT:
  604                 ocs_node_accept_frames(node);
  605                 break;
  606 
   607         /* wait for any of these attach events and then shut down */
  608         case OCS_EVT_NODE_ATTACH_OK:
  609                 node->attached = TRUE;
  610                 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
  611                 ocs_fabric_initiate_shutdown(node);
  612                 break;
  613 
  614         case OCS_EVT_NODE_ATTACH_FAIL:
  615                 node->attached = FALSE;
  616                 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
  617                 ocs_fabric_initiate_shutdown(node);
  618                 break;
  619 
  620         /* ignore shutdown event as we're already in shutdown path */
  621         case OCS_EVT_SHUTDOWN:
  622                 node_printf(node, "Shutdown event received\n");
  623                 break;
  624 
  625         default:
  626                 __ocs_fabric_common(__func__, ctx, evt, arg);
  627                 return NULL;
  628         }
  629 
  630         return NULL;
  631 }
  632 
  633 /**
  634  * @ingroup ns_sm
  635  * @brief Name services node state machine: Wait for an RFTID response event.
  636  *
  637  * @par Description
   638  * Waits for an RFTID response event; on success, an RFFID name services
   639  * request is issued.
  640  *
  641  * @param ctx Remote node state machine context.
  642  * @param evt Event to process.
  643  * @param arg Per event optional argument.
  644  *
  645  * @return Returns NULL.
  646  */
  647 void *
  648 __ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  649 {
  650         std_node_state_decl();
  651 
  652         node_sm_trace();
  653 
  654         switch(evt) {
  655         case OCS_EVT_SRRS_ELS_REQ_OK:
  656                 if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFT_ID, __ocs_fabric_common, __func__)) {
  657                         return NULL;
  658                 }
  659                 ocs_assert(node->els_req_cnt, NULL);
  660                 node->els_req_cnt--;
   661                 /* sm: send RFFID */
  662                 ocs_ns_send_rffid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
  663                                 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  664                 ocs_node_transition(node, __ocs_ns_rffid_wait_rsp, NULL);
  665                 break;
  666 
   667         /* If an RSCN is received, just ignore it;
   668          * we haven't sent GID_PT yet (the ACC is sent by the fabctl node) */
  669         case OCS_EVT_RSCN_RCVD:
  670                 break;
  671 
  672         default:
  673                 __ocs_fabric_common(__func__, ctx, evt, arg);
  674                 return NULL;
  675         }
  676 
  677         return NULL;
  678 }
  679 
  680 /**
  681  * @ingroup ns_sm
  682  * @brief Fabric node state machine: Wait for RFFID response event.
  683  *
  684  * @par Description
   685  * Waits for an RFFID response event; if RSCN processing is enabled,
   686  * a GIDPT name services request is issued.
  687  *
  688  * @param ctx Remote node state machine context.
  689  * @param evt Event to process.
  690  * @param arg Per event optional argument.
  691  *
  692  * @return Returns NULL.
  693  */
  694 void *
  695 __ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  696 {
  697         std_node_state_decl();
  698 
  699         node_sm_trace();
  700 
  701         switch(evt) {
  702         case OCS_EVT_SRRS_ELS_REQ_OK:   {
  703                 if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFF_ID, __ocs_fabric_common, __func__)) {
  704                         return NULL;
  705                 }
  706                 ocs_assert(node->els_req_cnt, NULL);
  707                 node->els_req_cnt--;
  708                 if (node->sport->enable_rscn) {
  709                         /* sm: if enable_rscn / send GIDPT */
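                               /* GID_PT asks the name server for all port IDs registered with
                                * the given port type; the response drives remote port discovery
                                * (see ocs_process_gidpt_payload()). */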
  710                         ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
  711                                         OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  712                         ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
  713                 } else {
  714                         /* if 'T' only, we're done, go to idle */
  715                         ocs_node_transition(node, __ocs_ns_idle, NULL);
  716                 }
  717                 break;
  718         }
   719         /* If an RSCN is received, just ignore it;
   720          * we haven't sent GID_PT yet (the ACC is sent by the fabctl node) */
  721         case OCS_EVT_RSCN_RCVD:
  722                 break;
  723 
  724         default:
  725                 __ocs_fabric_common(__func__, ctx, evt, arg);
  726                 return NULL;
  727         }
  728 
  729         return NULL;
  730 }
  731 
  732 /**
  733  * @ingroup ns_sm
  734  * @brief Name services node state machine: Wait for a GIDPT response.
  735  *
  736  * @par Description
  737  * Wait for a GIDPT response from the name server. Process the FC_IDs that are
  738  * reported by creating new remote ports, as needed.
  739  *
  740  * @param ctx Remote node state machine context.
  741  * @param evt Event to process.
  742  * @param arg Per event optional argument.
  743  *
  744  * @return Returns NULL.
  745  */
  746 void *
  747 __ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  748 {
  749         ocs_node_cb_t *cbdata = arg;
  750         std_node_state_decl();
  751 
  752         node_sm_trace();
  753 
  754         switch(evt) {
  755         case OCS_EVT_SRRS_ELS_REQ_OK:   {
  756                 if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_GID_PT, __ocs_fabric_common, __func__)) {
  757                         return NULL;
  758                 }
  759                 ocs_assert(node->els_req_cnt, NULL);
  760                 node->els_req_cnt--;
  761                 /* sm: / process GIDPT payload */
  762                 ocs_process_gidpt_payload(node, cbdata->els->els_rsp.virt, cbdata->els->els_rsp.len);
  763                 /* TODO: should we logout at this point or just go idle */
  764                 ocs_node_transition(node, __ocs_ns_idle, NULL);
  765                 break;
  766         }
  767 
  768         case OCS_EVT_SRRS_ELS_REQ_FAIL: {
  769                 /* not much we can do; will retry with the next RSCN */
  770                 node_printf(node, "GID_PT failed to complete\n");
  771                 ocs_assert(node->els_req_cnt, NULL);
  772                 node->els_req_cnt--;
  773                 ocs_node_transition(node, __ocs_ns_idle, NULL);
  774                 break;
  775         }
  776 
   777         /* if an RSCN is received here, queue up another discovery pass */
  778         case OCS_EVT_RSCN_RCVD: {
  779                 node_printf(node, "RSCN received during GID_PT processing\n");
  780                 node->rscn_pending = 1;
  781                 break;
  782         }
  783 
  784         default:
  785                 __ocs_fabric_common(__func__, ctx, evt, arg);
  786                 return NULL;
  787         }
  788 
  789         return NULL;
  790 }
  791 
  792 /**
  793  * @ingroup ns_sm
  794  * @brief Name services node state machine: Idle state.
  795  *
  796  * @par Description
   797  * Idle. Waits for RSCN received events (posted from the fabric controller node),
   798  * then restarts the GIDPT name services query and processing.
  799  *
  800  * @param ctx Remote node state machine context.
  801  * @param evt Event to process.
  802  * @param arg Per event optional argument.
  803  *
  804  * @return Returns NULL.
  805  */
  806 void *
  807 __ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  808 {
  809         std_node_state_decl();
  810 
  811         node_sm_trace();
  812 
  813         switch(evt) {
  814         case OCS_EVT_ENTER:
  815                 if (!node->rscn_pending) {
  816                         break;
  817                 }
  818                 node_printf(node, "RSCN pending, restart discovery\n");
  819                 node->rscn_pending = 0;
  820 
  821                         /* fall through */
  822 
  823         case OCS_EVT_RSCN_RCVD: {
  824                 /* sm: / send GIDPT
  825                  * If target RSCN processing is enabled, and this is target only
  826                  * (not initiator), and tgt_rscn_delay is non-zero,
  827                  * then we delay issuing the GID_PT
  828                  */
  829                 if ((ocs->tgt_rscn_delay_msec != 0) && !node->sport->enable_ini && node->sport->enable_tgt &&
  830                         enable_target_rscn(ocs)) {
  831                         ocs_node_transition(node, __ocs_ns_gidpt_delay, NULL);
  832                 } else {
  833                         ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
  834                                         OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  835                         ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
  836                 }
  837                 break;
  838         }
  839 
  840         default:
  841                 __ocs_fabric_common(__func__, ctx, evt, arg);
  842                 break;
  843         }
  844 
  845         return NULL;
  846 }
  847 
  848 /**
  849  * @brief Handle GIDPT delay timer callback
  850  *
  851  * @par Description
   852  * Post an OCS_EVT_GIDPT_DELAY_EXPIRED event to the passed-in node.
  853  *
  854  * @param arg Pointer to node.
  855  *
  856  * @return None.
  857  */
  858 static void
  859 gidpt_delay_timer_cb(void *arg)
  860 {
  861         ocs_node_t *node = arg;
  862         int32_t rc;
  863 
  864         ocs_del_timer(&node->gidpt_delay_timer);
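               /* Post the expiration event through the transport layer rather than
                * directly, presumably so it is delivered from a proper event context
                * rather than from the timer callback itself. */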
  865         rc = ocs_xport_control(node->ocs->xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_GIDPT_DELAY_EXPIRED, NULL);
  866         if (rc) {
  867                 ocs_log_err(node->ocs, "ocs_xport_control(OCS_XPORT_POST_NODE_EVENT) failed: %d\n", rc);
  868         }
  869 }
  870 
  871 /**
  872  * @ingroup ns_sm
  873  * @brief Name services node state machine: Delayed GIDPT.
  874  *
  875  * @par Description
   876  * Waits for the GIDPT delay to expire before submitting a GIDPT to the name server.
  877  *
  878  * @param ctx Remote node state machine context.
  879  * @param evt Event to process.
  880  * @param arg Per event optional argument.
  881  *
  882  * @return Returns NULL.
  883  */
  884 void *
  885 __ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  886 {
  887         std_node_state_decl();
  888 
  889         node_sm_trace();
  890 
  891         switch(evt) {
  892         case OCS_EVT_ENTER: {
  893                 time_t delay_msec;
  894 
  895                 ocs_assert(ocs->tgt_rscn_delay_msec != 0, NULL);
  896 
  897                 /*
   898                  * Compute the delay time. Use tgt_rscn_delay by default; if the time since the
   899                  * last GID_PT is less than tgt_rscn_period, then use tgt_rscn_period instead.
  900                  */
  901                 delay_msec = ocs->tgt_rscn_delay_msec;
  902                 if ((ocs_msectime() - node->time_last_gidpt_msec) < ocs->tgt_rscn_period_msec) {
  903                         delay_msec = ocs->tgt_rscn_period_msec;
  904                 }
  905 
  906                 ocs_setup_timer(ocs, &node->gidpt_delay_timer, gidpt_delay_timer_cb, node, delay_msec);
  907 
  908                 break;
  909         }
  910 
  911         case OCS_EVT_GIDPT_DELAY_EXPIRED:
  912                 node->time_last_gidpt_msec = ocs_msectime();
  913                 ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
  914                                 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  915                 ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
  916                 break;
  917 
  918         case OCS_EVT_RSCN_RCVD: {
  919                 ocs_log_debug(ocs, "RSCN received while in GIDPT delay - no action\n");
  920                 break;
  921         }
  922 
  923         default:
  924                 __ocs_fabric_common(__func__, ctx, evt, arg);
  925                 break;
  926         }
  927 
  928         return NULL;
  929 }
  930 
  931 /**
  932  * @ingroup fabric_sm
  933  * @brief Fabric controller node state machine: Initial state.
  934  *
  935  * @par Description
   936  * Send an SCR to the well-known fabric controller address; no explicit login is required.
  937  *
  938  * @param ctx Remote node state machine context.
  939  * @param evt Event to process.
  940  * @param arg Per event optional argument.
  941  *
  942  * @return Returns NULL.
  943  */
  944 void *
  945 __ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  946 {
  947         ocs_node_t *node = ctx->app;
  948 
  949         node_sm_trace();
  950 
  951         switch(evt) {
  952         case OCS_EVT_ENTER:
  953                 /* no need to login to fabric controller, just send SCR */
  954                 ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
  955                 ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
  956                 break;
  957 
  958         case OCS_EVT_NODE_ATTACH_OK:
  959                 node->attached = TRUE;
  960                 break;
  961 
  962         default:
  963                 __ocs_fabric_common(__func__, ctx, evt, arg);
  964                 return NULL;
  965         }
  966 
  967         return NULL;
  968 }
  969 
  970 /**
  971  * @ingroup fabric_sm
  972  * @brief Fabric controller node state machine: Wait for a node attach request
  973  * to complete.
  974  *
  975  * @par Description
  976  * Wait for a node attach to complete. If successful, issue an SCR
  977  * to the fabric controller, subscribing to all RSCN.
  978  *
  979  * @param ctx Remote node state machine context.
  980  * @param evt Event to process.
  981  * @param arg Per event optional argument.
  982  *
  983  * @return Returns NULL.
  984  *
  985  */
  986 void *
  987 __ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
  988 {
  989         std_node_state_decl();
  990 
  991         node_sm_trace();
  992 
  993         switch(evt) {
  994         case OCS_EVT_ENTER:
  995                 ocs_node_hold_frames(node);
  996                 break;
  997 
  998         case OCS_EVT_EXIT:
  999                 ocs_node_accept_frames(node);
 1000                 break;
 1001 
 1002         case OCS_EVT_NODE_ATTACH_OK:
 1003                 node->attached = TRUE;
 1004                 /* sm: / send SCR */
 1005                 ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
 1006                 ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
 1007                 break;
 1008 
 1009         case OCS_EVT_NODE_ATTACH_FAIL:
 1010                 /* node attach failed, shutdown the node */
 1011                 node->attached = FALSE;
 1012                 node_printf(node, "Node attach failed\n");
 1013                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1014                 ocs_fabric_initiate_shutdown(node);
 1015                 break;
 1016 
 1017         case OCS_EVT_SHUTDOWN:
 1018                 node_printf(node, "Shutdown event received\n");
 1019                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1020                 ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
 1021                 break;
 1022 
 1023         default:
 1024                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1025                 return NULL;
 1026         }
 1027 
 1028         return NULL;
 1029 }
 1030 
 1031 /**
 1032  * @ingroup fabric_sm
 1033  * @brief Fabric controller node state machine: Wait for an SCR response from the
 1034  * fabric controller.
 1035  *
 1036  * @par Description
 1037  * Waits for an SCR response from the fabric controller.
 1038  *
 1039  * @param ctx Remote node state machine context.
 1040  * @param evt Event to process.
 1041  * @param arg Per event optional argument.
 1042  *
 1043  * @return Returns NULL.
 1044  */
 1045 void *
 1046 __ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1047 {
 1048         std_node_state_decl();
 1049 
 1050         node_sm_trace();
 1051 
 1052         switch(evt) {
 1053         case OCS_EVT_SRRS_ELS_REQ_OK:
 1054                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_SCR, __ocs_fabric_common, __func__)) {
 1055                         return NULL;
 1056                 }
 1057                 ocs_assert(node->els_req_cnt, NULL);
 1058                 node->els_req_cnt--;
 1059                 ocs_node_transition(node, __ocs_fabctl_ready, NULL);
 1060                 break;
 1061 
 1062         default:
 1063                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1064                 return NULL;
 1065         }
 1066 
 1067         return NULL;
 1068 }
 1069 
 1070 /**
 1071  * @ingroup fabric_sm
 1072  * @brief Fabric controller node state machine: Ready.
 1073  *
 1074  * @par Description
  1075  * In this state, an RSCN sent by the fabric controller is received by this
  1076  * node and forwarded to the name services node object, and an LS_ACC is
  1077  * sent in response.
 1078  *
 1079  * @param ctx Remote node state machine context.
 1080  * @param evt Event to process.
 1081  * @param arg Per event optional argument.
 1082  *
 1083  * @return Returns NULL.
 1084  */
 1085 
 1086 void *
 1087 __ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1088 {
 1089         ocs_node_cb_t *cbdata = arg;
 1090         std_node_state_decl();
 1091 
 1092         node_sm_trace();
 1093 
 1094         switch(evt) {
 1095         case OCS_EVT_RSCN_RCVD: {
 1096                 fc_header_t *hdr = cbdata->header->dma.virt;
 1097 
 1098                 /* sm: / process RSCN (forward to name services node),
 1099                  * send LS_ACC */
 1100                 ocs_process_rscn(node, cbdata);
 1101                 ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
 1102                 ocs_node_transition(node, __ocs_fabctl_wait_ls_acc_cmpl, NULL);
 1103                 break;
 1104         }
 1105 
 1106         default:
 1107                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1108                 return NULL;
 1109         }
 1110 
 1111         return NULL;
 1112 }
 1113 
 1114 /**
 1115  * @ingroup fabric_sm
  1116  * @brief Fabric controller node state machine: Wait for the LS_ACC to complete.
  1117  *
  1118  * @par Description
  1119  * Waits for the LS_ACC, sent in response to the RSCN, to complete.
 1120  *
 1121  * @param ctx Remote node state machine context.
 1122  * @param evt Event to process.
 1123  * @param arg Per event optional argument.
 1124  *
 1125  * @return Returns NULL.
 1126  */
 1127 
 1128 void *
 1129 __ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1130 {
 1131         std_node_state_decl();
 1132 
 1133         node_sm_trace();
 1134 
 1135         switch(evt) {
 1136         case OCS_EVT_ENTER:
 1137                 ocs_node_hold_frames(node);
 1138                 break;
 1139 
 1140         case OCS_EVT_EXIT:
 1141                 ocs_node_accept_frames(node);
 1142                 break;
 1143 
 1144         case OCS_EVT_SRRS_ELS_CMPL_OK:
 1145                 ocs_assert(node->els_cmpl_cnt, NULL);
 1146                 node->els_cmpl_cnt--;
 1147                 ocs_node_transition(node, __ocs_fabctl_ready, NULL);
 1148                 break;
 1149 
 1150         default:
 1151                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1152                 return NULL;
 1153         }
 1154 
 1155         return NULL;
 1156 }
 1157 
 1158 /**
 1159  * @ingroup fabric_sm
 1160  * @brief Initiate fabric node shutdown.
 1161  *
 1162  * @param node Node for which shutdown is initiated.
 1163  *
 1164  * @return Returns None.
 1165  */
 1166 
 1167 static void
 1168 ocs_fabric_initiate_shutdown(ocs_node_t *node)
 1169 {
 1170         ocs_hw_rtn_e rc;
 1171         ocs_t *ocs = node->ocs;
 1172         ocs_scsi_io_alloc_disable(node);
 1173 
 1174         if (node->attached) {
  1175                 /* issue hw node free; don't care if it succeeds right away
  1176                  * or sometime later; node->attached will be checked later in the
  1177                  * shutdown process
 1178                  */
 1179                 rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
 1180                 if (node->rnode.free_group) {
 1181                         ocs_remote_node_group_free(node->node_group);
 1182                         node->node_group = NULL;
 1183                         node->rnode.free_group = FALSE;
 1184                 }
 1185                 if (rc != OCS_HW_RTN_SUCCESS && rc != OCS_HW_RTN_SUCCESS_SYNC) {
 1186                         node_printf(node, "Failed freeing HW node, rc=%d\n", rc);
 1187                 }
 1188         }
 1189         /*
 1190          * node has either been detached or is in the process of being detached,
 1191          * call common node's initiate cleanup function
 1192          */
 1193         ocs_node_initiate_cleanup(node);
 1194 }
 1195 
 1196 /**
 1197  * @ingroup fabric_sm
 1198  * @brief Fabric node state machine: Handle the common fabric node events.
 1199  *
 1200  * @param funcname Function name text.
 1201  * @param ctx Remote node state machine context.
 1202  * @param evt Event to process.
 1203  * @param arg Per event optional argument.
 1204  *
 1205  * @return Returns NULL.
 1206  */
 1207 
 1208 static void *
 1209 __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1210 {
 1211         ocs_node_t *node = NULL;
 1212         ocs_assert(ctx, NULL);
 1213         ocs_assert(ctx->app, NULL);
 1214         node = ctx->app;
 1215 
 1216         switch(evt) {
 1217         case OCS_EVT_DOMAIN_ATTACH_OK:
 1218                 break;
 1219         case OCS_EVT_SHUTDOWN:
 1220                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1221                 ocs_fabric_initiate_shutdown(node);
 1222                 break;
 1223 
 1224         default:
 1225                 /* call default event handler common to all nodes */
 1226                 __ocs_node_common(funcname, ctx, evt, arg);
 1227                 break;
 1228         }
 1229         return NULL;
 1230 }
 1231 
 1232 /**
 1233  * @brief Return TRUE if the remote node is an NPORT.
 1234  *
 1235  * @par Description
 1236  * Examines the service parameters. Returns TRUE if the node reports itself as
 1237  * an NPORT.
 1238  *
 1239  * @param remote_sparms Remote node service parameters.
 1240  *
 1241  * @return Returns TRUE if NPORT.
 1242  */
 1243 
 1244 int32_t
 1245 ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms)
 1246 {
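               /* Word 1, bit 28 of the common service parameters is the F_Port bit in
                * the FLOGI LS_ACC; if it is clear, the responder is an N_Port
                * (point-to-point rather than fabric). */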
 1247         return (ocs_be32toh(remote_sparms->common_service_parameters[1]) & (1U << 28)) == 0;
 1248 }
 1249 
 1250 /**
  1251  * @brief Return the node's WWPN as a uint64_t.
 1252  *
 1253  * @par Description
 1254  * The WWPN is computed from service parameters, and returned as a uint64_t.
 1255  *
 1256  * @param sp Pointer to service parameters.
 1257  *
 1258  * @return Returns WWPN.
 1259  *
 1260  */
 1261 
 1262 static uint64_t
 1263 ocs_get_wwpn(fc_plogi_payload_t *sp)
 1264 {
 1265         return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
 1266 }
 1267 
 1268 /**
 1269  * @brief Return TRUE if the remote node is the point-to-point winner.
 1270  *
 1271  * @par Description
 1272  * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
 1273  * higher than the local node's WWPN.
 1274  *
 1275  * @param sport Pointer to the sport object.
 1276  *
 1277  * @return
 1278  * - 0, if the remote node is the loser.
 1279  * - 1, if the remote node is the winner.
 1280  * - (-1), if remote node is neither the loser nor the winner
 1281  *   (WWPNs match)
 1282  */
 1283 
 1284 static int32_t
 1285 ocs_rnode_is_winner(ocs_sport_t *sport)
 1286 {
 1287         fc_plogi_payload_t *remote_sparms = (fc_plogi_payload_t*) sport->domain->flogi_service_params;
 1288         uint64_t remote_wwpn = ocs_get_wwpn(remote_sparms);
 1289         uint64_t local_wwpn = sport->wwpn;
 1290         char prop_buf[32];
 1291         uint64_t wwn_bump = 0;
 1292 
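               /* "wwn_bump" appears to be a debug/test property: its value is XORed
                * into the local WWPN to influence (or force) the p2p winner selection. */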
 1293         if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
 1294                 wwn_bump = ocs_strtoull(prop_buf, 0, 0);
 1295         }
 1296         local_wwpn ^= wwn_bump;
 1297 
 1298         remote_wwpn = ocs_get_wwpn(remote_sparms);
 1299 
 1300         ocs_log_debug(sport->ocs, "r: %08x %08x\n", ocs_be32toh(remote_sparms->port_name_hi), ocs_be32toh(remote_sparms->port_name_lo));
 1301         ocs_log_debug(sport->ocs, "l: %08x %08x\n", (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
 1302 
 1303         if (remote_wwpn == local_wwpn) {
 1304                 ocs_log_warn(sport->ocs, "WWPN of remote node [%08x %08x] matches local WWPN\n",
 1305                         (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
 1306                 return (-1);
 1307         }
 1308 
 1309         return (remote_wwpn > local_wwpn);
 1310 }
 1311 
 1312 /**
 1313  * @ingroup p2p_sm
 1314  * @brief Point-to-point state machine: Wait for the domain attach to complete.
 1315  *
 1316  * @par Description
 1317  * Once the domain attach has completed, a PLOGI is sent (if we're the
 1318  * winning point-to-point node).
 1319  *
 1320  * @param ctx Remote node state machine context.
 1321  * @param evt Event to process.
 1322  * @param arg Per event optional argument.
 1323  *
 1324  * @return Returns NULL.
 1325  */
 1326 
 1327 void *
 1328 __ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1329 {
 1330         std_node_state_decl();
 1331 
 1332         node_sm_trace();
 1333 
 1334         switch(evt) {
 1335         case OCS_EVT_ENTER:
 1336                 ocs_node_hold_frames(node);
 1337                 break;
 1338 
 1339         case OCS_EVT_EXIT:
 1340                 ocs_node_accept_frames(node);
 1341                 break;
 1342 
 1343         case OCS_EVT_DOMAIN_ATTACH_OK: {
 1344                 ocs_sport_t *sport = node->sport;
 1345                 ocs_node_t *rnode;
 1346 
 1347                 /* this transient node (SID=0 (recv'd FLOGI) or DID=fabric (sent FLOGI))
 1348                  * is the p2p winner, will use a separate node to send PLOGI to peer
 1349                  */
 1350                 ocs_assert (node->sport->p2p_winner, NULL);
 1351 
 1352                 rnode = ocs_node_find(sport, node->sport->p2p_remote_port_id);
 1353                 if (rnode != NULL) {
 1354                         /* the "other" transient p2p node has already kicked off the
 1355                          * new node from which PLOGI is sent */
 1356                         node_printf(node, "Node with fc_id x%x already exists\n", rnode->rnode.fc_id);
 1357                         ocs_assert (rnode != node, NULL);
 1358                 } else {
 1359                         /* create new node (SID=1, DID=2) from which to send PLOGI */
 1360                         rnode = ocs_node_alloc(sport, sport->p2p_remote_port_id, FALSE, FALSE);
 1361                         if (rnode == NULL) {
 1362                                 ocs_log_err(ocs, "node alloc failed\n");
 1363                                 return NULL;
 1364                         }
 1365 
 1366                         ocs_fabric_notify_topology(node);
 1367                         /* sm: allocate p2p remote node */
 1368                         ocs_node_transition(rnode, __ocs_p2p_rnode_init, NULL);
 1369                 }
 1370 
 1371                 /* the transient node (SID=0 or DID=fabric) has served its purpose */
 1372                 if (node->rnode.fc_id == 0) {
 1373                         /* if this is the SID=0 node, move to the init state in case peer
 1374                          * has restarted FLOGI discovery and FLOGI is pending
 1375                          */
 1376                         /* don't send PLOGI on ocs_d_init entry */
 1377                         ocs_node_init_device(node, FALSE);
 1378                 } else {
 1379                         /* if this is the DID=fabric node (we initiated FLOGI), shut it down */
 1380                         node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1381                         ocs_fabric_initiate_shutdown(node);
 1382                 }
 1383                 break;
 1384         }
 1385 
 1386         default:
 1387                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1388                 return NULL;
 1389         }
 1390 
 1391         return NULL;
 1392 }
 1393 
 1394 /**
 1395  * @ingroup p2p_sm
 1396  * @brief Point-to-point state machine: Remote node initialization state.
 1397  *
 1398  * @par Description
 1399  * This state is entered after winning point-to-point, and the remote node
 1400  * is instantiated.
 1401  *
 1402  * @param ctx Remote node state machine context.
 1403  * @param evt Event to process.
 1404  * @param arg Per event optional argument.
 1405  *
 1406  * @return Returns NULL.
 1407  */
 1408 
 1409 void *
 1410 __ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1411 {
 1412         ocs_node_cb_t *cbdata = arg;
 1413         std_node_state_decl();
 1414 
 1415         node_sm_trace();
 1416 
 1417         switch(evt) {
 1418         case OCS_EVT_ENTER:
 1419                 /* sm: / send PLOGI */
 1420                 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
 1421                 ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp, NULL);
 1422                 break;
 1423 
 1424         case OCS_EVT_ABTS_RCVD:
 1425                 /* sm: send BA_ACC */
 1426                 ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
 1427                 break;
 1428 
 1429         default:
 1430                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1431                 return NULL;
 1432         }
 1433 
 1434         return NULL;
 1435 }
 1436 
 1437 /**
 1438  * @ingroup p2p_sm
 1439  * @brief Point-to-point node state machine: Wait for the FLOGI accept completion.
 1440  *
 1441  * @par Description
 1442  * Wait for the FLOGI accept completion.
 1443  *
 1444  * @param ctx Remote node state machine context.
 1445  * @param evt Event to process.
 1446  * @param arg Per event optional argument.
 1447  *
 1448  * @return Returns NULL.
 1449  */
 1450 
 1451 void *
 1452 __ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1453 {
 1454         ocs_node_cb_t *cbdata = arg;
 1455         std_node_state_decl();
 1456 
 1457         node_sm_trace();
 1458 
 1459         switch(evt) {
 1460         case OCS_EVT_ENTER:
 1461                 ocs_node_hold_frames(node);
 1462                 break;
 1463 
 1464         case OCS_EVT_EXIT:
 1465                 ocs_node_accept_frames(node);
 1466                 break;
 1467 
 1468         case OCS_EVT_SRRS_ELS_CMPL_OK:
 1469                 ocs_assert(node->els_cmpl_cnt, NULL);
 1470                 node->els_cmpl_cnt--;
 1471 
 1472                 /* sm: if p2p_winner / domain_attach */
 1473                 if (node->sport->p2p_winner) {
 1474                         ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
 1475                         if (node->sport->domain->attached &&
 1476                             !(node->sport->domain->domain_notify_pend)) {
 1477                                 node_printf(node, "Domain already attached\n");
 1478                                 ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
 1479                         }
 1480                 } else {
 1481                         /* this node has served its purpose; we'll expect a PLOGI on a separate
 1482                          * node (remote SID=0x1); return this node to init state in case peer
 1483                          * restarts discovery -- it may already have (pending frames may exist).
 1484                          */
 1485                         /* don't send PLOGI on ocs_d_init entry */
 1486                         ocs_node_init_device(node, FALSE);
 1487                 }
 1488                 break;
 1489 
 1490         case OCS_EVT_SRRS_ELS_CMPL_FAIL:
 1491                 /* LS_ACC failed, possibly due to link down; shutdown node and wait
 1492                  * for FLOGI discovery to restart */
 1493                 node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
 1494                 ocs_assert(node->els_cmpl_cnt, NULL);
 1495                 node->els_cmpl_cnt--;
 1496                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1497                 ocs_fabric_initiate_shutdown(node);
 1498                 break;
 1499 
 1500         case OCS_EVT_ABTS_RCVD: {
 1501                 /* sm: / send BA_ACC */
 1502                 ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
 1503                 break;
 1504         }
 1505 
 1506         default:
 1507                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1508                 return NULL;
 1509         }
 1510 
 1511         return NULL;
 1512 }
 1513 
 1514 /**
 1515  * @ingroup p2p_sm
 1516  * @brief Point-to-point node state machine: Wait for a PLOGI response
 1517  * as a point-to-point winner.
 1518  *
 1519  * @par Description
 1520  * Wait for a PLOGI response from the remote node as a point-to-point winner.
 1521  * Submit node attach request to the HW.
 1522  *
 1523  * @param ctx Remote node state machine context.
 1524  * @param evt Event to process.
 1525  * @param arg Per event optional argument.
 1526  *
 1527  * @return Returns NULL.
 1528  */
 1529 
 1530 void *
 1531 __ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1532 {
 1533         int32_t rc;
 1534         ocs_node_cb_t *cbdata = arg;
 1535         std_node_state_decl();
 1536 
 1537         node_sm_trace();
 1538 
 1539         switch(evt) {
 1540         case OCS_EVT_SRRS_ELS_REQ_OK: {
 1541                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
 1542                         return NULL;
 1543                 }
 1544                 ocs_assert(node->els_req_cnt, NULL);
 1545                 node->els_req_cnt--;
 1546                 /* sm: / save sparams, ocs_node_attach */
 1547                 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
 1548                 rc = ocs_node_attach(node);
 1549                 ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
 1550                 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
 1551                         ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
 1552                 }
 1553                 break;
 1554         }
 1555         case OCS_EVT_SRRS_ELS_REQ_FAIL: {
 1556                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
 1557                         return NULL;
 1558                 }
 1559                 node_printf(node, "PLOGI failed, shutting down\n");
 1560                 ocs_assert(node->els_req_cnt, NULL);
 1561                 node->els_req_cnt--;
 1562                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1563                 ocs_fabric_initiate_shutdown(node);
 1564                 break;
 1565         }
 1566 
 1567         case OCS_EVT_PLOGI_RCVD: {
 1568                 fc_header_t *hdr = cbdata->header->dma.virt;
 1569                 /* if we're in external loopback mode, just send LS_ACC */
 1570                 if (node->ocs->external_loopback) {
 1571                         ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
 1572                         break;
 1573                 } else {
 1574                         /* if this isn't external loopback, pass to default handler */
 1575                         __ocs_fabric_common(__func__, ctx, evt, arg);
 1576                 }
 1577                 break;
 1578         }
 1579         case OCS_EVT_PRLI_RCVD:
 1580                 /* I, or I+T */
 1581                 /* We sent a PLOGI and, before its completion was seen, received a
 1582                  * PRLI from the remote node (WCQEs and RCQEs arrive on different
 1583                  * queues, so their processing order cannot be assumed). Save the
 1584                  * OX_ID so the PRLI LS_ACC can be sent after the attach, and keep
 1585                  * waiting for the PLOGI response.
 1586                  */
 1587                 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
 1588                 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
 1589                 ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp_recvd_prli, NULL);
 1590                 break;
 1591         default:
 1592                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1593                 return NULL;
 1594         }
 1595 
 1596         return NULL;
 1597 }
 1598 
 1599 /**
 1600  * @ingroup p2p_sm
 1601  * @brief Point-to-point node state machine: Waiting on a response for a
 1602  *      sent PLOGI.
 1603  *
 1604  * @par Description
 1605  * State is entered when the point-to-point winner has sent
 1606  * a PLOGI and is waiting for a response. Before receiving the
 1607  * response, a PRLI was received, implying that the PLOGI was
 1608  * successful.
 1609  *
 1610  * @param ctx Remote node state machine context.
 1611  * @param evt Event to process.
 1612  * @param arg Per event optional argument.
 1613  *
 1614  * @return Returns NULL.
 1615  */
 1616 
 1617 void *
 1618 __ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1619 {
 1620         int32_t rc;
 1621         ocs_node_cb_t *cbdata = arg;
 1622         std_node_state_decl();
 1623 
 1624         node_sm_trace();
 1625 
 1626         switch(evt) {
 1627         case OCS_EVT_ENTER:
 1628                 /*
 1629                  * Since we've received a PRLI, we have a port login and just
 1630                  * need to wait for the PLOGI response to do the node attach;
 1631                  * then we can send the LS_ACC for the PRLI. During this time
 1632                  * we may receive FCP_CMNDs, which is possible since we've
 1633                  * already sent a PRLI and our peer may have accepted it. We
 1634                  * are not waiting on any other unsolicited frames to continue
 1635                  * with the login process, so it does no harm to hold frames
 1636                  * here.
 1637                  */
 1638                 ocs_node_hold_frames(node);
 1639                 break;
 1640 
 1641         case OCS_EVT_EXIT:
 1642                 ocs_node_accept_frames(node);
 1643                 break;
 1644 
 1645         case OCS_EVT_SRRS_ELS_REQ_OK:   /* PLOGI response received */
 1646                 /* Completion from PLOGI sent */
 1647                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
 1648                         return NULL;
 1649                 }
 1650                 ocs_assert(node->els_req_cnt, NULL);
 1651                 node->els_req_cnt--;
 1652                 /* sm: / save sparams, ocs_node_attach */
 1653                 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
 1654                 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
 1655                         ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
 1656                 rc = ocs_node_attach(node);
 1657                 ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
 1658                 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
 1659                         ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
 1660                 }
 1661                 break;
 1662 
 1663         case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
 1664         case OCS_EVT_SRRS_ELS_REQ_RJT:
 1665                 /* PLOGI failed, shutdown the node */
 1666                 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
 1667                         return NULL;
 1668                 }
 1669                 ocs_assert(node->els_req_cnt, NULL);
 1670                 node->els_req_cnt--;
 1671                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1672                 ocs_fabric_initiate_shutdown(node);
 1673                 break;
 1674 
 1675         default:
 1676                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1677                 return NULL;
 1678         }
 1679 
 1680         return NULL;
 1681 }
 1682 
 1683 /**
 1684  * @ingroup p2p_sm
 1685  * @brief Point-to-point node state machine: Wait for a point-to-point node attach
 1686  * to complete.
 1687  *
 1688  * @par Description
 1689  * Waits for the point-to-point node attach to complete.
 1690  *
 1691  * @param ctx Remote node state machine context.
 1692  * @param evt Event to process.
 1693  * @param arg Per event optional argument.
 1694  *
 1695  * @return Returns NULL.
 1696  */
 1697 
 1698 void *
 1699 __ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 1700 {
 1701         ocs_node_cb_t *cbdata = arg;
 1702         std_node_state_decl();
 1703 
 1704         node_sm_trace();
 1705 
 1706         switch(evt) {
 1707         case OCS_EVT_ENTER:
 1708                 ocs_node_hold_frames(node);
 1709                 break;
 1710 
 1711         case OCS_EVT_EXIT:
 1712                 ocs_node_accept_frames(node);
 1713                 break;
 1714 
 1715         case OCS_EVT_NODE_ATTACH_OK:
 1716                 node->attached = TRUE;
 1717                 switch (node->send_ls_acc) {
 1718                 case OCS_NODE_SEND_LS_ACC_PRLI: {
 1719                         ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
 1720                         node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
 1721                         node->ls_acc_io = NULL;
 1722                         break;
 1723                 }
 1724                 case OCS_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
 1725                 case OCS_NODE_SEND_LS_ACC_NONE:
 1726                 default:
 1727                         /* Normal case for I */
 1728                         /* sm: send_plogi_acc is not set / send PLOGI acc */
 1729                         ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
 1730                         break;
 1731                 }
 1732                 break;
 1733 
 1734         case OCS_EVT_NODE_ATTACH_FAIL:
 1735                 /* node attach failed, shutdown the node */
 1736                 node->attached = FALSE;
 1737                 node_printf(node, "Node attach failed\n");
 1738                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1739                 ocs_fabric_initiate_shutdown(node);
 1740                 break;
 1741 
 1742         case OCS_EVT_SHUTDOWN:
 1743                 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
 1744                 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
 1745                 ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
 1746                 break;
 1747         case OCS_EVT_PRLI_RCVD:
 1748                 node_printf(node, "%s: PRLI received before node is attached\n", ocs_sm_event_name(evt));
 1749                 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
 1750                 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
 1751                 break;
 1752         default:
 1753                 __ocs_fabric_common(__func__, ctx, evt, arg);
 1754                 return NULL;
 1755         }
 1756 
 1757         return NULL;
 1758 }
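
/*
 * Illustrative sketch (not part of the listed source): the deferred LS_ACC
 * pattern used above. When a PRLI arrives before the node attach completes,
 * it cannot be answered yet, so the IO and its OX_ID are parked on the node
 * and the response is issued from the OCS_EVT_NODE_ATTACH_OK handler. The
 * helper below is hypothetical and only shows the bookkeeping this relies
 * on; the field names are the ones used by the code above.
 */
#if 0
static void
example_defer_prli_acc(ocs_node_t *node, ocs_io_t *io, fc_header_t *hdr)
{
        /* remember which response to send once the attach completes */
        node->send_ls_acc = OCS_NODE_SEND_LS_ACC_PRLI;
        node->ls_acc_io = io;
        node->ls_acc_oxid = ocs_be16toh(hdr->ox_id);
}
#endif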
 1759 
 1760 /**
 1761  * @brief Start up the name services node.
 1762  *
 1763  * @par Description
 1764  * Allocates and starts up the name services node.
 1765  *
 1766  * @param sport Pointer to the sport structure.
 1767  *
 1768  * @return Returns 0 on success, or a negative error value on failure.
 1769  */
 1770 
 1771 static int32_t
 1772 ocs_start_ns_node(ocs_sport_t *sport)
 1773 {
 1774         ocs_node_t *ns;
 1775 
 1776         /* Instantiate a name services node */
 1777         ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
 1778         if (ns == NULL) {
 1779                 ns = ocs_node_alloc(sport, FC_ADDR_NAMESERVER, FALSE, FALSE);
 1780                 if (ns == NULL) {
 1781                         return -1;
 1782                 }
 1783         }
 1784         /* TODO: for a found ns node, should we be transitioning from here?
 1785          * This breaks the convention that transitions happen only (1) from
 1786          * within the state machine, or (2) immediately after allocation.
 1787          */
 1788         if (ns->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NAMESERVER) {
 1789                 ocs_node_pause(ns, __ocs_ns_init);
 1790         } else {
 1791                 ocs_node_transition(ns, __ocs_ns_init, NULL);
 1792         }
 1793         return 0;
 1794 }
 1795 
 1796 /**
 1797  * @brief Start up the fabric controller node.
 1798  *
 1799  * @par Description
 1800  * Allocates and starts up the fabric controller node.
 1801  *
 1802  * @param sport Pointer to the sport structure.
 1803  *
 1804  * @return Returns 0 on success, or a negative error value on failure.
 1805  */
 1806 
 1807 static int32_t
 1808 ocs_start_fabctl_node(ocs_sport_t *sport)
 1809 {
 1810         ocs_node_t *fabctl;
 1811 
 1812         fabctl = ocs_node_find(sport, FC_ADDR_CONTROLLER);
 1813         if (fabctl == NULL) {
 1814                 fabctl = ocs_node_alloc(sport, FC_ADDR_CONTROLLER, FALSE, FALSE);
 1815                 if (fabctl == NULL) {
 1816                         return -1;
 1817                 }
 1818         }
 1819         /* TODO: for a found fabctl node, should we be transitioning from here?
 1820          * This breaks the convention that transitions happen only (1) from
 1821          * within the state machine, or (2) immediately after allocation.
 1822          */
 1823         ocs_node_transition(fabctl, __ocs_fabctl_init, NULL);
 1824         return 0;
 1825 }
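
/*
 * Illustrative sketch (not part of the listed source): ocs_start_ns_node()
 * and ocs_start_fabctl_node() above share the same "find or allocate the
 * well-known-address node, then kick its state machine" shape. A hypothetical
 * common helper might look roughly like the following; the real driver keeps
 * the two functions separate, and the name server additionally honors the
 * OCS_NODEDB_PAUSE_NAMESERVER debug mask.
 */
#if 0
static int32_t
example_start_wka_node(ocs_sport_t *sport, uint32_t fc_addr,
    void *(*init_state)(ocs_sm_ctx_t *, ocs_sm_event_t, void *))
{
        ocs_node_t *n;

        /* reuse an existing node for this well-known address, else allocate */
        n = ocs_node_find(sport, fc_addr);
        if (n == NULL) {
                n = ocs_node_alloc(sport, fc_addr, FALSE, FALSE);
                if (n == NULL)
                        return -1;
        }
        ocs_node_transition(n, init_state, NULL);
        return 0;
}
#endif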
 1826 
 1827 /**
 1828  * @brief Process the GIDPT payload.
 1829  *
 1830  * @par Description
 1831  * The GIDPT payload is parsed, and new nodes are created, as needed.
 1832  *
 1833  * @param node Pointer to the node structure.
 1834  * @param gidpt Pointer to the GIDPT payload.
 1835  * @param gidpt_len Payload length
 1836  *
 1837  * @return Returns 0 on success, or a negative error value on failure.
 1838  */
 1839 
 1840 static int32_t
 1841 ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
 1842 {
 1843         uint32_t i;
 1844         uint32_t j;
 1845         ocs_node_t *newnode;
 1846         ocs_sport_t *sport = node->sport;
 1847         ocs_t *ocs = node->ocs;
 1848         uint32_t port_id;
 1849         uint32_t port_count;
 1850         ocs_node_t *n;
 1851         ocs_node_t **active_nodes;
 1852         uint32_t portlist_count;
 1853         uint16_t residual;
 1854 
 1855         residual = ocs_be16toh(gidpt->hdr.max_residual_size);
 1856 
 1857         if (residual != 0) {
 1858                 ocs_log_debug(node->ocs, "residual is %u words\n", residual);
 1859         }
 1860 
 1861         if (ocs_be16toh(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
 1862                 node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n",
 1863                         gidpt->hdr.reason_code, gidpt->hdr.reason_code_explanation);
 1864                 return -1;
 1865         }
 1866 
 1867         portlist_count = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
 1868 
 1869         /* Count the number of nodes */
 1870         port_count = 0;
 1871         ocs_sport_lock(sport);
 1872                 ocs_list_foreach(&sport->node_list, n) {
 1873                         port_count ++;
 1874                 }
 1875 
 1876                 /* Allocate a buffer for all nodes */
 1877                 active_nodes = ocs_malloc(node->ocs, port_count * sizeof(*active_nodes), OCS_M_NOWAIT | OCS_M_ZERO);
 1878                 if (active_nodes == NULL) {
 1879                         node_printf(node, "ocs_malloc failed\n");
 1880                         ocs_sport_unlock(sport);
 1881                         return -1;
 1882                 }
 1883 
 1884                 /* Fill buffer with fc_id of active nodes */
 1885                 i = 0;
 1886                 ocs_list_foreach(&sport->node_list, n) {
 1887                         port_id = n->rnode.fc_id;
 1888                         switch (port_id) {
 1889                         case FC_ADDR_FABRIC:
 1890                         case FC_ADDR_CONTROLLER:
 1891                         case FC_ADDR_NAMESERVER:
 1892                                 break;
 1893                         default:
 1894                                 if (!FC_ADDR_IS_DOMAIN_CTRL(port_id)) {
 1895                                         active_nodes[i++] = n;
 1896                                 }
 1897                                 break;
 1898                         }
 1899                 }
 1900 
 1901                 /* update the active nodes buffer */
 1902                 for (i = 0; i < portlist_count; i ++) {
 1903                         port_id = fc_be24toh(gidpt->port_list[i].port_id);
 1904 
 1905                         for (j = 0; j < port_count; j ++) {
 1906                                 if ((active_nodes[j] != NULL) && (port_id == active_nodes[j]->rnode.fc_id)) {
 1907                                         active_nodes[j] = NULL;
 1908                                 }
 1909                         }
 1910 
 1911                         if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
 1912                                 break;
 1913                 }
 1914 
 1915                 /* Those remaining in the active_nodes[] are now gone ! */
 1916                 for (i = 0; i < port_count; i ++) {
 1917                         /* If we're an initiator and the remote node is a target,
 1918                          * or we're a target and target RSCN is enabled, post the
 1919                          * node missing event.
 1920                          */
 1921                         if (active_nodes[i] != NULL) {
 1922                                 if ((node->sport->enable_ini && active_nodes[i]->targ) ||
 1923                                     (node->sport->enable_tgt && enable_target_rscn(ocs))) {
 1924                                         ocs_node_post_event(active_nodes[i], OCS_EVT_NODE_MISSING, NULL);
 1925                                 } else {
 1926                                         node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n",
 1927                                                 active_nodes[i]->rnode.fc_id);
 1928                                 }
 1929                         }
 1930                 }
 1931                 ocs_free(ocs, active_nodes, port_count * sizeof(*active_nodes));
 1932 
 1933                 for(i = 0; i < portlist_count; i ++) {
 1934                         uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
 1935 
 1936                         /* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
 1937 
 1938                         /* Don't create node for ourselves or the associated NPIV ports */
 1939                         if (port_id != node->rnode.sport->fc_id && !ocs_sport_find(sport->domain, port_id)) {
 1940                                 newnode = ocs_node_find(sport, port_id);
 1941                                 if (newnode) {
 1942                                         /* TODO: what if node deleted here?? */
 1943                                         if (node->sport->enable_ini && newnode->targ) {
 1944                                                 ocs_node_post_event(newnode, OCS_EVT_NODE_REFOUND, NULL);
 1945                                         }
 1946                                         /* original code sends ADISC, has notion of "refound" */
 1947                                 } else {
 1948                                         if (node->sport->enable_ini) {
 1949                                                 newnode = ocs_node_alloc(sport, port_id, 0, 0);
 1950                                                 if (newnode == NULL) {
 1951                                                         ocs_log_err(ocs, "ocs_node_alloc() failed\n");
 1952                                                         ocs_sport_unlock(sport);
 1953                                                         return -1;
 1954                                                 }
 1955                                                 /* send PLOGI automatically if initiator */
 1956                                                 ocs_node_init_device(newnode, TRUE);
 1957                                         }
 1958                                 }
 1959                         }
 1960 
 1961                         if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) {
 1962                                 break;
 1963                         }
 1964                 }
 1965         ocs_sport_unlock(sport);
 1966         return 0;
 1967 }
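
/*
 * Illustrative sketch (not part of the listed source): each entry in the
 * GID_PT accept payload parsed above is a 4-byte record, a control byte
 * whose FCCT_GID_PT_LAST_ID bit marks the final entry, followed by a 24-bit
 * big-endian port ID. A minimal walk over such a payload, using the same
 * accessors as the code above, might look like this; gidpt_count_ports()
 * is a hypothetical name.
 */
#if 0
static uint32_t
gidpt_count_ports(fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
{
        uint32_t max = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
        uint32_t i;

        for (i = 0; i < max; i++) {
                uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);

                (void)port_id;          /* a real caller would act on each ID here */
                if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
                        break;          /* last entry in the response */
        }
        return (i < max) ? i + 1 : max;
}
#endif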
 1968 
 1969 /**
 1970  * @brief Set up the domain point-to-point parameters.
 1971  *
 1972  * @par Description
 1973  * The remote node service parameters are examined, and various point-to-point
 1974  * variables are set.
 1975  *
 1976  * @param sport Pointer to the sport object.
 1977  *
 1978  * @return Returns 0 on success, or a negative error value on failure.
 1979  */
 1980 
 1981 int32_t
 1982 ocs_p2p_setup(ocs_sport_t *sport)
 1983 {
 1984         ocs_t *ocs = sport->ocs;
 1985         int32_t rnode_winner;
 1986         rnode_winner = ocs_rnode_is_winner(sport);
 1987 
 1988         /* set sport flags to indicate p2p "winner" */
 1989         if (rnode_winner == 1) {
 1990                 sport->p2p_remote_port_id = 0;
 1991                 sport->p2p_port_id = 0;
 1992                 sport->p2p_winner = FALSE;
 1993         } else if (rnode_winner == 0) {
 1994                 sport->p2p_remote_port_id = 2;
 1995                 sport->p2p_port_id = 1;
 1996                 sport->p2p_winner = TRUE;
 1997         } else {
 1998                 /* no winner; only okay if external loopback enabled */
 1999                 if (sport->ocs->external_loopback) {
 2000                         /*
 2001                          * External loopback mode enabled; local sport and remote node
 2002                          * will be registered with an NPortID = 1;
 2003                          */
 2004                         ocs_log_debug(ocs, "External loopback mode enabled\n");
 2005                         sport->p2p_remote_port_id = 1;
 2006                         sport->p2p_port_id = 1;
 2007                         sport->p2p_winner = TRUE;
 2008                 } else {
 2009                         ocs_log_warn(ocs, "failed to determine p2p winner\n");
 2010                         return rnode_winner;
 2011                 }
 2012         }
 2013         return 0;
 2014 }
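
/*
 * Illustrative sketch (not part of the listed source): how a caller might
 * read the result of ocs_p2p_setup() above. The comments restate the
 * assignments made by the function; the note that the loser's addressing
 * comes from the winner's subsequent PLOGI is an assumption based on the
 * point-to-point states earlier in this file.
 */
#if 0
static void
example_p2p_setup_usage(ocs_sport_t *sport)
{
        if (ocs_p2p_setup(sport) == 0) {
                if (sport->p2p_winner) {
                        /* Local port won (or external loopback): the local
                         * N_Port ID is sport->p2p_port_id (1) and the remote
                         * node will be registered with
                         * sport->p2p_remote_port_id (2, or 1 when external
                         * loopback is enabled). */
                } else {
                        /* Remote port won: both IDs are left at 0 here and
                         * the addressing is presumably taken from the PLOGI
                         * that the winner initiates. */
                }
        }
}
#endif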
 2015 
 2016 /**
 2017  * @brief Process the FABCTL node RSCN.
 2018  *
 2019  * @par Description
 2020  * Processes the FABCTL node RSCN payload, simply passes the event to the name server.
 2021  *
 2022  * @param node Pointer to the node structure.
 2023  * @param cbdata Callback data to pass forward.
 2024  *
 2025  * @return None.
 2026  */
 2027 
 2028 static void
 2029 ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata)
 2030 {
 2031         ocs_t *ocs = node->ocs;
 2032         ocs_sport_t *sport = node->sport;
 2033         ocs_node_t *ns;
 2034 
 2035         /* Forward this event to the name-services node */
 2036         ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
 2037         if (ns != NULL)  {
 2038                 ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata);
 2039         } else {
 2040                 ocs_log_warn(ocs, "can't find name server node\n");
 2041         }
 2042 }
