1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34 /**
35 * @file
36 * SLI port (sport) object management and state machine functions.
37 */
38
39 #include "ocs.h"
40 #include "ocs_fabric.h"
41 #include "ocs_els.h"
42 #include "ocs_device.h"
43
44 static void ocs_vport_update_spec(ocs_sport_t *sport);
45 static void ocs_vport_link_down(ocs_sport_t *sport);
46
47 void ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *sport);
48 void ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *sport);
49 int ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *sport);
50 int ocs_mgmt_sport_set(char *parent, char *name, char *value, void *sport);
51 int ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
52 void *arg_out, uint32_t arg_out_length, void *sport);
53 static ocs_mgmt_functions_t sport_mgmt_functions = {
54 .get_list_handler = ocs_mgmt_sport_list,
55 .get_handler = ocs_mgmt_sport_get,
56 .get_all_handler = ocs_mgmt_sport_get_all,
57 .set_handler = ocs_mgmt_sport_set,
58 .exec_handler = ocs_mgmt_sport_exec,
59 };
60
61 /*!
62 @defgroup sport_sm SLI Port (sport) State Machine: States
63 */
64
65 /**
66 * @ingroup sport_sm
67 * @brief SLI port HW callback.
68 *
69 * @par Description
70 * This function is called in response to a HW sport event. This code resolves
71 * the reference to the sport object, and posts the corresponding event.
72 *
73 * @param arg Pointer to the OCS context.
74 * @param event HW sport event.
75 * @param data Application-specific event (pointer to the sport).
76 *
77 * @return Returns 0 on success, or a negative error value on failure.
78 */
79
80 int32_t
81 ocs_port_cb(void *arg, ocs_hw_port_event_e event, void *data)
82 {
83 ocs_t *ocs = arg;
84 ocs_sli_port_t *sport = data;
85
86 switch (event) {
87 case OCS_HW_PORT_ALLOC_OK:
88 ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_OK\n");
89 ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_OK, NULL);
90 break;
91 case OCS_HW_PORT_ALLOC_FAIL:
92 ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_FAIL\n");
93 ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_FAIL, NULL);
94 break;
95 case OCS_HW_PORT_ATTACH_OK:
96 ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_OK\n");
97 ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_OK, NULL);
98 break;
99 case OCS_HW_PORT_ATTACH_FAIL:
100 ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_FAIL\n");
101 ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_FAIL, NULL);
102 break;
103 case OCS_HW_PORT_FREE_OK:
104 ocs_log_debug(ocs, "OCS_HW_PORT_FREE_OK\n");
105 ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_OK, NULL);
106 break;
107 case OCS_HW_PORT_FREE_FAIL:
108 ocs_log_debug(ocs, "OCS_HW_PORT_FREE_FAIL\n");
109 ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_FAIL, NULL);
110 break;
111 default:
112 ocs_log_test(ocs, "unknown event %#x\n", event);
113 }
114
115 return 0;
116 }
117
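/*
 * Registration sketch (illustrative only): ocs_port_cb() is intended to be
 * handed to the HW layer during transport setup so that SLI port completions
 * are funneled into the sport state machine. The registration itself happens
 * outside this file; the snippet below assumes the HW layer exposes an
 * ocs_hw_callback() hook with an OCS_HW_CB_PORT class, and is not meant as
 * the authoritative call sequence.
 *
 *	if (ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs) != OCS_HW_RTN_SUCCESS) {
 *		ocs_log_err(ocs, "port callback registration failed\n");
 *		return -1;
 *	}
 */
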
118 /**
119 * @ingroup sport_sm
120 * @brief Allocate a SLI port object.
121 *
122 * @par Description
123 * A sport object is allocated and associated with the domain. Various
124 * structure members are initialized.
125 *
126 * @param domain Pointer to the domain structure.
127 * @param wwpn World wide port name in host endian.
128 * @param wwnn World wide node name in host endian.
129 * @param fc_id Port ID of the sport; may be specified, or use UINT32_MAX to let the fabric choose.
130 * @param enable_ini Enables initiator capability on this port using a non-zero value.
131 * @param enable_tgt Enables target capability on this port using a non-zero value.
132 *
133 * @return Pointer to an ocs_sport_t object; or NULL.
134 */
135
136 ocs_sport_t *
137 ocs_sport_alloc(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn, uint32_t fc_id, uint8_t enable_ini, uint8_t enable_tgt)
138 {
139 ocs_sport_t *sport;
140
141 if (domain->ocs->ctrlmask & OCS_CTRLMASK_INHIBIT_INITIATOR) {
142 enable_ini = 0;
143 }
144
145 /* Return a failure if this sport has already been allocated */
146 if (wwpn != 0) {
147 sport = ocs_sport_find_wwn(domain, wwnn, wwpn);
148 if (sport != NULL) {
149 ocs_log_test(domain->ocs, "Failed: SPORT %016llx %016llx already allocated\n",
150 (unsigned long long)wwnn, (unsigned long long)wwpn);
151 return NULL;
152 }
153 }
154
155 sport = ocs_malloc(domain->ocs, sizeof(*sport), OCS_M_NOWAIT | OCS_M_ZERO);
156 if (sport) {
157 sport->ocs = domain->ocs;
158 ocs_snprintf(sport->display_name, sizeof(sport->display_name), "------");
159 sport->domain = domain;
160 sport->lookup = spv_new(domain->ocs);
161 sport->instance_index = domain->sport_instance_count++;
162 ocs_sport_lock_init(sport);
163 ocs_list_init(&sport->node_list, ocs_node_t, link);
164 sport->sm.app = sport;
165 sport->enable_ini = enable_ini;
166 sport->enable_tgt = enable_tgt;
167 sport->enable_rscn = (sport->enable_ini || (sport->enable_tgt && enable_target_rscn(sport->ocs)));
168
169 /* Copy service parameters from domain */
170 ocs_memcpy(sport->service_params, domain->service_params, sizeof(fc_plogi_payload_t));
171
172 /* Update requested fc_id */
173 sport->fc_id = fc_id;
174
175 /* Update the sport's service parameters for the new WWNs */
176 sport->wwpn = wwpn;
177 sport->wwnn = wwnn;
178 ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx" , (unsigned long long)wwnn);
179
180 /* Initialize node group list */
181 ocs_lock_init(sport->ocs, &sport->node_group_lock, "node_group_lock[%d]", sport->instance_index);
182 ocs_list_init(&sport->node_group_dir_list, ocs_node_group_dir_t, link);
183
184 /* if this is the "first" sport of the domain, then make it the "phys" sport */
185 ocs_domain_lock(domain);
186 if (ocs_list_empty(&domain->sport_list)) {
187 domain->sport = sport;
188 }
189
190 ocs_list_add_tail(&domain->sport_list, sport);
191 ocs_domain_unlock(domain);
192
193 sport->mgmt_functions = &sport_mgmt_functions;
194
195 ocs_log_debug(domain->ocs, "[%s] allocate sport\n", sport->display_name);
196 }
197 return sport;
198 }
199
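/*
 * Usage sketch (illustrative only): allocating an additional sport on an
 * existing domain. The WWN values are hypothetical placeholders; passing
 * UINT32_MAX for the FC_ID defers address assignment to the fabric. Callers
 * normally follow the allocation with a state machine transition, as
 * ocs_vport_start() does for vports further below.
 *
 *	ocs_sport_t *new_sport;
 *
 *	new_sport = ocs_sport_alloc(domain, 0x10000090fa000001ull, 0x20000090fa000001ull,
 *				    UINT32_MAX, 1, 0);
 *	if (new_sport == NULL) {
 *		ocs_log_err(domain->ocs, "sport allocation failed\n");
 *	}
 */
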
200 /**
201 * @ingroup sport_sm
202 * @brief Free a SLI port object.
203 *
204 * @par Description
205 * The sport object is freed.
206 *
207 * @param sport Pointer to the SLI port object.
208 *
209 * @return None.
210 */
211
212 void
213 ocs_sport_free(ocs_sport_t *sport)
214 {
215 ocs_domain_t *domain;
216 ocs_node_group_dir_t *node_group_dir;
217 ocs_node_group_dir_t *node_group_dir_next;
218 int post_all_free = FALSE;
219
220 if (sport) {
221 domain = sport->domain;
222 ocs_log_debug(domain->ocs, "[%s] free sport\n", sport->display_name);
223 ocs_domain_lock(domain);
224 ocs_list_remove(&domain->sport_list, sport);
225 ocs_sport_lock(sport);
226 spv_del(sport->lookup);
227 sport->lookup = NULL;
228
229 ocs_lock(&domain->lookup_lock);
230 /* Remove the sport from the domain's sparse vector lookup table */
231 spv_set(domain->lookup, sport->fc_id, NULL);
232 ocs_unlock(&domain->lookup_lock);
233
234 /* if this is the physical sport, then clear it out of the domain */
235 if (sport == domain->sport) {
236 domain->sport = NULL;
237 }
238
239 /*
240 * If the domain's sport_list is empty, then post the ALL_CHILD_NODES_FREE event to the domain,
241 * after the lock is released. The domain may be freed as a result of the event.
242 */
243 if (ocs_list_empty(&domain->sport_list)) {
244 post_all_free = TRUE;
245 }
246
247 /* Free any node group directories */
248 ocs_lock(&sport->node_group_lock);
249 ocs_list_foreach_safe(&sport->node_group_dir_list, node_group_dir, node_group_dir_next) {
250 ocs_unlock(&sport->node_group_lock);
251 ocs_node_group_dir_free(node_group_dir);
252 ocs_lock(&sport->node_group_lock);
253 }
254 ocs_unlock(&sport->node_group_lock);
255 ocs_sport_unlock(sport);
256 ocs_domain_unlock(domain);
257
258 if (post_all_free) {
259 ocs_domain_post_event(domain, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
260 }
261
262 ocs_sport_lock_free(sport);
263 ocs_lock_free(&sport->node_group_lock);
264 ocs_scsi_sport_deleted(sport);
265
266 ocs_free(domain->ocs, sport, sizeof(*sport));
267
268 }
269 }
270
271 /**
272 * @ingroup sport_sm
273 * @brief Free memory resources of a SLI port object.
274 *
275 * @par Description
276 * The sport's child node objects are force freed, and then the sport object itself is freed.
277 *
278 * @param sport Pointer to the SLI port object.
279 *
280 * @return None.
281 */
282
283 void ocs_sport_force_free(ocs_sport_t *sport)
284 {
285 ocs_node_t *node;
286 ocs_node_t *next;
287
288 /* shutdown sm processing */
289 ocs_sm_disable(&sport->sm);
290
291 ocs_scsi_notify_sport_force_free(sport);
292
293 ocs_sport_lock(sport);
294 ocs_list_foreach_safe(&sport->node_list, node, next) {
295 ocs_node_force_free(node);
296 }
297 ocs_sport_unlock(sport);
298 ocs_sport_free(sport);
299 }
300
301 /**
302 * @ingroup sport_sm
303 * @brief Return a SLI port object, given an instance index.
304 *
305 * @par Description
306 * A pointer to a sport object is returned, given its instance @c index.
307 *
308 * @param domain Pointer to the domain.
309 * @param index Instance index value to find.
310 *
311 * @return Returns a pointer to the ocs_sport_t object; or NULL.
312 */
313
314 ocs_sport_t *
315 ocs_sport_get_instance(ocs_domain_t *domain, uint32_t index)
316 {
317 ocs_sport_t *sport;
318
319 ocs_domain_lock(domain);
320 ocs_list_foreach(&domain->sport_list, sport) {
321 if (sport->instance_index == index) {
322 ocs_domain_unlock(domain);
323 return sport;
324 }
325 }
326 ocs_domain_unlock(domain);
327 return NULL;
328 }
329
330 /**
331 * @ingroup sport_sm
332 * @brief Find a SLI port object, given an FC_ID.
333 *
334 * @par Description
335 * Returns a pointer to the sport object, given an FC_ID.
336 *
337 * @param domain Pointer to the domain.
338 * @param d_id FC_ID to find.
339 *
340 * @return Returns a pointer to the ocs_sport_t; or NULL.
341 */
342
343 ocs_sport_t *
344 ocs_sport_find(ocs_domain_t *domain, uint32_t d_id)
345 {
346 ocs_sport_t *sport;
347
348 ocs_assert(domain, NULL);
349 ocs_lock(&domain->lookup_lock);
350 if (domain->lookup == NULL) {
351 ocs_log_test(domain->ocs, "assertion failed: domain->lookup is not valid\n");
352 ocs_unlock(&domain->lookup_lock);
353 return NULL;
354 }
355
356 sport = spv_get(domain->lookup, d_id);
357 ocs_unlock(&domain->lookup_lock);
358 return sport;
359 }
360
361 /**
362 * @ingroup sport_sm
363 * @brief Find a SLI port, given the WWNN and WWPN.
364 *
365 * @par Description
366 * Return a pointer to a sport, given the WWNN and WWPN.
367 *
368 * @param domain Pointer to the domain.
369 * @param wwnn World wide node name.
370 * @param wwpn World wide port name.
371 *
372 * @return Returns a pointer to a SLI port, if found; or NULL.
373 */
374
375 ocs_sport_t *
376 ocs_sport_find_wwn(ocs_domain_t *domain, uint64_t wwnn, uint64_t wwpn)
377 {
378 ocs_sport_t *sport = NULL;
379
380 ocs_domain_lock(domain);
381 ocs_list_foreach(&domain->sport_list, sport) {
382 if ((sport->wwnn == wwnn) && (sport->wwpn == wwpn)) {
383 ocs_domain_unlock(domain);
384 return sport;
385 }
386 }
387 ocs_domain_unlock(domain);
388 return NULL;
389 }
390
391 /**
392 * @ingroup sport_sm
393 * @brief Request a SLI port attach.
394 *
395 * @par Description
396 * External call to request an attach for a sport, given an FC_ID.
397 *
398 * @param sport Pointer to the sport context.
399 * @param fc_id FC_ID with which to attach.
400 *
401 * @return Returns 0 on success, or a negative error value on failure.
402 */
403
404 int32_t
405 ocs_sport_attach(ocs_sport_t *sport, uint32_t fc_id)
406 {
407 ocs_hw_rtn_e rc;
408 ocs_node_t *node;
409
410 /* Set our lookup */
411 ocs_lock(&sport->domain->lookup_lock);
412 spv_set(sport->domain->lookup, fc_id, sport);
413 ocs_unlock(&sport->domain->lookup_lock);
414
415 /* Update our display_name */
416 ocs_node_fcid_display(fc_id, sport->display_name, sizeof(sport->display_name));
417 ocs_sport_lock(sport);
418 ocs_list_foreach(&sport->node_list, node) {
419 ocs_node_update_display_name(node);
420 }
421 ocs_sport_unlock(sport);
422 ocs_log_debug(sport->ocs, "[%s] attach sport: fc_id x%06x\n", sport->display_name, fc_id);
423
424 rc = ocs_hw_port_attach(&sport->ocs->hw, sport, fc_id);
425 if (rc != OCS_HW_RTN_SUCCESS) {
426 ocs_log_err(sport->ocs, "ocs_hw_port_attach failed: %d\n", rc);
427 return -1;
428 }
429 return 0;
430 }
431
432 /**
433 * @brief Common SLI port state machine declarations and initialization.
434 */
435 #define std_sport_state_decl() \
436 ocs_sport_t *sport = NULL; \
437 ocs_domain_t *domain = NULL; \
438 ocs_t *ocs = NULL; \
439 \
440 ocs_assert(ctx, NULL); \
441 sport = ctx->app; \
442 ocs_assert(sport, NULL); \
443 \
444 domain = sport->domain; \
445 ocs_assert(domain, NULL); \
446 ocs = sport->ocs; \
447 ocs_assert(ocs, NULL);
448
449 /**
450 * @brief Common SLI port state machine trace logging.
451 */
452 #define sport_sm_trace(sport) \
453 do { \
454 if (OCS_LOG_ENABLE_DOMAIN_SM_TRACE(ocs)) \
455 ocs_log_debug(ocs, "[%s] %-20s\n", sport->display_name, ocs_sm_event_name(evt)); \
456 } while (0)
457
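/*
 * Pattern note: every sport state handler below is built from these two
 * macros plus __ocs_sport_common(): declare and validate the context, trace
 * the event, handle only the events the state cares about, and delegate the
 * rest. A skeleton for a hypothetical new state would look like:
 *
 *	void *
 *	__ocs_sport_example_state(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 *	{
 *		std_sport_state_decl();
 *
 *		sport_sm_trace(sport);
 *
 *		switch(evt) {
 *		case OCS_EVT_ENTER:
 *			break;		(state-specific work goes here)
 *		default:
 *			__ocs_sport_common(__func__, ctx, evt, arg);
 *			return NULL;
 *		}
 *		return NULL;
 *	}
 */
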
458 /**
459 * @brief SLI port state machine: Common event handler.
460 *
461 * @par Description
462 * Handle common sport events.
463 *
464 * @param funcname Function name to display.
465 * @param ctx Sport state machine context.
466 * @param evt Event to process.
467 * @param arg Per event optional argument.
468 *
469 * @return Returns NULL.
470 */
471
472 static void *
473 __ocs_sport_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
474 {
475 std_sport_state_decl();
476
477 switch(evt) {
478 case OCS_EVT_ENTER:
479 case OCS_EVT_REENTER:
480 case OCS_EVT_EXIT:
481 case OCS_EVT_ALL_CHILD_NODES_FREE:
482 break;
483 case OCS_EVT_SPORT_ATTACH_OK:
484 ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
485 break;
486 case OCS_EVT_SHUTDOWN: {
487 ocs_node_t *node;
488 ocs_node_t *node_next;
489 int node_list_empty;
490
491 /* Flag this sport as shutting down */
492 sport->shutting_down = 1;
493
494 if (sport->is_vport) {
495 ocs_vport_link_down(sport);
496 }
497
498 ocs_sport_lock(sport);
499 node_list_empty = ocs_list_empty(&sport->node_list);
500 ocs_sport_unlock(sport);
501
502 if (node_list_empty) {
503 /* sm: node list is empty / ocs_hw_port_free
504 * Remove the sport from the domain's sparse vector lookup table */
505 ocs_lock(&domain->lookup_lock);
506 spv_set(domain->lookup, sport->fc_id, NULL);
507 ocs_unlock(&domain->lookup_lock);
508 ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
509 if (ocs_hw_port_free(&ocs->hw, sport)) {
510 ocs_log_test(sport->ocs, "ocs_hw_port_free failed\n");
511 /* Not much we can do, free the sport anyway */
512 ocs_sport_free(sport);
513 }
514 } else {
515 /* sm: node list is not empty / shutdown nodes */
516 ocs_sm_transition(ctx, __ocs_sport_wait_shutdown, NULL);
517 ocs_sport_lock(sport);
518 ocs_list_foreach_safe(&sport->node_list, node, node_next) {
519 /*
520 * If this is a vport, logout of the fabric controller so that it
521 * deletes the vport on the switch.
522 */
523 if((node->rnode.fc_id == FC_ADDR_FABRIC) && (sport->is_vport)) {
524 /* if link is down, don't send logo */
525 if (sport->ocs->hw.link.status == SLI_LINK_STATUS_DOWN) {
526 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
527 } else {
528 ocs_log_debug(ocs, "[%s] sport shutdown vport, sending LOGO to node\n",
529 node->display_name);
530
531 if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
532 0, NULL, NULL) == NULL) {
533 /* failed to send LOGO, go ahead and clean up the node anyway */
534 node_printf(node, "Failed to send LOGO\n");
535 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
536 } else {
537 /* sent LOGO, wait for response */
538 ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
539 }
540 }
541 } else {
542 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
543 }
544 }
545 ocs_sport_unlock(sport);
546 }
547 break;
548 }
549 default:
550 ocs_log_test(sport->ocs, "[%s] %-20s %-20s not handled\n", sport->display_name, funcname, ocs_sm_event_name(evt));
551 break;
552 }
553
554 return NULL;
555 }
556
557 /**
558 * @ingroup sport_sm
559 * @brief SLI port state machine: Physical sport allocated.
560 *
561 * @par Description
562 * This is the initial state for sport objects.
563 *
564 * @param ctx Sport state machine context.
565 * @param evt Event to process.
566 * @param arg Per event optional argument.
567 *
568 * @return Returns NULL.
569 */
570
571 void *
572 __ocs_sport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
573 {
574 std_sport_state_decl();
575
576 sport_sm_trace(sport);
577
578 switch(evt) {
579 /* the physical sport is attached */
580 case OCS_EVT_SPORT_ATTACH_OK:
581 ocs_assert(sport == domain->sport, NULL);
582 ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
583 break;
584
585 case OCS_EVT_SPORT_ALLOC_OK:
586 /* ignore */
587 break;
588 default:
589 __ocs_sport_common(__func__, ctx, evt, arg);
590 return NULL;
591 }
592 return NULL;
593 }
594
595 /**
596 * @ingroup sport_sm
597 * @brief SLI port state machine: Handle initial virtual port events.
598 *
599 * @par Description
600 * This state is entered when a virtual port is instantiated.
601 *
602 * @param ctx Sport state machine context.
603 * @param evt Event to process.
604 * @param arg Per event optional argument.
605 *
606 * @return Returns NULL.
607 */
608
609 void *
610 __ocs_sport_vport_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
611 {
612 std_sport_state_decl();
613
614 sport_sm_trace(sport);
615
616 switch(evt) {
617 case OCS_EVT_ENTER: {
618 uint64_t be_wwpn = ocs_htobe64(sport->wwpn);
619
620 if (sport->wwpn == 0) {
621 ocs_log_debug(ocs, "vport: letting f/w select WWN\n");
622 }
623
624 if (sport->fc_id != UINT32_MAX) {
625 ocs_log_debug(ocs, "vport: hard coding port id: %x\n", sport->fc_id);
626 }
627
628 ocs_sm_transition(ctx, __ocs_sport_vport_wait_alloc, NULL);
629 /* If wwpn is zero, then we'll let the f/w select the WWPN */
630 if (ocs_hw_port_alloc(&ocs->hw, sport, sport->domain,
631 (sport->wwpn == 0) ? NULL : (uint8_t *)&be_wwpn)) {
632 ocs_log_err(ocs, "Can't allocate port\n");
633 break;
634 }
635
636 break;
637 }
638 default:
639 __ocs_sport_common(__func__, ctx, evt, arg);
640 return NULL;
641 }
642 return NULL;
643 }
644
645 /**
646 * @ingroup sport_sm
647 * @brief SLI port state machine: Wait for the HW SLI port allocation to complete.
648 *
649 * @par Description
650 * Waits for the HW sport allocation request to complete.
651 *
652 * @param ctx Sport state machine context.
653 * @param evt Event to process.
654 * @param arg Per event optional argument.
655 *
656 * @return Returns NULL.
657 */
658
659 void *
660 __ocs_sport_vport_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
661 {
662 std_sport_state_decl();
663
664 sport_sm_trace(sport);
665
666 switch(evt) {
667 case OCS_EVT_SPORT_ALLOC_OK: {
668 fc_plogi_payload_t *sp = (fc_plogi_payload_t*) sport->service_params;
669 ocs_node_t *fabric;
670
671 /* If we let the f/w assign the WWNs, then update the sport WWNs with those returned by the HW */
672 if (sport->wwnn == 0) {
673 sport->wwnn = ocs_be64toh(sport->sli_wwnn);
674 sport->wwpn = ocs_be64toh(sport->sli_wwpn);
675 ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long) sport->wwnn);
676 }
677
678 /* Update the sport's service parameters */
679 sp->port_name_hi = ocs_htobe32((uint32_t) (sport->wwpn >> 32ll));
680 sp->port_name_lo = ocs_htobe32((uint32_t) sport->wwpn);
681 sp->node_name_hi = ocs_htobe32((uint32_t) (sport->wwnn >> 32ll));
682 sp->node_name_lo = ocs_htobe32((uint32_t) sport->wwnn);
683
684 /* if sport->fc_id is uninitialized, then request that the fabric node use FDISC
685 * to find an fc_id. Otherwise we're restoring vports, or we're in
686 * fabric emulation mode, so attach the fc_id
687 */
688 if (sport->fc_id == UINT32_MAX) {
689 fabric = ocs_node_alloc(sport, FC_ADDR_FABRIC, FALSE, FALSE);
690 if (fabric == NULL) {
691 ocs_log_err(ocs, "ocs_node_alloc() failed\n");
692 return NULL;
693 }
694 ocs_node_transition(fabric, __ocs_vport_fabric_init, NULL);
695 } else {
696 ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long)sport->wwnn);
697 ocs_sport_attach(sport, sport->fc_id);
698 }
699 ocs_sm_transition(ctx, __ocs_sport_vport_allocated, NULL);
700 break;
701 }
702 default:
703 __ocs_sport_common(__func__, ctx, evt, arg);
704 return NULL;
705 }
706 return NULL;
707 }
708
709 /**
710 * @ingroup sport_sm
711 * @brief SLI port state machine: virtual sport allocated.
712 *
713 * @par Description
714 * This state is entered after the sport is allocated; it then waits for a fabric node
715 * FDISC to complete, which requests a sport attach.
716 * The sport attach complete is handled in this state.
717 *
718 * @param ctx Sport state machine context.
719 * @param evt Event to process.
720 * @param arg Per event optional argument.
721 *
722 * @return Returns NULL.
723 */
724
725 void *
726 __ocs_sport_vport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
727 {
728 std_sport_state_decl();
729
730 sport_sm_trace(sport);
731
732 switch(evt) {
733 case OCS_EVT_SPORT_ATTACH_OK: {
734 ocs_node_t *node;
735
736 if (!(domain->femul_enable)) {
737 /* Find our fabric node, and forward this event */
738 node = ocs_node_find(sport, FC_ADDR_FABRIC);
739 if (node == NULL) {
740 ocs_log_test(ocs, "can't find node %06x\n", FC_ADDR_FABRIC);
741 break;
742 }
743 /* sm: / forward sport attach to fabric node */
744 ocs_node_post_event(node, evt, NULL);
745 }
746 ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
747 break;
748 }
749 default:
750 __ocs_sport_common(__func__, ctx, evt, arg);
751 return NULL;
752 }
753 return NULL;
754 }
755
756 /**
757 * @ingroup sport_sm
758 * @brief SLI port state machine: Attached.
759 *
760 * @par Description
761 * State entered after the sport attach has completed.
762 *
763 * @param ctx Sport state machine context.
764 * @param evt Event to process.
765 * @param arg Per event optional argument.
766 *
767 * @return Returns NULL.
768 */
769
770 void *
771 __ocs_sport_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
772 {
773 std_sport_state_decl();
774
775 sport_sm_trace(sport);
776
777 switch(evt) {
778 case OCS_EVT_ENTER: {
779 ocs_node_t *node;
780
781 ocs_log_debug(ocs, "[%s] SPORT attached WWPN %016llx WWNN %016llx\n", sport->display_name,
782 (unsigned long long)sport->wwpn, (unsigned long long)sport->wwnn);
783 ocs_sport_lock(sport);
784 ocs_list_foreach(&sport->node_list, node) {
785 ocs_node_update_display_name(node);
786 }
787 ocs_sport_unlock(sport);
788 sport->tgt_id = sport->fc_id;
789 if (sport->enable_ini) {
790 ocs_scsi_ini_new_sport(sport);
791 }
792 if (sport->enable_tgt) {
793 ocs_scsi_tgt_new_sport(sport);
794 }
795
796 /* Update the vport parameters (if it's not the physical sport) */
797 if (sport->is_vport) {
798 ocs_vport_update_spec(sport);
799 }
800
801 break;
802 }
803
804 case OCS_EVT_EXIT:
805 ocs_log_debug(ocs, "[%s] SPORT detached WWPN %016llx WWNN %016llx\n", sport->display_name,
806 (unsigned long long)sport->wwpn, (unsigned long long)sport->wwnn);
807 if (sport->enable_ini) {
808 ocs_scsi_ini_del_sport(sport);
809 }
810 if (sport->enable_tgt) {
811 ocs_scsi_tgt_del_sport(sport);
812 }
813 break;
814 default:
815 __ocs_sport_common(__func__, ctx, evt, arg);
816 return NULL;
817 }
818 return NULL;
819 }
820
821 /**
822 * @ingroup sport_sm
823 * @brief SLI port state machine: Wait for the node shutdowns to complete.
824 *
825 * @par Description
826 * Waits for the ALL_CHILD_NODES_FREE event to be posted from the node
827 * shutdown process.
828 *
829 * @param ctx Sport state machine context.
830 * @param evt Event to process.
831 * @param arg Per event optional argument.
832 *
833 * @return Returns NULL.
834 */
835
836 void *
837 __ocs_sport_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
838 {
839 std_sport_state_decl();
840
841 sport_sm_trace(sport);
842
843 switch(evt) {
844 case OCS_EVT_SPORT_ALLOC_OK:
845 case OCS_EVT_SPORT_ALLOC_FAIL:
846 case OCS_EVT_SPORT_ATTACH_OK:
847 case OCS_EVT_SPORT_ATTACH_FAIL:
848 /* ignore these events - just wait for the all free event */
849 break;
850
851 case OCS_EVT_ALL_CHILD_NODES_FREE: {
852 /* Remove the sport from the domain's sparse vector lookup table */
853 ocs_lock(&domain->lookup_lock);
854 spv_set(domain->lookup, sport->fc_id, NULL);
855 ocs_unlock(&domain->lookup_lock);
856 ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
857 if (ocs_hw_port_free(&ocs->hw, sport)) {
858 ocs_log_err(sport->ocs, "ocs_hw_port_free failed\n");
859 /* Not much we can do, free the sport anyway */
860 ocs_sport_free(sport);
861 }
862 break;
863 }
864 default:
865 __ocs_sport_common(__func__, ctx, evt, arg);
866 return NULL;
867 }
868 return NULL;
869 }
870
871 /**
872 * @ingroup sport_sm
873 * @brief SLI port state machine: Wait for the HW's port free to complete.
874 *
875 * @par Description
876 * Waits for the HW's port free to complete.
877 *
878 * @param ctx Sport state machine context.
879 * @param evt Event to process.
880 * @param arg Per event optional argument.
881 *
882 * @return Returns NULL.
883 */
884
885 void *
886 __ocs_sport_wait_port_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
887 {
888 std_sport_state_decl();
889
890 sport_sm_trace(sport);
891
892 switch(evt) {
893 case OCS_EVT_SPORT_ATTACH_OK:
894 /* Ignore as we are waiting for the free CB */
895 break;
896 case OCS_EVT_SPORT_FREE_OK: {
897 /* All done, free myself */
898 ocs_sport_free(sport);
899 break;
900 }
901 default:
902 __ocs_sport_common(__func__, ctx, evt, arg);
903 return NULL;
904 }
905 return NULL;
906 }
907
908 /**
909 * @ingroup sport_sm
910 * @brief Start the vports on a domain.
911 *
912 * @par Description
913 * Use the vport specification to find the associated vports and start them.
914 *
915 * @param domain Pointer to the domain context.
916 *
917 * @return Returns 0 on success, or a negative error value on failure.
918 */
919 int32_t
920 ocs_vport_start(ocs_domain_t *domain)
921 {
922 ocs_t *ocs = domain->ocs;
923 ocs_xport_t *xport = ocs->xport;
924 ocs_vport_spec_t *vport;
925 ocs_vport_spec_t *next;
926 ocs_sport_t *sport;
927 int32_t rc = 0;
928
929 ocs_device_lock(ocs);
930 ocs_list_foreach_safe(&xport->vport_list, vport, next) {
931 if (vport->domain_instance == domain->instance_index &&
932 vport->sport == NULL) {
933 /* If role not set, skip this vport */
934 if (!(vport->enable_ini || vport->enable_tgt)) {
935 continue;
936 }
937
938 /* Allocate a sport */
939 vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
940 vport->enable_ini, vport->enable_tgt);
941 if (sport == NULL) {
942 rc = -1;
943 } else {
944 sport->is_vport = 1;
945 sport->tgt_data = vport->tgt_data;
946 sport->ini_data = vport->ini_data;
947
948 /* Transition to vport_init */
949 ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
950 }
951 }
952 }
953 ocs_device_unlock(ocs);
954 return rc;
955 }
956
957 /**
958 * @ingroup sport_sm
959 * @brief Clear the sport reference in the vport specification.
960 *
961 * @par Description
962 * Clear the sport pointer on the vport specification when the vport is torn down. This allows it to be
963 * re-created when the link is re-established.
964 *
965 * @param sport Pointer to the sport context.
966 */
967 static void
968 ocs_vport_link_down(ocs_sport_t *sport)
969 {
970 ocs_t *ocs = sport->ocs;
971 ocs_xport_t *xport = ocs->xport;
972 ocs_vport_spec_t *vport;
973
974 ocs_device_lock(ocs);
975 ocs_list_foreach(&xport->vport_list, vport) {
976 if (vport->sport == sport) {
977 vport->sport = NULL;
978 break;
979 }
980 }
981 ocs_device_unlock(ocs);
982 }
983
984 /**
985 * @ingroup sport_sm
986 * @brief Allocate a new virtual SLI port.
987 *
988 * @par Description
989 * A new sport is created, in response to an external management request.
990 *
991 * @n @b Note: If the WWPN is zero, the firmware will assign the WWNs.
992 *
993 * @param domain Pointer to the domain context.
994 * @param wwpn World wide port name.
995 * @param wwnn World wide node name
996 * @param fc_id Requested port ID (used in fabric emulation mode).
997 * @param ini TRUE, if port is created as an initiator node.
998 * @param tgt TRUE, if port is created as a target node.
999 * @param tgt_data Pointer to target specific data
1000 * @param ini_data Pointer to initiator specific data
1001 * @param restore_vport If TRUE, then the vport will be re-created automatically
1002 * on link disruption.
1003 *
1004 * @return Returns 0 on success; or a negative error value on failure.
1005 */
1006
1007 int32_t
1008 ocs_sport_vport_new(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn,
1009 uint32_t fc_id, uint8_t ini, uint8_t tgt, void *tgt_data,
1010 void *ini_data, uint8_t restore_vport)
1011 {
1012 ocs_sport_t *sport;
1013
1014 if (ini && (domain->ocs->enable_ini == 0)) {
1015 ocs_log_test(domain->ocs, "driver initiator functionality not enabled\n");
1016 return -1;
1017 }
1018
1019 if (tgt && (domain->ocs->enable_tgt == 0)) {
1020 ocs_log_test(domain->ocs, "driver target functionality not enabled\n");
1021 return -1;
1022 }
1023
1024 /* Create a vport spec if we need to recreate this vport after a link up event */
1025 if (restore_vport) {
1026 if (ocs_vport_create_spec(domain->ocs, wwnn, wwpn, fc_id, ini, tgt, tgt_data, ini_data)) {
1027 ocs_log_test(domain->ocs, "failed to create vport object entry\n");
1028 return -1;
1029 }
1030 return ocs_vport_start(domain);
1031 }
1032
1033 /* Allocate a sport */
1034 sport = ocs_sport_alloc(domain, wwpn, wwnn, fc_id, ini, tgt);
1035
1036 if (sport == NULL) {
1037 return -1;
1038 }
1039
1040 sport->is_vport = 1;
1041 sport->tgt_data = tgt_data;
1042 sport->ini_data = ini_data;
1043
1044 /* Transition to vport_init */
1045 ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
1046
1047 return 0;
1048 }
1049
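/*
 * Usage sketch (illustrative only): creating a target-capable virtual port
 * that is re-created automatically after a link bounce. The WWNs are
 * hypothetical placeholders; passing zero for both would let the firmware
 * select them, and UINT32_MAX requests a fabric-assigned FC_ID.
 *
 *	if (ocs_sport_vport_new(domain, 0x10000090fa000002ull, 0x20000090fa000002ull,
 *				UINT32_MAX, FALSE, TRUE, NULL, NULL, TRUE) != 0) {
 *		ocs_log_err(domain->ocs, "failed to create vport\n");
 *	}
 */
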
1050 int32_t
1051 ocs_sport_vport_alloc(ocs_domain_t *domain, ocs_vport_spec_t *vport)
1052 {
1053 ocs_sport_t *sport = NULL;
1054
1055 if (domain == NULL) {
1056 return (0);
1057 }
1058
1059 ocs_assert((vport->sport == NULL), -1);
1060
1061 /* Allocate a sport */
1062 vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, UINT32_MAX, vport->enable_ini, vport->enable_tgt);
1063
1064 if (sport == NULL) {
1065 return -1;
1066 }
1067
1068 sport->is_vport = 1;
1069 sport->tgt_data = vport->tgt_data;
1070 sport->ini_data = vport->ini_data;
1071
1072 /* Transition to vport_init */
1073 ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
1074
1075 return (0);
1076 }
1077
1078 /**
1079 * @ingroup sport_sm
1080 * @brief Remove a previously-allocated virtual port.
1081 *
1082 * @par Description
1083 * A previously-allocated virtual port is removed by posting the shutdown event to the
1084 * sport with a matching WWN.
1085 *
1086 * @param ocs Pointer to the device object.
1087 * @param domain Pointer to the domain structure (may be NULL).
1088 * @param wwpn World wide port name of the port to delete (host endian).
1089 * @param wwnn World wide node name of the port to delete (host endian).
1090 *
1091 * @return Returns 0 on success, or a negative error value on failure.
1092 */
1093
1094 int32_t ocs_sport_vport_del(ocs_t *ocs, ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn)
1095 {
1096 ocs_xport_t *xport = ocs->xport;
1097 ocs_sport_t *sport;
1098 int found = 0;
1099 ocs_vport_spec_t *vport;
1100 ocs_vport_spec_t *next;
1101 uint32_t instance;
1102
1103 /* If no domain is given, use instance 0, otherwise use domain instance */
1104 if (domain == NULL) {
1105 instance = 0;
1106 } else {
1107 instance = domain->instance_index;
1108 }
1109
1110 /* walk the ocs_vport_list and remove from there */
1111
1112 ocs_device_lock(ocs);
1113 ocs_list_foreach_safe(&xport->vport_list, vport, next) {
1114 if ((vport->domain_instance == instance) &&
1115 (vport->wwpn == wwpn) && (vport->wwnn == wwnn)) {
1116 vport->sport = NULL;
1117 break;
1118 }
1119 }
1120 ocs_device_unlock(ocs);
1121
1122 if (domain == NULL) {
1123 /* No domain means no sport to look for */
1124 return 0;
1125 }
1126
1127 ocs_domain_lock(domain);
1128 ocs_list_foreach(&domain->sport_list, sport) {
1129 if ((sport->wwpn == wwpn) && (sport->wwnn == wwnn)) {
1130 found = 1;
1131 break;
1132 }
1133 }
1134 if (found) {
1135 /* Shutdown this SPORT */
1136 ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
1137 }
1138 ocs_domain_unlock(domain);
1139 return 0;
1140 }
1141
1142 /**
1143 * @brief Force free all saved vports.
1144 *
1145 * @par Description
1146 * Delete all device vports.
1147 *
1148 * @param ocs Pointer to the device object.
1149 *
1150 * @return None.
1151 */
1152
1153 void
1154 ocs_vport_del_all(ocs_t *ocs)
1155 {
1156 ocs_xport_t *xport = ocs->xport;
1157 ocs_vport_spec_t *vport;
1158 ocs_vport_spec_t *next;
1159
1160 ocs_device_lock(ocs);
1161 ocs_list_foreach_safe(&xport->vport_list, vport, next) {
1162 ocs_list_remove(&xport->vport_list, vport);
1163 ocs_free(ocs, vport, sizeof(*vport));
1164 }
1165 ocs_device_unlock(ocs);
1166 }
1167
1168 /**
1169 * @ingroup sport_sm
1170 * @brief Generate a SLI port ddump.
1171 *
1172 * @par Description
1173 * Generates the SLI port ddump data.
1174 *
1175 * @param textbuf Pointer to the text buffer.
1176 * @param sport Pointer to the SLI-4 port.
1177 *
1178 * @return Returns 0 on success, or a negative value on failure.
1179 */
1180
1181 int
1182 ocs_ddump_sport(ocs_textbuf_t *textbuf, ocs_sli_port_t *sport)
1183 {
1184 ocs_node_t *node;
1185 ocs_node_group_dir_t *node_group_dir;
1186 int retval = 0;
1187
1188 ocs_ddump_section(textbuf, "sport", sport->instance_index);
1189 ocs_ddump_value(textbuf, "display_name", "%s", sport->display_name);
1190
1191 ocs_ddump_value(textbuf, "is_vport", "%d", sport->is_vport);
1192 ocs_ddump_value(textbuf, "enable_ini", "%d", sport->enable_ini);
1193 ocs_ddump_value(textbuf, "enable_tgt", "%d", sport->enable_tgt);
1194 ocs_ddump_value(textbuf, "shutting_down", "%d", sport->shutting_down);
1195 ocs_ddump_value(textbuf, "topology", "%d", sport->topology);
1196 ocs_ddump_value(textbuf, "p2p_winner", "%d", sport->p2p_winner);
1197 ocs_ddump_value(textbuf, "p2p_port_id", "%06x", sport->p2p_port_id);
1198 ocs_ddump_value(textbuf, "p2p_remote_port_id", "%06x", sport->p2p_remote_port_id);
1199 ocs_ddump_value(textbuf, "wwpn", "%016llx", (unsigned long long)sport->wwpn);
1200 ocs_ddump_value(textbuf, "wwnn", "%016llx", (unsigned long long)sport->wwnn);
1201 /*TODO: service_params */
1202
1203 ocs_ddump_value(textbuf, "indicator", "x%x", sport->indicator);
1204 ocs_ddump_value(textbuf, "fc_id", "x%06x", sport->fc_id);
1205 ocs_ddump_value(textbuf, "index", "%d", sport->index);
1206
1207 ocs_display_sparams(NULL, "sport_sparams", 1, textbuf, sport->service_params+4);
1208
1209 /* HLM dump */
1210 ocs_ddump_section(textbuf, "hlm", sport->instance_index);
1211 ocs_lock(&sport->node_group_lock);
1212 ocs_list_foreach(&sport->node_group_dir_list, node_group_dir) {
1213 ocs_remote_node_group_t *remote_node_group;
1214
1215 ocs_ddump_section(textbuf, "node_group_dir", node_group_dir->instance_index);
1216
1217 ocs_ddump_value(textbuf, "node_group_list_count", "%d", node_group_dir->node_group_list_count);
1218 ocs_ddump_value(textbuf, "next_idx", "%d", node_group_dir->next_idx);
1219 ocs_list_foreach(&node_group_dir->node_group_list, remote_node_group) {
1220 ocs_ddump_section(textbuf, "node_group", remote_node_group->instance_index);
1221 ocs_ddump_value(textbuf, "indicator", "x%x", remote_node_group->indicator);
1222 ocs_ddump_value(textbuf, "index", "x%x", remote_node_group->index);
1223 ocs_ddump_value(textbuf, "instance_index", "x%x", remote_node_group->instance_index);
1224 ocs_ddump_endsection(textbuf, "node_group", 0);
1225 }
1226 ocs_ddump_endsection(textbuf, "node_group_dir", 0);
1227 }
1228 ocs_unlock(&sport->node_group_lock);
1229 ocs_ddump_endsection(textbuf, "hlm", sport->instance_index);
1230
1231 ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
1232 ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
1233
1234 /* Dump all the nodes */
1235 if (ocs_sport_lock_try(sport) != TRUE) {
1236 /* Didn't get lock */
1237 return -1;
1238 }
1239 /* Here the sport lock is held */
1240 ocs_list_foreach(&sport->node_list, node) {
1241 retval = ocs_ddump_node(textbuf, node);
1242 if (retval != 0) {
1243 break;
1244 }
1245 }
1246 ocs_sport_unlock(sport);
1247
1248 ocs_ddump_endsection(textbuf, "sport", sport->index);
1249
1250 return retval;
1251 }
1252
1253 void
1254 ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *object)
1255 {
1256 ocs_node_t *node;
1257 ocs_sport_t *sport = (ocs_sport_t *)object;
1258
1259 ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1260
1261 /* Add my status values to textbuf */
1262 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
1263 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
1264 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "index");
1265 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
1266 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "is_vport");
1267 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_ini");
1268 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_tgt");
1269 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p");
1270 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_winner");
1271 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_port_id");
1272 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_remote_port_id");
1273 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
1274 ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
1275
1276 if (ocs_sport_lock_try(sport) == TRUE) {
1277 /* If we get here, then we are holding the sport lock */
1278 ocs_list_foreach(&sport->node_list, node) {
1279 if ((node->mgmt_functions) && (node->mgmt_functions->get_list_handler)) {
1280 node->mgmt_functions->get_list_handler(textbuf, node);
1281 }
1282 }
1283 ocs_sport_unlock(sport);
1284 }
1285
1286 ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1287 }
1288
1289 int
1290 ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
1291 {
1292 ocs_node_t *node;
1293 ocs_sport_t *sport = (ocs_sport_t *)object;
1294 char qualifier[80];
1295 int retval = -1;
1296
1297 ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1298
1299 snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
1300
1301 /* If it doesn't start with my qualifier I don't know what to do with it */
1302 if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1303 char *unqualified_name = name + strlen(qualifier) +1;
1304
1305 /* See if it's a value I can supply */
1306 if (ocs_strcmp(unqualified_name, "indicator") == 0) {
1307 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
1308 retval = 0;
1309 } else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
1310 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
1311 retval = 0;
1312 } else if (ocs_strcmp(unqualified_name, "index") == 0) {
1313 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
1314 retval = 0;
1315 } else if (ocs_strcmp(unqualified_name, "display_name") == 0) {
1316 ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
1317 retval = 0;
1318 } else if (ocs_strcmp(unqualified_name, "is_vport") == 0) {
1319 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport", sport->is_vport);
1320 retval = 0;
1321 } else if (ocs_strcmp(unqualified_name, "enable_ini") == 0) {
1322 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini", sport->enable_ini);
1323 retval = 0;
1324 } else if (ocs_strcmp(unqualified_name, "enable_tgt") == 0) {
1325 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt", sport->enable_tgt);
1326 retval = 0;
1327 } else if (ocs_strcmp(unqualified_name, "p2p_winner") == 0) {
1328 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner", sport->p2p_winner);
1329 retval = 0;
1330 } else if (ocs_strcmp(unqualified_name, "p2p_port_id") == 0) {
1331 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
1332 retval = 0;
1333 } else if (ocs_strcmp(unqualified_name, "p2p_remote_port_id") == 0) {
1334 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
1335 retval = 0;
1336 } else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
1337 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx", (unsigned long long)sport->wwpn);
1338 retval = 0;
1339 } else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
1340 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
1341 retval = 0;
1342 } else {
1343 /* If I didn't know the value of this status, pass the request to each of my children */
1344 ocs_sport_lock(sport);
1345 ocs_list_foreach(&sport->node_list, node) {
1346 if ((node->mgmt_functions) && (node->mgmt_functions->get_handler)) {
1347 retval = node->mgmt_functions->get_handler(textbuf, qualifier, name, node);
1348 }
1349
1350 if (retval == 0) {
1351 break;
1352 }
1353 }
1354 ocs_sport_unlock(sport);
1355 }
1356 }
1357
1358 ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1359
1360 return retval;
1361 }
1362
1363 void
1364 ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *object)
1365 {
1366 ocs_node_t *node;
1367 ocs_sport_t *sport = (ocs_sport_t *)object;
1368
1369 ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1370
1371 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
1372 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
1373 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
1374 ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
1375 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport", sport->is_vport);
1376 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini", sport->enable_ini);
1377 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt", sport->enable_tgt);
1378 ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner", sport->p2p_winner);
1379 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
1380 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
1381 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx" , (unsigned long long)sport->wwpn);
1382 ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
1383
1384 ocs_sport_lock(sport);
1385 ocs_list_foreach(&sport->node_list, node) {
1386 if ((node->mgmt_functions) && (node->mgmt_functions->get_all_handler)) {
1387 node->mgmt_functions->get_all_handler(textbuf, node);
1388 }
1389 }
1390 ocs_sport_unlock(sport);
1391
1392 ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1393 }
1394
1395 int
1396 ocs_mgmt_sport_set(char *parent, char *name, char *value, void *object)
1397 {
1398 ocs_node_t *node;
1399 ocs_sport_t *sport = (ocs_sport_t *)object;
1400 char qualifier[80];
1401 int retval = -1;
1402
1403 snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
1404
1405 /* If it doesn't start with my qualifier I don't know what to do with it */
1406 if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1407 /* The sport has no settable values. Pass the request to each node. */
1408
1409 ocs_sport_lock(sport);
1410 ocs_list_foreach(&sport->node_list, node) {
1411 if ((node->mgmt_functions) && (node->mgmt_functions->set_handler)) {
1412 retval = node->mgmt_functions->set_handler(qualifier, name, value, node);
1413 }
1414 if (retval == 0) {
1415 break;
1416 }
1417 }
1418 ocs_sport_unlock(sport);
1419 }
1420
1421 return retval;
1422 }
1423
1424 int
1425 ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
1426 void *arg_out, uint32_t arg_out_length, void *object)
1427 {
1428 ocs_node_t *node;
1429 ocs_sport_t *sport = (ocs_sport_t *)object;
1430 char qualifier[80];
1431 int retval = -1;
1432
1433 snprintf(qualifier, sizeof(qualifier), "%s.sport%d", parent, sport->instance_index);
1434
1435 /* If it doesn't start with my qualifier I don't know what to do with it */
1436 if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
1437 /* See if it's an action I can perform */
1438
1439 /* if (ocs_strcmp ....
1440 * {
1441 * } else
1442 */
1443
1444 {
1445 /* If I didn't know how to do this action, pass the request to each of my children */
1446 ocs_sport_lock(sport);
1447 ocs_list_foreach(&sport->node_list, node) {
1448 if ((node->mgmt_functions) && (node->mgmt_functions->exec_handler)) {
1449 retval = node->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
1450 arg_out, arg_out_length, node);
1451 }
1452
1453 if (retval == 0) {
1454 break;
1455 }
1456 }
1457 ocs_sport_unlock(sport);
1458 }
1459 }
1460
1461 return retval;
1462 }
1463
1464 /**
1465 * @brief Save the virtual port's parameters.
1466 *
1467 * @par Description
1468 * The information required to restore a virtual port is saved.
1469 *
1470 * @param sport Pointer to the sport context.
1471 *
1472 * @return None.
1473 */
1474
1475 static void
1476 ocs_vport_update_spec(ocs_sport_t *sport)
1477 {
1478 ocs_t *ocs = sport->ocs;
1479 ocs_xport_t *xport = ocs->xport;
1480 ocs_vport_spec_t *vport;
1481
1482 ocs_device_lock(ocs);
1483 ocs_list_foreach(&xport->vport_list, vport) {
1484 if (vport->sport == sport) {
1485 vport->wwnn = sport->wwnn;
1486 vport->wwpn = sport->wwpn;
1487 vport->tgt_data = sport->tgt_data;
1488 vport->ini_data = sport->ini_data;
1489 break;
1490 }
1491 }
1492 ocs_device_unlock(ocs);
1493 }
1494
1495 /**
1496 * @brief Create a saved vport entry.
1497 *
1498 * A saved vport entry is added to the vport list, which is restored following
1499 * a link up. This function is used to allow vports to be created the first time
1500 * the link comes up without having to go through the ioctl() API.
1501 *
1502 * @param ocs Pointer to device context.
1503 * @param wwnn World wide node name (may be zero for auto-select).
1504 * @param wwpn World wide port name (may be zero for auto-select).
1505 * @param fc_id Requested port ID (used in fabric emulation mode).
1506 * @param enable_ini TRUE if vport is to be an initiator port.
1507 * @param enable_tgt TRUE if vport is to be a target port.
1508 * @param tgt_data Pointer to target specific data.
1509 * @param ini_data Pointer to initiator specific data.
1510 *
1511 * @return None.
1512 */
1513
1514 int8_t
1515 ocs_vport_create_spec(ocs_t *ocs, uint64_t wwnn, uint64_t wwpn, uint32_t fc_id, uint32_t enable_ini, uint32_t enable_tgt, void *tgt_data, void *ini_data)
1516 {
1517 ocs_xport_t *xport = ocs->xport;
1518 ocs_vport_spec_t *vport;
1519
1520 /* Walk the vport_list and return failure if a valid vport entry (one with a matching non-zero WWPN
1521 and WWNN) has already been created */
1522 ocs_list_foreach(&xport->vport_list, vport) {
1523 if ((wwpn && (vport->wwpn == wwpn)) && (wwnn && (vport->wwnn == wwnn))) {
1524 ocs_log_test(ocs, "Failed: VPORT %016llx %016llx already allocated\n",
1525 (unsigned long long)wwnn, (unsigned long long)wwpn);
1526 return -1;
1527 }
1528 }
1529
1530 vport = ocs_malloc(ocs, sizeof(*vport), OCS_M_ZERO | OCS_M_NOWAIT);
1531 if (vport == NULL) {
1532 ocs_log_err(ocs, "ocs_malloc failed\n");
1533 return -1;
1534 }
1535
1536 vport->wwnn = wwnn;
1537 vport->wwpn = wwpn;
1538 vport->fc_id = fc_id;
1539 vport->domain_instance = 0; /*TODO: may need to change this */
1540 vport->enable_tgt = enable_tgt;
1541 vport->enable_ini = enable_ini;
1542 vport->tgt_data = tgt_data;
1543 vport->ini_data = ini_data;
1544
1545 ocs_device_lock(ocs);
1546 ocs_list_add_tail(&xport->vport_list, vport);
1547 ocs_device_unlock(ocs);
1548 return 0;
1549 }
1550
1551 /* node group api */
1552
1553 /**
1554 * @brief Perform the AND operation on source vectors.
1555 *
1556 * @par Description
1557 * Performs an AND operation on the 8-bit values in source vectors @c b and @c c.
1558 * The resulting value is stored in @c a.
1559 *
1560 * @param a Destination-byte vector.
1561 * @param b Source-byte vector.
1562 * @param c Source-byte vector.
1563 * @param n Byte count.
1564 *
1565 * @return None.
1566 */
1567
1568 static void
1569 and8(uint8_t *a, uint8_t *b, uint8_t *c, uint32_t n)
1570 {
1571 uint32_t i;
1572
1573 for (i = 0; i < n; i ++) {
1574 *a = *b & *c;
1575 a++;
1576 b++;
1577 c++;
1578 }
1579 }
1580
1581 /**
1582 * @brief Service parameters mask data.
1583 */
1584 static fc_sparms_t sparms_cmp_mask = {
1585 0, /*uint32_t command_code: 8, */
1586 0, /* resv1: 24; */
1587 {~0, ~0, ~0, ~0}, /* uint32_t common_service_parameters[4]; */
1588 0, /* uint32_t port_name_hi; */
1589 0, /* uint32_t port_name_lo; */
1590 0, /* uint32_t node_name_hi; */
1591 0, /* uint32_t node_name_lo; */
1592 {~0, ~0, ~0, ~0}, /* uint32_t class1_service_parameters[4]; */
1593 {~0, ~0, ~0, ~0}, /* uint32_t class2_service_parameters[4]; */
1594 {~0, ~0, ~0, ~0}, /* uint32_t class3_service_parameters[4]; */
1595 {~0, ~0, ~0, ~0}, /* uint32_t class4_service_parameters[4]; */
1596 {~0, ~0, ~0, ~0}}; /* uint32_t vendor_version_level[4]; */
1597
1598 /**
1599 * @brief Compare service parameters.
1600 *
1601 * @par Description
1602 * Returns 0 if the two service parameters are the same, excluding the port/node name
1603 * elements.
1604 *
1605 * @param sp1 Pointer to service parameters 1.
1606 * @param sp2 Pointer to service parameters 2.
1607 *
1608 * @return Returns 0 if parameters match; otherwise, returns a positive or negative value,
1609 * depending on the arithmetic magnitude of the first mismatching byte.
1610 */
1611
1612 int
1613 ocs_sparm_cmp(uint8_t *sp1, uint8_t *sp2)
1614 {
1615 int i;
1616 int v;
1617 uint8_t *sp3 = (uint8_t*) &sparms_cmp_mask;
1618
1619 for (i = 0; i < OCS_SERVICE_PARMS_LENGTH; i ++) {
1620 v = ((int)(sp1[i] & sp3[i])) - ((int)(sp2[i] & sp3[i]));
1621 if (v) {
1622 break;
1623 }
1624 }
1625 return v;
1626 }
1627
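/*
 * Illustrative note: because the port and node name words are zeroed in
 * sparms_cmp_mask, two remote nodes that logged in with identical common and
 * class service parameters compare equal here and can share a node group
 * directory entry. ocs_node_group_dir_find() below relies on this compare; a
 * caller-side sketch (with "login_sparms" as a placeholder for a node's login
 * payload) looks like:
 *
 *	ocs_node_group_dir_t *dir;
 *
 *	dir = ocs_node_group_dir_find(sport, login_sparms);
 *	if (dir == NULL) {
 *		dir = ocs_node_group_dir_alloc(sport, login_sparms);
 *	}
 */
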
1628 /**
1629 * @brief Allocate a node group directory entry.
1630 *
1631 * @par Description
1632 * A node group directory entry is allocated, initialized, and added to the sport's
1633 * node group directory list.
1634 *
1635 * @param sport Pointer to the sport object.
1636 * @param sparms Pointer to the service parameters.
1637 *
1638 * @return Returns a pointer to the allocated ocs_node_group_dir_t; or NULL.
1639 */
1640
1641 ocs_node_group_dir_t *
1642 ocs_node_group_dir_alloc(ocs_sport_t *sport, uint8_t *sparms)
1643 {
1644 ocs_node_group_dir_t *node_group_dir;
1645
1646 node_group_dir = ocs_malloc(sport->ocs, sizeof(*node_group_dir), OCS_M_ZERO | OCS_M_NOWAIT);
1647 if (node_group_dir != NULL) {
1648 node_group_dir->sport = sport;
1649
1650 ocs_lock(&sport->node_group_lock);
1651 node_group_dir->instance_index = sport->node_group_dir_next_instance++;
1652 and8(node_group_dir->service_params, sparms, (uint8_t*)&sparms_cmp_mask, OCS_SERVICE_PARMS_LENGTH);
1653 ocs_list_init(&node_group_dir->node_group_list, ocs_remote_node_group_t, link);
1654
1655 node_group_dir->node_group_list_count = 0;
1656 node_group_dir->next_idx = 0;
1657 ocs_list_add_tail(&sport->node_group_dir_list, node_group_dir);
1658 ocs_unlock(&sport->node_group_lock);
1659
1660 ocs_log_debug(sport->ocs, "[%s] [%d] allocating node group directory\n", sport->display_name,
1661 node_group_dir->instance_index);
1662 }
1663 return node_group_dir;
1664 }
1665
1666 /**
1667 * @brief Free a node group directory entry.
1668 *
1669 * @par Description
1670 * The node group directory entry @c node_group_dir is removed
1671 * from the sport's node group directory list and freed.
1672 *
1673 * @param node_group_dir Pointer to the node group directory entry.
1674 *
1675 * @return None.
1676 */
1677
1678 void
1679 ocs_node_group_dir_free(ocs_node_group_dir_t *node_group_dir)
1680 {
1681 ocs_sport_t *sport;
1682 if (node_group_dir != NULL) {
1683 sport = node_group_dir->sport;
1684 ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group directory\n", sport->display_name,
1685 node_group_dir->instance_index);
1686 ocs_lock(&sport->node_group_lock);
1687 if (!ocs_list_empty(&node_group_dir->node_group_list)) {
1688 ocs_log_test(sport->ocs, "[%s] WARNING: node group list not empty\n", sport->display_name);
1689 }
1690 ocs_list_remove(&sport->node_group_dir_list, node_group_dir);
1691 ocs_unlock(&sport->node_group_lock);
1692 ocs_free(sport->ocs, node_group_dir, sizeof(*node_group_dir));
1693 }
1694 }
1695
1696 /**
1697 * @brief Find a matching node group directory entry.
1698 *
1699 * @par Description
1700 * The sport's node group directory list is searched for a matching set of
1701 * service parameters. The first matching entry is returned; otherwise
1702 * NULL is returned.
1703 *
1704 * @param sport Pointer to the sport object.
1705 * @param sparms Pointer to the sparams to match.
1706 *
1707 * @return Returns a pointer to the first matching entry found; or NULL.
1708 */
1709
1710 ocs_node_group_dir_t *
1711 ocs_node_group_dir_find(ocs_sport_t *sport, uint8_t *sparms)
1712 {
1713 ocs_node_group_dir_t *node_dir = NULL;
1714
1715 ocs_lock(&sport->node_group_lock);
1716 ocs_list_foreach(&sport->node_group_dir_list, node_dir) {
1717 if (ocs_sparm_cmp(sparms, node_dir->service_params) == 0) {
1718 ocs_unlock(&sport->node_group_lock);
1719 return node_dir;
1720 }
1721 }
1722 ocs_unlock(&sport->node_group_lock);
1723 return NULL;
1724 }
1725
1726 /**
1727 * @brief Allocate a remote node group object.
1728 *
1729 * @par Description
1730 * A remote node group object is allocated, initialized, and placed on the node group
1731 * list of @c node_group_dir. The HW remote node group @b alloc function is called.
1732 *
1733 * @param node_group_dir Pointer to the node group directory.
1734 *
1735 * @return Returns a pointer to the allocated remote node group object; or NULL.
1736 */
1737
1738 ocs_remote_node_group_t *
1739 ocs_remote_node_group_alloc(ocs_node_group_dir_t *node_group_dir)
1740 {
1741 ocs_t *ocs;
1742 ocs_sport_t *sport;
1743 ocs_remote_node_group_t *node_group;
1744 ocs_hw_rtn_e hrc;
1745
1746 ocs_assert(node_group_dir, NULL);
1747 ocs_assert(node_group_dir->sport, NULL);
1748 ocs_assert(node_group_dir->sport->ocs, NULL);
1749
1750 sport = node_group_dir->sport;
1751 ocs = sport->ocs;
1752
1753 node_group = ocs_malloc(ocs, sizeof(*node_group), OCS_M_ZERO | OCS_M_NOWAIT);
1754 if (node_group != NULL) {
1755 /* set pointer to node group directory */
1756 node_group->node_group_dir = node_group_dir;
1757
1758 ocs_lock(&node_group_dir->sport->node_group_lock);
1759 node_group->instance_index = sport->node_group_next_instance++;
1760 ocs_unlock(&node_group_dir->sport->node_group_lock);
1761
1762 /* invoke HW node group initialization */
1763 hrc = ocs_hw_node_group_alloc(&ocs->hw, node_group);
1764 if (hrc != OCS_HW_RTN_SUCCESS) {
1765 ocs_log_err(ocs, "ocs_hw_node_group_alloc() failed: %d\n", hrc);
1766 ocs_free(ocs, node_group, sizeof(*node_group));
1767 return NULL;
1768 }
1769
1770 ocs_log_debug(ocs, "[%s] [%d] indicator x%03x allocating node group\n", sport->display_name,
1771 node_group->instance_index, node_group->indicator);
1772
1773 /* add to the node group directory entry node group list */
1774 ocs_lock(&node_group_dir->sport->node_group_lock);
1775 ocs_list_add_tail(&node_group_dir->node_group_list, node_group);
1776 node_group_dir->node_group_list_count ++;
1777 ocs_unlock(&node_group_dir->sport->node_group_lock);
1778 }
1779 return node_group;
1780 }
1781
1782 /**
1783 * @brief Free a remote node group object.
1784 *
1785 * @par Description
1786 * The remote node group object @c node_group is removed from its
1787 * node group directory entry and freed.
1788 *
1789 * @param node_group Pointer to the remote node group object.
1790 *
1791 * @return None.
1792 */
1793
1794 void
1795 ocs_remote_node_group_free(ocs_remote_node_group_t *node_group)
1796 {
1797 ocs_sport_t *sport;
1798 ocs_node_group_dir_t *node_group_dir;
1799
1800 if (node_group != NULL) {
1801 ocs_assert(node_group->node_group_dir);
1802 ocs_assert(node_group->node_group_dir->sport);
1803 ocs_assert(node_group->node_group_dir->sport->ocs);
1804
1805 node_group_dir = node_group->node_group_dir;
1806 sport = node_group_dir->sport;
1807
1808 ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group\n", sport->display_name, node_group->instance_index);
1809
1810 /* Remove from node group directory node group list */
1811 ocs_lock(&sport->node_group_lock);
1812 ocs_list_remove(&node_group_dir->node_group_list, node_group);
1813 node_group_dir->node_group_list_count --;
1814 /* TODO: note that we're going to have the node_group_dir entry persist forever ... we could delete it if
1815 * the group_list_count goes to zero (or the linked list is empty) */
1816 ocs_unlock(&sport->node_group_lock);
1817 ocs_free(sport->ocs, node_group, sizeof(*node_group));
1818 }
1819 }
1820
1821 /**
1822 * @brief Initialize a node for high login mode.
1823 *
1824 * @par Description
1825 * The @c node is initialized for high login mode. The following steps are performed:
1826 * 1. The sport's node group directory is searched for a matching set of service parameters.
1827 * 2. If a matching set is not found, a node group directory entry is allocated.
1828 * 3. If fewer than @c hlm_group_size remote node group objects are present in the
1829 * node group directory, a new remote node group object is allocated and added to the list.
1830 * 4. A remote node group object is selected, and the node is attached to the node group.
1831 *
1832 * @param node Pointer to the node.
1833 *
1834 * @return Returns 0 on success, or a negative error value on failure.
1835 */
1836
1837 int
1838 ocs_node_group_init(ocs_node_t *node)
1839 {
1840 ocs_t *ocs;
1841 ocs_sport_t *sport;
1842 ocs_node_group_dir_t *node_group_dir;
1843 ocs_remote_node_group_t *node_group;
1844 ocs_hw_rtn_e hrc;
1845
1846 ocs_assert(node, -1);
1847 ocs_assert(node->sport, -1);
1848 ocs_assert(node->ocs, -1);
1849
1850 ocs = node->ocs;
1851 sport = node->sport;
1852
1853 ocs_assert(ocs->enable_hlm, -1);
1854
1855 /* see if there's a node group directory allocated for this service parameter set */
1856 node_group_dir = ocs_node_group_dir_find(sport, node->service_params);
1857 if (node_group_dir == NULL) {
1858 /* not found, so allocate one */
1859 node_group_dir = ocs_node_group_dir_alloc(sport, node->service_params);
1860 if (node_group_dir == NULL) {
1861 /* node group directory allocation failed ... can't continue, however,
1862 * the node will be allocated with a normal (not shared) RPI
1863 */
1864 ocs_log_err(ocs, "ocs_node_group_dir_alloc() failed\n");
1865 return -1;
1866 }
1867 }
1868
1869 /* check to see if we've allocated hlm_group_size's worth of node group structures for this
1870 * directory entry, if not, then allocate and use a new one, otherwise pick the next one.
1871 */
1872 ocs_lock(&node->sport->node_group_lock);
1873 if (node_group_dir->node_group_list_count < ocs->hlm_group_size) {
1874 ocs_unlock(&node->sport->node_group_lock);
1875 node_group = ocs_remote_node_group_alloc(node_group_dir);
1876 if (node_group == NULL) {
1877 ocs_log_err(ocs, "ocs_remote_node_group_alloc() failed\n");
1878 return -1;
1879 }
1880 ocs_lock(&node->sport->node_group_lock);
1881 } else {
1882 uint32_t idx = 0;
1883
1884 ocs_list_foreach(&node_group_dir->node_group_list, node_group) {
1885 if (idx >= ocs->hlm_group_size) {
1886 ocs_log_err(node->ocs, "assertion failed: idx >= ocs->hlm_group_size\n");
1887 ocs_unlock(&node->sport->node_group_lock);
1888 return -1;
1889 }
1890
1891 if (idx == node_group_dir->next_idx) {
1892 break;
1893 }
1894 idx ++;
1895 }
1896 if (idx == ocs->hlm_group_size) {
1897 node_group = ocs_list_get_head(&node_group_dir->node_group_list);
1898 }
1899 if (++node_group_dir->next_idx >= node_group_dir->node_group_list_count) {
1900 node_group_dir->next_idx = 0;
1901 }
1902 }
1903 ocs_unlock(&node->sport->node_group_lock);
1904
1905 /* Initialize a pointer in the node back to the node group */
1906 node->node_group = node_group;
1907
1908 /* Join this node into the group */
1909 hrc = ocs_hw_node_group_attach(&ocs->hw, node_group, &node->rnode);
1910
1911 return (hrc == OCS_HW_RTN_SUCCESS) ? 0 : -1;
1912 }